early-access version 1255

pineappleEA
2020-12-28 15:15:37 +00:00
parent 84b39492d1
commit 78b48028e1
6254 changed files with 1868140 additions and 0 deletions

71
src/audio_core/CMakeLists.txt Executable file
View File

@@ -0,0 +1,71 @@
add_library(audio_core STATIC
algorithm/filter.cpp
algorithm/filter.h
algorithm/interpolate.cpp
algorithm/interpolate.h
audio_out.cpp
audio_out.h
audio_renderer.cpp
audio_renderer.h
behavior_info.cpp
behavior_info.h
buffer.h
codec.cpp
codec.h
command_generator.cpp
command_generator.h
common.h
effect_context.cpp
effect_context.h
info_updater.cpp
info_updater.h
memory_pool.cpp
memory_pool.h
mix_context.cpp
mix_context.h
null_sink.h
sink.h
sink_context.cpp
sink_context.h
sink_details.cpp
sink_details.h
sink_stream.h
splitter_context.cpp
splitter_context.h
stream.cpp
stream.h
time_stretch.cpp
time_stretch.h
voice_context.cpp
voice_context.h
$<$<BOOL:${ENABLE_CUBEB}>:cubeb_sink.cpp cubeb_sink.h>
)
create_target_directory_groups(audio_core)
if (NOT MSVC)
target_compile_options(audio_core PRIVATE
-Werror=conversion
-Werror=ignored-qualifiers
-Werror=implicit-fallthrough
-Werror=reorder
-Werror=sign-compare
-Werror=shadow
-Werror=unused-parameter
-Werror=unused-variable
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
-Wno-sign-conversion
)
endif()
target_link_libraries(audio_core PUBLIC common core)
target_link_libraries(audio_core PRIVATE SoundTouch)
if(ENABLE_CUBEB)
target_link_libraries(audio_core PRIVATE cubeb)
target_compile_definitions(audio_core PRIVATE -DHAVE_CUBEB=1)
endif()

80
src/audio_core/algorithm/filter.cpp Executable file
View File

@@ -0,0 +1,80 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#define _USE_MATH_DEFINES
#include <algorithm>
#include <array>
#include <cmath>
#include <vector>
#include "audio_core/algorithm/filter.h"
#include "common/common_types.h"
namespace AudioCore {
Filter Filter::LowPass(double cutoff, double Q) {
const double w0 = 2.0 * M_PI * cutoff;
const double sin_w0 = std::sin(w0);
const double cos_w0 = std::cos(w0);
const double alpha = sin_w0 / (2 * Q);
const double a0 = 1 + alpha;
const double a1 = -2.0 * cos_w0;
const double a2 = 1 - alpha;
const double b0 = 0.5 * (1 - cos_w0);
const double b1 = 1.0 * (1 - cos_w0);
const double b2 = 0.5 * (1 - cos_w0);
return {a0, a1, a2, b0, b1, b2};
}
Filter::Filter() : Filter(1.0, 0.0, 0.0, 1.0, 0.0, 0.0) {}
Filter::Filter(double a0_, double a1_, double a2_, double b0_, double b1_, double b2_)
: a1(a1_ / a0_), a2(a2_ / a0_), b0(b0_ / a0_), b1(b1_ / a0_), b2(b2_ / a0_) {}
void Filter::Process(std::vector<s16>& signal) {
const std::size_t num_frames = signal.size() / 2;
for (std::size_t i = 0; i < num_frames; i++) {
std::rotate(in.begin(), in.end() - 1, in.end());
std::rotate(out.begin(), out.end() - 1, out.end());
for (std::size_t ch = 0; ch < channel_count; ch++) {
in[0][ch] = signal[i * channel_count + ch];
out[0][ch] = b0 * in[0][ch] + b1 * in[1][ch] + b2 * in[2][ch] - a1 * out[1][ch] -
a2 * out[2][ch];
signal[i * 2 + ch] = static_cast<s16>(std::clamp(out[0][ch], -32768.0, 32767.0));
}
}
}
/// Calculates the appropriate Q for each biquad in a cascading filter.
/// @param total_count The total number of biquads to be cascaded.
/// @param index 0-index of the biquad to calculate the Q value for.
static double CascadingBiquadQ(std::size_t total_count, std::size_t index) {
const auto pole =
M_PI * static_cast<double>(2 * index + 1) / (4.0 * static_cast<double>(total_count));
return 1.0 / (2.0 * std::cos(pole));
}
CascadingFilter CascadingFilter::LowPass(double cutoff, std::size_t cascade_size) {
std::vector<Filter> cascade(cascade_size);
for (std::size_t i = 0; i < cascade_size; i++) {
cascade[i] = Filter::LowPass(cutoff, CascadingBiquadQ(cascade_size, i));
}
return CascadingFilter{std::move(cascade)};
}
CascadingFilter::CascadingFilter() = default;
CascadingFilter::CascadingFilter(std::vector<Filter> filters_) : filters(std::move(filters_)) {}
void CascadingFilter::Process(std::vector<s16>& signal) {
for (auto& filter : filters) {
filter.Process(signal);
}
}
} // namespace AudioCore

62
src/audio_core/algorithm/filter.h Executable file
View File

@@ -0,0 +1,62 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <vector>
#include "common/common_types.h"
namespace AudioCore {
/// Digital biquad filter:
///
/// H(z) = (b0 + b1 z^-1 + b2 z^-2) / (a0 + a1 z^-1 + a2 z^-2)
class Filter {
public:
/// Creates a low-pass filter.
/// @param cutoff Determines the cutoff frequency. A value from 0.0 to 1.0.
/// @param Q Determines the quality factor of this filter.
static Filter LowPass(double cutoff, double Q = 0.7071);
/// Passthrough filter.
Filter();
Filter(double a0_, double a1_, double a2_, double b0_, double b1_, double b2_);
void Process(std::vector<s16>& signal);
private:
static constexpr std::size_t channel_count = 2;
/// Coefficients are in normalized form (a0 = 1.0).
double a1, a2, b0, b1, b2;
/// Input history (zero-initialized so the first Process() call does not read indeterminate values)
std::array<std::array<double, channel_count>, 3> in{};
/// Output history (zero-initialized likewise)
std::array<std::array<double, channel_count>, 3> out{};
};
/// Cascade filters to build up higher-order filters from lower-order ones.
class CascadingFilter {
public:
/// Creates a cascading low-pass filter.
/// @param cutoff Determines the cutoff frequency. A value from 0.0 to 1.0.
/// @param cascade_size Number of biquads in cascade.
static CascadingFilter LowPass(double cutoff, std::size_t cascade_size);
/// Passthrough.
CascadingFilter();
explicit CascadingFilter(std::vector<Filter> filters_);
void Process(std::vector<s16>& signal);
private:
std::vector<Filter> filters;
};
} // namespace AudioCore
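As a usage sketch (not part of the diff above): the CascadingFilter API can be driven roughly like this, with the 0.25 cutoff and 3-biquad cascade chosen purely for illustration.

    #include <vector>

    #include "audio_core/algorithm/filter.h"
    #include "common/common_types.h"

    // Build a 3-biquad cascading low-pass filter at a quarter of the usable band
    // and run it in place over an interleaved stereo s16 signal.
    void FilterExample(std::vector<s16>& stereo_signal) {
        auto filter = AudioCore::CascadingFilter::LowPass(0.25, 3);
        filter.Process(stereo_signal);
    }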

233
src/audio_core/algorithm/interpolate.cpp Executable file
View File

@@ -0,0 +1,233 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#define _USE_MATH_DEFINES
#include <algorithm>
#include <climits>
#include <cmath>
#include <vector>
#include "audio_core/algorithm/interpolate.h"
#include "common/common_types.h"
#include "common/logging/log.h"
namespace AudioCore {
constexpr std::array<s16, 512> curve_lut0{
6600, 19426, 6722, 3, 6479, 19424, 6845, 9, 6359, 19419, 6968, 15, 6239,
19412, 7093, 22, 6121, 19403, 7219, 28, 6004, 19391, 7345, 34, 5888, 19377,
7472, 41, 5773, 19361, 7600, 48, 5659, 19342, 7728, 55, 5546, 19321, 7857,
62, 5434, 19298, 7987, 69, 5323, 19273, 8118, 77, 5213, 19245, 8249, 84,
5104, 19215, 8381, 92, 4997, 19183, 8513, 101, 4890, 19148, 8646, 109, 4785,
19112, 8780, 118, 4681, 19073, 8914, 127, 4579, 19031, 9048, 137, 4477, 18988,
9183, 147, 4377, 18942, 9318, 157, 4277, 18895, 9454, 168, 4179, 18845, 9590,
179, 4083, 18793, 9726, 190, 3987, 18738, 9863, 202, 3893, 18682, 10000, 215,
3800, 18624, 10137, 228, 3709, 18563, 10274, 241, 3618, 18500, 10411, 255, 3529,
18436, 10549, 270, 3441, 18369, 10687, 285, 3355, 18300, 10824, 300, 3269, 18230,
10962, 317, 3186, 18157, 11100, 334, 3103, 18082, 11238, 351, 3022, 18006, 11375,
369, 2942, 17927, 11513, 388, 2863, 17847, 11650, 408, 2785, 17765, 11788, 428,
2709, 17681, 11925, 449, 2635, 17595, 12062, 471, 2561, 17507, 12198, 494, 2489,
17418, 12334, 517, 2418, 17327, 12470, 541, 2348, 17234, 12606, 566, 2280, 17140,
12741, 592, 2213, 17044, 12876, 619, 2147, 16946, 13010, 647, 2083, 16846, 13144,
675, 2020, 16745, 13277, 704, 1958, 16643, 13409, 735, 1897, 16539, 13541, 766,
1838, 16434, 13673, 798, 1780, 16327, 13803, 832, 1723, 16218, 13933, 866, 1667,
16109, 14062, 901, 1613, 15998, 14191, 937, 1560, 15885, 14318, 975, 1508, 15772,
14445, 1013, 1457, 15657, 14571, 1052, 1407, 15540, 14695, 1093, 1359, 15423, 14819,
1134, 1312, 15304, 14942, 1177, 1266, 15185, 15064, 1221, 1221, 15064, 15185, 1266,
1177, 14942, 15304, 1312, 1134, 14819, 15423, 1359, 1093, 14695, 15540, 1407, 1052,
14571, 15657, 1457, 1013, 14445, 15772, 1508, 975, 14318, 15885, 1560, 937, 14191,
15998, 1613, 901, 14062, 16109, 1667, 866, 13933, 16218, 1723, 832, 13803, 16327,
1780, 798, 13673, 16434, 1838, 766, 13541, 16539, 1897, 735, 13409, 16643, 1958,
704, 13277, 16745, 2020, 675, 13144, 16846, 2083, 647, 13010, 16946, 2147, 619,
12876, 17044, 2213, 592, 12741, 17140, 2280, 566, 12606, 17234, 2348, 541, 12470,
17327, 2418, 517, 12334, 17418, 2489, 494, 12198, 17507, 2561, 471, 12062, 17595,
2635, 449, 11925, 17681, 2709, 428, 11788, 17765, 2785, 408, 11650, 17847, 2863,
388, 11513, 17927, 2942, 369, 11375, 18006, 3022, 351, 11238, 18082, 3103, 334,
11100, 18157, 3186, 317, 10962, 18230, 3269, 300, 10824, 18300, 3355, 285, 10687,
18369, 3441, 270, 10549, 18436, 3529, 255, 10411, 18500, 3618, 241, 10274, 18563,
3709, 228, 10137, 18624, 3800, 215, 10000, 18682, 3893, 202, 9863, 18738, 3987,
190, 9726, 18793, 4083, 179, 9590, 18845, 4179, 168, 9454, 18895, 4277, 157,
9318, 18942, 4377, 147, 9183, 18988, 4477, 137, 9048, 19031, 4579, 127, 8914,
19073, 4681, 118, 8780, 19112, 4785, 109, 8646, 19148, 4890, 101, 8513, 19183,
4997, 92, 8381, 19215, 5104, 84, 8249, 19245, 5213, 77, 8118, 19273, 5323,
69, 7987, 19298, 5434, 62, 7857, 19321, 5546, 55, 7728, 19342, 5659, 48,
7600, 19361, 5773, 41, 7472, 19377, 5888, 34, 7345, 19391, 6004, 28, 7219,
19403, 6121, 22, 7093, 19412, 6239, 15, 6968, 19419, 6359, 9, 6845, 19424,
6479, 3, 6722, 19426, 6600};
constexpr std::array<s16, 512> curve_lut1{
-68, 32639, 69, -5, -200, 32630, 212, -15, -328, 32613, 359, -26, -450,
32586, 512, -36, -568, 32551, 669, -47, -680, 32507, 832, -58, -788, 32454,
1000, -69, -891, 32393, 1174, -80, -990, 32323, 1352, -92, -1084, 32244, 1536,
-103, -1173, 32157, 1724, -115, -1258, 32061, 1919, -128, -1338, 31956, 2118, -140,
-1414, 31844, 2322, -153, -1486, 31723, 2532, -167, -1554, 31593, 2747, -180, -1617,
31456, 2967, -194, -1676, 31310, 3192, -209, -1732, 31157, 3422, -224, -1783, 30995,
3657, -240, -1830, 30826, 3897, -256, -1874, 30649, 4143, -272, -1914, 30464, 4393,
-289, -1951, 30272, 4648, -307, -1984, 30072, 4908, -325, -2014, 29866, 5172, -343,
-2040, 29652, 5442, -362, -2063, 29431, 5716, -382, -2083, 29203, 5994, -403, -2100,
28968, 6277, -424, -2114, 28727, 6565, -445, -2125, 28480, 6857, -468, -2133, 28226,
7153, -490, -2139, 27966, 7453, -514, -2142, 27700, 7758, -538, -2142, 27428, 8066,
-563, -2141, 27151, 8378, -588, -2136, 26867, 8694, -614, -2130, 26579, 9013, -641,
-2121, 26285, 9336, -668, -2111, 25987, 9663, -696, -2098, 25683, 9993, -724, -2084,
25375, 10326, -753, -2067, 25063, 10662, -783, -2049, 24746, 11000, -813, -2030, 24425,
11342, -844, -2009, 24100, 11686, -875, -1986, 23771, 12033, -907, -1962, 23438, 12382,
-939, -1937, 23103, 12733, -972, -1911, 22764, 13086, -1005, -1883, 22422, 13441, -1039,
-1855, 22077, 13798, -1072, -1825, 21729, 14156, -1107, -1795, 21380, 14516, -1141, -1764,
21027, 14877, -1176, -1732, 20673, 15239, -1211, -1700, 20317, 15602, -1246, -1667, 19959,
15965, -1282, -1633, 19600, 16329, -1317, -1599, 19239, 16694, -1353, -1564, 18878, 17058,
-1388, -1530, 18515, 17423, -1424, -1495, 18151, 17787, -1459, -1459, 17787, 18151, -1495,
-1424, 17423, 18515, -1530, -1388, 17058, 18878, -1564, -1353, 16694, 19239, -1599, -1317,
16329, 19600, -1633, -1282, 15965, 19959, -1667, -1246, 15602, 20317, -1700, -1211, 15239,
20673, -1732, -1176, 14877, 21027, -1764, -1141, 14516, 21380, -1795, -1107, 14156, 21729,
-1825, -1072, 13798, 22077, -1855, -1039, 13441, 22422, -1883, -1005, 13086, 22764, -1911,
-972, 12733, 23103, -1937, -939, 12382, 23438, -1962, -907, 12033, 23771, -1986, -875,
11686, 24100, -2009, -844, 11342, 24425, -2030, -813, 11000, 24746, -2049, -783, 10662,
25063, -2067, -753, 10326, 25375, -2084, -724, 9993, 25683, -2098, -696, 9663, 25987,
-2111, -668, 9336, 26285, -2121, -641, 9013, 26579, -2130, -614, 8694, 26867, -2136,
-588, 8378, 27151, -2141, -563, 8066, 27428, -2142, -538, 7758, 27700, -2142, -514,
7453, 27966, -2139, -490, 7153, 28226, -2133, -468, 6857, 28480, -2125, -445, 6565,
28727, -2114, -424, 6277, 28968, -2100, -403, 5994, 29203, -2083, -382, 5716, 29431,
-2063, -362, 5442, 29652, -2040, -343, 5172, 29866, -2014, -325, 4908, 30072, -1984,
-307, 4648, 30272, -1951, -289, 4393, 30464, -1914, -272, 4143, 30649, -1874, -256,
3897, 30826, -1830, -240, 3657, 30995, -1783, -224, 3422, 31157, -1732, -209, 3192,
31310, -1676, -194, 2967, 31456, -1617, -180, 2747, 31593, -1554, -167, 2532, 31723,
-1486, -153, 2322, 31844, -1414, -140, 2118, 31956, -1338, -128, 1919, 32061, -1258,
-115, 1724, 32157, -1173, -103, 1536, 32244, -1084, -92, 1352, 32323, -990, -80,
1174, 32393, -891, -69, 1000, 32454, -788, -58, 832, 32507, -680, -47, 669,
32551, -568, -36, 512, 32586, -450, -26, 359, 32613, -328, -15, 212, 32630,
-200, -5, 69, 32639, -68};
constexpr std::array<s16, 512> curve_lut2{
3195, 26287, 3329, -32, 3064, 26281, 3467, -34, 2936, 26270, 3608, -38, 2811,
26253, 3751, -42, 2688, 26230, 3897, -46, 2568, 26202, 4046, -50, 2451, 26169,
4199, -54, 2338, 26130, 4354, -58, 2227, 26085, 4512, -63, 2120, 26035, 4673,
-67, 2015, 25980, 4837, -72, 1912, 25919, 5004, -76, 1813, 25852, 5174, -81,
1716, 25780, 5347, -87, 1622, 25704, 5522, -92, 1531, 25621, 5701, -98, 1442,
25533, 5882, -103, 1357, 25440, 6066, -109, 1274, 25342, 6253, -115, 1193, 25239,
6442, -121, 1115, 25131, 6635, -127, 1040, 25018, 6830, -133, 967, 24899, 7027,
-140, 897, 24776, 7227, -146, 829, 24648, 7430, -153, 764, 24516, 7635, -159,
701, 24379, 7842, -166, 641, 24237, 8052, -174, 583, 24091, 8264, -181, 526,
23940, 8478, -187, 472, 23785, 8695, -194, 420, 23626, 8914, -202, 371, 23462,
9135, -209, 324, 23295, 9358, -215, 279, 23123, 9583, -222, 236, 22948, 9809,
-230, 194, 22769, 10038, -237, 154, 22586, 10269, -243, 117, 22399, 10501, -250,
81, 22208, 10735, -258, 47, 22015, 10970, -265, 15, 21818, 11206, -271, -16,
21618, 11444, -277, -44, 21415, 11684, -283, -71, 21208, 11924, -290, -97, 20999,
12166, -296, -121, 20786, 12409, -302, -143, 20571, 12653, -306, -163, 20354, 12898,
-311, -183, 20134, 13143, -316, -201, 19911, 13389, -321, -218, 19686, 13635, -325,
-234, 19459, 13882, -328, -248, 19230, 14130, -332, -261, 18998, 14377, -335, -273,
18765, 14625, -337, -284, 18531, 14873, -339, -294, 18295, 15121, -341, -302, 18057,
15369, -341, -310, 17817, 15617, -341, -317, 17577, 15864, -340, -323, 17335, 16111,
-340, -328, 17092, 16357, -338, -332, 16848, 16603, -336, -336, 16603, 16848, -332,
-338, 16357, 17092, -328, -340, 16111, 17335, -323, -340, 15864, 17577, -317, -341,
15617, 17817, -310, -341, 15369, 18057, -302, -341, 15121, 18295, -294, -339, 14873,
18531, -284, -337, 14625, 18765, -273, -335, 14377, 18998, -261, -332, 14130, 19230,
-248, -328, 13882, 19459, -234, -325, 13635, 19686, -218, -321, 13389, 19911, -201,
-316, 13143, 20134, -183, -311, 12898, 20354, -163, -306, 12653, 20571, -143, -302,
12409, 20786, -121, -296, 12166, 20999, -97, -290, 11924, 21208, -71, -283, 11684,
21415, -44, -277, 11444, 21618, -16, -271, 11206, 21818, 15, -265, 10970, 22015,
47, -258, 10735, 22208, 81, -250, 10501, 22399, 117, -243, 10269, 22586, 154,
-237, 10038, 22769, 194, -230, 9809, 22948, 236, -222, 9583, 23123, 279, -215,
9358, 23295, 324, -209, 9135, 23462, 371, -202, 8914, 23626, 420, -194, 8695,
23785, 472, -187, 8478, 23940, 526, -181, 8264, 24091, 583, -174, 8052, 24237,
641, -166, 7842, 24379, 701, -159, 7635, 24516, 764, -153, 7430, 24648, 829,
-146, 7227, 24776, 897, -140, 7027, 24899, 967, -133, 6830, 25018, 1040, -127,
6635, 25131, 1115, -121, 6442, 25239, 1193, -115, 6253, 25342, 1274, -109, 6066,
25440, 1357, -103, 5882, 25533, 1442, -98, 5701, 25621, 1531, -92, 5522, 25704,
1622, -87, 5347, 25780, 1716, -81, 5174, 25852, 1813, -76, 5004, 25919, 1912,
-72, 4837, 25980, 2015, -67, 4673, 26035, 2120, -63, 4512, 26085, 2227, -58,
4354, 26130, 2338, -54, 4199, 26169, 2451, -50, 4046, 26202, 2568, -46, 3897,
26230, 2688, -42, 3751, 26253, 2811, -38, 3608, 26270, 2936, -34, 3467, 26281,
3064, -32, 3329, 26287, 3195};
std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input, double ratio) {
if (input.size() < 2)
return {};
if (ratio <= 0) {
LOG_ERROR(Audio, "Nonsensical interpolation ratio {}", ratio);
return input;
}
const s32 step{static_cast<s32>(ratio * 0x8000)};
const std::array<s16, 512>& lut = [step] {
if (step > 0xaaaa) {
return curve_lut0;
}
if (step <= 0x8000) {
return curve_lut1;
}
return curve_lut2;
}();
const std::size_t num_frames{input.size() / 2};
std::vector<s16> output;
output.reserve(static_cast<std::size_t>(static_cast<double>(input.size()) / ratio +
InterpolationState::taps));
for (std::size_t frame{}; frame < num_frames; ++frame) {
const std::size_t lut_index{(state.fraction >> 8) * InterpolationState::taps};
std::rotate(state.history.begin(), state.history.end() - 1, state.history.end());
state.history[0][0] = input[frame * 2 + 0];
state.history[0][1] = input[frame * 2 + 1];
while (state.position <= 1.0) {
const s32 left{state.history[0][0] * lut[lut_index + 0] +
state.history[1][0] * lut[lut_index + 1] +
state.history[2][0] * lut[lut_index + 2] +
state.history[3][0] * lut[lut_index + 3]};
const s32 right{state.history[0][1] * lut[lut_index + 0] +
state.history[1][1] * lut[lut_index + 1] +
state.history[2][1] * lut[lut_index + 2] +
state.history[3][1] * lut[lut_index + 3]};
const s32 new_offset{state.fraction + step};
state.fraction = new_offset & 0x7fff;
output.emplace_back(static_cast<s16>(std::clamp(left >> 15, SHRT_MIN, SHRT_MAX)));
output.emplace_back(static_cast<s16>(std::clamp(right >> 15, SHRT_MIN, SHRT_MAX)));
state.position += ratio;
}
state.position -= 1.0;
}
return output;
}
void Resample(s32* output, const s32* input, s32 pitch, s32& fraction, std::size_t sample_count) {
const std::array<s16, 512>& lut = [pitch] {
if (pitch > 0xaaaa) {
return curve_lut0;
}
if (pitch <= 0x8000) {
return curve_lut1;
}
return curve_lut2;
}();
std::size_t index{};
for (std::size_t i = 0; i < sample_count; i++) {
const std::size_t lut_index{(static_cast<std::size_t>(fraction) >> 8) * 4};
const auto l0 = lut[lut_index + 0];
const auto l1 = lut[lut_index + 1];
const auto l2 = lut[lut_index + 2];
const auto l3 = lut[lut_index + 3];
const auto s0 = static_cast<s32>(input[index]);
const auto s1 = static_cast<s32>(input[index + 1]);
const auto s2 = static_cast<s32>(input[index + 2]);
const auto s3 = static_cast<s32>(input[index + 3]);
output[i] = (l0 * s0 + l1 * s1 + l2 * s2 + l3 * s3) >> 15;
fraction += pitch;
index += (fraction >> 15);
fraction &= 0x7fff;
}
}
} // namespace AudioCore

44
src/audio_core/algorithm/interpolate.h Executable file
View File

@@ -0,0 +1,44 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <vector>
#include "common/common_types.h"
namespace AudioCore {
struct InterpolationState {
static constexpr std::size_t taps{4};
static constexpr std::size_t history_size{taps * 2 - 1};
std::array<std::array<s16, 2>, history_size> history{};
double position{};
s32 fraction{};
};
/// Interpolates input signal to produce output signal.
/// @param input The signal to interpolate.
/// @param ratio Interpolation ratio.
/// ratio > 1.0 results in fewer output samples.
/// ratio < 1.0 results in more output samples.
/// @returns Output signal.
std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input, double ratio);
/// Interpolates input signal to produce output signal.
/// @param input The signal to interpolate.
/// @param input_rate The sample rate of input.
/// @param output_rate The desired sample rate of the output.
/// @returns Output signal.
inline std::vector<s16> Interpolate(InterpolationState& state, std::vector<s16> input,
u32 input_rate, u32 output_rate) {
const double ratio = static_cast<double>(input_rate) / static_cast<double>(output_rate);
return Interpolate(state, std::move(input), ratio);
}
/// Nintendo Switch's DSP resampling algorithm. Operates on a single channel.
void Resample(s32* output, const s32* input, s32 pitch, s32& fraction, std::size_t sample_count);
} // namespace AudioCore
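A rough usage sketch of the Interpolate overloads declared above; the 48 kHz to 32 kHz conversion and the caller-owned InterpolationState are illustrative assumptions, not something the commit prescribes.

    #include <utility>
    #include <vector>

    #include "audio_core/algorithm/interpolate.h"
    #include "common/common_types.h"

    // Resample an interleaved stereo s16 buffer from 48 kHz to 32 kHz.
    // The InterpolationState must persist across calls so the 4-tap history
    // carries over between consecutive buffers.
    std::vector<s16> ResampleTo32k(AudioCore::InterpolationState& state, std::vector<s16> input) {
        constexpr u32 input_rate = 48000;
        constexpr u32 output_rate = 32000;
        return AudioCore::Interpolate(state, std::move(input), input_rate, output_rate);
    }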

62
src/audio_core/audio_out.cpp Executable file
View File

@@ -0,0 +1,62 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "audio_core/audio_out.h"
#include "audio_core/sink.h"
#include "audio_core/sink_details.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/settings.h"
namespace AudioCore {
/// Returns the stream format from the specified number of channels
static Stream::Format ChannelsToStreamFormat(u32 num_channels) {
switch (num_channels) {
case 1:
return Stream::Format::Mono16;
case 2:
return Stream::Format::Stereo16;
case 6:
return Stream::Format::Multi51Channel16;
}
UNIMPLEMENTED_MSG("Unimplemented num_channels={}", num_channels);
return {};
}
StreamPtr AudioOut::OpenStream(Core::Timing::CoreTiming& core_timing, u32 sample_rate,
u32 num_channels, std::string&& name,
Stream::ReleaseCallback&& release_callback) {
if (!sink) {
sink = CreateSinkFromID(Settings::values.sink_id, Settings::values.audio_device_id);
}
return std::make_shared<Stream>(
core_timing, sample_rate, ChannelsToStreamFormat(num_channels), std::move(release_callback),
sink->AcquireSinkStream(sample_rate, num_channels, name), std::move(name));
}
std::vector<Buffer::Tag> AudioOut::GetTagsAndReleaseBuffers(StreamPtr stream,
std::size_t max_count) {
return stream->GetTagsAndReleaseBuffers(max_count);
}
std::vector<Buffer::Tag> AudioOut::GetTagsAndReleaseBuffers(StreamPtr stream) {
return stream->GetTagsAndReleaseBuffers();
}
void AudioOut::StartStream(StreamPtr stream) {
stream->Play();
}
void AudioOut::StopStream(StreamPtr stream) {
stream->Stop();
}
bool AudioOut::QueueBuffer(StreamPtr stream, Buffer::Tag tag, std::vector<s16>&& data) {
return stream->QueueBuffer(std::make_shared<Buffer>(tag, std::move(data)));
}
} // namespace AudioCore

50
src/audio_core/audio_out.h Executable file
View File

@@ -0,0 +1,50 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include <vector>
#include "audio_core/buffer.h"
#include "audio_core/sink.h"
#include "audio_core/stream.h"
#include "common/common_types.h"
namespace Core::Timing {
class CoreTiming;
}
namespace AudioCore {
/**
* Represents an audio playback interface, used to open and play audio streams
*/
class AudioOut {
public:
/// Opens a new audio stream
StreamPtr OpenStream(Core::Timing::CoreTiming& core_timing, u32 sample_rate, u32 num_channels,
std::string&& name, Stream::ReleaseCallback&& release_callback);
/// Returns a vector of recently released buffers specified by tag for the specified stream
std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(StreamPtr stream, std::size_t max_count);
/// Returns a vector of all recently released buffers specified by tag for the specified stream
std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(StreamPtr stream);
/// Starts an audio stream for playback
void StartStream(StreamPtr stream);
/// Stops an audio stream that is currently playing
void StopStream(StreamPtr stream);
/// Queues a buffer into the specified audio stream, returns true on success
bool QueueBuffer(StreamPtr stream, Buffer::Tag tag, std::vector<s16>&& data);
private:
SinkPtr sink;
};
} // namespace AudioCore
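A rough sketch of driving the AudioOut interface above; it assumes Stream::ReleaseCallback accepts a no-argument callable, and the stream name, tag value and silent buffer are placeholders.

    #include <utility>
    #include <vector>

    #include "audio_core/audio_out.h"
    #include "common/common_types.h"

    // Open a 48 kHz stereo stream, start it, and queue one buffer of silence.
    void QueueSilence(AudioCore::AudioOut& audio_out, Core::Timing::CoreTiming& core_timing) {
        auto stream = audio_out.OpenStream(core_timing, 48000, 2, "ExampleStream",
                                           [] { /* previous buffer was released */ });
        audio_out.StartStream(stream);

        std::vector<s16> samples(240 * 2); // one 5 ms frame of stereo silence
        audio_out.QueueBuffer(stream, /*tag=*/1, std::move(samples));
    }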

327
src/audio_core/audio_renderer.cpp Executable file
View File

@@ -0,0 +1,327 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <limits>
#include <tuple>
#include <vector>
#include "audio_core/audio_out.h"
#include "audio_core/audio_renderer.h"
#include "audio_core/common.h"
#include "audio_core/info_updater.h"
#include "audio_core/voice_context.h"
#include "common/logging/log.h"
#include "core/hle/kernel/writable_event.h"
#include "core/memory.h"
#include "core/settings.h"
namespace {
[[nodiscard]] static constexpr s16 ClampToS16(s32 value) {
return static_cast<s16>(std::clamp(value, s32{std::numeric_limits<s16>::min()},
s32{std::numeric_limits<s16>::max()}));
}
[[nodiscard]] static constexpr s16 Mix2To1(s16 l_channel, s16 r_channel) {
// Mix 50% from left and 50% from right channel
constexpr float l_mix_amount = 50.0f / 100.0f;
constexpr float r_mix_amount = 50.0f / 100.0f;
return ClampToS16(static_cast<s32>((static_cast<float>(l_channel) * l_mix_amount) +
(static_cast<float>(r_channel) * r_mix_amount)));
}
[[nodiscard]] static constexpr std::tuple<s16, s16> Mix6To2(s16 fl_channel, s16 fr_channel,
s16 fc_channel,
[[maybe_unused]] s16 lf_channel,
s16 bl_channel, s16 br_channel) {
// The front channels are mixed at 36.94%, the center channel at 26.12%, and the back
// channels at 36.94%
constexpr float front_mix_amount = 36.94f / 100.0f;
constexpr float center_mix_amount = 26.12f / 100.0f;
constexpr float back_mix_amount = 36.94f / 100.0f;
// Fold the front, center and back channels into the left and right outputs
const auto left = front_mix_amount * static_cast<float>(fl_channel) +
center_mix_amount * static_cast<float>(fc_channel) +
back_mix_amount * static_cast<float>(bl_channel);
const auto right = front_mix_amount * static_cast<float>(fr_channel) +
center_mix_amount * static_cast<float>(fc_channel) +
back_mix_amount * static_cast<float>(br_channel);
return {ClampToS16(static_cast<s32>(left)), ClampToS16(static_cast<s32>(right))};
}
[[nodiscard]] static constexpr std::tuple<s16, s16> Mix6To2WithCoefficients(
s16 fl_channel, s16 fr_channel, s16 fc_channel, s16 lf_channel, s16 bl_channel, s16 br_channel,
const std::array<float_le, 4>& coeff) {
const auto left =
static_cast<float>(fl_channel) * coeff[0] + static_cast<float>(fc_channel) * coeff[1] +
static_cast<float>(lf_channel) * coeff[2] + static_cast<float>(bl_channel) * coeff[0];
const auto right =
static_cast<float>(fr_channel) * coeff[0] + static_cast<float>(fc_channel) * coeff[1] +
static_cast<float>(lf_channel) * coeff[2] + static_cast<float>(br_channel) * coeff[0];
return {ClampToS16(static_cast<s32>(left)), ClampToS16(static_cast<s32>(right))};
}
} // namespace
namespace AudioCore {
AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
AudioCommon::AudioRendererParameter params,
std::shared_ptr<Kernel::WritableEvent> buffer_event_,
std::size_t instance_number)
: worker_params{params}, buffer_event{buffer_event_},
memory_pool_info(params.effect_count + params.voice_count * 4),
voice_context(params.voice_count), effect_context(params.effect_count), mix_context(),
sink_context(params.sink_count), splitter_context(),
voices(params.voice_count), memory{memory_},
command_generator(worker_params, voice_context, mix_context, splitter_context, effect_context,
memory) {
behavior_info.SetUserRevision(params.revision);
splitter_context.Initialize(behavior_info, params.splitter_count,
params.num_splitter_send_channels);
mix_context.Initialize(behavior_info, params.submix_count + 1, params.effect_count);
audio_out = std::make_unique<AudioCore::AudioOut>();
stream =
audio_out->OpenStream(core_timing, params.sample_rate, AudioCommon::STREAM_NUM_CHANNELS,
fmt::format("AudioRenderer-Instance{}", instance_number),
[=]() { buffer_event_->Signal(); });
audio_out->StartStream(stream);
QueueMixedBuffer(0);
QueueMixedBuffer(1);
QueueMixedBuffer(2);
QueueMixedBuffer(3);
}
AudioRenderer::~AudioRenderer() = default;
u32 AudioRenderer::GetSampleRate() const {
return worker_params.sample_rate;
}
u32 AudioRenderer::GetSampleCount() const {
return worker_params.sample_count;
}
u32 AudioRenderer::GetMixBufferCount() const {
return worker_params.mix_buffer_count;
}
Stream::State AudioRenderer::GetStreamState() const {
return stream->GetState();
}
ResultCode AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params,
std::vector<u8>& output_params) {
InfoUpdater info_updater{input_params, output_params, behavior_info};
if (!info_updater.UpdateBehaviorInfo(behavior_info)) {
LOG_ERROR(Audio, "Failed to update behavior info input parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!info_updater.UpdateMemoryPools(memory_pool_info)) {
LOG_ERROR(Audio, "Failed to update memory pool parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!info_updater.UpdateVoiceChannelResources(voice_context)) {
LOG_ERROR(Audio, "Failed to update voice channel resource parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!info_updater.UpdateVoices(voice_context, memory_pool_info, 0)) {
LOG_ERROR(Audio, "Failed to update voice parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
// TODO(ogniK): Deal with stopped audio renderer but updates still taking place
if (!info_updater.UpdateEffects(effect_context, true)) {
LOG_ERROR(Audio, "Failed to update effect parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (behavior_info.IsSplitterSupported()) {
if (!info_updater.UpdateSplitterInfo(splitter_context)) {
LOG_ERROR(Audio, "Failed to update splitter parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
}
const auto mix_result = info_updater.UpdateMixes(mix_context, worker_params.mix_buffer_count,
splitter_context, effect_context);
if (mix_result.IsError()) {
LOG_ERROR(Audio, "Failed to update mix parameters");
return mix_result;
}
// TODO(ogniK): Sinks
if (!info_updater.UpdateSinks(sink_context)) {
LOG_ERROR(Audio, "Failed to update sink parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
// TODO(ogniK): Performance buffer
if (!info_updater.UpdatePerformanceBuffer()) {
LOG_ERROR(Audio, "Failed to update performance buffer parameters");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!info_updater.UpdateErrorInfo(behavior_info)) {
LOG_ERROR(Audio, "Failed to update error info");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (behavior_info.IsElapsedFrameCountSupported()) {
if (!info_updater.UpdateRendererInfo(elapsed_frame_count)) {
LOG_ERROR(Audio, "Failed to update renderer info");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
}
// TODO(ogniK): Statistics
if (!info_updater.WriteOutputHeader()) {
LOG_ERROR(Audio, "Failed to write output header");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
// TODO(ogniK): Check when all sections are implemented
if (!info_updater.CheckConsumedSize()) {
LOG_ERROR(Audio, "Audio buffers were not consumed!");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
ReleaseAndQueueBuffers();
return RESULT_SUCCESS;
}
void AudioRenderer::QueueMixedBuffer(Buffer::Tag tag) {
command_generator.PreCommand();
// Clear mix buffers before our next operation
command_generator.ClearMixBuffers();
// If the splitter is not in use, sort our mixes
if (!splitter_context.UsingSplitter()) {
mix_context.SortInfo();
}
// Sort our voices
voice_context.SortInfo();
// Handle samples
command_generator.GenerateVoiceCommands();
command_generator.GenerateSubMixCommands();
command_generator.GenerateFinalMixCommands();
command_generator.PostCommand();
// Base sample count for this audio frame
std::size_t BUFFER_SIZE{worker_params.sample_count};
// Samples
std::vector<s16> buffer(BUFFER_SIZE * stream->GetNumChannels());
// Make sure to clear our samples
std::memset(buffer.data(), 0, buffer.size() * sizeof(s16));
if (sink_context.InUse()) {
const auto stream_channel_count = stream->GetNumChannels();
const auto buffer_offsets = sink_context.OutputBuffers();
const auto channel_count = buffer_offsets.size();
const auto& final_mix = mix_context.GetFinalMixInfo();
const auto& in_params = final_mix.GetInParams();
std::vector<s32*> mix_buffers(channel_count);
for (std::size_t i = 0; i < channel_count; i++) {
mix_buffers[i] =
command_generator.GetMixBuffer(in_params.buffer_offset + buffer_offsets[i]);
}
for (std::size_t i = 0; i < BUFFER_SIZE; i++) {
if (channel_count == 1) {
const auto sample = ClampToS16(mix_buffers[0][i]);
// Place sample in all channels
for (u32 channel = 0; channel < stream_channel_count; channel++) {
buffer[i * stream_channel_count + channel] = sample;
}
if (stream_channel_count == 6) {
// Output stream has a LF channel, mute it!
buffer[i * stream_channel_count + 3] = 0;
}
} else if (channel_count == 2) {
const auto l_sample = ClampToS16(mix_buffers[0][i]);
const auto r_sample = ClampToS16(mix_buffers[1][i]);
if (stream_channel_count == 1) {
buffer[i * stream_channel_count + 0] = Mix2To1(l_sample, r_sample);
} else if (stream_channel_count == 2) {
buffer[i * stream_channel_count + 0] = l_sample;
buffer[i * stream_channel_count + 1] = r_sample;
} else if (stream_channel_count == 6) {
buffer[i * stream_channel_count + 0] = l_sample;
buffer[i * stream_channel_count + 1] = r_sample;
// Combine both left and right channels to the center channel
buffer[i * stream_channel_count + 2] = Mix2To1(l_sample, r_sample);
buffer[i * stream_channel_count + 4] = l_sample;
buffer[i * stream_channel_count + 5] = r_sample;
}
} else if (channel_count == 6) {
const auto fl_sample = ClampToS16(mix_buffers[0][i]);
const auto fr_sample = ClampToS16(mix_buffers[1][i]);
const auto fc_sample = ClampToS16(mix_buffers[2][i]);
const auto lf_sample = ClampToS16(mix_buffers[3][i]);
const auto bl_sample = ClampToS16(mix_buffers[4][i]);
const auto br_sample = ClampToS16(mix_buffers[5][i]);
if (stream_channel_count == 1) {
// Games seem to ignore the center channel half the time, so we mix using the front
// left and right channels, as that's where the majority of the audio goes
buffer[i * stream_channel_count + 0] = Mix2To1(fl_sample, fr_sample);
} else if (stream_channel_count == 2) {
// Mix all channels into 2 channels
if (sink_context.HasDownMixingCoefficients()) {
const auto [left, right] = Mix6To2WithCoefficients(
fl_sample, fr_sample, fc_sample, lf_sample, bl_sample, br_sample,
sink_context.GetDownmixCoefficients());
buffer[i * stream_channel_count + 0] = left;
buffer[i * stream_channel_count + 1] = right;
} else {
const auto [left, right] = Mix6To2(fl_sample, fr_sample, fc_sample,
lf_sample, bl_sample, br_sample);
buffer[i * stream_channel_count + 0] = left;
buffer[i * stream_channel_count + 1] = right;
}
} else if (stream_channel_count == 6) {
// Pass through
buffer[i * stream_channel_count + 0] = fl_sample;
buffer[i * stream_channel_count + 1] = fr_sample;
buffer[i * stream_channel_count + 2] = fc_sample;
buffer[i * stream_channel_count + 3] = lf_sample;
buffer[i * stream_channel_count + 4] = bl_sample;
buffer[i * stream_channel_count + 5] = br_sample;
}
}
}
}
audio_out->QueueBuffer(stream, tag, std::move(buffer));
elapsed_frame_count++;
voice_context.UpdateStateByDspShared();
}
void AudioRenderer::ReleaseAndQueueBuffers() {
const auto released_buffers{audio_out->GetTagsAndReleaseBuffers(stream)};
for (const auto& tag : released_buffers) {
QueueMixedBuffer(tag);
}
}
} // namespace AudioCore
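As a sanity check on the Mix6To2 coefficients above: 36.94% + 26.12% + 36.94% sums to 100%, so a uniform 5.1 input keeps its level after the stereo downmix. A standalone re-derivation of that arithmetic (it does not call the anonymous-namespace helper itself):

    #include <cstdio>

    // Re-derive the stereo downmix from audio_renderer.cpp for one sample value.
    // With the front-left, center and back-left channels all at 10000, the left
    // output lands back at ~10000 because the three coefficients sum to 1.0.
    int main() {
        constexpr float front = 36.94f / 100.0f;
        constexpr float center = 26.12f / 100.0f;
        constexpr float back = 36.94f / 100.0f;
        constexpr float sample = 10000.0f;

        const float left = front * sample + center * sample + back * sample;
        std::printf("downmixed left channel: %.1f\n", left); // prints 10000.0
        return 0;
    }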

79
src/audio_core/audio_renderer.h Executable file
View File

@@ -0,0 +1,79 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <memory>
#include <vector>
#include "audio_core/behavior_info.h"
#include "audio_core/command_generator.h"
#include "audio_core/common.h"
#include "audio_core/effect_context.h"
#include "audio_core/memory_pool.h"
#include "audio_core/mix_context.h"
#include "audio_core/sink_context.h"
#include "audio_core/splitter_context.h"
#include "audio_core/stream.h"
#include "audio_core/voice_context.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/result.h"
namespace Core::Timing {
class CoreTiming;
}
namespace Kernel {
class WritableEvent;
}
namespace Core::Memory {
class Memory;
}
namespace AudioCore {
using DSPStateHolder = std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>;
class AudioOut;
class AudioRenderer {
public:
AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory::Memory& memory_,
AudioCommon::AudioRendererParameter params,
std::shared_ptr<Kernel::WritableEvent> buffer_event_,
std::size_t instance_number);
~AudioRenderer();
[[nodiscard]] ResultCode UpdateAudioRenderer(const std::vector<u8>& input_params,
std::vector<u8>& output_params);
void QueueMixedBuffer(Buffer::Tag tag);
void ReleaseAndQueueBuffers();
[[nodiscard]] u32 GetSampleRate() const;
[[nodiscard]] u32 GetSampleCount() const;
[[nodiscard]] u32 GetMixBufferCount() const;
[[nodiscard]] Stream::State GetStreamState() const;
private:
BehaviorInfo behavior_info{};
AudioCommon::AudioRendererParameter worker_params;
std::shared_ptr<Kernel::WritableEvent> buffer_event;
std::vector<ServerMemoryPoolInfo> memory_pool_info;
VoiceContext voice_context;
EffectContext effect_context;
MixContext mix_context;
SinkContext sink_context;
SplitterContext splitter_context;
std::vector<VoiceState> voices;
std::unique_ptr<AudioOut> audio_out;
StreamPtr stream;
Core::Memory::Memory& memory;
CommandGenerator command_generator;
std::size_t elapsed_frame_count{};
};
} // namespace AudioCore

105
src/audio_core/behavior_info.cpp Executable file
View File

@@ -0,0 +1,105 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <cstring>
#include "audio_core/behavior_info.h"
#include "audio_core/common.h"
#include "common/logging/log.h"
namespace AudioCore {
BehaviorInfo::BehaviorInfo() : process_revision(AudioCommon::CURRENT_PROCESS_REVISION) {}
BehaviorInfo::~BehaviorInfo() = default;
bool BehaviorInfo::UpdateOutput(std::vector<u8>& buffer, std::size_t offset) {
if (!AudioCommon::CanConsumeBuffer(buffer.size(), offset, sizeof(OutParams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
OutParams params{};
std::memcpy(params.errors.data(), errors.data(), sizeof(ErrorInfo) * errors.size());
params.error_count = static_cast<u32_le>(error_count);
std::memcpy(buffer.data() + offset, &params, sizeof(OutParams));
return true;
}
void BehaviorInfo::ClearError() {
error_count = 0;
}
void BehaviorInfo::UpdateFlags(u64_le dest_flags) {
flags = dest_flags;
}
void BehaviorInfo::SetUserRevision(u32_le revision) {
user_revision = revision;
}
u32_le BehaviorInfo::GetUserRevision() const {
return user_revision;
}
u32_le BehaviorInfo::GetProcessRevision() const {
return process_revision;
}
bool BehaviorInfo::IsAdpcmLoopContextBugFixed() const {
return AudioCommon::IsRevisionSupported(2, user_revision);
}
bool BehaviorInfo::IsSplitterSupported() const {
return AudioCommon::IsRevisionSupported(2, user_revision);
}
bool BehaviorInfo::IsLongSizePreDelaySupported() const {
return AudioCommon::IsRevisionSupported(3, user_revision);
}
bool BehaviorInfo::IsAudioRendererProcessingTimeLimit80PercentSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsAudioRendererProcessingTimeLimit75PercentSupported() const {
return AudioCommon::IsRevisionSupported(4, user_revision);
}
bool BehaviorInfo::IsAudioRendererProcessingTimeLimit70PercentSupported() const {
return AudioCommon::IsRevisionSupported(1, user_revision);
}
bool BehaviorInfo::IsElapsedFrameCountSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsMemoryPoolForceMappingEnabled() const {
return (flags & 1) != 0;
}
bool BehaviorInfo::IsFlushVoiceWaveBuffersSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsVoicePlayedSampleCountResetAtLoopPointSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsVoicePitchAndSrcSkippedSupported() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
bool BehaviorInfo::IsMixInParameterDirtyOnlyUpdateSupported() const {
return AudioCommon::IsRevisionSupported(7, user_revision);
}
bool BehaviorInfo::IsSplitterBugFixed() const {
return AudioCommon::IsRevisionSupported(5, user_revision);
}
void BehaviorInfo::CopyErrorInfo(BehaviorInfo::OutParams& dst) {
dst.error_count = static_cast<u32>(error_count);
std::copy(errors.begin(), errors.begin() + error_count, dst.errors.begin());
}
} // namespace AudioCore

72
src/audio_core/behavior_info.h Executable file
View File

@@ -0,0 +1,72 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <vector>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
class BehaviorInfo {
public:
struct ErrorInfo {
u32_le result{};
INSERT_PADDING_WORDS(1);
u64_le result_info{};
};
static_assert(sizeof(ErrorInfo) == 0x10, "ErrorInfo is an invalid size");
struct InParams {
u32_le revision{};
u32_le padding{};
u64_le flags{};
};
static_assert(sizeof(InParams) == 0x10, "InParams is an invalid size");
struct OutParams {
std::array<ErrorInfo, 10> errors{};
u32_le error_count{};
INSERT_PADDING_BYTES(12);
};
static_assert(sizeof(OutParams) == 0xb0, "OutParams is an invalid size");
explicit BehaviorInfo();
~BehaviorInfo();
bool UpdateOutput(std::vector<u8>& buffer, std::size_t offset);
void ClearError();
void UpdateFlags(u64_le dest_flags);
void SetUserRevision(u32_le revision);
[[nodiscard]] u32_le GetUserRevision() const;
[[nodiscard]] u32_le GetProcessRevision() const;
[[nodiscard]] bool IsAdpcmLoopContextBugFixed() const;
[[nodiscard]] bool IsSplitterSupported() const;
[[nodiscard]] bool IsLongSizePreDelaySupported() const;
[[nodiscard]] bool IsAudioRendererProcessingTimeLimit80PercentSupported() const;
[[nodiscard]] bool IsAudioRendererProcessingTimeLimit75PercentSupported() const;
[[nodiscard]] bool IsAudioRendererProcessingTimeLimit70PercentSupported() const;
[[nodiscard]] bool IsElapsedFrameCountSupported() const;
[[nodiscard]] bool IsMemoryPoolForceMappingEnabled() const;
[[nodiscard]] bool IsFlushVoiceWaveBuffersSupported() const;
[[nodiscard]] bool IsVoicePlayedSampleCountResetAtLoopPointSupported() const;
[[nodiscard]] bool IsVoicePitchAndSrcSkippedSupported() const;
[[nodiscard]] bool IsMixInParameterDirtyOnlyUpdateSupported() const;
[[nodiscard]] bool IsSplitterBugFixed() const;
void CopyErrorInfo(OutParams& dst);
private:
u32_le process_revision{};
u32_le user_revision{};
u64_le flags{};
std::array<ErrorInfo, 10> errors{};
std::size_t error_count{};
};
} // namespace AudioCore
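A small sketch of how the revision gating above might be queried in isolation; the standalone helper and the u32_le parameter are illustrative only.

    #include "audio_core/behavior_info.h"
    #include "common/swap.h"

    // Configure a BehaviorInfo for a given user revision and query a feature flag.
    bool SupportsSplitter(u32_le user_revision) {
        AudioCore::BehaviorInfo info;
        info.SetUserRevision(user_revision);
        return info.IsSplitterSupported();
    }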

45
src/audio_core/buffer.h Executable file
View File

@@ -0,0 +1,45 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include "common/common_types.h"
namespace AudioCore {
/**
* Represents a buffer of audio samples to be played in an audio stream
*/
class Buffer {
public:
using Tag = u64;
Buffer(Tag tag_, std::vector<s16>&& samples_) : tag{tag_}, samples{std::move(samples_)} {}
/// Returns the raw audio data for the buffer
std::vector<s16>& GetSamples() {
return samples;
}
/// Returns the raw audio data for the buffer
const std::vector<s16>& GetSamples() const {
return samples;
}
/// Returns the buffer tag; this is provided by the game to the audout service
Tag GetTag() const {
return tag;
}
private:
Tag tag;
std::vector<s16> samples;
};
using BufferPtr = std::shared_ptr<Buffer>;
} // namespace AudioCore

78
src/audio_core/codec.cpp Executable file
View File

@@ -0,0 +1,78 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "audio_core/codec.h"
namespace AudioCore::Codec {
std::vector<s16> DecodeADPCM(const u8* const data, std::size_t size, const ADPCM_Coeff& coeff,
ADPCMState& state) {
// GC-ADPCM with scale factor and variable coefficients.
// Frames are 8 bytes long containing 14 samples each.
// Samples are 4 bits (one nibble) long.
constexpr std::size_t FRAME_LEN = 8;
constexpr std::size_t SAMPLES_PER_FRAME = 14;
static constexpr std::array<int, 16> SIGNED_NIBBLES{
0, 1, 2, 3, 4, 5, 6, 7, -8, -7, -6, -5, -4, -3, -2, -1,
};
const std::size_t sample_count = (size / FRAME_LEN) * SAMPLES_PER_FRAME;
const std::size_t ret_size =
sample_count % 2 == 0 ? sample_count : sample_count + 1; // Ensure multiple of two.
std::vector<s16> ret(ret_size);
int yn1 = state.yn1, yn2 = state.yn2;
const std::size_t NUM_FRAMES =
(sample_count + (SAMPLES_PER_FRAME - 1)) / SAMPLES_PER_FRAME; // Round up.
for (std::size_t framei = 0; framei < NUM_FRAMES; framei++) {
const int frame_header = data[framei * FRAME_LEN];
const int scale = 1 << (frame_header & 0xF);
const int idx = (frame_header >> 4) & 0x7;
// Coefficients are fixed point with 11 bits fractional part.
const int coef1 = coeff[idx * 2 + 0];
const int coef2 = coeff[idx * 2 + 1];
// Decodes an audio sample. One nibble produces one sample.
const auto decode_sample = [&](const int nibble) -> s16 {
const int xn = nibble * scale;
// We first transform everything into 11 bit fixed point, perform the second order
// digital filter, then transform back.
// 0x400 == 0.5 in 11 bit fixed point.
// Filter: y[n] = x[n] + 0.5 + c1 * y[n-1] + c2 * y[n-2]
int val = ((xn << 11) + 0x400 + coef1 * yn1 + coef2 * yn2) >> 11;
// Clamp to output range.
val = std::clamp<s32>(val, -32768, 32767);
// Advance output feedback.
yn2 = yn1;
yn1 = val;
return static_cast<s16>(val);
};
std::size_t outputi = framei * SAMPLES_PER_FRAME;
std::size_t datai = framei * FRAME_LEN + 1;
for (std::size_t i = 0; i < SAMPLES_PER_FRAME && outputi < sample_count; i += 2) {
const s16 sample1 = decode_sample(SIGNED_NIBBLES[data[datai] >> 4]);
ret[outputi] = sample1;
outputi++;
const s16 sample2 = decode_sample(SIGNED_NIBBLES[data[datai] & 0xF]);
ret[outputi] = sample2;
outputi++;
datai++;
}
}
state.yn1 = static_cast<s16>(yn1);
state.yn2 = static_cast<s16>(yn2);
return ret;
}
} // namespace AudioCore::Codec

44
src/audio_core/codec.h Executable file
View File

@@ -0,0 +1,44 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <vector>
#include "common/common_types.h"
namespace AudioCore::Codec {
enum class PcmFormat : u32 {
Invalid = 0,
Int8 = 1,
Int16 = 2,
Int24 = 3,
Int32 = 4,
PcmFloat = 5,
Adpcm = 6,
};
/// See: Codec::DecodeADPCM
struct ADPCMState {
// Two historical samples from the previously processed buffer,
// required for ADPCM decoding
s16 yn1; ///< y[n-1]
s16 yn2; ///< y[n-2]
};
using ADPCM_Coeff = std::array<s16, 16>;
/**
* @param data Pointer to buffer that contains ADPCM data to decode
* @param size Size of buffer in bytes
* @param coeff ADPCM coefficients
* @param state ADPCM state, this is updated with new state
* @return Decoded stereo signed PCM16 data, sample_count in length
*/
std::vector<s16> DecodeADPCM(const u8* data, std::size_t size, const ADPCM_Coeff& coeff,
ADPCMState& state);
} // namespace AudioCore::Codec
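A minimal sketch of calling DecodeADPCM as documented above; the single 8-byte frame, coefficient table and decoder state are taken as parameters purely to show the argument shapes.

    #include <array>
    #include <vector>

    #include "audio_core/codec.h"
    #include "common/common_types.h"

    // Decode one 8-byte GC-ADPCM frame (14 samples) with persistent decoder state.
    std::vector<s16> DecodeOneFrame(const std::array<u8, 8>& frame,
                                    const AudioCore::Codec::ADPCM_Coeff& coeff,
                                    AudioCore::Codec::ADPCMState& state) {
        return AudioCore::Codec::DecodeADPCM(frame.data(), frame.size(), coeff, state);
    }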

982
src/audio_core/command_generator.cpp Executable file
View File

@@ -0,0 +1,982 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "audio_core/algorithm/interpolate.h"
#include "audio_core/command_generator.h"
#include "audio_core/effect_context.h"
#include "audio_core/mix_context.h"
#include "audio_core/voice_context.h"
#include "core/memory.h"
namespace AudioCore {
namespace {
constexpr std::size_t MIX_BUFFER_SIZE = 0x3f00;
constexpr std::size_t SCALED_MIX_BUFFER_SIZE = MIX_BUFFER_SIZE << 15ULL;
template <std::size_t N>
void ApplyMix(s32* output, const s32* input, s32 gain, s32 sample_count) {
for (std::size_t i = 0; i < static_cast<std::size_t>(sample_count); i += N) {
for (std::size_t j = 0; j < N; j++) {
output[i + j] +=
static_cast<s32>((static_cast<s64>(input[i + j]) * gain + 0x4000) >> 15);
}
}
}
s32 ApplyMixRamp(s32* output, const s32* input, float gain, float delta, s32 sample_count) {
s32 x = 0;
for (s32 i = 0; i < sample_count; i++) {
x = static_cast<s32>(static_cast<float>(input[i]) * gain);
output[i] += x;
gain += delta;
}
return x;
}
void ApplyGain(s32* output, const s32* input, s32 gain, s32 delta, s32 sample_count) {
for (s32 i = 0; i < sample_count; i++) {
output[i] = static_cast<s32>((static_cast<s64>(input[i]) * gain + 0x4000) >> 15);
gain += delta;
}
}
void ApplyGainWithoutDelta(s32* output, const s32* input, s32 gain, s32 sample_count) {
for (s32 i = 0; i < sample_count; i++) {
output[i] = static_cast<s32>((static_cast<s64>(input[i]) * gain + 0x4000) >> 15);
}
}
s32 ApplyMixDepop(s32* output, s32 first_sample, s32 delta, s32 sample_count) {
const bool positive = first_sample > 0;
auto final_sample = std::abs(first_sample);
for (s32 i = 0; i < sample_count; i++) {
final_sample = static_cast<s32>((static_cast<s64>(final_sample) * delta) >> 15);
if (positive) {
output[i] += final_sample;
} else {
output[i] -= final_sample;
}
}
if (positive) {
return final_sample;
} else {
return -final_sample;
}
}
} // namespace
CommandGenerator::CommandGenerator(AudioCommon::AudioRendererParameter& worker_params_,
VoiceContext& voice_context_, MixContext& mix_context_,
SplitterContext& splitter_context_,
EffectContext& effect_context_, Core::Memory::Memory& memory_)
: worker_params(worker_params_), voice_context(voice_context_), mix_context(mix_context_),
splitter_context(splitter_context_), effect_context(effect_context_), memory(memory_),
mix_buffer((worker_params.mix_buffer_count + AudioCommon::MAX_CHANNEL_COUNT) *
worker_params.sample_count),
sample_buffer(MIX_BUFFER_SIZE),
depop_buffer((worker_params.mix_buffer_count + AudioCommon::MAX_CHANNEL_COUNT) *
worker_params.sample_count) {}
CommandGenerator::~CommandGenerator() = default;
void CommandGenerator::ClearMixBuffers() {
std::fill(mix_buffer.begin(), mix_buffer.end(), 0);
std::fill(sample_buffer.begin(), sample_buffer.end(), 0);
// std::fill(depop_buffer.begin(), depop_buffer.end(), 0);
}
void CommandGenerator::GenerateVoiceCommands() {
if (dumping_frame) {
LOG_DEBUG(Audio, "(DSP_TRACE) GenerateVoiceCommands");
}
// Grab all our voices
const auto voice_count = voice_context.GetVoiceCount();
for (std::size_t i = 0; i < voice_count; i++) {
auto& voice_info = voice_context.GetSortedInfo(i);
// Update voices and check if we should queue them
if (voice_info.ShouldSkip() || !voice_info.UpdateForCommandGeneration(voice_context)) {
continue;
}
// Queue our voice
GenerateVoiceCommand(voice_info);
}
// Update our splitters
splitter_context.UpdateInternalState();
}
void CommandGenerator::GenerateVoiceCommand(ServerVoiceInfo& voice_info) {
auto& in_params = voice_info.GetInParams();
const auto channel_count = in_params.channel_count;
for (s32 channel = 0; channel < channel_count; channel++) {
const auto resource_id = in_params.voice_channel_resource_id[channel];
auto& dsp_state = voice_context.GetDspSharedState(resource_id);
auto& channel_resource = voice_context.GetChannelResource(resource_id);
// Decode our samples for our channel
GenerateDataSourceCommand(voice_info, dsp_state, channel);
if (in_params.should_depop) {
in_params.last_volume = 0.0f;
} else if (in_params.splitter_info_id != AudioCommon::NO_SPLITTER ||
in_params.mix_id != AudioCommon::NO_MIX) {
// Apply a biquad filter if needed
GenerateBiquadFilterCommandForVoice(voice_info, dsp_state,
worker_params.mix_buffer_count, channel);
// Base voice volume ramping
GenerateVolumeRampCommand(in_params.last_volume, in_params.volume, channel,
in_params.node_id);
in_params.last_volume = in_params.volume;
if (in_params.mix_id != AudioCommon::NO_MIX) {
// If we're using a mix id
auto& mix_info = mix_context.GetInfo(in_params.mix_id);
const auto& dest_mix_params = mix_info.GetInParams();
// Voice Mixing
GenerateVoiceMixCommand(
channel_resource.GetCurrentMixVolume(), channel_resource.GetLastMixVolume(),
dsp_state, dest_mix_params.buffer_offset, dest_mix_params.buffer_count,
worker_params.mix_buffer_count + channel, in_params.node_id);
// Update last mix volumes
channel_resource.UpdateLastMixVolumes();
} else if (in_params.splitter_info_id != AudioCommon::NO_SPLITTER) {
s32 base = channel;
while (auto* destination_data =
GetDestinationData(in_params.splitter_info_id, base)) {
base += channel_count;
if (!destination_data->IsConfigured()) {
continue;
}
if (destination_data->GetMixId() >= static_cast<int>(mix_context.GetCount())) {
continue;
}
const auto& mix_info = mix_context.GetInfo(destination_data->GetMixId());
const auto& dest_mix_params = mix_info.GetInParams();
GenerateVoiceMixCommand(
destination_data->CurrentMixVolumes(), destination_data->LastMixVolumes(),
dsp_state, dest_mix_params.buffer_offset, dest_mix_params.buffer_count,
worker_params.mix_buffer_count + channel, in_params.node_id);
destination_data->MarkDirty();
}
}
// Update biquad filter enabled states
for (std::size_t i = 0; i < AudioCommon::MAX_BIQUAD_FILTERS; i++) {
in_params.was_biquad_filter_enabled[i] = in_params.biquad_filter[i].enabled;
}
}
}
}
void CommandGenerator::GenerateSubMixCommands() {
const auto mix_count = mix_context.GetCount();
for (std::size_t i = 0; i < mix_count; i++) {
auto& mix_info = mix_context.GetSortedInfo(i);
const auto& in_params = mix_info.GetInParams();
if (!in_params.in_use || in_params.mix_id == AudioCommon::FINAL_MIX) {
continue;
}
GenerateSubMixCommand(mix_info);
}
}
void CommandGenerator::GenerateFinalMixCommands() {
GenerateFinalMixCommand();
}
void CommandGenerator::PreCommand() {
if (!dumping_frame) {
return;
}
for (std::size_t i = 0; i < splitter_context.GetInfoCount(); i++) {
const auto& base = splitter_context.GetInfo(i);
std::string graph = fmt::format("b[{}]", i);
const auto* head = base.GetHead();
while (head != nullptr) {
graph += fmt::format("->{}", head->GetMixId());
head = head->GetNextDestination();
}
LOG_DEBUG(Audio, "(DSP_TRACE) SplitterGraph splitter_info={}, {}", i, graph);
}
}
void CommandGenerator::PostCommand() {
if (!dumping_frame) {
return;
}
dumping_frame = false;
}
void CommandGenerator::GenerateDataSourceCommand(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
s32 channel) {
const auto& in_params = voice_info.GetInParams();
const auto depop = in_params.should_depop;
if (depop) {
if (in_params.mix_id != AudioCommon::NO_MIX) {
auto& mix_info = mix_context.GetInfo(in_params.mix_id);
const auto& mix_in = mix_info.GetInParams();
GenerateDepopPrepareCommand(dsp_state, mix_in.buffer_count, mix_in.buffer_offset);
} else if (in_params.splitter_info_id != AudioCommon::NO_SPLITTER) {
s32 index{};
while (const auto* destination =
GetDestinationData(in_params.splitter_info_id, index++)) {
if (!destination->IsConfigured()) {
continue;
}
auto& mix_info = mix_context.GetInfo(destination->GetMixId());
const auto& mix_in = mix_info.GetInParams();
GenerateDepopPrepareCommand(dsp_state, mix_in.buffer_count, mix_in.buffer_offset);
}
}
} else {
switch (in_params.sample_format) {
case SampleFormat::Pcm16:
DecodeFromWaveBuffers(voice_info, GetChannelMixBuffer(channel), dsp_state, channel,
worker_params.sample_rate, worker_params.sample_count,
in_params.node_id);
break;
case SampleFormat::Adpcm:
ASSERT(channel == 0 && in_params.channel_count == 1);
DecodeFromWaveBuffers(voice_info, GetChannelMixBuffer(0), dsp_state, 0,
worker_params.sample_rate, worker_params.sample_count,
in_params.node_id);
break;
default:
UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
}
}
}
void CommandGenerator::GenerateBiquadFilterCommandForVoice(ServerVoiceInfo& voice_info,
VoiceState& dsp_state,
[[maybe_unused]] s32 mix_buffer_count,
[[maybe_unused]] s32 channel) {
for (std::size_t i = 0; i < AudioCommon::MAX_BIQUAD_FILTERS; i++) {
const auto& in_params = voice_info.GetInParams();
auto& biquad_filter = in_params.biquad_filter[i];
// Check if biquad filter is actually used
if (!biquad_filter.enabled) {
continue;
}
// Reinitialize our biquad filter state if it was enabled previously
if (!in_params.was_biquad_filter_enabled[i]) {
dsp_state.biquad_filter_state.fill(0);
}
// Generate biquad filter
// GenerateBiquadFilterCommand(mix_buffer_count, biquad_filter,
// dsp_state.biquad_filter_state,
// mix_buffer_count + channel, mix_buffer_count +
// channel, worker_params.sample_count,
// voice_info.GetInParams().node_id);
}
}
void CommandGenerator::GenerateBiquadFilterCommand([[maybe_unused]] s32 mix_buffer_id,
const BiquadFilterParameter& params,
std::array<s64, 2>& state,
std::size_t input_offset,
std::size_t output_offset, s32 sample_count,
s32 node_id) {
if (dumping_frame) {
LOG_DEBUG(Audio,
"(DSP_TRACE) GenerateBiquadFilterCommand node_id={}, "
"input_mix_buffer={}, output_mix_buffer={}",
node_id, input_offset, output_offset);
}
const auto* input = GetMixBuffer(input_offset);
auto* output = GetMixBuffer(output_offset);
// Biquad filter parameters
const auto [n0, n1, n2] = params.numerator;
const auto [d0, d1] = params.denominator;
// Biquad filter states
auto [s0, s1] = state;
constexpr s64 int32_min = std::numeric_limits<s32>::min();
constexpr s64 int32_max = std::numeric_limits<s32>::max();
for (int i = 0; i < sample_count; ++i) {
const auto sample = static_cast<s64>(input[i]);
const auto f = (sample * n0 + s0 + 0x4000) >> 15;
const auto y = std::clamp(f, int32_min, int32_max);
s0 = sample * n1 + y * d0 + s1;
s1 = sample * n2 + y * d1;
output[i] = static_cast<s32>(y);
}
state = {s0, s1};
}
void CommandGenerator::GenerateDepopPrepareCommand(VoiceState& dsp_state,
std::size_t mix_buffer_count,
std::size_t mix_buffer_offset) {
for (std::size_t i = 0; i < mix_buffer_count; i++) {
auto& sample = dsp_state.previous_samples[i];
if (sample != 0) {
depop_buffer[mix_buffer_offset + i] += sample;
sample = 0;
}
}
}
void CommandGenerator::GenerateDepopForMixBuffersCommand(std::size_t mix_buffer_count,
std::size_t mix_buffer_offset,
s32 sample_rate) {
const std::size_t end_offset =
std::min(mix_buffer_offset + mix_buffer_count, GetTotalMixBufferCount());
const s32 delta = sample_rate == 48000 ? 0x7B29 : 0x78CB;
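    // These constants are presumably Q15 decay factors for the depop fade-out
    // (0x7B29 / 32768 ~= 0.962 at 48 kHz, 0x78CB / 32768 ~= 0.944 otherwise).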
for (std::size_t i = mix_buffer_offset; i < end_offset; i++) {
if (depop_buffer[i] == 0) {
continue;
}
depop_buffer[i] =
ApplyMixDepop(GetMixBuffer(i), depop_buffer[i], delta, worker_params.sample_count);
}
}
void CommandGenerator::GenerateEffectCommand(ServerMixInfo& mix_info) {
const std::size_t effect_count = effect_context.GetCount();
const auto buffer_offset = mix_info.GetInParams().buffer_offset;
for (std::size_t i = 0; i < effect_count; i++) {
const auto index = mix_info.GetEffectOrder(i);
if (index == AudioCommon::NO_EFFECT_ORDER) {
break;
}
auto* info = effect_context.GetInfo(index);
const auto type = info->GetType();
// TODO(ogniK): Finish remaining effects
switch (type) {
case EffectType::Aux:
GenerateAuxCommand(buffer_offset, info, info->IsEnabled());
break;
case EffectType::I3dl2Reverb:
GenerateI3dl2ReverbEffectCommand(buffer_offset, info, info->IsEnabled());
break;
case EffectType::BiquadFilter:
GenerateBiquadFilterEffectCommand(buffer_offset, info, info->IsEnabled());
break;
default:
break;
}
info->UpdateForCommandGeneration();
}
}
void CommandGenerator::GenerateI3dl2ReverbEffectCommand(s32 mix_buffer_offset, EffectBase* info,
bool enabled) {
if (!enabled) {
return;
}
const auto& params = dynamic_cast<EffectI3dl2Reverb*>(info)->GetParams();
const auto channel_count = params.channel_count;
for (s32 i = 0; i < channel_count; i++) {
// TODO(ogniK): Actually implement reverb
if (params.input[i] != params.output[i]) {
const auto* input = GetMixBuffer(mix_buffer_offset + params.input[i]);
auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
ApplyMix<1>(output, input, 32768, worker_params.sample_count);
}
}
}
void CommandGenerator::GenerateBiquadFilterEffectCommand(s32 mix_buffer_offset, EffectBase* info,
bool enabled) {
if (!enabled) {
return;
}
const auto& params = dynamic_cast<EffectBiquadFilter*>(info)->GetParams();
const auto channel_count = params.channel_count;
for (s32 i = 0; i < channel_count; i++) {
// TODO(ogniK): Actually implement biquad filter
if (params.input[i] != params.output[i]) {
const auto* input = GetMixBuffer(mix_buffer_offset + params.input[i]);
auto* output = GetMixBuffer(mix_buffer_offset + params.output[i]);
ApplyMix<1>(output, input, 32768, worker_params.sample_count);
}
}
}
void CommandGenerator::GenerateAuxCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled) {
auto* aux = dynamic_cast<EffectAuxInfo*>(info);
const auto& params = aux->GetParams();
if (aux->GetSendBuffer() != 0 && aux->GetRecvBuffer() != 0) {
const auto max_channels = params.count;
u32 offset{};
for (u32 channel = 0; channel < max_channels; channel++) {
u32 write_count = 0;
if (channel == (max_channels - 1)) {
write_count = offset + worker_params.sample_count;
}
const auto input_index = params.input_mix_buffers[channel] + mix_buffer_offset;
const auto output_index = params.output_mix_buffers[channel] + mix_buffer_offset;
if (enabled) {
AuxInfoDSP send_info{};
AuxInfoDSP recv_info{};
memory.ReadBlock(aux->GetSendInfo(), &send_info, sizeof(AuxInfoDSP));
memory.ReadBlock(aux->GetRecvInfo(), &recv_info, sizeof(AuxInfoDSP));
WriteAuxBuffer(send_info, aux->GetSendBuffer(), params.sample_count,
GetMixBuffer(input_index), worker_params.sample_count, offset,
write_count);
memory.WriteBlock(aux->GetSendInfo(), &send_info, sizeof(AuxInfoDSP));
const auto samples_read = ReadAuxBuffer(
recv_info, aux->GetRecvBuffer(), params.sample_count,
GetMixBuffer(output_index), worker_params.sample_count, offset, write_count);
memory.WriteBlock(aux->GetRecvInfo(), &recv_info, sizeof(AuxInfoDSP));
if (samples_read != static_cast<int>(worker_params.sample_count) &&
samples_read <= params.sample_count) {
std::memset(GetMixBuffer(output_index), 0, params.sample_count - samples_read);
}
} else {
AuxInfoDSP empty{};
memory.WriteBlock(aux->GetSendInfo(), &empty, sizeof(AuxInfoDSP));
memory.WriteBlock(aux->GetRecvInfo(), &empty, sizeof(AuxInfoDSP));
if (output_index != input_index) {
std::memcpy(GetMixBuffer(output_index), GetMixBuffer(input_index),
worker_params.sample_count * sizeof(s32));
}
}
offset += worker_params.sample_count;
}
}
}
ServerSplitterDestinationData* CommandGenerator::GetDestinationData(s32 splitter_id, s32 index) {
if (splitter_id == AudioCommon::NO_SPLITTER) {
return nullptr;
}
return splitter_context.GetDestinationData(splitter_id, index);
}
s32 CommandGenerator::WriteAuxBuffer(AuxInfoDSP& dsp_info, VAddr send_buffer, u32 max_samples,
const s32* data, u32 sample_count, u32 write_offset,
u32 write_count) {
if (max_samples == 0) {
return 0;
}
u32 offset = dsp_info.write_offset + write_offset;
if (send_buffer == 0 || offset > max_samples) {
return 0;
}
std::size_t data_offset{};
u32 remaining = sample_count;
while (remaining > 0) {
// Get position in buffer
const auto base = send_buffer + (offset * sizeof(u32));
const auto samples_to_grab = std::min(max_samples - offset, remaining);
// Write to output
memory.WriteBlock(base, (data + data_offset), samples_to_grab * sizeof(u32));
offset = (offset + samples_to_grab) % max_samples;
remaining -= samples_to_grab;
data_offset += samples_to_grab;
}
if (write_count != 0) {
dsp_info.write_offset = (dsp_info.write_offset + write_count) % max_samples;
}
return sample_count;
}
s32 CommandGenerator::ReadAuxBuffer(AuxInfoDSP& recv_info, VAddr recv_buffer, u32 max_samples,
s32* out_data, u32 sample_count, u32 read_offset,
u32 read_count) {
if (max_samples == 0) {
return 0;
}
u32 offset = recv_info.read_offset + read_offset;
if (recv_buffer == 0 || offset > max_samples) {
return 0;
}
u32 remaining = sample_count;
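    // The aux buffer is a ring buffer in guest memory, so copy in chunks and wrap the
    // offset back to the start whenever it reaches max_samples.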
while (remaining > 0) {
const auto base = recv_buffer + (offset * sizeof(u32));
const auto samples_to_grab = std::min(max_samples - offset, remaining);
std::vector<s32> buffer(samples_to_grab);
memory.ReadBlock(base, buffer.data(), buffer.size() * sizeof(u32));
std::memcpy(out_data, buffer.data(), buffer.size() * sizeof(u32));
out_data += samples_to_grab;
offset = (offset + samples_to_grab) % max_samples;
remaining -= samples_to_grab;
}
if (read_count != 0) {
recv_info.read_offset = (recv_info.read_offset + read_count) % max_samples;
}
return sample_count;
}
void CommandGenerator::GenerateVolumeRampCommand(float last_volume, float current_volume,
s32 channel, s32 node_id) {
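    // Volumes are converted to Q15 fixed point (32768 == 1.0); delta is the per-sample gain
    // increment applied across the frame to ramp from the last volume to the current one.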
const auto last = static_cast<s32>(last_volume * 32768.0f);
const auto current = static_cast<s32>(current_volume * 32768.0f);
const auto delta = static_cast<s32>((static_cast<float>(current) - static_cast<float>(last)) /
static_cast<float>(worker_params.sample_count));
if (dumping_frame) {
LOG_DEBUG(Audio,
"(DSP_TRACE) GenerateVolumeRampCommand node_id={}, input={}, output={}, "
"last_volume={}, current_volume={}",
node_id, GetMixChannelBufferOffset(channel), GetMixChannelBufferOffset(channel),
last_volume, current_volume);
}
// Apply generic gain on samples
ApplyGain(GetChannelMixBuffer(channel), GetChannelMixBuffer(channel), last, delta,
worker_params.sample_count);
}
void CommandGenerator::GenerateVoiceMixCommand(const MixVolumeBuffer& mix_volumes,
const MixVolumeBuffer& last_mix_volumes,
VoiceState& dsp_state, s32 mix_buffer_offset,
s32 mix_buffer_count, s32 voice_index, s32 node_id) {
// Loop all our mix buffers
for (s32 i = 0; i < mix_buffer_count; i++) {
if (last_mix_volumes[i] != 0.0f || mix_volumes[i] != 0.0f) {
const auto delta = static_cast<float>((mix_volumes[i] - last_mix_volumes[i])) /
static_cast<float>(worker_params.sample_count);
if (dumping_frame) {
LOG_DEBUG(Audio,
"(DSP_TRACE) GenerateVoiceMixCommand node_id={}, input={}, "
"output={}, last_volume={}, current_volume={}",
node_id, voice_index, mix_buffer_offset + i, last_mix_volumes[i],
mix_volumes[i]);
}
dsp_state.previous_samples[i] =
ApplyMixRamp(GetMixBuffer(mix_buffer_offset + i), GetMixBuffer(voice_index),
last_mix_volumes[i], delta, worker_params.sample_count);
} else {
dsp_state.previous_samples[i] = 0;
}
}
}
void CommandGenerator::GenerateSubMixCommand(ServerMixInfo& mix_info) {
if (dumping_frame) {
LOG_DEBUG(Audio, "(DSP_TRACE) GenerateSubMixCommand");
}
const auto& in_params = mix_info.GetInParams();
GenerateDepopForMixBuffersCommand(in_params.buffer_count, in_params.buffer_offset,
in_params.sample_rate);
GenerateEffectCommand(mix_info);
GenerateMixCommands(mix_info);
}
void CommandGenerator::GenerateMixCommands(ServerMixInfo& mix_info) {
if (!mix_info.HasAnyConnection()) {
return;
}
const auto& in_params = mix_info.GetInParams();
if (in_params.dest_mix_id != AudioCommon::NO_MIX) {
const auto& dest_mix = mix_context.GetInfo(in_params.dest_mix_id);
const auto& dest_in_params = dest_mix.GetInParams();
const auto buffer_count = in_params.buffer_count;
for (s32 i = 0; i < buffer_count; i++) {
for (s32 j = 0; j < dest_in_params.buffer_count; j++) {
const auto mixed_volume = in_params.volume * in_params.mix_volume[i][j];
if (mixed_volume != 0.0f) {
GenerateMixCommand(dest_in_params.buffer_offset + j,
in_params.buffer_offset + i, mixed_volume,
in_params.node_id);
}
}
}
} else if (in_params.splitter_id != AudioCommon::NO_SPLITTER) {
s32 base{};
while (const auto* destination_data = GetDestinationData(in_params.splitter_id, base++)) {
if (!destination_data->IsConfigured()) {
continue;
}
const auto& dest_mix = mix_context.GetInfo(destination_data->GetMixId());
const auto& dest_in_params = dest_mix.GetInParams();
const auto mix_index = (base - 1) % in_params.buffer_count + in_params.buffer_offset;
for (std::size_t i = 0; i < static_cast<std::size_t>(dest_in_params.buffer_count);
i++) {
const auto mixed_volume = in_params.volume * destination_data->GetMixVolume(i);
if (mixed_volume != 0.0f) {
GenerateMixCommand(dest_in_params.buffer_offset + i, mix_index, mixed_volume,
in_params.node_id);
}
}
}
}
}
void CommandGenerator::GenerateMixCommand(std::size_t output_offset, std::size_t input_offset,
float volume, s32 node_id) {
if (dumping_frame) {
LOG_DEBUG(Audio,
"(DSP_TRACE) GenerateMixCommand node_id={}, input={}, output={}, volume={}",
node_id, input_offset, output_offset, volume);
}
auto* output = GetMixBuffer(output_offset);
const auto* input = GetMixBuffer(input_offset);
const s32 gain = static_cast<s32>(volume * 32768.0f);
// Mix with loop unrolling
if (worker_params.sample_count % 4 == 0) {
ApplyMix<4>(output, input, gain, worker_params.sample_count);
} else if (worker_params.sample_count % 2 == 0) {
ApplyMix<2>(output, input, gain, worker_params.sample_count);
} else {
ApplyMix<1>(output, input, gain, worker_params.sample_count);
}
}
void CommandGenerator::GenerateFinalMixCommand() {
if (dumping_frame) {
LOG_DEBUG(Audio, "(DSP_TRACE) GenerateFinalMixCommand");
}
auto& mix_info = mix_context.GetFinalMixInfo();
const auto& in_params = mix_info.GetInParams();
GenerateDepopForMixBuffersCommand(in_params.buffer_count, in_params.buffer_offset,
in_params.sample_rate);
GenerateEffectCommand(mix_info);
for (s32 i = 0; i < in_params.buffer_count; i++) {
const s32 gain = static_cast<s32>(in_params.volume * 32768.0f);
if (dumping_frame) {
LOG_DEBUG(
Audio,
"(DSP_TRACE) ApplyGainWithoutDelta node_id={}, input={}, output={}, volume={}",
in_params.node_id, in_params.buffer_offset + i, in_params.buffer_offset + i,
in_params.volume);
}
ApplyGainWithoutDelta(GetMixBuffer(in_params.buffer_offset + i),
GetMixBuffer(in_params.buffer_offset + i), gain,
worker_params.sample_count);
}
}
s32 CommandGenerator::DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
s32 sample_count, s32 channel, std::size_t mix_offset) {
const auto& in_params = voice_info.GetInParams();
const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
if (wave_buffer.buffer_address == 0) {
return 0;
}
if (wave_buffer.buffer_size == 0) {
return 0;
}
if (wave_buffer.end_sample_offset < wave_buffer.start_sample_offset) {
return 0;
}
const auto samples_remaining =
(wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) - dsp_state.offset;
const auto start_offset =
((wave_buffer.start_sample_offset + dsp_state.offset) * in_params.channel_count) *
sizeof(s16);
const auto buffer_pos = wave_buffer.buffer_address + start_offset;
const auto samples_processed = std::min(sample_count, samples_remaining);
if (in_params.channel_count == 1) {
std::vector<s16> buffer(samples_processed);
memory.ReadBlock(buffer_pos, buffer.data(), buffer.size() * sizeof(s16));
for (std::size_t i = 0; i < buffer.size(); i++) {
sample_buffer[mix_offset + i] = buffer[i];
}
} else {
const auto channel_count = in_params.channel_count;
std::vector<s16> buffer(samples_processed * channel_count);
memory.ReadBlock(buffer_pos, buffer.data(), buffer.size() * sizeof(s16));
for (std::size_t i = 0; i < static_cast<std::size_t>(samples_processed); i++) {
sample_buffer[mix_offset + i] = buffer[i * channel_count + channel];
}
}
return samples_processed;
}
s32 CommandGenerator::DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
s32 sample_count, [[maybe_unused]] s32 channel,
std::size_t mix_offset) {
const auto& in_params = voice_info.GetInParams();
const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
if (wave_buffer.buffer_address == 0) {
return 0;
}
if (wave_buffer.buffer_size == 0) {
return 0;
}
if (wave_buffer.end_sample_offset < wave_buffer.start_sample_offset) {
return 0;
}
static constexpr std::array<int, 16> SIGNED_NIBBLES{
0, 1, 2, 3, 4, 5, 6, 7, -8, -7, -6, -5, -4, -3, -2, -1,
};
constexpr std::size_t FRAME_LEN = 8;
constexpr std::size_t NIBBLES_PER_SAMPLE = 16;
constexpr std::size_t SAMPLES_PER_FRAME = 14;
auto frame_header = dsp_state.context.header;
s32 idx = (frame_header >> 4) & 0xf;
s32 scale = frame_header & 0xf;
s16 yn1 = dsp_state.context.yn1;
s16 yn2 = dsp_state.context.yn2;
Codec::ADPCM_Coeff coeffs;
memory.ReadBlock(in_params.additional_params_address, coeffs.data(),
sizeof(Codec::ADPCM_Coeff));
s32 coef1 = coeffs[idx * 2];
s32 coef2 = coeffs[idx * 2 + 1];
const auto samples_remaining =
(wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) - dsp_state.offset;
const auto samples_processed = std::min(sample_count, samples_remaining);
const auto sample_pos = wave_buffer.start_sample_offset + dsp_state.offset;
const auto samples_remaining_in_frame = sample_pos % SAMPLES_PER_FRAME;
auto position_in_frame = ((sample_pos / SAMPLES_PER_FRAME) * NIBBLES_PER_SAMPLE) +
samples_remaining_in_frame + (samples_remaining_in_frame != 0 ? 2 : 0);
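    // Each 8-byte ADPCM frame is one header byte (2 nibbles) followed by 14 sample nibbles,
    // so the nibble position is frame_index * 16 plus the offset within the frame, plus the
    // 2 header nibbles when starting mid-frame.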
const auto decode_sample = [&](const int nibble) -> s16 {
const int xn = nibble * (1 << scale);
// We first transform everything into 11 bit fixed point, perform the second order
// digital filter, then transform back.
// 0x400 == 0.5 in 11 bit fixed point.
// Filter: y[n] = x[n] + 0.5 + c1 * y[n-1] + c2 * y[n-2]
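        // Worked example with hypothetical values: nibble=3, scale=4 -> xn=48; with
        // coef1=0x0800 (1.0 in Q11), coef2=0, yn1=100, yn2=0:
        // val = ((48 << 11) + 0x400 + 2048 * 100) >> 11 = 304128 >> 11 = 148,
        // i.e. 48 + 0.5 + 100 truncated back to integer.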
int val = ((xn << 11) + 0x400 + coef1 * yn1 + coef2 * yn2) >> 11;
// Clamp to output range.
val = std::clamp<s32>(val, -32768, 32767);
// Advance output feedback.
yn2 = yn1;
yn1 = static_cast<s16>(val);
return yn1;
};
std::size_t buffer_offset{};
std::vector<u8> buffer(
std::max((samples_processed / FRAME_LEN) * SAMPLES_PER_FRAME, FRAME_LEN));
memory.ReadBlock(wave_buffer.buffer_address + (position_in_frame / 2), buffer.data(),
buffer.size());
std::size_t cur_mix_offset = mix_offset;
auto remaining_samples = samples_processed;
while (remaining_samples > 0) {
if (position_in_frame % NIBBLES_PER_SAMPLE == 0) {
// Read header
frame_header = buffer[buffer_offset++];
idx = (frame_header >> 4) & 0xf;
scale = frame_header & 0xf;
coef1 = coeffs[idx * 2];
coef2 = coeffs[idx * 2 + 1];
position_in_frame += 2;
// Decode entire frame
if (remaining_samples >= static_cast<int>(SAMPLES_PER_FRAME)) {
for (std::size_t i = 0; i < SAMPLES_PER_FRAME / 2; i++) {
// Sample 1
const s32 s0 = SIGNED_NIBBLES[buffer[buffer_offset] >> 4];
const s32 s1 = SIGNED_NIBBLES[buffer[buffer_offset++] & 0xf];
const s16 sample_1 = decode_sample(s0);
const s16 sample_2 = decode_sample(s1);
sample_buffer[cur_mix_offset++] = sample_1;
sample_buffer[cur_mix_offset++] = sample_2;
}
remaining_samples -= static_cast<int>(SAMPLES_PER_FRAME);
position_in_frame += SAMPLES_PER_FRAME;
continue;
}
}
// Decode mid frame
s32 current_nibble = buffer[buffer_offset];
if (position_in_frame++ & 0x1) {
current_nibble &= 0xf;
buffer_offset++;
} else {
current_nibble >>= 4;
}
const s16 sample = decode_sample(SIGNED_NIBBLES[current_nibble]);
sample_buffer[cur_mix_offset++] = sample;
remaining_samples--;
}
dsp_state.context.header = frame_header;
dsp_state.context.yn1 = yn1;
dsp_state.context.yn2 = yn2;
return samples_processed;
}
s32* CommandGenerator::GetMixBuffer(std::size_t index) {
return mix_buffer.data() + (index * worker_params.sample_count);
}
const s32* CommandGenerator::GetMixBuffer(std::size_t index) const {
return mix_buffer.data() + (index * worker_params.sample_count);
}
std::size_t CommandGenerator::GetMixChannelBufferOffset(s32 channel) const {
return worker_params.mix_buffer_count + channel;
}
std::size_t CommandGenerator::GetTotalMixBufferCount() const {
return worker_params.mix_buffer_count + AudioCommon::MAX_CHANNEL_COUNT;
}
s32* CommandGenerator::GetChannelMixBuffer(s32 channel) {
return GetMixBuffer(worker_params.mix_buffer_count + channel);
}
const s32* CommandGenerator::GetChannelMixBuffer(s32 channel) const {
return GetMixBuffer(worker_params.mix_buffer_count + channel);
}
void CommandGenerator::DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* output,
VoiceState& dsp_state, s32 channel,
s32 target_sample_rate, s32 sample_count,
s32 node_id) {
const auto& in_params = voice_info.GetInParams();
if (dumping_frame) {
LOG_DEBUG(Audio,
"(DSP_TRACE) DecodeFromWaveBuffers, node_id={}, channel={}, "
"format={}, sample_count={}, sample_rate={}, mix_id={}, splitter_id={}",
node_id, channel, in_params.sample_format, sample_count, in_params.sample_rate,
in_params.mix_id, in_params.splitter_info_id);
}
ASSERT_OR_EXECUTE(output != nullptr, { return; });
const auto resample_rate = static_cast<s32>(
static_cast<float>(in_params.sample_rate) / static_cast<float>(target_sample_rate) *
static_cast<float>(static_cast<s32>(in_params.pitch * 32768.0f)));
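    // resample_rate is effectively a Q15 step size: how many source samples to advance per
    // output sample (sample-rate ratio scaled by pitch), consumed via the >> 15 below.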
if (dsp_state.fraction + sample_count * resample_rate >
static_cast<s32>(SCALED_MIX_BUFFER_SIZE - 4ULL)) {
return;
}
auto min_required_samples =
std::min(static_cast<s32>(SCALED_MIX_BUFFER_SIZE) - dsp_state.fraction, resample_rate);
if (min_required_samples >= sample_count) {
min_required_samples = sample_count;
}
std::size_t temp_mix_offset{};
bool is_buffer_completed{false};
auto samples_remaining = sample_count;
while (samples_remaining > 0 && !is_buffer_completed) {
const auto samples_to_output = std::min(samples_remaining, min_required_samples);
const auto samples_to_read = (samples_to_output * resample_rate + dsp_state.fraction) >> 15;
if (!in_params.behavior_flags.is_pitch_and_src_skipped) {
            // Append sample history for the resampler
for (std::size_t i = 0; i < AudioCommon::MAX_SAMPLE_HISTORY; i++) {
sample_buffer[temp_mix_offset + i] = dsp_state.sample_history[i];
}
temp_mix_offset += 4;
}
s32 samples_read{};
while (samples_read < samples_to_read) {
const auto& wave_buffer = in_params.wave_buffer[dsp_state.wave_buffer_index];
// No more data can be read
if (!dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index]) {
is_buffer_completed = true;
break;
}
if (in_params.sample_format == SampleFormat::Adpcm && dsp_state.offset == 0 &&
wave_buffer.context_address != 0 && wave_buffer.context_size != 0) {
// TODO(ogniK): ADPCM loop context
}
s32 samples_decoded{0};
switch (in_params.sample_format) {
case SampleFormat::Pcm16:
samples_decoded = DecodePcm16(voice_info, dsp_state, samples_to_read - samples_read,
channel, temp_mix_offset);
break;
case SampleFormat::Adpcm:
samples_decoded = DecodeAdpcm(voice_info, dsp_state, samples_to_read - samples_read,
channel, temp_mix_offset);
break;
default:
UNREACHABLE_MSG("Unimplemented sample format={}", in_params.sample_format);
}
temp_mix_offset += samples_decoded;
samples_read += samples_decoded;
dsp_state.offset += samples_decoded;
dsp_state.played_sample_count += samples_decoded;
if (dsp_state.offset >=
(wave_buffer.end_sample_offset - wave_buffer.start_sample_offset) ||
samples_decoded == 0) {
// Reset our sample offset
dsp_state.offset = 0;
if (wave_buffer.is_looping) {
if (samples_decoded == 0) {
// End of our buffer
is_buffer_completed = true;
break;
}
if (in_params.behavior_flags.is_played_samples_reset_at_loop_point.Value()) {
dsp_state.played_sample_count = 0;
}
} else {
// Update our wave buffer states
dsp_state.is_wave_buffer_valid[dsp_state.wave_buffer_index] = false;
dsp_state.wave_buffer_consumed++;
dsp_state.wave_buffer_index =
(dsp_state.wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
if (wave_buffer.end_of_stream) {
dsp_state.played_sample_count = 0;
}
}
}
}
if (in_params.behavior_flags.is_pitch_and_src_skipped.Value()) {
// No need to resample
std::memcpy(output, sample_buffer.data(), samples_read * sizeof(s32));
} else {
std::fill(sample_buffer.begin() + temp_mix_offset,
sample_buffer.begin() + temp_mix_offset + (samples_to_read - samples_read),
0);
AudioCore::Resample(output, sample_buffer.data(), resample_rate, dsp_state.fraction,
samples_to_output);
            // Update the sample history window for the next resampling pass
for (std::size_t i = 0; i < AudioCommon::MAX_SAMPLE_HISTORY; i++) {
dsp_state.sample_history[i] = sample_buffer[samples_to_read + i];
}
}
output += samples_to_output;
samples_remaining -= samples_to_output;
}
}
} // namespace AudioCore

102
src/audio_core/command_generator.h Executable file

@@ -0,0 +1,102 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "audio_core/common.h"
#include "audio_core/voice_context.h"
#include "common/common_types.h"
namespace Core::Memory {
class Memory;
}
namespace AudioCore {
class MixContext;
class SplitterContext;
class ServerSplitterDestinationData;
class ServerMixInfo;
class EffectContext;
class EffectBase;
struct AuxInfoDSP;
using MixVolumeBuffer = std::array<float, AudioCommon::MAX_MIX_BUFFERS>;
class CommandGenerator {
public:
explicit CommandGenerator(AudioCommon::AudioRendererParameter& worker_params_,
VoiceContext& voice_context_, MixContext& mix_context_,
SplitterContext& splitter_context_, EffectContext& effect_context_,
Core::Memory::Memory& memory_);
~CommandGenerator();
void ClearMixBuffers();
void GenerateVoiceCommands();
void GenerateVoiceCommand(ServerVoiceInfo& voice_info);
void GenerateSubMixCommands();
void GenerateFinalMixCommands();
void PreCommand();
void PostCommand();
[[nodiscard]] s32* GetChannelMixBuffer(s32 channel);
[[nodiscard]] const s32* GetChannelMixBuffer(s32 channel) const;
[[nodiscard]] s32* GetMixBuffer(std::size_t index);
[[nodiscard]] const s32* GetMixBuffer(std::size_t index) const;
[[nodiscard]] std::size_t GetMixChannelBufferOffset(s32 channel) const;
[[nodiscard]] std::size_t GetTotalMixBufferCount() const;
private:
void GenerateDataSourceCommand(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 channel);
void GenerateBiquadFilterCommandForVoice(ServerVoiceInfo& voice_info, VoiceState& dsp_state,
s32 mix_buffer_count, s32 channel);
void GenerateVolumeRampCommand(float last_volume, float current_volume, s32 channel,
s32 node_id);
void GenerateVoiceMixCommand(const MixVolumeBuffer& mix_volumes,
const MixVolumeBuffer& last_mix_volumes, VoiceState& dsp_state,
s32 mix_buffer_offset, s32 mix_buffer_count, s32 voice_index,
s32 node_id);
void GenerateSubMixCommand(ServerMixInfo& mix_info);
void GenerateMixCommands(ServerMixInfo& mix_info);
void GenerateMixCommand(std::size_t output_offset, std::size_t input_offset, float volume,
s32 node_id);
void GenerateFinalMixCommand();
void GenerateBiquadFilterCommand(s32 mix_buffer, const BiquadFilterParameter& params,
std::array<s64, 2>& state, std::size_t input_offset,
std::size_t output_offset, s32 sample_count, s32 node_id);
void GenerateDepopPrepareCommand(VoiceState& dsp_state, std::size_t mix_buffer_count,
std::size_t mix_buffer_offset);
void GenerateDepopForMixBuffersCommand(std::size_t mix_buffer_count,
std::size_t mix_buffer_offset, s32 sample_rate);
void GenerateEffectCommand(ServerMixInfo& mix_info);
void GenerateI3dl2ReverbEffectCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled);
void GenerateBiquadFilterEffectCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled);
void GenerateAuxCommand(s32 mix_buffer_offset, EffectBase* info, bool enabled);
[[nodiscard]] ServerSplitterDestinationData* GetDestinationData(s32 splitter_id, s32 index);
s32 WriteAuxBuffer(AuxInfoDSP& dsp_info, VAddr send_buffer, u32 max_samples, const s32* data,
u32 sample_count, u32 write_offset, u32 write_count);
s32 ReadAuxBuffer(AuxInfoDSP& recv_info, VAddr recv_buffer, u32 max_samples, s32* out_data,
u32 sample_count, u32 read_offset, u32 read_count);
// DSP Code
s32 DecodePcm16(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_count,
s32 channel, std::size_t mix_offset);
s32 DecodeAdpcm(ServerVoiceInfo& voice_info, VoiceState& dsp_state, s32 sample_count,
s32 channel, std::size_t mix_offset);
void DecodeFromWaveBuffers(ServerVoiceInfo& voice_info, s32* output, VoiceState& dsp_state,
s32 channel, s32 target_sample_rate, s32 sample_count, s32 node_id);
AudioCommon::AudioRendererParameter& worker_params;
VoiceContext& voice_context;
MixContext& mix_context;
SplitterContext& splitter_context;
EffectContext& effect_context;
Core::Memory::Memory& memory;
std::vector<s32> mix_buffer{};
std::vector<s32> sample_buffer{};
std::vector<s32> depop_buffer{};
bool dumping_frame{false};
};
} // namespace AudioCore

108
src/audio_core/common.h Executable file

@@ -0,0 +1,108 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/hle/result.h"
namespace AudioCommon {
namespace Audren {
constexpr ResultCode ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};
constexpr ResultCode ERR_SPLITTER_SORT_FAILED{ErrorModule::Audio, 43};
} // namespace Audren
constexpr u32_le CURRENT_PROCESS_REVISION = Common::MakeMagic('R', 'E', 'V', '8');
constexpr std::size_t MAX_MIX_BUFFERS = 24;
constexpr std::size_t MAX_BIQUAD_FILTERS = 2;
constexpr std::size_t MAX_CHANNEL_COUNT = 6;
constexpr std::size_t MAX_WAVE_BUFFERS = 4;
constexpr std::size_t MAX_SAMPLE_HISTORY = 4;
constexpr u32 STREAM_SAMPLE_RATE = 48000;
constexpr u32 STREAM_NUM_CHANNELS = 2;
constexpr s32 NO_SPLITTER = -1;
constexpr s32 NO_MIX = 0x7fffffff;
constexpr s32 NO_FINAL_MIX = std::numeric_limits<s32>::min();
constexpr s32 FINAL_MIX = 0;
constexpr s32 NO_EFFECT_ORDER = -1;
constexpr std::size_t TEMP_MIX_BASE_SIZE = 0x3f00; // TODO(ogniK): Work out this constant
// Any size checks seem to take the sample history into account,
// and our constant ends up being 0x3f04; the extra 4 are most
// likely the sample history
constexpr std::size_t TOTAL_TEMP_MIX_SIZE = TEMP_MIX_BASE_SIZE + AudioCommon::MAX_SAMPLE_HISTORY;
static constexpr u32 VersionFromRevision(u32_le rev) {
// "REV7" -> 7
return ((rev >> 24) & 0xff) - 0x30;
}
static constexpr bool IsRevisionSupported(u32 required, u32_le user_revision) {
const auto base = VersionFromRevision(user_revision);
return required <= base;
}
static constexpr bool IsValidRevision(u32_le revision) {
const auto base = VersionFromRevision(revision);
constexpr auto max_rev = VersionFromRevision(CURRENT_PROCESS_REVISION);
return base <= max_rev;
}
static constexpr bool CanConsumeBuffer(std::size_t size, std::size_t offset, std::size_t required) {
if (offset > size) {
return false;
}
if (size < required) {
return false;
}
if ((size - offset) < required) {
return false;
}
return true;
}
struct UpdateDataSizes {
u32_le behavior{};
u32_le memory_pool{};
u32_le voice{};
u32_le voice_channel_resource{};
u32_le effect{};
u32_le mixer{};
u32_le sink{};
u32_le performance{};
u32_le splitter{};
u32_le render_info{};
INSERT_PADDING_WORDS(4);
};
static_assert(sizeof(UpdateDataSizes) == 0x38, "UpdateDataSizes is an invalid size");
struct UpdateDataHeader {
u32_le revision{};
UpdateDataSizes size{};
u32_le total_size{};
};
static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader is an invalid size");
struct AudioRendererParameter {
u32_le sample_rate;
u32_le sample_count;
u32_le mix_buffer_count;
u32_le submix_count;
u32_le voice_count;
u32_le sink_count;
u32_le effect_count;
u32_le performance_frame_count;
u8 is_voice_drop_enabled;
u8 unknown_21;
u8 unknown_22;
u8 execution_mode;
u32_le splitter_count;
u32_le num_splitter_send_channels;
u32_le unknown_30;
u32_le revision;
};
static_assert(sizeof(AudioRendererParameter) == 52, "AudioRendererParameter is an invalid size");
} // namespace AudioCommon

271
src/audio_core/cubeb_sink.cpp Executable file

@@ -0,0 +1,271 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <atomic>
#include <cstring>
#include "audio_core/cubeb_sink.h"
#include "audio_core/stream.h"
#include "audio_core/time_stretch.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/ring_buffer.h"
#include "core/settings.h"
#ifdef _WIN32
#include <objbase.h>
#endif
namespace AudioCore {
class CubebSinkStream final : public SinkStream {
public:
CubebSinkStream(cubeb* ctx_, u32 sample_rate, u32 num_channels_, cubeb_devid output_device,
const std::string& name)
: ctx{ctx_}, num_channels{std::min(num_channels_, 6u)}, time_stretch{sample_rate,
num_channels} {
cubeb_stream_params params{};
params.rate = sample_rate;
params.channels = num_channels;
params.format = CUBEB_SAMPLE_S16NE;
params.prefs = CUBEB_STREAM_PREF_PERSIST;
switch (num_channels) {
case 1:
params.layout = CUBEB_LAYOUT_MONO;
break;
case 2:
params.layout = CUBEB_LAYOUT_STEREO;
break;
case 6:
params.layout = CUBEB_LAYOUT_3F2_LFE;
break;
}
u32 minimum_latency{};
if (cubeb_get_min_latency(ctx, &params, &minimum_latency) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "Error getting minimum latency");
}
if (cubeb_stream_init(ctx, &stream_backend, name.c_str(), nullptr, nullptr, output_device,
&params, std::max(512u, minimum_latency),
&CubebSinkStream::DataCallback, &CubebSinkStream::StateCallback,
this) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "Error initializing cubeb stream");
return;
}
if (cubeb_stream_start(stream_backend) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "Error starting cubeb stream");
return;
}
}
~CubebSinkStream() override {
if (!ctx) {
return;
}
if (cubeb_stream_stop(stream_backend) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "Error stopping cubeb stream");
}
cubeb_stream_destroy(stream_backend);
}
void EnqueueSamples(u32 source_num_channels, const std::vector<s16>& samples) override {
if (source_num_channels > num_channels) {
            // Downmix 6 channels to 2
ASSERT_MSG(source_num_channels == 6, "Channel count must be 6");
std::vector<s16> buf;
buf.reserve(samples.size() * num_channels / source_num_channels);
for (std::size_t i = 0; i < samples.size(); i += source_num_channels) {
// Downmixing implementation taken from the ATSC standard
const s16 left{samples[i + 0]};
const s16 right{samples[i + 1]};
const s16 center{samples[i + 2]};
const s16 surround_left{samples[i + 4]};
const s16 surround_right{samples[i + 5]};
// Not used in the ATSC reference implementation
[[maybe_unused]] const s16 low_frequency_effects{samples[i + 3]};
constexpr s32 clev{707}; // center mixing level coefficient
constexpr s32 slev{707}; // surround mixing level coefficient
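                // Downmix formula: L' = L + clev*C + slev*Ls and R' = R + clev*C + slev*Rs,
                // with clev = slev ~= 0.707 (-3 dB), approximated here as 707/1000.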
buf.push_back(static_cast<s16>(left + (clev * center / 1000) +
(slev * surround_left / 1000)));
buf.push_back(static_cast<s16>(right + (clev * center / 1000) +
(slev * surround_right / 1000)));
}
queue.Push(buf);
return;
}
queue.Push(samples);
}
std::size_t SamplesInQueue(u32 channel_count) const override {
if (!ctx)
return 0;
return queue.Size() / channel_count;
}
void Flush() override {
should_flush = true;
}
u32 GetNumChannels() const {
return num_channels;
}
private:
std::vector<std::string> device_list;
cubeb* ctx{};
cubeb_stream* stream_backend{};
u32 num_channels{};
Common::RingBuffer<s16, 0x10000> queue;
std::array<s16, 2> last_frame{};
std::atomic<bool> should_flush{};
TimeStretcher time_stretch;
static long DataCallback(cubeb_stream* stream, void* user_data, const void* input_buffer,
void* output_buffer, long num_frames);
static void StateCallback(cubeb_stream* stream, void* user_data, cubeb_state state);
};
CubebSink::CubebSink(std::string_view target_device_name) {
// Cubeb requires COM to be initialized on the thread calling cubeb_init on Windows
#ifdef _WIN32
com_init_result = CoInitializeEx(nullptr, COINIT_MULTITHREADED);
#endif
if (cubeb_init(&ctx, "yuzu", nullptr) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "cubeb_init failed");
return;
}
if (target_device_name != auto_device_name && !target_device_name.empty()) {
cubeb_device_collection collection;
if (cubeb_enumerate_devices(ctx, CUBEB_DEVICE_TYPE_OUTPUT, &collection) != CUBEB_OK) {
LOG_WARNING(Audio_Sink, "Audio output device enumeration not supported");
} else {
const auto collection_end{collection.device + collection.count};
const auto device{
std::find_if(collection.device, collection_end, [&](const cubeb_device_info& info) {
return info.friendly_name != nullptr &&
target_device_name == info.friendly_name;
})};
if (device != collection_end) {
output_device = device->devid;
}
cubeb_device_collection_destroy(ctx, &collection);
}
}
}
CubebSink::~CubebSink() {
if (!ctx) {
return;
}
for (auto& sink_stream : sink_streams) {
sink_stream.reset();
}
cubeb_destroy(ctx);
#ifdef _WIN32
if (SUCCEEDED(com_init_result)) {
CoUninitialize();
}
#endif
}
SinkStream& CubebSink::AcquireSinkStream(u32 sample_rate, u32 num_channels,
const std::string& name) {
sink_streams.push_back(
std::make_unique<CubebSinkStream>(ctx, sample_rate, num_channels, output_device, name));
return *sink_streams.back();
}
long CubebSinkStream::DataCallback([[maybe_unused]] cubeb_stream* stream, void* user_data,
[[maybe_unused]] const void* input_buffer, void* output_buffer,
long num_frames) {
auto* impl = static_cast<CubebSinkStream*>(user_data);
auto* buffer = static_cast<u8*>(output_buffer);
if (!impl) {
return {};
}
const std::size_t num_channels = impl->GetNumChannels();
const std::size_t samples_to_write = num_channels * num_frames;
std::size_t samples_written;
/*
if (Settings::values.enable_audio_stretching.GetValue()) {
const std::vector<s16> in{impl->queue.Pop()};
const std::size_t num_in{in.size() / num_channels};
s16* const out{reinterpret_cast<s16*>(buffer)};
const std::size_t out_frames =
impl->time_stretch.Process(in.data(), num_in, out, num_frames);
samples_written = out_frames * num_channels;
if (impl->should_flush) {
impl->time_stretch.Flush();
impl->should_flush = false;
}
} else {
samples_written = impl->queue.Pop(buffer, samples_to_write);
}*/
samples_written = impl->queue.Pop(buffer, samples_to_write);
if (samples_written >= num_channels) {
std::memcpy(&impl->last_frame[0], buffer + (samples_written - num_channels) * sizeof(s16),
num_channels * sizeof(s16));
}
// Fill the rest of the frames with last_frame
for (std::size_t i = samples_written; i < samples_to_write; i += num_channels) {
std::memcpy(buffer + i * sizeof(s16), &impl->last_frame[0], num_channels * sizeof(s16));
}
return num_frames;
}
void CubebSinkStream::StateCallback([[maybe_unused]] cubeb_stream* stream,
[[maybe_unused]] void* user_data,
[[maybe_unused]] cubeb_state state) {}
std::vector<std::string> ListCubebSinkDevices() {
std::vector<std::string> device_list;
cubeb* ctx;
if (cubeb_init(&ctx, "yuzu Device Enumerator", nullptr) != CUBEB_OK) {
LOG_CRITICAL(Audio_Sink, "cubeb_init failed");
return {};
}
cubeb_device_collection collection;
if (cubeb_enumerate_devices(ctx, CUBEB_DEVICE_TYPE_OUTPUT, &collection) != CUBEB_OK) {
LOG_WARNING(Audio_Sink, "Audio output device enumeration not supported");
} else {
for (std::size_t i = 0; i < collection.count; i++) {
const cubeb_device_info& device = collection.device[i];
if (device.friendly_name) {
device_list.emplace_back(device.friendly_name);
}
}
cubeb_device_collection_destroy(ctx, &collection);
}
cubeb_destroy(ctx);
return device_list;
}
} // namespace AudioCore

36
src/audio_core/cubeb_sink.h Executable file

@@ -0,0 +1,36 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <vector>
#include <cubeb/cubeb.h>
#include "audio_core/sink.h"
namespace AudioCore {
class CubebSink final : public Sink {
public:
explicit CubebSink(std::string_view device_id);
~CubebSink() override;
SinkStream& AcquireSinkStream(u32 sample_rate, u32 num_channels,
const std::string& name) override;
private:
cubeb* ctx{};
cubeb_devid output_device{};
std::vector<SinkStreamPtr> sink_streams;
#ifdef _WIN32
u32 com_init_result = 0;
#endif
};
std::vector<std::string> ListCubebSinkDevices();
} // namespace AudioCore

299
src/audio_core/effect_context.cpp Executable file

@@ -0,0 +1,299 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include "audio_core/effect_context.h"
namespace AudioCore {
namespace {
bool ValidChannelCountForEffect(s32 channel_count) {
return channel_count == 1 || channel_count == 2 || channel_count == 4 || channel_count == 6;
}
} // namespace
EffectContext::EffectContext(std::size_t effect_count_) : effect_count(effect_count_) {
effects.reserve(effect_count);
std::generate_n(std::back_inserter(effects), effect_count,
[] { return std::make_unique<EffectStubbed>(); });
}
EffectContext::~EffectContext() = default;
std::size_t EffectContext::GetCount() const {
return effect_count;
}
EffectBase* EffectContext::GetInfo(std::size_t i) {
return effects.at(i).get();
}
EffectBase* EffectContext::RetargetEffect(std::size_t i, EffectType effect) {
switch (effect) {
case EffectType::Invalid:
effects[i] = std::make_unique<EffectStubbed>();
break;
case EffectType::BufferMixer:
effects[i] = std::make_unique<EffectBufferMixer>();
break;
case EffectType::Aux:
effects[i] = std::make_unique<EffectAuxInfo>();
break;
case EffectType::Delay:
effects[i] = std::make_unique<EffectDelay>();
break;
case EffectType::Reverb:
effects[i] = std::make_unique<EffectReverb>();
break;
case EffectType::I3dl2Reverb:
effects[i] = std::make_unique<EffectI3dl2Reverb>();
break;
case EffectType::BiquadFilter:
effects[i] = std::make_unique<EffectBiquadFilter>();
break;
default:
UNREACHABLE_MSG("Unimplemented effect {}", effect);
effects[i] = std::make_unique<EffectStubbed>();
}
return GetInfo(i);
}
const EffectBase* EffectContext::GetInfo(std::size_t i) const {
return effects.at(i).get();
}
EffectStubbed::EffectStubbed() : EffectBase(EffectType::Invalid) {}
EffectStubbed::~EffectStubbed() = default;
void EffectStubbed::Update([[maybe_unused]] EffectInfo::InParams& in_params) {}
void EffectStubbed::UpdateForCommandGeneration() {}
EffectBase::EffectBase(EffectType effect_type_) : effect_type(effect_type_) {}
EffectBase::~EffectBase() = default;
UsageState EffectBase::GetUsage() const {
return usage;
}
EffectType EffectBase::GetType() const {
return effect_type;
}
bool EffectBase::IsEnabled() const {
return enabled;
}
s32 EffectBase::GetMixID() const {
return mix_id;
}
s32 EffectBase::GetProcessingOrder() const {
return processing_order;
}
EffectI3dl2Reverb::EffectI3dl2Reverb() : EffectGeneric(EffectType::I3dl2Reverb) {}
EffectI3dl2Reverb::~EffectI3dl2Reverb() = default;
void EffectI3dl2Reverb::Update(EffectInfo::InParams& in_params) {
auto& params = GetParams();
const auto* reverb_params = reinterpret_cast<I3dl2ReverbParams*>(in_params.raw.data());
if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
UNREACHABLE_MSG("Invalid reverb max channel count {}", reverb_params->max_channels);
return;
}
const auto last_status = params.status;
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
params = *reverb_params;
if (!ValidChannelCountForEffect(reverb_params->channel_count)) {
params.channel_count = params.max_channels;
}
enabled = in_params.is_enabled;
if (last_status != ParameterStatus::Updated) {
params.status = last_status;
}
if (in_params.is_new || skipped) {
usage = UsageState::Initialized;
params.status = ParameterStatus::Initialized;
skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
}
}
void EffectI3dl2Reverb::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
GetParams().status = ParameterStatus::Updated;
}
EffectBiquadFilter::EffectBiquadFilter() : EffectGeneric(EffectType::BiquadFilter) {}
EffectBiquadFilter::~EffectBiquadFilter() = default;
void EffectBiquadFilter::Update(EffectInfo::InParams& in_params) {
auto& params = GetParams();
const auto* biquad_params = reinterpret_cast<BiquadFilterParams*>(in_params.raw.data());
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
params = *biquad_params;
enabled = in_params.is_enabled;
}
void EffectBiquadFilter::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
GetParams().status = ParameterStatus::Updated;
}
EffectAuxInfo::EffectAuxInfo() : EffectGeneric(EffectType::Aux) {}
EffectAuxInfo::~EffectAuxInfo() = default;
void EffectAuxInfo::Update(EffectInfo::InParams& in_params) {
const auto* aux_params = reinterpret_cast<AuxInfo*>(in_params.raw.data());
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
GetParams() = *aux_params;
enabled = in_params.is_enabled;
if (in_params.is_new || skipped) {
skipped = aux_params->send_buffer_info == 0 || aux_params->return_buffer_info == 0;
if (skipped) {
return;
}
        // There are two AuxInfos of identical size: the first is managed by the CPU,
        // the second by the DSP. All we care about is managing the DSP one.
send_info = aux_params->send_buffer_info + sizeof(AuxInfoDSP);
send_buffer = aux_params->send_buffer_info + (sizeof(AuxInfoDSP) * 2);
recv_info = aux_params->return_buffer_info + sizeof(AuxInfoDSP);
recv_buffer = aux_params->return_buffer_info + (sizeof(AuxInfoDSP) * 2);
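        // The guest layout therefore appears to be [CPU AuxInfoDSP][DSP AuxInfoDSP][sample data],
        // which is why both the info and the data pointers are offset past the headers.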
}
}
void EffectAuxInfo::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
}
VAddr EffectAuxInfo::GetSendInfo() const {
return send_info;
}
VAddr EffectAuxInfo::GetSendBuffer() const {
return send_buffer;
}
VAddr EffectAuxInfo::GetRecvInfo() const {
return recv_info;
}
VAddr EffectAuxInfo::GetRecvBuffer() const {
return recv_buffer;
}
EffectDelay::EffectDelay() : EffectGeneric(EffectType::Delay) {}
EffectDelay::~EffectDelay() = default;
void EffectDelay::Update(EffectInfo::InParams& in_params) {
const auto* delay_params = reinterpret_cast<DelayParams*>(in_params.raw.data());
auto& params = GetParams();
if (!ValidChannelCountForEffect(delay_params->max_channels)) {
return;
}
const auto last_status = params.status;
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
params = *delay_params;
if (!ValidChannelCountForEffect(delay_params->channels)) {
params.channels = params.max_channels;
}
enabled = in_params.is_enabled;
if (last_status != ParameterStatus::Updated) {
params.status = last_status;
}
if (in_params.is_new || skipped) {
usage = UsageState::Initialized;
params.status = ParameterStatus::Initialized;
skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
}
}
void EffectDelay::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
GetParams().status = ParameterStatus::Updated;
}
EffectBufferMixer::EffectBufferMixer() : EffectGeneric(EffectType::BufferMixer) {}
EffectBufferMixer::~EffectBufferMixer() = default;
void EffectBufferMixer::Update(EffectInfo::InParams& in_params) {
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
GetParams() = *reinterpret_cast<BufferMixerParams*>(in_params.raw.data());
enabled = in_params.is_enabled;
}
void EffectBufferMixer::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
}
EffectReverb::EffectReverb() : EffectGeneric(EffectType::Reverb) {}
EffectReverb::~EffectReverb() = default;
void EffectReverb::Update(EffectInfo::InParams& in_params) {
const auto* reverb_params = reinterpret_cast<ReverbParams*>(in_params.raw.data());
auto& params = GetParams();
if (!ValidChannelCountForEffect(reverb_params->max_channels)) {
return;
}
const auto last_status = params.status;
mix_id = in_params.mix_id;
processing_order = in_params.processing_order;
params = *reverb_params;
if (!ValidChannelCountForEffect(reverb_params->channels)) {
params.channels = params.max_channels;
}
enabled = in_params.is_enabled;
if (last_status != ParameterStatus::Updated) {
params.status = last_status;
}
if (in_params.is_new || skipped) {
usage = UsageState::Initialized;
params.status = ParameterStatus::Initialized;
skipped = in_params.buffer_address == 0 || in_params.buffer_size == 0;
}
}
void EffectReverb::UpdateForCommandGeneration() {
if (enabled) {
usage = UsageState::Running;
} else {
usage = UsageState::Stopped;
}
GetParams().status = ParameterStatus::Updated;
}
} // namespace AudioCore

321
src/audio_core/effect_context.h Executable file

@@ -0,0 +1,321 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <memory>
#include <vector>
#include "audio_core/common.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
enum class EffectType : u8 {
Invalid = 0,
BufferMixer = 1,
Aux = 2,
Delay = 3,
Reverb = 4,
I3dl2Reverb = 5,
BiquadFilter = 6,
};
enum class UsageStatus : u8 {
Invalid = 0,
New = 1,
Initialized = 2,
Used = 3,
Removed = 4,
};
enum class UsageState {
Invalid = 0,
Initialized = 1,
Running = 2,
Stopped = 3,
};
enum class ParameterStatus : u8 {
Initialized = 0,
Updating = 1,
Updated = 2,
};
struct BufferMixerParams {
std::array<s8, AudioCommon::MAX_MIX_BUFFERS> input{};
std::array<s8, AudioCommon::MAX_MIX_BUFFERS> output{};
std::array<float_le, AudioCommon::MAX_MIX_BUFFERS> volume{};
s32_le count{};
};
static_assert(sizeof(BufferMixerParams) == 0x94, "BufferMixerParams is an invalid size");
struct AuxInfoDSP {
u32_le read_offset{};
u32_le write_offset{};
u32_le remaining{};
INSERT_PADDING_WORDS(13);
};
static_assert(sizeof(AuxInfoDSP) == 0x40, "AuxInfoDSP is an invalid size");
struct AuxInfo {
std::array<s8, AudioCommon::MAX_MIX_BUFFERS> input_mix_buffers{};
std::array<s8, AudioCommon::MAX_MIX_BUFFERS> output_mix_buffers{};
u32_le count{};
s32_le sample_rate{};
s32_le sample_count{};
s32_le mix_buffer_count{};
u64_le send_buffer_info{};
u64_le send_buffer_base{};
u64_le return_buffer_info{};
u64_le return_buffer_base{};
};
static_assert(sizeof(AuxInfo) == 0x60, "AuxInfo is an invalid size");
struct I3dl2ReverbParams {
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
u16_le max_channels{};
u16_le channel_count{};
INSERT_PADDING_BYTES(1);
u32_le sample_rate{};
f32 room_hf{};
f32 hf_reference{};
f32 decay_time{};
f32 hf_decay_ratio{};
f32 room{};
f32 reflection{};
f32 reverb{};
f32 diffusion{};
f32 reflection_delay{};
f32 reverb_delay{};
f32 density{};
f32 dry_gain{};
ParameterStatus status{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(I3dl2ReverbParams) == 0x4c, "I3dl2ReverbParams is an invalid size");
struct BiquadFilterParams {
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
std::array<s16_le, 3> numerator;
std::array<s16_le, 2> denominator;
s8 channel_count{};
ParameterStatus status{};
};
static_assert(sizeof(BiquadFilterParams) == 0x18, "BiquadFilterParams is an invalid size");
struct DelayParams {
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
u16_le max_channels{};
u16_le channels{};
s32_le max_delay{};
s32_le delay{};
s32_le sample_rate{};
s32_le gain{};
s32_le feedback_gain{};
s32_le out_gain{};
s32_le dry_gain{};
s32_le channel_spread{};
s32_le low_pass{};
ParameterStatus status{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(DelayParams) == 0x38, "DelayParams is an invalid size");
struct ReverbParams {
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> input{};
std::array<s8, AudioCommon::MAX_CHANNEL_COUNT> output{};
u16_le max_channels{};
u16_le channels{};
s32_le sample_rate{};
s32_le mode0{};
s32_le mode0_gain{};
s32_le pre_delay{};
s32_le mode1{};
s32_le mode1_gain{};
s32_le decay{};
s32_le hf_decay_ratio{};
s32_le coloration{};
s32_le reverb_gain{};
s32_le out_gain{};
s32_le dry_gain{};
ParameterStatus status{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(ReverbParams) == 0x44, "ReverbParams is an invalid size");
class EffectInfo {
public:
struct InParams {
EffectType type{};
u8 is_new{};
u8 is_enabled{};
INSERT_PADDING_BYTES(1);
s32_le mix_id{};
u64_le buffer_address{};
u64_le buffer_size{};
s32_le processing_order{};
INSERT_PADDING_BYTES(4);
union {
std::array<u8, 0xa0> raw;
};
};
static_assert(sizeof(InParams) == 0xc0, "InParams is an invalid size");
struct OutParams {
UsageStatus status{};
INSERT_PADDING_BYTES(15);
};
static_assert(sizeof(OutParams) == 0x10, "OutParams is an invalid size");
};
struct AuxAddress {
VAddr send_dsp_info{};
VAddr send_buffer_base{};
VAddr return_dsp_info{};
VAddr return_buffer_base{};
};
class EffectBase {
public:
explicit EffectBase(EffectType effect_type_);
virtual ~EffectBase();
virtual void Update(EffectInfo::InParams& in_params) = 0;
virtual void UpdateForCommandGeneration() = 0;
[[nodiscard]] UsageState GetUsage() const;
[[nodiscard]] EffectType GetType() const;
[[nodiscard]] bool IsEnabled() const;
[[nodiscard]] s32 GetMixID() const;
[[nodiscard]] s32 GetProcessingOrder() const;
protected:
UsageState usage{UsageState::Invalid};
EffectType effect_type{};
s32 mix_id{};
s32 processing_order{};
bool enabled = false;
};
template <typename T>
class EffectGeneric : public EffectBase {
public:
explicit EffectGeneric(EffectType effect_type_) : EffectBase(effect_type_) {}
T& GetParams() {
return internal_params;
}
    const T& GetParams() const {
return internal_params;
}
private:
T internal_params{};
};
class EffectStubbed : public EffectBase {
public:
explicit EffectStubbed();
~EffectStubbed() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
};
class EffectI3dl2Reverb : public EffectGeneric<I3dl2ReverbParams> {
public:
explicit EffectI3dl2Reverb();
~EffectI3dl2Reverb() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
private:
bool skipped = false;
};
class EffectBiquadFilter : public EffectGeneric<BiquadFilterParams> {
public:
explicit EffectBiquadFilter();
~EffectBiquadFilter() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
};
class EffectAuxInfo : public EffectGeneric<AuxInfo> {
public:
explicit EffectAuxInfo();
~EffectAuxInfo() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
[[nodiscard]] VAddr GetSendInfo() const;
[[nodiscard]] VAddr GetSendBuffer() const;
[[nodiscard]] VAddr GetRecvInfo() const;
[[nodiscard]] VAddr GetRecvBuffer() const;
private:
VAddr send_info{};
VAddr send_buffer{};
VAddr recv_info{};
VAddr recv_buffer{};
bool skipped = false;
AuxAddress addresses{};
};
class EffectDelay : public EffectGeneric<DelayParams> {
public:
explicit EffectDelay();
~EffectDelay() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
private:
bool skipped = false;
};
class EffectBufferMixer : public EffectGeneric<BufferMixerParams> {
public:
explicit EffectBufferMixer();
~EffectBufferMixer() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
};
class EffectReverb : public EffectGeneric<ReverbParams> {
public:
explicit EffectReverb();
~EffectReverb() override;
void Update(EffectInfo::InParams& in_params) override;
void UpdateForCommandGeneration() override;
private:
bool skipped = false;
};
class EffectContext {
public:
explicit EffectContext(std::size_t effect_count_);
~EffectContext();
[[nodiscard]] std::size_t GetCount() const;
[[nodiscard]] EffectBase* GetInfo(std::size_t i);
[[nodiscard]] EffectBase* RetargetEffect(std::size_t i, EffectType effect);
[[nodiscard]] const EffectBase* GetInfo(std::size_t i) const;
private:
std::size_t effect_count{};
std::vector<std::unique_ptr<EffectBase>> effects;
};
} // namespace AudioCore

516
src/audio_core/info_updater.cpp Executable file

@@ -0,0 +1,516 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "audio_core/behavior_info.h"
#include "audio_core/effect_context.h"
#include "audio_core/info_updater.h"
#include "audio_core/memory_pool.h"
#include "audio_core/mix_context.h"
#include "audio_core/sink_context.h"
#include "audio_core/splitter_context.h"
#include "audio_core/voice_context.h"
#include "common/logging/log.h"
namespace AudioCore {
InfoUpdater::InfoUpdater(const std::vector<u8>& in_params_, std::vector<u8>& out_params_,
BehaviorInfo& behavior_info_)
: in_params(in_params_), out_params(out_params_), behavior_info(behavior_info_) {
ASSERT(
AudioCommon::CanConsumeBuffer(in_params.size(), 0, sizeof(AudioCommon::UpdateDataHeader)));
std::memcpy(&input_header, in_params.data(), sizeof(AudioCommon::UpdateDataHeader));
output_header.total_size = sizeof(AudioCommon::UpdateDataHeader);
}
InfoUpdater::~InfoUpdater() = default;
bool InfoUpdater::UpdateBehaviorInfo(BehaviorInfo& in_behavior_info) {
if (input_header.size.behavior != sizeof(BehaviorInfo::InParams)) {
LOG_ERROR(Audio, "Behavior info is an invalid size, expecting 0x{:X} but got 0x{:X}",
sizeof(BehaviorInfo::InParams), input_header.size.behavior);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset,
sizeof(BehaviorInfo::InParams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
BehaviorInfo::InParams behavior_in{};
std::memcpy(&behavior_in, in_params.data() + input_offset, sizeof(BehaviorInfo::InParams));
input_offset += sizeof(BehaviorInfo::InParams);
// Make sure it's an audio revision we can actually support
if (!AudioCommon::IsValidRevision(behavior_in.revision)) {
LOG_ERROR(Audio, "Invalid input revision, revision=0x{:08X}", behavior_in.revision);
return false;
}
// Make sure that our behavior info revision matches the input
if (in_behavior_info.GetUserRevision() != behavior_in.revision) {
LOG_ERROR(Audio,
"User revision differs from input revision, expecting 0x{:08X} but got 0x{:08X}",
in_behavior_info.GetUserRevision(), behavior_in.revision);
return false;
}
// Update behavior info flags
in_behavior_info.ClearError();
in_behavior_info.UpdateFlags(behavior_in.flags);
return true;
}
bool InfoUpdater::UpdateMemoryPools(std::vector<ServerMemoryPoolInfo>& memory_pool_info) {
const auto memory_pool_count = memory_pool_info.size();
const auto total_memory_pool_in = sizeof(ServerMemoryPoolInfo::InParams) * memory_pool_count;
const auto total_memory_pool_out = sizeof(ServerMemoryPoolInfo::OutParams) * memory_pool_count;
if (input_header.size.memory_pool != total_memory_pool_in) {
LOG_ERROR(Audio, "Memory pools are an invalid size, expecting 0x{:X} but got 0x{:X}",
total_memory_pool_in, input_header.size.memory_pool);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_memory_pool_in)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::vector<ServerMemoryPoolInfo::InParams> mempool_in(memory_pool_count);
std::vector<ServerMemoryPoolInfo::OutParams> mempool_out(memory_pool_count);
std::memcpy(mempool_in.data(), in_params.data() + input_offset, total_memory_pool_in);
input_offset += total_memory_pool_in;
// Update our memory pools
for (std::size_t i = 0; i < memory_pool_count; i++) {
if (!memory_pool_info[i].Update(mempool_in[i], mempool_out[i])) {
LOG_ERROR(Audio, "Failed to update memory pool {}!", i);
return false;
}
}
if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset,
total_memory_pool_out)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(out_params.data() + output_offset, mempool_out.data(), total_memory_pool_out);
output_offset += total_memory_pool_out;
output_header.size.memory_pool = static_cast<u32>(total_memory_pool_out);
return true;
}
bool InfoUpdater::UpdateVoiceChannelResources(VoiceContext& voice_context) {
const auto voice_count = voice_context.GetVoiceCount();
const auto voice_size = voice_count * sizeof(VoiceChannelResource::InParams);
std::vector<VoiceChannelResource::InParams> resources_in(voice_count);
if (input_header.size.voice_channel_resource != voice_size) {
LOG_ERROR(Audio, "VoiceChannelResource is an invalid size, expecting 0x{:X} but got 0x{:X}",
voice_size, input_header.size.voice_channel_resource);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, voice_size)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(resources_in.data(), in_params.data() + input_offset, voice_size);
input_offset += voice_size;
// Update our channel resources
for (std::size_t i = 0; i < voice_count; i++) {
// Grab our channel resource
auto& resource = voice_context.GetChannelResource(i);
resource.Update(resources_in[i]);
}
return true;
}
bool InfoUpdater::UpdateVoices(VoiceContext& voice_context,
[[maybe_unused]] std::vector<ServerMemoryPoolInfo>& memory_pool_info,
[[maybe_unused]] VAddr audio_codec_dsp_addr) {
const auto voice_count = voice_context.GetVoiceCount();
std::vector<VoiceInfo::InParams> voice_in(voice_count);
std::vector<VoiceInfo::OutParams> voice_out(voice_count);
const auto voice_in_size = voice_count * sizeof(VoiceInfo::InParams);
const auto voice_out_size = voice_count * sizeof(VoiceInfo::OutParams);
if (input_header.size.voice != voice_in_size) {
LOG_ERROR(Audio, "Voices are an invalid size, expecting 0x{:X} but got 0x{:X}",
voice_in_size, input_header.size.voice);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, voice_in_size)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(voice_in.data(), in_params.data() + input_offset, voice_in_size);
input_offset += voice_in_size;
// Set all voices to not be in use
for (std::size_t i = 0; i < voice_count; i++) {
voice_context.GetInfo(i).GetInParams().in_use = false;
}
// Update our voices
for (std::size_t i = 0; i < voice_count; i++) {
auto& voice_in_params = voice_in[i];
const auto channel_count = static_cast<std::size_t>(voice_in_params.channel_count);
// Skip if it's not currently in use
if (!voice_in_params.is_in_use) {
continue;
}
// Voice states for each channel
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT> voice_states{};
ASSERT(static_cast<std::size_t>(voice_in_params.id) < voice_count);
// Grab our current voice info
auto& voice_info = voice_context.GetInfo(static_cast<std::size_t>(voice_in_params.id));
ASSERT(channel_count <= AudioCommon::MAX_CHANNEL_COUNT);
// Get all our channel voice states
for (std::size_t channel = 0; channel < channel_count; channel++) {
voice_states[channel] =
&voice_context.GetState(voice_in_params.voice_channel_resource_ids[channel]);
}
if (voice_in_params.is_new) {
// Default our values for our voice
voice_info.Initialize();
if (channel_count == 0 || channel_count > AudioCommon::MAX_CHANNEL_COUNT) {
continue;
}
// Zero out our voice states
for (std::size_t channel = 0; channel < channel_count; channel++) {
std::memset(voice_states[channel], 0, sizeof(VoiceState));
}
}
// Update our voice
voice_info.UpdateParameters(voice_in_params, behavior_info);
// TODO(ogniK): Handle mapping errors with behavior info based on in params response
// Update our wave buffers
voice_info.UpdateWaveBuffers(voice_in_params, voice_states, behavior_info);
voice_info.WriteOutStatus(voice_out[i], voice_in_params, voice_states);
}
if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, voice_out_size)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(out_params.data() + output_offset, voice_out.data(), voice_out_size);
output_offset += voice_out_size;
output_header.size.voice = static_cast<u32>(voice_out_size);
return true;
}
bool InfoUpdater::UpdateEffects(EffectContext& effect_context, bool is_active) {
const auto effect_count = effect_context.GetCount();
std::vector<EffectInfo::InParams> effect_in(effect_count);
std::vector<EffectInfo::OutParams> effect_out(effect_count);
const auto total_effect_in = effect_count * sizeof(EffectInfo::InParams);
const auto total_effect_out = effect_count * sizeof(EffectInfo::OutParams);
if (input_header.size.effect != total_effect_in) {
LOG_ERROR(Audio, "Effects are an invalid size, expecting 0x{:X} but got 0x{:X}",
total_effect_in, input_header.size.effect);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_effect_in)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(effect_in.data(), in_params.data() + input_offset, total_effect_in);
input_offset += total_effect_in;
// Update effects
for (std::size_t i = 0; i < effect_count; i++) {
auto* info = effect_context.GetInfo(i);
if (effect_in[i].type != info->GetType()) {
info = effect_context.RetargetEffect(i, effect_in[i].type);
}
info->Update(effect_in[i]);
if ((!is_active && info->GetUsage() != UsageState::Initialized) ||
info->GetUsage() == UsageState::Stopped) {
effect_out[i].status = UsageStatus::Removed;
} else {
effect_out[i].status = UsageStatus::Used;
}
}
if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, total_effect_out)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(out_params.data() + output_offset, effect_out.data(), total_effect_out);
output_offset += total_effect_out;
output_header.size.effect = static_cast<u32>(total_effect_out);
return true;
}
bool InfoUpdater::UpdateSplitterInfo(SplitterContext& splitter_context) {
std::size_t start_offset = input_offset;
std::size_t bytes_read{};
// Update splitter context
if (!splitter_context.Update(in_params, input_offset, bytes_read)) {
LOG_ERROR(Audio, "Failed to update splitter context!");
return false;
}
const auto consumed = input_offset - start_offset;
if (input_header.size.splitter != consumed) {
LOG_ERROR(Audio, "Splitters is an invalid size, expecting 0x{:X} but got 0x{:X}",
bytes_read, input_header.size.splitter);
return false;
}
return true;
}
ResultCode InfoUpdater::UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
SplitterContext& splitter_context,
EffectContext& effect_context) {
std::vector<MixInfo::InParams> mix_in_params;
if (!behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
// If we're not dirty, get ALL mix in parameters
const auto context_mix_count = mix_context.GetCount();
const auto total_mix_in = context_mix_count * sizeof(MixInfo::InParams);
if (input_header.size.mixer != total_mix_in) {
LOG_ERROR(Audio, "Mixer is an invalid size, expecting 0x{:X} but got 0x{:X}",
total_mix_in, input_header.size.mixer);
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_mix_in)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
mix_in_params.resize(context_mix_count);
std::memcpy(mix_in_params.data(), in_params.data() + input_offset, total_mix_in);
input_offset += total_mix_in;
} else {
// Only update the "dirty" mixes
MixInfo::DirtyHeader dirty_header{};
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset,
sizeof(MixInfo::DirtyHeader))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
std::memcpy(&dirty_header, in_params.data() + input_offset, sizeof(MixInfo::DirtyHeader));
input_offset += sizeof(MixInfo::DirtyHeader);
const auto total_mix_in =
dirty_header.mixer_count * sizeof(MixInfo::InParams) + sizeof(MixInfo::DirtyHeader);
if (input_header.size.mixer != total_mix_in) {
LOG_ERROR(Audio, "Mixer is an invalid size, expecting 0x{:X} but got 0x{:X}",
total_mix_in, input_header.size.mixer);
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
if (dirty_header.mixer_count != 0) {
mix_in_params.resize(dirty_header.mixer_count);
std::memcpy(mix_in_params.data(), in_params.data() + input_offset,
mix_in_params.size() * sizeof(MixInfo::InParams));
input_offset += mix_in_params.size() * sizeof(MixInfo::InParams);
}
}
// Get our total input count
const auto mix_count = mix_in_params.size();
if (!behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
// Only verify our buffer count if we're not dirty
std::size_t total_buffer_count{};
for (std::size_t i = 0; i < mix_count; i++) {
const auto& in = mix_in_params[i];
total_buffer_count += in.buffer_count;
if (static_cast<std::size_t>(in.dest_mix_id) > mix_count &&
in.dest_mix_id != AudioCommon::NO_MIX && in.mix_id != AudioCommon::FINAL_MIX) {
LOG_ERROR(
Audio,
"Invalid mix destination, mix_id={:X}, dest_mix_id={:X}, mix_buffer_count={:X}",
in.mix_id, in.dest_mix_id, mix_buffer_count);
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
}
if (total_buffer_count > mix_buffer_count) {
LOG_ERROR(Audio,
"Too many mix buffers used! mix_buffer_count={:X}, requesting_buffers={:X}",
mix_buffer_count, total_buffer_count);
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
}
if (mix_buffer_count == 0) {
LOG_ERROR(Audio, "No mix buffers!");
return AudioCommon::Audren::ERR_INVALID_PARAMETERS;
}
bool should_sort = false;
for (std::size_t i = 0; i < mix_count; i++) {
const auto& mix_in = mix_in_params[i];
std::size_t target_mix{};
if (behavior_info.IsMixInParameterDirtyOnlyUpdateSupported()) {
target_mix = mix_in.mix_id;
} else {
// Titles without dirty-only update support just use i instead of the actual mix_id
target_mix = i;
}
auto& mix_info = mix_context.GetInfo(target_mix);
auto& mix_info_params = mix_info.GetInParams();
if (mix_info_params.in_use != mix_in.in_use) {
mix_info_params.in_use = mix_in.in_use;
mix_info.ResetEffectProcessingOrder();
should_sort = true;
}
if (mix_in.in_use) {
should_sort |= mix_info.Update(mix_context.GetEdgeMatrix(), mix_in, behavior_info,
splitter_context, effect_context);
}
}
if (should_sort && behavior_info.IsSplitterSupported()) {
// Connections changed, so topologically re-sort the mixes using the splitter data
if (!mix_context.TsortInfo(splitter_context)) {
return AudioCommon::Audren::ERR_SPLITTER_SORT_FAILED;
}
}
// TODO(ogniK): Sort when splitter is supported
return RESULT_SUCCESS;
}
bool InfoUpdater::UpdateSinks(SinkContext& sink_context) {
const auto sink_count = sink_context.GetCount();
std::vector<SinkInfo::InParams> sink_in_params(sink_count);
const auto total_sink_in = sink_count * sizeof(SinkInfo::InParams);
if (input_header.size.sink != total_sink_in) {
LOG_ERROR(Audio, "Sinks are an invalid size, expecting 0x{:X} but got 0x{:X}",
total_sink_in, input_header.size.sink);
return false;
}
if (!AudioCommon::CanConsumeBuffer(in_params.size(), input_offset, total_sink_in)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(sink_in_params.data(), in_params.data() + input_offset, total_sink_in);
input_offset += total_sink_in;
// TODO(ogniK): Properly update sinks
if (!sink_in_params.empty()) {
sink_context.UpdateMainSink(sink_in_params[0]);
}
output_header.size.sink = static_cast<u32>(0x20 * sink_count);
output_offset += 0x20 * sink_count;
return true;
}
bool InfoUpdater::UpdatePerformanceBuffer() {
output_header.size.performance = 0x10;
output_offset += 0x10;
return true;
}
bool InfoUpdater::UpdateErrorInfo([[maybe_unused]] BehaviorInfo& in_behavior_info) {
const auto total_behavior_info_out = sizeof(BehaviorInfo::OutParams);
if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, total_behavior_info_out)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
BehaviorInfo::OutParams behavior_info_out{};
behavior_info.CopyErrorInfo(behavior_info_out);
std::memcpy(out_params.data() + output_offset, &behavior_info_out, total_behavior_info_out);
output_offset += total_behavior_info_out;
output_header.size.behavior = total_behavior_info_out;
return true;
}
struct RendererInfo {
u64_le elapsed_frame_count{};
INSERT_PADDING_WORDS(2);
};
static_assert(sizeof(RendererInfo) == 0x10, "RendererInfo is an invalid size");
bool InfoUpdater::UpdateRendererInfo(std::size_t elapsed_frame_count) {
const auto total_renderer_info_out = sizeof(RendererInfo);
if (!AudioCommon::CanConsumeBuffer(out_params.size(), output_offset, total_renderer_info_out)) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
RendererInfo out{};
out.elapsed_frame_count = elapsed_frame_count;
std::memcpy(out_params.data() + output_offset, &out, total_renderer_info_out);
output_offset += total_renderer_info_out;
output_header.size.render_info = total_renderer_info_out;
return true;
}
bool InfoUpdater::CheckConsumedSize() const {
if (output_offset != out_params.size()) {
LOG_ERROR(Audio, "Output is not consumed! Consumed {}, but requires {}. {} bytes remaining",
output_offset, out_params.size(), out_params.size() - output_offset);
return false;
}
/*if (input_offset != in_params.size()) {
LOG_ERROR(Audio, "Input is not consumed!");
return false;
}*/
return true;
}
bool InfoUpdater::WriteOutputHeader() {
if (!AudioCommon::CanConsumeBuffer(out_params.size(), 0,
sizeof(AudioCommon::UpdateDataHeader))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
output_header.revision = AudioCommon::CURRENT_PROCESS_REVISION;
const auto& sz = output_header.size;
output_header.total_size += sz.behavior + sz.memory_pool + sz.voice +
sz.voice_channel_resource + sz.effect + sz.mixer + sz.sink +
sz.performance + sz.splitter + sz.render_info;
std::memcpy(out_params.data(), &output_header, sizeof(AudioCommon::UpdateDataHeader));
return true;
}
} // namespace AudioCore

58
src/audio_core/info_updater.h Executable file
View File

@@ -0,0 +1,58 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include "audio_core/common.h"
#include "common/common_types.h"
namespace AudioCore {
class BehaviorInfo;
class ServerMemoryPoolInfo;
class VoiceContext;
class EffectContext;
class MixContext;
class SinkContext;
class SplitterContext;
class InfoUpdater {
public:
// TODO(ogniK): Pass process handle when we support it
InfoUpdater(const std::vector<u8>& in_params_, std::vector<u8>& out_params_,
BehaviorInfo& behavior_info_);
~InfoUpdater();
bool UpdateBehaviorInfo(BehaviorInfo& in_behavior_info);
bool UpdateMemoryPools(std::vector<ServerMemoryPoolInfo>& memory_pool_info);
bool UpdateVoiceChannelResources(VoiceContext& voice_context);
bool UpdateVoices(VoiceContext& voice_context,
std::vector<ServerMemoryPoolInfo>& memory_pool_info,
VAddr audio_codec_dsp_addr);
bool UpdateEffects(EffectContext& effect_context, bool is_active);
bool UpdateSplitterInfo(SplitterContext& splitter_context);
ResultCode UpdateMixes(MixContext& mix_context, std::size_t mix_buffer_count,
SplitterContext& splitter_context, EffectContext& effect_context);
bool UpdateSinks(SinkContext& sink_context);
bool UpdatePerformanceBuffer();
bool UpdateErrorInfo(BehaviorInfo& in_behavior_info);
bool UpdateRendererInfo(std::size_t elapsed_frame_count);
bool CheckConsumedSize() const;
bool WriteOutputHeader();
private:
const std::vector<u8>& in_params;
std::vector<u8>& out_params;
BehaviorInfo& behavior_info;
AudioCommon::UpdateDataHeader input_header{};
AudioCommon::UpdateDataHeader output_header{};
std::size_t input_offset{sizeof(AudioCommon::UpdateDataHeader)};
std::size_t output_offset{sizeof(AudioCommon::UpdateDataHeader)};
};
} // namespace AudioCore
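The methods above are intended to be called in sequence while one update packet is consumed. The following is a minimal, hypothetical sketch of that driving code, assuming the usual context objects already exist; the RunUpdate name, the zero DSP address, and the zero elapsed frame count are placeholders, and the exact ordering used by audio_renderer.cpp is not shown in this commit excerpt.

// Hypothetical sketch only; not part of this commit. Assumes the audio_core context
// headers (info_updater.h, behavior_info.h, memory_pool.h, voice_context.h,
// effect_context.h, splitter_context.h, mix_context.h, sink_context.h) are included.
bool RunUpdate(AudioCore::InfoUpdater& updater, AudioCore::BehaviorInfo& behavior,
               std::vector<AudioCore::ServerMemoryPoolInfo>& pools,
               AudioCore::VoiceContext& voices, AudioCore::EffectContext& effects,
               AudioCore::SplitterContext& splitters, AudioCore::MixContext& mixes,
               AudioCore::SinkContext& sinks, std::size_t mix_buffer_count) {
    // Each Update* call consumes its slice of in_params and appends its output section.
    if (!updater.UpdateBehaviorInfo(behavior) || !updater.UpdateMemoryPools(pools) ||
        !updater.UpdateVoiceChannelResources(voices) ||
        !updater.UpdateVoices(voices, pools, 0) || !updater.UpdateEffects(effects, true) ||
        !updater.UpdateSplitterInfo(splitters)) {
        return false;
    }
    if (updater.UpdateMixes(mixes, mix_buffer_count, splitters, effects) != RESULT_SUCCESS) {
        return false;
    }
    // Emit the remaining output sections, then the header that totals their sizes.
    return updater.UpdateSinks(sinks) && updater.UpdatePerformanceBuffer() &&
           updater.UpdateErrorInfo(behavior) && updater.UpdateRendererInfo(0) &&
           updater.CheckConsumedSize() && updater.WriteOutputHeader();
}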

61
src/audio_core/memory_pool.cpp Executable file
View File

@@ -0,0 +1,61 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "audio_core/memory_pool.h"
#include "common/logging/log.h"
namespace AudioCore {
ServerMemoryPoolInfo::ServerMemoryPoolInfo() = default;
ServerMemoryPoolInfo::~ServerMemoryPoolInfo() = default;
bool ServerMemoryPoolInfo::Update(const InParams& in_params, OutParams& out_params) {
// Our state does not need to be changed
if (in_params.state != State::RequestAttach && in_params.state != State::RequestDetach) {
return true;
}
// Address or size is null
if (in_params.address == 0 || in_params.size == 0) {
LOG_ERROR(Audio, "Memory pool address or size is zero! address={:X}, size={:X}",
in_params.address, in_params.size);
return false;
}
// Address or size is not aligned
if ((in_params.address % 0x1000) != 0 || (in_params.size % 0x1000) != 0) {
LOG_ERROR(Audio, "Memory pool address or size is not aligned! address={:X}, size={:X}",
in_params.address, in_params.size);
return false;
}
if (in_params.state == State::RequestAttach) {
cpu_address = in_params.address;
size = in_params.size;
used = true;
out_params.state = State::Attached;
} else {
// Unexpected address
if (cpu_address != in_params.address) {
LOG_ERROR(Audio, "Memory pool address differs! Expecting {:X} but address is {:X}",
cpu_address, in_params.address);
return false;
}
if (size != in_params.size) {
LOG_ERROR(Audio, "Memory pool size differs! Expecting {:X} but size is {:X}", size,
in_params.size);
return false;
}
cpu_address = 0;
size = 0;
used = false;
out_params.state = State::Detached;
}
return true;
}
} // namespace AudioCore
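A minimal, hypothetical usage sketch of the attach/detach state machine implemented above; it only uses the InParams, OutParams, and State types declared in memory_pool.h, and the AttachThenDetach name plus the example address and size are made up for illustration.

// Hypothetical sketch only; not part of this commit.
#include "audio_core/memory_pool.h"

static bool AttachThenDetach() {
    using AudioCore::ServerMemoryPoolInfo;
    ServerMemoryPoolInfo pool;
    ServerMemoryPoolInfo::InParams in{};
    ServerMemoryPoolInfo::OutParams out{};

    // Attach a page-aligned guest buffer (address and size must be 0x1000-aligned and non-zero).
    in.address = 0x100000;
    in.size = 0x2000;
    in.state = ServerMemoryPoolInfo::State::RequestAttach;
    if (!pool.Update(in, out) || out.state != ServerMemoryPoolInfo::State::Attached) {
        return false;
    }

    // Detaching must use the same address and size that were attached.
    in.state = ServerMemoryPoolInfo::State::RequestDetach;
    return pool.Update(in, out) && out.state == ServerMemoryPoolInfo::State::Detached;
}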

52
src/audio_core/memory_pool.h Executable file
View File

@@ -0,0 +1,52 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
class ServerMemoryPoolInfo {
public:
ServerMemoryPoolInfo();
~ServerMemoryPoolInfo();
enum class State : u32_le {
Invalid = 0x0,
Acquired = 0x1,
RequestDetach = 0x2,
Detached = 0x3,
RequestAttach = 0x4,
Attached = 0x5,
Released = 0x6,
};
struct InParams {
u64_le address{};
u64_le size{};
State state{};
INSERT_PADDING_WORDS(3);
};
static_assert(sizeof(InParams) == 0x20, "InParams are an invalid size");
struct OutParams {
State state{};
INSERT_PADDING_WORDS(3);
};
static_assert(sizeof(OutParams) == 0x10, "OutParams are an invalid size");
bool Update(const InParams& in_params, OutParams& out_params);
private:
// There's another entry here, the DSP address; however, since we're not talking to the DSP,
// we can just use the address provided by the guest without needing to remap it
u64_le cpu_address{};
u64_le size{};
bool used{};
};
} // namespace AudioCore

296
src/audio_core/mix_context.cpp Executable file
View File

@@ -0,0 +1,296 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "audio_core/behavior_info.h"
#include "audio_core/common.h"
#include "audio_core/effect_context.h"
#include "audio_core/mix_context.h"
#include "audio_core/splitter_context.h"
namespace AudioCore {
MixContext::MixContext() = default;
MixContext::~MixContext() = default;
void MixContext::Initialize(const BehaviorInfo& behavior_info, std::size_t mix_count,
std::size_t effect_count) {
info_count = mix_count;
infos.resize(info_count);
auto& final_mix = GetInfo(AudioCommon::FINAL_MIX);
final_mix.GetInParams().mix_id = AudioCommon::FINAL_MIX;
sorted_info.reserve(infos.size());
for (auto& info : infos) {
sorted_info.push_back(&info);
}
for (auto& info : infos) {
info.SetEffectCount(effect_count);
}
// Only initialize our edge matrix and node states if splitters are supported
if (behavior_info.IsSplitterSupported()) {
node_states.Initialize(mix_count);
edge_matrix.Initialize(mix_count);
}
}
void MixContext::UpdateDistancesFromFinalMix() {
// Set all distances to be invalid
for (std::size_t i = 0; i < info_count; i++) {
GetInfo(i).GetInParams().final_mix_distance = AudioCommon::NO_FINAL_MIX;
}
for (std::size_t i = 0; i < info_count; i++) {
auto& info = GetInfo(i);
auto& in_params = info.GetInParams();
// Populate our sorted info
sorted_info[i] = &info;
if (!in_params.in_use) {
continue;
}
auto mix_id = in_params.mix_id;
// Declared outside the loop so the final value can be used after it
s32 distance_to_final_mix{AudioCommon::FINAL_MIX};
for (; distance_to_final_mix < static_cast<s32>(info_count); distance_to_final_mix++) {
if (mix_id == AudioCommon::FINAL_MIX) {
// If we're at the final mix, we're done
break;
} else if (mix_id == AudioCommon::NO_MIX) {
// If we have no more mix ids, we're done
distance_to_final_mix = AudioCommon::NO_FINAL_MIX;
break;
} else {
const auto& dest_mix = GetInfo(mix_id);
const auto dest_mix_distance = dest_mix.GetInParams().final_mix_distance;
if (dest_mix_distance == AudioCommon::NO_FINAL_MIX) {
// If our current mix isn't pointing to a final mix, follow through
mix_id = dest_mix.GetInParams().dest_mix_id;
} else {
// Our current mix + 1 = final distance
distance_to_final_mix = dest_mix_distance + 1;
break;
}
}
}
// If we're out of range for our distance, mark it as no final mix
if (distance_to_final_mix >= static_cast<s32>(info_count)) {
distance_to_final_mix = AudioCommon::NO_FINAL_MIX;
}
in_params.final_mix_distance = distance_to_final_mix;
}
}
void MixContext::CalcMixBufferOffset() {
s32 offset{};
for (std::size_t i = 0; i < info_count; i++) {
auto& info = GetSortedInfo(i);
auto& in_params = info.GetInParams();
if (in_params.in_use) {
// Only update if in use
in_params.buffer_offset = offset;
offset += in_params.buffer_count;
}
}
}
void MixContext::SortInfo() {
// Get the distance to the final mix
UpdateDistancesFromFinalMix();
// Sort based on the distance to the final mix
std::sort(sorted_info.begin(), sorted_info.end(),
[](const ServerMixInfo* lhs, const ServerMixInfo* rhs) {
return lhs->GetInParams().final_mix_distance >
rhs->GetInParams().final_mix_distance;
});
// Calculate the mix buffer offset
CalcMixBufferOffset();
}
bool MixContext::TsortInfo(SplitterContext& splitter_context) {
// If we're not using mixes, just calculate the mix buffer offset
if (!splitter_context.UsingSplitter()) {
CalcMixBufferOffset();
return true;
}
// Sort our node states
if (!node_states.Tsort(edge_matrix)) {
return false;
}
// Get our sorted list
const auto sorted_list = node_states.GetIndexList();
std::size_t info_id{};
for (auto itr = sorted_list.rbegin(); itr != sorted_list.rend(); ++itr) {
// Set our sorted info
sorted_info[info_id++] = &GetInfo(*itr);
}
// Calculate the mix buffer offset
CalcMixBufferOffset();
return true;
}
std::size_t MixContext::GetCount() const {
return info_count;
}
ServerMixInfo& MixContext::GetInfo(std::size_t i) {
ASSERT(i < info_count);
return infos.at(i);
}
const ServerMixInfo& MixContext::GetInfo(std::size_t i) const {
ASSERT(i < info_count);
return infos.at(i);
}
ServerMixInfo& MixContext::GetSortedInfo(std::size_t i) {
ASSERT(i < info_count);
return *sorted_info.at(i);
}
const ServerMixInfo& MixContext::GetSortedInfo(std::size_t i) const {
ASSERT(i < info_count);
return *sorted_info.at(i);
}
ServerMixInfo& MixContext::GetFinalMixInfo() {
return infos.at(AudioCommon::FINAL_MIX);
}
const ServerMixInfo& MixContext::GetFinalMixInfo() const {
return infos.at(AudioCommon::FINAL_MIX);
}
EdgeMatrix& MixContext::GetEdgeMatrix() {
return edge_matrix;
}
const EdgeMatrix& MixContext::GetEdgeMatrix() const {
return edge_matrix;
}
ServerMixInfo::ServerMixInfo() {
Cleanup();
}
ServerMixInfo::~ServerMixInfo() = default;
const ServerMixInfo::InParams& ServerMixInfo::GetInParams() const {
return in_params;
}
ServerMixInfo::InParams& ServerMixInfo::GetInParams() {
return in_params;
}
bool ServerMixInfo::Update(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
BehaviorInfo& behavior_info, SplitterContext& splitter_context,
EffectContext& effect_context) {
in_params.volume = mix_in.volume;
in_params.sample_rate = mix_in.sample_rate;
in_params.buffer_count = mix_in.buffer_count;
in_params.in_use = mix_in.in_use;
in_params.mix_id = mix_in.mix_id;
in_params.node_id = mix_in.node_id;
for (std::size_t i = 0; i < mix_in.mix_volume.size(); i++) {
std::copy(mix_in.mix_volume[i].begin(), mix_in.mix_volume[i].end(),
in_params.mix_volume[i].begin());
}
bool require_sort = false;
if (behavior_info.IsSplitterSupported()) {
require_sort = UpdateConnection(edge_matrix, mix_in, splitter_context);
} else {
in_params.dest_mix_id = mix_in.dest_mix_id;
in_params.splitter_id = AudioCommon::NO_SPLITTER;
}
ResetEffectProcessingOrder();
const auto effect_count = effect_context.GetCount();
for (std::size_t i = 0; i < effect_count; i++) {
auto* effect_info = effect_context.GetInfo(i);
if (effect_info->GetMixID() == in_params.mix_id) {
effect_processing_order[effect_info->GetProcessingOrder()] = static_cast<s32>(i);
}
}
// TODO(ogniK): Update effect processing order
return require_sort;
}
bool ServerMixInfo::HasAnyConnection() const {
return in_params.splitter_id != AudioCommon::NO_SPLITTER ||
in_params.mix_id != AudioCommon::NO_MIX;
}
void ServerMixInfo::Cleanup() {
in_params.volume = 0.0f;
in_params.sample_rate = 0;
in_params.buffer_count = 0;
in_params.in_use = false;
in_params.mix_id = AudioCommon::NO_MIX;
in_params.node_id = 0;
in_params.buffer_offset = 0;
in_params.dest_mix_id = AudioCommon::NO_MIX;
in_params.splitter_id = AudioCommon::NO_SPLITTER;
std::memset(in_params.mix_volume.data(), 0, sizeof(in_params.mix_volume));
}
void ServerMixInfo::SetEffectCount(std::size_t count) {
effect_processing_order.resize(count);
ResetEffectProcessingOrder();
}
void ServerMixInfo::ResetEffectProcessingOrder() {
for (auto& order : effect_processing_order) {
order = AudioCommon::NO_EFFECT_ORDER;
}
}
s32 ServerMixInfo::GetEffectOrder(std::size_t i) const {
return effect_processing_order.at(i);
}
bool ServerMixInfo::UpdateConnection(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
SplitterContext& splitter_context) {
// Mixes are identical
if (in_params.dest_mix_id == mix_in.dest_mix_id &&
in_params.splitter_id == mix_in.splitter_id &&
((in_params.splitter_id == AudioCommon::NO_SPLITTER) ||
!splitter_context.GetInfo(in_params.splitter_id).HasNewConnection())) {
return false;
}
// Remove current edges for mix id
edge_matrix.RemoveEdges(in_params.mix_id);
if (mix_in.dest_mix_id != AudioCommon::NO_MIX) {
// If we have a valid destination mix id, set our edge matrix
edge_matrix.Connect(in_params.mix_id, mix_in.dest_mix_id);
} else if (mix_in.splitter_id != AudioCommon::NO_SPLITTER) {
// Recurse our splitter linked and set our edges
auto& splitter_info = splitter_context.GetInfo(mix_in.splitter_id);
const auto length = splitter_info.GetLength();
for (s32 i = 0; i < length; i++) {
const auto* splitter_destination =
splitter_context.GetDestinationData(mix_in.splitter_id, i);
if (splitter_destination == nullptr) {
continue;
}
if (splitter_destination->ValidMixId()) {
edge_matrix.Connect(in_params.mix_id, splitter_destination->GetMixId());
}
}
}
in_params.dest_mix_id = mix_in.dest_mix_id;
in_params.splitter_id = mix_in.splitter_id;
return true;
}
} // namespace AudioCore
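As an illustrative example (not taken from the commit): with three mixes where mix 2 sends to mix 1 and mix 1 sends to the final mix, UpdateDistancesFromFinalMix assigns final_mix_distance values of 0, 1, and 2 to the final mix, mix 1, and mix 2 respectively; SortInfo then orders sorted_info by descending distance, so mix 2 is processed first and the final mix last, and CalcMixBufferOffset hands out buffer offsets in that same order.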

114
src/audio_core/mix_context.h Executable file
View File

@@ -0,0 +1,114 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <vector>
#include "audio_core/common.h"
#include "audio_core/splitter_context.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace AudioCore {
class BehaviorInfo;
class EffectContext;
class MixInfo {
public:
struct DirtyHeader {
u32_le magic{};
u32_le mixer_count{};
INSERT_PADDING_BYTES(0x18);
};
static_assert(sizeof(DirtyHeader) == 0x20, "MixInfo::DirtyHeader is an invalid size");
struct InParams {
float_le volume{};
s32_le sample_rate{};
s32_le buffer_count{};
bool in_use{};
INSERT_PADDING_BYTES(3);
s32_le mix_id{};
s32_le effect_count{};
u32_le node_id{};
INSERT_PADDING_WORDS(2);
std::array<std::array<float_le, AudioCommon::MAX_MIX_BUFFERS>, AudioCommon::MAX_MIX_BUFFERS>
mix_volume{};
s32_le dest_mix_id{};
s32_le splitter_id{};
INSERT_PADDING_WORDS(1);
};
static_assert(sizeof(MixInfo::InParams) == 0x930, "MixInfo::InParams is an invalid size");
};
class ServerMixInfo {
public:
struct InParams {
float volume{};
s32 sample_rate{};
s32 buffer_count{};
bool in_use{};
s32 mix_id{};
u32 node_id{};
std::array<std::array<float_le, AudioCommon::MAX_MIX_BUFFERS>, AudioCommon::MAX_MIX_BUFFERS>
mix_volume{};
s32 dest_mix_id{};
s32 splitter_id{};
s32 buffer_offset{};
s32 final_mix_distance{};
};
ServerMixInfo();
~ServerMixInfo();
[[nodiscard]] const ServerMixInfo::InParams& GetInParams() const;
[[nodiscard]] ServerMixInfo::InParams& GetInParams();
bool Update(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
BehaviorInfo& behavior_info, SplitterContext& splitter_context,
EffectContext& effect_context);
[[nodiscard]] bool HasAnyConnection() const;
void Cleanup();
void SetEffectCount(std::size_t count);
void ResetEffectProcessingOrder();
[[nodiscard]] s32 GetEffectOrder(std::size_t i) const;
private:
std::vector<s32> effect_processing_order;
InParams in_params{};
bool UpdateConnection(EdgeMatrix& edge_matrix, const MixInfo::InParams& mix_in,
SplitterContext& splitter_context);
};
class MixContext {
public:
MixContext();
~MixContext();
void Initialize(const BehaviorInfo& behavior_info, std::size_t mix_count,
std::size_t effect_count);
void SortInfo();
bool TsortInfo(SplitterContext& splitter_context);
[[nodiscard]] std::size_t GetCount() const;
[[nodiscard]] ServerMixInfo& GetInfo(std::size_t i);
[[nodiscard]] const ServerMixInfo& GetInfo(std::size_t i) const;
[[nodiscard]] ServerMixInfo& GetSortedInfo(std::size_t i);
[[nodiscard]] const ServerMixInfo& GetSortedInfo(std::size_t i) const;
[[nodiscard]] ServerMixInfo& GetFinalMixInfo();
[[nodiscard]] const ServerMixInfo& GetFinalMixInfo() const;
[[nodiscard]] EdgeMatrix& GetEdgeMatrix();
[[nodiscard]] const EdgeMatrix& GetEdgeMatrix() const;
private:
void CalcMixBufferOffset();
void UpdateDistancesFromFinalMix();
NodeStates node_states{};
EdgeMatrix edge_matrix{};
std::size_t info_count{};
std::vector<ServerMixInfo> infos{};
std::vector<ServerMixInfo*> sorted_info{};
};
} // namespace AudioCore

33
src/audio_core/null_sink.h Executable file
View File

@@ -0,0 +1,33 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "audio_core/sink.h"
namespace AudioCore {
class NullSink final : public Sink {
public:
explicit NullSink(std::string_view) {}
~NullSink() override = default;
SinkStream& AcquireSinkStream(u32 /*sample_rate*/, u32 /*num_channels*/,
const std::string& /*name*/) override {
return null_sink_stream;
}
private:
struct NullSinkStreamImpl final : SinkStream {
void EnqueueSamples(u32 /*num_channels*/, const std::vector<s16>& /*samples*/) override {}
std::size_t SamplesInQueue(u32 /*num_channels*/) const override {
return 0;
}
void Flush() override {}
} null_sink_stream;
};
} // namespace AudioCore

31
src/audio_core/sink.h Executable file
View File

@@ -0,0 +1,31 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <string>
#include "audio_core/sink_stream.h"
#include "common/common_types.h"
namespace AudioCore {
constexpr char auto_device_name[] = "auto";
/**
* This class is an interface for an audio sink. An audio sink accepts samples in stereo signed
* PCM16 format to be output. Sinks *do not* handle resampling and expect the correct sample rate.
* They are dumb outputs.
*/
class Sink {
public:
virtual ~Sink() = default;
virtual SinkStream& AcquireSinkStream(u32 sample_rate, u32 num_channels,
const std::string& name) = 0;
};
using SinkPtr = std::unique_ptr<Sink>;
} // namespace AudioCore

45
src/audio_core/sink_context.cpp Executable file
View File

@@ -0,0 +1,45 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "audio_core/sink_context.h"
namespace AudioCore {
SinkContext::SinkContext(std::size_t sink_count_) : sink_count{sink_count_} {}
SinkContext::~SinkContext() = default;
std::size_t SinkContext::GetCount() const {
return sink_count;
}
void SinkContext::UpdateMainSink(const SinkInfo::InParams& in) {
ASSERT(in.type == SinkTypes::Device);
has_downmix_coefs = in.device.down_matrix_enabled;
if (has_downmix_coefs) {
downmix_coefficients = in.device.down_matrix_coef;
}
in_use = in.in_use;
use_count = in.device.input_count;
buffers = in.device.input;
}
bool SinkContext::InUse() const {
return in_use;
}
std::vector<u8> SinkContext::OutputBuffers() const {
std::vector<u8> buffer_ret(use_count);
std::memcpy(buffer_ret.data(), buffers.data(), use_count);
return buffer_ret;
}
bool SinkContext::HasDownMixingCoefficients() const {
return has_downmix_coefs;
}
const DownmixCoefficients& SinkContext::GetDownmixCoefficients() const {
return downmix_coefficients;
}
} // namespace AudioCore

96
src/audio_core/sink_context.h Executable file
View File

@@ -0,0 +1,96 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "audio_core/common.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
using DownmixCoefficients = std::array<float_le, 4>;
enum class SinkTypes : u8 {
Invalid = 0,
Device = 1,
Circular = 2,
};
enum class SinkSampleFormat : u32_le {
None = 0,
Pcm8 = 1,
Pcm16 = 2,
Pcm24 = 3,
Pcm32 = 4,
PcmFloat = 5,
Adpcm = 6,
};
class SinkInfo {
public:
struct CircularBufferIn {
u64_le address;
u32_le size;
u32_le input_count;
u32_le sample_count;
u32_le previous_position;
SinkSampleFormat sample_format;
std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
bool in_use;
INSERT_UNION_PADDING_BYTES(5);
};
static_assert(sizeof(CircularBufferIn) == 0x28,
"SinkInfo::CircularBufferIn is in invalid size");
struct DeviceIn {
std::array<u8, 255> device_name;
INSERT_UNION_PADDING_BYTES(1);
s32_le input_count;
std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
INSERT_UNION_PADDING_BYTES(1);
bool down_matrix_enabled;
DownmixCoefficients down_matrix_coef;
};
static_assert(sizeof(DeviceIn) == 0x11c, "SinkInfo::DeviceIn is an invalid size");
struct InParams {
SinkTypes type{};
bool in_use{};
INSERT_PADDING_BYTES(2);
u32_le node_id{};
INSERT_PADDING_WORDS(6);
union {
// std::array<u8, 0x120> raw{};
DeviceIn device;
CircularBufferIn circular_buffer;
};
};
static_assert(sizeof(InParams) == 0x140, "SinkInfo::InParams are an invalid size!");
};
class SinkContext {
public:
explicit SinkContext(std::size_t sink_count_);
~SinkContext();
[[nodiscard]] std::size_t GetCount() const;
void UpdateMainSink(const SinkInfo::InParams& in);
[[nodiscard]] bool InUse() const;
[[nodiscard]] std::vector<u8> OutputBuffers() const;
[[nodiscard]] bool HasDownMixingCoefficients() const;
[[nodiscard]] const DownmixCoefficients& GetDownmixCoefficients() const;
private:
bool in_use{false};
s32 use_count{};
std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> buffers{};
std::size_t sink_count{};
bool has_downmix_coefs{false};
DownmixCoefficients downmix_coefficients{};
};
} // namespace AudioCore

81
src/audio_core/sink_details.cpp Executable file
View File

@@ -0,0 +1,81 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "audio_core/null_sink.h"
#include "audio_core/sink_details.h"
#ifdef HAVE_CUBEB
#include "audio_core/cubeb_sink.h"
#endif
#include "common/logging/log.h"
namespace AudioCore {
namespace {
struct SinkDetails {
using FactoryFn = std::unique_ptr<Sink> (*)(std::string_view);
using ListDevicesFn = std::vector<std::string> (*)();
/// Name for this sink.
const char* id;
/// A method to call to construct an instance of this type of sink.
FactoryFn factory;
/// A method to call to list available devices.
ListDevicesFn list_devices;
};
// sink_details is ordered in terms of desirability, with the best choice at the top.
constexpr SinkDetails sink_details[] = {
#ifdef HAVE_CUBEB
SinkDetails{"cubeb",
[](std::string_view device_id) -> std::unique_ptr<Sink> {
return std::make_unique<CubebSink>(device_id);
},
&ListCubebSinkDevices},
#endif
SinkDetails{"null",
[](std::string_view device_id) -> std::unique_ptr<Sink> {
return std::make_unique<NullSink>(device_id);
},
[] { return std::vector<std::string>{"null"}; }},
};
const SinkDetails& GetSinkDetails(std::string_view sink_id) {
auto iter =
std::find_if(std::begin(sink_details), std::end(sink_details),
[sink_id](const auto& sink_detail) { return sink_detail.id == sink_id; });
if (sink_id == "auto" || iter == std::end(sink_details)) {
if (sink_id != "auto") {
LOG_ERROR(Audio, "AudioCore::SelectSink given invalid sink_id {}", sink_id);
}
// Auto-select.
// sink_details is ordered in terms of desirability, with the best choice at the front.
iter = std::begin(sink_details);
}
return *iter;
}
} // Anonymous namespace
std::vector<const char*> GetSinkIDs() {
std::vector<const char*> sink_ids(std::size(sink_details));
std::transform(std::begin(sink_details), std::end(sink_details), std::begin(sink_ids),
[](const auto& sink) { return sink.id; });
return sink_ids;
}
std::vector<std::string> GetDeviceListForSink(std::string_view sink_id) {
return GetSinkDetails(sink_id).list_devices();
}
std::unique_ptr<Sink> CreateSinkFromID(std::string_view sink_id, std::string_view device_id) {
return GetSinkDetails(sink_id).factory(device_id);
}
} // namespace AudioCore

24
src/audio_core/sink_details.h Executable file
View File

@@ -0,0 +1,24 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <string_view>
#include <vector>
namespace AudioCore {
class Sink;
/// Retrieves the IDs for all available audio sinks.
std::vector<const char*> GetSinkIDs();
/// Gets the list of devices for a particular sink identified by the given ID.
std::vector<std::string> GetDeviceListForSink(std::string_view sink_id);
/// Creates an audio sink identified by the given device ID.
std::unique_ptr<Sink> CreateSinkFromID(std::string_view sink_id, std::string_view device_id);
} // namespace AudioCore
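A short, hypothetical usage sketch combining these factory helpers with the Sink and SinkStream interfaces from sink.h and sink_stream.h above; PlaySilence and the 48 kHz stereo parameters are illustrative only.

// Hypothetical sketch only; not part of this commit.
#include <vector>
#include "audio_core/sink.h"
#include "audio_core/sink_details.h"
#include "common/common_types.h"

void PlaySilence() {
    // "auto" selects the most desirable sink that was compiled in (cubeb if available, else null).
    AudioCore::SinkPtr sink = AudioCore::CreateSinkFromID("auto", "auto");
    AudioCore::SinkStream& stream = sink->AcquireSinkStream(48000, 2, "example");
    const std::vector<s16> samples(480 * 2); // 10 ms of interleaved stereo silence
    stream.EnqueueSamples(2, samples);
    stream.Flush();
}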

36
src/audio_core/sink_stream.h Executable file
View File

@@ -0,0 +1,36 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <memory>
#include <vector>
#include "common/common_types.h"
namespace AudioCore {
/**
* Accepts samples in stereo signed PCM16 format to be output. Sinks *do not* handle resampling and
* expect the correct sample rate. They are dumb outputs.
*/
class SinkStream {
public:
virtual ~SinkStream() = default;
/**
* Feed stereo samples to sink.
* @param num_channels Number of channels used.
* @param samples Samples in interleaved stereo PCM16 format.
*/
virtual void EnqueueSamples(u32 num_channels, const std::vector<s16>& samples) = 0;
virtual std::size_t SamplesInQueue(u32 num_channels) const = 0;
virtual void Flush() = 0;
};
using SinkStreamPtr = std::unique_ptr<SinkStream>;
} // namespace AudioCore

617
src/audio_core/splitter_context.cpp Executable file
View File

@@ -0,0 +1,617 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "audio_core/behavior_info.h"
#include "audio_core/splitter_context.h"
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
namespace AudioCore {
ServerSplitterDestinationData::ServerSplitterDestinationData(s32 id_) : id{id_} {}
ServerSplitterDestinationData::~ServerSplitterDestinationData() = default;
void ServerSplitterDestinationData::Update(SplitterInfo::InDestinationParams& header) {
// Log error as these are not actually failure states
if (header.magic != SplitterMagic::DataHeader) {
LOG_ERROR(Audio, "Splitter destination header is invalid!");
return;
}
// Incorrect splitter id
if (header.splitter_id != id) {
LOG_ERROR(Audio, "Splitter destination ids do not match!");
return;
}
mix_id = header.mix_id;
// Copy our mix volumes
std::copy(header.mix_volumes.begin(), header.mix_volumes.end(), current_mix_volumes.begin());
if (!in_use && header.in_use) {
// Update mix volumes
std::copy(current_mix_volumes.begin(), current_mix_volumes.end(), last_mix_volumes.begin());
needs_update = false;
}
in_use = header.in_use;
}
ServerSplitterDestinationData* ServerSplitterDestinationData::GetNextDestination() {
return next;
}
const ServerSplitterDestinationData* ServerSplitterDestinationData::GetNextDestination() const {
return next;
}
void ServerSplitterDestinationData::SetNextDestination(ServerSplitterDestinationData* dest) {
next = dest;
}
bool ServerSplitterDestinationData::ValidMixId() const {
return GetMixId() != AudioCommon::NO_MIX;
}
s32 ServerSplitterDestinationData::GetMixId() const {
return mix_id;
}
bool ServerSplitterDestinationData::IsConfigured() const {
return in_use && ValidMixId();
}
float ServerSplitterDestinationData::GetMixVolume(std::size_t i) const {
ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
return current_mix_volumes.at(i);
}
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerSplitterDestinationData::CurrentMixVolumes() const {
return current_mix_volumes;
}
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerSplitterDestinationData::LastMixVolumes() const {
return last_mix_volumes;
}
void ServerSplitterDestinationData::MarkDirty() {
needs_update = true;
}
void ServerSplitterDestinationData::UpdateInternalState() {
if (in_use && needs_update) {
std::copy(current_mix_volumes.begin(), current_mix_volumes.end(), last_mix_volumes.begin());
}
needs_update = false;
}
ServerSplitterInfo::ServerSplitterInfo(s32 id_) : id(id_) {}
ServerSplitterInfo::~ServerSplitterInfo() = default;
void ServerSplitterInfo::InitializeInfos() {
send_length = 0;
head = nullptr;
new_connection = true;
}
void ServerSplitterInfo::ClearNewConnectionFlag() {
new_connection = false;
}
std::size_t ServerSplitterInfo::Update(SplitterInfo::InInfoPrams& header) {
if (header.send_id != id) {
return 0;
}
sample_rate = header.sample_rate;
new_connection = true;
// We need to update the size here because the splitter bug is present and provides an
// incorrect size. We're supposed to update the header here as well, but we just ignore it and continue
return (sizeof(s32_le) * (header.length - 1)) + (sizeof(s32_le) * 3);
}
ServerSplitterDestinationData* ServerSplitterInfo::GetHead() {
return head;
}
const ServerSplitterDestinationData* ServerSplitterInfo::GetHead() const {
return head;
}
ServerSplitterDestinationData* ServerSplitterInfo::GetData(std::size_t depth) {
auto* current_head = head;
for (std::size_t i = 0; i < depth; i++) {
if (current_head == nullptr) {
return nullptr;
}
current_head = current_head->GetNextDestination();
}
return current_head;
}
const ServerSplitterDestinationData* ServerSplitterInfo::GetData(std::size_t depth) const {
auto* current_head = head;
for (std::size_t i = 0; i < depth; i++) {
if (current_head == nullptr) {
return nullptr;
}
current_head = current_head->GetNextDestination();
}
return current_head;
}
bool ServerSplitterInfo::HasNewConnection() const {
return new_connection;
}
s32 ServerSplitterInfo::GetLength() const {
return send_length;
}
void ServerSplitterInfo::SetHead(ServerSplitterDestinationData* new_head) {
head = new_head;
}
void ServerSplitterInfo::SetHeadDepth(s32 length) {
send_length = length;
}
SplitterContext::SplitterContext() = default;
SplitterContext::~SplitterContext() = default;
void SplitterContext::Initialize(BehaviorInfo& behavior_info, std::size_t _info_count,
std::size_t _data_count) {
if (!behavior_info.IsSplitterSupported() || _data_count == 0 || _info_count == 0) {
Setup(0, 0, false);
return;
}
// Only initialize if we're using splitters
Setup(_info_count, _data_count, behavior_info.IsSplitterBugFixed());
}
bool SplitterContext::Update(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read) {
const auto UpdateOffsets = [&](std::size_t read) {
input_offset += read;
bytes_read += read;
};
if (info_count == 0 || data_count == 0) {
bytes_read = 0;
return true;
}
if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
sizeof(SplitterInfo::InHeader))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
SplitterInfo::InHeader header{};
std::memcpy(&header, input.data() + input_offset, sizeof(SplitterInfo::InHeader));
UpdateOffsets(sizeof(SplitterInfo::InHeader));
if (header.magic != SplitterMagic::SplitterHeader) {
LOG_ERROR(Audio, "Invalid header magic! Expecting {:X} but got {:X}",
SplitterMagic::SplitterHeader, header.magic);
return false;
}
// Clear all connections
for (auto& info : infos) {
info.ClearNewConnectionFlag();
}
UpdateInfo(input, input_offset, bytes_read, header.info_count);
UpdateData(input, input_offset, bytes_read, header.data_count);
const auto aligned_bytes_read = Common::AlignUp(bytes_read, 16);
input_offset += aligned_bytes_read - bytes_read;
bytes_read = aligned_bytes_read;
return true;
}
bool SplitterContext::UsingSplitter() const {
return info_count > 0 && data_count > 0;
}
ServerSplitterInfo& SplitterContext::GetInfo(std::size_t i) {
ASSERT(i < info_count);
return infos.at(i);
}
const ServerSplitterInfo& SplitterContext::GetInfo(std::size_t i) const {
ASSERT(i < info_count);
return infos.at(i);
}
ServerSplitterDestinationData& SplitterContext::GetData(std::size_t i) {
ASSERT(i < data_count);
return datas.at(i);
}
const ServerSplitterDestinationData& SplitterContext::GetData(std::size_t i) const {
ASSERT(i < data_count);
return datas.at(i);
}
ServerSplitterDestinationData* SplitterContext::GetDestinationData(std::size_t info,
std::size_t data) {
ASSERT(info < info_count);
auto& cur_info = GetInfo(info);
return cur_info.GetData(data);
}
const ServerSplitterDestinationData* SplitterContext::GetDestinationData(std::size_t info,
std::size_t data) const {
ASSERT(info < info_count);
const auto& cur_info = GetInfo(info);
return cur_info.GetData(data);
}
void SplitterContext::UpdateInternalState() {
if (data_count == 0) {
return;
}
for (auto& data : datas) {
data.UpdateInternalState();
}
}
std::size_t SplitterContext::GetInfoCount() const {
return info_count;
}
std::size_t SplitterContext::GetDataCount() const {
return data_count;
}
void SplitterContext::Setup(std::size_t info_count_, std::size_t data_count_,
bool is_splitter_bug_fixed) {
info_count = info_count_;
data_count = data_count_;
for (std::size_t i = 0; i < info_count; i++) {
auto& splitter = infos.emplace_back(static_cast<s32>(i));
splitter.InitializeInfos();
}
for (std::size_t i = 0; i < data_count; i++) {
datas.emplace_back(static_cast<s32>(i));
}
bug_fixed = is_splitter_bug_fixed;
}
bool SplitterContext::UpdateInfo(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read, s32 in_splitter_count) {
const auto UpdateOffsets = [&](std::size_t read) {
input_offset += read;
bytes_read += read;
};
for (s32 i = 0; i < in_splitter_count; i++) {
if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
sizeof(SplitterInfo::InInfoPrams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
SplitterInfo::InInfoPrams header{};
std::memcpy(&header, input.data() + input_offset, sizeof(SplitterInfo::InInfoPrams));
// Logged as an error, though these don't actually cause a bailout for some reason
if (header.magic != SplitterMagic::InfoHeader) {
LOG_ERROR(Audio, "Bad splitter data header");
break;
}
if (header.send_id < 0 || static_cast<std::size_t>(header.send_id) >= info_count) {
LOG_ERROR(Audio, "Bad splitter data id");
break;
}
UpdateOffsets(sizeof(SplitterInfo::InInfoPrams));
auto& info = GetInfo(header.send_id);
if (!RecomposeDestination(info, header, input, input_offset)) {
LOG_ERROR(Audio, "Failed to recompose destination for splitter!");
return false;
}
const std::size_t read = info.Update(header);
bytes_read += read;
input_offset += read;
}
return true;
}
bool SplitterContext::UpdateData(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read, s32 in_data_count) {
const auto UpdateOffsets = [&](std::size_t read) {
input_offset += read;
bytes_read += read;
};
for (s32 i = 0; i < in_data_count; i++) {
if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
sizeof(SplitterInfo::InDestinationParams))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
SplitterInfo::InDestinationParams header{};
std::memcpy(&header, input.data() + input_offset,
sizeof(SplitterInfo::InDestinationParams));
UpdateOffsets(sizeof(SplitterInfo::InDestinationParams));
// Logged as an error, though these don't actually cause a bailout for some reason
if (header.magic != SplitterMagic::DataHeader) {
LOG_ERROR(Audio, "Bad splitter data header");
break;
}
if (header.splitter_id < 0 || static_cast<std::size_t>(header.splitter_id) >= data_count) {
LOG_ERROR(Audio, "Bad splitter data id");
break;
}
GetData(header.splitter_id).Update(header);
}
return true;
}
bool SplitterContext::RecomposeDestination(ServerSplitterInfo& info,
SplitterInfo::InInfoPrams& header,
const std::vector<u8>& input,
const std::size_t& input_offset) {
// Clear our current destinations
auto* current_head = info.GetHead();
while (current_head != nullptr) {
auto* next_head = current_head->GetNextDestination();
current_head->SetNextDestination(nullptr);
current_head = next_head;
}
info.SetHead(nullptr);
s32 size = header.length;
// If the splitter bug is present, calculate fixed size
if (!bug_fixed) {
if (info_count > 0) {
const auto factor = data_count / info_count;
size = std::min(header.length, static_cast<s32>(factor));
} else {
size = 0;
}
}
if (size < 1) {
LOG_ERROR(Audio, "Invalid splitter info size! size={:X}", size);
return true;
}
auto* start_head = &GetData(header.resource_id_base);
current_head = start_head;
std::vector<s32_le> resource_ids(size - 1);
if (!AudioCommon::CanConsumeBuffer(input.size(), input_offset,
resource_ids.size() * sizeof(s32_le))) {
LOG_ERROR(Audio, "Buffer is an invalid size!");
return false;
}
std::memcpy(resource_ids.data(), input.data() + input_offset,
resource_ids.size() * sizeof(s32_le));
for (auto resource_id : resource_ids) {
auto* head = &GetData(resource_id);
current_head->SetNextDestination(head);
current_head = head;
}
info.SetHead(start_head);
info.SetHeadDepth(size);
return true;
}
NodeStates::NodeStates() = default;
NodeStates::~NodeStates() = default;
void NodeStates::Initialize(std::size_t node_count_) {
// Setup our work parameters
node_count = node_count_;
was_node_found.resize(node_count);
was_node_completed.resize(node_count);
index_list.resize(node_count);
index_stack.Reset(node_count * node_count);
}
bool NodeStates::Tsort(EdgeMatrix& edge_matrix) {
return DepthFirstSearch(edge_matrix);
}
std::size_t NodeStates::GetIndexPos() const {
return index_pos;
}
const std::vector<s32>& NodeStates::GetIndexList() const {
return index_list;
}
void NodeStates::PushTsortResult(s32 index) {
ASSERT(index < static_cast<s32>(node_count));
index_list[index_pos++] = index;
}
bool NodeStates::DepthFirstSearch(EdgeMatrix& edge_matrix) {
ResetState();
for (std::size_t i = 0; i < node_count; i++) {
const auto node_id = static_cast<s32>(i);
// If we don't have a state, send to our index stack for work
if (GetState(i) == NodeStates::State::NoState) {
index_stack.push(node_id);
}
// While we have work to do in our stack
while (index_stack.Count() > 0) {
// Get the current node
const auto current_stack_index = index_stack.top();
// Check if we've seen the node yet
const auto index_state = GetState(current_stack_index);
if (index_state == NodeStates::State::NoState) {
// Mark the node as seen
UpdateState(NodeStates::State::InFound, current_stack_index);
} else if (index_state == NodeStates::State::InFound) {
// We've seen this node before, mark it as completed
UpdateState(NodeStates::State::InCompleted, current_stack_index);
// Update our index list
PushTsortResult(current_stack_index);
// Pop the stack
index_stack.pop();
continue;
} else if (index_state == NodeStates::State::InCompleted) {
// If our node is already sorted, clear it
index_stack.pop();
continue;
}
const auto edge_node_count = edge_matrix.GetNodeCount();
for (s32 j = 0; j < static_cast<s32>(edge_node_count); j++) {
// Check if our node is connected to our edge matrix
if (!edge_matrix.Connected(current_stack_index, j)) {
continue;
}
// Check if our node exists
const auto node_state = GetState(j);
if (node_state == NodeStates::State::NoState) {
// Add more work
index_stack.push(j);
} else if (node_state == NodeStates::State::InFound) {
UNREACHABLE_MSG("Node start marked as found");
ResetState();
return false;
}
}
}
}
return true;
}
void NodeStates::ResetState() {
// Reset to the start of our index stack
index_pos = 0;
for (std::size_t i = 0; i < node_count; i++) {
// Mark all nodes as not found
was_node_found[i] = false;
// Mark all nodes as uncompleted
was_node_completed[i] = false;
// Mark all indexes as invalid
index_list[i] = -1;
}
}
void NodeStates::UpdateState(NodeStates::State state, std::size_t i) {
switch (state) {
case NodeStates::State::NoState:
was_node_found[i] = false;
was_node_completed[i] = false;
break;
case NodeStates::State::InFound:
was_node_found[i] = true;
was_node_completed[i] = false;
break;
case NodeStates::State::InCompleted:
was_node_found[i] = false;
was_node_completed[i] = true;
break;
}
}
NodeStates::State NodeStates::GetState(std::size_t i) {
ASSERT(i < node_count);
if (was_node_found[i]) {
// If our node exists in our found list
return NodeStates::State::InFound;
} else if (was_node_completed[i]) {
// If node is in the completed list
return NodeStates::State::InCompleted;
} else {
// If in neither
return NodeStates::State::NoState;
}
}
NodeStates::Stack::Stack() = default;
NodeStates::Stack::~Stack() = default;
void NodeStates::Stack::Reset(std::size_t size) {
// Mark our stack as empty
stack.resize(size);
stack_size = size;
stack_pos = 0;
std::fill(stack.begin(), stack.end(), 0);
}
void NodeStates::Stack::push(s32 val) {
ASSERT(stack_pos < stack_size);
stack[stack_pos++] = val;
}
std::size_t NodeStates::Stack::Count() const {
return stack_pos;
}
s32 NodeStates::Stack::top() const {
ASSERT(stack_pos > 0);
return stack[stack_pos - 1];
}
s32 NodeStates::Stack::pop() {
ASSERT(stack_pos > 0);
stack_pos--;
return stack[stack_pos];
}
EdgeMatrix::EdgeMatrix() = default;
EdgeMatrix::~EdgeMatrix() = default;
void EdgeMatrix::Initialize(std::size_t _node_count) {
node_count = _node_count;
edge_matrix.resize(node_count * node_count);
}
bool EdgeMatrix::Connected(s32 a, s32 b) {
return GetState(a, b);
}
void EdgeMatrix::Connect(s32 a, s32 b) {
SetState(a, b, true);
}
void EdgeMatrix::Disconnect(s32 a, s32 b) {
SetState(a, b, false);
}
void EdgeMatrix::RemoveEdges(s32 edge) {
for (std::size_t i = 0; i < node_count; i++) {
SetState(edge, static_cast<s32>(i), false);
}
}
std::size_t EdgeMatrix::GetNodeCount() const {
return node_count;
}
void EdgeMatrix::SetState(s32 a, s32 b, bool state) {
ASSERT(InRange(a, b));
edge_matrix.at(a * node_count + b) = state;
}
bool EdgeMatrix::GetState(s32 a, s32 b) {
ASSERT(InRange(a, b));
return edge_matrix.at(a * node_count + b);
}
bool EdgeMatrix::InRange(s32 a, s32 b) const {
const std::size_t pos = a * node_count + b;
return pos < (node_count * node_count);
}
} // namespace AudioCore
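The EdgeMatrix/NodeStates pair above implements an iterative depth-first topological sort with cycle detection, which MixContext::TsortInfo uses to order mixes. Below is a minimal, hypothetical sketch that drives it directly; the SortThreeMixes name and the three-node graph are invented for illustration.

// Hypothetical sketch only; not part of this commit.
#include "audio_core/splitter_context.h"

static bool SortThreeMixes() {
    AudioCore::EdgeMatrix edges;
    AudioCore::NodeStates nodes;
    edges.Initialize(3);
    nodes.Initialize(3);
    // Mix 2 sends to mix 1, which sends to the final mix (node 0).
    edges.Connect(2, 1);
    edges.Connect(1, 0);
    if (!nodes.Tsort(edges)) {
        return false; // A cycle was detected.
    }
    // GetIndexList() now holds the nodes in completion order; MixContext walks it in
    // reverse to obtain the processing order (2, 1, then the final mix).
    return nodes.GetIndexPos() == 3;
}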

219
src/audio_core/splitter_context.h Executable file
View File

@@ -0,0 +1,219 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <stack>
#include <vector>
#include "audio_core/common.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
namespace AudioCore {
class BehaviorInfo;
class EdgeMatrix {
public:
EdgeMatrix();
~EdgeMatrix();
void Initialize(std::size_t _node_count);
bool Connected(s32 a, s32 b);
void Connect(s32 a, s32 b);
void Disconnect(s32 a, s32 b);
void RemoveEdges(s32 edge);
std::size_t GetNodeCount() const;
private:
void SetState(s32 a, s32 b, bool state);
bool GetState(s32 a, s32 b);
bool InRange(s32 a, s32 b) const;
std::vector<bool> edge_matrix{};
std::size_t node_count{};
};
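// Tracks per-node search state while topologically sorting the node graph described by an EdgeMatrix:
// NoState = unvisited, InFound = currently on the DFS path, InCompleted = fully processed.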
class NodeStates {
public:
enum class State {
NoState = 0,
InFound = 1,
InCompleted = 2,
};
// Appears to be a fixed-size stack; placed within the NodeStates class based on symbols
class Stack {
public:
Stack();
~Stack();
void Reset(std::size_t size);
void push(s32 val);
std::size_t Count() const;
s32 top() const;
s32 pop();
private:
std::vector<s32> stack{};
std::size_t stack_size{};
std::size_t stack_pos{};
};
NodeStates();
~NodeStates();
void Initialize(std::size_t node_count_);
bool Tsort(EdgeMatrix& edge_matrix);
std::size_t GetIndexPos() const;
const std::vector<s32>& GetIndexList() const;
private:
void PushTsortResult(s32 index);
bool DepthFirstSearch(EdgeMatrix& edge_matrix);
void ResetState();
void UpdateState(State state, std::size_t i);
State GetState(std::size_t i);
std::size_t node_count{};
std::vector<bool> was_node_found{};
std::vector<bool> was_node_completed{};
std::size_t index_pos{};
std::vector<s32> index_list{};
Stack index_stack{};
};
enum class SplitterMagic : u32_le {
SplitterHeader = Common::MakeMagic('S', 'N', 'D', 'H'),
DataHeader = Common::MakeMagic('S', 'N', 'D', 'D'),
InfoHeader = Common::MakeMagic('S', 'N', 'D', 'I'),
};
class SplitterInfo {
public:
struct InHeader {
SplitterMagic magic{};
s32_le info_count{};
s32_le data_count{};
INSERT_PADDING_WORDS(5);
};
static_assert(sizeof(InHeader) == 0x20, "SplitterInfo::InHeader is an invalid size");
struct InInfoPrams {
SplitterMagic magic{};
s32_le send_id{};
s32_le sample_rate{};
s32_le length{};
s32_le resource_id_base{};
};
static_assert(sizeof(InInfoPrams) == 0x14, "SplitterInfo::InInfoPrams is an invalid size");
struct InDestinationParams {
SplitterMagic magic{};
s32_le splitter_id{};
std::array<float_le, AudioCommon::MAX_MIX_BUFFERS> mix_volumes{};
s32_le mix_id{};
bool in_use{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(InDestinationParams) == 0x70,
"SplitterInfo::InDestinationParams is an invalid size");
};
class ServerSplitterDestinationData {
public:
explicit ServerSplitterDestinationData(s32 id_);
~ServerSplitterDestinationData();
void Update(SplitterInfo::InDestinationParams& header);
ServerSplitterDestinationData* GetNextDestination();
const ServerSplitterDestinationData* GetNextDestination() const;
void SetNextDestination(ServerSplitterDestinationData* dest);
bool ValidMixId() const;
s32 GetMixId() const;
bool IsConfigured() const;
float GetMixVolume(std::size_t i) const;
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& CurrentMixVolumes() const;
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& LastMixVolumes() const;
void MarkDirty();
void UpdateInternalState();
private:
bool needs_update{};
bool in_use{};
s32 id{};
s32 mix_id{};
std::array<float, AudioCommon::MAX_MIX_BUFFERS> current_mix_volumes{};
std::array<float, AudioCommon::MAX_MIX_BUFFERS> last_mix_volumes{};
ServerSplitterDestinationData* next = nullptr;
};
class ServerSplitterInfo {
public:
explicit ServerSplitterInfo(s32 id_);
~ServerSplitterInfo();
void InitializeInfos();
void ClearNewConnectionFlag();
std::size_t Update(SplitterInfo::InInfoPrams& header);
ServerSplitterDestinationData* GetHead();
const ServerSplitterDestinationData* GetHead() const;
ServerSplitterDestinationData* GetData(std::size_t depth);
const ServerSplitterDestinationData* GetData(std::size_t depth) const;
bool HasNewConnection() const;
s32 GetLength() const;
void SetHead(ServerSplitterDestinationData* new_head);
void SetHeadDepth(s32 length);
private:
s32 sample_rate{};
s32 id{};
s32 send_length{};
ServerSplitterDestinationData* head = nullptr;
bool new_connection{};
};
class SplitterContext {
public:
SplitterContext();
~SplitterContext();
void Initialize(BehaviorInfo& behavior_info, std::size_t splitter_count,
std::size_t data_count);
bool Update(const std::vector<u8>& input, std::size_t& input_offset, std::size_t& bytes_read);
bool UsingSplitter() const;
ServerSplitterInfo& GetInfo(std::size_t i);
const ServerSplitterInfo& GetInfo(std::size_t i) const;
ServerSplitterDestinationData& GetData(std::size_t i);
const ServerSplitterDestinationData& GetData(std::size_t i) const;
ServerSplitterDestinationData* GetDestinationData(std::size_t info, std::size_t data);
const ServerSplitterDestinationData* GetDestinationData(std::size_t info,
std::size_t data) const;
void UpdateInternalState();
std::size_t GetInfoCount() const;
std::size_t GetDataCount() const;
private:
void Setup(std::size_t info_count, std::size_t data_count, bool is_splitter_bug_fixed);
bool UpdateInfo(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read, s32 in_splitter_count);
bool UpdateData(const std::vector<u8>& input, std::size_t& input_offset,
std::size_t& bytes_read, s32 in_data_count);
bool RecomposeDestination(ServerSplitterInfo& info, SplitterInfo::InInfoPrams& header,
const std::vector<u8>& input, const std::size_t& input_offset);
std::vector<ServerSplitterInfo> infos{};
std::vector<ServerSplitterDestinationData> datas{};
std::size_t info_count{};
std::size_t data_count{};
bool bug_fixed{};
};
} // namespace AudioCore

153
src/audio_core/stream.cpp Executable file
View File

@@ -0,0 +1,153 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <cmath>
#include "audio_core/sink.h"
#include "audio_core/sink_details.h"
#include "audio_core/sink_stream.h"
#include "audio_core/stream.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "core/core_timing.h"
#include "core/settings.h"
namespace AudioCore {
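// Maximum number of buffers that may be queued on a stream; QueueBuffer rejects anything beyond this.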
constexpr std::size_t MaxAudioBufferCount{32};
u32 Stream::GetNumChannels() const {
switch (format) {
case Format::Mono16:
return 1;
case Format::Stereo16:
return 2;
case Format::Multi51Channel16:
return 6;
}
UNIMPLEMENTED_MSG("Unimplemented format={}", static_cast<u32>(format));
return {};
}
Stream::Stream(Core::Timing::CoreTiming& core_timing_, u32 sample_rate_, Format format_,
ReleaseCallback&& release_callback_, SinkStream& sink_stream_, std::string&& name_)
: sample_rate{sample_rate_}, format{format_}, release_callback{std::move(release_callback_)},
sink_stream{sink_stream_}, core_timing{core_timing_}, name{std::move(name_)} {
release_event =
Core::Timing::CreateEvent(name, [this](std::uintptr_t, std::chrono::nanoseconds ns_late) {
ReleaseActiveBuffer(ns_late);
});
}
void Stream::Play() {
state = State::Playing;
PlayNextBuffer();
}
void Stream::Stop() {
state = State::Stopped;
UNIMPLEMENTED();
}
void Stream::SetVolume(float volume) {
game_volume = volume;
}
Stream::State Stream::GetState() const {
return state;
}
std::chrono::nanoseconds Stream::GetBufferReleaseNS(const Buffer& buffer) const {
const std::size_t num_samples{buffer.GetSamples().size() / GetNumChannels()};
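// Playback duration of the buffer: num_samples / sample_rate seconds, expressed in nanoseconds.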
return std::chrono::nanoseconds((static_cast<u64>(num_samples) * 1000000000ULL) / sample_rate);
}
static void VolumeAdjustSamples(std::vector<s16>& samples, float game_volume) {
const float volume{std::clamp(Settings::Volume() - (1.0f - game_volume), 0.0f, 1.0f)};
if (volume == 1.0f) {
return;
}
// Implementation of a volume slider with a dynamic range of 60 dB
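// exp(6.90775) is roughly 1000, so a volume of 1.0 maps to a scale factor of 1.0 while values
// just above 0 map to about 0.001 (-60 dB); a volume of exactly 0 is treated as silence.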
const float volume_scale_factor = volume == 0 ? 0 : std::exp(6.90775f * volume) * 0.001f;
for (auto& sample : samples) {
sample = static_cast<s16>(sample * volume_scale_factor);
}
}
void Stream::PlayNextBuffer(std::chrono::nanoseconds ns_late) {
if (!IsPlaying()) {
// Ensure we are in playing state before playing the next buffer
sink_stream.Flush();
return;
}
if (active_buffer) {
// Do not queue a new buffer if we are already playing a buffer
return;
}
if (queued_buffers.empty()) {
// No queued buffers - we are effectively paused
sink_stream.Flush();
return;
}
active_buffer = queued_buffers.front();
queued_buffers.pop();
VolumeAdjustSamples(active_buffer->GetSamples(), game_volume);
sink_stream.EnqueueSamples(GetNumChannels(), active_buffer->GetSamples());
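// Subtract ns_late so that a release event which fired late does not push subsequent buffers further behind schedule.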
core_timing.ScheduleEvent(GetBufferReleaseNS(*active_buffer) - ns_late, release_event, {});
}
void Stream::ReleaseActiveBuffer(std::chrono::nanoseconds ns_late) {
ASSERT(active_buffer);
released_buffers.push(std::move(active_buffer));
release_callback();
PlayNextBuffer(ns_late);
}
bool Stream::QueueBuffer(BufferPtr&& buffer) {
if (queued_buffers.size() < MaxAudioBufferCount) {
queued_buffers.push(std::move(buffer));
PlayNextBuffer();
return true;
}
return false;
}
bool Stream::ContainsBuffer([[maybe_unused]] Buffer::Tag tag) const {
UNIMPLEMENTED();
return {};
}
std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers(std::size_t max_count) {
std::vector<Buffer::Tag> tags;
for (std::size_t count = 0; count < max_count && !released_buffers.empty(); ++count) {
if (released_buffers.front()) {
tags.push_back(released_buffers.front()->GetTag());
}
released_buffers.pop();
}
return tags;
}
std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers() {
std::vector<Buffer::Tag> tags;
tags.reserve(released_buffers.size());
while (!released_buffers.empty()) {
if (released_buffers.front()) {
tags.push_back(released_buffers.front()->GetTag());
}
released_buffers.pop();
}
return tags;
}
} // namespace AudioCore

122
src/audio_core/stream.h Executable file
View File

@@ -0,0 +1,122 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <chrono>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <queue>
#include "audio_core/buffer.h"
#include "common/common_types.h"
namespace Core::Timing {
class CoreTiming;
struct EventType;
} // namespace Core::Timing
namespace AudioCore {
class SinkStream;
/**
* Represents an audio stream, which is a sequence of queued buffers to be output by AudioOut
*/
class Stream {
public:
/// Audio format of the stream
enum class Format {
Mono16,
Stereo16,
Multi51Channel16,
};
/// Current state of the stream
enum class State {
Stopped,
Playing,
};
/// Callback function type, used to change guest state on a buffer being released
using ReleaseCallback = std::function<void()>;
Stream(Core::Timing::CoreTiming& core_timing_, u32 sample_rate_, Format format_,
ReleaseCallback&& release_callback_, SinkStream& sink_stream_, std::string&& name_);
/// Plays the audio stream
void Play();
/// Stops the audio stream
void Stop();
/// Queues a buffer into the audio stream, returns true on success
bool QueueBuffer(BufferPtr&& buffer);
/// Returns true if the audio stream contains a buffer with the specified tag
[[nodiscard]] bool ContainsBuffer(Buffer::Tag tag) const;
/// Returns the tags of up to max_count recently released buffers
[[nodiscard]] std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(std::size_t max_count);
/// Returns the tags of all recently released buffers
[[nodiscard]] std::vector<Buffer::Tag> GetTagsAndReleaseBuffers();
void SetVolume(float volume);
[[nodiscard]] float GetVolume() const {
return game_volume;
}
/// Returns true if the stream is currently playing
[[nodiscard]] bool IsPlaying() const {
return state == State::Playing;
}
/// Returns the number of queued buffers
[[nodiscard]] std::size_t GetQueueSize() const {
return queued_buffers.size();
}
/// Gets the sample rate
[[nodiscard]] u32 GetSampleRate() const {
return sample_rate;
}
/// Gets the number of channels
[[nodiscard]] u32 GetNumChannels() const;
/// Get the state
[[nodiscard]] State GetState() const;
private:
/// Plays the next queued buffer in the audio stream, starting playback if necessary
void PlayNextBuffer(std::chrono::nanoseconds ns_late = {});
/// Releases the actively playing buffer, signalling that it has been completed
void ReleaseActiveBuffer(std::chrono::nanoseconds ns_late = {});
/// Gets the duration, in nanoseconds, after which the specified buffer will be released
[[nodiscard]] std::chrono::nanoseconds GetBufferReleaseNS(const Buffer& buffer) const;
u32 sample_rate; ///< Sample rate of the stream
Format format; ///< Format of the stream
float game_volume = 1.0f; ///< The volume the game currently has set
ReleaseCallback release_callback; ///< Buffer release callback for the stream
State state{State::Stopped}; ///< Playback state of the stream
std::shared_ptr<Core::Timing::EventType>
release_event; ///< Core timing release event for the stream
BufferPtr active_buffer; ///< Actively playing buffer in the stream
std::queue<BufferPtr> queued_buffers; ///< Buffers queued to be played in the stream
std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream
SinkStream& sink_stream; ///< Output sink for the stream
Core::Timing::CoreTiming& core_timing; ///< Core timing instance.
std::string name; ///< Name of the stream, must be unique
};
using StreamPtr = std::shared_ptr<Stream>;
} // namespace AudioCore

68
src/audio_core/time_stretch.cpp Executable file
View File

@@ -0,0 +1,68 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include "audio_core/time_stretch.h"
#include "common/logging/log.h"
namespace AudioCore {
TimeStretcher::TimeStretcher(u32 sample_rate, u32 channel_count) : m_sample_rate{sample_rate} {
m_sound_touch.setChannels(channel_count);
m_sound_touch.setSampleRate(sample_rate);
m_sound_touch.setPitch(1.0);
m_sound_touch.setTempo(1.0);
}
void TimeStretcher::Clear() {
m_sound_touch.clear();
}
void TimeStretcher::Flush() {
m_sound_touch.flush();
}
std::size_t TimeStretcher::Process(const s16* in, std::size_t num_in, s16* out,
std::size_t num_out) {
const double time_delta = static_cast<double>(num_out) / m_sample_rate; // seconds
// We were given num_in input samples, and num_out samples were requested from us.
double current_ratio = static_cast<double>(num_in) / static_cast<double>(num_out);
const double max_latency = 0.25; // seconds
const double max_backlog = m_sample_rate * max_latency;
const double backlog_fullness = m_sound_touch.numSamples() / max_backlog;
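// A fullness of 1.0 corresponds to max_latency (0.25 s) worth of audio buffered inside SoundTouch.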
if (backlog_fullness > 4.0) {
// Too many samples in the backlog: don't push any more in
num_in = 0;
}
// We ideally want the backlog to be about 50% full.
// This gives some headroom both ways to prevent underflow and overflow.
// We tweak current_ratio to encourage this.
constexpr double tweak_time_scale = 0.05; // seconds
const double tweak_correction = (backlog_fullness - 0.5) * (time_delta / tweak_time_scale);
current_ratio *= std::pow(1.0 + 2.0 * tweak_correction, tweak_correction < 0 ? 3.0 : 1.0);
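// When the correction is negative (backlog under half full), the cubed exponent shrinks the
// ratio more aggressively, presumably to refill the backlog before an underrun occurs.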
// This low-pass filter smooths out variance in the calculated stretch ratio.
// The time-scale determines how responsive this filter is.
constexpr double lpf_time_scale = 0.712; // seconds
const double lpf_gain = 1.0 - std::exp(-time_delta / lpf_time_scale);
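// lpf_gain is the coefficient of a first-order (single-pole) low-pass filter with time constant
// lpf_time_scale, evaluated for a step of time_delta.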
m_stretch_ratio += lpf_gain * (current_ratio - m_stretch_ratio);
// Place a lower limit of 5% speed. When a game boots up, there will be
// many silent samples. These do not need to be time-stretched.
m_stretch_ratio = std::max(m_stretch_ratio, 0.05);
m_sound_touch.setTempo(m_stretch_ratio);
LOG_TRACE(Audio, "{:5}/{:5} ratio:{:0.6f} backlog:{:0.6f}", num_in, num_out, m_stretch_ratio,
backlog_fullness);
m_sound_touch.putSamples(in, static_cast<u32>(num_in));
return m_sound_touch.receiveSamples(out, static_cast<u32>(num_out));
}
} // namespace AudioCore

34
src/audio_core/time_stretch.h Executable file
View File

@@ -0,0 +1,34 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <cstddef>
#include <SoundTouch.h>
#include "common/common_types.h"
namespace AudioCore {
class TimeStretcher {
public:
TimeStretcher(u32 sample_rate, u32 channel_count);
/// @param in Input sample buffer
/// @param num_in Number of input frames in `in`
/// @param out Output sample buffer
/// @param num_out Desired number of output frames in `out`
/// @returns Actual number of frames written to `out`
std::size_t Process(const s16* in, std::size_t num_in, s16* out, std::size_t num_out);
void Clear();
void Flush();
private:
u32 m_sample_rate;
soundtouch::SoundTouch m_sound_touch;
double m_stretch_ratio = 1.0;
};
} // namespace AudioCore

530
src/audio_core/voice_context.cpp Executable file
View File

@@ -0,0 +1,530 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "audio_core/behavior_info.h"
#include "audio_core/voice_context.h"
#include "core/memory.h"
namespace AudioCore {
ServerVoiceChannelResource::ServerVoiceChannelResource(s32 id_) : id(id_) {}
ServerVoiceChannelResource::~ServerVoiceChannelResource() = default;
bool ServerVoiceChannelResource::InUse() const {
return in_use;
}
float ServerVoiceChannelResource::GetCurrentMixVolumeAt(std::size_t i) const {
ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
return mix_volume.at(i);
}
float ServerVoiceChannelResource::GetLastMixVolumeAt(std::size_t i) const {
ASSERT(i < AudioCommon::MAX_MIX_BUFFERS);
return last_mix_volume.at(i);
}
void ServerVoiceChannelResource::Update(VoiceChannelResource::InParams& in_params) {
in_use = in_params.in_use;
// Update our mix volumes only if the resource is in use
if (in_params.in_use) {
mix_volume = in_params.mix_volume;
}
}
void ServerVoiceChannelResource::UpdateLastMixVolumes() {
last_mix_volume = mix_volume;
}
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerVoiceChannelResource::GetCurrentMixVolume() const {
return mix_volume;
}
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>&
ServerVoiceChannelResource::GetLastMixVolume() const {
return last_mix_volume;
}
ServerVoiceInfo::ServerVoiceInfo() {
Initialize();
}
ServerVoiceInfo::~ServerVoiceInfo() = default;
void ServerVoiceInfo::Initialize() {
in_params.in_use = false;
in_params.node_id = 0;
in_params.id = 0;
in_params.current_playstate = ServerPlayState::Stop;
in_params.priority = 255;
in_params.sample_rate = 0;
in_params.sample_format = SampleFormat::Invalid;
in_params.channel_count = 0;
in_params.pitch = 0.0f;
in_params.volume = 0.0f;
in_params.last_volume = 0.0f;
in_params.biquad_filter.fill({});
in_params.wave_buffer_count = 0;
in_params.wave_bufffer_head = 0;
in_params.mix_id = AudioCommon::NO_MIX;
in_params.splitter_info_id = AudioCommon::NO_SPLITTER;
in_params.additional_params_address = 0;
in_params.additional_params_size = 0;
in_params.is_new = false;
out_params.played_sample_count = 0;
out_params.wave_buffer_consumed = 0;
in_params.voice_drop_flag = false;
in_params.buffer_mapped = false;
in_params.wave_buffer_flush_request_count = 0;
in_params.was_biquad_filter_enabled.fill(false);
for (auto& wave_buffer : in_params.wave_buffer) {
wave_buffer.start_sample_offset = 0;
wave_buffer.end_sample_offset = 0;
wave_buffer.is_looping = false;
wave_buffer.end_of_stream = false;
wave_buffer.buffer_address = 0;
wave_buffer.buffer_size = 0;
wave_buffer.context_address = 0;
wave_buffer.context_size = 0;
wave_buffer.sent_to_dsp = true;
}
stored_samples.clear();
}
void ServerVoiceInfo::UpdateParameters(const VoiceInfo::InParams& voice_in,
BehaviorInfo& behavior_info) {
in_params.in_use = voice_in.is_in_use;
in_params.id = voice_in.id;
in_params.node_id = voice_in.node_id;
in_params.last_playstate = in_params.current_playstate;
switch (voice_in.play_state) {
case PlayState::Paused:
in_params.current_playstate = ServerPlayState::Paused;
break;
case PlayState::Stopped:
if (in_params.current_playstate != ServerPlayState::Stop) {
in_params.current_playstate = ServerPlayState::RequestStop;
}
break;
case PlayState::Started:
in_params.current_playstate = ServerPlayState::Play;
break;
default:
UNREACHABLE_MSG("Unknown playstate {}", voice_in.play_state);
break;
}
in_params.priority = voice_in.priority;
in_params.sorting_order = voice_in.sorting_order;
in_params.sample_rate = voice_in.sample_rate;
in_params.sample_format = voice_in.sample_format;
in_params.channel_count = voice_in.channel_count;
in_params.pitch = voice_in.pitch;
in_params.volume = voice_in.volume;
in_params.biquad_filter = voice_in.biquad_filter;
in_params.wave_buffer_count = voice_in.wave_buffer_count;
in_params.wave_bufffer_head = voice_in.wave_buffer_head;
if (behavior_info.IsFlushVoiceWaveBuffersSupported()) {
const auto in_request_count = in_params.wave_buffer_flush_request_count;
const auto voice_request_count = voice_in.wave_buffer_flush_request_count;
in_params.wave_buffer_flush_request_count =
static_cast<u8>(in_request_count + voice_request_count);
}
in_params.mix_id = voice_in.mix_id;
if (behavior_info.IsSplitterSupported()) {
in_params.splitter_info_id = voice_in.splitter_info_id;
} else {
in_params.splitter_info_id = AudioCommon::NO_SPLITTER;
}
std::memcpy(in_params.voice_channel_resource_id.data(),
voice_in.voice_channel_resource_ids.data(),
sizeof(s32) * in_params.voice_channel_resource_id.size());
if (behavior_info.IsVoicePlayedSampleCountResetAtLoopPointSupported()) {
in_params.behavior_flags.is_played_samples_reset_at_loop_point =
voice_in.behavior_flags.is_played_samples_reset_at_loop_point;
} else {
in_params.behavior_flags.is_played_samples_reset_at_loop_point.Assign(0);
}
if (behavior_info.IsVoicePitchAndSrcSkippedSupported()) {
in_params.behavior_flags.is_pitch_and_src_skipped =
voice_in.behavior_flags.is_pitch_and_src_skipped;
} else {
in_params.behavior_flags.is_pitch_and_src_skipped.Assign(0);
}
if (voice_in.is_voice_drop_flag_clear_requested) {
in_params.voice_drop_flag = false;
}
if (in_params.additional_params_address != voice_in.additional_params_address ||
in_params.additional_params_size != voice_in.additional_params_size) {
in_params.additional_params_address = voice_in.additional_params_address;
in_params.additional_params_size = voice_in.additional_params_size;
// TODO(ogniK): Reattach buffer, do we actually need to? Maybe just signal to the DSP that
// our context is new
}
}
void ServerVoiceInfo::UpdateWaveBuffers(
const VoiceInfo::InParams& voice_in,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states,
BehaviorInfo& behavior_info) {
if (voice_in.is_new) {
// Initialize our wave buffers
for (auto& wave_buffer : in_params.wave_buffer) {
wave_buffer.start_sample_offset = 0;
wave_buffer.end_sample_offset = 0;
wave_buffer.is_looping = false;
wave_buffer.end_of_stream = false;
wave_buffer.buffer_address = 0;
wave_buffer.buffer_size = 0;
wave_buffer.context_address = 0;
wave_buffer.context_size = 0;
wave_buffer.sent_to_dsp = true;
}
// Mark all our wave buffers as invalid
for (std::size_t channel = 0; channel < static_cast<std::size_t>(in_params.channel_count);
channel++) {
for (auto& is_valid : voice_states[channel]->is_wave_buffer_valid) {
is_valid = false;
}
}
}
// Update our wave buffers
for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
// Assume that we have at least 1 channel voice state
const auto have_valid_wave_buffer = voice_states[0]->is_wave_buffer_valid[i];
UpdateWaveBuffer(in_params.wave_buffer[i], voice_in.wave_buffer[i], in_params.sample_format,
have_valid_wave_buffer, behavior_info);
}
}
void ServerVoiceInfo::UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer,
const WaveBuffer& in_wave_buffer, SampleFormat sample_format,
bool is_buffer_valid,
[[maybe_unused]] BehaviorInfo& behavior_info) {
if (!is_buffer_valid && out_wavebuffer.sent_to_dsp) {
out_wavebuffer.buffer_address = 0;
out_wavebuffer.buffer_size = 0;
}
if (!in_wave_buffer.sent_to_server || !in_params.buffer_mapped) {
// Validate sample offset sizes
if (sample_format == SampleFormat::Pcm16) {
const auto buffer_size = in_wave_buffer.buffer_size;
if (in_wave_buffer.start_sample_offset < 0 || in_wave_buffer.end_sample_offset < 0 ||
(buffer_size < (sizeof(s16) * in_wave_buffer.start_sample_offset)) ||
(buffer_size < (sizeof(s16) * in_wave_buffer.end_sample_offset))) {
// TODO(ogniK): Write error info
return;
}
}
// TODO(ogniK): ADPCM Size error
out_wavebuffer.sent_to_dsp = false;
out_wavebuffer.start_sample_offset = in_wave_buffer.start_sample_offset;
out_wavebuffer.end_sample_offset = in_wave_buffer.end_sample_offset;
out_wavebuffer.is_looping = in_wave_buffer.is_looping;
out_wavebuffer.end_of_stream = in_wave_buffer.end_of_stream;
out_wavebuffer.buffer_address = in_wave_buffer.buffer_address;
out_wavebuffer.buffer_size = in_wave_buffer.buffer_size;
out_wavebuffer.context_address = in_wave_buffer.context_address;
out_wavebuffer.context_size = in_wave_buffer.context_size;
in_params.buffer_mapped =
in_wave_buffer.buffer_address != 0 && in_wave_buffer.buffer_size != 0;
// TODO(ogniK): Pool mapper attachment
// TODO(ogniK): IsAdpcmLoopContextBugFixed
}
}
void ServerVoiceInfo::WriteOutStatus(
VoiceInfo::OutParams& voice_out, VoiceInfo::InParams& voice_in,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states) {
if (voice_in.is_new) {
in_params.is_new = true;
voice_out.wave_buffer_consumed = 0;
voice_out.played_sample_count = 0;
voice_out.voice_dropped = false;
} else if (!in_params.is_new) {
voice_out.wave_buffer_consumed = voice_states[0]->wave_buffer_consumed;
voice_out.played_sample_count = voice_states[0]->played_sample_count;
voice_out.voice_dropped = in_params.voice_drop_flag;
} else {
voice_out.wave_buffer_consumed = 0;
voice_out.played_sample_count = 0;
voice_out.voice_dropped = false;
}
}
const ServerVoiceInfo::InParams& ServerVoiceInfo::GetInParams() const {
return in_params;
}
ServerVoiceInfo::InParams& ServerVoiceInfo::GetInParams() {
return in_params;
}
const ServerVoiceInfo::OutParams& ServerVoiceInfo::GetOutParams() const {
return out_params;
}
ServerVoiceInfo::OutParams& ServerVoiceInfo::GetOutParams() {
return out_params;
}
bool ServerVoiceInfo::ShouldSkip() const {
// TODO(ogniK): Handle unmapped wave buffers or parameters
return !in_params.in_use || (in_params.wave_buffer_count == 0) || in_params.voice_drop_flag;
}
bool ServerVoiceInfo::UpdateForCommandGeneration(VoiceContext& voice_context) {
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT> dsp_voice_states{};
if (in_params.is_new) {
ResetResources(voice_context);
in_params.last_volume = in_params.volume;
in_params.is_new = false;
}
const s32 channel_count = in_params.channel_count;
for (s32 i = 0; i < channel_count; i++) {
const auto channel_resource = in_params.voice_channel_resource_id[i];
dsp_voice_states[i] =
&voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));
}
return UpdateParametersForCommandGeneration(dsp_voice_states);
}
void ServerVoiceInfo::ResetResources(VoiceContext& voice_context) {
const s32 channel_count = in_params.channel_count;
for (s32 i = 0; i < channel_count; i++) {
const auto channel_resource = in_params.voice_channel_resource_id[i];
auto& dsp_state =
voice_context.GetDspSharedState(static_cast<std::size_t>(channel_resource));
dsp_state = {};
voice_context.GetChannelResource(static_cast<std::size_t>(channel_resource))
.UpdateLastMixVolumes();
}
}
bool ServerVoiceInfo::UpdateParametersForCommandGeneration(
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states) {
const s32 channel_count = in_params.channel_count;
if (in_params.wave_buffer_flush_request_count > 0) {
FlushWaveBuffers(in_params.wave_buffer_flush_request_count, dsp_voice_states,
channel_count);
in_params.wave_buffer_flush_request_count = 0;
}
switch (in_params.current_playstate) {
case ServerPlayState::Play: {
for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
if (!in_params.wave_buffer[i].sent_to_dsp) {
for (s32 channel = 0; channel < channel_count; channel++) {
dsp_voice_states[channel]->is_wave_buffer_valid[i] = true;
}
in_params.wave_buffer[i].sent_to_dsp = true;
}
}
in_params.should_depop = false;
return HasValidWaveBuffer(dsp_voice_states[0]);
}
case ServerPlayState::Paused:
case ServerPlayState::Stop: {
in_params.should_depop = in_params.last_playstate == ServerPlayState::Play;
return in_params.should_depop;
}
case ServerPlayState::RequestStop: {
for (std::size_t i = 0; i < AudioCommon::MAX_WAVE_BUFFERS; i++) {
in_params.wave_buffer[i].sent_to_dsp = true;
for (s32 channel = 0; channel < channel_count; channel++) {
auto* dsp_state = dsp_voice_states[channel];
if (dsp_state->is_wave_buffer_valid[i]) {
dsp_state->wave_buffer_index =
(dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
dsp_state->wave_buffer_consumed++;
}
dsp_state->is_wave_buffer_valid[i] = false;
}
}
for (s32 channel = 0; channel < channel_count; channel++) {
auto* dsp_state = dsp_voice_states[channel];
dsp_state->offset = 0;
dsp_state->played_sample_count = 0;
dsp_state->fraction = 0;
dsp_state->sample_history.fill(0);
dsp_state->context = {};
}
in_params.current_playstate = ServerPlayState::Stop;
in_params.should_depop = in_params.last_playstate == ServerPlayState::Play;
return in_params.should_depop;
}
default:
UNREACHABLE_MSG("Invalid playstate {}", in_params.current_playstate);
}
return false;
}
void ServerVoiceInfo::FlushWaveBuffers(
u8 flush_count, std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states,
s32 channel_count) {
auto wave_head = in_params.wave_bufffer_head;
for (u8 i = 0; i < flush_count; i++) {
in_params.wave_buffer[wave_head].sent_to_dsp = true;
for (s32 channel = 0; channel < channel_count; channel++) {
auto* dsp_state = dsp_voice_states[channel];
dsp_state->wave_buffer_consumed++;
dsp_state->is_wave_buffer_valid[wave_head] = false;
dsp_state->wave_buffer_index =
(dsp_state->wave_buffer_index + 1) % AudioCommon::MAX_WAVE_BUFFERS;
}
wave_head = (wave_head + 1) % AudioCommon::MAX_WAVE_BUFFERS;
}
}
bool ServerVoiceInfo::HasValidWaveBuffer(const VoiceState* state) const {
const auto& valid_wb = state->is_wave_buffer_valid;
return std::find(valid_wb.begin(), valid_wb.end(), true) != valid_wb.end();
}
VoiceContext::VoiceContext(std::size_t voice_count_) : voice_count{voice_count_} {
for (std::size_t i = 0; i < voice_count; i++) {
voice_channel_resources.emplace_back(static_cast<s32>(i));
sorted_voice_info.push_back(&voice_info.emplace_back());
voice_states.emplace_back();
dsp_voice_states.emplace_back();
}
}
VoiceContext::~VoiceContext() {
sorted_voice_info.clear();
}
std::size_t VoiceContext::GetVoiceCount() const {
return voice_count;
}
ServerVoiceChannelResource& VoiceContext::GetChannelResource(std::size_t i) {
ASSERT(i < voice_count);
return voice_channel_resources.at(i);
}
const ServerVoiceChannelResource& VoiceContext::GetChannelResource(std::size_t i) const {
ASSERT(i < voice_count);
return voice_channel_resources.at(i);
}
VoiceState& VoiceContext::GetState(std::size_t i) {
ASSERT(i < voice_count);
return voice_states.at(i);
}
const VoiceState& VoiceContext::GetState(std::size_t i) const {
ASSERT(i < voice_count);
return voice_states.at(i);
}
VoiceState& VoiceContext::GetDspSharedState(std::size_t i) {
ASSERT(i < voice_count);
return dsp_voice_states.at(i);
}
const VoiceState& VoiceContext::GetDspSharedState(std::size_t i) const {
ASSERT(i < voice_count);
return dsp_voice_states.at(i);
}
ServerVoiceInfo& VoiceContext::GetInfo(std::size_t i) {
ASSERT(i < voice_count);
return voice_info.at(i);
}
const ServerVoiceInfo& VoiceContext::GetInfo(std::size_t i) const {
ASSERT(i < voice_count);
return voice_info.at(i);
}
ServerVoiceInfo& VoiceContext::GetSortedInfo(std::size_t i) {
ASSERT(i < voice_count);
return *sorted_voice_info.at(i);
}
const ServerVoiceInfo& VoiceContext::GetSortedInfo(std::size_t i) const {
ASSERT(i < voice_count);
return *sorted_voice_info.at(i);
}
s32 VoiceContext::DecodePcm16(s32* output_buffer, ServerWaveBuffer* wave_buffer, s32 channel,
s32 channel_count, s32 buffer_offset, s32 sample_count,
Core::Memory::Memory& memory) {
if (wave_buffer->buffer_address == 0) {
return 0;
}
if (wave_buffer->buffer_size == 0) {
return 0;
}
if (wave_buffer->end_sample_offset < wave_buffer->start_sample_offset) {
return 0;
}
const auto samples_remaining =
(wave_buffer->end_sample_offset - wave_buffer->start_sample_offset) - buffer_offset;
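// Offsets are per-channel sample counts; the source data is interleaved, hence the
// multiplication by channel_count when computing the start position.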
const auto start_offset = (wave_buffer->start_sample_offset + buffer_offset) * channel_count;
const auto buffer_pos = wave_buffer->buffer_address + start_offset;
s16* buffer_data = reinterpret_cast<s16*>(memory.GetPointer(buffer_pos));
const auto samples_processed = std::min(sample_count, samples_remaining);
// Fast path: mono data needs no channel deinterleaving
if (channel_count == 1) {
for (std::ptrdiff_t i = 0; i < samples_processed; i++) {
output_buffer[i] = buffer_data[i];
}
} else {
for (std::ptrdiff_t i = 0; i < samples_processed; i++) {
output_buffer[i] = buffer_data[i * channel_count + channel];
}
}
return samples_processed;
}
void VoiceContext::SortInfo() {
for (std::size_t i = 0; i < voice_count; i++) {
sorted_voice_info[i] = &voice_info[i];
}
std::sort(sorted_voice_info.begin(), sorted_voice_info.end(),
[](const ServerVoiceInfo* lhs, const ServerVoiceInfo* rhs) {
const auto& lhs_in = lhs->GetInParams();
const auto& rhs_in = rhs->GetInParams();
// Sort by priority
if (lhs_in.priority != rhs_in.priority) {
return lhs_in.priority > rhs_in.priority;
} else {
// If the priorities match, sort by sorting order
return lhs_in.sorting_order > rhs_in.sorting_order;
}
});
}
void VoiceContext::UpdateStateByDspShared() {
voice_states = dsp_voice_states;
}
} // namespace AudioCore

296
src/audio_core/voice_context.h Executable file
View File

@@ -0,0 +1,296 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include "audio_core/algorithm/interpolate.h"
#include "audio_core/codec.h"
#include "audio_core/common.h"
#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace Core::Memory {
class Memory;
}
namespace AudioCore {
class BehaviorInfo;
class VoiceContext;
enum class SampleFormat : u8 {
Invalid = 0,
Pcm8 = 1,
Pcm16 = 2,
Pcm24 = 3,
Pcm32 = 4,
PcmFloat = 5,
Adpcm = 6,
};
enum class PlayState : u8 {
Started = 0,
Stopped = 1,
Paused = 2,
};
enum class ServerPlayState {
Play = 0,
Stop = 1,
RequestStop = 2,
Paused = 3,
};
struct BiquadFilterParameter {
bool enabled{};
INSERT_PADDING_BYTES(1);
std::array<s16, 3> numerator{};
std::array<s16, 2> denominator{};
};
static_assert(sizeof(BiquadFilterParameter) == 0xc, "BiquadFilterParameter is an invalid size");
struct WaveBuffer {
u64_le buffer_address{};
u64_le buffer_size{};
s32_le start_sample_offset{};
s32_le end_sample_offset{};
u8 is_looping{};
u8 end_of_stream{};
u8 sent_to_server{};
INSERT_PADDING_BYTES(5);
u64 context_address{};
u64 context_size{};
INSERT_PADDING_BYTES(8);
};
static_assert(sizeof(WaveBuffer) == 0x38, "WaveBuffer is an invalid size");
struct ServerWaveBuffer {
VAddr buffer_address{};
std::size_t buffer_size{};
s32 start_sample_offset{};
s32 end_sample_offset{};
bool is_looping{};
bool end_of_stream{};
VAddr context_address{};
std::size_t context_size{};
bool sent_to_dsp{true};
};
struct BehaviorFlags {
BitField<0, 1, u16> is_played_samples_reset_at_loop_point;
BitField<1, 1, u16> is_pitch_and_src_skipped;
};
static_assert(sizeof(BehaviorFlags) == 0x4, "BehaviorFlags is an invalid size");
struct ADPCMContext {
u16 header{};
s16 yn1{};
s16 yn2{};
};
static_assert(sizeof(ADPCMContext) == 0x6, "ADPCMContext is an invalid size");
struct VoiceState {
s64 played_sample_count{};
s32 offset{};
s32 wave_buffer_index{};
std::array<bool, AudioCommon::MAX_WAVE_BUFFERS> is_wave_buffer_valid{};
s32 wave_buffer_consumed{};
std::array<s32, AudioCommon::MAX_SAMPLE_HISTORY> sample_history{};
s32 fraction{};
VAddr context_address{};
Codec::ADPCM_Coeff coeff{};
ADPCMContext context{};
std::array<s64, 2> biquad_filter_state{};
std::array<s32, AudioCommon::MAX_MIX_BUFFERS> previous_samples{};
u32 external_context_size{};
bool is_external_context_used{};
bool voice_dropped{};
};
class VoiceChannelResource {
public:
struct InParams {
s32_le id{};
std::array<float_le, AudioCommon::MAX_MIX_BUFFERS> mix_volume{};
bool in_use{};
INSERT_PADDING_BYTES(11);
};
static_assert(sizeof(InParams) == 0x70, "InParams is an invalid size");
};
class ServerVoiceChannelResource {
public:
explicit ServerVoiceChannelResource(s32 id_);
~ServerVoiceChannelResource();
bool InUse() const;
float GetCurrentMixVolumeAt(std::size_t i) const;
float GetLastMixVolumeAt(std::size_t i) const;
void Update(VoiceChannelResource::InParams& in_params);
void UpdateLastMixVolumes();
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& GetCurrentMixVolume() const;
const std::array<float, AudioCommon::MAX_MIX_BUFFERS>& GetLastMixVolume() const;
private:
s32 id{};
std::array<float, AudioCommon::MAX_MIX_BUFFERS> mix_volume{};
std::array<float, AudioCommon::MAX_MIX_BUFFERS> last_mix_volume{};
bool in_use{};
};
class VoiceInfo {
public:
struct InParams {
s32_le id{};
u32_le node_id{};
u8 is_new{};
u8 is_in_use{};
PlayState play_state{};
SampleFormat sample_format{};
s32_le sample_rate{};
s32_le priority{};
s32_le sorting_order{};
s32_le channel_count{};
float_le pitch{};
float_le volume{};
std::array<BiquadFilterParameter, 2> biquad_filter{};
s32_le wave_buffer_count{};
s16_le wave_buffer_head{};
INSERT_PADDING_BYTES(6);
u64_le additional_params_address{};
u64_le additional_params_size{};
s32_le mix_id{};
s32_le splitter_info_id{};
std::array<WaveBuffer, 4> wave_buffer{};
std::array<u32_le, 6> voice_channel_resource_ids{};
// TODO(ogniK): Remaining flags
u8 is_voice_drop_flag_clear_requested{};
u8 wave_buffer_flush_request_count{};
INSERT_PADDING_BYTES(2);
BehaviorFlags behavior_flags{};
INSERT_PADDING_BYTES(16);
};
static_assert(sizeof(InParams) == 0x170, "InParams is an invalid size");
struct OutParams {
u64_le played_sample_count{};
u32_le wave_buffer_consumed{};
u8 voice_dropped{};
INSERT_PADDING_BYTES(3);
};
static_assert(sizeof(OutParams) == 0x10, "OutParams is an invalid size");
};
class ServerVoiceInfo {
public:
struct InParams {
bool in_use{};
bool is_new{};
bool should_depop{};
SampleFormat sample_format{};
s32 sample_rate{};
s32 channel_count{};
s32 id{};
s32 node_id{};
s32 mix_id{};
ServerPlayState current_playstate{};
ServerPlayState last_playstate{};
s32 priority{};
s32 sorting_order{};
float pitch{};
float volume{};
float last_volume{};
std::array<BiquadFilterParameter, AudioCommon::MAX_BIQUAD_FILTERS> biquad_filter{};
s32 wave_buffer_count{};
s16 wave_bufffer_head{};
INSERT_PADDING_BYTES(2);
BehaviorFlags behavior_flags{};
VAddr additional_params_address{};
std::size_t additional_params_size{};
std::array<ServerWaveBuffer, AudioCommon::MAX_WAVE_BUFFERS> wave_buffer{};
std::array<s32, AudioCommon::MAX_CHANNEL_COUNT> voice_channel_resource_id{};
s32 splitter_info_id{};
u8 wave_buffer_flush_request_count{};
bool voice_drop_flag{};
bool buffer_mapped{};
std::array<bool, AudioCommon::MAX_BIQUAD_FILTERS> was_biquad_filter_enabled{};
};
struct OutParams {
s64 played_sample_count{};
s32 wave_buffer_consumed{};
};
ServerVoiceInfo();
~ServerVoiceInfo();
void Initialize();
void UpdateParameters(const VoiceInfo::InParams& voice_in, BehaviorInfo& behavior_info);
void UpdateWaveBuffers(const VoiceInfo::InParams& voice_in,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states,
BehaviorInfo& behavior_info);
void UpdateWaveBuffer(ServerWaveBuffer& out_wavebuffer, const WaveBuffer& in_wave_buffer,
SampleFormat sample_format, bool is_buffer_valid,
BehaviorInfo& behavior_info);
void WriteOutStatus(VoiceInfo::OutParams& voice_out, VoiceInfo::InParams& voice_in,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& voice_states);
const InParams& GetInParams() const;
InParams& GetInParams();
const OutParams& GetOutParams() const;
OutParams& GetOutParams();
bool ShouldSkip() const;
bool UpdateForCommandGeneration(VoiceContext& voice_context);
void ResetResources(VoiceContext& voice_context);
bool UpdateParametersForCommandGeneration(
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states);
void FlushWaveBuffers(u8 flush_count,
std::array<VoiceState*, AudioCommon::MAX_CHANNEL_COUNT>& dsp_voice_states,
s32 channel_count);
private:
std::vector<s16> stored_samples;
InParams in_params{};
OutParams out_params{};
bool HasValidWaveBuffer(const VoiceState* state) const;
};
class VoiceContext {
public:
explicit VoiceContext(std::size_t voice_count_);
~VoiceContext();
std::size_t GetVoiceCount() const;
ServerVoiceChannelResource& GetChannelResource(std::size_t i);
const ServerVoiceChannelResource& GetChannelResource(std::size_t i) const;
VoiceState& GetState(std::size_t i);
const VoiceState& GetState(std::size_t i) const;
VoiceState& GetDspSharedState(std::size_t i);
const VoiceState& GetDspSharedState(std::size_t i) const;
ServerVoiceInfo& GetInfo(std::size_t i);
const ServerVoiceInfo& GetInfo(std::size_t i) const;
ServerVoiceInfo& GetSortedInfo(std::size_t i);
const ServerVoiceInfo& GetSortedInfo(std::size_t i) const;
s32 DecodePcm16(s32* output_buffer, ServerWaveBuffer* wave_buffer, s32 channel,
s32 channel_count, s32 buffer_offset, s32 sample_count,
Core::Memory::Memory& memory);
void SortInfo();
void UpdateStateByDspShared();
private:
std::size_t voice_count{};
std::vector<ServerVoiceChannelResource> voice_channel_resources{};
std::vector<VoiceState> voice_states{};
std::vector<VoiceState> dsp_voice_states{};
std::vector<ServerVoiceInfo> voice_info{};
std::vector<ServerVoiceInfo*> sorted_voice_info{};
};
} // namespace AudioCore