Everywhere: rename 'Sample' type to 'Frame'

Because that's what it really is: a frame is composed of one or more
samples, and in the case of SerenityOS that's two (stereo). This will make
it less confusing for future maintainers.
Authored by Cesar Torres on 2021-03-19 22:15:54 +01:00, committed by Andreas Kling
parent f4bd095aa3
commit 0d5e1e9df1
7 changed files with 36 additions and 36 deletions
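
To illustrate the distinction the commit message draws, here is a minimal standalone sketch (not the SerenityOS sources; the names and the std::vector buffer are illustrative) of a stereo frame that holds one sample per channel, mirroring the left/right layout of the Audio::Frame struct shown in the diff below.

```cpp
// Sketch: a "sample" is one value for one channel; a "frame" groups the
// samples for every channel that play at the same instant.
// For SerenityOS's stereo audio, that is two samples per frame.
#include <cstddef>
#include <vector>

struct Frame {
    double left { 0 };  // left-channel sample, roughly -1.0 .. +1.0
    double right { 0 }; // right-channel sample, roughly -1.0 .. +1.0
};

int main()
{
    std::vector<Frame> buffer(1024); // 1024 frames == 2048 individual samples
    return buffer.size() == 1024 ? 0 : 1;
}
```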

View File

@@ -83,7 +83,7 @@ constexpr const char* wave_strings[] = {
"Square",
"Saw",
"Noise",
"Sample",
"Frame",
};
constexpr int first_wave = Sine;

View File

@@ -45,7 +45,7 @@ Track::~Track()
void Track::fill_sample(Sample& sample)
{
-Audio::Sample new_sample;
+Audio::Frame new_sample;
for (size_t note = 0; note < note_count; ++note) {
if (!m_roll_iters[note].is_end()) {
@@ -86,7 +86,7 @@ void Track::fill_sample(Sample& sample)
VERIFY_NOT_REACHED();
}
-Audio::Sample note_sample;
+Audio::Frame note_sample;
switch (m_wave) {
case Wave::Sine:
note_sample = sine(note);
@@ -172,7 +172,7 @@ String Track::set_recorded_sample(const StringView& path)
// All of the information for these waves is on Wikipedia.
-Audio::Sample Track::sine(size_t note)
+Audio::Frame Track::sine(size_t note)
{
double pos = note_frequencies[note] / sample_rate;
double sin_step = pos * 2 * M_PI;
@@ -181,7 +181,7 @@ Audio::Sample Track::sine(size_t note)
return w;
}
-Audio::Sample Track::saw(size_t note)
+Audio::Frame Track::saw(size_t note)
{
double saw_step = note_frequencies[note] / sample_rate;
double t = m_pos[note];
@@ -190,7 +190,7 @@ Audio::Sample Track::saw(size_t note)
return w;
}
-Audio::Sample Track::square(size_t note)
+Audio::Frame Track::square(size_t note)
{
double pos = note_frequencies[note] / sample_rate;
double square_step = pos * 2 * M_PI;
@@ -199,7 +199,7 @@ Audio::Sample Track::square(size_t note)
return w;
}
-Audio::Sample Track::triangle(size_t note)
+Audio::Frame Track::triangle(size_t note)
{
double triangle_step = note_frequencies[note] / sample_rate;
double t = m_pos[note];
@@ -208,14 +208,14 @@ Audio::Sample Track::triangle(size_t note)
return w;
}
-Audio::Sample Track::noise() const
+Audio::Frame Track::noise() const
{
double random_percentage = static_cast<double>(rand()) / RAND_MAX;
double w = (random_percentage * 2) - 1;
return w;
}
-Audio::Sample Track::recorded_sample(size_t note)
+Audio::Frame Track::recorded_sample(size_t note)
{
int t = m_pos[note];
if (t >= static_cast<int>(m_recorded_sample.size()))

View File

@@ -42,7 +42,7 @@ public:
explicit Track(const u32& time);
~Track();
-const Vector<Audio::Sample>& recorded_sample() const { return m_recorded_sample; }
+const Vector<Audio::Frame>& recorded_sample() const { return m_recorded_sample; }
const SinglyLinkedList<RollNote>& roll_notes(int note) const { return m_roll_notes[note]; }
int wave() const { return m_wave; }
int attack() const { return m_attack; }
@@ -65,19 +65,19 @@ public:
void set_delay(int delay);
private:
-Audio::Sample sine(size_t note);
-Audio::Sample saw(size_t note);
-Audio::Sample square(size_t note);
-Audio::Sample triangle(size_t note);
-Audio::Sample noise() const;
-Audio::Sample recorded_sample(size_t note);
+Audio::Frame sine(size_t note);
+Audio::Frame saw(size_t note);
+Audio::Frame square(size_t note);
+Audio::Frame triangle(size_t note);
+Audio::Frame noise() const;
+Audio::Frame recorded_sample(size_t note);
void sync_roll(int note);
void set_sustain_impl(int sustain);
Vector<Sample> m_delay_buffer;
-Vector<Audio::Sample> m_recorded_sample;
+Vector<Audio::Frame> m_recorded_sample;
u8 m_note_on[note_count] { 0 };
double m_power[note_count] { 0 };

View File

@@ -36,7 +36,7 @@ i32 Buffer::allocate_id()
}
template<typename SampleReader>
-static void read_samples_from_stream(InputMemoryStream& stream, SampleReader read_sample, Vector<Sample>& samples, ResampleHelper& resampler, int num_channels)
+static void read_samples_from_stream(InputMemoryStream& stream, SampleReader read_sample, Vector<Frame>& samples, ResampleHelper& resampler, int num_channels)
{
double norm_l = 0;
double norm_r = 0;
@@ -45,7 +45,7 @@ static void read_samples_from_stream(InputMemoryStream& stream, SampleReader rea
case 1:
for (;;) {
while (resampler.read_sample(norm_l, norm_r)) {
-samples.append(Sample(norm_l));
+samples.append(Frame(norm_l));
}
norm_l = read_sample(stream);
@@ -58,7 +58,7 @@ static void read_samples_from_stream(InputMemoryStream& stream, SampleReader rea
case 2:
for (;;) {
while (resampler.read_sample(norm_l, norm_r)) {
-samples.append(Sample(norm_l, norm_r));
+samples.append(Frame(norm_l, norm_r));
}
norm_l = read_sample(stream);
norm_r = read_sample(stream);
@@ -113,7 +113,7 @@ RefPtr<Buffer> Buffer::from_pcm_data(ReadonlyBytes data, ResampleHelper& resampl
RefPtr<Buffer> Buffer::from_pcm_stream(InputMemoryStream& stream, ResampleHelper& resampler, int num_channels, int bits_per_sample, int num_samples)
{
-Vector<Sample> fdata;
+Vector<Frame> fdata;
fdata.ensure_capacity(num_samples);
switch (bits_per_sample) {

View File

@@ -37,22 +37,22 @@ namespace Audio {
// A single sample in an audio buffer.
// Values are floating point, and should range from -1.0 to +1.0
-struct Sample {
-Sample()
+struct Frame {
+Frame()
: left(0)
, right(0)
{
}
// For mono
-Sample(double left)
+Frame(double left)
: left(left)
, right(left)
{
}
// For stereo
-Sample(double left, double right)
+Frame(double left, double right)
: left(left)
, right(right)
{
@@ -78,7 +78,7 @@ struct Sample {
right *= pct;
}
-Sample& operator+=(const Sample& other)
+Frame& operator+=(const Frame& other)
{
left += other.left;
right += other.right;
@@ -111,7 +111,7 @@ class Buffer : public RefCounted<Buffer> {
public:
static RefPtr<Buffer> from_pcm_data(ReadonlyBytes data, ResampleHelper& resampler, int num_channels, int bits_per_sample);
static RefPtr<Buffer> from_pcm_stream(InputMemoryStream& stream, ResampleHelper& resampler, int num_channels, int bits_per_sample, int num_samples);
-static NonnullRefPtr<Buffer> create_with_samples(Vector<Sample>&& samples)
+static NonnullRefPtr<Buffer> create_with_samples(Vector<Frame>&& samples)
{
return adopt(*new Buffer(move(samples)));
}
@@ -120,20 +120,20 @@ public:
return adopt(*new Buffer(move(buffer), buffer_id, sample_count));
}
-const Sample* samples() const { return (const Sample*)data(); }
+const Frame* samples() const { return (const Frame*)data(); }
int sample_count() const { return m_sample_count; }
const void* data() const { return m_buffer.data<void>(); }
-int size_in_bytes() const { return m_sample_count * (int)sizeof(Sample); }
+int size_in_bytes() const { return m_sample_count * (int)sizeof(Frame); }
int id() const { return m_id; }
const Core::AnonymousBuffer& anonymous_buffer() const { return m_buffer; }
private:
-explicit Buffer(const Vector<Sample> samples)
-: m_buffer(Core::AnonymousBuffer::create_with_size(samples.size() * sizeof(Sample)))
+explicit Buffer(const Vector<Frame> samples)
+: m_buffer(Core::AnonymousBuffer::create_with_size(samples.size() * sizeof(Frame)))
, m_id(allocate_id())
, m_sample_count(samples.size())
{
-memcpy(m_buffer.data<void>(), samples.data(), samples.size() * sizeof(Sample));
+memcpy(m_buffer.data<void>(), samples.data(), samples.size() * sizeof(Frame));
}
explicit Buffer(Core::AnonymousBuffer buffer, i32 buffer_id, int sample_count)

View File

@@ -86,8 +86,8 @@ void Mixer::mix()
active_mix_queues.remove_all_matching([&](auto& entry) { return !entry->client(); });
-Audio::Sample mixed_buffer[1024];
-auto mixed_buffer_length = (int)(sizeof(mixed_buffer) / sizeof(Audio::Sample));
+Audio::Frame mixed_buffer[1024];
+auto mixed_buffer_length = (int)(sizeof(mixed_buffer) / sizeof(Audio::Frame));
// Mix the buffers together into the output
for (auto& queue : active_mix_queues) {
@@ -98,7 +98,7 @@ void Mixer::mix()
for (int i = 0; i < mixed_buffer_length; ++i) {
auto& mixed_sample = mixed_buffer[i];
-Audio::Sample sample;
+Audio::Frame sample;
if (!queue->get_next_sample(sample))
break;
mixed_sample += sample;

View File

@@ -51,7 +51,7 @@ public:
bool is_full() const { return m_queue.size() >= 3; }
void enqueue(NonnullRefPtr<Audio::Buffer>&&);
-bool get_next_sample(Audio::Sample& sample)
+bool get_next_sample(Audio::Frame& sample)
{
if (m_paused)
return false;