AK: Efficiently resize CircularBuffer seekback copy distance

Previously, if we copied the last byte for a length of 100, we'd
recalculate the read span 100 times and memmove one byte 100 times,
which resulted in a lot of overhead.

Now, if we know that we have two consecutive copies of the data, we just
extend the distance to cover both copies, which halves the number of
times that we recalculate the span and actually call memmove.

This takes the running time of the attached benchmark case from 150ms
down to 15ms.
This commit is contained in:
Tim Schumacher 2023-04-13 15:47:12 +02:00 committed by Andreas Kling
parent 3e1626acdc
commit b1136ba357
Notes: sideshowbarker 2024-07-17 00:47:29 +09:00
2 changed files with 22 additions and 1 deletion

View File

@ -220,7 +220,13 @@ ErrorOr<size_t> CircularBuffer::copy_from_seekback(size_t distance, size_t lengt
if (next_span.size() == 0)
break;
remaining_length -= write(next_span.trim(remaining_length));
auto length_written = write(next_span.trim(remaining_length));
remaining_length -= length_written;
// If we copied right from the end of the seekback area (i.e. our length is larger than the distance)
// and the last copy was one complete "chunk", we can now double the distance to copy twice as much data in one go.
if (remaining_length > distance && length_written == distance)
distance *= 2;
}
return length - remaining_length;

View File

@ -348,3 +348,18 @@ TEST_CASE(offset_of_with_until_and_after_wrapping_around)
result = circular_buffer.offset_of("Well "sv, 14, 19);
EXPECT_EQ(result.value_or(42), 14ul);
}
// Regression benchmark: repeatedly copying from a 1-byte seekback distance
// used to memmove one byte at a time; this exercises the doubling-distance
// fast path in CircularBuffer::copy_from_seekback.
BENCHMARK_CASE(looping_copy_from_seekback)
{
    auto buffer = MUST(CircularBuffer::create_empty(16 * MiB));

    // Seed the buffer with a single byte so the seekback area is non-empty.
    auto bytes_written = buffer.write("\0"sv.bytes());
    EXPECT_EQ(bytes_written, 1ul);

    // Copy 15 MiB from a seekback distance of 1, i.e. repeat that byte.
    auto bytes_copied = TRY_OR_FAIL(buffer.copy_from_seekback(1, 15 * MiB));
    EXPECT_EQ(bytes_copied, 15 * MiB);
}