/*
 * Copyright (c) 2021, kleines Filmröllchen <filmroellchen@serenityos.org>
 * Copyright (c) 2022, Tim Schumacher <timschumi@gmx.de>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/FixedArray.h>
#include <AK/MemMem.h>
#include <AK/MemoryStream.h>
#include <AK/Vector.h>

namespace AK {

FixedMemoryStream::FixedMemoryStream(Bytes bytes, Mode mode)
    : m_bytes(bytes)
    , m_writing_enabled(mode == Mode::ReadWrite)
{
}

FixedMemoryStream::FixedMemoryStream(ReadonlyBytes bytes)
    : m_bytes({ const_cast<u8*>(bytes.data()), bytes.size() })
    , m_writing_enabled(false)
{
}

bool FixedMemoryStream::is_eof() const
{
    return m_offset >= m_bytes.size();
}

bool FixedMemoryStream::is_open() const
{
    return true;
}

void FixedMemoryStream::close()
{
    // FIXME: It doesn't make sense to close a memory stream. Therefore, we don't do anything here. Is that fine?
}

ErrorOr<void> FixedMemoryStream::truncate(size_t)
{
    return Error::from_errno(EBADF);
}

ErrorOr<Bytes> FixedMemoryStream::read_some(Bytes bytes)
{
    auto to_read = min(remaining(), bytes.size());
    if (to_read == 0)
        return Bytes {};

    m_bytes.slice(m_offset, to_read).copy_to(bytes);
    m_offset += to_read;
    return bytes.trim(to_read);
}

ErrorOr<void> FixedMemoryStream::read_until_filled(AK::Bytes bytes)
{
    if (remaining() < bytes.size())
        return Error::from_string_view_or_print_error_and_return_errno("Can't read past the end of the stream memory"sv, EINVAL);

    m_bytes.slice(m_offset).copy_trimmed_to(bytes);
    m_offset += bytes.size();

    return {};
}

ErrorOr<size_t> FixedMemoryStream::seek(i64 offset, SeekMode seek_mode)
{
    switch (seek_mode) {
    case SeekMode::SetPosition:
        if (offset > static_cast<i64>(m_bytes.size()))
            return Error::from_string_view_or_print_error_and_return_errno("Offset past the end of the stream memory"sv, EINVAL);

        m_offset = offset;
        break;
    case SeekMode::FromCurrentPosition:
        if (offset + static_cast<i64>(m_offset) > static_cast<i64>(m_bytes.size()))
            return Error::from_string_view_or_print_error_and_return_errno("Offset past the end of the stream memory"sv, EINVAL);

        m_offset += offset;
        break;
    case SeekMode::FromEndPosition:
        if (-offset > static_cast<i64>(m_bytes.size()))
            return Error::from_string_view_or_print_error_and_return_errno("Offset past the start of the stream memory"sv, EINVAL);

        m_offset = m_bytes.size() + offset;
        break;
    }
    return m_offset;
}

ErrorOr<size_t> FixedMemoryStream::write_some(ReadonlyBytes bytes)
{
    // MemoryStream isn't based on file-descriptors, but since most other
    // Stream implementations are, the interface specifies EBADF as the
    // "we don't support this particular operation" error code.
    if (!m_writing_enabled)
        return Error::from_errno(EBADF);

    // FIXME: Can this not error?
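    // Note: copy_trimmed_to() copies at most min(source size, destination size) bytes,
    // so a write that starts near the end of the fixed buffer is silently truncated;
    // the returned count tells the caller how much actually made it into the buffer.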
    auto const nwritten = bytes.copy_trimmed_to(m_bytes.slice(m_offset));
    m_offset += nwritten;
    return nwritten;
}

ErrorOr<void> FixedMemoryStream::write_until_depleted(ReadonlyBytes bytes)
{
    if (remaining() < bytes.size())
        return Error::from_string_view_or_print_error_and_return_errno("Write of entire buffer ends past the memory area"sv, EINVAL);

    TRY(write_some(bytes));
    return {};
}

size_t FixedMemoryStream::offset() const
{
    return m_offset;
}

size_t FixedMemoryStream::remaining() const
{
    return m_bytes.size() - m_offset;
}

ErrorOr<Bytes> AllocatingMemoryStream::read_some(Bytes bytes)
{
    size_t read_bytes = 0;

    while (read_bytes < bytes.size()) {
        VERIFY(m_write_offset >= m_read_offset);

        auto range = TRY(next_read_range());
        if (range.size() == 0)
            break;

        auto copied_bytes = range.copy_trimmed_to(bytes.slice(read_bytes));

        read_bytes += copied_bytes;
        m_read_offset += copied_bytes;
    }

    cleanup_unused_chunks();

    return bytes.trim(read_bytes);
}

ErrorOr<size_t> AllocatingMemoryStream::write_some(ReadonlyBytes bytes)
{
    size_t written_bytes = 0;

    while (written_bytes < bytes.size()) {
        VERIFY(m_write_offset >= m_read_offset);

        auto range = TRY(next_write_range());

        auto copied_bytes = bytes.slice(written_bytes).copy_trimmed_to(range);

        written_bytes += copied_bytes;
        m_write_offset += copied_bytes;
    }

    return written_bytes;
}

ErrorOr<void> AllocatingMemoryStream::discard(size_t count)
{
    VERIFY(m_write_offset >= m_read_offset);

    if (count > used_buffer_size())
        return Error::from_string_view_or_print_error_and_return_errno("Number of discarded bytes is higher than the number of allocated bytes"sv, EINVAL);

    m_read_offset += count;

    cleanup_unused_chunks();

    return {};
}

bool AllocatingMemoryStream::is_eof() const
{
    return used_buffer_size() == 0;
}

bool AllocatingMemoryStream::is_open() const
{
    return true;
}

void AllocatingMemoryStream::close()
{
}

size_t AllocatingMemoryStream::used_buffer_size() const
{
    return m_write_offset - m_read_offset;
}

ErrorOr<Optional<size_t>> AllocatingMemoryStream::offset_of(ReadonlyBytes needle) const
{
    VERIFY(m_write_offset >= m_read_offset);

    if (m_chunks.size() == 0)
        return Optional<size_t> {};

    // Ensure that we don't have empty chunks at the beginning of the stream. Our trimming implementation
    // assumes this to be the case, since this should be held up by `cleanup_unused_chunks()` at all times.
    VERIFY(m_read_offset < CHUNK_SIZE);

    auto empty_chunks_at_end = ((m_chunks.size() * CHUNK_SIZE - m_write_offset) / CHUNK_SIZE);
    auto chunk_count = m_chunks.size() - empty_chunks_at_end;

    auto search_spans = TRY(FixedArray<ReadonlyBytes>::create(chunk_count));

    for (size_t i = 0; i < chunk_count; i++) {
        search_spans[i] = m_chunks[i].span();
    }

    auto used_size_of_last_chunk = m_write_offset % CHUNK_SIZE;

    // The case where the stored write offset is actually the used space is the only case where a result of zero
    // actually is zero. In other cases (i.e. our write offset is beyond the size of a chunk) the write offset
    // already points to the beginning of the next chunk, in that case a result of zero indicates "use the last chunk in full".
    if (m_write_offset >= CHUNK_SIZE && used_size_of_last_chunk == 0)
        used_size_of_last_chunk = CHUNK_SIZE;

    // Trimming is done first to ensure that we don't unintentionally shift around if the first and last chunks are the same.
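    // Worked example (assuming CHUNK_SIZE = 4096): with m_read_offset = 10 and
    // m_write_offset = 4100, two chunks are in use and empty_chunks_at_end is 0.
    // used_size_of_last_chunk is 4100 % 4096 = 4, so the last span is trimmed to
    // its first 4 bytes and the first span is sliced past the 10 consumed bytes.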
    search_spans[chunk_count - 1] = search_spans[chunk_count - 1].trim(used_size_of_last_chunk);
    search_spans[0] = search_spans[0].slice(m_read_offset);

    return AK::memmem(search_spans.begin(), search_spans.end(), needle);
}

ErrorOr<ReadonlyBytes> AllocatingMemoryStream::next_read_range()
{
    VERIFY(m_write_offset >= m_read_offset);

    size_t const chunk_index = m_read_offset / CHUNK_SIZE;
    size_t const chunk_offset = m_read_offset % CHUNK_SIZE;
    size_t const read_size = min(CHUNK_SIZE - m_read_offset % CHUNK_SIZE, m_write_offset - m_read_offset);

    if (read_size == 0)
        return ReadonlyBytes { static_cast<u8 const*>(nullptr), 0 };

    VERIFY(chunk_index < m_chunks.size());

    return ReadonlyBytes { m_chunks[chunk_index].data() + chunk_offset, read_size };
}

ErrorOr<Bytes> AllocatingMemoryStream::next_write_range()
{
    VERIFY(m_write_offset >= m_read_offset);

    size_t const chunk_index = m_write_offset / CHUNK_SIZE;
    size_t const chunk_offset = m_write_offset % CHUNK_SIZE;
    size_t const write_size = CHUNK_SIZE - m_write_offset % CHUNK_SIZE;

    if (chunk_index >= m_chunks.size())
        TRY(m_chunks.try_append(TRY(Chunk::create_uninitialized(CHUNK_SIZE))));

    VERIFY(chunk_index < m_chunks.size());

    return Bytes { m_chunks[chunk_index].data() + chunk_offset, write_size };
}

void AllocatingMemoryStream::cleanup_unused_chunks()
{
    VERIFY(m_write_offset >= m_read_offset);

    auto const chunks_to_remove = m_read_offset / CHUNK_SIZE;

    m_chunks.remove(0, chunks_to_remove);

    m_read_offset -= CHUNK_SIZE * chunks_to_remove;
    m_write_offset -= CHUNK_SIZE * chunks_to_remove;
}

}
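// A minimal usage sketch (illustrative only; the `Mode` scoping and the `MUST`
// macro are assumptions about the surrounding AK environment, not part of this file):
//
//     u8 buffer[16] {};
//     FixedMemoryStream fixed { Bytes { buffer, sizeof(buffer) }, FixedMemoryStream::Mode::ReadWrite };
//     MUST(fixed.write_until_depleted("hello"sv.bytes()));
//     MUST(fixed.seek(0, SeekMode::SetPosition));
//
//     AllocatingMemoryStream growing;
//     MUST(growing.write_some("hello"sv.bytes()));
//     auto offset = MUST(growing.offset_of("ll"sv.bytes())); // == Optional<size_t> { 2 }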