Kernel: Finish base implementation of VirtQueues

This commit includes a lot of small changes and additions needed to
finalize the base implementation of VirtIOQueues and VirtIODevices:
* The device-specific driver implementation now has to set up the
queues it needs before letting the base device class know it finished
initialization (see the sketches below)
* Supplying buffers to VirtQueues is now done via ScatterGatherLists
instead of arbitrary buffer pointers - this ensures the pointers are
physical and lets us follow the specification's requirement that
individual descriptors must point to physically contiguous buffers.
This can be further improved in the future by implementing support
for the Indirect-Descriptors feature (as defined by the
specification) to reduce descriptor usage for very fragmented buffers.
* When supplying buffers to a VirtQueue the driver must supply a
(temporarily-)unique token (usually the supplied buffer's virtual
address) so that it can later discern which buffer the device has
finished processing, since the device might not offer the F_IN_ORDER
feature.
* Device drivers now handle queue updates (supplied buffers being
returned from the device) by implementing a single pure virtual
method instead of setting a separate callback for each queue
* Two new VirtQueue methods were added to let the device driver
either discard or retrieve used/returned buffers from the device by
cleanly removing them from the descriptor chain (this also allows
the VirtQueue implementation to reuse those freed descriptors)
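
From a driver's point of view the resulting flow looks roughly like the
minimal sketch below. VirtIOExample, its REQUESTQ index and its transmit()
helper are hypothetical illustrations only; the VirtIODevice/VirtIOQueue
methods they call match the declarations changed in this commit (includes
elided):

    // Hypothetical driver sketch (not part of this commit).
    class VirtIOExample final : public VirtIODevice {
    public:
        explicit VirtIOExample(PCI::Address address)
            : VirtIODevice(address, "VirtIOExample")
        {
            // (feature negotiation elided - finish_init() VERIFYs that
            // features were accepted and queues were set up beforehand)
            if (setup_queues(1))  // request the queues this driver needs...
                finish_init();    // ...before declaring DRIVER_OK
        }

        void transmit(const u8* buffer, size_t size); // defined further below

    private:
        static constexpr u16 REQUESTQ = 0; // hypothetical queue index

        virtual bool handle_device_config_change() override { return true; }

        // One hook for all queues, replacing the per-queue callbacks.
        virtual void handle_queue_update(u16 queue_index) override
        {
            VERIFY(queue_index == REQUESTQ);
            size_t used_length = 0;
            // get_buffer() pops the next used descriptor chain and returns
            // the token supplied with it (nullptr once the queue is empty),
            // so completions can be matched even without VIRTIO_F_IN_ORDER.
            while (void* token = get_queue(REQUESTQ).get_buffer(&used_length)) {
                (void)token;
                (void)used_length;
            }
        }
    };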

This also includes the necessary changes to the VirtIOConsole
implementation to match these interface changes.
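
For the supply side, the transmit() helper declared in the sketch above
could look as follows (again purely illustrative; it mirrors the
VirtIOConsole::write() change in the diff and uses the buffer's address
as the completion token):

    // Hypothetical continuation of the sketch above: wrap the data in a
    // ScatterGatherList (one physically contiguous chunk per descriptor)
    // and pass a token that identifies the chain once the device returns it.
    void VirtIOExample::transmit(const u8* buffer, size_t size)
    {
        VERIFY(buffer && size > 0);
        auto scatter_list = ScatterGatherList::create_from_buffer(buffer, size);
        supply_buffer_and_notify(REQUESTQ, scatter_list, BufferType::DeviceReadable, const_cast<u8*>(buffer));
    }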

Co-authored-by: Sahan <sahan.h.fernando@gmail.com>
Idan Horowitz 2021-04-15 19:39:48 +10:00 committed by Andreas Kling
parent acdd1424bc
commit d1f7a2f9a5
7 changed files with 154 additions and 85 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* Copyright (c) 2021, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -123,7 +123,7 @@ auto VirtIODevice::mapping_for_bar(u8 bar) -> MappedMMIO&
void VirtIODevice::notify_queue(u16 queue_index)
{
dbgln("VirtIODevice: notifying about queue change at idx: {}", queue_index);
dbgln_if(VIRTIO_DEBUG, "{}: notifying about queue change at idx: {}", m_class_name, queue_index);
if (!m_notify_cfg)
out<u16>(REG_QUEUE_NOTIFY, queue_index);
else
@@ -207,9 +207,7 @@ bool VirtIODevice::accept_device_features(u64 device_features, u64 accepted_feat
m_did_accept_features = true;
if (is_feature_set(device_features, VIRTIO_F_VERSION_1)) {
accepted_features |= VIRTIO_F_VERSION_1;
} else {
dbgln_if(VIRTIO_DEBUG, "{}: legacy device detected", m_class_name);
accepted_features |= VIRTIO_F_VERSION_1; // let the device know we're not a legacy driver
}
if (is_feature_set(device_features, VIRTIO_F_RING_PACKED)) {
@@ -217,6 +215,11 @@ bool VirtIODevice::accept_device_features(u64 device_features, u64 accepted_feat
accepted_features &= ~(VIRTIO_F_RING_PACKED);
}
// TODO: implement indirect descriptors to allow queue_size buffers instead of buffers totalling (PAGE_SIZE * queue_size) bytes
if (is_feature_set(device_features, VIRTIO_F_INDIRECT_DESC)) {
// accepted_features |= VIRTIO_F_INDIRECT_DESC;
}
if (is_feature_set(device_features, VIRTIO_F_IN_ORDER)) {
accepted_features |= VIRTIO_F_IN_ORDER;
}
@@ -301,21 +304,24 @@ bool VirtIODevice::activate_queue(u16 queue_index)
return true;
}
void VirtIODevice::set_requested_queue_count(u16 count)
bool VirtIODevice::setup_queues(u16 requested_queue_count)
{
m_queue_count = count;
}
VERIFY(!m_did_setup_queues);
m_did_setup_queues = true;
bool VirtIODevice::setup_queues()
{
if (m_common_cfg) {
auto maximum_queue_count = config_read16(*m_common_cfg, COMMON_CFG_NUM_QUEUES);
if (m_queue_count == 0) {
if (requested_queue_count == 0) {
m_queue_count = maximum_queue_count;
} else if (m_queue_count > maximum_queue_count) {
} else if (requested_queue_count > maximum_queue_count) {
dbgln("{}: {} queues requested but only {} available!", m_class_name, m_queue_count, maximum_queue_count);
return false;
} else {
m_queue_count = requested_queue_count;
}
} else {
m_queue_count = requested_queue_count;
dbgln("{}: device's available queue count could not be determined!", m_class_name);
}
dbgln_if(VIRTIO_DEBUG, "{}: Setting up {} queues", m_class_name, m_queue_count);
@@ -330,23 +336,20 @@ bool VirtIODevice::setup_queues()
return true;
}
bool VirtIODevice::finish_init()
void VirtIODevice::finish_init()
{
VERIFY(m_did_accept_features);
VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK));
if (!setup_queues()) {
dbgln("{}: Failed to setup queues", m_class_name);
return false;
}
VERIFY(m_did_accept_features); // ensure features were negotiated
VERIFY(m_did_setup_queues); // ensure queues were set-up
VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish the initialization
set_status_bit(DEVICE_STATUS_DRIVER_OK);
dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", m_class_name);
return true;
}
void VirtIODevice::supply_buffer_and_notify(u16 queue_index, const u8* buffer, u32 len, BufferType buffer_type)
void VirtIODevice::supply_buffer_and_notify(u16 queue_index, const ScatterGatherList& scatter_list, BufferType buffer_type, void* token)
{
VERIFY(queue_index < m_queue_count);
if (get_queue(queue_index).supply_buffer({}, buffer, len, buffer_type))
if (get_queue(queue_index).supply_buffer({}, scatter_list, buffer_type, token))
notify_queue(queue_index);
}
@@ -367,10 +370,11 @@ void VirtIODevice::handle_irq(const RegisterState&)
}
}
if (isr_type & QUEUE_INTERRUPT) {
for (auto& queue : m_queues) {
if (queue.handle_interrupt())
return;
for (size_t i = 0; i < m_queues.size(); i++) {
if (get_queue(i).new_data_available())
return handle_queue_update(i);
}
dbgln_if(VIRTIO_DEBUG, "{}: Got queue interrupt but all queues are up to date!", m_class_name);
}
if (isr_type & ~(QUEUE_INTERRUPT | DEVICE_CONFIG_INTERRUPT))
dbgln("{}: Handling interrupt with unknown type: {}", m_class_name, isr_type);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* Copyright (c) 2021, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -32,6 +32,7 @@
#include <Kernel/PCI/Access.h>
#include <Kernel/PCI/Device.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/ScatterGatherList.h>
#include <Kernel/VirtIO/VirtIOQueue.h>
namespace Kernel {
@@ -54,6 +55,7 @@ namespace Kernel {
#define DEVICE_STATUS_DEVICE_NEEDS_RESET (1 << 6)
#define DEVICE_STATUS_FAILED (1 << 7)
#define VIRTIO_F_INDIRECT_DESC ((u64)1 << 28)
#define VIRTIO_F_VERSION_1 ((u64)1 << 32)
#define VIRTIO_F_RING_PACKED ((u64)1 << 34)
#define VIRTIO_F_IN_ORDER ((u64)1 << 35)
@@ -181,14 +183,14 @@ protected:
void clear_status_bit(u8);
void set_status_bit(u8);
u64 get_device_features();
bool finish_init();
bool setup_queues(u16 requested_queue_count = 0);
void finish_init();
VirtIOQueue& get_queue(u16 queue_index)
{
VERIFY(queue_index < m_queue_count);
return m_queues[queue_index];
}
void set_requested_queue_count(u16);
template<typename F>
bool negotiate_features(F f)
@@ -210,10 +212,10 @@ protected:
return is_feature_set(m_accepted_features, feature);
}
void supply_buffer_and_notify(u16 queue_index, const u8* buffer, u32 len, BufferType);
void supply_buffer_and_notify(u16 queue_index, const ScatterGatherList&, BufferType, void* token);
virtual void handle_irq(const RegisterState&) override;
virtual bool handle_device_config_change() = 0;
virtual void handle_queue_update(u16 queue_index) = 0;
private:
template<typename T>
@@ -230,7 +232,6 @@ private:
bool accept_device_features(u64 device_features, u64 accepted_features);
bool setup_queues();
bool setup_queue(u16 queue_index);
bool activate_queue(u16 queue_index);
void notify_queue(u16 queue_index);
@@ -238,6 +239,7 @@ private:
void reset_device();
u8 isr_status();
virtual void handle_irq(const RegisterState&) override;
NonnullOwnPtrVector<VirtIOQueue> m_queues;
NonnullOwnPtrVector<Configuration> m_configs;
@@ -252,6 +254,7 @@ private:
u8 m_status { 0 };
u64 m_accepted_features { 0 };
bool m_did_accept_features { false };
bool m_did_setup_queues { false };
u32 m_notify_multiplier { 0 };
};

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* Copyright (c) 2021, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -24,12 +24,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <Kernel/VM/ScatterGatherList.h>
#include <Kernel/VirtIO/VirtIOConsole.h>
namespace Kernel {
unsigned VirtIOConsole::next_device_id = 0;
VirtIOConsole::VirtIOConsole(PCI::Address address)
: CharacterDevice(229, 0)
: CharacterDevice(229, next_device_id++)
, VirtIODevice(address, "VirtIOConsole")
{
if (auto cfg = get_config(ConfigurationType::Device)) {
@@ -54,22 +57,15 @@ VirtIOConsole::VirtIOConsole(PCI::Address address)
}
});
dbgln("VirtIOConsole: cols: {}, rows: {}, max nr ports {}", cols, rows, max_nr_ports);
set_requested_queue_count(2 + max_nr_ports * 2); // base receiveq/transmitq for port0 + 2 per every additional port
success = finish_init();
success = setup_queues(2 + max_nr_ports * 2); // base receiveq/transmitq for port0 + 2 per every additional port
}
if (success) {
get_queue(RECEIVEQ).on_data_available = [&]() {
dbgln("VirtIOConsole: receive_queue on_data_available");
};
finish_init();
m_receive_region = MM.allocate_contiguous_kernel_region(PAGE_SIZE, "VirtIOConsole Receive", Region::Access::Read | Region::Access::Write);
if (m_receive_region) {
supply_buffer_and_notify(RECEIVEQ, m_receive_region->physical_page(0)->paddr().as_ptr(), m_receive_region->size(), BufferType::DeviceWritable);
supply_buffer_and_notify(RECEIVEQ, ScatterGatherList::create_from_physical(m_receive_region->physical_page(0)->paddr(), m_receive_region->size()), BufferType::DeviceWritable, m_receive_region->vaddr().as_ptr());
}
get_queue(TRANSMITQ).on_data_available = [&]() {
dbgln("VirtIOConsole: send_queue on_data_available");
};
m_transmit_region = MM.allocate_contiguous_kernel_region(PAGE_SIZE, "VirtIOConsole Transmit", Region::Access::Read | Region::Access::Write);
dbgln("TODO: Populate receive queue with a receive buffer");
}
}
}
@@ -84,6 +80,21 @@ bool VirtIOConsole::handle_device_config_change()
return true;
}
void VirtIOConsole::handle_queue_update(u16 queue_index)
{
VERIFY(queue_index <= TRANSMITQ);
switch (queue_index) {
case RECEIVEQ:
get_queue(RECEIVEQ).discard_used_buffers(); // TODO: do something with incoming data (users writing into qemu console) instead of just clearing
break;
case TRANSMITQ:
get_queue(TRANSMITQ).discard_used_buffers(); // clear outgoing buffers that the device finished with
break;
default:
VERIFY_NOT_REACHED();
}
}
bool VirtIOConsole::can_read(const FileDescription&, size_t) const
{
return false;
@@ -106,12 +117,9 @@ KResultOr<size_t> VirtIOConsole::write(FileDescription&, u64, const UserOrKernel
{
if (!size)
return 0;
VERIFY(size <= PAGE_SIZE);
if (!data.read(m_transmit_region->vaddr().as_ptr(), size)) {
return Kernel::KResult((ErrnoCode)-EFAULT);
}
supply_buffer_and_notify(TRANSMITQ, m_transmit_region->physical_page(0)->paddr().as_ptr(), size, BufferType::DeviceReadable);
auto scatter_list = ScatterGatherList::create_from_buffer(static_cast<const u8*>(data.user_or_kernel_ptr()), size);
supply_buffer_and_notify(TRANSMITQ, scatter_list, BufferType::DeviceReadable, const_cast<void*>(data.user_or_kernel_ptr()));
return size;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* Copyright (c) 2021, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,9 +58,12 @@ private:
virtual bool handle_device_config_change() override;
virtual String device_name() const override { return String::formatted("hvc{}", minor()); }
virtual void handle_queue_update(u16 queue_index) override;
OwnPtr<Region> m_receive_region;
OwnPtr<Region> m_transmit_region;
static unsigned next_device_id;
};
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* Copyright (c) 2021, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,18 +37,22 @@ VirtIOQueue::VirtIOQueue(u16 queue_size, u16 notify_offset)
size_t size_of_descriptors = sizeof(VirtIOQueueDescriptor) * queue_size;
size_t size_of_driver = sizeof(VirtIOQueueDriver) + queue_size * sizeof(u16);
size_t size_of_device = sizeof(VirtIOQueueDevice) + queue_size * sizeof(VirtIOQueueDeviceItem);
m_region = MM.allocate_contiguous_kernel_region(page_round_up(size_of_descriptors + size_of_driver + size_of_device), "VirtIO Queue", Region::Access::Read | Region::Access::Write);
if (m_region) {
m_queue_region = MM.allocate_contiguous_kernel_region(page_round_up(size_of_descriptors + size_of_driver + size_of_device), "VirtIO Queue", Region::Access::Read | Region::Access::Write);
VERIFY(m_queue_region);
// TODO: ensure alignment!!!
u8* ptr = m_region->vaddr().as_ptr();
memset(ptr, 0, m_region->size());
u8* ptr = m_queue_region->vaddr().as_ptr();
memset(ptr, 0, m_queue_region->size());
m_descriptors = reinterpret_cast<VirtIOQueueDescriptor*>(ptr);
m_driver = reinterpret_cast<VirtIOQueueDriver*>(ptr + size_of_descriptors);
m_device = reinterpret_cast<VirtIOQueueDevice*>(ptr + size_of_descriptors + size_of_driver);
m_tokens.resize(queue_size);
for (auto i = 0; i < queue_size; i++) {
m_descriptors[i].next = (i + 1) % queue_size; // link all of the descriptors in a circle
}
enable_interrupts();
}
}
VirtIOQueue::~VirtIOQueue()
{
@@ -64,20 +68,25 @@ void VirtIOQueue::disable_interrupts()
m_driver->flags = 1;
}
bool VirtIOQueue::supply_buffer(Badge<VirtIODevice>, const u8* buffer, u32 length, BufferType buffer_type)
bool VirtIOQueue::supply_buffer(Badge<VirtIODevice>, const ScatterGatherList& scatter_list, BufferType buffer_type, void* token)
{
VERIFY(buffer && length > 0);
VERIFY(m_free_buffers > 0);
VERIFY(scatter_list.length() && scatter_list.length() <= m_free_buffers);
m_free_buffers -= scatter_list.length();
auto descriptor_index = m_free_head;
m_descriptors[descriptor_index].flags = static_cast<u16>(buffer_type);
m_descriptors[descriptor_index].address = reinterpret_cast<u64>(buffer);
m_descriptors[descriptor_index].length = length;
auto last_index = descriptor_index;
scatter_list.for_each_entry([&](auto paddr, auto size) {
m_descriptors[descriptor_index].flags = static_cast<u16>(buffer_type) | VIRTQ_DESC_F_NEXT;
m_descriptors[descriptor_index].address = static_cast<u64>(paddr);
m_descriptors[descriptor_index].length = static_cast<u32>(size);
last_index = descriptor_index;
descriptor_index = m_descriptors[descriptor_index].next; // ensure we place the buffer in chain order
});
m_descriptors[last_index].flags &= ~(VIRTQ_DESC_F_NEXT); // last descriptor in chain doesn't have a next descriptor
m_free_buffers--;
m_free_head = (m_free_head + 1) % m_queue_size;
m_driver->rings[m_driver_index_shadow % m_queue_size] = descriptor_index; // m_driver_index_shadow is used to prevent accesses to index before the rings are updated
m_driver->rings[m_driver_index_shadow % m_queue_size] = m_free_head; // m_driver_index_shadow is used to prevent accesses to index before the rings are updated
m_tokens[m_free_head] = token;
m_free_head = descriptor_index;
full_memory_barrier();
@@ -89,18 +98,51 @@ bool VirtIOQueue::supply_buffer(Badge<VirtIODevice>, const u8* buffer, u32 lengt
auto device_flags = m_device->flags;
return !(device_flags & 1); // if bit 1 is enabled the device disabled interrupts
}
bool VirtIOQueue::new_data_available() const
{
return m_device->index != m_used_tail;
}
bool VirtIOQueue::handle_interrupt()
void* VirtIOQueue::get_buffer(size_t* size)
{
if (!new_data_available())
return false;
if (!new_data_available()) {
*size = 0;
return nullptr;
}
if (on_data_available)
on_data_available();
return true;
full_memory_barrier();
auto descriptor_index = m_device->rings[m_used_tail % m_queue_size].index;
*size = m_device->rings[m_used_tail % m_queue_size].length;
m_used_tail++;
auto token = m_tokens[descriptor_index];
pop_buffer(descriptor_index);
return token;
}
void VirtIOQueue::discard_used_buffers()
{
size_t size;
while (get_buffer(&size)) {
}
}
void VirtIOQueue::pop_buffer(u16 descriptor_index)
{
m_tokens[descriptor_index] = nullptr;
auto i = descriptor_index;
while (m_descriptors[i].flags & VIRTQ_DESC_F_NEXT) {
m_free_buffers++;
i = m_descriptors[i].next;
}
m_free_buffers++; // the last descriptor in the chain doesn't have the NEXT flag
m_descriptors[i].next = m_free_head; // put the popped descriptors back at the head of the free chain
m_free_head = descriptor_index;
}
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, the SerenityOS developers.
* Copyright (c) 2021, the SerenityOS developers.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -29,9 +29,13 @@
#include <AK/Badge.h>
#include <Kernel/SpinLock.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/ScatterGatherList.h>
namespace Kernel {
#define VIRTQ_DESC_F_NEXT 1
#define VIRTQ_DESC_F_INDIRECT 4
enum class BufferType {
DeviceReadable = 0,
DeviceWritable = 2
@@ -44,7 +48,7 @@ public:
VirtIOQueue(u16 queue_size, u16 notify_offset);
~VirtIOQueue();
bool is_null() const { return !m_region; }
bool is_null() const { return !m_queue_region; }
u16 notify_offset() const { return m_notify_offset; }
void enable_interrupts();
@@ -54,17 +58,18 @@ public:
PhysicalAddress driver_area() const { return to_physical(m_driver.ptr()); }
PhysicalAddress device_area() const { return to_physical(m_device.ptr()); }
bool supply_buffer(Badge<VirtIODevice>, const u8* buffer, u32 length, BufferType);
bool supply_buffer(Badge<VirtIODevice>, const ScatterGatherList&, BufferType, void* token);
bool new_data_available() const;
bool handle_interrupt();
Function<void()> on_data_available;
void* get_buffer(size_t*);
void discard_used_buffers();
private:
void pop_buffer(u16 descriptor_index);
PhysicalAddress to_physical(const void* ptr) const
{
auto offset = FlatPtr(ptr) - m_region->vaddr().get();
return m_region->physical_page(0)->paddr().offset(offset);
auto offset = FlatPtr(ptr) - m_queue_region->vaddr().get();
return m_queue_region->physical_page(0)->paddr().offset(offset);
}
struct [[gnu::packed]] VirtIOQueueDescriptor {
u64 address;
@@ -100,7 +105,8 @@ private:
OwnPtr<VirtIOQueueDescriptor> m_descriptors { nullptr };
OwnPtr<VirtIOQueueDriver> m_driver { nullptr };
OwnPtr<VirtIOQueueDevice> m_device { nullptr };
OwnPtr<Region> m_region;
Vector<void*> m_tokens;
OwnPtr<Region> m_queue_region;
SpinLock<u8> m_lock;
};

View File

@@ -73,7 +73,10 @@ $SERENITY_EXTRA_QEMU_ARGS
-device ahci,id=ahci
-device ide-hd,bus=ahci.0,drive=disk,unit=0
-usb
-debugcon stdio
-device virtio-serial
-chardev stdio,id=stdout,mux=on
-device virtconsole,chardev=stdout
-device isa-debugcon,chardev=stdout
-soundhw pcspk
-device sb16
"