Kernel: Share the "return to ring 0/3 from signal" trampolines globally.

Generate a special page containing the "return from signal" trampoline code
on startup and then route signalled threads to it. This avoids a page
allocation in every process that ever receives a signal.
Andreas Kling, 2019-07-19 17:01:16 +02:00
commit f8beb0f665, parent fdf931cfce
5 changed files with 36 additions and 21 deletions
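
For orientation, a condensed sketch of the new shape, stitched together from the hunks below (surrounding code elided):

    // One trampoline page, generated once at boot:
    VirtualAddress g_return_to_ring3_from_signal_trampoline;
    VirtualAddress g_return_to_ring0_from_signal_trampoline;

    void Process::initialize()
    {
        // ...
        create_signal_trampolines(); // emits both trampolines into a single shared page
    }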


@@ -35,10 +35,14 @@
 //#define SIGNAL_DEBUG
 //#define SHARED_BUFFER_DEBUG
 
+static void create_signal_trampolines();
+
 static pid_t next_pid;
 InlineLinkedList<Process>* g_processes;
 static String* s_hostname;
 static Lock* s_hostname_lock;
+VirtualAddress g_return_to_ring3_from_signal_trampoline;
+VirtualAddress g_return_to_ring0_from_signal_trampoline;
 
 void Process::initialize()
 {
@@ -46,6 +50,8 @@ void Process::initialize()
     g_processes = new InlineLinkedList<Process>;
     s_hostname = new String("courage");
     s_hostname_lock = new Lock;
+
+    create_signal_trampolines();
 }
 
 Vector<pid_t> Process::all_pids()
@@ -673,15 +679,15 @@ void Process::sys$exit(int status)
     ASSERT_NOT_REACHED();
 }
 
-void Process::create_signal_trampolines_if_needed()
+void create_signal_trampolines()
 {
-    if (!m_return_to_ring3_from_signal_trampoline.is_null())
-        return;
-    // FIXME: This should be a global trampoline shared by all processes, not one created per process!
-    // FIXME: Remap as read-only after setup.
-    auto* region = allocate_region(VirtualAddress(), PAGE_SIZE, "Signal trampolines", PROT_READ | PROT_WRITE | PROT_EXEC);
-    m_return_to_ring3_from_signal_trampoline = region->vaddr();
-    u8* code_ptr = m_return_to_ring3_from_signal_trampoline.as_ptr();
+    InterruptDisabler disabler;
+
+    // NOTE: We leak this region.
+    auto* trampoline_region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines").leak_ref();
+    g_return_to_ring3_from_signal_trampoline = trampoline_region->vaddr();
+
+    u8* code_ptr = (u8*)trampoline_region->vaddr().as_ptr();
     *code_ptr++ = 0x58; // pop eax (Argument to signal handler (ignored here))
     *code_ptr++ = 0x5a; // pop edx (Original signal mask to restore)
     *code_ptr++ = 0xb8; // mov eax, <u32>
@@ -700,7 +706,7 @@ void Process::create_signal_trampolines_if_needed()
     *code_ptr++ = 0x0f; // ud2
     *code_ptr++ = 0x0b;
 
-    m_return_to_ring0_from_signal_trampoline = VirtualAddress((u32)code_ptr);
+    g_return_to_ring0_from_signal_trampoline = VirtualAddress((u32)code_ptr);
     *code_ptr++ = 0x58; // pop eax (Argument to signal handler (ignored here))
     *code_ptr++ = 0x5a; // pop edx (Original signal mask to restore)
     *code_ptr++ = 0xb8; // mov eax, <u32>
@@ -717,6 +723,9 @@ void Process::create_signal_trampolines_if_needed()
     *code_ptr++ = 0x82;
     *code_ptr++ = 0x0f; // ud2
     *code_ptr++ = 0x0b;
+
+    trampoline_region->set_writable(false);
+    MM.remap_region(*trampoline_region->page_directory(), *trampoline_region);
 }
 
 int Process::sys$restore_signal_mask(u32 mask)
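
Decoded, each trampoline is a short instruction sequence. A sketch based on the byte comments above; the bytes between the mov opcode and the trailing 0x82 are not shown in these hunks, and reading the lone 0x82 as the operand of a syscall interrupt is an assumption:

    // pop eax          ; 0x58 - discard the argument passed to the signal handler
    // pop edx          ; 0x5a - original signal mask to restore
    // mov eax, <u32>   ; 0xb8 - load a syscall number
    // ...              ; (immediate and following bytes elided in these hunks)
    // int 0x82         ; assumed: the lone 0x82 byte is this instruction's operand
    // ud2              ; 0x0f 0x0b - trap if the syscall ever returns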


@@ -26,6 +26,9 @@ class ProcessTracer;
 timeval kgettimeofday();
 void kgettimeofday(timeval&);
 
+extern VirtualAddress g_return_to_ring3_from_signal_trampoline;
+extern VirtualAddress g_return_to_ring0_from_signal_trampoline;
+
 class Process : public InlineLinkedListNode<Process>
     , public Weakable<Process> {
     friend class InlineLinkedListNode<Process>;
@@ -287,8 +290,6 @@
     int alloc_fd(int first_candidate_fd = 0);
     void disown_all_shared_buffers();
 
-    void create_signal_trampolines_if_needed();
-
     Thread* m_main_thread { nullptr };
     RefPtr<PageDirectory> m_page_directory;
@@ -332,9 +333,6 @@
     NonnullRefPtrVector<Region> m_regions;
 
-    VirtualAddress m_return_to_ring3_from_signal_trampoline;
-    VirtualAddress m_return_to_ring0_from_signal_trampoline;
-
     pid_t m_ppid { 0 };
     mode_t m_umask { 022 };


@@ -362,7 +362,6 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
     bool interrupting_in_kernel = (ret_cs & 3) == 0;
 
     ProcessPagingScope paging_scope(m_process);
-    m_process.create_signal_trampolines_if_needed();
 
     if (interrupting_in_kernel) {
 #ifdef SIGNAL_DEBUG
@@ -419,9 +418,9 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
     push_value_on_stack(signal);
 
     if (interrupting_in_kernel)
-        push_value_on_stack(m_process.m_return_to_ring0_from_signal_trampoline.get());
+        push_value_on_stack(g_return_to_ring0_from_signal_trampoline.get());
     else
-        push_value_on_stack(m_process.m_return_to_ring3_from_signal_trampoline.get());
+        push_value_on_stack(g_return_to_ring3_from_signal_trampoline.get());
 
     ASSERT((m_tss.esp % 16) == 0);
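
The low two bits of CS encode the privilege ring, so dispatch picks the trampoline matching the interrupted context; condensed from the hunk above:

    bool interrupting_in_kernel = (ret_cs & 3) == 0; // ring 0 iff the CPL bits are clear
    push_value_on_stack(interrupting_in_kernel
        ? g_return_to_ring0_from_signal_trampoline.get()
        : g_return_to_ring3_from_signal_trampoline.get());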


@@ -450,20 +450,28 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return PageFaultResponse::ShouldCrash;
 }
 
-RefPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& name)
+RefPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, bool user_accessible)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     ASSERT(range.is_valid());
-    auto region = Region::create_kernel_only(range, move(name), PROT_READ | PROT_WRITE | PROT_EXEC, false);
+    RefPtr<Region> region;
+    if (user_accessible)
+        region = Region::create_user_accessible(range, name, PROT_READ | PROT_WRITE | PROT_EXEC, false);
+    else
+        region = Region::create_kernel_only(range, name, PROT_READ | PROT_WRITE | PROT_EXEC, false);
     MM.map_region_at_address(*m_kernel_page_directory, *region, range.base());
     // FIXME: It would be cool if these could zero-fill on demand instead.
     region->commit();
     return region;
 }
 
+RefPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name)
+{
+    return allocate_kernel_region(size, name, true);
+}
+
 void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
 {
     for (auto& region : m_user_physical_regions) {

@@ -70,7 +70,8 @@
     void map_for_kernel(VirtualAddress, PhysicalAddress);
 
-    RefPtr<Region> allocate_kernel_region(size_t, String&& name);
+    RefPtr<Region> allocate_kernel_region(size_t, const StringView& name, bool user_accessible = false);
+    RefPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name);
     void map_region_at_address(PageDirectory&, Region&, VirtualAddress);
 
     unsigned user_physical_pages() const { return m_user_physical_pages; }
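
Taken together, a minimal usage sketch of the new entry point, mirroring create_signal_trampolines() above (all names come from the hunks; the region is deliberately leaked so the shared page lives for the kernel's lifetime):

    auto* region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines").leak_ref();
    u8* code_ptr = region->vaddr().as_ptr();
    // ... emit trampoline bytes through code_ptr ...
    region->set_writable(false); // then seal the page against further writes
    MM.remap_region(*region->page_directory(), *region);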