Kernel: Don't trigger page faults during profiling stack walk

The kernel sampling profiler will walk thread stacks during the timer
tick handler. Since it's not safe to trigger page faults during IRQs,
we now avoid this by checking the page tables manually before accessing
each stack location.
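
In outline, the fix is: before the profiler dereferences a stack address from IRQ context, resolve that address through the page tables by hand and give up if the page isn't mapped, rather than letting the CPU fault. Below is a minimal user-space sketch of that idea, not the kernel code from this commit; the fake page-table map, lookup_pte() and safe_read() are invented for illustration, while the real implementation is MemoryManager::pte() and can_read_without_faulting() in the diff further down.

#include <cstdint>
#include <cstdio>
#include <map>
#include <optional>

struct FakePTE {
    bool present { false };
};

// Stand-in for the hardware page tables, keyed by virtual page number (vaddr >> 12).
static std::map<uintptr_t, FakePTE> g_fake_page_tables;

static const FakePTE* lookup_pte(uintptr_t vaddr)
{
    auto it = g_fake_page_tables.find(vaddr >> 12);
    return it == g_fake_page_tables.end() ? nullptr : &it->second;
}

// Refuse to read rather than fault: the profiler runs in the timer IRQ,
// where taking a page fault is not allowed.
static std::optional<uintptr_t> safe_read(uintptr_t vaddr)
{
    auto* pte = lookup_pte(vaddr);
    if (!pte || !pte->present)
        return std::nullopt;
    return *reinterpret_cast<const uintptr_t*>(vaddr);
}

int main()
{
    uintptr_t word = 0x1234;
    uintptr_t vaddr = reinterpret_cast<uintptr_t>(&word);
    g_fake_page_tables[vaddr >> 12] = { true }; // "map" the page holding our variable

    if (auto value = safe_read(vaddr))
        std::printf("mapped read: %#zx\n", static_cast<size_t>(*value));
    if (!safe_read(0x1000)) // not in our fake tables: refuse instead of faulting
        std::printf("refused unmapped read\n");
}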
Andreas Kling 2020-02-21 13:05:39 +01:00
parent f9a138aa4b
commit 59b9e49bcd
4 changed files with 31 additions and 1 deletion


@@ -115,6 +115,7 @@ union [[gnu::packed]] Descriptor
 class PageDirectoryEntry {
 public:
+    const PageTableEntry* page_table_base() const { return reinterpret_cast<PageTableEntry*>(m_raw & 0xfffff000u); }
     PageTableEntry* page_table_base() { return reinterpret_cast<PageTableEntry*>(m_raw & 0xfffff000u); }
     void set_page_table_base(u32 value)
     {


@@ -813,7 +813,7 @@ Vector<uintptr_t> Thread::raw_backtrace(uintptr_t ebp) const
     ProcessPagingScope paging_scope(process);
     Vector<uintptr_t, Profiling::max_stack_frame_count> backtrace;
     backtrace.append(ebp);
-    for (uintptr_t* stack_ptr = (uintptr_t*)ebp; process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2); stack_ptr = (uintptr_t*)*stack_ptr) {
+    for (uintptr_t* stack_ptr = (uintptr_t*)ebp; MM.can_read_without_faulting(process, VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2); stack_ptr = (uintptr_t*)*stack_ptr) {
         uintptr_t retaddr = stack_ptr[1];
         backtrace.append(retaddr);
         if (backtrace.size() == Profiling::max_stack_frame_count)
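
For reference, the loop above relies on the conventional i386 frame layout when frame pointers are in use: the word at *ebp is the caller's saved EBP and the word right above it is the return address, which is why each step asks whether sizeof(uintptr_t) * 2 bytes are readable. A hedged user-space illustration of the same walk over a hand-built frame chain (all names and values below are made up):

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // Three fake frames, each laid out as { saved EBP, return address }.
    uintptr_t frame_c[2] = { 0, 0xc0de0003 };                  // outermost frame: chain ends at 0
    uintptr_t frame_b[2] = { (uintptr_t)frame_c, 0xc0de0002 };
    uintptr_t frame_a[2] = { (uintptr_t)frame_b, 0xc0de0001 }; // innermost frame

    std::vector<uintptr_t> backtrace;
    for (uintptr_t* stack_ptr = frame_a; stack_ptr; stack_ptr = (uintptr_t*)*stack_ptr)
        backtrace.push_back(stack_ptr[1]); // return address sits one word above the saved EBP

    for (auto retaddr : backtrace)
        std::printf("%#zx\n", (size_t)retaddr);
}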


@@ -191,6 +191,21 @@ void MemoryManager::parse_memory_map()
         m_user_physical_pages += region.finalize_capacity();
 }
+const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
+{
+    ASSERT_INTERRUPTS_DISABLED();
+    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
+    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
+    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
+    auto* pd = quickmap_pd(const_cast<PageDirectory&>(page_directory), page_directory_table_index);
+    const PageDirectoryEntry& pde = pd[page_directory_index];
+    if (!pde.is_present())
+        return nullptr;
+    return &quickmap_pt(PhysicalAddress((uintptr_t)pde.page_table_base()))[page_table_index];
+}
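
The three index computations above follow the 32-bit PAE split of a virtual address: bits 31-30 pick one of the four page directories, bits 29-21 one of 512 page-directory entries, bits 20-12 one of 512 page-table entries, and bits 11-0 are the byte offset inside the 4 KiB page. A standalone check of that arithmetic (the address value is arbitrary):

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t vaddr = 0xdeadbeef;
    uint32_t pdpt_index = (vaddr >> 30) & 0x3;  // which of the 4 page directories
    uint32_t pd_index = (vaddr >> 21) & 0x1ff;  // which of 512 page-directory entries
    uint32_t pt_index = (vaddr >> 12) & 0x1ff;  // which of 512 page-table entries
    uint32_t offset = vaddr & 0xfff;            // byte offset within the 4 KiB page
    std::printf("pdpt=%u pd=%u pt=%u offset=%#x\n", pdpt_index, pd_index, pt_index, offset);
}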
 PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
@@ -611,6 +626,17 @@ bool MemoryManager::validate_kernel_read(const Process& process, VirtualAddress
     return validate_range<AccessSpace::Kernel, AccessType::Read>(process, vaddr, size);
 }
+bool MemoryManager::can_read_without_faulting(const Process& process, VirtualAddress vaddr, size_t size) const
+{
+    // FIXME: Use the size argument!
+    UNUSED_PARAM(size);
+    auto* pte = const_cast<MemoryManager*>(this)->pte(process.page_directory(), vaddr);
+    if (!pte)
+        return false;
+    return pte->is_present();
+}
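
As the FIXME above notes, only the page containing vaddr is checked, so a read that straddles a page boundary (the caller asks about sizeof(uintptr_t) * 2 bytes) could still hit an unmapped page. A hedged sketch of how the size argument might be honored, not part of this commit, reusing only helpers visible in this diff and assuming a 4096-byte page size:

bool MemoryManager::can_read_without_faulting(const Process& process, VirtualAddress vaddr, size_t size) const
{
    auto* self = const_cast<MemoryManager*>(this);
    // Probe every page that [vaddr, vaddr + size) touches.
    for (uintptr_t addr = vaddr.get() & ~(uintptr_t)0xfff; addr < vaddr.get() + size; addr += 4096) {
        auto* pte = self->pte(process.page_directory(), VirtualAddress(addr));
        if (!pte || !pte->is_present())
            return false;
    }
    return true;
}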
 bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr, size_t size) const
 {
     if (!is_user_address(vaddr))


@@ -91,6 +91,8 @@ public:
     bool validate_kernel_read(const Process&, VirtualAddress, size_t) const;
+    bool can_read_without_faulting(const Process&, VirtualAddress, size_t) const;
     enum class ShouldZeroFill {
         No,
         Yes
@@ -164,6 +166,7 @@ private:
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
+    const PageTableEntry* pte(const PageDirectory&, VirtualAddress);
     PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
     RefPtr<PageDirectory> m_kernel_page_directory;