/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <AK/Memory.h>
#include <AK/StringView.h>
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/SharedInodeVMObject.h>

//#define MM_DEBUG
//#define PAGE_FAULT_DEBUG

namespace Kernel {

Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable, bool kernel)
    : m_range(range)
    , m_offset_in_vmobject(offset_in_vmobject)
    , m_vmobject(move(vmobject))
    , m_name(name)
    , m_access(access)
    , m_cacheable(cacheable)
    , m_kernel(kernel)
{
    MM.register_region(*this);
}

Region::~Region()
{
    // Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
    // Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
    // find the address<->region mappings in an invalid state there.
    ScopedSpinLock lock(s_mm_lock);
    if (m_page_directory) {
        unmap(ShouldDeallocateVirtualMemoryRange::Yes);
        ASSERT(!m_page_directory);
    }
    MM.unregister_region(*this);
}
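
// Create a copy of this region for a forked address space.
// Shared regions get a new Region backed by the same VMObject; private regions are
// marked copy-on-write in both parent and child; regions marked ZeroedOnFork get a
// fresh, zero-filled AnonymousVMObject instead of a copy.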
NonnullOwnPtr<Region> Region::clone()
{
    ASSERT(Process::current());

    ScopedSpinLock lock(s_mm_lock);
    if (m_inherit_mode == InheritMode::ZeroedOnFork) {
        ASSERT(m_mmap);
        ASSERT(!m_shared);
        ASSERT(vmobject().is_anonymous());
        auto zeroed_region = Region::create_user_accessible(m_range, AnonymousVMObject::create_with_size(size()), 0, m_name, m_access);
        zeroed_region->set_mmap(m_mmap);
        zeroed_region->set_inherit_mode(m_inherit_mode);
        return zeroed_region;
    }

    if (m_shared) {
        ASSERT(!m_stack);
#ifdef MM_DEBUG
        dbg() << "Region::clone(): Sharing " << name() << " (" << vaddr() << ")";
#endif
        if (vmobject().is_inode())
            ASSERT(vmobject().is_shared_inode());
        // Create a new region backed by the same VMObject.
        auto region = Region::create_user_accessible(m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access);
        region->set_mmap(m_mmap);
        region->set_shared(m_shared);
        return region;
    }

    if (vmobject().is_inode())
        ASSERT(vmobject().is_private_inode());

#ifdef MM_DEBUG
    dbg() << "Region::clone(): CoWing " << name() << " (" << vaddr() << ")";
#endif
    // Set up a COW region. The parent (this) region becomes COW as well!
    ensure_cow_map().fill(true);
    remap();
    auto clone_region = Region::create_user_accessible(m_range, m_vmobject->clone(), m_offset_in_vmobject, m_name, m_access);
    clone_region->ensure_cow_map();
    if (m_stack) {
        ASSERT(is_readable());
        ASSERT(is_writable());
        ASSERT(vmobject().is_anonymous());
        clone_region->set_stack(true);
    }
    clone_region->set_mmap(m_mmap);
    return clone_region;
}
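
// Commit physical pages for the entire region up front, flushing the TLB once at the end.
// Returns false if a physical page could not be allocated.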
bool Region::commit()
{
    ScopedSpinLock lock(s_mm_lock);
#ifdef MM_DEBUG
    dbg() << "MM: Commit " << page_count() << " pages in Region " << this << " (VMO=" << &vmobject() << ") at " << vaddr();
#endif
    for (size_t i = 0; i < page_count(); ++i) {
        if (!commit(i)) {
            // Flush what we did commit
            if (i > 0)
                MM.flush_tlb(vaddr(), i + 1);
            return false;
        }
    }
    MM.flush_tlb(vaddr(), page_count());
    return true;
}
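
// Commit a single page: unless the slot already holds a real page (not the shared zero page),
// allocate a zero-filled user physical page for it. The caller must hold s_mm_lock and is in
// charge of flushing the TLB.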
bool Region::commit(size_t page_index)
{
    ASSERT(vmobject().is_anonymous() || vmobject().is_purgeable());
    ASSERT(s_mm_lock.own_lock());
    auto& vmobject_physical_page_entry = physical_page_slot(page_index);
    if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page())
        return true;
    auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
    if (!physical_page) {
        klog() << "MM: commit was unable to allocate a physical page";
        return false;
    }
    vmobject_physical_page_entry = move(physical_page);
    remap_page(page_index, false); // caller is in charge of flushing tlb
    return true;
}

u32 Region::cow_pages() const
{
    if (!m_cow_map)
        return 0;
    u32 count = 0;
    for (size_t i = 0; i < m_cow_map->size(); ++i)
        count += m_cow_map->get(i);
    return count;
}

size_t Region::amount_dirty() const
{
    if (!vmobject().is_inode())
        return amount_resident();
    return static_cast<const InodeVMObject&>(vmobject()).amount_dirty();
}

size_t Region::amount_resident() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto* page = physical_page(i);
        if (page && !page->is_shared_zero_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}

size_t Region::amount_shared() const
{
    size_t bytes = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        auto* page = physical_page(i);
        if (page && page->ref_count() > 1 && !page->is_shared_zero_page())
            bytes += PAGE_SIZE;
    }
    return bytes;
}
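
// Factory helpers: both construct a Region over the given VMObject range; they differ only in
// whether the region is flagged as a kernel region and whether user-mode access is allowed.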
NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
{
    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, false);
    region->m_user_accessible = true;
    return region;
}

NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
{
    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, true);
    region->m_user_accessible = false;
    return region;
}
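
// A page must be copied on write if it is currently the shared zero page, or if this is a
// non-shared region whose CoW bitmap has the page marked.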
bool Region::should_cow(size_t page_index) const
{
    auto* page = physical_page(page_index);
    if (page && page->is_shared_zero_page())
        return true;
    if (m_shared)
        return false;
    return m_cow_map && m_cow_map->get(page_index);
}

void Region::set_should_cow(size_t page_index, bool cow)
{
    ASSERT(!m_shared);
    ensure_cow_map().set(page_index, cow);
}

Bitmap& Region::ensure_cow_map() const
{
    if (!m_cow_map)
        m_cow_map = make<Bitmap>(page_count(), true);
    return *m_cow_map;
}
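
// Write the page table entry for a single page in this region. Returns false if the PTE
// could not be obtained (for example, if allocating a page table failed); the page directory
// lock must already be held.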
bool Region::map_individual_page_impl(size_t page_index)
{
    ASSERT(m_page_directory->get_lock().own_lock());
    auto page_vaddr = vaddr_from_page_index(page_index);
    auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
    if (!pte) {
#ifdef MM_DEBUG
        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << " " << name() << " cannot create PTE for " << page_vaddr;
#endif
        return false;
    }
    auto* page = physical_page(page_index);
    if (!page || (!is_readable() && !is_writable())) {
        pte->clear();
    } else {
        pte->set_cache_disabled(!m_cacheable);
        pte->set_physical_page_base(page->paddr().get());
        pte->set_present(true);
        if (should_cow(page_index))
            pte->set_writable(false);
        else
            pte->set_writable(is_writable());
        if (Processor::current().has_feature(CPUFeature::NX))
            pte->set_execute_disabled(!is_executable());
        pte->set_user_allowed(is_user_accessible());
#ifdef MM_DEBUG
        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte->raw() << "{" << pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
#endif
    }
    return true;
}
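
// Re-apply the PTE for a single page, optionally flushing its TLB entry afterwards.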
bool Region::remap_page(size_t page_index, bool with_flush)
{
    ScopedSpinLock lock(s_mm_lock);
    ASSERT(m_page_directory);
    ScopedSpinLock page_lock(m_page_directory->get_lock());
    ASSERT(physical_page(page_index));
    bool success = map_individual_page_impl(page_index);
    if (with_flush)
        MM.flush_tlb(vaddr_from_page_index(page_index));
    return success;
}
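
// Tear down every PTE belonging to this region, flush the TLB, and optionally return the
// virtual range to the page directory's range allocator.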
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
{
    ScopedSpinLock lock(s_mm_lock);
    ASSERT(m_page_directory);
    ScopedSpinLock page_lock(m_page_directory->get_lock());
    size_t count = page_count();
    for (size_t i = 0; i < count; ++i) {
        auto vaddr = vaddr_from_page_index(i);
        MM.release_pte(*m_page_directory, vaddr, i == count - 1);
#ifdef MM_DEBUG
        auto* page = physical_page(i);
        dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
#endif
    }
    MM.flush_tlb(vaddr(), page_count());
    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
        if (m_page_directory->range_allocator().contains(range()))
            m_page_directory->range_allocator().deallocate(range());
        else
            m_page_directory->identity_range_allocator().deallocate(range());
    }
    m_page_directory = nullptr;
}

void Region::set_page_directory(PageDirectory& page_directory)
{
    ASSERT(!m_page_directory || m_page_directory == &page_directory);
    ASSERT(s_mm_lock.own_lock());
    m_page_directory = page_directory;
}
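
// Map the whole region into the given page directory. Mapping stops at the first page that
// fails, and success is reported only if every page was mapped.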
bool Region::map(PageDirectory& page_directory)
{
    ScopedSpinLock lock(s_mm_lock);
    ScopedSpinLock page_lock(page_directory.get_lock());
    set_page_directory(page_directory);
#ifdef MM_DEBUG
    dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
#endif
    size_t page_index = 0;
    while (page_index < page_count()) {
        if (!map_individual_page_impl(page_index))
            break;
        ++page_index;
    }
    if (page_index > 0) {
        MM.flush_tlb(vaddr(), page_index);
        return page_index == page_count();
    }
    return false;
}

void Region::remap()
{
    ASSERT(m_page_directory);
    map(*m_page_directory);
}
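
// Top-level page fault dispatcher for this region: not-present faults are routed to the
// inode or zero-fill handlers (or rejected), and write faults on CoW pages trigger a copy.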
PageFaultResponse Region::handle_fault(const PageFault& fault)
{
    auto page_index_in_region = page_index_from_address(fault.vaddr());
    if (fault.type() == PageFault::Type::PageNotPresent) {
        if (fault.is_read() && !is_readable()) {
            dbg() << "NP(non-readable) fault in Region{" << this << "}[" << page_index_in_region << "]";
            return PageFaultResponse::ShouldCrash;
        }
        if (fault.is_write() && !is_writable()) {
            dbg() << "NP(non-writable) write fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
            return PageFaultResponse::ShouldCrash;
        }
        if (vmobject().is_inode()) {
#ifdef PAGE_FAULT_DEBUG
            dbg() << "NP(inode) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
            return handle_inode_fault(page_index_in_region);
        }
#ifdef MAP_SHARED_ZERO_PAGE_LAZILY
        if (fault.is_read()) {
            physical_page_slot(page_index_in_region) = MM.shared_zero_page();
            remap_page(page_index_in_region);
            return PageFaultResponse::Continue;
        }
        return handle_zero_fault(page_index_in_region);
#else
        dbg() << "BUG! Unexpected NP fault at " << fault.vaddr();
        return PageFaultResponse::ShouldCrash;
#endif
    }
    ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
    if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "PV(cow) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
        if (physical_page(page_index_in_region)->is_shared_zero_page()) {
#ifdef PAGE_FAULT_DEBUG
            dbg() << "NP(zero) fault in Region{" << this << "}[" << page_index_in_region << "]";
#endif
            return handle_zero_fault(page_index_in_region);
        }
        return handle_cow_fault(page_index_in_region);
    }
    dbg() << "PV(error) fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
    return PageFaultResponse::ShouldCrash;
}
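
// Lazily allocate a zero-filled physical page for an anonymous region the first time it is touched.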
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_anonymous());

    LOCKER(vmobject().m_paging_lock);

    auto& page_slot = physical_page_slot(page_index_in_region);

    if (!page_slot.is_null() && !page_slot->is_shared_zero_page()) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "MM: zero_page() but page already present. Fine with me!";
#endif
        if (!remap_page(page_index_in_region))
            return PageFaultResponse::OutOfMemory;
        return PageFaultResponse::Continue;
    }

    auto current_thread = Thread::current();
    if (current_thread != nullptr)
        current_thread->did_zero_fault();

    auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
    if (page.is_null()) {
        klog() << "MM: handle_zero_fault was unable to allocate a physical page";
        return PageFaultResponse::OutOfMemory;
    }
#ifdef PAGE_FAULT_DEBUG
    dbg() << ">> ZERO " << page->paddr();
#endif
    page_slot = move(page);
    if (!remap_page(page_index_in_region)) {
        klog() << "MM: handle_zero_fault was unable to allocate a page table to map " << page_slot;
        return PageFaultResponse::OutOfMemory;
    }
    return PageFaultResponse::Continue;
}
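
// Break copy-on-write sharing: if nobody else holds a reference to the page, simply remap it
// writable; otherwise allocate a fresh page and copy the old contents into it.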
PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    auto& page_slot = physical_page_slot(page_index_in_region);
    if (page_slot->ref_count() == 1) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << ">> It's a COW page but nobody is sharing it anymore. Remap r/w";
#endif
        set_should_cow(page_index_in_region, false);
        if (!remap_page(page_index_in_region))
            return PageFaultResponse::OutOfMemory;
        return PageFaultResponse::Continue;
    }

    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->did_cow_fault();

#ifdef PAGE_FAULT_DEBUG
    dbg() << ">> It's a COW page and it's time to COW!";
#endif
    auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
    if (page.is_null()) {
        klog() << "MM: handle_cow_fault was unable to allocate a physical page";
        return PageFaultResponse::OutOfMemory;
    }

    u8* dest_ptr = MM.quickmap_page(*page);
    const u8* src_ptr = vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
#ifdef PAGE_FAULT_DEBUG
    dbg() << ">> COW " << page->paddr() << " <- " << page_slot->paddr();
#endif
    {
        SmapDisabler disabler;
        void* fault_at;
        if (!safe_memcpy(dest_ptr, src_ptr, PAGE_SIZE, fault_at)) {
            if ((u8*)fault_at >= dest_ptr && (u8*)fault_at <= dest_ptr + PAGE_SIZE)
                dbg() << ">> COW: error copying page " << page_slot->paddr() << "/" << VirtualAddress(src_ptr) << " to " << page->paddr() << "/" << VirtualAddress(dest_ptr) << ": failed to write to page at " << VirtualAddress(fault_at);
            else if ((u8*)fault_at >= src_ptr && (u8*)fault_at <= src_ptr + PAGE_SIZE)
                dbg() << ">> COW: error copying page " << page_slot->paddr() << "/" << VirtualAddress(src_ptr) << " to " << page->paddr() << "/" << VirtualAddress(dest_ptr) << ": failed to read from page at " << VirtualAddress(fault_at);
            else
                ASSERT_NOT_REACHED();
        }
    }
    page_slot = move(page);
    MM.unquickmap_page();
    set_should_cow(page_index_in_region, false);
    if (!remap_page(page_index_in_region))
        return PageFaultResponse::OutOfMemory;
    return PageFaultResponse::Continue;
}
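
// Page in file-backed memory: read one page from the inode into a stack buffer, zero-fill any
// short read, then copy the data into a freshly allocated physical page and map it.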
PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(vmobject().is_inode());

    LOCKER(vmobject().m_paging_lock);

    ASSERT_INTERRUPTS_DISABLED();
    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];
#ifdef PAGE_FAULT_DEBUG
    dbg() << "Inode fault in " << name() << " page index: " << page_index_in_region;
#endif
    if (!vmobject_physical_page_entry.is_null()) {
#ifdef PAGE_FAULT_DEBUG
        dbg() << "MM: page_in_from_inode() but page already present. Fine with me!";
#endif
        if (!remap_page(page_index_in_region))
            return PageFaultResponse::OutOfMemory;
        return PageFaultResponse::Continue;
    }

    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->did_inode_fault();

#ifdef MM_DEBUG
    dbg() << "MM: page_in_from_inode ready to read from inode";
#endif

    u8 page_buffer[PAGE_SIZE];
    auto& inode = inode_vmobject.inode();
    auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
    auto nread = inode.read_bytes((first_page_index() + page_index_in_region) * PAGE_SIZE, PAGE_SIZE, buffer, nullptr);
    if (nread < 0) {
        klog() << "MM: handle_inode_fault had error (" << nread << ") while reading!";
        return PageFaultResponse::ShouldCrash;
    }
    if (nread < PAGE_SIZE) {
        // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
        memset(page_buffer + nread, 0, PAGE_SIZE - nread);
    }

    vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
    if (vmobject_physical_page_entry.is_null()) {
        klog() << "MM: handle_inode_fault was unable to allocate a physical page";
        return PageFaultResponse::OutOfMemory;
    }

    u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
    {
        void* fault_at;
        if (!safe_memcpy(dest_ptr, page_buffer, PAGE_SIZE, fault_at)) {
            if ((u8*)fault_at >= dest_ptr && (u8*)fault_at <= dest_ptr + PAGE_SIZE)
                dbg() << ">> inode fault: error copying data to " << vmobject_physical_page_entry->paddr() << "/" << VirtualAddress(dest_ptr) << ", failed at " << VirtualAddress(fault_at);
            else
                ASSERT_NOT_REACHED();
        }
    }
    MM.unquickmap_page();
    remap_page(page_index_in_region);
    return PageFaultResponse::Continue;
}
}