2020-07-31 00:38:15 +03:00
|
|
|
/*
|
2021-01-29 16:38:49 +03:00
|
|
|
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
|
2021-02-24 19:00:58 +03:00
|
|
|
* Copyright (c) 2021, Leon Albrecht <leon2002.la@gmail.com>
|
2020-07-31 00:38:15 +03:00
|
|
|
*
|
2021-04-22 11:24:48 +03:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-07-31 00:38:15 +03:00
|
|
|
*/
|
|
|
|
|
2021-04-24 11:30:20 +03:00
|
|
|
#include <Kernel/Arch/x86/SmapDisabler.h>
|
2020-07-31 00:38:15 +03:00
|
|
|
#include <Kernel/FileSystem/FileDescription.h>
|
2021-03-02 21:01:02 +03:00
|
|
|
#include <Kernel/PerformanceEventBuffer.h>
|
2021-05-07 08:29:19 +03:00
|
|
|
#include <Kernel/PerformanceManager.h>
|
2020-07-31 00:38:15 +03:00
|
|
|
#include <Kernel/Process.h>
|
2021-01-29 00:36:20 +03:00
|
|
|
#include <Kernel/VM/MemoryManager.h>
|
2020-07-31 00:38:15 +03:00
|
|
|
#include <Kernel/VM/PageDirectory.h>
|
2020-12-29 04:11:47 +03:00
|
|
|
#include <Kernel/VM/PrivateInodeVMObject.h>
|
2020-07-31 00:38:15 +03:00
|
|
|
#include <Kernel/VM/Region.h>
|
|
|
|
#include <Kernel/VM/SharedInodeVMObject.h>
|
|
|
|
#include <LibC/limits.h>
|
2021-01-29 16:38:49 +03:00
|
|
|
#include <LibELF/Validation.h>
|
2020-07-31 00:38:15 +03:00
|
|
|
|
|
|
|
namespace Kernel {
|
|
|
|
|
2021-01-29 16:38:49 +03:00
|
|
|
static bool should_make_executable_exception_for_dynamic_loader(bool make_readable, bool make_writable, bool make_executable, const Region& region)
|
2020-07-31 00:38:15 +03:00
|
|
|
{
|
2021-01-29 16:38:49 +03:00
|
|
|
// Normally we don't allow W -> X transitions, but we have to make an exception
|
|
|
|
// for the dynamic loader, which needs to do this after performing text relocations.
|
2020-07-31 00:38:15 +03:00
|
|
|
|
2021-01-29 16:38:49 +03:00
|
|
|
// FIXME: Investigate whether we could get rid of all text relocations entirely.
|
|
|
|
|
|
|
|
// The exception is only made if all the following criteria is fulfilled:
|
|
|
|
|
|
|
|
// The region must be RW
|
|
|
|
if (!(region.is_readable() && region.is_writable() && !region.is_executable()))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The region wants to become RX
|
|
|
|
if (!(make_readable && !make_writable && make_executable))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The region is backed by a file
|
|
|
|
if (!region.vmobject().is_inode())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The file mapping is private, not shared (no relocations in a shared mapping!)
|
|
|
|
if (!region.vmobject().is_private_inode())
|
|
|
|
return false;
|
|
|
|
|
2021-04-13 20:29:34 +03:00
|
|
|
auto& inode_vm = static_cast<const InodeVMObject&>(region.vmobject());
|
|
|
|
auto& inode = inode_vm.inode();
|
|
|
|
|
2021-01-29 16:38:49 +03:00
|
|
|
Elf32_Ehdr header;
|
2021-04-13 20:29:34 +03:00
|
|
|
auto buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&header);
|
2021-05-02 00:29:39 +03:00
|
|
|
auto result = inode.read_bytes(0, sizeof(header), buffer, nullptr);
|
|
|
|
if (result.is_error() || result.value() != sizeof(header))
|
2021-01-29 16:38:49 +03:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// The file is a valid ELF binary
|
|
|
|
if (!ELF::validate_elf_header(header, inode.size()))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The file is an ELF shared object
|
|
|
|
if (header.e_type != ET_DYN)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// FIXME: Are there any additional checks/validations we could do here?
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-01-30 12:06:52 +03:00
|
|
|
static bool validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, const Region* region = nullptr)
|
2021-01-29 16:38:49 +03:00
|
|
|
{
|
|
|
|
bool make_readable = prot & PROT_READ;
|
|
|
|
bool make_writable = prot & PROT_WRITE;
|
|
|
|
bool make_executable = prot & PROT_EXEC;
|
|
|
|
|
2021-01-29 16:46:53 +03:00
|
|
|
if (map_anonymous && make_executable)
|
|
|
|
return false;
|
|
|
|
|
2021-01-29 16:38:49 +03:00
|
|
|
if (make_writable && make_executable)
|
2020-07-31 00:38:15 +03:00
|
|
|
return false;
|
|
|
|
|
|
|
|
if (map_stack) {
|
2021-01-29 16:38:49 +03:00
|
|
|
if (make_executable)
|
|
|
|
return false;
|
|
|
|
if (!make_readable || !make_writable)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (region) {
|
|
|
|
if (make_writable && region->has_been_executable())
|
2020-07-31 00:38:15 +03:00
|
|
|
return false;
|
2021-01-29 16:38:49 +03:00
|
|
|
|
|
|
|
if (make_executable && region->has_been_writable()) {
|
2021-01-30 12:06:52 +03:00
|
|
|
if (should_make_executable_exception_for_dynamic_loader(make_readable, make_writable, make_executable, *region))
|
2021-01-29 16:38:49 +03:00
|
|
|
return true;
|
|
|
|
|
2020-07-31 00:38:15 +03:00
|
|
|
return false;
|
2021-01-29 16:38:49 +03:00
|
|
|
}
|
2020-07-31 00:38:15 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool validate_inode_mmap_prot(const Process& process, int prot, const Inode& inode, bool map_shared)
|
|
|
|
{
|
|
|
|
auto metadata = inode.metadata();
|
|
|
|
if ((prot & PROT_READ) && !metadata.may_read(process))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (map_shared) {
|
|
|
|
// FIXME: What about readonly filesystem mounts? We cannot make a
|
|
|
|
// decision here without knowing the mount flags, so we would need to
|
|
|
|
// keep a Custody or something from mmap time.
|
|
|
|
if ((prot & PROT_WRITE) && !metadata.may_write(process))
|
|
|
|
return false;
|
|
|
|
InterruptDisabler disabler;
|
2020-12-29 15:32:08 +03:00
|
|
|
if (auto shared_vmobject = inode.shared_vmobject()) {
|
|
|
|
if ((prot & PROT_EXEC) && shared_vmobject->writable_mappings())
|
2020-07-31 00:38:15 +03:00
|
|
|
return false;
|
2020-12-29 15:32:08 +03:00
|
|
|
if ((prot & PROT_WRITE) && shared_vmobject->executable_mappings())
|
2020-07-31 00:38:15 +03:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-03-01 15:49:16 +03:00
|
|
|
// mmap(2): map anonymous memory or a file into the process address space.
// Returns the virtual address of the new region, or an errno on failure.
KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
{
    REQUIRE_PROMISE(stdio);

    Syscall::SC_mmap_params params;
    if (!copy_from_user(&params, user_params))
        return EFAULT;

    FlatPtr addr = params.addr;
    auto size = params.size;
    auto alignment = params.alignment;
    auto prot = params.prot;
    auto flags = params.flags;
    auto fd = params.fd;
    auto offset = params.offset;

    // Mapping executable memory requires the prot_exec promise.
    if (prot & PROT_EXEC) {
        REQUIRE_PROMISE(prot_exec);
    }

    // NOTE: MAP_FIXED is a mapping *flag*, so it must be tested against 'flags'.
    //       It was previously (incorrectly) tested against 'prot', which meant
    //       the map_fixed promise was never actually required for fixed mappings.
    if (flags & MAP_FIXED) {
        REQUIRE_PROMISE(map_fixed);
    }

    // The requested alignment must itself be page-aligned.
    if (alignment & ~PAGE_MASK)
        return EINVAL;

    // Reject sizes whose page-rounding would overflow the address space.
    if (page_round_up_would_wrap(size))
        return EINVAL;

    if (!is_user_range(VirtualAddress(addr), page_round_up(size)))
        return EFAULT;

    // Optionally copy a user-provided region name into a kernel-owned string.
    OwnPtr<KString> name;
    if (params.name.characters) {
        if (params.name.length > PATH_MAX)
            return ENAMETOOLONG;
        auto name_or_error = try_copy_kstring_from_user(params.name);
        if (name_or_error.is_error())
            return name_or_error.error();
        name = name_or_error.release_value();
    }

    if (size == 0)
        return EINVAL;
    // The requested address must be page-aligned.
    if ((FlatPtr)addr & ~PAGE_MASK)
        return EINVAL;

    bool map_shared = flags & MAP_SHARED;
    bool map_anonymous = flags & MAP_ANONYMOUS;
    bool map_private = flags & MAP_PRIVATE;
    bool map_stack = flags & MAP_STACK;
    bool map_fixed = flags & MAP_FIXED;
    bool map_noreserve = flags & MAP_NORESERVE;
    bool map_randomized = flags & MAP_RANDOMIZED;

    // Exactly one of MAP_SHARED / MAP_PRIVATE must be specified.
    if (map_shared && map_private)
        return EINVAL;
    if (!map_shared && !map_private)
        return EINVAL;

    // A fixed address and a randomized one are mutually exclusive.
    if (map_fixed && map_randomized)
        return EINVAL;

    if (!validate_mmap_prot(prot, map_stack, map_anonymous))
        return EINVAL;

    // Stacks must be private anonymous memory.
    if (map_stack && (!map_private || !map_anonymous))
        return EINVAL;

    Region* region = nullptr;
    Optional<Range> range;

    if (map_randomized) {
        range = space().page_directory().range_allocator().allocate_randomized(page_round_up(size), alignment);
    } else {
        range = space().allocate_range(VirtualAddress(addr), size, alignment);
        if (!range.has_value()) {
            if (addr && !map_fixed) {
                // If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
                range = space().allocate_range({}, size, alignment);
            }
        }
    }

    if (!range.has_value())
        return ENOMEM;

    if (map_anonymous) {
        // MAP_NORESERVE means commit pages lazily instead of reserving up front.
        auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
        auto region_or_error = space().allocate_region(range.value(), {}, prot, strategy);
        if (region_or_error.is_error())
            return region_or_error.error().error();
        region = region_or_error.value();
    } else {
        // File-backed mapping: validate the offset and the file description.
        if (offset < 0)
            return EINVAL;
        if (static_cast<size_t>(offset) & ~PAGE_MASK)
            return EINVAL;
        auto description = file_description(fd);
        if (!description)
            return EBADF;
        if (description->is_directory())
            return ENODEV;
        // Require read access even when read protection is not requested.
        if (!description->is_readable())
            return EACCES;
        if (map_shared) {
            // Writable shared mappings write through to the file, so require write access.
            if ((prot & PROT_WRITE) && !description->is_writable())
                return EACCES;
        }
        if (description->inode()) {
            if (!validate_inode_mmap_prot(*this, prot, *description->inode(), map_shared))
                return EACCES;
        }

        auto region_or_error = description->mmap(*this, range.value(), static_cast<u64>(offset), prot, map_shared);
        if (region_or_error.is_error())
            return region_or_error.error().error();
        region = region_or_error.value();
    }

    if (!region)
        return ENOMEM;

    region->set_mmap(true);
    if (map_shared)
        region->set_shared(true);
    if (map_stack)
        region->set_stack(true);
    region->set_name(move(name));

    PerformanceManager::add_mmap_perf_event(*this, *region);

    return region->vaddr().get();
}
|
|
|
|
|
2021-02-18 20:04:58 +03:00
|
|
|
// Widens [address, address + size) outward to page granularity: the base is
// rounded down to its page and the end rounded up. Returns EINVAL whenever
// any of the intermediate arithmetic would overflow/wrap.
static KResultOr<Range> expand_range_to_page_boundaries(FlatPtr address, size_t size)
{
    if (page_round_up_would_wrap(size))
        return EINVAL;

    // Reject ranges whose end wraps around the address space.
    if ((address + size) < address)
        return EINVAL;

    if (page_round_up_would_wrap(address + size))
        return EINVAL;

    auto aligned_base = VirtualAddress { address }.page_base();
    auto aligned_end = page_round_up(address + size);
    return Range { aligned_base, aligned_end - aligned_base.get() };
}
|
|
|
|
|
2021-03-01 17:53:33 +03:00
|
|
|
// mprotect(2): change the protection bits of the page-aligned range
// [addr, addr + size). Three cases are handled, in order:
//   1. a region that exactly matches the range,
//   2. a region that strictly contains the range (carved into pieces),
//   3. several regions that together cover the range exactly.
KResultOr<int> Process::sys$mprotect(Userspace<void*> addr, size_t size, int prot)
{
    REQUIRE_PROMISE(stdio);

    // Making anything executable requires the prot_exec promise.
    if (prot & PROT_EXEC) {
        REQUIRE_PROMISE(prot_exec);
    }

    auto range_or_error = expand_range_to_page_boundaries(addr, size);
    if (range_or_error.is_error())
        return range_or_error.error();

    auto range_to_mprotect = range_or_error.value();
    if (!range_to_mprotect.size())
        return EINVAL;

    if (!is_user_range(range_to_mprotect))
        return EFAULT;

    // Case 1: the range matches an existing region exactly — just flip its bits.
    if (auto* whole_region = space().find_region_from_range(range_to_mprotect)) {
        // Only regions created via mmap may be mprotect'ed.
        if (!whole_region->is_mmap())
            return EPERM;
        if (!validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region))
            return EINVAL;
        // Nothing to do if the region already has the requested access.
        if (whole_region->access() == prot_to_region_access_flags(prot))
            return 0;
        if (whole_region->vmobject().is_inode()
            && !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(whole_region->vmobject()).inode(), whole_region->is_shared())) {
            return EACCES;
        }
        whole_region->set_readable(prot & PROT_READ);
        whole_region->set_writable(prot & PROT_WRITE);
        whole_region->set_executable(prot & PROT_EXEC);

        // Re-map so the new protection takes effect in the page tables.
        whole_region->remap();
        return 0;
    }

    // Case 2: check if we can carve out the desired range from an existing region
    if (auto* old_region = space().find_region_containing(range_to_mprotect)) {
        if (!old_region->is_mmap())
            return EPERM;
        if (!validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region))
            return EINVAL;
        if (old_region->access() == prot_to_region_access_flags(prot))
            return 0;
        if (old_region->vmobject().is_inode()
            && !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(old_region->vmobject()).inode(), old_region->is_shared())) {
            return EACCES;
        }

        // Remove the old region from our regions tree, since were going to add another region
        // with the exact same start address, but dont deallocate it yet.
        // 'region' now owns the Region object; 'old_region' keeps pointing at it.
        auto region = space().take_region(*old_region);
        VERIFY(region);

        // Unmap the old region here, specifying that we *don't* want the VM deallocated.
        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);

        // This vector is the region(s) adjacent to our range.
        // We need to allocate a new region for the range we wanted to change permission bits on.
        auto adjacent_regions = space().split_region_around_range(*region, range_to_mprotect);

        size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_mprotect.base().get() - region->range().base().get());
        auto& new_region = space().allocate_split_region(*region, range_to_mprotect, new_range_offset_in_vmobject);
        new_region.set_readable(prot & PROT_READ);
        new_region.set_writable(prot & PROT_WRITE);
        new_region.set_executable(prot & PROT_EXEC);

        // Map the new regions using our page directory (they were just allocated and don't have one).
        for (auto* adjacent_region : adjacent_regions) {
            adjacent_region->map(space().page_directory());
        }
        new_region.map(space().page_directory());
        return 0;
    }

    // Case 3: the range spans multiple regions; they must cover it completely.
    if (const auto& regions = space().find_regions_intersecting(range_to_mprotect); regions.size()) {
        size_t full_size_found = 0;
        // first check before doing anything
        for (const auto* region : regions) {
            if (!region->is_mmap())
                return EPERM;
            if (!validate_mmap_prot(prot, region->is_stack(), region->vmobject().is_anonymous(), region))
                return EINVAL;
            // NOTE(review): returning success because *one* intersecting region
            // already has the requested access skips updating all the other
            // intersecting regions — this looks like it should arguably be a
            // 'continue' (while still counting the size); confirm intended
            // semantics before changing.
            if (region->access() == prot_to_region_access_flags(prot))
                return 0;
            if (region->vmobject().is_inode()
                && !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(region->vmobject()).inode(), region->is_shared())) {
                return EACCES;
            }
            full_size_found += region->range().intersect(range_to_mprotect).size();
        }

        // The intersecting regions must cover the requested range with no holes.
        if (full_size_found != range_to_mprotect.size())
            return ENOMEM;

        // then do all the other stuff
        for (auto* old_region : regions) {
            const auto intersection_to_mprotect = range_to_mprotect.intersect(old_region->range());
            // full sub region: the whole region lies inside the range — update it in place.
            if (intersection_to_mprotect == old_region->range()) {
                old_region->set_readable(prot & PROT_READ);
                old_region->set_writable(prot & PROT_WRITE);
                old_region->set_executable(prot & PROT_EXEC);

                old_region->remap();
                continue;
            }
            // Remove the old region from our regions tree, since were going to add another region
            // with the exact same start address, but dont deallocate it yet.
            // 'old_region' stays valid: 'region' owns the same object.
            auto region = space().take_region(*old_region);
            VERIFY(region);

            // Unmap the old region here, specifying that we *don't* want the VM deallocated.
            region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);

            // This vector is the region(s) adjacent to our range.
            // We need to allocate a new region for the range we wanted to change permission bits on.
            auto adjacent_regions = space().split_region_around_range(*old_region, intersection_to_mprotect);
            // there should only be one, since a region strictly containing the
            // range would have been handled by the find_region_containing case
            // above, so the intersection always touches one edge of the region.
            VERIFY(adjacent_regions.size() == 1);

            size_t new_range_offset_in_vmobject = old_region->offset_in_vmobject() + (intersection_to_mprotect.base().get() - old_region->range().base().get());
            auto& new_region = space().allocate_split_region(*region, intersection_to_mprotect, new_range_offset_in_vmobject);
            new_region.set_readable(prot & PROT_READ);
            new_region.set_writable(prot & PROT_WRITE);
            new_region.set_executable(prot & PROT_EXEC);

            // Map the new region using our page directory (they were just allocated and don't have one) if any.
            if (adjacent_regions.size())
                adjacent_regions[0]->map(space().page_directory());

            new_region.map(space().page_directory());
        }

        return 0;
    }

    // No region covers the range at all.
    return EINVAL;
}
|
|
|
|
|
2021-03-01 17:53:33 +03:00
|
|
|
// madvise(2): advise the kernel about expected usage of a memory range.
// Supports setting/clearing/querying volatility of anonymous regions.
KResultOr<int> Process::sys$madvise(Userspace<void*> address, size_t size, int advice)
{
    REQUIRE_PROMISE(stdio);

    auto expanded_range_or_error = expand_range_to_page_boundaries(address, size);
    if (expanded_range_or_error.is_error())
        return expanded_range_or_error.error();

    auto range_to_madvise = expanded_range_or_error.value();
    if (!range_to_madvise.size())
        return EINVAL;
    if (!is_user_range(range_to_madvise))
        return EFAULT;

    // The range must exactly match an existing mmap'ed region.
    auto* region = space().find_region_from_range(range_to_madvise);
    if (!region)
        return EINVAL;
    if (!region->is_mmap())
        return EPERM;

    bool wants_volatile = advice & MADV_SET_VOLATILE;
    bool wants_nonvolatile = advice & MADV_SET_NONVOLATILE;
    // Asking for both at once is contradictory.
    if (wants_volatile && wants_nonvolatile)
        return EINVAL;

    if (wants_volatile || wants_nonvolatile) {
        // Only anonymous memory can be made (non-)volatile.
        if (!region->vmobject().is_anonymous())
            return EPERM;
        bool was_purged = false;
        auto outcome = region->set_volatile(VirtualAddress(address), size, wants_volatile, was_purged);
        if (outcome == Region::SetVolatileError::NotPurgeable)
            return EPERM;
        if (outcome == Region::SetVolatileError::OutOfMemory)
            return ENOMEM;
        // When making memory non-volatile, report whether any pages were
        // purged while it was volatile (1 = purged, 0 = intact).
        if (wants_nonvolatile && was_purged)
            return 1;
        return 0;
    }

    if (advice & MADV_GET_VOLATILE) {
        if (!region->vmobject().is_anonymous())
            return EPERM;
        // Returns 0 when the range is volatile, 1 otherwise.
        return region->is_volatile(VirtualAddress(address), size) ? 0 : 1;
    }

    return EINVAL;
}
|
|
|
|
|
2021-03-01 15:49:16 +03:00
|
|
|
// set_mmap_name(): give a human-readable name to an mmap'ed region
// (visible in e.g. process memory listings).
KResultOr<int> Process::sys$set_mmap_name(Userspace<const Syscall::SC_set_mmap_name_params*> user_params)
{
    REQUIRE_PROMISE(stdio);

    Syscall::SC_set_mmap_name_params params;
    if (!copy_from_user(&params, user_params))
        return EFAULT;

    if (params.name.length > PATH_MAX)
        return ENAMETOOLONG;

    // Copy the new name into a kernel-owned string before touching any regions.
    auto new_name_or_error = try_copy_kstring_from_user(params.name);
    if (new_name_or_error.is_error())
        return new_name_or_error.error();
    auto new_name = new_name_or_error.release_value();

    auto expanded_range_or_error = expand_range_to_page_boundaries((FlatPtr)params.addr, params.size);
    if (expanded_range_or_error.is_error())
        return expanded_range_or_error.error();

    // The given range must exactly match an existing mmap'ed region.
    auto* region = space().find_region_from_range(expanded_range_or_error.value());
    if (!region)
        return EINVAL;
    if (!region->is_mmap())
        return EPERM;

    region->set_name(move(new_name));
    PerformanceManager::add_mmap_perf_event(*this, *region);
    return 0;
}
|
|
|
|
|
2021-03-01 17:53:33 +03:00
|
|
|
// munmap(2): remove mappings covering [addr, addr + size).
KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
{
    REQUIRE_PROMISE(stdio);

    // All the heavy lifting (splitting/removing regions) happens in the address space.
    if (auto result = space().unmap_mmap_range(VirtualAddress { addr }, size); result.is_error())
        return result;
    return 0;
}
|
|
|
|
|
2021-03-01 15:49:16 +03:00
|
|
|
// mremap(2), partially implemented: the only supported operation is turning a
// shared, file-backed mapping into a private (CoW) mapping of the same inode
// at the same address. Everything else returns ENOTIMPL.
KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params*> user_params)
{
    REQUIRE_PROMISE(stdio);

    Syscall::SC_mremap_params params {};
    if (!copy_from_user(&params, user_params))
        return EFAULT;

    auto range_or_error = expand_range_to_page_boundaries((FlatPtr)params.old_address, params.old_size);
    if (range_or_error.is_error())
        return range_or_error.error().error();

    auto old_range = range_or_error.value();

    // The old range must exactly match an existing mmap'ed region.
    auto* old_region = space().find_region_from_range(old_range);
    if (!old_region)
        return EINVAL;

    if (!old_region->is_mmap())
        return EPERM;

    // Supported case: shared inode mapping being remapped as MAP_PRIVATE
    // (without MAP_ANONYMOUS/MAP_NORESERVE).
    if (old_region->vmobject().is_shared_inode() && params.flags & MAP_PRIVATE && !(params.flags & (MAP_ANONYMOUS | MAP_NORESERVE))) {
        // Capture everything we need from the old region before destroying it.
        auto range = old_region->range();
        auto old_prot = region_access_flags_to_prot(old_region->access());
        auto old_offset = old_region->offset_in_vmobject();
        NonnullRefPtr inode = static_cast<SharedInodeVMObject&>(old_region->vmobject()).inode();

        // Create the replacement private (CoW) vmobject before tearing anything down.
        auto new_vmobject = PrivateInodeVMObject::create_with_inode(inode);
        if (!new_vmobject)
            return ENOMEM;

        // Take ownership of the region's name so we can move it to the new region.
        // NOTE(review): assumes the region always carries a name here;
        // old_name->view() below would dereference null otherwise — confirm.
        auto old_name = old_region->take_name();

        // Unmap without deallocating the VM range since we're going to reuse it.
        old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
        bool success = space().deallocate_region(*old_region);
        VERIFY(success);

        // Re-create the region over the same range, now backed by the private vmobject.
        auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
        if (new_region_or_error.is_error())
            return new_region_or_error.error().error();
        auto& new_region = *new_region_or_error.value();
        new_region.set_mmap(true);
        return new_region.vaddr().get();
    }

    dbgln("sys$mremap: Unimplemented remap request (flags={})", params.flags);
    return ENOTIMPL;
}
|
|
|
|
|
2021-04-24 11:30:20 +03:00
|
|
|
// allocate_tls(): allocate the process's "master TLS" region and copy 'size'
// bytes of initial data into it from userspace. Allowed only once per process
// and only while the process is still single-threaded.
KResultOr<FlatPtr> Process::sys$allocate_tls(Userspace<const char*> initial_data, size_t size)
{
    REQUIRE_PROMISE(stdio);

    // Size must be a non-zero multiple of the page size.
    if (!size || size % PAGE_SIZE != 0)
        return EINVAL;

    // The master TLS region may only be allocated once.
    if (!m_master_tls_region.is_null())
        return EEXIST;

    // NOTE(review): EFAULT is an odd errno for "more than one thread" —
    // presumably historical; confirm before relying on it.
    if (thread_count() != 1)
        return EFAULT;

    // With exactly one thread, the first one iterated is the main thread.
    Thread* main_thread = nullptr;
    for_each_thread([&main_thread](auto& thread) {
        main_thread = &thread;
        return IterationDecision::Break;
    });
    VERIFY(main_thread);

    auto range = space().allocate_range({}, size);
    if (!range.has_value())
        return ENOMEM;

    auto region_or_error = space().allocate_region(range.value(), String("Master TLS"), PROT_READ | PROT_WRITE);
    if (region_or_error.is_error())
        return region_or_error.error().error();

    m_master_tls_region = region_or_error.value()->make_weak_ptr();
    m_master_tls_size = size;
    m_master_tls_alignment = PAGE_SIZE;

    // Copy the initial TLS image from userspace; SMAP must be disabled for
    // the kernel to read user memory, and safe_memcpy guards against faults.
    {
        Kernel::SmapDisabler disabler;
        void* fault_at;
        if (!Kernel::safe_memcpy((char*)m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), (char*)initial_data.ptr(), size, fault_at))
            return EFAULT;
    }

    // Give the main thread its own thread-specific copy of the TLS data.
    // NOTE(review): the actual error from make_thread_specific_region is
    // discarded and flattened to EFAULT — confirm that's intentional.
    auto tsr_result = main_thread->make_thread_specific_region({});
    if (tsr_result.is_error())
        return EFAULT;

    // Point the TLS segment descriptor at the main thread's TLS block.
    auto& tls_descriptor = Processor::current().get_gdt_entry(GDT_SELECTOR_TLS);
    tls_descriptor.set_base(main_thread->thread_specific_data());
    tls_descriptor.set_limit(main_thread->thread_specific_region_size());

    return m_master_tls_region.unsafe_ptr()->vaddr().get();
}
|
|
|
|
|
2021-03-01 17:53:33 +03:00
|
|
|
// msyscall(): designate a region as allowed to issue syscalls, or — when
// called with a null address — lock down the address space so that only
// previously designated regions may issue syscalls from then on.
KResultOr<int> Process::sys$msyscall(Userspace<void*> address)
{
    // Once enforcement is on, the set of syscall regions is frozen.
    if (space().enforces_syscall_regions())
        return EPERM;

    // A null address turns enforcement on.
    if (!address) {
        space().set_enforces_syscall_regions(true);
        return 0;
    }

    if (!is_user_address(VirtualAddress { address }))
        return EFAULT;

    // Look up the (mmap'ed) region containing the given address.
    auto* containing_region = space().find_region_containing(Range { VirtualAddress { address }, 1 });
    if (!containing_region)
        return EINVAL;
    if (!containing_region->is_mmap())
        return EINVAL;

    containing_region->set_syscall_region(true);
    return 0;
}
|
|
|
|
|
2020-07-31 00:38:15 +03:00
|
|
|
}
|