/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Singleton.h>
#include <Kernel/Process.h>
#include <Kernel/SharedBuffer.h>

//#define SHARED_BUFFER_DEBUG

namespace Kernel {

static AK::Singleton<Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>> s_map;
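
// Accessor for the global map of shbuf id -> SharedBuffer. The map is
// wrapped in a Lockable so callers can take its lock in shared or
// exclusive mode as needed.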
Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>& shared_buffers()
{
    return *s_map;
}
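
// Sanity check: the per-process reference counts in m_refs must sum to
// m_total_refs. If they have diverged, dump the refs and panic.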
void SharedBuffer::sanity_check(const char* what)
{
    LOCKER(shared_buffers().lock(), Lock::Mode::Shared);

    unsigned found_refs = 0;
    for (const auto& ref : m_refs)
        found_refs += ref.count;

    if (found_refs != m_total_refs) {
        dbgln("{} sanity -- SharedBuffer({}) id: {} has total refs {} but we found {}",
            what,
            this,
            m_shbuf_id,
            m_total_refs,
            found_refs);
        for (const auto& ref : m_refs)
            dbgln("  ref from pid {}: reference count {}", ref.pid.value(), ref.count);
        ASSERT_NOT_REACHED();
    }
}
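
// A buffer is considered shared with a peer if it's global, or if the
// peer holds an entry in m_refs.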
bool SharedBuffer::is_shared_with(ProcessID peer_pid) const
{
    LOCKER(shared_buffers().lock(), Lock::Mode::Shared);
    if (m_global)
        return true;
    for (auto& ref : m_refs) {
        if (ref.pid == peer_pid) {
            return true;
        }
    }

    return false;
}
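
// Takes a reference on behalf of `process`, lazily mapping the buffer
// into its address space on first use, and returns the mapped address.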
void* SharedBuffer::ref_for_process_and_get_address(Process& process)
{
    LOCKER(shared_buffers().lock());
    ASSERT(is_shared_with(process.pid()));
    if (m_global) {
        bool found = false;
        for (auto& ref : m_refs) {
            if (ref.pid == process.pid()) {
                found = true;
                break;
            }
        }
        if (!found)
            m_refs.append(Reference(process.pid()));
    }

    for (auto& ref : m_refs) {
        if (ref.pid == process.pid()) {
            if (!ref.region) {
                auto* region = process.allocate_region_with_vmobject(VirtualAddress(), size(), m_vmobject, 0, "SharedBuffer", PROT_READ | (m_writable ? PROT_WRITE : 0), true);
                if (!region)
                    return (void*)-ENOMEM;
                ref.region = region;
            }
            ref.count++;
            m_total_refs++;
            sanity_check("ref_for_process_and_get_address");
            return ref.region.unsafe_ptr()->vaddr().as_ptr(); // TODO: Region needs to be RefCounted!
        }
    }
    ASSERT_NOT_REACHED();
}
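
// Grants peer_pid access to this buffer. Note that the peer's reference
// count is only bumped later, when it actually maps the buffer.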
void SharedBuffer::share_with(ProcessID peer_pid)
{
    LOCKER(shared_buffers().lock());
    if (m_global)
        return;
    for (auto& ref : m_refs) {
        if (ref.pid == peer_pid) {
            // don't increment the reference count yet; let them shbuf_get it first.
            sanity_check("share_with (old ref)");
            return;
        }
    }

    m_refs.append(Reference(peer_pid));
    sanity_check("share_with (new ref)");
}
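
// Clones all of from_process's references (including their counts) over
// to with_process, used when duplicating a process (e.g. on fork).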
void SharedBuffer::share_all_shared_buffers(Process& from_process, Process& with_process)
{
    LOCKER(shared_buffers().lock());
    for (auto& shbuf : shared_buffers().resource()) {
        auto& shared_buffer = *shbuf.value;
        // We need to clone all references (including for global shared buffers),
        // and the reference counts as well.
        for (auto& ref : shared_buffer.m_refs) {
            if (ref.pid == from_process.pid()) {
                auto ref_count = ref.count;
                shared_buffer.m_refs.append(Reference(with_process.pid(), ref_count));
                // NOTE: ref may become invalid after we appended!
                shared_buffer.m_total_refs += ref_count;
                break;
            }
        }
    }
}
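
// Drops one of `process`'s references. When its count reaches zero, the
// region is unmapped, and the buffer is destroyed if nobody else uses it.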
void SharedBuffer::deref_for_process(Process& process)
{
    LOCKER(shared_buffers().lock());
    for (size_t i = 0; i < m_refs.size(); ++i) {
        auto& ref = m_refs[i];
        if (ref.pid == process.pid()) {
            ASSERT(ref.count > 0);
            ref.count--;
            ASSERT(m_total_refs > 0);
            m_total_refs--;
            if (ref.count == 0) {
#ifdef SHARED_BUFFER_DEBUG
                dbg() << "Releasing shared buffer reference on " << m_shbuf_id << " of size " << size() << " by PID " << process.pid().value();
#endif
                process.deallocate_region(*ref.region.unsafe_ptr()); // TODO: Region needs to be RefCounted!
#ifdef SHARED_BUFFER_DEBUG
                dbg() << "Released shared buffer reference on " << m_shbuf_id << " of size " << size() << " by PID " << process.pid().value();
#endif
                sanity_check("deref_for_process");
                destroy_if_unused();
                return;
            }
            return;
        }
    }

    ASSERT_NOT_REACHED();
}
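
// Removes all of `pid`'s references in one go (e.g. when a process dies).
// Returns true if the buffer is now completely unreferenced.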
bool SharedBuffer::disown(ProcessID pid)
{
    LOCKER(shared_buffers().lock());
    for (size_t i = 0; i < m_refs.size(); ++i) {
        auto& ref = m_refs[i];
        if (ref.pid == pid) {
#ifdef SHARED_BUFFER_DEBUG
            dbg() << "Disowning shared buffer " << m_shbuf_id << " of size " << size() << " by PID " << pid.value();
#endif
            ASSERT(m_total_refs >= ref.count);
            m_total_refs -= ref.count;
            m_refs.unstable_take(i);
#ifdef SHARED_BUFFER_DEBUG
            dbg() << "Disowned shared buffer " << m_shbuf_id << " of size " << size() << " by PID " << pid.value();
#endif
            destroy_if_unused();
            break;
        }
    }

    return m_total_refs == 0;
}
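
// Drops the buffer from the global map (destroying it) once the last
// reference is gone.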
void SharedBuffer::destroy_if_unused()
{
    LOCKER(shared_buffers().lock());
    sanity_check("destroy_if_unused");
    if (m_total_refs == 0) {
#ifdef SHARED_BUFFER_DEBUG
        dbg() << "Destroying unused SharedBuffer{" << this << "} id: " << m_shbuf_id;
#endif
        auto count_before = shared_buffers().resource().size();
        shared_buffers().resource().remove(m_shbuf_id);
        ASSERT(count_before != shared_buffers().resource().size());
    }
}
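
// Seals the buffer: makes it read-only for everyone by revoking write
// access from all existing mappings and any future ones (via m_writable).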
void SharedBuffer::seal()
{
    LOCKER(shared_buffers().lock());
    m_writable = false;
    for (auto& ref : m_refs) {
        // TODO: Region needs to be RefCounted!
        if (auto* region = ref.region.unsafe_ptr()) {
            region->set_writable(false);
            region->remap();
        }
    }
}
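
// Marks the calling process's mapping of this buffer volatile (or
// non-volatile); `was_purged` reports whether the contents were lost.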
auto SharedBuffer::set_volatile_all(bool is_volatile, bool& was_purged) -> SetVolatileError
{
    was_purged = false;
    auto pid = Process::current()->pid();
    LOCKER(shared_buffers().lock());
    for (size_t i = 0; i < m_refs.size(); ++i) {
        auto& ref = m_refs[i];
        if (ref.pid == pid) {
            if (Region* region = ref.region.unsafe_ptr()) {
                switch (region->set_volatile(region->vaddr(), region->size(), is_volatile, was_purged)) {
                case Region::SetVolatileError::Success:
                    return SetVolatileError::Success;
                case Region::SetVolatileError::NotPurgeable:
                    return SetVolatileError::NotPurgeable;
                case Region::SetVolatileError::OutOfMemory:
                    return SetVolatileError::OutOfMemory;
                }
            }
        }
    }
    return SetVolatileError::NotMapped;
}

}