2019-07-16 16:03:39 +03:00
|
|
|
#include <Kernel/Process.h>
|
2019-07-29 08:26:01 +03:00
|
|
|
#include <Kernel/SharedBuffer.h>
|
2019-07-16 16:03:39 +03:00
|
|
|
|
2019-07-24 09:42:55 +03:00
|
|
|
// Returns the global id -> SharedBuffer table, guarded by its own lock.
// The map is heap-allocated on first use and intentionally never freed
// (it lives for the lifetime of the kernel).
Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>& shared_buffers()
{
    static Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>* s_map;
    if (s_map == nullptr)
        s_map = new Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>;
    return *s_map;
}
|
|
|
|
|
2019-07-20 00:14:56 +03:00
|
|
|
void SharedBuffer::sanity_check(const char* what)
|
|
|
|
{
|
|
|
|
LOCKER(shared_buffers().lock());
|
|
|
|
|
|
|
|
unsigned found_refs = 0;
|
|
|
|
for (const auto& ref : m_refs)
|
|
|
|
found_refs += ref.count;
|
|
|
|
|
|
|
|
if (found_refs != m_total_refs) {
|
|
|
|
dbgprintf("%s sanity -- SharedBuffer{%p} id: %d has total refs %d but we found %d\n", what, this, m_shared_buffer_id, m_total_refs, found_refs);
|
|
|
|
for (const auto& ref : m_refs) {
|
|
|
|
dbgprintf(" ref from pid %d: refcnt %d\n", ref.pid, ref.count);
|
|
|
|
}
|
|
|
|
ASSERT_NOT_REACHED();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-16 16:03:39 +03:00
|
|
|
// Returns true if `peer_pid` may access this buffer: either the buffer
// is global, or the pid has an entry in m_refs.
bool SharedBuffer::is_shared_with(pid_t peer_pid)
{
    LOCKER(shared_buffers().lock());
    // A global buffer is considered shared with everyone.
    if (m_global)
        return true;
    for (int i = 0; i < m_refs.size(); ++i) {
        if (m_refs[i].pid == peer_pid)
            return true;
    }
    return false;
}
|
|
|
|
|
2019-07-19 18:46:21 +03:00
|
|
|
// Takes a reference on behalf of `process`, lazily mapping the buffer
// into its address space on the first ref, and returns the mapped
// virtual address. The process must already be allowed to see the
// buffer (asserted via is_shared_with()).
void* SharedBuffer::ref_for_process_and_get_address(Process& process)
{
    LOCKER(shared_buffers().lock());
    ASSERT(is_shared_with(process.pid()));
    // A global buffer can be reached by a process that was never explicitly
    // shared with it, so make sure a Reference entry exists before counting.
    if (m_global) {
        bool already_tracked = false;
        for (auto& existing_ref : m_refs) {
            if (existing_ref.pid == process.pid()) {
                already_tracked = true;
                break;
            }
        }
        if (!already_tracked)
            m_refs.append(Reference(process.pid()));
    }

    for (auto& ref : m_refs) {
        if (ref.pid != process.pid())
            continue;
        ref.count++;
        m_total_refs++;
        // First ref from this process: map the VM object into it now.
        if (ref.region == nullptr) {
            ref.region = process.allocate_region_with_vmo(VirtualAddress(), size(), m_vmobject, 0, "SharedBuffer", PROT_READ | (m_writable ? PROT_WRITE : 0));
            ref.region->set_shared(true);
        }
        sanity_check("ref_for_process_and_get_address");
        return ref.region->vaddr().as_ptr();
    }
    // is_shared_with() guaranteed an entry exists, so we must have returned.
    ASSERT_NOT_REACHED();
}
|
|
|
|
|
|
|
|
// Grants `peer_pid` access to this buffer by recording a Reference for it.
// The count stays at zero here; it is only bumped when the peer actually
// maps the buffer (see ref_for_process_and_get_address()).
void SharedBuffer::share_with(pid_t peer_pid)
{
    LOCKER(shared_buffers().lock());
    for (auto& existing_ref : m_refs) {
        if (existing_ref.pid != peer_pid)
            continue;
        // Already shared with this pid; don't touch the count —
        // let them get_shared_buffer it first.
        sanity_check("share_with (old ref)");
        return;
    }
    m_refs.append(Reference(peer_pid));
    sanity_check("share_with (new ref)");
}
|
|
|
|
|
2019-07-19 18:46:21 +03:00
|
|
|
// Drops one reference held by `process`. When that process's count
// reaches zero, its mapping is torn down, its Reference entry removed,
// and the buffer destroyed if no references remain anywhere.
// Panics if the process holds no reference at all.
void SharedBuffer::deref_for_process(Process& process)
{
    LOCKER(shared_buffers().lock());
    for (int index = 0; index < m_refs.size(); ++index) {
        auto& ref = m_refs[index];
        if (ref.pid != process.pid())
            continue;
        ref.count--;
        m_total_refs--;
        if (ref.count == 0) {
#ifdef SHARED_BUFFER_DEBUG
            dbgprintf("Releasing shared buffer reference on %d of size %d by PID %d\n", m_shared_buffer_id, size(), process.pid());
#endif
            process.deallocate_region(*ref.region);
            m_refs.remove(index);
#ifdef SHARED_BUFFER_DEBUG
            dbgprintf("Released shared buffer reference on %d of size %d by PID %d\n", m_shared_buffer_id, size(), process.pid());
#endif
            sanity_check("deref_for_process");
            destroy_if_unused();
        }
        // The matching entry was found and handled either way; we're done.
        return;
    }

    ASSERT_NOT_REACHED();
}
|
|
|
|
|
|
|
|
// Forgets all references held by `pid` (e.g. when that process goes away),
// then destroys the buffer if nobody else holds a reference.
// Silently does nothing if the pid has no entry.
void SharedBuffer::disown(pid_t pid)
{
    LOCKER(shared_buffers().lock());
    for (int index = 0; index < m_refs.size(); ++index) {
        auto& ref = m_refs[index];
        if (ref.pid != pid)
            continue;
#ifdef SHARED_BUFFER_DEBUG
        dbgprintf("Disowning shared buffer %d of size %d by PID %d\n", m_shared_buffer_id, size(), pid);
#endif
        // Drop every ref this pid held in one go, then remove its entry.
        m_total_refs -= ref.count;
        m_refs.remove(index);
#ifdef SHARED_BUFFER_DEBUG
        dbgprintf("Disowned shared buffer %d of size %d by PID %d\n", m_shared_buffer_id, size(), pid);
#endif
        destroy_if_unused();
        return;
    }
}
|
|
|
|
|
|
|
|
void SharedBuffer::destroy_if_unused()
|
|
|
|
{
|
|
|
|
LOCKER(shared_buffers().lock());
|
2019-07-20 00:14:56 +03:00
|
|
|
sanity_check("destroy_if_unused");
|
2019-07-19 18:46:21 +03:00
|
|
|
if (m_total_refs == 0) {
|
2019-07-16 16:03:39 +03:00
|
|
|
#ifdef SHARED_BUFFER_DEBUG
|
|
|
|
kprintf("Destroying unused SharedBuffer{%p} id: %d\n", this, m_shared_buffer_id);
|
|
|
|
#endif
|
|
|
|
auto count_before = shared_buffers().resource().size();
|
|
|
|
shared_buffers().resource().remove(m_shared_buffer_id);
|
|
|
|
ASSERT(count_before != shared_buffers().resource().size());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void SharedBuffer::seal()
|
|
|
|
{
|
|
|
|
LOCKER(shared_buffers().lock());
|
|
|
|
m_writable = false;
|
|
|
|
for (auto& ref : m_refs) {
|
|
|
|
if (ref.region) {
|
|
|
|
ref.region->set_writable(false);
|
2019-11-03 22:59:54 +03:00
|
|
|
ref.region->remap();
|
2019-07-16 16:03:39 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|