/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Singleton.h>
#include <AK/StringBuilder.h>
#include <AK/StringView.h>
#include <Kernel/API/Ioctl.h>
#include <Kernel/API/POSIX/errno.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/OpenFileDescription.h>
#include <Kernel/Net/ARP.h>
#include <Kernel/Net/ICMP.h>
#include <Kernel/Net/IPv4.h>
#include <Kernel/Net/IPv4Socket.h>
#include <Kernel/Net/NetworkAdapter.h>
#include <Kernel/Net/NetworkingManagement.h>
#include <Kernel/Net/Routing.h>
#include <Kernel/Net/TCP.h>
#include <Kernel/Net/TCPSocket.h>
#include <Kernel/Net/UDP.h>
#include <Kernel/Net/UDPSocket.h>
#include <Kernel/Tasks/Process.h>
#include <Kernel/UnixTypes.h>

namespace Kernel {

static Singleton<MutexProtected<IPv4Socket::List>> s_all_sockets;

using BlockFlags = Thread::OpenFileDescriptionBlocker::BlockFlags;

MutexProtected<IPv4Socket::List>& IPv4Socket::all_sockets()
{
    return *s_all_sockets;
}

ErrorOr<NonnullOwnPtr<DoubleBuffer>> IPv4Socket::try_create_receive_buffer()
{
    return DoubleBuffer::try_create("IPv4Socket: Receive buffer"sv, receive_buffer_size);
}
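
// Dispatch socket creation on the requested type: SOCK_STREAM becomes a TCPSocket,
// SOCK_DGRAM a UDPSocket, and SOCK_RAW a plain IPv4Socket; anything else is EINVAL.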
ErrorOr<NonnullRefPtr<Socket>> IPv4Socket::create(int type, int protocol)
{
    auto receive_buffer = TRY(IPv4Socket::try_create_receive_buffer());

    if (type == SOCK_STREAM)
        return TRY(TCPSocket::try_create(protocol, move(receive_buffer)));
    if (type == SOCK_DGRAM)
        return TRY(UDPSocket::try_create(protocol, move(receive_buffer)));
    if (type == SOCK_RAW) {
        auto raw_socket = adopt_ref_if_nonnull(new (nothrow) IPv4Socket(type, protocol, move(receive_buffer), {}));
        if (raw_socket)
            return raw_socket.release_nonnull();
        return ENOMEM;
    }
    return EINVAL;
}

IPv4Socket::IPv4Socket(int type, int protocol, NonnullOwnPtr<DoubleBuffer> receive_buffer, OwnPtr<KBuffer> optional_scratch_buffer)
    : Socket(AF_INET, type, protocol)
    , m_receive_buffer(move(receive_buffer))
    , m_scratch_buffer(move(optional_scratch_buffer))
{
    dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket({}) created with type={}, protocol={}", this, type, protocol);
    m_buffer_mode = type == SOCK_STREAM ? BufferMode::Bytes : BufferMode::Packets;
    if (m_buffer_mode == BufferMode::Bytes) {
        VERIFY(m_scratch_buffer);
    }

    all_sockets().with_exclusive([&](auto& table) {
        table.append(*this);
    });
}

IPv4Socket::~IPv4Socket()
{
    all_sockets().with_exclusive([&](auto& table) {
        table.remove(*this);
    });
}

void IPv4Socket::get_local_address(sockaddr* address, socklen_t* address_size)
{
    sockaddr_in local_address = { AF_INET, htons(m_local_port), { m_local_address.to_in_addr_t() }, { 0 } };
    memcpy(address, &local_address, min(static_cast<size_t>(*address_size), sizeof(sockaddr_in)));
    *address_size = sizeof(sockaddr_in);
}

void IPv4Socket::get_peer_address(sockaddr* address, socklen_t* address_size)
{
    sockaddr_in peer_address = { AF_INET, htons(m_peer_port), { m_peer_address.to_in_addr_t() }, { 0 } };
    memcpy(address, &peer_address, min(static_cast<size_t>(*address_size), sizeof(sockaddr_in)));
    *address_size = sizeof(sockaddr_in);
}
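
// A socket is bound at most once. bind() binds it explicitly; listen(), connect() and
// sendto() bind it implicitly via ensure_bound(), which asks the protocol to pick an
// ephemeral port in protocol_bind() when local_port() is still 0.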
ErrorOr<void> IPv4Socket::ensure_bound()
{
    dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket::ensure_bound() m_bound {}", m_bound);
    if (m_bound)
        return {};

    auto result = protocol_bind();
    if (!result.is_error())
        m_bound = true;
    return result;
}

ErrorOr<void> IPv4Socket::bind(Credentials const& credentials, Userspace<sockaddr const*> user_address, socklen_t address_size)
{
    if (m_bound)
        return set_so_error(EINVAL);

    VERIFY(setup_state() == SetupState::Unstarted);
    if (address_size != sizeof(sockaddr_in))
        return set_so_error(EINVAL);

    sockaddr_in address {};
    SOCKET_TRY(copy_from_user(&address, user_address, sizeof(sockaddr_in)));

    if (address.sin_family != AF_INET)
        return set_so_error(EINVAL);

    auto requested_local_port = ntohs(address.sin_port);
    if (!credentials.is_superuser()) {
        if (requested_local_port > 0 && requested_local_port < 1024) {
            dbgln("UID {} attempted to bind {} to port {}", credentials.uid(), class_name(), requested_local_port);
            return set_so_error(EACCES);
        }
    }

    m_local_address = IPv4Address((u8 const*)&address.sin_addr.s_addr);
    m_local_port = requested_local_port;

    dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket::bind {}({}) to {}:{}", class_name(), this, m_local_address, m_local_port);

    return ensure_bound();
}

ErrorOr<void> IPv4Socket::listen(size_t backlog)
{
    MutexLocker locker(mutex());
    TRY(ensure_bound());
    set_backlog(backlog);
    set_role(Role::Listener);
    evaluate_block_conditions();

    dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket({}) listening with backlog={}", this, backlog);

    return protocol_listen();
}

ErrorOr<void> IPv4Socket::connect(Credentials const&, OpenFileDescription& description, Userspace<sockaddr const*> address, socklen_t address_size)
{
    if (address_size != sizeof(sockaddr_in))
        return set_so_error(EINVAL);
    u16 sa_family_copy;
    auto* user_address = reinterpret_cast<sockaddr const*>(address.unsafe_userspace_ptr());
    SOCKET_TRY(copy_from_user(&sa_family_copy, &user_address->sa_family, sizeof(u16)));
    if (sa_family_copy != AF_INET)
        return set_so_error(EINVAL);
    if (m_role == Role::Connected)
        return set_so_error(EISCONN);

    sockaddr_in safe_address {};
    SOCKET_TRY(copy_from_user(&safe_address, (sockaddr_in const*)user_address, sizeof(sockaddr_in)));

    m_peer_address = IPv4Address((u8 const*)&safe_address.sin_addr.s_addr);
    if (m_peer_address == IPv4Address { 0, 0, 0, 0 })
        m_peer_address = IPv4Address { 127, 0, 0, 1 };
    m_peer_port = ntohs(safe_address.sin_port);

    return protocol_connect(description);
}

bool IPv4Socket::can_read(OpenFileDescription const&, u64) const
{
    if (m_role == Role::Listener)
        return can_accept();
    if (protocol_is_disconnected())
        return true;
    return m_can_read;
}

bool IPv4Socket::can_write(OpenFileDescription const&, u64) const
{
    return true;
}
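
// For SOCK_RAW the payload is framed and handed to the adapter directly; for TCP/UDP the
// heavy lifting is deferred to protocol_send(). The destination is resolved via route_to(),
// honoring MSG_DONTROUTE, the broadcast-allowed flag, and any bound interface.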
ErrorOr<size_t> IPv4Socket::sendto(OpenFileDescription&, UserOrKernelBuffer const& data, size_t data_length, [[maybe_unused]] int flags, Userspace<sockaddr const*> addr, socklen_t addr_length)
{
    MutexLocker locker(mutex());

    if (addr && addr_length != sizeof(sockaddr_in))
        return set_so_error(EINVAL);

    if (addr) {
        sockaddr_in ia {};
        SOCKET_TRY(copy_from_user(&ia, Userspace<sockaddr_in const*>(addr.ptr())));

        if (ia.sin_family != AF_INET) {
            dmesgln("sendto: Bad address family: {} is not AF_INET", ia.sin_family);
            return set_so_error(EAFNOSUPPORT);
        }

        if (type() != SOCK_STREAM) {
            m_peer_address = IPv4Address((u8 const*)&ia.sin_addr.s_addr);
            m_peer_port = ntohs(ia.sin_port);
        }
    }

    if (!is_connected() && m_peer_address.is_zero())
        return set_so_error(EPIPE);

    auto allow_broadcast = m_broadcast_allowed ? AllowBroadcast::Yes : AllowBroadcast::No;
    auto allow_using_gateway = ((flags & MSG_DONTROUTE) || m_routing_disabled) ? AllowUsingGateway::No : AllowUsingGateway::Yes;
    auto adapter = bound_interface().with([](auto& bound_device) -> RefPtr<NetworkAdapter> { return bound_device; });
    auto routing_decision = route_to(m_peer_address, m_local_address, adapter, allow_broadcast, allow_using_gateway);
    if (routing_decision.is_zero())
        return set_so_error(EHOSTUNREACH);

    if (m_local_address.to_u32() == 0)
        m_local_address = routing_decision.adapter->ipv4_address();

    TRY(ensure_bound());

    dbgln_if(IPV4_SOCKET_DEBUG, "sendto: destination={}:{}", m_peer_address, m_peer_port);

    if (type() == SOCK_RAW) {
        auto ipv4_payload_offset = routing_decision.adapter->ipv4_payload_offset();
        data_length = min(data_length, routing_decision.adapter->mtu() - ipv4_payload_offset);
        auto packet = routing_decision.adapter->acquire_packet_buffer(ipv4_payload_offset + data_length);
        if (!packet)
            return set_so_error(ENOMEM);
        routing_decision.adapter->fill_in_ipv4_header(*packet, local_address(), routing_decision.next_hop,
            m_peer_address, (IPv4Protocol)protocol(), data_length, m_type_of_service, m_ttl);
        if (auto result = data.read(packet->buffer->data() + ipv4_payload_offset, data_length); result.is_error()) {
            routing_decision.adapter->release_packet_buffer(*packet);
            return set_so_error(result.release_error());
        }
        routing_decision.adapter->send_packet(packet->bytes());
        routing_decision.adapter->release_packet_buffer(*packet);
        return data_length;
    }

    auto nsent_or_error = protocol_send(data, data_length);
    if (!nsent_or_error.is_error())
        Thread::current()->did_ipv4_socket_write(nsent_or_error.value());
    return nsent_or_error;
}

ErrorOr<size_t> IPv4Socket::receive_byte_buffered(OpenFileDescription& description, UserOrKernelBuffer& buffer, size_t buffer_length, int flags, Userspace<sockaddr*>, Userspace<socklen_t*>, bool blocking)
{
    MutexLocker locker(mutex());

    VERIFY(m_receive_buffer);

    if (m_receive_buffer->is_empty()) {
        if (protocol_is_disconnected())
            return 0;
        if (!blocking)
            return set_so_error(EAGAIN);

        locker.unlock();
        auto unblocked_flags = BlockFlags::None;
        auto res = Thread::current()->block<Thread::ReadBlocker>({}, description, unblocked_flags);
        locker.lock();

        if (!has_flag(unblocked_flags, BlockFlags::Read)) {
            if (res.was_interrupted())
                return set_so_error(EINTR);

            // Unblocked due to timeout.
            return set_so_error(EAGAIN);
        }
    }

    ErrorOr<size_t> nreceived_or_error { 0 };
    if (flags & MSG_PEEK)
        nreceived_or_error = m_receive_buffer->peek(buffer, buffer_length);
    else
        nreceived_or_error = m_receive_buffer->read(buffer, buffer_length);

    if (!nreceived_or_error.is_error() && nreceived_or_error.value() > 0 && !(flags & MSG_PEEK))
        Thread::current()->did_ipv4_socket_read(nreceived_or_error.value());

    set_can_read(!m_receive_buffer->is_empty());
    return nreceived_or_error;
}

ErrorOr<size_t> IPv4Socket::receive_packet_buffered(OpenFileDescription& description, UserOrKernelBuffer& buffer, size_t buffer_length, int flags, Userspace<sockaddr*> addr, Userspace<socklen_t*> addr_length, UnixDateTime& packet_timestamp, bool blocking)
{
    MutexLocker locker(mutex());
    ReceivedPacket taken_packet;
    ReceivedPacket* packet { nullptr };
    {
        if (m_receive_queue.is_empty()) {
            // FIXME: Shouldn't this return ENOTCONN instead of EOF?
            //        But if so, we still need to deliver at least one EOF read to userspace.. right?
            if (protocol_is_disconnected())
                return 0;
            if (!blocking)
                return set_so_error(EAGAIN);
        }

        if (!m_receive_queue.is_empty()) {
            if (flags & MSG_PEEK) {
                packet = &m_receive_queue.first();
            } else {
                taken_packet = m_receive_queue.take_first();
                packet = &taken_packet;
            }

            set_can_read(!m_receive_queue.is_empty());

            dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket({}): recvfrom without blocking {} bytes, packets in queue: {}",
                this,
                packet->data->size(),
                m_receive_queue.size());
        }
    }

    if (!packet) {
        if (protocol_is_disconnected()) {
            dbgln("IPv4Socket({}) is protocol-disconnected, returning 0 in recvfrom!", this);
            return 0;
        }

        locker.unlock();
        auto unblocked_flags = BlockFlags::None;
        auto res = Thread::current()->block<Thread::ReadBlocker>({}, description, unblocked_flags);
        locker.lock();

        if (!has_flag(unblocked_flags, BlockFlags::Read)) {
            if (res.was_interrupted())
                return set_so_error(EINTR);

            // Unblocked due to timeout.
            return set_so_error(EAGAIN);
        }
        VERIFY(m_can_read);
        VERIFY(!m_receive_queue.is_empty());

        if (flags & MSG_PEEK) {
            packet = &m_receive_queue.first();
        } else {
            taken_packet = m_receive_queue.take_first();
            packet = &taken_packet;
        }

        set_can_read(!m_receive_queue.is_empty());

        dbgln_if(IPV4_SOCKET_DEBUG, "IPv4Socket({}): recvfrom with blocking {} bytes, packets in queue: {}",
            this,
            packet->data->size(),
            m_receive_queue.size());
    }
    VERIFY(packet->data);

    packet_timestamp = packet->timestamp;

    if (addr) {
        dbgln_if(IPV4_SOCKET_DEBUG, "Incoming packet is from: {}:{}", packet->peer_address, packet->peer_port);

        sockaddr_in out_addr {};
        memcpy(&out_addr.sin_addr, &packet->peer_address, sizeof(IPv4Address));
        out_addr.sin_port = htons(packet->peer_port);
        out_addr.sin_family = AF_INET;
        Userspace<sockaddr_in*> dest_addr = addr.ptr();
        SOCKET_TRY(copy_to_user(dest_addr, &out_addr));

        socklen_t out_length = sizeof(sockaddr_in);
        VERIFY(addr_length);
        SOCKET_TRY(copy_to_user(addr_length, &out_length));
    }

    if (type() == SOCK_RAW) {
        size_t bytes_written = min(packet->data->size(), buffer_length);
        SOCKET_TRY(buffer.write(packet->data->data(), bytes_written));
        return bytes_written;
    }

    return protocol_receive(packet->data->bytes(), buffer, buffer_length, flags);
}
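
// recvfrom() dispatches on the buffering mode chosen in the constructor: byte-buffered for
// SOCK_STREAM, packet-buffered otherwise. With MSG_WAITALL it keeps reading until the
// buffer is full or an error occurs.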
ErrorOr<size_t> IPv4Socket::recvfrom(OpenFileDescription& description, UserOrKernelBuffer& buffer, size_t buffer_length, int flags, Userspace<sockaddr*> user_addr, Userspace<socklen_t*> user_addr_length, UnixDateTime& packet_timestamp, bool blocking)
{
    if (user_addr_length) {
        socklen_t addr_length;
        SOCKET_TRY(copy_from_user(&addr_length, user_addr_length.unsafe_userspace_ptr()));
        if (addr_length < sizeof(sockaddr_in))
            return set_so_error(EINVAL);
    }

    dbgln_if(IPV4_SOCKET_DEBUG, "recvfrom: type={}, local_port={}", type(), local_port());

    ErrorOr<size_t> total_nreceived = 0;
    do {
        auto offset_buffer = buffer.offset(total_nreceived.value());
        auto offset_buffer_length = buffer_length - total_nreceived.value();

        ErrorOr<size_t> nreceived = 0;
        if (buffer_mode() == BufferMode::Bytes)
            nreceived = receive_byte_buffered(description, offset_buffer, offset_buffer_length, flags, user_addr, user_addr_length, blocking);
        else
            nreceived = receive_packet_buffered(description, offset_buffer, offset_buffer_length, flags, user_addr, user_addr_length, packet_timestamp, blocking);

        if (nreceived.is_error())
            total_nreceived = move(nreceived);
        else
            total_nreceived.value() += nreceived.value();
    } while ((flags & MSG_WAITALL) && !total_nreceived.is_error() && total_nreceived.value() < buffer_length);

    if (!total_nreceived.is_error())
        Thread::current()->did_ipv4_socket_read(total_nreceived.value());
    return total_nreceived;
}
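
// Invoked for each incoming packet destined for this socket. In byte mode the payload is
// appended to the receive DoubleBuffer; in packet mode it is queued (the queue is capped at
// 2000 packets). Returns false if the packet had to be dropped.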
bool IPv4Socket::did_receive(IPv4Address const& source_address, u16 source_port, ReadonlyBytes packet, UnixDateTime const& packet_timestamp)
{
    MutexLocker locker(mutex());

    if (is_shut_down_for_reading())
        return false;

    auto packet_size = packet.size();

    if (buffer_mode() == BufferMode::Bytes) {
        VERIFY(m_receive_buffer);

        size_t space_in_receive_buffer = m_receive_buffer->space_for_writing();
        if (packet_size > space_in_receive_buffer) {
            dbgln("IPv4Socket({}): did_receive refusing packet since buffer is full.", this);
            VERIFY(m_can_read);
            return false;
        }
        auto scratch_buffer = UserOrKernelBuffer::for_kernel_buffer(m_scratch_buffer->data());
        auto nreceived_or_error = protocol_receive(packet, scratch_buffer, m_scratch_buffer->size(), 0);
        if (nreceived_or_error.is_error())
            return false;
        auto nwritten_or_error = m_receive_buffer->write(scratch_buffer, nreceived_or_error.value());
        if (nwritten_or_error.is_error())
            return false;
        set_can_read(!m_receive_buffer->is_empty());
    } else {
        if (m_receive_queue.size() > 2000) {
            dbgln("IPv4Socket({}): did_receive refusing packet since queue is full.", this);
            return false;
        }
        auto data_or_error = KBuffer::try_create_with_bytes("IPv4Socket: Packet buffer"sv, packet);
        if (data_or_error.is_error()) {
            dbgln("IPv4Socket: did_receive unable to allocate storage for incoming packet.");
            return false;
        }
        auto result = m_receive_queue.try_append({ source_address, source_port, packet_timestamp, data_or_error.release_value() });
        if (result.is_error()) {
            dbgln("IPv4Socket: Dropped incoming packet because appending to the receive queue failed.");
            return false;
        }
        set_can_read(true);
    }
    m_bytes_received += packet_size;

    if constexpr (IPV4_SOCKET_DEBUG) {
        if (buffer_mode() == BufferMode::Bytes)
            dbgln("IPv4Socket({}): did_receive {} bytes, total_received={}", this, packet_size, m_bytes_received);
        else
            dbgln("IPv4Socket({}): did_receive {} bytes, total_received={}, packets in queue: {}",
                this,
                packet_size,
                m_bytes_received,
                m_receive_queue.size());
    }

    return true;
}

ErrorOr<NonnullOwnPtr<KString>> IPv4Socket::pseudo_path(OpenFileDescription const&) const
{
    if (m_role == Role::None)
        return KString::try_create("socket"sv);

    StringBuilder builder;
    TRY(builder.try_append("socket:"sv));

    TRY(builder.try_appendff("{}:{}", TRY(m_local_address.to_string()), m_local_port));
    if (m_role == Role::Accepted || m_role == Role::Connected)
        TRY(builder.try_appendff(" / {}:{}", TRY(m_peer_address.to_string()), m_peer_port));

    switch (m_role) {
    case Role::Listener:
        TRY(builder.try_append(" (listening)"sv));
        break;
    case Role::Accepted:
        TRY(builder.try_append(" (accepted)"sv));
        break;
    case Role::Connected:
        TRY(builder.try_append(" (connected)"sv));
        break;
    case Role::Connecting:
        TRY(builder.try_append(" (connecting)"sv));
        break;
    default:
        VERIFY_NOT_REACHED();
    }

    return KString::try_create(builder.string_view());
}

ErrorOr<void> IPv4Socket::setsockopt(int level, int option, Userspace<void const*> user_value, socklen_t user_value_size)
{
    if (level != IPPROTO_IP)
        return Socket::setsockopt(level, option, user_value, user_value_size);

    MutexLocker locker(mutex());

    switch (option) {
    case IP_TTL: {
        if (user_value_size < sizeof(int))
            return EINVAL;
        int value;
        TRY(copy_from_user(&value, static_ptr_cast<int const*>(user_value)));
        if (value < 0 || value > 255)
            return EINVAL;
        m_ttl = value;
        return {};
    }
    case IP_TOS: {
        if (user_value_size < sizeof(int))
            return EINVAL;
        int value;
        TRY(copy_from_user(&value, static_ptr_cast<int const*>(user_value)));
        if (value < 0 || value > 255)
            return EINVAL;
        m_type_of_service = value;
        return {};
    }
    case IP_MULTICAST_LOOP: {
        if (user_value_size != 1)
            return EINVAL;
        u8 value;
        TRY(copy_from_user(&value, static_ptr_cast<u8 const*>(user_value)));
        if (value != 0 && value != 1)
            return EINVAL;
        m_multicast_loop = value;
        return {};
    }
    case IP_ADD_MEMBERSHIP: {
        if (user_value_size != sizeof(ip_mreq))
            return EINVAL;
        ip_mreq mreq;
        TRY(copy_from_user(&mreq, static_ptr_cast<ip_mreq const*>(user_value)));
        if (mreq.imr_interface.s_addr != INADDR_ANY)
            return ENOTSUP;
        IPv4Address address { (u8 const*)&mreq.imr_multiaddr.s_addr };
        if (!m_multicast_memberships.contains_slow(address))
            m_multicast_memberships.append(address);
        return {};
    }
    case IP_DROP_MEMBERSHIP: {
        if (user_value_size != sizeof(ip_mreq))
            return EINVAL;
        ip_mreq mreq;
        TRY(copy_from_user(&mreq, static_ptr_cast<ip_mreq const*>(user_value)));
        if (mreq.imr_interface.s_addr != INADDR_ANY)
            return ENOTSUP;
        IPv4Address address { (u8 const*)&mreq.imr_multiaddr.s_addr };
        m_multicast_memberships.remove_first_matching([&address](auto& a) { return a == address; });
        return {};
    }
    default:
        return ENOPROTOOPT;
    }
}

ErrorOr<void> IPv4Socket::getsockopt(OpenFileDescription& description, int level, int option, Userspace<void*> value, Userspace<socklen_t*> value_size)
{
    if (level != IPPROTO_IP)
        return Socket::getsockopt(description, level, option, value, value_size);

    MutexLocker locker(mutex());

    socklen_t size;
    TRY(copy_from_user(&size, value_size.unsafe_userspace_ptr()));

    switch (option) {
    case IP_TTL: {
        if (size < sizeof(int))
            return EINVAL;
        int ttl = m_ttl;
        TRY(copy_to_user(static_ptr_cast<int*>(value), (int*)&ttl));
        size = sizeof(int);
        return copy_to_user(value_size, &size);
    }
    case IP_TOS: {
        if (size < sizeof(int))
            return EINVAL;
        int type_of_service = m_type_of_service;
        TRY(copy_to_user(static_ptr_cast<int*>(value), (int*)&type_of_service));
        size = sizeof(int);
        return copy_to_user(value_size, &size);
    }
    case IP_MULTICAST_LOOP: {
        if (size < 1)
            return EINVAL;
        TRY(copy_to_user(static_ptr_cast<u8*>(value), (u8 const*)&m_multicast_loop));
        size = 1;
        return copy_to_user(value_size, &size);
    }
    default:
        return ENOPROTOOPT;
    }
}
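
// ioctl() requires the inet pledge and is split into three helpers: routing table updates
// (SIOCADDRT/SIOCDELRT), ARP table updates (SIOCSARP/SIOCDARP) and interface configuration
// (SIOCGIF*/SIOCSIF*); FIONREAD reports how many bytes can be read immediately.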
ErrorOr<void> IPv4Socket::ioctl(OpenFileDescription&, unsigned request, Userspace<void*> arg)
{
    TRY(Process::current().require_promise(Pledge::inet));

    MutexLocker locker(mutex());

    auto ioctl_route = [request, arg]() -> ErrorOr<void> {
        auto user_route = static_ptr_cast<rtentry*>(arg);
        rtentry route;
        TRY(copy_from_user(&route, user_route));

        Userspace<const char*> user_rt_dev((FlatPtr)route.rt_dev);
        auto ifname = TRY(Process::get_syscall_name_string_fixed_buffer<IFNAMSIZ>(user_rt_dev));
        auto adapter = NetworkingManagement::the().lookup_by_name(ifname.representable_view());
        if (!adapter)
            return ENODEV;

        switch (request) {
        case SIOCADDRT: {
            auto current_process_credentials = Process::current().credentials();
            if (!current_process_credentials->is_superuser())
                return EPERM;
            if (route.rt_gateway.sa_family != AF_INET)
                return EAFNOSUPPORT;
            if (!(route.rt_flags & RTF_UP))
                return EINVAL; // FIXME: Find the correct value to return

            auto destination = IPv4Address(((sockaddr_in&)route.rt_dst).sin_addr.s_addr);
            auto gateway = IPv4Address(((sockaddr_in&)route.rt_gateway).sin_addr.s_addr);
            auto genmask = IPv4Address(((sockaddr_in&)route.rt_genmask).sin_addr.s_addr);

            return update_routing_table(destination, gateway, genmask, route.rt_flags, adapter, UpdateTable::Set);
        }
        case SIOCDELRT:
            auto current_process_credentials = Process::current().credentials();
            if (!current_process_credentials->is_superuser())
                return EPERM;
            if (route.rt_gateway.sa_family != AF_INET)
                return EAFNOSUPPORT;

            auto destination = IPv4Address(((sockaddr_in&)route.rt_dst).sin_addr.s_addr);
            auto gateway = IPv4Address(((sockaddr_in&)route.rt_gateway).sin_addr.s_addr);
            auto genmask = IPv4Address(((sockaddr_in&)route.rt_genmask).sin_addr.s_addr);

            return update_routing_table(destination, gateway, genmask, route.rt_flags, adapter, UpdateTable::Delete);
        }

        return EINVAL;
    };

    auto ioctl_arp = [request, arg]() -> ErrorOr<void> {
        auto user_req = static_ptr_cast<arpreq*>(arg);
        arpreq arp_req;
        TRY(copy_from_user(&arp_req, user_req));

        auto current_process_credentials = Process::current().credentials();

        switch (request) {
        case SIOCSARP:
            if (!current_process_credentials->is_superuser())
                return EPERM;
            if (arp_req.arp_pa.sa_family != AF_INET)
                return EAFNOSUPPORT;
            update_arp_table(IPv4Address(((sockaddr_in&)arp_req.arp_pa).sin_addr.s_addr), *(MACAddress*)&arp_req.arp_ha.sa_data[0], UpdateTable::Set);
            return {};

        case SIOCDARP:
            if (!current_process_credentials->is_superuser())
                return EPERM;
            if (arp_req.arp_pa.sa_family != AF_INET)
                return EAFNOSUPPORT;
            update_arp_table(IPv4Address(((sockaddr_in&)arp_req.arp_pa).sin_addr.s_addr), *(MACAddress*)&arp_req.arp_ha.sa_data[0], UpdateTable::Delete);
            return {};
        }

        return EINVAL;
    };

    auto ioctl_interface = [request, arg]() -> ErrorOr<void> {
        auto user_ifr = static_ptr_cast<ifreq*>(arg);
        ifreq ifr;
        TRY(copy_from_user(&ifr, user_ifr));

        if (request == SIOCGIFNAME) {
            // NOTE: Network devices are 1-indexed since index 0 denotes an invalid device
            if (ifr.ifr_index == 0)
                return EINVAL;

            size_t index = 1;
            Optional<StringView> result {};

            NetworkingManagement::the().for_each([&ifr, &index, &result](auto& adapter) {
                if (index == ifr.ifr_index)
                    result = adapter.name();
                ++index;
            });

            if (result.has_value()) {
                auto name = result.release_value();
                auto succ = name.copy_characters_to_buffer(ifr.ifr_name, IFNAMSIZ);
                if (!succ) {
                    return EFAULT;
                }
                return copy_to_user(user_ifr, &ifr);
            }

            return ENODEV;
        }

        char namebuf[IFNAMSIZ + 1];
        memcpy(namebuf, ifr.ifr_name, IFNAMSIZ);
        namebuf[sizeof(namebuf) - 1] = '\0';

        if (request == SIOCGIFINDEX) {
            StringView name { namebuf, strlen(namebuf) };
            size_t index = 1;
            Optional<size_t> result {};

            NetworkingManagement::the().for_each([&name, &index, &result](auto& adapter) {
                if (adapter.name() == name)
                    result = index;
                ++index;
            });

            if (result.has_value()) {
                ifr.ifr_index = result.release_value();
                return copy_to_user(user_ifr, &ifr);
            }

            return ENODEV;
        }

        auto adapter = NetworkingManagement::the().lookup_by_name({ namebuf, strlen(namebuf) });
        if (!adapter)
            return ENODEV;

        auto current_process_credentials = Process::current().credentials();

        switch (request) {
        case SIOCSIFADDR:
            if (!current_process_credentials->is_superuser())
                return EPERM;
            if (ifr.ifr_addr.sa_family != AF_INET)
                return EAFNOSUPPORT;
            adapter->set_ipv4_address(IPv4Address(((sockaddr_in&)ifr.ifr_addr).sin_addr.s_addr));
            return {};

        case SIOCSIFNETMASK:
            if (!current_process_credentials->is_superuser())
                return EPERM;
            if (ifr.ifr_addr.sa_family != AF_INET)
                return EAFNOSUPPORT;
            adapter->set_ipv4_netmask(IPv4Address(((sockaddr_in&)ifr.ifr_netmask).sin_addr.s_addr));
            return {};

        case SIOCGIFADDR: {
            auto ip4_addr = adapter->ipv4_address().to_u32();
            auto& socket_address_in = reinterpret_cast<sockaddr_in&>(ifr.ifr_addr);
            socket_address_in.sin_family = AF_INET;
            socket_address_in.sin_addr.s_addr = ip4_addr;
            return copy_to_user(user_ifr, &ifr);
        }

        case SIOCGIFNETMASK: {
            auto ip4_netmask = adapter->ipv4_netmask().to_u32();
            auto& socket_address_in = reinterpret_cast<sockaddr_in&>(ifr.ifr_addr);
            socket_address_in.sin_family = AF_INET;
            // NOTE: NOT ifr_netmask.
            socket_address_in.sin_addr.s_addr = ip4_netmask;

            return copy_to_user(user_ifr, &ifr);
        }

        case SIOCGIFHWADDR: {
            auto mac_address = adapter->mac_address();
            switch (adapter->adapter_type()) {
            case NetworkAdapter::Type::Loopback:
                ifr.ifr_hwaddr.sa_family = ARPHRD_LOOPBACK;
                break;
            case NetworkAdapter::Type::Ethernet:
                ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
                break;
            default:
                VERIFY_NOT_REACHED();
            }
            mac_address.copy_to(Bytes { ifr.ifr_hwaddr.sa_data, sizeof(ifr.ifr_hwaddr.sa_data) });
            return copy_to_user(user_ifr, &ifr);
        }

        case SIOCGIFBRDADDR: {
            // Broadcast address is basically the reverse of the netmask, i.e.
            // instead of zeroing out the end, you OR with 1 instead.
            auto ip4_netmask = adapter->ipv4_netmask().to_u32();
            auto broadcast_addr = adapter->ipv4_address().to_u32() | ~ip4_netmask;
            auto& socket_address_in = reinterpret_cast<sockaddr_in&>(ifr.ifr_addr);
            socket_address_in.sin_family = AF_INET;
            socket_address_in.sin_addr.s_addr = broadcast_addr;
            return copy_to_user(user_ifr, &ifr);
        }

        case SIOCGIFMTU: {
            auto ip4_metric = adapter->mtu();

            ifr.ifr_addr.sa_family = AF_INET;
            ifr.ifr_metric = ip4_metric;
            return copy_to_user(user_ifr, &ifr);
        }

        case SIOCGIFFLAGS: {
            // FIXME: stub!
            constexpr short flags = 1;
            ifr.ifr_addr.sa_family = AF_INET;
            ifr.ifr_flags = flags;
            return copy_to_user(user_ifr, &ifr);
        }

        case SIOCGIFCONF: {
            // FIXME: stub!
            return EINVAL;
        }
        }

        return EINVAL;
    };

    switch (request) {
    case SIOCSIFADDR:
    case SIOCSIFNETMASK:
    case SIOCGIFADDR:
    case SIOCGIFHWADDR:
    case SIOCGIFNETMASK:
    case SIOCGIFBRDADDR:
    case SIOCGIFMTU:
    case SIOCGIFFLAGS:
    case SIOCGIFCONF:
    case SIOCGIFNAME:
    case SIOCGIFINDEX:
        return ioctl_interface();

    case SIOCADDRT:
    case SIOCDELRT:
        return ioctl_route();

    case SIOCSARP:
    case SIOCDARP:
        return ioctl_arp();

    case FIONREAD: {
        int readable = 0;
        if (buffer_mode() == BufferMode::Bytes) {
            readable = static_cast<int>(m_receive_buffer->immediately_readable());
        } else {
            if (m_receive_queue.size() != 0u) {
                readable = static_cast<int>(TRY(protocol_size(m_receive_queue.first().data->bytes())));
            }
        }

        return copy_to_user(static_ptr_cast<int*>(arg), &readable);
    }
    }

    return EINVAL;
}

ErrorOr<void> IPv4Socket::close()
{
    [[maybe_unused]] auto rc = shutdown(SHUT_RDWR);
    return {};
}

void IPv4Socket::shut_down_for_reading()
{
    Socket::shut_down_for_reading();
    set_can_read(true);
}

void IPv4Socket::set_can_read(bool value)
{
    m_can_read = value;
    if (value)
        evaluate_block_conditions();
}

void IPv4Socket::drop_receive_buffer()
{
    m_receive_buffer = nullptr;
}

}