/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Singleton.h>
#include <AK/Time.h>
#include <Kernel/Debug.h>
#include <Kernel/Devices/Generic/RandomDevice.h>
#include <Kernel/FileSystem/OpenFileDescription.h>
#include <Kernel/Locking/MutexProtected.h>
#include <Kernel/Net/EthernetFrameHeader.h>
#include <Kernel/Net/IPv4.h>
#include <Kernel/Net/NetworkAdapter.h>
#include <Kernel/Net/NetworkingManagement.h>
#include <Kernel/Net/Routing.h>
#include <Kernel/Net/TCP.h>
#include <Kernel/Net/TCPSocket.h>
#include <Kernel/Security/Random.h>
#include <Kernel/Tasks/Process.h>
#include <Kernel/Time/TimeManagement.h>

namespace Kernel {

void TCPSocket::for_each(Function<void(TCPSocket const&)> callback)
{
    sockets_by_tuple().for_each_shared([&](auto const& it) {
        callback(*it.value);
    });
}

ErrorOr<void> TCPSocket::try_for_each(Function<ErrorOr<void>(TCPSocket const&)> callback)
{
    return sockets_by_tuple().with_shared([&](auto const& sockets) -> ErrorOr<void> {
        for (auto& it : sockets)
            TRY(callback(*it.value));
        return {};
    });
}
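
// unref() is overridden so that dropping the last reference and removing the socket from the
// global by-tuple table happen under the same exclusive lock; a concurrent from_tuple() lookup
// can therefore never hand out a socket that is already being torn down.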
bool TCPSocket::unref() const
{
    bool did_hit_zero = sockets_by_tuple().with_exclusive([&](auto& table) {
        if (deref_base())
            return false;
        table.remove(tuple());
        const_cast<TCPSocket&>(*this).revoke_weak_ptrs();
        return true;
    });
    if (did_hit_zero) {
        const_cast<TCPSocket&>(*this).will_be_destroyed();
        delete this;
    }
    return did_hit_zero;
}
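
// All state transitions funnel through set_state() so their side effects stay in one place:
// promoting an outgoing connection to Role::Connected, dropping the receive buffer on TimeWait,
// unregistering from closing_sockets() and notifying the originating listener on Closed, and
// re-evaluating blockers whenever the role or disconnected-ness changed.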
void TCPSocket::set_state(State new_state)
{
    dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket({}) state moving from {} to {}", this, to_string(m_state), to_string(new_state));

    auto was_disconnected = protocol_is_disconnected();
    auto previous_role = m_role;

    m_state = new_state;

    if (new_state == State::Established && m_direction == Direction::Outgoing) {
        set_role(Role::Connected);
        clear_so_error();
    }

    if (new_state == State::TimeWait) {
        // Once we hit TimeWait, we are only holding the socket in case there
        // are packets on the way which we wouldn't want a new socket to get hit
        // with, so there's no point in keeping the receive buffer around.
        drop_receive_buffer();
    }

    if (new_state == State::Closed) {
        closing_sockets().with_exclusive([&](auto& table) {
            table.remove(tuple());
        });

        if (m_originator)
            release_to_originator();
    }

    if (previous_role != m_role || was_disconnected != protocol_is_disconnected())
        evaluate_block_conditions();
}
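
// Two global registries keyed by the connection 4-tuple (local address/port, peer address/port):
// sockets_by_tuple() maps tuples to raw pointers and backs the from_tuple() lookup for incoming
// segments, while closing_sockets() holds RefPtrs to sockets that have been close()d but still
// need to finish the TCP teardown, keeping them alive until they reach State::Closed.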
static Singleton<MutexProtected<HashMap<IPv4SocketTuple, RefPtr<TCPSocket>>>> s_socket_closing;

MutexProtected<HashMap<IPv4SocketTuple, RefPtr<TCPSocket>>>& TCPSocket::closing_sockets()
{
    return *s_socket_closing;
}

static Singleton<MutexProtected<HashMap<IPv4SocketTuple, TCPSocket*>>> s_socket_tuples;

MutexProtected<HashMap<IPv4SocketTuple, TCPSocket*>>& TCPSocket::sockets_by_tuple()
{
    return *s_socket_tuples;
}
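
// Finds the socket that should receive a segment addressed to the given tuple, most specific
// first: an exact 4-tuple match (established connection), then a listener bound to the specific
// local address, then a listener bound to the wildcard address. Returns null if nothing matches.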
RefPtr<TCPSocket> TCPSocket::from_tuple(IPv4SocketTuple const& tuple)
{
    return sockets_by_tuple().with_shared([&](auto const& table) -> RefPtr<TCPSocket> {
        auto exact_match = table.get(tuple);
        if (exact_match.has_value())
            return { *exact_match.value() };

        auto address_tuple = IPv4SocketTuple(tuple.local_address(), tuple.local_port(), IPv4Address(), 0);
        auto address_match = table.get(address_tuple);
        if (address_match.has_value())
            return { *address_match.value() };

        auto wildcard_tuple = IPv4SocketTuple(IPv4Address(), tuple.local_port(), IPv4Address(), 0);
        auto wildcard_match = table.get(wildcard_tuple);
        if (wildcard_match.has_value())
            return { *wildcard_match.value() };

        return {};
    });
}
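
// Called on a listening socket to create the socket for an incoming connection: the child is
// registered under the peer's 4-tuple and parked in m_pending_release_for_accept until it is
// handed over to the listener's accept queue.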
ErrorOr<NonnullRefPtr<TCPSocket>> TCPSocket::try_create_client(IPv4Address const& new_local_address, u16 new_local_port, IPv4Address const& new_peer_address, u16 new_peer_port)
{
    auto tuple = IPv4SocketTuple(new_local_address, new_local_port, new_peer_address, new_peer_port);
    return sockets_by_tuple().with_exclusive([&](auto& table) -> ErrorOr<NonnullRefPtr<TCPSocket>> {
        if (table.contains(tuple))
            return EEXIST;

        auto receive_buffer = TRY(try_create_receive_buffer());
        auto client = TRY(TCPSocket::try_create(protocol(), move(receive_buffer)));

        client->set_setup_state(SetupState::InProgress);
        client->set_local_address(new_local_address);
        client->set_local_port(new_local_port);
        client->set_peer_address(new_peer_address);
        client->set_peer_port(new_peer_port);
        client->set_bound(true);
        client->set_direction(Direction::Incoming);
        client->set_originator(*this);

        m_pending_release_for_accept.set(tuple, client);
        client->m_registered_socket_tuple = tuple;
        table.set(tuple, client);

        return { move(client) };
    });
}
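
// Hand-off of an accepted connection: a child created by try_create_client() keeps a weak
// reference to its originating listener; release_to_originator() gives it back to that listener,
// which removes it from the pending set and queues it for accept() via queue_connection_from().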
void TCPSocket::release_to_originator()
{
    VERIFY(!!m_originator);
    m_originator.strong_ref()->release_for_accept(*this);
    m_originator.clear();
}

void TCPSocket::release_for_accept(NonnullRefPtr<TCPSocket> socket)
{
    VERIFY(m_pending_release_for_accept.contains(socket->tuple()));
    m_pending_release_for_accept.remove(socket->tuple());
    // FIXME: Should we observe this error somehow?
    [[maybe_unused]] auto rc = queue_connection_from(move(socket));
}

TCPSocket::TCPSocket(int protocol, NonnullOwnPtr<DoubleBuffer> receive_buffer, NonnullOwnPtr<KBuffer> scratch_buffer)
    : IPv4Socket(SOCK_STREAM, protocol, move(receive_buffer), move(scratch_buffer))
    , m_last_ack_sent_time(TimeManagement::the().monotonic_time())
    , m_last_retransmit_time(TimeManagement::the().monotonic_time())
{
}

TCPSocket::~TCPSocket()
{
    dequeue_for_retransmit();

    dbgln_if(TCP_SOCKET_DEBUG, "~TCPSocket in state {}", to_string(state()));
}

ErrorOr<NonnullRefPtr<TCPSocket>> TCPSocket::try_create(int protocol, NonnullOwnPtr<DoubleBuffer> receive_buffer)
{
    // Note: Scratch buffer is only used for SOCK_STREAM sockets.
    auto scratch_buffer = TRY(KBuffer::try_create_with_size("TCPSocket: Scratch buffer"sv, 65536));
    return adopt_nonnull_ref_or_enomem(new (nothrow) TCPSocket(protocol, move(receive_buffer), move(scratch_buffer)));
}

ErrorOr<size_t> TCPSocket::protocol_size(ReadonlyBytes raw_ipv4_packet)
{
    auto& ipv4_packet = *reinterpret_cast<IPv4Packet const*>(raw_ipv4_packet.data());
    auto& tcp_packet = *static_cast<TCPPacket const*>(ipv4_packet.payload());
    return raw_ipv4_packet.size() - sizeof(IPv4Packet) - tcp_packet.header_size();
}
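
// Copies the TCP payload of a queued raw IPv4 packet into the caller's buffer and returns the
// number of payload bytes; the buffer is asserted to be large enough to hold the whole payload.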
ErrorOr<size_t> TCPSocket::protocol_receive(ReadonlyBytes raw_ipv4_packet, UserOrKernelBuffer& buffer, size_t buffer_size, [[maybe_unused]] int flags)
{
    auto& ipv4_packet = *reinterpret_cast<IPv4Packet const*>(raw_ipv4_packet.data());
    auto& tcp_packet = *static_cast<TCPPacket const*>(ipv4_packet.payload());
    size_t payload_size = raw_ipv4_packet.size() - sizeof(IPv4Packet) - tcp_packet.header_size();
    dbgln_if(TCP_SOCKET_DEBUG, "payload_size {}, will it fit in {}?", payload_size, buffer_size);
    VERIFY(buffer_size >= payload_size);
    SOCKET_TRY(buffer.write(tcp_packet.payload(), payload_size));
    return payload_size;
}
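
// Sends at most one MSS worth of user data as a single PSH|ACK segment; any remaining bytes are
// left for a subsequent call. Unless TCP_NODELAY is set, Nagle's algorithm below refuses to send
// a small segment while previously sent data is still unacknowledged.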
ErrorOr<size_t> TCPSocket::protocol_send(UserOrKernelBuffer const& data, size_t data_length)
{
    auto adapter = bound_interface().with([](auto& bound_device) -> RefPtr<NetworkAdapter> { return bound_device; });
    RoutingDecision routing_decision = route_to(peer_address(), local_address(), adapter);
    if (routing_decision.is_zero())
        return set_so_error(EHOSTUNREACH);
    size_t mss = routing_decision.adapter->mtu() - sizeof(IPv4Packet) - sizeof(TCPPacket);

    if (!m_no_delay) {
        // RFC 896 (Nagle’s algorithm): https://www.ietf.org/rfc/rfc0896
        // "The solution is to inhibit the sending of new TCP segments when
        // new outgoing data arrives from the user if any previously
        // transmitted data on the connection remains unacknowledged. This
        // inhibition is to be unconditional; no timers, tests for size of
        // data received, or other conditions are required."
        auto has_unacked_data = m_unacked_packets.with_shared([&](auto const& packets) { return packets.size > 0; });
        if (has_unacked_data && data_length < mss)
            return set_so_error(EAGAIN);
    }

    data_length = min(data_length, mss);
    TRY(send_tcp_packet(TCPFlags::PSH | TCPFlags::ACK, &data, data_length, &routing_decision));
    return data_length;
}

ErrorOr<void> TCPSocket::send_ack(bool allow_duplicate)
{
    if (!allow_duplicate && m_last_ack_number_sent == m_ack_number)
        return {};
    return send_tcp_packet(TCPFlags::ACK);
}
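
// Builds and transmits a single segment. MSS and window-scale options are only attached to SYN
// segments, and the options area is padded up to a 32-bit boundary to satisfy the data-offset
// field. Segments that expect an ACK (SYN, or anything carrying payload) are kept in
// m_unacked_packets so they can be retransmitted until the peer acknowledges them.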
ErrorOr<void> TCPSocket::send_tcp_packet(u16 flags, UserOrKernelBuffer const* payload, size_t payload_size, RoutingDecision* user_routing_decision)
{
    auto adapter = bound_interface().with([](auto& bound_device) -> RefPtr<NetworkAdapter> { return bound_device; });
    RoutingDecision routing_decision = user_routing_decision ? *user_routing_decision : route_to(peer_address(), local_address(), adapter);
    if (routing_decision.is_zero())
        return set_so_error(EHOSTUNREACH);

    auto ipv4_payload_offset = routing_decision.adapter->ipv4_payload_offset();

    bool const has_mss_option = flags & TCPFlags::SYN;
    bool const has_window_scale_option = flags & TCPFlags::SYN;
    size_t const options_size = (has_mss_option ? sizeof(TCPOptionMSS) : 0) + (has_window_scale_option ? sizeof(TCPOptionWindowScale) : 0);
    size_t const tcp_header_size = sizeof(TCPPacket) + align_up_to(options_size, 4);
    size_t const buffer_size = ipv4_payload_offset + tcp_header_size + payload_size;
    auto packet = routing_decision.adapter->acquire_packet_buffer(buffer_size);
    if (!packet)
        return set_so_error(ENOMEM);
    routing_decision.adapter->fill_in_ipv4_header(*packet, local_address(),
        routing_decision.next_hop, peer_address(), IPv4Protocol::TCP,
        buffer_size - ipv4_payload_offset, type_of_service(), ttl());
    memset(packet->buffer->data() + ipv4_payload_offset, 0, sizeof(TCPPacket));
    auto& tcp_packet = *(TCPPacket*)(packet->buffer->data() + ipv4_payload_offset);
    VERIFY(local_port());
    tcp_packet.set_source_port(local_port());
    tcp_packet.set_destination_port(peer_port());
    auto window_size = available_space_in_receive_buffer();
    if ((flags & TCPFlags::SYN) == 0 && m_window_scaling_supported)
        window_size >>= receive_window_scale();
    tcp_packet.set_window_size(min(window_size, NumericLimits<u16>::max()));
    tcp_packet.set_sequence_number(m_sequence_number);
    tcp_packet.set_data_offset(tcp_header_size / sizeof(u32));
    tcp_packet.set_flags(flags);

    if (payload) {
        if (auto result = payload->read(tcp_packet.payload(), payload_size); result.is_error()) {
            routing_decision.adapter->release_packet_buffer(*packet);
            return set_so_error(result.release_error());
        }
    }

    if (flags & TCPFlags::ACK) {
        m_last_ack_number_sent = m_ack_number;
        m_last_ack_sent_time = TimeManagement::the().monotonic_time();
        tcp_packet.set_ack_number(m_ack_number);
    }

    if (flags & TCPFlags::SYN) {
        ++m_sequence_number;
    } else {
        m_sequence_number += payload_size;
    }

    u8* next_option = packet->buffer->data() + ipv4_payload_offset + sizeof(TCPPacket);
    if (has_mss_option) {
        u16 mss = routing_decision.adapter->mtu() - sizeof(IPv4Packet) - sizeof(TCPPacket);
        TCPOptionMSS mss_option { mss };
        memcpy(next_option, &mss_option, sizeof(mss_option));
        next_option += sizeof(mss_option);
    }
    if (has_window_scale_option) {
        TCPOptionWindowScale window_scale_option { receive_window_scale() };
        memcpy(next_option, &window_scale_option, sizeof(window_scale_option));
        next_option += sizeof(window_scale_option);
    }
    if ((options_size % 4) != 0)
        *next_option = to_underlying(TCPOptionKind::End);

    tcp_packet.set_checksum(compute_tcp_checksum(local_address(), peer_address(), tcp_packet, payload_size));

    bool expect_ack { tcp_packet.has_syn() || payload_size > 0 };
    if (expect_ack) {
        bool append_failed { false };
        m_unacked_packets.with_exclusive([&](auto& unacked_packets) {
            auto result = unacked_packets.packets.try_append({ m_sequence_number, packet, ipv4_payload_offset, *routing_decision.adapter });
            if (result.is_error()) {
                dbgln("TCPSocket: Dropped outbound packet because try_append() failed");
                append_failed = true;
                return;
            }
            unacked_packets.size += payload_size;
            enqueue_for_retransmit();
        });
        if (append_failed)
            return set_so_error(ENOMEM);
    }

    m_packets_out++;
    m_bytes_out += buffer_size;
    routing_decision.adapter->send_packet(packet->bytes());
    if (!expect_ack)
        routing_decision.adapter->release_packet_buffer(*packet);

    return {};
}
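
// Handles the acknowledgment carried by an incoming segment: retires every queued outbound
// packet whose sequence number is covered by the peer's ACK, returns those packet buffers to
// their adapter, and clears the retransmit state once nothing remains unacknowledged.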
void TCPSocket::receive_tcp_packet(TCPPacket const& packet, u16 size)
{
    if (packet.has_ack()) {
        u32 ack_number = packet.ack_number();

        dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: receive_tcp_packet: {}", ack_number);

        int removed = 0;
        m_unacked_packets.with_exclusive([&](auto& unacked_packets) {
            while (!unacked_packets.packets.is_empty()) {
                auto& packet = unacked_packets.packets.first();

                dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: iterate: {}", packet.ack_number);

                if (packet.ack_number <= ack_number) {
                    auto old_adapter = packet.adapter.strong_ref();
                    if (old_adapter)
                        old_adapter->release_packet_buffer(*packet.buffer);
                    TCPPacket& tcp_packet = *(TCPPacket*)(packet.buffer->buffer->data() + packet.ipv4_payload_offset);
                    if (m_send_window_size != tcp_packet.window_size()) {
                        m_send_window_size = tcp_packet.window_size() << m_send_window_scale;
                    }
                    auto payload_size = packet.buffer->buffer->data() + packet.buffer->buffer->size() - (u8*)tcp_packet.payload();
                    unacked_packets.size -= payload_size;
                    evaluate_block_conditions();
                    unacked_packets.packets.take_first();
                    removed++;
                } else {
                    break;
                }
            }

            if (unacked_packets.packets.is_empty()) {
                m_retransmit_attempts = 0;
                dequeue_for_retransmit();
            }

            dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: receive_tcp_packet acknowledged {} packets", removed);
        });
    }

    m_packets_in++;
    m_bytes_in += packet.header_size() + size;
}

bool TCPSocket::should_delay_next_ack() const
{
    // FIXME: We don't know the MSS here so make a reasonable guess.
    size_t const mss = 1500;

    // RFC 1122 says we should send an ACK for every two full-sized segments.
    if (m_ack_number >= m_last_ack_number_sent + 2 * mss)
        return false;

    // RFC 1122 says we should not delay ACKs for more than 500 milliseconds.
    if (TimeManagement::the().monotonic_time(TimePrecision::Precise) >= m_last_ack_sent_time + Duration::from_milliseconds(500))
        return false;

    return true;
}
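
// Standard Internet checksum (RFC 1071) over the 12-byte TCP/IPv4 pseudo-header (source,
// destination, zero, protocol, TCP length) followed by the TCP header and payload: 16-bit words
// are summed with end-around carry and the one's complement of the sum is returned. A trailing
// odd byte is treated as the high half of a final 16-bit word.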
NetworkOrdered<u16> TCPSocket::compute_tcp_checksum(IPv4Address const& source, IPv4Address const& destination, TCPPacket const& packet, u16 payload_size)
{
    union PseudoHeader {
        struct [[gnu::packed]] {
            IPv4Address source;
            IPv4Address destination;
            u8 zero;
            u8 protocol;
            NetworkOrdered<u16> payload_size;
        } header;
        u16 raw[6];
    };
    static_assert(sizeof(PseudoHeader) == 12);

    Checked<u16> packet_size = packet.header_size();
    packet_size += payload_size;
    VERIFY(!packet_size.has_overflow());

    PseudoHeader pseudo_header { .header = { source, destination, 0, (u8)IPv4Protocol::TCP, packet_size.value() } };

    u32 checksum = 0;
    auto* raw_pseudo_header = pseudo_header.raw;
    for (size_t i = 0; i < sizeof(pseudo_header) / sizeof(u16); ++i) {
        checksum += AK::convert_between_host_and_network_endian(raw_pseudo_header[i]);
        if (checksum > 0xffff)
            checksum = (checksum >> 16) + (checksum & 0xffff);
    }
    auto* raw_packet = bit_cast<u16*>(&packet);
    for (size_t i = 0; i < packet.header_size() / sizeof(u16); ++i) {
        checksum += AK::convert_between_host_and_network_endian(raw_packet[i]);
        if (checksum > 0xffff)
            checksum = (checksum >> 16) + (checksum & 0xffff);
    }
    VERIFY(packet.data_offset() * 4 == packet.header_size());
    auto* raw_payload = bit_cast<u16*>(packet.payload());
    for (size_t i = 0; i < payload_size / sizeof(u16); ++i) {
        checksum += AK::convert_between_host_and_network_endian(raw_payload[i]);
        if (checksum > 0xffff)
            checksum = (checksum >> 16) + (checksum & 0xffff);
    }
    if (payload_size & 1) {
        u16 expanded_byte = ((u8 const*)packet.payload())[payload_size - 1] << 8;
        checksum += expanded_byte;
        if (checksum > 0xffff)
            checksum = (checksum >> 16) + (checksum & 0xffff);
    }
    return ~(checksum & 0xffff);
}
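
// TCP-level socket options. Only TCP_NODELAY is supported: a non-zero value disables Nagle's
// algorithm in protocol_send() so small segments go out immediately. Options for other levels
// are forwarded to the IPv4 layer; unknown TCP options are rejected with ENOPROTOOPT.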
ErrorOr<void> TCPSocket::setsockopt(int level, int option, Userspace<void const*> user_value, socklen_t user_value_size)
{
    if (level != IPPROTO_TCP)
        return IPv4Socket::setsockopt(level, option, user_value, user_value_size);

    MutexLocker locker(mutex());

    switch (option) {
    case TCP_NODELAY:
        if (user_value_size < sizeof(int))
            return EINVAL;
        int value;
        TRY(copy_from_user(&value, static_ptr_cast<int const*>(user_value)));
        if (value != 0 && value != 1)
            return EINVAL;
        m_no_delay = value;
        return {};
    default:
        dbgln("setsockopt({}) at IPPROTO_TCP not implemented.", option);
        return ENOPROTOOPT;
    }
}

ErrorOr<void> TCPSocket::getsockopt(OpenFileDescription& description, int level, int option, Userspace<void*> value, Userspace<socklen_t*> value_size)
{
    if (level != IPPROTO_TCP)
        return IPv4Socket::getsockopt(description, level, option, value, value_size);

    MutexLocker locker(mutex());

    socklen_t size;
    TRY(copy_from_user(&size, value_size.unsafe_userspace_ptr()));

    switch (option) {
    case TCP_NODELAY: {
        int nodelay = m_no_delay ? 1 : 0;
        if (size < sizeof(nodelay))
            return EINVAL;
        TRY(copy_to_user(static_ptr_cast<int*>(value), &nodelay));
        size = sizeof(nodelay);
        return copy_to_user(value_size, &size);
    }
    default:
        dbgln("getsockopt({}) at IPPROTO_TCP not implemented.", option);
        return ENOPROTOOPT;
    }
}
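
// Binds the socket within the TCP layer. When no port was requested, an ephemeral port is
// allocated by scanning 32768-60999 from a random starting point, wrapping around once; when a
// specific port was requested, it must not already be registered by another socket. The chosen
// tuple is recorded in sockets_by_tuple().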
ErrorOr<void> TCPSocket::protocol_bind()
{
    dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket::protocol_bind(), local_port() is {}", local_port());
    // Check that we do have the address we're trying to bind to.
    TRY(m_adapter.with([this](auto& adapter) -> ErrorOr<void> {
        if (has_specific_local_address() && !adapter) {
            adapter = NetworkingManagement::the().from_ipv4_address(local_address());
            if (!adapter)
                return set_so_error(EADDRNOTAVAIL);
        }
        return {};
    }));

    if (local_port() == 0) {
        // Allocate an unused ephemeral port.
        constexpr u16 first_ephemeral_port = 32768;
        constexpr u16 last_ephemeral_port = 60999;
        constexpr u16 ephemeral_port_range_size = last_ephemeral_port - first_ephemeral_port;
        u16 first_scan_port = first_ephemeral_port + get_good_random<u16>() % ephemeral_port_range_size;

        return sockets_by_tuple().with_exclusive([&](auto& table) -> ErrorOr<void> {
            u16 port = first_scan_port;
            while (true) {
                IPv4SocketTuple proposed_tuple(local_address(), port, peer_address(), peer_port());

                auto it = table.find(proposed_tuple);
                if (it == table.end()) {
                    set_local_port(port);
                    m_registered_socket_tuple = proposed_tuple;
                    table.set(proposed_tuple, this);
                    dbgln_if(TCP_SOCKET_DEBUG, "...allocated port {}, tuple {}", port, proposed_tuple.to_string());
                    return {};
                }
                ++port;
                if (port > last_ephemeral_port)
                    port = first_ephemeral_port;
                if (port == first_scan_port)
                    break;
            }
            return set_so_error(EADDRINUSE);
        });
    } else {
        // Verify that the user-supplied port is not already used by someone else.
        bool ok = sockets_by_tuple().with_exclusive([&](auto& table) -> bool {
            if (table.contains(tuple()))
                return false;
            auto socket_tuple = tuple();
            m_registered_socket_tuple = socket_tuple;
            table.set(socket_tuple, this);
            return true;
        });
        if (!ok)
            return set_so_error(EADDRINUSE);
        return {};
    }
}

ErrorOr<void> TCPSocket::protocol_listen()
{
    set_direction(Direction::Passive);
    set_state(State::Listen);
    set_setup_state(SetupState::Completed);
    return {};
}
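
// Active open: picks a route and a local address/port if necessary, sends the initial SYN and
// moves to SynSent. On a blocking description this waits for the handshake to finish and reports
// ETIMEDOUT or ECONNREFUSED on failure; on a non-blocking one it returns EINPROGRESS right away.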
ErrorOr<void> TCPSocket::protocol_connect(OpenFileDescription& description)
{
    MutexLocker locker(mutex());

    auto routing_decision = route_to(peer_address(), local_address());
    if (routing_decision.is_zero())
        return set_so_error(EHOSTUNREACH);
    if (!has_specific_local_address())
        set_local_address(routing_decision.adapter->ipv4_address());

    TRY(ensure_bound());
    if (m_registered_socket_tuple.has_value() && m_registered_socket_tuple != tuple()) {
        // If the socket was manually bound (using bind(2)) instead of implicitly using connect,
        // it will already be registered in the TCPSocket sockets_by_tuple table, under the previous
        // socket tuple. We replace the entry in the table to ensure it is also properly removed on
        // socket deletion, to prevent a dangling reference.
        TRY(sockets_by_tuple().with_exclusive([this](auto& table) -> ErrorOr<void> {
            auto removed = table.remove(*m_registered_socket_tuple);
            VERIFY(removed);
            if (table.contains(tuple()))
                return set_so_error(EADDRINUSE);
            table.set(tuple(), this);
            return {};
        }));
        m_registered_socket_tuple = tuple();
    }

    m_sequence_number = get_good_random<u32>();
    m_ack_number = 0;

    set_setup_state(SetupState::InProgress);
    TRY(send_tcp_packet(TCPFlags::SYN));
    m_state = State::SynSent;
    set_role(Role::Connecting);
    m_direction = Direction::Outgoing;

    evaluate_block_conditions();

    if (description.is_blocking()) {
        locker.unlock();
        auto unblock_flags = Thread::FileBlocker::BlockFlags::None;
        if (Thread::current()->block<Thread::ConnectBlocker>({}, description, unblock_flags).was_interrupted())
            return set_so_error(EINTR);
        locker.lock();
        VERIFY(setup_state() == SetupState::Completed);
        if (has_error()) { // TODO: check unblock_flags
            set_role(Role::None);
            if (error() == TCPSocket::Error::RetransmitTimeout)
                return set_so_error(ETIMEDOUT);
            else
                return set_so_error(ECONNREFUSED);
        }
        return {};
    }

    return set_so_error(EINPROGRESS);
}

bool TCPSocket::protocol_is_disconnected() const
{
    switch (m_state) {
    case State::Closed:
    case State::CloseWait:
    case State::LastAck:
    case State::FinWait1:
    case State::FinWait2:
    case State::Closing:
    case State::TimeWait:
        return true;
    default:
        return false;
    }
}

void TCPSocket::shut_down_for_writing()
{
    if (state() == State::Established) {
        dbgln_if(TCP_SOCKET_DEBUG, " Sending FIN from Established and moving into FinWait1");
        (void)send_tcp_packet(TCPFlags::FIN | TCPFlags::ACK);
        set_state(State::FinWait1);
    } else {
        dbgln(" Shutting down TCPSocket for writing but not moving to FinWait1 since state is {}", to_string(state()));
    }
}

ErrorOr<void> TCPSocket::close()
{
    MutexLocker locker(mutex());
    auto result = IPv4Socket::close();
    if (state() == State::CloseWait) {
        dbgln_if(TCP_SOCKET_DEBUG, " Sending FIN from CloseWait and moving into LastAck");
        [[maybe_unused]] auto rc = send_tcp_packet(TCPFlags::FIN | TCPFlags::ACK);
        set_state(State::LastAck);
    }

    if (state() != State::Closed && state() != State::Listen)
        closing_sockets().with_exclusive([&](auto& table) {
            table.set(tuple(), *this);
        });
    return result;
}

static Singleton<MutexProtected<TCPSocket::RetransmitList>> s_sockets_for_retransmit;

MutexProtected<TCPSocket::RetransmitList>& TCPSocket::sockets_for_retransmit()
{
    return *s_sockets_for_retransmit;
}

void TCPSocket::enqueue_for_retransmit()
{
    sockets_for_retransmit().with_exclusive([&](auto& list) {
        list.append(*this);
    });
}

void TCPSocket::dequeue_for_retransmit()
{
    sockets_for_retransmit().with_exclusive([&](auto& list) {
        list.remove(*this);
    });
}
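
// Retransmits everything still unacknowledged on this socket, doubling the wait before each
// successive attempt (at least one second between retries per RFC 6298, exponential backoff per
// RFC 1122). After maximum_retransmits attempts the connection is torn down with
// Error::RetransmitTimeout.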
void TCPSocket::retransmit_packets()
{
    auto now = TimeManagement::the().monotonic_time();

    // RFC6298 says we should have at least one second between retransmits. According to
    // RFC1122 we must do exponential backoff - even for SYN packets.
    i64 retransmit_interval = 1;
    for (decltype(m_retransmit_attempts) i = 0; i < m_retransmit_attempts; i++)
        retransmit_interval *= 2;

    if (m_last_retransmit_time > now - Duration::from_seconds(retransmit_interval))
        return;

    dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket({}) handling retransmit", this);

    m_last_retransmit_time = now;
    ++m_retransmit_attempts;

    if (m_retransmit_attempts > maximum_retransmits) {
        set_state(TCPSocket::State::Closed);
        set_error(TCPSocket::Error::RetransmitTimeout);
        set_setup_state(Socket::SetupState::Completed);
        return;
    }

    auto adapter = bound_interface().with([](auto& bound_device) -> RefPtr<NetworkAdapter> { return bound_device; });
    auto routing_decision = route_to(peer_address(), local_address(), adapter);
    if (routing_decision.is_zero())
        return;

    m_unacked_packets.with_exclusive([&](auto& unacked_packets) {
        for (auto& packet : unacked_packets.packets) {
            packet.tx_counter++;

            if constexpr (TCP_SOCKET_DEBUG) {
                auto& tcp_packet = *(const TCPPacket*)(packet.buffer->buffer->data() + packet.ipv4_payload_offset);
                dbgln("Sending TCP packet from {}:{} to {}:{} with ({}{}{}{}) seq_no={}, ack_no={}, tx_counter={}",
                    local_address(), local_port(),
                    peer_address(), peer_port(),
                    (tcp_packet.has_syn() ? "SYN " : ""),
                    (tcp_packet.has_ack() ? "ACK " : ""),
                    (tcp_packet.has_fin() ? "FIN " : ""),
                    (tcp_packet.has_rst() ? "RST " : ""),
                    tcp_packet.sequence_number(),
                    tcp_packet.ack_number(),
                    packet.tx_counter);
            }

            size_t ipv4_payload_offset = routing_decision.adapter->ipv4_payload_offset();
            if (ipv4_payload_offset != packet.ipv4_payload_offset) {
                // FIXME: Add support for this. This can happen if after a route change
                // we ended up on another adapter which doesn't have the same layer 2 type
                // as the previous adapter.
                VERIFY_NOT_REACHED();
            }

            auto packet_buffer = packet.buffer->bytes();

            routing_decision.adapter->fill_in_ipv4_header(*packet.buffer,
                local_address(), routing_decision.next_hop, peer_address(),
                IPv4Protocol::TCP, packet_buffer.size() - ipv4_payload_offset, type_of_service(), ttl());
            routing_decision.adapter->send_packet(packet_buffer);
            m_packets_out++;
            m_bytes_out += packet_buffer.size();
        }
    });
}
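
// A TCP socket is writable only once the handshake is past SynSent/SynReceived and, for blocking
// descriptions, only while the requested write still fits into the peer's advertised send window
// together with the data that is already in flight.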
bool TCPSocket::can_write(OpenFileDescription const& file_description, u64 size) const
{
    if (!IPv4Socket::can_write(file_description, size))
        return false;

    if (m_state == State::SynSent || m_state == State::SynReceived)
        return false;

    if (!file_description.is_blocking())
        return true;

    return m_unacked_packets.with_shared([&](auto& unacked_packets) {
        return unacked_packets.size + size <= m_send_window_size;
    });
}

}