skeleton of portmapper server

Summary:
The msft client can't be pointed at an NFS server on a particular port, so
we have to register our RPC servers (mountd and nfsd) with the port mapper
service.
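
For reference, a v2 port mapping is just a four-integer tuple, and a client
finds a server by asking the port mapper which port matches a
program/version/protocol triple. A rough sketch of the wire-level record
(names here are illustrative, not the ones used in this diff; the real XDR
types live in RpcbindRpc.h):

#include <cstdint>

// Illustrative only; not the actual types from this diff.
struct PortmapMapping {
  uint32_t prog; // RPC program number, e.g. 100003 for nfsd, 100005 for mountd
  uint32_t vers; // program version, e.g. 3 for NFSv3
  uint32_t prot; // transport protocol: 6 = TCP, 17 = UDP
  uint32_t port; // port the program is listening on
};
// PMAPPROC_SET(mapping)     -> bool (registration; what "general purpose" below requires)
// PMAPPROC_GETPORT(mapping) -> port (0 if nothing is registered)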

There is no portmapper service running on Windows by default.

There is a port mapper implementation from msft that can be installed on
Windows Server, but not on Windows desktop.

There are some third-party implementations of the portmapper for Windows. Most
of them are part of an NFS server implementation. But none that I have found
meets all three requirements: runs on modern Windows versions, is a general
purpose port mapper, and supports port mapper v2. General purpose means it
allows other processes to set a port mapping. Most of the port mappers I found
must register their NFS server in process, because they don't support calling
the set-mapping endpoint. The rest only run on Windows versions from before I
was born or only support v4.

I tried porting the GNU/Linux implementation of the port mapper to Windows, but
despite a few hours of trying I could not even get it to build on Linux.
Writing our own port mapper is faster.

This is the skeleton of the server. In the next diffs I will register our
RPC servers with it and add support for telling the msft NFS client about our
servers.

Reviewed By: chadaustin

Differential Revision: D44987863

fbshipit-source-id: bff065795a9f4b7b6c13ef3e3ce603646e1ce364
Katie Mancini 2023-04-18 17:50:02 -07:00 committed by Facebook GitHub Bot
parent 90741e9e30
commit 4ee4fa5e6b
8 changed files with 292 additions and 0 deletions


@@ -470,6 +470,25 @@ class EdenConfig : private ConfigSettingManager {
std::chrono::minutes(1),
this};
/**
* Controls whether Eden will run its own rpcbind/portmapper server. On
* Linux there is one built into the kernel that is always running, and on
* macOS there is one built into the kernel that you just have to poke into
* running. There is no built-in one on Windows, and no good external option
* to use (at least none discoverable by kmancini). So we built our own.
*
* Rpcbind/portmapper runs on a fixed port (111), so two port mappers will
* conflict with each other. Thus we don't want to run rpcbind if there is
* already one running.
*
* Long story short, you never want to set this to true on Linux or macOS,
* but you do on Windows (with care).
*/
ConfigSetting<bool> runInternalRpcbind{
"nfs:run-internal-rpcbind",
false,
this};
/**
* Controls whether Mountd will register itself against rpcbind.
*/
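
As a hypothetical illustration, flipping the new setting on would look like
this in the EdenFS TOML config (the [nfs] section and key spelling follow from
the "nfs:run-internal-rpcbind" name above):

[nfs]
run-internal-rpcbind = true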


@@ -62,6 +62,7 @@ ServerState::ServerState(
mainEventBase,
initialConfig.numNfsThreads.getValue(),
initialConfig.maxNfsInflightRequests.getValue(),
initialConfig.runInternalRpcbind.getValue(),
structuredLogger_)
: nullptr},
config_{std::move(reloadableConfig)},


@@ -83,6 +83,7 @@ target_link_libraries(
PUBLIC
eden_nfs_mountd
eden_nfs_nfsd3
eden_nfs_portmap_server
PRIVATE
Folly::folly
)


@@ -17,22 +17,34 @@ NfsServer::NfsServer(
folly::EventBase* evb,
uint64_t numServicingThreads,
uint64_t maxInflightRequests,
bool shouldRunOurOwnRpcbindServer,
const std::shared_ptr<StructuredLogger>& structuredLogger)
: evb_(evb),
threadPool_(std::make_shared<folly::CPUThreadPoolExecutor>(
numServicingThreads,
std::make_unique<EdenTaskQueue>(maxInflightRequests),
std::make_unique<folly::NamedThreadFactory>("NfsThreadPool"))),
rpcbindd_(
shouldRunOurOwnRpcbindServer
? std::make_shared<Rpcbindd>(evb_, threadPool_, structuredLogger)
: nullptr),
mountd_(evb_, threadPool_, structuredLogger) {}
void NfsServer::initialize(
folly::SocketAddress addr,
bool registerMountdWithRpcbind) {
mountd_.initialize(addr, registerMountdWithRpcbind);
if (rpcbindd_) {
rpcbindd_->initialize();
}
}
void NfsServer::initialize(folly::File&& socket) {
mountd_.initialize(std::move(socket));
// TODO: add a config for this
if (rpcbindd_) {
rpcbindd_->initialize();
}
}
NfsServer::NfsMountInfo NfsServer::registerMount(


@@ -10,6 +10,7 @@
#include <tuple>
#include "eden/fs/nfs/Mountd.h"
#include "eden/fs/nfs/Nfsd3.h"
#include "eden/fs/nfs/portmap/Rpcbindd.h"
#include "eden/fs/utils/CaseSensitivity.h"
namespace folly {
@@ -39,6 +40,7 @@ class NfsServer {
folly::EventBase* evb,
uint64_t numServicingThreads,
uint64_t maxInflightRequests,
bool shouldRunOurOwnRpcbindServer,
const std::shared_ptr<StructuredLogger>& structuredLogger);
/**
@@ -105,6 +107,7 @@ private:
private:
folly::EventBase* evb_;
std::shared_ptr<folly::Executor> threadPool_;
std::shared_ptr<Rpcbindd> rpcbindd_;
Mountd mountd_;
};


@@ -27,3 +27,15 @@ target_link_libraries(
portmap_rpc
Folly::folly
)
add_library(
eden_nfs_portmap_server STATIC
"Rpcbindd.cpp" "Rpcbindd.h"
)
target_link_libraries(
eden_nfs_portmap_server
PUBLIC
portmap_rpc
Folly::folly
)


@@ -0,0 +1,185 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#include "eden/fs/nfs/portmap/Rpcbindd.h"
#include <memory>
#include <unordered_map>
#include <folly/Synchronized.h>
#include <folly/Utility.h>
#include <folly/logging/xlog.h>
#include "eden/fs/nfs/MountdRpc.h"
#include "eden/fs/nfs/portmap/RpcbindRpc.h"
#include "eden/fs/utils/ImmediateFuture.h"
namespace facebook::eden {
class RpcbinddServerProcessor final : public RpcServerProcessor {
public:
RpcbinddServerProcessor() = default;
RpcbinddServerProcessor(const RpcbinddServerProcessor&) = delete;
RpcbinddServerProcessor(RpcbinddServerProcessor&&) = delete;
RpcbinddServerProcessor& operator=(const RpcbinddServerProcessor&) = delete;
RpcbinddServerProcessor& operator=(RpcbinddServerProcessor&&) = delete;
ImmediateFuture<folly::Unit> dispatchRpc(
folly::io::Cursor deser,
folly::io::QueueAppender ser,
uint32_t xid,
uint32_t progNumber,
uint32_t progVersion,
uint32_t procNumber) override;
ImmediateFuture<folly::Unit>
null(folly::io::Cursor deser, folly::io::QueueAppender ser, uint32_t xid);
ImmediateFuture<folly::Unit>
set(folly::io::Cursor deser, folly::io::QueueAppender ser, uint32_t xid);
ImmediateFuture<folly::Unit>
unset(folly::io::Cursor deser, folly::io::QueueAppender ser, uint32_t xid);
ImmediateFuture<folly::Unit>
getport(folly::io::Cursor deser, folly::io::QueueAppender ser, uint32_t xid);
ImmediateFuture<folly::Unit>
dump(folly::io::Cursor deser, folly::io::QueueAppender ser, uint32_t xid);
ImmediateFuture<folly::Unit>
callit(folly::io::Cursor deser, folly::io::QueueAppender ser, uint32_t xid);
};
namespace {
using Handler = ImmediateFuture<folly::Unit> (RpcbinddServerProcessor::*)(
folly::io::Cursor deser,
folly::io::QueueAppender ser,
uint32_t xid);
struct HandlerEntry {
constexpr HandlerEntry() = default;
constexpr HandlerEntry(folly::StringPiece n, Handler h)
: name(n), handler(h) {}
folly::StringPiece name;
Handler handler = nullptr;
};
constexpr auto kRpcbindHandlers = [] {
std::array<HandlerEntry, 6> handlers;
handlers[folly::to_underlying(rpcbindProcs2::null)] = {
"NULL", &RpcbinddServerProcessor::null};
handlers[folly::to_underlying(rpcbindProcs2::set)] = {
"SET", &RpcbinddServerProcessor::set};
handlers[folly::to_underlying(rpcbindProcs2::unset)] = {
"UNSET", &RpcbinddServerProcessor::unset};
handlers[folly::to_underlying(rpcbindProcs2::getport)] = {
"GETPORT", &RpcbinddServerProcessor::getport};
handlers[folly::to_underlying(rpcbindProcs2::dump)] = {
"DUMP", &RpcbinddServerProcessor::dump};
handlers[folly::to_underlying(rpcbindProcs2::callit)] = {
"CALLIT", &RpcbinddServerProcessor::callit};
return handlers;
}();
} // namespace
ImmediateFuture<folly::Unit> RpcbinddServerProcessor::null(
folly::io::Cursor /*deser*/,
folly::io::QueueAppender ser,
uint32_t xid) {
serializeReply(ser, accept_stat::SUCCESS, xid);
return folly::unit;
}
ImmediateFuture<folly::Unit> RpcbinddServerProcessor::set(
folly::io::Cursor /*deser*/,
folly::io::QueueAppender ser,
uint32_t xid) {
serializeReply(ser, accept_stat::PROC_UNAVAIL, xid);
return folly::unit;
}
ImmediateFuture<folly::Unit> RpcbinddServerProcessor::unset(
folly::io::Cursor /*deser*/,
folly::io::QueueAppender ser,
uint32_t xid) {
serializeReply(ser, accept_stat::PROC_UNAVAIL, xid);
return folly::unit;
}
ImmediateFuture<folly::Unit> RpcbinddServerProcessor::getport(
folly::io::Cursor /*deser*/,
folly::io::QueueAppender ser,
uint32_t xid) {
serializeReply(ser, accept_stat::PROC_UNAVAIL, xid);
return folly::unit;
}
ImmediateFuture<folly::Unit> RpcbinddServerProcessor::dump(
folly::io::Cursor /*deser*/,
folly::io::QueueAppender ser,
uint32_t xid) {
serializeReply(ser, accept_stat::PROC_UNAVAIL, xid);
return folly::unit;
}
ImmediateFuture<folly::Unit> RpcbinddServerProcessor::callit(
folly::io::Cursor /*deser*/,
folly::io::QueueAppender ser,
uint32_t xid) {
serializeReply(ser, accept_stat::PROC_UNAVAIL, xid);
return folly::unit;
}
ImmediateFuture<folly::Unit> RpcbinddServerProcessor::dispatchRpc(
folly::io::Cursor deser,
folly::io::QueueAppender ser,
uint32_t xid,
uint32_t progNumber,
uint32_t progVersion,
uint32_t procNumber) {
XLOG(DBG7) << "dispatchRpc";
if (progNumber != kPortmapProgNumber) {
XLOG(DBG7) << "prog: " << progNumber;
serializeReply(ser, accept_stat::PROG_UNAVAIL, xid);
return folly::unit;
}
if (progVersion != kPortmapVersion2) {
XLOG(DBG7) << "vers: " << progVersion;
serializeReply(ser, accept_stat::PROG_MISMATCH, xid);
XdrTrait<mismatch_info>::serialize(
ser, mismatch_info{kPortmapVersion2, kPortmapVersion2});
return folly::unit;
}
if (procNumber >= kRpcbindHandlers.size()) {
XLOG(DBG7) << "Invalid procedure: " << procNumber;
serializeReply(ser, accept_stat::PROC_UNAVAIL, xid);
return folly::unit;
}
auto handlerEntry = kRpcbindHandlers[procNumber];
XLOG(DBG7) << handlerEntry.name;
return (this->*handlerEntry.handler)(std::move(deser), std::move(ser), xid);
}
Rpcbindd::Rpcbindd(
folly::EventBase* evb,
std::shared_ptr<folly::Executor> threadPool,
const std::shared_ptr<StructuredLogger>& structuredLogger)
: proc_(std::make_shared<RpcbinddServerProcessor>()),
server_(RpcServer::create(
proc_,
evb,
std::move(threadPool),
structuredLogger)) {}
void Rpcbindd::initialize() {
server_->initialize(folly::SocketAddress("127.0.0.1", kPortmapPortNumber));
}
} // namespace facebook::eden
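
Since every procedure other than NULL currently answers PROC_UNAVAIL, a
plausible smoke test (assuming the classic rpcinfo tool is installed and the
server listens on TCP) is a null ping directly against port 111, program
100000, version 2:

$ rpcinfo -n 111 -t 127.0.0.1 100000 2   # NULL call: should report success
$ rpcinfo -p 127.0.0.1                   # uses DUMP: expect an error until it is implemented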


@@ -0,0 +1,59 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
#pragma once
// Implementation of the portmapper/rpcbind protocol as described in:
// https://tools.ietf.org/html/rfc1833
#include "eden/fs/inodes/InodeNumber.h"
#include "eden/fs/nfs/Mountd.h"
#include "eden/fs/nfs/rpc/Server.h"
#include "eden/fs/utils/PathFuncs.h"
namespace folly {
class Executor;
}
namespace facebook::eden {
class RpcbinddServerProcessor;
class Rpcbindd {
public:
/**
* Create a new Rpcbindd server. Rpcbind, also known as PortMapper, is
* an RPC program that allows finding other RPC programs running on a
* machine. Rpcbind runs on a well-known port (111). Other RPC servers
* running on the same machine are supposed to register themselves with the
* rpcbind server. Clients running on other machines can then query the
* rpcbind program to learn which port the other RPC services are running on.
*
* All the socket processing will be run on the EventBase passed in. This
* constructor must also be called on that EventBase thread.
*/
Rpcbindd(
folly::EventBase* evb,
std::shared_ptr<folly::Executor> threadPool,
const std::shared_ptr<StructuredLogger>& structuredLogger);
/**
* Start the rpcbind service
*/
void initialize();
Rpcbindd(const Rpcbindd&) = delete;
Rpcbindd(Rpcbindd&&) = delete;
Rpcbindd& operator=(const Rpcbindd&) = delete;
Rpcbindd& operator=(Rpcbindd&&) = delete;
private:
std::shared_ptr<RpcbinddServerProcessor> proc_;
std::shared_ptr<RpcServer> server_;
};
} // namespace facebook::eden
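
A minimal usage sketch, assuming the caller owns the EventBase and thread pool
and honors the threading requirement in the constructor comment:

std::shared_ptr<Rpcbindd> rpcbindd;
evb->runInEventBaseThreadAndWait([&] {
  // Construct and start on the EventBase thread, as the constructor requires.
  rpcbindd = std::make_shared<Rpcbindd>(evb, threadPool, structuredLogger);
  rpcbindd->initialize(); // binds the well-known portmapper port 111 on loopback
});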