mirror of
https://github.com/facebook/sapling.git
synced 2024-10-05 14:28:17 +03:00
nfs: implement the WRITE RPC
Summary: This simply writes the data passed in to the inode. Note that the current implementation has a protocol violation since it doesn't sync the data written to disk but advertises to the client that it did. This is completely wrong from a data consistency standpoint, but is probably fine for now. Once the code becomes closer to being production ready, this will be changed to honor what the client asks for. Reviewed By: kmancini Differential Revision: D26681614 fbshipit-source-id: 82ad7a141be3bbe365363b1f6692ae62f253423f
This commit is contained in:
parent
3d377e2df3
commit
e87ea9c085
@ -65,6 +65,21 @@ folly::Future<std::string> NfsDispatcherImpl::readlink(
|
||||
});
|
||||
}
|
||||
|
||||
/**
 * Write `data` at `offset` into the file referenced by `ino`.
 *
 * Looks the inode up in the InodeMap and forwards the write to it. The
 * returned WriteRes carries the number of bytes written; the pre/post
 * stats are left empty for now (see TODO below).
 */
folly::Future<NfsDispatcher::WriteRes> NfsDispatcherImpl::write(
    InodeNumber ino,
    std::unique_ptr<folly::IOBuf> data,
    off_t offset,
    ObjectFetchContext& /*context*/) {
  // The continuation owns the data buffer; it is moved into the inode's
  // write call once the inode lookup completes.
  auto performWrite = [data = std::move(data),
                       offset](const FileInodePtr& fileInode) mutable {
    // TODO(xavierd): Modify write to obtain pre and post stat of the file.
    return fileInode->write(std::move(data), offset)
        .thenValue([](size_t bytesWritten) {
          return WriteRes{bytesWritten, std::nullopt, std::nullopt};
        });
  };
  return inodeMap_->lookupFileInode(ino).thenValue(std::move(performWrite));
}
|
||||
|
||||
folly::Future<NfsDispatcher::CreateRes> NfsDispatcherImpl::create(
|
||||
InodeNumber dir,
|
||||
PathComponent name,
|
||||
|
@ -36,6 +36,12 @@ class NfsDispatcherImpl : public NfsDispatcher {
|
||||
InodeNumber ino,
|
||||
ObjectFetchContext& context) override;
|
||||
|
||||
// Write data at offset to the file referenced by ino; see NfsDispatcher::write
// for the contract and the meaning of the returned WriteRes.
folly::Future<NfsDispatcher::WriteRes> write(
    InodeNumber ino,
    std::unique_ptr<folly::IOBuf> data,
    off_t offset,
    ObjectFetchContext& context) override;
|
||||
|
||||
folly::Future<NfsDispatcher::CreateRes> create(
|
||||
InodeNumber ino,
|
||||
PathComponent name,
|
||||
|
@ -64,6 +64,31 @@ class NfsDispatcher {
|
||||
InodeNumber ino,
|
||||
ObjectFetchContext& context) = 0;
|
||||
|
||||
/**
 * Return value of the write method.
 */
struct WriteRes {
  /** Number of bytes written */
  size_t written;

  // NOTE: these comments previously described the *directory* around a file
  // creation — a copy/paste from the create result. For write, the pre/post
  // attributes describe the written file itself, as they are serialized into
  // the file's wcc_data in the WRITE3 reply.
  /** Attributes of the file prior to the write */
  std::optional<struct stat> preStat;
  /** Attributes of the file after the write */
  std::optional<struct stat> postStat;
};
|
||||
|
||||
/**
 * Write data at offset to the file referenced by the InodeNumber ino.
 *
 * The returned WriteRes contains the number of bytes actually written and,
 * optionally, the attributes of the written file before and after the write;
 * see the comment on the create method below for how such pre and post stat
 * pairs are used in the NFS reply (wcc_data).
 */
virtual folly::Future<WriteRes> write(
    InodeNumber ino,
    std::unique_ptr<folly::IOBuf> data,
    off_t offset,
    ObjectFetchContext& context) = 0;
|
||||
|
||||
/**
|
||||
* Return value of the create method.
|
||||
*/
|
||||
|
@ -475,12 +475,75 @@ folly::Future<folly::Unit> Nfsd3ServerProcessor::read(
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
/**
 * Generate a unique per-EdenFS instance write cookie.
 *
 * The client compares the verifier it gets back from the server against the
 * one from previous replies; a change tells it the server restarted and
 * unstable data may have been lost.
 *
 * TODO(xavierd): Note that for now this will always be 0 as this is to handle
 * the case where the server restart while the client isn't aware.
 */
writeverf3 makeWriteVerf() {
  return 0;
}
|
||||
|
||||
/**
 * Handle the NFSv3 WRITE RPC (RFC 1813): deserialize WRITE3args, forward the
 * data to the dispatcher, and serialize a WRITE3res reply into `ser`.
 *
 * The RPC is accepted up front (accept_stat::SUCCESS); any failure from the
 * dispatcher is reported inside the WRITE3res status, not as an RPC-level
 * error.
 */
folly::Future<folly::Unit> Nfsd3ServerProcessor::write(
    folly::io::Cursor deser,
    folly::io::QueueAppender ser,
    uint32_t xid) {
  serializeReply(ser, accept_stat::SUCCESS, xid);

  auto args = XdrTrait<WRITE3args>::deserialize(deser);

  static auto context =
      ObjectFetchContext::getNullContextWithCauseDetail("write");

  // I have no idea why NFS sent us data that we shouldn't write to the file,
  // but here it is, let's only take up to count bytes from the data.
  auto queue = folly::IOBufQueue();
  queue.append(std::move(args.data));
  auto data = queue.split(args.count);

  return dispatcher_
      ->write(args.file.ino, std::move(data), args.offset, *context)
      .thenTry([ser = std::move(ser)](
                   folly::Try<NfsDispatcher::WriteRes> writeTry) mutable {
        if (writeTry.hasException()) {
          // Dispatcher failure: encode the NFS error with an empty
          // WRITE3resfail body.
          WRITE3res res{
              {{exceptionToNfsError(writeTry.exception()), WRITE3resfail{}}}};
          XdrTrait<WRITE3res>::serialize(ser, res);
        } else {
          auto writeRes = std::move(writeTry).value();

          // NFS is limited to writing a maximum of 4GB (2^32) of data
          // per write call, so despite write returning a size_t, it
          // should always fit in a uint32_t.
          XDCHECK_LE(
              writeRes.written, size_t{std::numeric_limits<uint32_t>::max()});

          WRITE3res res{
              {{nfsstat3::NFS3_OK,
                WRITE3resok{
                    // wcc_data: the file's attributes before/after the write,
                    // when the dispatcher provided them.
                    wcc_data{
                        /*before*/ writeRes.preStat.has_value()
                            ? statToPreOpAttr(writeRes.preStat.value())
                            : pre_op_attr{},
                        /*after*/ writeRes.postStat.has_value()
                            ? post_op_attr{statToFattr3(
                                  writeRes.postStat.value())}
                            : post_op_attr{}},
                    /*count*/ folly::to_narrow(writeRes.written),
                    // TODO(xavierd): the following is a total lie and we
                    // should call inode->fdatasync() in the case where
                    // args.stable is anything other than
                    // stable_how::UNSTABLE. For testing purpose, this is
                    // OK.
                    /*committed*/ stable_how::FILE_SYNC,
                    /*verf*/ makeWriteVerf(),
                }}}};
          XdrTrait<WRITE3res>::serialize(ser, res);
        }

        return folly::unit;
      });
}
|
||||
|
||||
/**
|
||||
|
@ -379,8 +379,7 @@ struct READLINK3res
|
||||
|
||||
// WRITE Procedure:

/**
 * Write verifier returned in WRITE3resok; the client uses a change in this
 * value to detect that the server restarted.
 *
 * NOTE(review): RFC 1813 declares this as opaque[NFS3_WRITEVERFSIZE] (8
 * bytes); representing it as a uint64_t assumes the XDR serializer emits
 * exactly 8 bytes for it — confirm the on-wire encoding matches what clients
 * expect.
 */
using writeverf3 = uint64_t;

/** How durably the server must commit the written data (RFC 1813). */
enum class stable_how { UNSTABLE = 0, DATA_SYNC = 1, FILE_SYNC = 2 };
||||
|
Loading…
Reference in New Issue
Block a user