/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License version 2.
 */

#include "ObjectStore.h"

#include <folly/Conv.h>
#include <folly/Executor.h>
#include <folly/futures/Future.h>
#include <folly/io/IOBuf.h>
#include <folly/logging/xlog.h> // needed for the XLOG() macros used below

#include <stdexcept>

#include "eden/fs/model/Blob.h"
#include "eden/fs/model/Tree.h"
#include "eden/fs/store/BackingStore.h"
#include "eden/fs/store/LocalStore.h"
#include "eden/fs/store/ObjectFetchContext.h"
#include "eden/fs/telemetry/EdenStats.h"

using folly::Future;
using folly::makeFuture;
using std::shared_ptr;
using std::string;
using std::unique_ptr;

namespace facebook::eden {

namespace {
// Amount by which a fetch's priority is lowered once the requesting
// process is considered fetch-heavy (see deprioritizeWhenFetchHeavy).
constexpr uint64_t kImportPriorityDeprioritizeAmount = 1;
} // namespace

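// ObjectStore must always be heap-allocated and owned by a shared_ptr:
// the fetch paths below capture shared_from_this(), so create() is the
// factory through which instances are made.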
std::shared_ptr<ObjectStore> ObjectStore::create(
    shared_ptr<LocalStore> localStore,
    shared_ptr<BackingStore> backingStore,
    shared_ptr<TreeCache> treeCache,
    shared_ptr<EdenStats> stats,
    folly::Executor::KeepAlive<folly::Executor> executor,
    std::shared_ptr<ProcessNameCache> processNameCache,
    std::shared_ptr<StructuredLogger> structuredLogger,
    std::shared_ptr<const EdenConfig> edenConfig) {
  return std::shared_ptr<ObjectStore>{new ObjectStore{
      std::move(localStore),
      std::move(backingStore),
      std::move(treeCache),
      std::move(stats),
      executor,
      processNameCache,
      structuredLogger,
      edenConfig}};
}

ObjectStore::ObjectStore(
    shared_ptr<LocalStore> localStore,
    shared_ptr<BackingStore> backingStore,
    shared_ptr<TreeCache> treeCache,
    shared_ptr<EdenStats> stats,
    folly::Executor::KeepAlive<folly::Executor> executor,
    std::shared_ptr<ProcessNameCache> processNameCache,
    std::shared_ptr<StructuredLogger> structuredLogger,
    std::shared_ptr<const EdenConfig> edenConfig)
    : metadataCache_{folly::in_place, kCacheSize},
      treeCache_{std::move(treeCache)},
      localStore_{std::move(localStore)},
      backingStore_{std::move(backingStore)},
      stats_{std::move(stats)},
      executor_{executor},
      pidFetchCounts_{std::make_unique<PidFetchCounts>()},
      processNameCache_(processNameCache),
      structuredLogger_(structuredLogger),
      edenConfig_(edenConfig) {}

ObjectStore::~ObjectStore() {}

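// Records a completed fetch on behalf of the client process that issued
// this request. Each time the process's cumulative fetch count reaches a
// multiple of the configured fetch-heavy threshold, a FetchHeavy event is
// logged via sendFetchHeavyEvent().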
void ObjectStore::updateProcessFetch(
    const ObjectFetchContext& fetchContext) const {
  if (auto pid = fetchContext.getClientPid()) {
    auto fetch_count = pidFetchCounts_->recordProcessFetch(pid.value());
    auto threshold = edenConfig_->fetchHeavyThreshold.getValue();
    if (fetch_count && threshold && !(fetch_count % threshold)) {
      sendFetchHeavyEvent(pid.value(), fetch_count);
    }
  }
}

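// Logs a FetchHeavy telemetry event for the given process. The pid is
// resolved to a process name first; if no name can be resolved, the
// event is silently dropped.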
void ObjectStore::sendFetchHeavyEvent(pid_t pid, uint64_t fetch_count) const {
  auto processName = processNameCache_->getSpacedProcessName(pid);
  if (processName.has_value()) {
    structuredLogger_->logEvent(
        FetchHeavy{processName.value(), pid, fetch_count});
  }
}

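// Lowers the priority of this fetch when the requesting process has
// already fetched at least fetchHeavyThreshold objects, so that
// fetch-heavy processes do not starve lighter ones.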
void ObjectStore::deprioritizeWhenFetchHeavy(
    ObjectFetchContext& context) const {
  auto pid = context.getClientPid();
  if (pid.has_value()) {
    auto fetch_count = pidFetchCounts_->getCountByPid(pid.value());
    auto threshold = edenConfig_->fetchHeavyThreshold.getValue();
    if (threshold && fetch_count >= threshold) {
      context.deprioritize(kImportPriorityDeprioritizeAmount);
    }
  }
}

RootId ObjectStore::parseRootId(folly::StringPiece rootId) {
  return backingStore_->parseRootId(rootId);
}

std::string ObjectStore::renderRootId(const RootId& rootId) {
  return backingStore_->renderRootId(rootId);
}

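// Fetches the root tree for a commit. Root trees are always requested
// from the BackingStore; a successful result is written through to the
// LocalStore and the in-memory TreeCache before being returned.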
Future<shared_ptr<const Tree>> ObjectStore::getRootTree(
    const RootId& rootId,
    ObjectFetchContext& context) const {
  XLOG(DBG3) << "getRootTree(" << rootId << ")";

  return backingStore_->getRootTree(rootId, context)
      .via(executor_)
      .thenValue([treeCache = treeCache_,
                  rootId,
                  localStore = localStore_,
                  edenConfig = edenConfig_](std::shared_ptr<const Tree> tree) {
        if (!tree) {
          throw std::domain_error(
              folly::to<string>("unable to import root ", rootId));
        }

        localStore->putTree(*tree);
        treeCache->insert(tree);

        return tree;
      });
}

folly::Future<std::shared_ptr<TreeEntry>> ObjectStore::getTreeEntryForRootId(
    const RootId& rootId,
    facebook::eden::TreeEntryType treeEntryType,
    facebook::eden::PathComponentPiece pathComponentPiece,
    ObjectFetchContext& context) const {
  XLOG(DBG3) << "getTreeEntryForRootId(" << rootId << ")";

  // TODO: We could cache the TreeEntry in the LocalStore, as is done for
  // Trees and Blobs.
  return backingStore_
      ->getTreeEntryForRootId(
          rootId, treeEntryType, pathComponentPiece, context)
      .via(executor_);
}

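// Fetches a tree by id. The lookup walks three layers: the in-memory
// TreeCache, the on-disk LocalStore, and finally the BackingStore. A hit
// in a lower layer is written back into the caches above it.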
Future<shared_ptr<const Tree>> ObjectStore::getTree(
    const Hash& id,
    ObjectFetchContext& fetchContext) const {
  // Check the in-memory TreeCache first, then the LocalStore, before
  // falling back to the BackingStore.

  // TODO: We should consider checking if we have in flight BackingStore
  // requests on this layer instead of only in the BackingStore. Consider the
  // case in which thread A and thread B both request a Tree at the same time.
  // Let's say thread A checks the LocalStore, then thread B checks the
  // LocalStore, gets the file from the BackingStore (making a request to the
  // server), then writes the Tree to the LocalStore. Now when thread A checks
  // for in flight requests in the BackingStore, it will not see any since
  // thread B has completely finished, so thread A will make a duplicate
  // request. If we were to mark here that we got a request on this layer, then
  // we could avoid that case.

  if (auto maybeTree = treeCache_->get(id)) {
    fetchContext.didFetch(
        ObjectFetchContext::Tree, id, ObjectFetchContext::FromMemoryCache);

    updateProcessFetch(fetchContext);

    return maybeTree;
  }

  return localStore_->getTree(id).thenValue([self = shared_from_this(),
                                             id,
                                             &fetchContext](
                                                shared_ptr<const Tree> tree) {
    if (tree) {
      XLOG(DBG4) << "tree " << id << " found in local store";
      fetchContext.didFetch(
          ObjectFetchContext::Tree, id, ObjectFetchContext::FromDiskCache);

      self->updateProcessFetch(fetchContext);
      self->treeCache_->insert(tree);
      return makeFuture(std::move(tree));
    }

    self->deprioritizeWhenFetchHeavy(fetchContext);

    // Load the tree from the BackingStore.
    return self->backingStore_->getTree(id, fetchContext)
        .via(self->executor_)
        .thenValue([self, id, &fetchContext, localStore = self->localStore_](
                       unique_ptr<const Tree> tree) {
          if (!tree) {
            // TODO: Perhaps we should do some short-term negative
            // caching?
            XLOG(DBG2) << "unable to find tree " << id;
            throw std::domain_error(
                folly::to<string>("tree ", id.toString(), " not found"));
          }

          // Promote to shared_ptr so we can store it in the cache and
          // return it.
          std::shared_ptr<const Tree> loadedTree{std::move(tree)};
          localStore->putTree(*loadedTree);
          self->treeCache_->insert(loadedTree);
          XLOG(DBG3) << "tree " << id << " retrieved from backing store";
          fetchContext.didFetch(
              ObjectFetchContext::Tree,
              id,
              ObjectFetchContext::FromBackingStore);

          self->updateProcessFetch(fetchContext);
          return shared_ptr<const Tree>(std::move(loadedTree));
        });
  });
}

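// Ensures the given blobs are present in local storage without returning
// their contents. This is a no-op when ids is empty.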
folly::Future<folly::Unit> ObjectStore::prefetchBlobs(
    HashRange ids,
    ObjectFetchContext& fetchContext) const {
  // In theory we could/should ask the localStore_ to filter the list
  // of ids down to just the set that we need to load, but there is no
  // bulk key existence check in RocksDB, so we would need to cause it
  // to load all the blocks of those keys into memory.
  // So for the moment we are committing a layering violation in the
  // interest of making things faster in practice by just asking the
  // Mercurial backing store to ensure that its local hgcache storage
  // has entries for all of the requested keys.
  if (ids.empty()) {
    return folly::unit;
  }
  return backingStore_->prefetchBlobs(ids, fetchContext).via(executor_);
}

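// Fetches a blob by id, first from the LocalStore and then from the
// BackingStore. A blob served by the BackingStore is written back to the
// LocalStore, and its metadata is recorded in the in-memory metadata
// cache if not already present.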
Future<shared_ptr<const Blob>> ObjectStore::getBlob(
    const Hash& id,
    ObjectFetchContext& fetchContext) const {
  auto self = shared_from_this();

  return localStore_->getBlob(id).thenValue([id, &fetchContext, self](
                                                shared_ptr<const Blob> blob) {
    if (blob) {
      // Not computing the BlobMetadata here because if the blob was found
      // in the local store, the LocalStore probably also has the metadata
      // already, and the caller may not even need the SHA-1 here. (If the
      // caller needed the SHA-1, they would have called getBlobMetadata
      // instead.)
      XLOG(DBG4) << "blob " << id << " found in local store";
      self->updateBlobStats(true, false);
      fetchContext.didFetch(
          ObjectFetchContext::Blob, id, ObjectFetchContext::FromDiskCache);

      self->updateProcessFetch(fetchContext);
      return makeFuture(shared_ptr<const Blob>(std::move(blob)));
    }

    self->deprioritizeWhenFetchHeavy(fetchContext);

    // Look in the BackingStore
    return self->backingStore_->getBlob(id, fetchContext)
        .via(self->executor_)
        .thenValue([self, &fetchContext, id](
                       unique_ptr<const Blob> loadedBlob) {
          if (loadedBlob) {
            XLOG(DBG3) << "blob " << id << " retrieved from backing store";
            self->updateBlobStats(false, true);
            fetchContext.didFetch(
                ObjectFetchContext::Blob,
                id,
                ObjectFetchContext::FromBackingStore);

            self->updateProcessFetch(fetchContext);

            // Quickly check the in-memory cache first, before doing the
            // expensive metadata computation. If the metadata is present in
            // the cache, it almost certainly exists in the local store too.
            if (!self->metadataCache_.rlock()->exists(id)) {
              auto metadata = self->localStore_->putBlob(id, loadedBlob.get());
              self->metadataCache_.wlock()->set(id, metadata);
            }
            return shared_ptr<const Blob>(std::move(loadedBlob));
          }

          XLOG(DBG2) << "unable to find blob " << id;
          self->updateBlobStats(false, false);
          // TODO: Perhaps we should do some short-term negative caching?
          throw std::domain_error(
              folly::to<string>("blob ", id.toString(), " not found"));
        });
  });
}

void ObjectStore::updateBlobStats(bool local, bool backing) const {
  ObjectStoreThreadStats& stats = stats_->getObjectStoreStatsForCurrentThread();
  stats.getBlobFromLocalStore.addValue(local);
  stats.getBlobFromBackingStore.addValue(backing);
}

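// Fetches the metadata (SHA-1 and size) for a blob. The lookup walks the
// in-memory metadata cache, then the LocalStore; as a last resort the
// full blob is downloaded from the BackingStore and its metadata is
// computed, cached, and returned.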
Future<BlobMetadata> ObjectStore::getBlobMetadata(
    const Hash& id,
    ObjectFetchContext& context) const {
  // Check in-memory cache
  {
    auto metadataCache = metadataCache_.wlock();
    auto cacheIter = metadataCache->find(id);
    if (cacheIter != metadataCache->end()) {
      updateBlobMetadataStats(true, false, false);
      context.didFetch(
          ObjectFetchContext::BlobMetadata,
          id,
          ObjectFetchContext::FromMemoryCache);

      updateProcessFetch(context);
      return cacheIter->second;
    }
  }

  auto self = shared_from_this();

  // Check local store
  return localStore_->getBlobMetadata(id).thenValue(
      [self, id, &context](std::optional<BlobMetadata>&& metadata) {
        if (metadata) {
          self->updateBlobMetadataStats(false, true, false);
          self->metadataCache_.wlock()->set(id, *metadata);
          context.didFetch(
              ObjectFetchContext::BlobMetadata,
              id,
              ObjectFetchContext::FromDiskCache);

          self->updateProcessFetch(context);
          return makeFuture(*metadata);
        }

        self->deprioritizeWhenFetchHeavy(context);

        // Check backing store
        //
        // TODO: It would be nice to add a smarter API to the BackingStore so
        // that we can query it just for the blob metadata if it supports
        // getting that without retrieving the full blob data.
        //
        // TODO: This should probably check the LocalStore for the blob first,
        // especially when we begin to expire entries in RocksDB.
        return self->backingStore_->getBlob(id, context)
            .via(self->executor_)
            .thenValue([self, id, &context](std::unique_ptr<Blob> blob) {
              if (blob) {
                self->updateBlobMetadataStats(false, false, true);
                auto metadata = self->localStore_->putBlob(id, blob.get());
                self->metadataCache_.wlock()->set(id, metadata);
                // I could see an argument for recording this fetch with
                // type Blob instead of BlobMetadata, but it's probably more
                // useful in context to know how many metadata fetches
                // occurred. Also, since backing stores don't directly
                // support fetching metadata, it should be clear.
                context.didFetch(
                    ObjectFetchContext::BlobMetadata,
                    id,
                    ObjectFetchContext::FromBackingStore);

                self->updateProcessFetch(context);
                return makeFuture(metadata);
              }

              self->updateBlobMetadataStats(false, false, false);
              throw std::domain_error(
                  folly::to<string>("blob ", id.toString(), " not found"));
            });
      });
}

void ObjectStore::updateBlobMetadataStats(bool memory, bool local, bool backing)
    const {
  ObjectStoreThreadStats& stats = stats_->getObjectStoreStatsForCurrentThread();
  stats.getBlobMetadataFromMemory.addValue(memory);
  stats.getBlobMetadataFromLocalStore.addValue(local);
  stats.getBlobMetadataFromBackingStore.addValue(backing);
}

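// Convenience accessors built on getBlobMetadata() for callers that only
// need a blob's SHA-1 or size.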
Future<Hash> ObjectStore::getBlobSha1(
    const Hash& id,
    ObjectFetchContext& context) const {
  return getBlobMetadata(id, context)
      .thenValue([](const BlobMetadata& metadata) { return metadata.sha1; });
}

Future<uint64_t> ObjectStore::getBlobSize(
    const Hash& id,
    ObjectFetchContext& context) const {
  return getBlobMetadata(id, context)
      .thenValue([](const BlobMetadata& metadata) { return metadata.size; });
}

} // namespace facebook::eden