mirror of
https://github.com/facebook/sapling.git
synced 2024-10-11 09:17:30 +03:00
96ef9dda5d
Summary: Introduced a new local commit metadata storage system, smallcommitmetadata, which stores a simple local JSON file for a mapping of (node, category) -> value. This data can be manipulated with the debugsmallcommitmetadata command, and can be consumed in the smartlog using the smallcommitmeta template func. Total number of entries can be limited with smallcommitmetadata.entrylimit configuration option, which defaults to 100. Reviewed By: DurhamG Differential Revision: D21673611 fbshipit-source-id: 2239a47867118dd86b15944058505ddf67548549
90 lines
3.2 KiB
Python
# Copyright (c) Facebook, Inc. and its affiliates.
|
|
#
|
|
# This software may be used and distributed according to the terms of the
|
|
# GNU General Public License version 2.
|
|
|
|
# smallcommitmetadata.py - stores a small amount of metadata associated with a commit
|
|
|
|
|
|
from . import json
|
|
from .node import bin, hex
|
|
from .util import altsortdict
|
|
|
|
|
|
class smallcommitmetadata(object):
    """Stores a small amount of metadata associated with a commit.

    Maintains a mapping of (node, category) -> data with a FIFO-limited
    number of entries, persisted as a JSON file named "commit_metadata"
    in the supplied vfs. Entries beyond ``entrylimit`` are evicted
    oldest-first.
    """

    def __init__(self, vfs, entrylimit):
        # vfs: filesystem abstraction used to read/write "commit_metadata"
        # entrylimit: maximum number of entries retained (FIFO eviction)
        self.vfs = vfs
        self.limit = entrylimit
        self.contents = altsortdict()
        self.reload()

    def reload(self):
        """Read the database from disk, replacing any in-memory state."""
        if not self.vfs.exists("commit_metadata"):
            self.contents = altsortdict()
            return
        try:
            allentries = json.loads(self.vfs.tryreadutf8("commit_metadata"))
            # Keep only the newest self.limit entries. A bare
            # allentries[-self.limit:] would be wrong when limit == 0,
            # because -0 == 0 and the slice would keep *every* entry.
            entries = allentries[-self.limit :] if self.limit > 0 else []
        except ValueError:
            # Unparsable/corrupt JSON: start from an empty database
            # rather than failing (best-effort local cache).
            entries = []
        for entry in entries:
            # Nodes are stored hex-encoded on disk; keys are binary in memory.
            self.contents[(bin(entry["node"]), entry["category"])] = entry["data"]

    def write(self):
        """Write the database to disk atomically."""
        with self.vfs("commit_metadata", "w", atomictemp=True) as f:
            entries = [
                {"node": hex(node), "category": category, "data": data}
                for ((node, category), data) in self.contents.items()
            ]
            json.dump(entries, f)

    def store(self, node, category, data):
        """Add a new entry with the specified node and category, and update
        the data on disk. Returns the removed (evicted) entry, if any."""
        self.contents[(node, category)] = data
        popped = None
        while len(self.contents) > self.limit:
            # Evict in insertion order (FIFO); last=False pops the oldest.
            popped = self.contents.popitem(last=False)
        self.write()
        return popped

    def delete(self, node, category):
        """Remove the entry with matching node and category and return its
        value.

        Raises KeyError if no such entry exists. NOTE(review): unlike
        store(), this does not persist to disk — callers must call write().
        """
        return self.contents.pop((node, category))

    def read(self, node, category):
        """Return the value of the entry with the specified node and
        category. Raises KeyError if no such entry exists."""
        return self.contents[(node, category)]

    def find(self, node=None, category=None):
        """Return a map of all entries with matching node and/or category.

        A None filter matches everything, so find() with both defaults
        returns a copy of all entries. The stored contents are unchanged.
        """
        return altsortdict(
            (
                ((node_, category_), data)
                for ((node_, category_), data) in self.contents.items()
                if node is None or node == node_
                if category is None or category == category_
            )
        )

    def finddelete(self, node=None, category=None):
        """Remove and return any entries with matching node and/or category.

        A None filter matches everything. NOTE(review): like delete(),
        this does not persist to disk — callers must call write().
        """
        entriestoremove = [
            ((node_, category_), data_)
            for ((node_, category_), data_) in self.contents.items()
            if node is None or node == node_
            if category is None or category == category_
        ]
        for (key, _value) in entriestoremove:
            del self.contents[key]
        return altsortdict(entriestoremove)

    def clear(self):
        """Remove and return all entries (in-memory only; not persisted)."""
        deleted = self.contents
        self.contents = altsortdict()
        return deleted
|