sapling/eden/integration/thrift_test.py


#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
import binascii
import hashlib
import os
import re
import subprocess
from pathlib import Path
from typing import Pattern, Union

from facebook.eden.ttypes import ScmFileStatus, SHA1Result, TimeSpec

from .lib import testcase


@testcase.eden_repo_test
# pyre-fixme[13]: Attribute `commit1` is never initialized.
# pyre-fixme[13]: Attribute `commit2` is never initialized.
# pyre-fixme[13]: Attribute `commit3` is never initialized.
class ThriftTest(testcase.EdenRepoTest):
    commit1: str
    commit2: str
    commit3: str

    def populate_repo(self) -> None:
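        """Create three commits: the initial contents, a commit that adds,
        modifies, and removes files, and a commit that reverts the change
        to bdir/file."""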
self.repo.write_file("hello", "hola\n")
self.repo.write_file("test_fetch1", "testing fetch\n")
self.repo.write_file("test_fetch2", "testing fetch\n")
self.repo.write_file("README", "docs\n")
self.repo.write_file("adir/file", "foo!\n")
self.repo.write_file("bdir/file", "bar!\n")
self.repo.symlink("slink", "hello")
self.commit1 = self.repo.commit("Initial commit.")
self.repo.write_file("bdir/file", "bar?\n")
self.repo.write_file("cdir/subdir/new.txt", "and improved")
self.repo.remove_file("README")
self.commit2 = self.repo.commit("Commit 2.")
# revert the change made to bdir/file
self.repo.write_file("bdir/file", "bar!\n")
self.commit3 = self.repo.commit("Commit 3.")

    def get_loaded_inodes_count(self, path: str) -> int:
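        """Count how many of the inode entries reported by debugInodeStatus()
        for the given path are currently loaded."""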
        with self.get_thrift_client() as client:
            result = client.debugInodeStatus(
                self.mount_path_bytes, os.fsencode(path), flags=0
            )

        inode_count = 0
        for item in result:
            assert item.entries is not None
            for inode in item.entries:
                if inode.loaded:
                    inode_count += 1
        return inode_count

    def test_pid_fetch_counts(self) -> None:
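        """Touch two files from a child process and check that
        getAccessCounts() reports two fetches for that process's pid."""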
        touch_p = subprocess.Popen(
            "touch test_fetch1 test_fetch2".split(), cwd=self.mount_path
        )
        touch_p.communicate()

        with self.get_thrift_client() as client:
            counts = client.getAccessCounts(1)
            accesses = counts.accessesByMount[self.mount_path_bytes]
            self.assertEqual(2, accesses.fetchCountsByPid[touch_p.pid])

    def test_list_mounts(self) -> None:
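        """listMounts() should report a single mount whose mountPoint matches
        this test's mount and whose client path lives inside the eden state
        directory."""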
        with self.get_thrift_client() as client:
            mounts = client.listMounts()
        self.assertEqual(1, len(mounts))

        mount = mounts[0]
        self.assertEqual(self.mount_path_bytes, mount.mountPoint)
        assert mount.edenClientPath is not None
        # The client path should always be inside the main eden directory
        # Path.relative_to() will throw a ValueError if self.eden.eden_dir is not a
        # directory prefix of mount.edenClientPath
        Path(os.fsdecode(mount.edenClientPath)).relative_to(self.eden.eden_dir)

    def test_get_sha1(self) -> None:
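        """getSHA1() should return the SHA-1 digest of each requested file's
        contents."""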
        expected_sha1_for_hello = hashlib.sha1(b"hola\n").digest()
        result_for_hello = SHA1Result(expected_sha1_for_hello)

        expected_sha1_for_adir_file = hashlib.sha1(b"foo!\n").digest()
        result_for_adir_file = SHA1Result(expected_sha1_for_adir_file)

        with self.get_thrift_client() as client:
            self.assertEqual(
                [result_for_hello, result_for_adir_file],
                client.getSHA1(self.mount_path_bytes, [b"hello", b"adir/file"]),
            )

    def test_get_sha1_throws_for_path_with_dot_components(self) -> None:
        with self.get_thrift_client() as client:
            results = client.getSHA1(self.mount_path_bytes, [b"./hello"])
        self.assertEqual(1, len(results))
        self.assert_error(
            results[0],
            re.compile(
                r".*PathComponentValidationError.*: PathComponent must not be \."
            ),
        )

    def test_get_sha1_throws_for_empty_string(self) -> None:
        with self.get_thrift_client() as client:
            results = client.getSHA1(self.mount_path_bytes, [b""])
        self.assertEqual(1, len(results))
        self.assert_error(results[0], "path cannot be the empty string")

    def test_get_sha1_throws_for_directory(self) -> None:
        with self.get_thrift_client() as client:
            results = client.getSHA1(self.mount_path_bytes, [b"adir"])
        self.assertEqual(1, len(results))
        self.assert_error(results[0], "adir: Is a directory")

    def test_get_sha1_throws_for_non_existent_file(self) -> None:
        with self.get_thrift_client() as client:
            results = client.getSHA1(self.mount_path_bytes, [b"i_do_not_exist"])
        self.assertEqual(1, len(results))
        self.assert_error(results[0], "i_do_not_exist: No such file or directory")

    def test_get_sha1_throws_for_symlink(self) -> None:
        """Fails because caller should resolve the symlink themselves."""
        with self.get_thrift_client() as client:
            results = client.getSHA1(self.mount_path_bytes, [b"slink"])
        self.assertEqual(1, len(results))
        self.assert_error(results[0], "slink: file is a symlink: Invalid argument")

    def assert_error(
        self, sha1result: SHA1Result, error_message: Union[str, Pattern]
    ) -> None:
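        """Assert that sha1result is an error result whose message equals the
        given string or matches the given regular expression."""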
        self.assertIsNotNone(sha1result, msg="Must pass a SHA1Result")
        self.assertEqual(
            SHA1Result.ERROR, sha1result.getType(), msg="SHA1Result must be an error"
        )
        error = sha1result.get_error()
        self.assertIsNotNone(error)
        if isinstance(error_message, str):
            self.assertEqual(error_message, error.message)
        else:
            self.assertRegex(error.message, error_message)

    def test_unload_free_inodes(self) -> None:
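        """Write 100 files to load inodes, then verify that
        unloadInodeForPath() with a zero age unloads at least 100 of them."""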
        for i in range(100):
            self.write_file("testfile%d.txt" % i, "unload test case")

        inode_count_before_unload = self.get_loaded_inodes_count("")
        self.assertGreater(
            inode_count_before_unload, 100, "Number of loaded inodes should increase"
        )

        age = TimeSpec()
        age.seconds = 0
        age.nanoSeconds = 0
        with self.get_thrift_client() as client:
            unload_count = client.unloadInodeForPath(self.mount_path_bytes, b"", age)

        self.assertGreaterEqual(
            unload_count, 100, "Number of loaded inodes should reduce after unload"
        )

    def test_unload_thrift_api_accepts_single_dot_as_root(self) -> None:
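        """unloadInodeForPath() should accept "." as a way of referring to
        the root of the mount."""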
self.write_file("testfile.txt", "unload test case")
age = TimeSpec()
age.seconds = 0
age.nanoSeconds = 0
with self.get_thrift_client() as client:
unload_count = client.unloadInodeForPath(self.mount_path_bytes, b".", age)
self.assertGreater(
unload_count, 0, "Number of loaded inodes should reduce after unload"
)

    def get_counter(self, name: str) -> float:
        return self.get_counters()[name]

    def test_diff_revisions(self) -> None:
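        """getScmStatusBetweenRevisions() should report the files added,
        modified, and removed between commit1 and commit2."""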
        # Convert the commit hashes to binary for the thrift call
        with self.get_thrift_client() as client:
            diff = client.getScmStatusBetweenRevisions(
                os.fsencode(self.mount),
                binascii.unhexlify(self.commit1),
                binascii.unhexlify(self.commit2),
            )

        self.assertDictEqual(diff.errors, {})
        self.assertDictEqual(
            diff.entries,
            {
                b"cdir/subdir/new.txt": ScmFileStatus.ADDED,
                b"bdir/file": ScmFileStatus.MODIFIED,
                b"README": ScmFileStatus.REMOVED,
            },
        )

    def test_diff_revisions_hex(self) -> None:
        # Watchman currently calls getScmStatusBetweenRevisions()
        # with 40-byte hexadecimal commit IDs, so make sure that works.
        with self.get_thrift_client() as client:
            diff = client.getScmStatusBetweenRevisions(
                os.fsencode(self.mount),
                self.commit1.encode("utf-8"),
                self.commit2.encode("utf-8"),
            )

        self.assertDictEqual(diff.errors, {})
        self.assertDictEqual(
            diff.entries,
            {
                b"cdir/subdir/new.txt": ScmFileStatus.ADDED,
                b"bdir/file": ScmFileStatus.MODIFIED,
                b"README": ScmFileStatus.REMOVED,
            },
        )

    def test_diff_revisions_with_reverted_file(self) -> None:
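        """A file that was modified and then reverted between the two commits
        should not appear in the status results."""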
        # Convert the commit hashes to binary for the thrift call
        with self.get_thrift_client() as client:
            diff = client.getScmStatusBetweenRevisions(
                os.fsencode(self.mount),
                binascii.unhexlify(self.commit1),
                binascii.unhexlify(self.commit3),
            )

        self.assertDictEqual(diff.errors, {})
        # bdir/file was modified twice between commit1 and commit3 but had a
        # net change of 0 so it should not be reported in the diff results
        self.assertDictEqual(
            diff.entries,
            {
                b"cdir/subdir/new.txt": ScmFileStatus.ADDED,
                b"README": ScmFileStatus.REMOVED,
            },
        )