Mirror of https://github.com/borgbackup/borg.git (synced 2024-11-05 03:25:19 +03:00)

Commit 49de070799: Merge pull request #6382 from ThomasWaldmann/pyupgrade-38-master

    run pyupgrade (py38+)
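The diff below was generated mechanically by pyupgrade with a Python 3.8+ target. As a rough orientation (this sketch is not part of the commit; every name in it is invented for illustration), the recurring categories of rewrites are: %-formatting and str.format() calls become f-strings, redundant u'' prefixes and 'r' open() modes are dropped, set()/dict() wrapped around comprehensions become literal comprehensions, class Foo(object) becomes class Foo, @functools.lru_cache() loses its parentheses, explicit yield loops become yield from, and socket.error becomes its Python 3 alias OSError.

# Illustrative sketch of typical pyupgrade --py38-plus rewrites (hypothetical code, not taken from Borg).
import functools


def report(key, value):
    # old: return '%s = %s' % (key, value)  or  '{} = {}'.format(key, value)
    return f'{key} = {value}'


def read_text(path):
    # old: with open(path, 'r') as fd:  ('r' is already the default mode)
    with open(path) as fd:
        return fd.read()


def unique_ids(rows):
    # old: set(int(r) for r in rows)  or  set([int(r) for r in rows])
    return {int(r) for r in rows}


class Cache:  # old: class Cache(object):
    pass


@functools.lru_cache  # old: @functools.lru_cache()
def squared(n):
    return n * n


def forward(items):
    # old: for item in items: yield item
    yield from items


if __name__ == '__main__':
    print(report('segments', 42), unique_ids(['1', '2', '2']), squared(3))
    print(list(forward('abc')))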
@@ -40,7 +40,7 @@ master_doc = 'index'
 # General information about the project.
 project = 'Borg - Deduplicating Archiver'
-copyright = u'2010-2014 Jonas Borgström, 2015-2022 The Borg Collective (see AUTHORS file)'
+copyright = '2010-2014 Jonas Borgström, 2015-2022 The Borg Collective (see AUTHORS file)'
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the

@@ -35,12 +35,12 @@ def main():
 output = subprocess.check_output(objdump % filename, shell=True,
 stderr=subprocess.STDOUT)
 output = output.decode()
-versions = set(parse_version(match.group(1))
-for match in glibc_re.finditer(output))
+versions = {parse_version(match.group(1))
+for match in glibc_re.finditer(output)}
 requires_glibc = max(versions)
 overall_versions.add(requires_glibc)
 if verbose:
-print("%s %s" % (filename, format_version(requires_glibc)))
+print(f"{filename} {format_version(requires_glibc)}")
 except subprocess.CalledProcessError:
 if verbose:
 print("%s errored." % filename)

@@ -1,4 +1,3 @@
 """
 This script checks compatibility of crypto.blake2b_256 against hashlib.blake2b in CPython 3.6.
 """

@@ -12,7 +12,7 @@ from setuptools import Command
 def long_desc_from_readme():
-with open('README.rst', 'r') as fd:
+with open('README.rst') as fd:
 long_description = fd.read()
 # remove header, but have one \n before first headline
 start = long_description.find('What is BorgBackup?')

@@ -33,7 +33,7 @@ def format_metavar(option):
 elif option.nargs is None:
 return option.metavar
 else:
-raise ValueError('Can\'t format metavar %s, unknown nargs %s!' % (option.metavar, option.nargs))
+raise ValueError(f'Can\'t format metavar {option.metavar}, unknown nargs {option.nargs}!')
 class build_usage(Command):

@@ -367,7 +367,7 @@ class build_man(Command):
 subparsers = [action for action in parser._actions if 'SubParsersAction' in str(action.__class__)][0]
 for subcommand in subparsers.choices:
 write('| borg', '[common options]', command, subcommand, '...')
-self.see_also.setdefault(command, []).append('%s-%s' % (command, subcommand))
+self.see_also.setdefault(command, []).append(f'{command}-{subcommand}')
 else:
 if command == "borgfs":
 write(command, end='')

@@ -198,7 +198,7 @@ class BackupOSError(Exception):
 def __str__(self):
 if self.op:
-return '%s: %s' % (self.op, self.os_error)
+return f'{self.op}: {self.os_error}'
 else:
 return str(self.os_error)

@@ -464,7 +464,7 @@ class Archive:
 raise self.AlreadyExists(name)
 i = 0
 while True:
-self.checkpoint_name = '%s.checkpoint%s' % (name, i and ('.%d' % i) or '')
+self.checkpoint_name = '{}.checkpoint{}'.format(name, i and ('.%d' % i) or '')
 if self.checkpoint_name not in manifest.archives:
 break
 i += 1

@@ -1823,7 +1823,7 @@ class ArchiveChecker:
 chunks_healthy = item.chunks_healthy if has_chunks_healthy else chunks_current
 if has_chunks_healthy and len(chunks_current) != len(chunks_healthy):
 # should never happen, but there was issue #3218.
-logger.warning('{}: {}: Invalid chunks_healthy metadata removed!'.format(archive_name, item.path))
+logger.warning(f'{archive_name}: {item.path}: Invalid chunks_healthy metadata removed!')
 del item.chunks_healthy
 has_chunks_healthy = False
 chunks_healthy = chunks_current

@@ -1867,7 +1867,7 @@ class ArchiveChecker:
 # if this is first repair, remember the correct chunk IDs, so we can maybe heal the file later
 item.chunks_healthy = item.chunks
 if has_chunks_healthy and chunk_list == chunks_healthy:
-logger.info('{}: {}: Completely healed previously damaged file!'.format(archive_name, item.path))
+logger.info(f'{archive_name}: {item.path}: Completely healed previously damaged file!')
 del item.chunks_healthy
 item.chunks = chunk_list
 if 'size' in item:

@@ -1902,7 +1902,7 @@ class ArchiveChecker:
 logger.error(msg)
 def list_keys_safe(keys):
-return ', '.join((k.decode(errors='replace') if isinstance(k, bytes) else str(k) for k in keys))
+return ', '.join(k.decode(errors='replace') if isinstance(k, bytes) else str(k) for k in keys)
 def valid_item(obj):
 if not isinstance(obj, StableDict):

@@ -1972,7 +1972,7 @@ class ArchiveChecker:
 with cache_if_remote(self.repository) as repository:
 for i, info in enumerate(archive_infos):
 pi.show(i)
-logger.info('Analyzing archive {} ({}/{})'.format(info.name, i + 1, num_archives))
+logger.info(f'Analyzing archive {info.name} ({i + 1}/{num_archives})')
 archive_id = info.id
 if archive_id not in self.chunks:
 logger.error('Archive metadata block is missing!')

@@ -2008,7 +2008,7 @@ class ArchiveChecker:
 unused = {id_ for id_, entry in self.chunks.iteritems() if entry.refcount == 0}
 orphaned = unused - self.possibly_superseded
 if orphaned:
-logger.error('{} orphaned objects found!'.format(len(orphaned)))
+logger.error(f'{len(orphaned)} orphaned objects found!')
 self.error_found = True
 if self.repair and unused:
 logger.info('Deleting %d orphaned and %d superseded objects...' % (
@@ -377,7 +377,7 @@ class Archiver:
 else:
 manager.export(args.path)
 except IsADirectoryError:
-self.print_error("'{}' must be a file, not a directory".format(args.path))
+self.print_error(f"'{args.path}' must be a file, not a directory")
 return EXIT_ERROR
 return EXIT_SUCCESS

@@ -1191,7 +1191,7 @@ class Archiver:
 current_archive = manifest.archives.pop(archive_name)
 except KeyError:
 self.exit_code = EXIT_WARNING
-logger.warning('Archive {} not found ({}/{}).'.format(archive_name, i, len(archive_names)))
+logger.warning(f'Archive {archive_name} not found ({i}/{len(archive_names)}).')
 else:
 deleted = True
 if self.output_list:

@@ -1851,12 +1851,12 @@ class Archiver:
 value = default_values.get(key)
 if value is None:
 raise Error('The repository config is missing the %s key which has no default value' % key)
-print('%s = %s' % (key, value))
+print(f'{key} = {value}')
 for key in ['last_segment_checked', ]:
 value = config.get('repository', key, fallback=None)
 if value is None:
 continue
-print('%s = %s' % (key, value))
+print(f'{key} = {value}')
 if not args.list:
 if args.name is None:

@@ -2059,8 +2059,8 @@ class Archiver:
 def print_finding(info, wanted, data, offset):
 before = data[offset - context:offset]
 after = data[offset + len(wanted):offset + len(wanted) + context]
-print('%s: %s %s %s == %r %r %r' % (info, before.hex(), wanted.hex(), after.hex(),
-before, wanted, after))
+print('{}: {} {} {} == {!r} {!r} {!r}'.format(info, before.hex(), wanted.hex(), after.hex(),
+before, wanted, after))
 wanted = args.wanted
 try:

@@ -5032,7 +5032,7 @@ def sig_info_handler(sig_no, stack): # pragma: no cover
 total = loc['st'].st_size
 except Exception:
 pos, total = 0, 0
-logger.info("{0} {1}/{2}".format(path, format_file_size(pos), format_file_size(total)))
+logger.info(f"{path} {format_file_size(pos)}/{format_file_size(total)}")
 break
 if func in ('extract_item', ): # extract op
 path = loc['item'].path

@@ -5040,7 +5040,7 @@ def sig_info_handler(sig_no, stack): # pragma: no cover
 pos = loc['fd'].tell()
 except Exception:
 pos = 0
-logger.info("{0} {1}/???".format(path, format_file_size(pos)))
+logger.info(f"{path} {format_file_size(pos)}/???")
 break

@@ -5078,7 +5078,7 @@ def main(): # pragma: no cover
 except Error as e:
 msg = e.get_message()
 tb_log_level = logging.ERROR if e.traceback else logging.DEBUG
-tb = '%s\n%s' % (traceback.format_exc(), sysinfo())
+tb = f'{traceback.format_exc()}\n{sysinfo()}'
 # we might not have logging setup yet, so get out quickly
 print(msg, file=sys.stderr)
 if tb_log_level == logging.ERROR:

@@ -5091,7 +5091,7 @@ def main(): # pragma: no cover
 msg = e.get_message()
 msgid = type(e).__qualname__
 tb_log_level = logging.ERROR if e.traceback else logging.DEBUG
-tb = "%s\n%s" % (traceback.format_exc(), sysinfo())
+tb = f"{traceback.format_exc()}\n{sysinfo()}"
 exit_code = e.exit_code
 except RemoteRepository.RPCError as e:
 important = e.exception_class not in ('LockTimeout', ) and e.traceback

@@ -5108,18 +5108,18 @@ def main(): # pragma: no cover
 msg = 'Local Exception'
 msgid = 'Exception'
 tb_log_level = logging.ERROR
-tb = '%s\n%s' % (traceback.format_exc(), sysinfo())
+tb = f'{traceback.format_exc()}\n{sysinfo()}'
 exit_code = EXIT_ERROR
 except KeyboardInterrupt:
 msg = 'Keyboard interrupt'
 tb_log_level = logging.DEBUG
-tb = '%s\n%s' % (traceback.format_exc(), sysinfo())
+tb = f'{traceback.format_exc()}\n{sysinfo()}'
 exit_code = EXIT_SIGNAL_BASE + 2
 except SigTerm:
 msg = 'Received SIGTERM'
 msgid = 'Signal.SIGTERM'
 tb_log_level = logging.DEBUG
-tb = '%s\n%s' % (traceback.format_exc(), sysinfo())
+tb = f'{traceback.format_exc()}\n{sysinfo()}'
 exit_code = EXIT_SIGNAL_BASE + 15
 except SigHup:
 msg = 'Received SIGHUP.'
@@ -83,7 +83,7 @@ class SecurityManager:
 if not self.known():
 return False
 try:
-with open(self.key_type_file, 'r') as fd:
+with open(self.key_type_file) as fd:
 type = fd.read()
 return type == str(key.TYPE)
 except OSError as exc:

@@ -687,13 +687,13 @@ class LocalCache(CacheStatsMixin):
 fns = os.listdir(archive_path)
 # filenames with 64 hex digits == 256bit,
 # or compact indices which are 64 hex digits + ".compact"
-return set(unhexlify(fn) for fn in fns if len(fn) == 64) | \
-set(unhexlify(fn[:64]) for fn in fns if len(fn) == 72 and fn.endswith('.compact'))
+return {unhexlify(fn) for fn in fns if len(fn) == 64} | \
+{unhexlify(fn[:64]) for fn in fns if len(fn) == 72 and fn.endswith('.compact')}
 else:
 return set()
 def repo_archives():
-return set(info.id for info in self.manifest.archives.list())
+return {info.id for info in self.manifest.archives.list()}
 def cleanup_outdated(ids):
 for id in ids:

@@ -224,7 +224,7 @@ class DetachedIntegrityCheckedFile(IntegrityCheckedFile):
 @classmethod
 def read_integrity_file(cls, path):
 try:
-with open(cls.integrity_file_path(path), 'r') as fd:
+with open(cls.integrity_file_path(path)) as fd:
 return cls.parse_integrity_data(path, fd.read())
 except FileNotFoundError:
 logger.info('No integrity file found for %s', path)

@@ -379,7 +379,7 @@ class AESKeyBase(KeyBase):
 try:
 payload = self.cipher.decrypt(data)
 except IntegrityError as e:
-raise IntegrityError("Chunk %s: Could not decrypt [%s]" % (bin_to_hex(id), str(e)))
+raise IntegrityError(f"Chunk {bin_to_hex(id)}: Could not decrypt [{str(e)}]")
 if not decompress:
 return payload
 data = self.decompress(payload)

@@ -469,7 +469,7 @@ class Passphrase(str):
 msg = []
 for env_var in 'BORG_PASSPHRASE', 'BORG_PASSCOMMAND':
 env_var_set = os.environ.get(env_var) is not None
-msg.append('%s is %s.' % (env_var, 'set' if env_var_set else 'not set'))
+msg.append('{} is {}.'.format(env_var, 'set' if env_var_set else 'not set'))
 msg.append('Interactive password query failed.')
 raise NoPassphraseFailure(' '.join(msg)) from None
 else:

@@ -760,7 +760,7 @@ class KeyfileKey(ID_HMAC_SHA_256, KeyfileKeyBase):
 return path
 def load(self, target, passphrase):
-with open(target, 'r') as fd:
+with open(target) as fd:
 key_data = ''.join(fd.readlines()[1:])
 success = self._load(key_data, passphrase)
 if success:

@@ -775,7 +775,7 @@ class KeyfileKey(ID_HMAC_SHA_256, KeyfileKeyBase):
 raise Error('Aborting because key in "%s" already exists.' % target)
 key_data = self._save(passphrase)
 with SaveFile(target) as fd:
-fd.write('%s %s\n' % (self.FILE_ID, bin_to_hex(self.repository_id)))
+fd.write(f'{self.FILE_ID} {bin_to_hex(self.repository_id)}\n')
 fd.write(key_data)
 fd.write('\n')
 self.target = target

@@ -52,7 +52,7 @@ class KeyManager:
 if self.keyblob_storage == KeyBlobStorage.KEYFILE:
 k = KeyfileKey(self.repository)
 target = k.find_key()
-with open(target, 'r') as fd:
+with open(target) as fd:
 self.keyblob = ''.join(fd.readlines()[1:])
 elif self.keyblob_storage == KeyBlobStorage.REPO:

@@ -68,7 +68,7 @@ class KeyManager:
 self.repository.save_key(self.keyblob.encode('utf-8'))
 def get_keyfile_data(self):
-data = '%s %s\n' % (KeyfileKey.FILE_ID, bin_to_hex(self.repository.id))
+data = f'{KeyfileKey.FILE_ID} {bin_to_hex(self.repository.id)}\n'
 data += self.keyblob
 if not self.keyblob.endswith('\n'):
 data += '\n'

@@ -115,7 +115,7 @@ class KeyManager:
 lines = (len(binary) + 17) // 18
 repoid = bin_to_hex(self.repository.id)[:18]
 complete_checksum = sha256_truncated(binary, 12)
-export += 'id: {0:d} / {1} / {2} - {3}\n'.format(lines,
+export += 'id: {:d} / {} / {} - {}\n'.format(lines,
 grouped(repoid),
 grouped(complete_checksum),
 sha256_truncated((str(lines) + '/' + repoid + '/' + complete_checksum).encode('ascii'), 2))

@@ -124,7 +124,7 @@ class KeyManager:
 idx += 1
 binline = binary[:18]
 checksum = sha256_truncated(idx.to_bytes(2, byteorder='big') + binline, 2)
-export += '{0:2d}: {1} - {2}\n'.format(idx, grouped(bin_to_hex(binline)), checksum)
+export += f'{idx:2d}: {grouped(bin_to_hex(binline))} - {checksum}\n'
 binary = binary[18:]
 with dash_open(path, 'w') as fd:

@@ -188,7 +188,7 @@ class KeyManager:
 idx = 1
 # body line input
 while True:
-inline = input('{0:2d}: '.format(idx))
+inline = input(f'{idx:2d}: ')
 inline = inline.replace(' ', '')
 if inline == '':
 if yes('Abort import? [yN]:'):

@@ -204,7 +204,7 @@ class KeyManager:
 print("only characters 0-9 and a-f and '-' are valid, try again")
 continue
 if sha256_truncated(idx.to_bytes(2, byteorder='big') + part, 2) != checksum:
-print('line checksum did not match, try line {0} again'.format(idx))
+print(f'line checksum did not match, try line {idx} again')
 continue
 result += part
 if idx == lines:

@@ -22,7 +22,7 @@ class NonceManager:
 def get_local_free_nonce(self):
 try:
-with open(self.nonce_file, 'r') as fd:
+with open(self.nonce_file) as fd:
 return bytes_to_long(unhexlify(fd.read()))
 except FileNotFoundError:
 return None

@@ -234,7 +234,7 @@ class ItemCache:
 self.write_offset = write_offset
-class FuseBackend(object):
+class FuseBackend:
 """Virtual filesystem based on archive(s) to provide information to fuse
 """

@@ -233,7 +233,7 @@ class Manifest:
 for operation, requirements in feature_flags.items():
 if b'mandatory' in requirements:
-result[operation.decode()] = set([feature.decode() for feature in requirements[b'mandatory']])
+result[operation.decode()] = {feature.decode() for feature in requirements[b'mandatory']}
 return result
 def write(self):
@@ -91,13 +91,13 @@ def sysinfo():
 from ..fuse_impl import llfuse, BORG_FUSE_IMPL
 llfuse_name = llfuse.__name__ if llfuse else 'None'
 llfuse_version = (' %s' % llfuse.__version__) if llfuse else ''
-llfuse_info = '%s%s [%s]' % (llfuse_name, llfuse_version, BORG_FUSE_IMPL)
+llfuse_info = f'{llfuse_name}{llfuse_version} [{BORG_FUSE_IMPL}]'
 info = []
 if uname is not None:
-info.append('Platform: %s' % (' '.join(uname), ))
+info.append('Platform: {}'.format(' '.join(uname)))
 if linux_distribution is not None:
 info.append('Linux: %s %s %s' % linux_distribution)
-info.append('Borg: %s Python: %s %s msgpack: %s fuse: %s' % (
+info.append('Borg: {} Python: {} {} msgpack: {} fuse: {}'.format(
 borg_version, python_implementation, python_version, msgpack_version, llfuse_info))
 info.append('PID: %d CWD: %s' % (os.getpid(), os.getcwd()))
 info.append('sys.argv: %r' % sys.argv)

@@ -80,7 +80,7 @@ def interval(s):
 # range suffixes in ascending multiplier order
 ranges = [k for k, v in sorted(multiplier.items(), key=lambda t: t[1])]
 raise argparse.ArgumentTypeError(
-'Unexpected interval time unit "%s": expected one of %r' % (s[-1], ranges))
+f'Unexpected interval time unit "{s[-1]}": expected one of {ranges!r}')
 try:
 hours = int(number) * multiplier[suffix]

@@ -117,7 +117,7 @@ def ChunkerParams(s):
 return CHUNKER_PARAMS
 # this must stay last as it deals with old-style compat mode (no algorithm, 4 params, buzhash):
 if algo == CH_BUZHASH and count == 5 or count == 4: # [buzhash, ]chunk_min, chunk_max, chunk_mask, window_size
-chunk_min, chunk_max, chunk_mask, window_size = [int(p) for p in params[count - 4:]]
+chunk_min, chunk_max, chunk_mask, window_size = (int(p) for p in params[count - 4:])
 if not (chunk_min <= chunk_mask <= chunk_max):
 raise ValueError('required: chunk_min <= chunk_mask <= chunk_max')
 if chunk_min < 6:

@@ -150,7 +150,7 @@ def partial_format(format, mapping):
 """
 for key, value in mapping.items():
 key = re.escape(key)
-format = re.sub(r'(?<!\{)((\{%s\})|(\{%s:[^\}]*\}))' % (key, key),
+format = re.sub(fr'(?<!\{{)((\{{{key}\}})|(\{{{key}:[^\}}]*\}}))',
 lambda match: match.group(1).format_map(mapping),
 format)
 return format

@@ -397,7 +397,7 @@ class Location:
 valid = self._parse(repo)
 self.archive = m.group('archive')
 self.raw = repo_raw if not self.archive else repo_raw + self.raw
-self.processed = repo if not self.archive else '%s::%s' % (repo, self.archive)
+self.processed = repo if not self.archive else f'{repo}::{self.archive}'
 return valid
 def _parse(self, text):

@@ -484,9 +484,9 @@ class Location:
 path = '/./' + self.path # /./x = path x relative to cwd
 else:
 path = self.path
-return 'ssh://{}{}{}{}'.format('{}@'.format(self.user) if self.user else '',
+return 'ssh://{}{}{}{}'.format(f'{self.user}@' if self.user else '',
 self._host, # needed for ipv6 addrs
-':{}'.format(self.port) if self.port else '',
+f':{self.port}' if self.port else '',
 path)
 def with_timestamp(self, timestamp):

@@ -947,8 +947,8 @@ def ellipsis_truncate(msg, space):
 # if there is very little space, just show ...
 return '...' + ' ' * (space - ellipsis_width)
 if space < ellipsis_width + msg_width:
-return '%s...%s' % (swidth_slice(msg, space // 2 - ellipsis_width),
-swidth_slice(msg, -space // 2))
+return '{}...{}'.format(swidth_slice(msg, space // 2 - ellipsis_width),
+swidth_slice(msg, -space // 2))
 return msg + ' ' * (space - msg_width)

@@ -320,7 +320,7 @@ def create_filter_process(cmd, stream, stream_close, inbound=True):
 proc = popen_with_error_handling(cmd, stdin=subprocess.PIPE, stdout=filter_stream,
 log_prefix='filter-process: ', env=env)
 if not proc:
-raise Error('filter %s: process creation failed' % (cmd, ))
+raise Error(f'filter {cmd}: process creation failed')
 stream = proc.stdout if inbound else proc.stdin
 # inbound: do not close the pipe (this is the task of the filter process [== writer])
 # outbound: close the pipe, otherwise the filter process would not notice when we are done.

@@ -131,7 +131,7 @@ class OutputTimestamp:
 return format_time(self.ts, format_spec=format_spec)
 def __str__(self):
-return '{}'.format(self)
+return f'{self}'
 def isoformat(self):
 return isoformat_time(self.ts)

@@ -42,7 +42,7 @@ class TimeoutTimer:
 self.end_time = None
 def __repr__(self):
-return "<%s: start=%r end=%r timeout=%r sleep=%r>" % (
+return "<{}: start={!r} end={!r} timeout={!r} sleep={!r}>".format(
 self.__class__.__name__, self.start_time, self.end_time,
 self.timeout_interval, self.sleep_interval)

@@ -118,7 +118,7 @@ class ExclusiveLock:
 self.release()
 def __repr__(self):
-return "<%s: %r>" % (self.__class__.__name__, self.unique_name)
+return f"<{self.__class__.__name__}: {self.unique_name!r}>"
 def acquire(self, timeout=None, sleep=None):
 if timeout is None:

@@ -299,7 +299,7 @@ class LockRoster:
 def get(self, key):
 roster = self.load()
-return set(tuple(e) for e in roster.get(key, []))
+return {tuple(e) for e in roster.get(key, [])}
 def empty(self, *keys):
 return all(not self.get(key) for key in keys)

@@ -307,7 +307,7 @@ class LockRoster:
 def modify(self, key, op):
 roster = self.load()
 try:
-elements = set(tuple(e) for e in roster[key])
+elements = {tuple(e) for e in roster[key]}
 except KeyError:
 elements = set()
 if op == ADD:

@@ -374,7 +374,7 @@ class Lock:
 self.release()
 def __repr__(self):
-return "<%s: %r>" % (self.__class__.__name__, self.id)
+return f"<{self.__class__.__name__}: {self.id!r}>"
 def acquire(self, exclusive=None, remove=None, sleep=None):
 if exclusive is None:
@@ -46,7 +46,7 @@ configured = False
 def _log_warning(message, category, filename, lineno, file=None, line=None):
 # for warnings, we just want to use the logging system, not stderr or other files
-msg = "{0}:{1}: {2}: {3}".format(filename, lineno, category.__name__, message)
+msg = f"{filename}:{lineno}: {category.__name__}: {message}"
 logger = create_logger(__name__)
 # Note: the warning will look like coming from here,
 # but msg contains info about where it really comes from

@@ -82,7 +82,7 @@ def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', lev
 logger = logging.getLogger(__name__)
 borg_logger = logging.getLogger('borg')
 borg_logger.json = json
-logger.debug('using logging configuration read from "{0}"'.format(conf_fname))
+logger.debug(f'using logging configuration read from "{conf_fname}"')
 warnings.showwarning = _log_warning
 return None
 except Exception as err: # XXX be more precise

@@ -110,7 +110,7 @@ def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', lev
 configured = True
 logger = logging.getLogger(__name__)
 if err_msg:
-logger.warning('setup_logging for "{0}" failed with "{1}".'.format(conf_fname, err_msg))
+logger.warning(f'setup_logging for "{conf_fname}" failed with "{err_msg}".')
 logger.debug('using builtin fallback logging configuration')
 warnings.showwarning = _log_warning
 return handler

@@ -201,7 +201,7 @@ class PatternBase:
 return matches
 def __repr__(self):
-return '%s(%s)' % (type(self), self.pattern)
+return f'{type(self)}({self.pattern})'
 def __str__(self):
 return self.pattern_orig

@@ -315,7 +315,7 @@ _PATTERN_CLASSES = {
 ShellPattern,
 }
-_PATTERN_CLASS_BY_PREFIX = dict((i.PREFIX, i) for i in _PATTERN_CLASSES)
+_PATTERN_CLASS_BY_PREFIX = {i.PREFIX: i for i in _PATTERN_CLASSES}
 CmdTuple = namedtuple('CmdTuple', 'val cmd')

@@ -339,7 +339,7 @@ def get_pattern_class(prefix):
 try:
 return _PATTERN_CLASS_BY_PREFIX[prefix]
 except KeyError:
-raise ValueError("Unknown pattern style: {}".format(prefix)) from None
+raise ValueError(f"Unknown pattern style: {prefix}") from None
 def parse_pattern(pattern, fallback=FnmatchPattern, recurse_dir=True):

@@ -394,7 +394,7 @@ def parse_inclexcl_command(cmd_line_str, fallback=ShellPattern):
 try:
 val = get_pattern_class(remainder_str)
 except ValueError:
-raise argparse.ArgumentTypeError("Invalid pattern style: {}".format(remainder_str))
+raise argparse.ArgumentTypeError(f"Invalid pattern style: {remainder_str}")
 else:
 # determine recurse_dir based on command type
 recurse_dir = command_recurses_dir(cmd)

@@ -265,7 +265,7 @@ def getfqdn(name=''):
 name = socket.gethostname()
 try:
 addrs = socket.getaddrinfo(name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)
-except socket.error:
+except OSError:
 pass
 else:
 for addr in addrs:

@@ -288,7 +288,7 @@ hostname = hostname.split('.')[0]
 # thus, we offer BORG_HOST_ID where a user can set an own, unique id for each of his hosts.
 hostid = os.environ.get('BORG_HOST_ID')
 if not hostid:
-hostid = '%s@%s' % (fqdn, uuid.getnode())
+hostid = f'{fqdn}@{uuid.getnode()}'
 def get_process_id():
@@ -286,7 +286,7 @@ class RepositoryServer: # pragma: no cover
 else:
 tb_log_level = logging.ERROR
 msg = '%s Exception in RPC call' % e.__class__.__name__
-tb = '%s\n%s' % (traceback.format_exc(), sysinfo())
+tb = f'{traceback.format_exc()}\n{sysinfo()}'
 logging.error(msg)
 logging.log(tb_log_level, tb)
 exc = 'Remote Exception (see remote log for the traceback)'

@@ -470,7 +470,7 @@ def api(*, since, **kwargs_decorator):
 if restriction.get('dontcare', False):
 continue
-raise self.RPCServerOutdated("{0} {1}={2!s}".format(f.__name__, name, named[name]),
+raise self.RPCServerOutdated(f"{f.__name__} {name}={named[name]!s}",
 format_version(restriction['since']))
 return self.call(f.__name__, named, **extra)

@@ -622,7 +622,7 @@ This problem will go away as soon as the server has been upgraded to 1.0.7+.
 assert False, 'cleanup happened in Repository.__del__'
 def __repr__(self):
-return '<%s %s>' % (self.__class__.__name__, self.location.canonical_path())
+return f'<{self.__class__.__name__} {self.location.canonical_path()}>'
 def __enter__(self):
 return self

@@ -697,7 +697,7 @@ This problem will go away as soon as the server has been upgraded to 1.0.7+.
 if location.port:
 args += ['-p', str(location.port)]
 if location.user:
-args.append('%s@%s' % (location.user, location.host))
+args.append(f'{location.user}@{location.host}')
 else:
 args.append('%s' % location.host)
 return args

@@ -941,8 +941,7 @@ This problem will go away as soon as the server has been upgraded to 1.0.7+.
 return resp
 def get_many(self, ids, is_preloaded=False):
-for resp in self.call_many('get', [{'id': id} for id in ids], is_preloaded=is_preloaded):
-yield resp
+yield from self.call_many('get', [{'id': id} for id in ids], is_preloaded=is_preloaded)
 @api(since=parse_version('1.0.0'))
 def put(self, id, data, wait=True):

@@ -190,7 +190,7 @@ class Repository:
 assert False, "cleanup happened in Repository.__del__"
 def __repr__(self):
-return '<%s %s>' % (self.__class__.__name__, self.path)
+return f'<{self.__class__.__name__} {self.path}>'
 def __enter__(self):
 if self.do_create:

@@ -347,7 +347,7 @@ class Repository:
 nonce_path = os.path.join(self.path, 'nonce')
 try:
-with open(nonce_path, 'r') as fd:
+with open(nonce_path) as fd:
 return int.from_bytes(unhexlify(fd.read()), byteorder='big')
 except FileNotFoundError:
 return None

@@ -716,7 +716,7 @@ class Repository:
 except OSError as os_error:
 logger.warning('Failed to check free space before committing: ' + str(os_error))
 return
-logger.debug('check_free_space: required bytes {}, free bytes {}'.format(required_free_space, free_space))
+logger.debug(f'check_free_space: required bytes {required_free_space}, free bytes {free_space}')
 if free_space < required_free_space:
 if self.created:
 logger.error('Not enough free space to initialize repository at this location.')

@@ -924,7 +924,7 @@ class Repository:
 elif tag == TAG_COMMIT:
 continue
 else:
-msg = 'Unexpected tag {} in segment {}'.format(tag, segment)
+msg = f'Unexpected tag {tag} in segment {segment}'
 if report is None:
 raise self.CheckNeeded(msg)
 else:

@@ -1045,7 +1045,7 @@ class Repository:
 # self.index, self.segments, self.compact now reflect the state of the segment files up to <transaction_id>
 # We might need to add a commit tag if no committed segment is found
 if repair and segments_transaction_id is None:
-report_error('Adding commit tag to segment {}'.format(transaction_id))
+report_error(f'Adding commit tag to segment {transaction_id}')
 self.io.segment = transaction_id + 1
 self.io.write_commit()
 if not partial:

@@ -1484,7 +1484,7 @@ class LoggedIO:
 # Repository.scan() calls us with segment > 0 when it continues an ongoing iteration
 # from a marker position - but then we have checked the magic before already.
 if fd.read(MAGIC_LEN) != MAGIC:
-raise IntegrityError('Invalid segment magic [segment {}, offset {}]'.format(segment, 0))
+raise IntegrityError(f'Invalid segment magic [segment {segment}, offset {0}]')
 offset = MAGIC_LEN
 header = fd.read(self.header_fmt.size)
 while header:

@@ -1613,7 +1613,7 @@ class LoggedIO:
 data_size = len(data)
 if data_size > MAX_DATA_SIZE:
 # this would push the segment entry size beyond MAX_OBJECT_SIZE.
-raise IntegrityError('More than allowed put data [{} > {}]'.format(data_size, MAX_DATA_SIZE))
+raise IntegrityError(f'More than allowed put data [{data_size} > {MAX_DATA_SIZE}]')
 fd = self.get_write_fd(want_new=(id == Manifest.MANIFEST_ID), raise_full=raise_full)
 size = data_size + self.put_header_fmt.size
 offset = self.offset
@@ -33,7 +33,7 @@ def translate(pat, match_end=r"\Z"):
 if i + 1 < n and pat[i] == "*" and pat[i + 1] == sep:
 # **/ == wildcard for 0+ full (relative) directory names with trailing slashes; the forward slash stands
 # for the platform-specific path separator
-res += r"(?:[^\%s]*\%s)*" % (sep, sep)
+res += fr"(?:[^\{sep}]*\{sep})*"
 i += 2
 else:
 # * == wildcard for name parts (does not cross path separator)

@@ -58,7 +58,7 @@ def unopened_tempfile():
 yield os.path.join(tempdir, "file")
-@functools.lru_cache()
+@functools.lru_cache
 def are_symlinks_supported():
 with unopened_tempfile() as filepath:
 try:

@@ -70,7 +70,7 @@ def are_symlinks_supported():
 return False
-@functools.lru_cache()
+@functools.lru_cache
 def are_hardlinks_supported():
 if not hasattr(os, 'link'):
 # some pythons do not have os.link

@@ -89,7 +89,7 @@ def are_hardlinks_supported():
 return False
-@functools.lru_cache()
+@functools.lru_cache
 def are_fifos_supported():
 with unopened_tempfile() as filepath:
 try:

@@ -104,7 +104,7 @@ def are_fifos_supported():
 return False
-@functools.lru_cache()
+@functools.lru_cache
 def is_utime_fully_supported():
 with unopened_tempfile() as filepath:
 # Some filesystems (such as SSHFS) don't support utime on symlinks

@@ -124,7 +124,7 @@ def is_utime_fully_supported():
 return False
-@functools.lru_cache()
+@functools.lru_cache
 def is_birthtime_fully_supported():
 if not hasattr(os.stat_result, 'st_birthtime'):
 return False

@@ -172,9 +172,9 @@ class BaseTestCase(unittest.TestCase):
 @contextmanager
 def assert_creates_file(self, path):
-assert not os.path.exists(path), '{} should not exist'.format(path)
+assert not os.path.exists(path), f'{path} should not exist'
 yield
-assert os.path.exists(path), '{} should exist'.format(path)
+assert os.path.exists(path), f'{path} should exist'
 def assert_dirs_equal(self, dir1, dir2, **kwargs):
 diff = filecmp.dircmp(dir1, dir2)

@@ -293,7 +293,7 @@ class BaseTestCase(unittest.TestCase):
 if os.path.ismount(mountpoint) == mounted:
 return
 time.sleep(0.1)
-message = 'Waiting for %s of %s' % ('mount' if mounted else 'umount', mountpoint)
+message = 'Waiting for {} of {}'.format('mount' if mounted else 'umount', mountpoint)
 raise TimeoutError(message)
 @contextmanager
@@ -55,10 +55,10 @@ def tests_stats_progress(stats, monkeypatch, columns=80):
 def test_stats_format(stats):
 assert str(stats) == """\
 This archive: 20 B 10 B 10 B"""
-s = "{0.osize_fmt}".format(stats)
+s = f"{stats.osize_fmt}"
 assert s == "20 B"
 # kind of redundant, but id is variable so we can't match reliably
-assert repr(stats) == '<Statistics object at {:#x} (20, 10, 10)>'.format(id(stats))
+assert repr(stats) == f'<Statistics object at {id(stats):#x} (20, 10, 10)>'
 class MockCache:

@@ -1867,7 +1867,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
 with Cache(repository, key, manifest) as cache:
 cache.begin_txn()
-cache.cache_config.mandatory_features = set(['unknown-feature'])
+cache.cache_config.mandatory_features = {'unknown-feature'}
 cache.commit()
 if self.FORK_DEFAULT:

@@ -1891,7 +1891,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 repository._location = Location(self.repository_location)
 manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
 with Cache(repository, key, manifest) as cache:
-assert cache.cache_config.mandatory_features == set([])
+assert cache.cache_config.mandatory_features == set()
 def test_progress_on(self):
 self.create_regular_file('file1', size=1024 * 80)

@@ -2748,7 +2748,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 pass
 with open(assert_data_file, 'rb') as _in:
 assert_data = pickle.load(_in)
-print('\nLock.migrate_lock(): assert_data = %r.' % (assert_data, ), file=sys.stderr, flush=True)
+print(f'\nLock.migrate_lock(): assert_data = {assert_data!r}.', file=sys.stderr, flush=True)
 exception = assert_data['exception']
 if exception is not None:
 extracted_tb = assert_data['exception.extr_tb']

@@ -3089,14 +3089,14 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 repo_id = self._extract_repository_id(self.repository_path)
 self.cmd('key', 'export', self.repository_location, export_file)
-with open(export_file, 'r') as fd:
+with open(export_file) as fd:
 export_contents = fd.read()
 assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
 key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
-with open(key_file, 'r') as fd:
+with open(key_file) as fd:
 key_contents = fd.read()
 assert key_contents == export_contents

@@ -3105,7 +3105,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 self.cmd('key', 'import', self.repository_location, export_file)
-with open(key_file, 'r') as fd:
+with open(key_file) as fd:
 key_contents2 = fd.read()
 assert key_contents2 == key_contents

@@ -3117,7 +3117,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 self.cmd('key', 'export', self.repository_location, exported_key_file)
 key_file = os.path.join(self.keys_path, os.listdir(self.keys_path)[0])
-with open(key_file, 'r') as fd:
+with open(key_file) as fd:
 key_contents = fd.read()
 os.unlink(key_file)

@@ -3126,7 +3126,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 self.cmd('key', 'import', self.repository_location, exported_key_file)
 assert not os.path.isfile(key_file), '"borg key import" should respect BORG_KEY_FILE'
-with open(imported_key_file, 'r') as fd:
+with open(imported_key_file) as fd:
 imported_key_contents = fd.read()
 assert imported_key_contents == key_contents

@@ -3136,7 +3136,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 repo_id = self._extract_repository_id(self.repository_path)
 self.cmd('key', 'export', self.repository_location, export_file)
-with open(export_file, 'r') as fd:
+with open(export_file) as fd:
 export_contents = fd.read()
 assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')

@@ -3167,7 +3167,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 repo_id = self._extract_repository_id(self.repository_path)
 self.cmd('key', 'export', '--qr-html', self.repository_location, export_file)
-with open(export_file, 'r', encoding='utf-8') as fd:
+with open(export_file, encoding='utf-8') as fd:
 export_contents = fd.read()
 assert bin_to_hex(repo_id) in export_contents

@@ -3221,7 +3221,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
 self.cmd('key', 'export', '--paper', self.repository_location, export_file)
-with open(export_file, 'r') as fd:
+with open(export_file) as fd:
 export_contents = fd.read()
 assert export_contents == """To restore key use borg key import --paper /path/to/repo

@@ -3284,7 +3284,7 @@ id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02
 dump_file = self.output_path + '/dump'
 output = self.cmd('debug', 'dump-manifest', self.repository_location, dump_file)
 assert output == ""
-with open(dump_file, "r") as f:
+with open(dump_file) as f:
 result = json.load(f)
 assert 'archives' in result
 assert 'config' in result

@@ -3299,7 +3299,7 @@ id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02
 dump_file = self.output_path + '/dump'
 output = self.cmd('debug', 'dump-archive', self.repository_location + "::test", dump_file)
 assert output == ""
-with open(dump_file, "r") as f:
+with open(dump_file) as f:
 result = json.load(f)
 assert '_name' in result
 assert '_manifest_entry' in result

@@ -4144,7 +4144,7 @@ class DiffArchiverTestCase(ArchiverTestCaseBase):
 # File contents changed (deleted and replaced with a new file)
 change = 'B' if can_compare_ids else '{:<19}'.format('modified')
 assert 'file_replaced' in output # added to debug #3494
-assert '{} input/file_replaced'.format(change) in output
+assert f'{change} input/file_replaced' in output
 # File unchanged
 assert 'input/file_unchanged' not in output

@@ -4174,9 +4174,9 @@ class DiffArchiverTestCase(ArchiverTestCaseBase):
 # should notice the changes in both links. However, the symlink
 # pointing to the file is not changed.
 change = '0 B' if can_compare_ids else '{:<19}'.format('modified')
-assert '{} input/empty'.format(change) in output
+assert f'{change} input/empty' in output
 if are_hardlinks_supported():
-assert '{} input/hardlink_contents_changed'.format(change) in output
+assert f'{change} input/hardlink_contents_changed' in output
 if are_symlinks_supported():
 assert 'input/link_target_contents_changed' not in output
@@ -147,7 +147,7 @@ def test_obfuscate():
 # 2 id bytes compression, 2 id bytes obfuscator. 4 length bytes
 assert len(data) + 8 <= len(compressed) <= len(data) * 101 + 8
 # compressing 100 times the same data should give at least 50 different result sizes
-assert len(set(len(compressor.compress(data)) for i in range(100))) > 50
+assert len({len(compressor.compress(data)) for i in range(100)}) > 50
 cs = CompressionSpec('obfuscate,2,lz4')
 assert isinstance(cs.inner.compressor, LZ4)

@@ -158,7 +158,7 @@ def test_obfuscate():
 min_compress, max_compress = 0.2, 0.001 # estimate compression factor outer boundaries
 assert max_compress * len(data) + 8 <= len(compressed) <= min_compress * len(data) * 1001 + 8
 # compressing 100 times the same data should give multiple different result sizes
-assert len(set(len(compressor.compress(data)) for i in range(100))) > 10
+assert len({len(compressor.compress(data)) for i in range(100)}) > 10
 cs = CompressionSpec('obfuscate,6,zstd,3')
 assert isinstance(cs.inner.compressor, ZSTD)

@@ -169,7 +169,7 @@ def test_obfuscate():
 min_compress, max_compress = 0.2, 0.001 # estimate compression factor outer boundaries
 assert max_compress * len(data) + 8 <= len(compressed) <= min_compress * len(data) * 10000001 + 8
 # compressing 100 times the same data should give multiple different result sizes
-assert len(set(len(compressor.compress(data)) for i in range(100))) > 90
+assert len({len(compressor.compress(data)) for i in range(100)}) > 90
 cs = CompressionSpec('obfuscate,2,auto,zstd,10')
 assert isinstance(cs.inner.compressor, Auto)

@@ -180,7 +180,7 @@ def test_obfuscate():
 min_compress, max_compress = 0.2, 0.001 # estimate compression factor outer boundaries
 assert max_compress * len(data) + 8 <= len(compressed) <= min_compress * len(data) * 1001 + 8
 # compressing 100 times the same data should give multiple different result sizes
-assert len(set(len(compressor.compress(data)) for i in range(100))) > 10
+assert len({len(compressor.compress(data)) for i in range(100)}) > 10
 cs = CompressionSpec('obfuscate,110,none')
 assert isinstance(cs.inner.compressor, CNONE)

@@ -1,4 +1,3 @@
 import pytest
 from ..crypto.file_integrity import IntegrityCheckedFile, DetachedIntegrityCheckedFile, FileIntegrityError

@@ -225,7 +225,7 @@ class TestLocationWithoutEnv:
 monkeypatch.delenv('BORG_REPO', raising=False)
 test_pid = os.getpid()
 assert repr(Location('/some/path::archive{pid}')) == \
-"Location(proto='file', user=None, host=None, port=None, path='/some/path', archive='archive{}')".format(test_pid)
+f"Location(proto='file', user=None, host=None, port=None, path='/some/path', archive='archive{test_pid}')"
 location_time1 = Location('/some/path::archive{now:%s}')
 sleep(1.1)
 location_time2 = Location('/some/path::archive{now:%s}')

@@ -259,11 +259,11 @@ class TestLocationWithEnv:
 from borg.platform import hostname
 monkeypatch.setenv('BORG_REPO', 'ssh://user@host:1234/{hostname}')
 assert repr(Location('::archive')) == \
-"Location(proto='ssh', user='user', host='host', port=1234, path='/{}', archive='archive')".format(hostname)
+f"Location(proto='ssh', user='user', host='host', port=1234, path='/{hostname}', archive='archive')"
 assert repr(Location('::')) == \
-"Location(proto='ssh', user='user', host='host', port=1234, path='/{}', archive=None)".format(hostname)
+f"Location(proto='ssh', user='user', host='host', port=1234, path='/{hostname}', archive=None)"
 assert repr(Location()) == \
-"Location(proto='ssh', user='user', host='host', port=1234, path='/{}', archive=None)".format(hostname)
+f"Location(proto='ssh', user='user', host='host', port=1234, path='/{hostname}', archive=None)"
 def test_file(self, monkeypatch):
 monkeypatch.setenv('BORG_REPO', 'file:///some/path')

@@ -380,7 +380,7 @@ class MockArchive:
 self.id = id
 def __repr__(self):
-return "{0}: {1}".format(self.id, self.ts.isoformat())
+return f"{self.id}: {self.ts.isoformat()}"
 @pytest.mark.parametrize(

@@ -14,7 +14,7 @@ class TestLRUCache:
 for i, x in enumerate('abc'):
 c[x] = i
 assert len(c) == 2
-assert c.items() == set([('b', 1), ('c', 2)])
+assert c.items() == {('b', 1), ('c', 2)}
 assert 'a' not in c
 assert 'b' in c
 with pytest.raises(KeyError):
@@ -1,4 +1,3 @@
 import pytest
 from ..nanorst import rst_to_text

@@ -37,7 +37,7 @@ class TestNonceManager:
 self.repository = None
 def cache_nonce(self):
-with open(os.path.join(get_security_dir(self.repository.id_str), 'nonce'), "r") as fd:
+with open(os.path.join(get_security_dir(self.repository.id_str), 'nonce')) as fd:
 return fd.read()
 def set_cache_nonce(self, nonce):

@@ -204,7 +204,7 @@ def use_normalized_unicode():
 def _make_test_patterns(pattern):
 return [PathPrefixPattern(pattern),
 FnmatchPattern(pattern),
-RegexPattern("^{}/foo$".format(pattern)),
+RegexPattern(f"^{pattern}/foo$"),
 ShellPattern(pattern),
 ]

@@ -275,7 +275,7 @@ def test_exclude_patterns_from_file(tmpdir, lines, expected):
 def evaluate(filename):
 patterns = []
-load_exclude_file(open(filename, "rt"), patterns)
+load_exclude_file(open(filename), patterns)
 matcher = PatternMatcher(fallback=True)
 matcher.add_inclexcl(patterns)
 return [path for path in files if matcher.match(path)]

@@ -306,7 +306,7 @@ def test_load_patterns_from_file(tmpdir, lines, expected_roots, expected_numpatt
 def evaluate(filename):
 roots = []
 inclexclpatterns = []
-load_pattern_file(open(filename, "rt"), roots, inclexclpatterns)
+load_pattern_file(open(filename), roots, inclexclpatterns)
 return roots, len(inclexclpatterns)
 patternfile = tmpdir.join("patterns.txt")

@@ -356,7 +356,7 @@ def test_load_invalid_patterns_from_file(tmpdir, lines):
 with pytest.raises(argparse.ArgumentTypeError):
 roots = []
 inclexclpatterns = []
-load_pattern_file(open(filename, "rt"), roots, inclexclpatterns)
+load_pattern_file(open(filename), roots, inclexclpatterns)
 @pytest.mark.parametrize("lines, expected", [

@@ -400,7 +400,7 @@ def test_inclexcl_patterns_from_file(tmpdir, lines, expected):
 matcher = PatternMatcher(fallback=True)
 roots = []
 inclexclpatterns = []
-load_pattern_file(open(filename, "rt"), roots, inclexclpatterns)
+load_pattern_file(open(filename), roots, inclexclpatterns)
 matcher.add_inclexcl(inclexclpatterns)
 return [path for path in files if matcher.match(path)]
@@ -53,7 +53,7 @@ def user_exists(username):
 return False
-@functools.lru_cache()
+@functools.lru_cache
 def are_acls_working():
 with unopened_tempfile() as filepath:
 open(filepath, 'w').close()

@@ -118,12 +118,12 @@ class PlatformLinuxTestCase(BaseTestCase):
 # but in practice they seem to be out there and must not make our code explode.
 file = tempfile.NamedTemporaryFile()
 self.assert_equal(self.get_acl(file.name), {})
-nothing_special = 'user::rw-\ngroup::r--\nmask::rw-\nother::---\n'.encode('ascii')
+nothing_special = b'user::rw-\ngroup::r--\nmask::rw-\nother::---\n'
 # TODO: can this be tested without having an existing system user übel with uid 666 gid 666?
-user_entry = 'user:übel:rw-:666'.encode('utf-8')
-user_entry_numeric = 'user:666:rw-:666'.encode('ascii')
-group_entry = 'group:übel:rw-:666'.encode('utf-8')
-group_entry_numeric = 'group:666:rw-:666'.encode('ascii')
+user_entry = 'user:übel:rw-:666'.encode()
+user_entry_numeric = b'user:666:rw-:666'
+group_entry = 'group:übel:rw-:666'.encode()
+group_entry_numeric = b'group:666:rw-:666'
 acl = b'\n'.join([nothing_special, user_entry, group_entry])
 self.set_acl(file.name, access=acl, numeric_ids=False)
 acl_access = self.get_acl(file.name, numeric_ids=False)['acl_access']

@@ -528,14 +528,14 @@ class NonceReservation(RepositoryTestCaseBase):
 self.repository.commit_nonce_reservation(0x200, 15)
 self.repository.commit_nonce_reservation(0x200, None)
-with open(os.path.join(self.repository.path, "nonce"), "r") as fd:
+with open(os.path.join(self.repository.path, "nonce")) as fd:
 assert fd.read() == "0000000000000200"
 with pytest.raises(Exception):
 self.repository.commit_nonce_reservation(0x200, 15)
 self.repository.commit_nonce_reservation(0x400, 0x200)
-with open(os.path.join(self.repository.path, "nonce"), "r") as fd:
+with open(os.path.join(self.repository.path, "nonce")) as fd:
 assert fd.read() == "0000000000000400"

@@ -710,7 +710,7 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
 return sorted(int(n) for n in os.listdir(os.path.join(self.tmppath, 'repository', 'data', '0')) if n.isdigit())[-1]
 def open_index(self):
-return NSIndex.read(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())))
+return NSIndex.read(os.path.join(self.tmppath, 'repository', f'index.{self.get_head()}'))
 def corrupt_object(self, id_):
 idx = self.open_index()

@@ -723,18 +723,18 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
 os.unlink(os.path.join(self.tmppath, 'repository', 'data', '0', str(segment)))
 def delete_index(self):
-os.unlink(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())))
+os.unlink(os.path.join(self.tmppath, 'repository', f'index.{self.get_head()}'))
 def rename_index(self, new_name):
-os.rename(os.path.join(self.tmppath, 'repository', 'index.{}'.format(self.get_head())),
+os.rename(os.path.join(self.tmppath, 'repository', f'index.{self.get_head()}'),
 os.path.join(self.tmppath, 'repository', new_name))
 def list_objects(self):
-return set(int(key) for key in self.repository.list())
+return {int(key) for key in self.repository.list()}
 def test_repair_corrupted_segment(self):
 self.add_objects([[1, 2, 3], [4, 5], [6]])
-self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())
+self.assert_equal({1, 2, 3, 4, 5, 6}, self.list_objects())
 self.check(status=True)
 self.corrupt_object(5)
 self.assert_raises(IntegrityError, lambda: self.get_objects(5))

@@ -746,22 +746,22 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
 self.check(repair=True, status=True)
 self.get_objects(4)
 self.check(status=True)
-self.assert_equal(set([1, 2, 3, 4, 6]), self.list_objects())
+self.assert_equal({1, 2, 3, 4, 6}, self.list_objects())
 def test_repair_missing_segment(self):
 self.add_objects([[1, 2, 3], [4, 5, 6]])
-self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())
+self.assert_equal({1, 2, 3, 4, 5, 6}, self.list_objects())
 self.check(status=True)
 self.delete_segment(2)
 self.repository.rollback()
 self.check(repair=True, status=True)
-self.assert_equal(set([1, 2, 3]), self.list_objects())
+self.assert_equal({1, 2, 3}, self.list_objects())
 def test_repair_missing_commit_segment(self):
 self.add_objects([[1, 2, 3], [4, 5, 6]])
 self.delete_segment(3)
 self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4))
-self.assert_equal(set([1, 2, 3]), self.list_objects())
+self.assert_equal({1, 2, 3}, self.list_objects())
 def test_repair_corrupted_commit_segment(self):
 self.add_objects([[1, 2, 3], [4, 5, 6]])

@@ -771,7 +771,7 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
 self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4))
 self.check(status=True)
 self.get_objects(3)
-self.assert_equal(set([1, 2, 3]), self.list_objects())
+self.assert_equal({1, 2, 3}, self.list_objects())
 def test_repair_no_commits(self):
 self.add_objects([[1, 2, 3]])

@@ -786,14 +786,14 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
 self.assert_equal(self.list_indices(), ['index.2'])
 self.check(status=True)
 self.get_objects(3)
-self.assert_equal(set([1, 2, 3]), self.list_objects())
+self.assert_equal({1, 2, 3}, self.list_objects())
 def test_repair_missing_index(self):
 self.add_objects([[1, 2, 3], [4, 5, 6]])
 self.delete_index()
 self.check(status=True)
 self.get_objects(4)
-self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())
+self.assert_equal({1, 2, 3, 4, 5, 6}, self.list_objects())
 def test_repair_index_too_new(self):
 self.add_objects([[1, 2, 3], [4, 5, 6]])

@@ -802,7 +802,7 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
 self.check(status=True)
 self.assert_equal(self.list_indices(), ['index.3'])
 self.get_objects(4)
-self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())
+self.assert_equal({1, 2, 3, 4, 5, 6}, self.list_objects())
 def test_crash_before_compact(self):
 self.repository.put(H(0), b'data')
@@ -54,7 +54,7 @@ def key_valid(path):
 """
 keyfile = os.path.join(get_keys_dir(),
 os.path.basename(path))
-with open(keyfile, 'r') as f:
+with open(keyfile) as f:
 return f.read().startswith(KeyfileKey.FILE_ID)

@@ -36,7 +36,7 @@ class AtticRepositoryUpgrader(Repository):
 with self:
 backup = None
 if not inplace:
-backup = '{}.before-upgrade-{:%Y-%m-%d-%H:%M:%S}'.format(self.path, datetime.datetime.now())
+backup = f'{self.path}.before-upgrade-{datetime.datetime.now():%Y-%m-%d-%H:%M:%S}'
 logger.info('making a hardlink copy in %s', backup)
 if not dryrun:
 shutil.copytree(self.path, backup, copy_function=os.link)

@@ -144,7 +144,7 @@ class AtticRepositoryUpgrader(Repository):
 problem because the keyfiles are small (compared to, say,
 all the segments)."""
 logger.info("converting keyfile %s" % keyfile)
-with open(keyfile, 'r') as f:
+with open(keyfile) as f:
 data = f.read()
 data = data.replace(AtticKeyfileKey.FILE_ID, KeyfileKey.FILE_ID, 1)
 keyfile = os.path.join(get_keys_dir(), os.path.basename(keyfile))

@@ -214,12 +214,12 @@ class AtticRepositoryUpgrader(Repository):
 if os.path.exists(borg_file):
 logger.warning("borg cache file already exists in %s, not copying from Attic", borg_file)
 else:
-logger.info("copying attic cache file from %s to %s" % (attic_file, borg_file))
+logger.info(f"copying attic cache file from {attic_file} to {borg_file}")
 if not dryrun:
 shutil.copyfile(attic_file, borg_file)
 return borg_file
 else:
-logger.warning("no %s cache file found in %s" % (path, attic_file))
+logger.warning(f"no {path} cache file found in {attic_file}")
 return None
 # XXX: untested, because generating cache files is a PITA, see

@@ -270,7 +270,7 @@ class AtticKeyfileKey(KeyfileKey):
 raise KeyfileNotFoundError(repository.path, keys_dir)
 for name in os.listdir(keys_dir):
 filename = os.path.join(keys_dir, name)
-with open(filename, 'r') as fd:
+with open(filename) as fd:
 line = fd.readline().strip()
 if line and line.startswith(cls.FILE_ID) and line[10:] == repository.id_str:
 return filename

@@ -319,7 +319,7 @@ class Borg0xxKeyfileKey(KeyfileKey):
 raise KeyfileNotFoundError(repository.path, keys_dir)
 for name in os.listdir(keys_dir):
 filename = os.path.join(keys_dir, name)
-with open(filename, 'r') as fd:
+with open(filename) as fd:
 line = fd.readline().strip()
 if line and line.startswith(cls.FILE_ID) and line[len(cls.FILE_ID) + 1:] == repository.id_str:
 return filename

@@ -91,7 +91,7 @@ def get_all(path, follow_symlinks=False):
 pass
 elif e.errno == errno.EPERM:
 # we were not permitted to read this attribute, still can continue trying to read others
-logger.warning('%s: Operation not permitted when reading extended attribute %s' % (
+logger.warning('{}: Operation not permitted when reading extended attribute {}'.format(
 path_str, name_str))
 else:
 raise