Mirror of https://github.com/borgbackup/borg.git

Merge branch 'master' into windows

Commit 885a5c9868: 38 changed files with 1866 additions and 523 deletions
.travis.yml

@@ -14,6 +14,9 @@ matrix:
    - python: 3.5
      os: linux
      env: TOXENV=py35
    - python: nightly
      os: linux
      env: TOXENV=py36
    - python: 3.5
      os: linux
      env: TOXENV=flake8

@@ -25,6 +28,8 @@ matrix:
      os: osx
      osx_image: xcode6.4
      env: TOXENV=py35
  allow_failures:
    - python: nightly

install:
    - ./.travis/install.sh
Vagrantfile (vendored, 4 changed lines)

@@ -202,8 +202,8 @@ def install_borg(boxname)
    rm -f borg/{chunker,crypto,compress,hashindex,platform_linux}.c
    rm -rf borg/__pycache__ borg/support/__pycache__ borg/testsuite/__pycache__
    pip install -r requirements.d/development.txt
    pip install -r requirements.d/fuse.txt
    pip install -e .
    # by using [fuse], setup.py can handle different fuse requirements:
    pip install -e .[fuse]
  EOF
end
borg/_chunker.c

@@ -96,7 +96,7 @@ buzhash(const unsigned char *data, size_t len, const uint32_t *h)
static uint32_t
buzhash_update(uint32_t sum, unsigned char remove, unsigned char add, size_t len, const uint32_t *h)
{
    uint32_t lenmod = len & 0x1f;
    uint32_t lenmod = len & 0x1f;  /* Note: replace by constant to get small speedup */
    return BARREL_SHIFT(sum, 1) ^ BARREL_SHIFT(h[remove], lenmod) ^ h[add];
}
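For readers unfamiliar with buzhash, a minimal Python sketch of the same rolling-hash arithmetic (function names are illustrative, not borg's API; the table is assumed to be a 256-entry array of random 32-bit values, which borg derives from a seed):

def barrel_shift(v, n):
    # rotate the 32-bit value v left by n bits (n in 0..31), like BARREL_SHIFT in the C code
    return ((v << n) | (v >> (32 - n))) & 0xffffffff

def buzhash(window, table):
    # hash of a full window; each older byte's table value ends up rotated once more
    h = 0
    for byte in window:
        h = barrel_shift(h, 1) ^ table[byte]
    return h

def buzhash_update(h, remove, add, window_len, table):
    # slide the window one byte: the byte leaving was rotated window_len times since
    # it entered, so rotating its table value by window_len % 32 cancels its term
    lenmod = window_len & 0x1f
    return barrel_shift(h, 1) ^ barrel_shift(table[remove], lenmod) ^ table[add]

Sliding the window by one byte thus costs two rotations and two XORs, independent of the window size, which is what makes content-defined chunking affordable.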
@@ -196,10 +196,10 @@ chunker_fill(Chunker *c)
    // We rollback the initial offset back to the start of the page,
    // to avoid it not being truncated as a partial page request.
    if (length > 0) {
        // Linux kernels prior to 4.7 have a bug where they truncate
        // last partial page of POSIX_FADV_DONTNEED request, so we need
        // to page-align it ourselves. We'll need the rest of this page
        // on the next read (assuming this was not EOF)
        // All Linux kernels (at least up to and including 4.6(.0)) have a bug where
        // they truncate last partial page of POSIX_FADV_DONTNEED request, so we need
        // to page-align it ourselves. We'll need the rest of this page on the next
        // read (assuming this was not EOF).
        overshoot = (offset + length) & pagemask;
    } else {
        // For length == 0 we set overshoot 0, so the below

@@ -249,11 +249,12 @@ chunker_process(Chunker *c)
        PyErr_SetString(PyExc_Exception, "chunkifier byte count mismatch");
        return NULL;
    }
    while(c->remaining <= window_size && !c->eof) {
    while(c->remaining < min_size + window_size + 1 && !c->eof) {  /* see assert in Chunker init */
        if(!chunker_fill(c)) {
            return NULL;
        }
    }
    /* here we either are at eof ... */
    if(c->eof) {
        c->done = 1;
        if(c->remaining) {

@@ -268,8 +269,15 @@ chunker_process(Chunker *c)
            return NULL;
        }
    }
    /* ... or we have at least min_size + window_size + 1 bytes remaining.
     * We do not want to "cut" a chunk smaller than min_size and the hash
     * window starts at the potential cutting place.
     */
    c->position += min_size;
    c->remaining -= min_size;
    n += min_size;
    sum = buzhash(c->data + c->position, window_size, c->table);
    while(c->remaining > c->window_size && ((sum & chunk_mask) || n < min_size)) {
    while(c->remaining > c->window_size && (sum & chunk_mask)) {
        sum = buzhash_update(sum, c->data[c->position],
                             c->data[c->position + window_size],
                             window_size, c->table);
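The effect of this hunk, condensed into a Python sketch using the buzhash helpers sketched earlier (names are illustrative): since no cut may happen before min_size bytes, the search jumps straight past min_size and only then starts rolling the hash, instead of hashing from byte 0 and re-testing n < min_size on every iteration.

def find_cut_point(data, min_size, window_size, chunk_mask, table):
    # illustrative sketch of the reworked chunker_process() search loop
    pos = min_size                                    # skip: no cut allowed earlier
    h = buzhash(data[pos:pos + window_size], table)   # window sits at the cut candidate
    while pos + window_size < len(data) and (h & chunk_mask):
        h = buzhash_update(h, data[pos], data[pos + window_size], window_size, table)
        pos += 1
    return pos + window_size                          # chunk length (the C code also caps at max_size)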
borg/archive.py (109 changed lines)

@@ -9,6 +9,7 @@ from .key import key_factory
from .remote import cache_if_remote

import os
from shutil import get_terminal_size
import socket
import stat
import sys

@@ -19,26 +20,78 @@ from .compress import COMPR_BUFFER
from .constants import *  # NOQA
from .helpers import Chunk, Error, uid2user, user2uid, gid2group, group2gid, \
    parse_timestamp, to_localtime, format_time, format_timedelta, safe_encode, safe_decode, \
    Manifest, Statistics, decode_dict, make_path_safe, StableDict, int_to_bigint, bigint_to_int, bin_to_hex, \
    Manifest, decode_dict, make_path_safe, StableDict, int_to_bigint, bigint_to_int, bin_to_hex, \
    ProgressIndicatorPercent, ChunkIteratorFileWrapper, remove_surrogates, log_multi, \
    PathPrefixPattern, FnmatchPattern, open_item, file_status, format_file_size, consume, \
    CompressionDecider1, CompressionDecider2, CompressionSpec
    CompressionDecider1, CompressionDecider2, CompressionSpec, \
    IntegrityError
from .repository import Repository
from .platform import acl_get, acl_set
if sys.platform == 'win32':
    from .platform import get_owner, set_owner
from .platform import acl_get, acl_set, set_flags, get_flags, swidth
from .chunker import Chunker
from .hashindex import ChunkIndex, ChunkIndexEntry
from .cache import ChunkListEntry
import msgpack

has_lchmod = hasattr(os, 'lchmod')
has_lchflags = hasattr(os, 'lchflags')

flags_normal = os.O_RDONLY | getattr(os, 'O_BINARY', 0)
flags_noatime = flags_normal | getattr(os, 'O_NOATIME', 0)


class Statistics:

    def __init__(self):
        self.osize = self.csize = self.usize = self.nfiles = 0
        self.last_progress = 0  # timestamp when last progress was shown

    def update(self, size, csize, unique):
        self.osize += size
        self.csize += csize
        if unique:
            self.usize += csize

    summary = """\
                       Original size      Compressed size    Deduplicated size
{label:15} {stats.osize_fmt:>20s} {stats.csize_fmt:>20s} {stats.usize_fmt:>20s}"""

    def __str__(self):
        return self.summary.format(stats=self, label='This archive:')

    def __repr__(self):
        return "<{cls} object at {hash:#x} ({self.osize}, {self.csize}, {self.usize})>".format(
            cls=type(self).__name__, hash=id(self), self=self)

    @property
    def osize_fmt(self):
        return format_file_size(self.osize)

    @property
    def usize_fmt(self):
        return format_file_size(self.usize)

    @property
    def csize_fmt(self):
        return format_file_size(self.csize)

    def show_progress(self, item=None, final=False, stream=None, dt=None):
        now = time.time()
        if dt is None or now - self.last_progress > dt:
            self.last_progress = now
            columns, lines = get_terminal_size()
            if not final:
                msg = '{0.osize_fmt} O {0.csize_fmt} C {0.usize_fmt} D {0.nfiles} N '.format(self)
                path = remove_surrogates(item[b'path']) if item else ''
                space = columns - swidth(msg)
                if space < swidth('...') + swidth(path):
                    path = '%s...%s' % (path[:(space // 2) - swidth('...')], path[-space // 2:])
                msg += "{0:<{space}}".format(path, space=space)
            else:
                msg = ' ' * columns
            print(msg, file=stream or sys.stderr, end="\r", flush=True)


class DownloadPipeline:

    def __init__(self, repository, key):
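To make the bookkeeping of the moved Statistics class concrete, a small usage sketch (the values are made up): update() is called once per chunk, and only chunks seen for the first time count toward the deduplicated size.

stats = Statistics()
stats.update(size=1000, csize=400, unique=True)   # new chunk: osize, csize and usize all grow
stats.update(size=1000, csize=400, unique=False)  # duplicate chunk: usize stays unchanged
stats.nfiles += 1
print(stats)   # renders the "This archive:" line via the summary template above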
@@ -320,7 +373,7 @@ Number of files: {0.stats.nfiles}'''.format(
        """
        if dry_run or stdout:
            if b'chunks' in item:
                for data in self.pipeline.fetch_many([c.id for c in item[b'chunks']], is_preloaded=True):
                for _, data in self.pipeline.fetch_many([c.id for c in item[b'chunks']], is_preloaded=True):
                    if stdout:
                        sys.stdout.buffer.write(data)
                if stdout:

@@ -450,10 +503,9 @@ Number of files: {0.stats.nfiles}'''.format(
        else:
            os.utime(path, None, ns=(atime, mtime), follow_symlinks=False)
        acl_set(path, item, self.numeric_owner)
        # Only available on OS X and FreeBSD
        if has_lchflags and b'bsdflags' in item:
        if b'bsdflags' in item:
            try:
                os.lchflags(path, item[b'bsdflags'])
                set_flags(path, item[b'bsdflags'], fd=fd)
            except OSError:
                pass
        # chown removes Linux capabilities, so set the extended attributes at the end, after chown, since they include

@@ -533,8 +585,9 @@ Number of files: {0.stats.nfiles}'''.format(
        xattrs = xattr.get_all(path, follow_symlinks=False)
        if xattrs:
            item[b'xattrs'] = StableDict(xattrs)
        if has_lchflags and st.st_flags:
            item[b'bsdflags'] = st.st_flags
        bsdflags = get_flags(path, st)
        if bsdflags:
            item[b'bsdflags'] = bsdflags
        acl_get(path, item, st, self.numeric_owner)
        return item

@@ -726,7 +779,17 @@ class ArchiveChecker:
        self.error_found = False
        self.possibly_superseded = set()

    def check(self, repository, repair=False, archive=None, last=None, prefix=None, save_space=False):
    def check(self, repository, repair=False, archive=None, last=None, prefix=None, verify_data=False,
              save_space=False):
        """Perform a set of checks on 'repository'

        :param repair: enable repair mode, write updated or corrected data into repository
        :param archive: only check this archive
        :param last: only check this number of recent archives
        :param prefix: only check archives with this prefix
        :param verify_data: integrity verification of data referenced by archives
        :param save_space: Repository.commit(save_space)
        """
        logger.info('Starting archive consistency check...')
        self.check_all = archive is None and last is None and prefix is None
        self.repair = repair

@@ -740,6 +803,8 @@ class ArchiveChecker:
        else:
            self.manifest, _ = Manifest.load(repository, key=self.key)
        self.rebuild_refcounts(archive=archive, last=last, prefix=prefix)
        if verify_data:
            self.verify_data()
        self.orphan_chunks_check()
        self.finish(save_space=save_space)
        if self.error_found:

@@ -769,6 +834,26 @@ class ArchiveChecker:
        cdata = repository.get(next(self.chunks.iteritems())[0])
        return key_factory(repository, cdata)

    def verify_data(self):
        logger.info('Starting cryptographic data integrity verification...')
        pi = ProgressIndicatorPercent(total=len(self.chunks), msg="Verifying data %6.2f%%", step=0.01, same_line=True)
        count = errors = 0
        for chunk_id, (refcount, *_) in self.chunks.iteritems():
            pi.show()
            if not refcount:
                continue
            encrypted_data = self.repository.get(chunk_id)
            try:
                _, data = self.key.decrypt(chunk_id, encrypted_data)
            except IntegrityError as integrity_error:
                self.error_found = True
                errors += 1
                logger.error('chunk %s, integrity error: %s', bin_to_hex(chunk_id), integrity_error)
            count += 1
        pi.finish()
        log = logger.error if errors else logger.info
        log('Finished cryptographic data integrity verification, verified %d chunks with %d integrity errors.', count, errors)

    def rebuild_manifest(self):
        """Rebuild the manifest object if it is missing

@@ -902,6 +987,8 @@ class ArchiveChecker:
        else:
            # we only want one specific archive
            archive_items = [item for item in self.manifest.archives.items() if item[0] == archive]
            if not archive_items:
                logger.error("Archive '%s' not found.", archive)
            num_archives = 1
            end = 1
borg/archiver.py (142 changed lines)

@@ -8,6 +8,7 @@ import functools
import hashlib
import inspect
import io
import logging
import os
import re
import shlex

@@ -22,7 +23,7 @@ from . import __version__
from .helpers import Error, location_validator, archivename_validator, format_time, format_file_size, \
    parse_pattern, PathPrefixPattern, to_localtime, timestamp, \
    get_cache_dir, prune_within, prune_split, bin_to_hex, safe_encode, \
    Manifest, remove_surrogates, update_excludes, format_archive, check_extension_modules, Statistics, \
    Manifest, remove_surrogates, update_excludes, format_archive, check_extension_modules, \
    dir_is_tagged, ChunkerParams, CompressionSpec, is_slow_msgpack, yes, sysinfo, \
    log_multi, PatternMatcher, ItemFormatter
from .logger import create_logger, setup_logging

@@ -34,12 +35,11 @@ from .repository import Repository
from .cache import Cache
from .constants import *  # NOQA
from .key import key_creator, RepoKey, PassphraseKey
from .archive import Archive, ArchiveChecker, ArchiveRecreater
from .archive import Archive, ArchiveChecker, ArchiveRecreater, Statistics
from .remote import RepositoryServer, RemoteRepository, cache_if_remote
from .selftest import selftest
from .hashindex import ChunkIndexEntry

has_lchflags = hasattr(os, 'lchflags')
from .platform import get_flags

if sys.platform == 'win32':
    import posixpath

@@ -99,9 +99,10 @@ def with_archive(method):

class Archiver:

    def __init__(self, lock_wait=None):
    def __init__(self, lock_wait=None, prog=None):
        self.exit_code = EXIT_SUCCESS
        self.lock_wait = lock_wait
        self.parser = self.build_parser(prog)

    def print_error(self, msg, *args):
        msg = args and msg % args or msg

@@ -115,7 +116,7 @@ class Archiver:

    def print_file_status(self, status, path):
        if self.output_list and (self.output_filter is None or status in self.output_filter):
            logger.info("%1s %s", status, remove_surrogates(path))
            logging.getLogger('borg.output.list').info("%1s %s", status, remove_surrogates(path))

    @staticmethod
    def compare_chunk_contents(chunks1, chunks2):

@@ -188,12 +189,16 @@ class Archiver:
        if not yes(msg, false_msg="Aborting.", truish=('YES', ),
                   env_var_override='BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'):
            return EXIT_ERROR
        if args.repo_only and args.verify_data:
            self.print_error("--repository-only and --verify-data contradict each other. Please select one.")
            return EXIT_ERROR
        if not args.archives_only:
            if not repository.check(repair=args.repair, save_space=args.save_space):
                return EXIT_WARNING
        if not args.repo_only and not ArchiveChecker().check(
                repository, repair=args.repair, archive=args.location.archive,
                last=args.last, prefix=args.prefix, save_space=args.save_space):
                last=args.last, prefix=args.prefix, verify_data=args.verify_data,
                save_space=args.save_space):
            return EXIT_WARNING
        return EXIT_SUCCESS

@@ -280,7 +285,7 @@ class Archiver:
                      DASHES,
                      str(archive.stats),
                      str(cache),
                      DASHES)
                      DASHES, logger=logging.getLogger('borg.output.stats'))

        self.output_filter = args.output_filter
        self.output_list = args.output_list

@@ -318,7 +323,8 @@ class Archiver:
            return
        status = None
        # Ignore if nodump flag is set
        if has_lchflags and (st.st_flags & stat.UF_NODUMP):
        if get_flags(path, st) & stat.UF_NODUMP:
            self.print_file_status('x', path)
            return
        if stat.S_ISREG(st.st_mode) or read_special and not stat.S_ISDIR(st.st_mode):
            if not dry_run:

@@ -419,7 +425,7 @@ class Archiver:
            while dirs and not item[b'path'].startswith(dirs[-1][b'path']):
                archive.extract_item(dirs.pop(-1), stdout=stdout)
            if output_list:
                logger.info(remove_surrogates(orig_path))
                logging.getLogger('borg.output.list').info(remove_surrogates(orig_path))
            try:
                if dry_run:
                    archive.extract_item(item, dry_run=True)

@@ -676,7 +682,7 @@ class Archiver:
                log_multi(DASHES,
                          stats.summary.format(label='Deleted data:', stats=stats),
                          str(cache),
                          DASHES)
                          DASHES, logger=logging.getLogger('borg.output.stats'))
        else:
            if not args.cache_only:
                msg = []

@@ -826,18 +832,19 @@ class Archiver:
        to_delete = (set(archives) | checkpoints) - (set(keep) | set(keep_checkpoints))
        stats = Statistics()
        with Cache(repository, key, manifest, do_files=args.cache_files, lock_wait=self.lock_wait) as cache:
            list_logger = logging.getLogger('borg.output.list')
            for archive in archives_checkpoints:
                if archive in to_delete:
                    if args.dry_run:
                        if args.output_list:
                            logger.info('Would prune: %s' % format_archive(archive))
                            list_logger.info('Would prune: %s' % format_archive(archive))
                    else:
                        if args.output_list:
                            logger.info('Pruning archive: %s' % format_archive(archive))
                            list_logger.info('Pruning archive: %s' % format_archive(archive))
                        Archive(repository, key, manifest, archive.name, cache).delete(stats)
                else:
                    if args.output_list:
                        logger.info('Keeping archive: %s' % format_archive(archive))
                        list_logger.info('Keeping archive: %s' % format_archive(archive))
            if to_delete and not args.dry_run:
                manifest.write()
                repository.commit(save_space=args.save_space)

@@ -846,7 +853,7 @@ class Archiver:
            log_multi(DASHES,
                      stats.summary.format(label='Deleted data:', stats=stats),
                      str(cache),
                      DASHES)
                      DASHES, logger=logging.getLogger('borg.output.stats'))
        return self.exit_code

    def do_upgrade(self, args):

@@ -1121,7 +1128,7 @@ class Archiver:
            self.print_warning(warning)
        return args

    def build_parser(self, args=None, prog=None):
    def build_parser(self, prog=None):
        common_parser = argparse.ArgumentParser(add_help=False, prog=prog)

        common_group = common_parser.add_argument_group('Common options')

@@ -1172,7 +1179,48 @@ class Archiver:
        init_epilog = textwrap.dedent("""
        This command initializes an empty repository. A repository is a filesystem
        directory containing the deduplicated data from zero or more archives.
        Encryption can be enabled at repository init time.

        Encryption can be enabled at repository init time (the default).

        It is not recommended to disable encryption. Repository encryption protects you
        e.g. against the case that an attacker has access to your backup repository.

        But be careful with the key / the passphrase:

        If you want "passphrase-only" security, use the repokey mode. The key will
        be stored inside the repository (in its "config" file). In above mentioned
        attack scenario, the attacker will have the key (but not the passphrase).

        If you want "passphrase and having-the-key" security, use the keyfile mode.
        The key will be stored in your home directory (in .config/borg/keys). In
        the attack scenario, the attacker who has just access to your repo won't have
        the key (and also not the passphrase).

        Make a backup copy of the key file (keyfile mode) or repo config file
        (repokey mode) and keep it at a safe place, so you still have the key in
        case it gets corrupted or lost. Also keep the passphrase at a safe place.
        The backup that is encrypted with that key won't help you with that, of course.

        Make sure you use a good passphrase. Not too short, not too simple. The real
        encryption / decryption key is encrypted with / locked by your passphrase.
        If an attacker gets your key, he can't unlock and use it without knowing the
        passphrase.

        Be careful with special or non-ascii characters in your passphrase:

        - Borg processes the passphrase as unicode (and encodes it as utf-8),
          so it does not have problems dealing with even the strangest characters.
        - BUT: that does not necessarily apply to your OS / VM / keyboard configuration.

        So better use a long passphrase made from simple ascii chars than one that
        includes non-ascii stuff or characters that are hard/impossible to enter on
        a different keyboard layout.

        You can change your passphrase for existing repos at any time, it won't affect
        the encryption/decryption key or other secrets.

        When encrypting, AES-CTR-256 is used for encryption, and HMAC-SHA256 for
        authentication. Hardware acceleration will be used automatically.
        """)
        subparser = subparsers.add_parser('init', parents=[common_parser], add_help=False,
                                          description=self.do_init.__doc__, epilog=init_epilog,

@@ -1221,6 +1269,18 @@ class Archiver:
          required).
        - The archive checks can be time consuming, they can be skipped using the
          --repository-only option.

        The --verify-data option will perform a full integrity verification (as opposed to
        checking the CRC32 of the segment) of data, which means reading the data from the
        repository, decrypting and decompressing it. This is a cryptographic verification,
        which will detect (accidental) corruption. For encrypted repositories it is
        tamper-resistant as well, unless the attacker has access to the keys.

        It is also very slow.

        --verify-data only verifies data used by the archives specified with --last,
        --prefix or an explicitly named archive. If none of these are passed,
        all data in the repository is verified.
        """)
        subparser = subparsers.add_parser('check', parents=[common_parser], add_help=False,
                                          description=self.do_check.__doc__,
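In practice the new option composes with the existing scoping flags described in the epilog; for example (with a hypothetical repository path), borg check --verify-data /path/to/repo decrypts and verifies every referenced chunk, while borg check --verify-data --last 1 /path/to/repo limits the (slow) verification to the most recent archive.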
@@ -1237,6 +1297,10 @@ class Archiver:
        subparser.add_argument('--archives-only', dest='archives_only', action='store_true',
                               default=False,
                               help='only perform archives checks')
        subparser.add_argument('--verify-data', dest='verify_data', action='store_true',
                               default=False,
                               help='perform cryptographic archive data integrity verification '
                                    '(conflicts with --repository-only)')
        subparser.add_argument('--repair', dest='repair', action='store_true',
                               default=False,
                               help='attempt to repair any inconsistencies found')

@@ -1248,6 +1312,9 @@ class Archiver:
                               help='only check last N archives (Default: all)')
        subparser.add_argument('-P', '--prefix', dest='prefix', type=str,
                               help='only consider archive names starting with this prefix')
        subparser.add_argument('-p', '--progress', dest='progress',
                               action='store_true', default=False,
                               help="""show progress display while checking""")

        change_passphrase_epilog = textwrap.dedent("""
        The key files used for repository encryption are optionally passphrase

@@ -2005,8 +2072,7 @@ class Archiver:
        # We can't use argparse for "serve" since we don't want it to show up in "Available commands"
        if args:
            args = self.preprocess_args(args)
        parser = self.build_parser(args)
        args = parser.parse_args(args or ['-h'])
        args = self.parser.parse_args(args or ['-h'])
        update_excludes(args)
        return args

@@ -2015,12 +2081,27 @@ class Archiver:
        if sys.platform != 'win32':
            selftest(logger)

    def _setup_implied_logging(self, args):
        """ turn on INFO level logging for args that imply that they will produce output """
        # map of option name to name of logger for that option
        option_logger = {
            'output_list': 'borg.output.list',
            'show_version': 'borg.output.show-version',
            'show_rc': 'borg.output.show-rc',
            'stats': 'borg.output.stats',
            'progress': 'borg.output.progress',
        }
        for option, logger_name in option_logger.items():
            if args.get(option, False):
                logging.getLogger(logger_name).setLevel('INFO')

    def run(self, args):
        os.umask(args.umask)  # early, before opening files
        self.lock_wait = args.lock_wait
        setup_logging(level=args.log_level, is_serve=args.func == self.do_serve)  # do not use loggers before this!
        self._setup_implied_logging(vars(args))
        if args.show_version:
            logger.info('borgbackup version %s' % __version__)
            logging.getLogger('borg.output.show-version').info('borgbackup version %s' % __version__)
        self.prerun_checks(logger)
        if is_slow_msgpack():
            logger.warning("Using a pure-python msgpack! This will result in lower performance.")
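A standalone sketch of the pattern introduced here (logger names come from the mapping above; basicConfig stands in for borg's setup_logging and everything else is illustrative): optional output always goes through a dedicated logger, and a command-line flag merely lowers that logger's level, so BORG_LOGGING_CONF users can re-route or silence each stream independently.

import logging
logging.basicConfig(level=logging.WARNING)            # stand-in for setup_logging()

stats_logger = logging.getLogger('borg.output.stats')
stats_logger.info('This archive: ...')                # dropped: effective level is WARNING

logging.getLogger('borg.output.stats').setLevel('INFO')   # what passing --stats now implies
stats_logger.info('This archive: ...')                # emitted through the root handler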
@@ -2050,6 +2131,14 @@ def sig_info_handler(signum, stack):  # pragma: no cover
            break


class SIGTERMReceived(BaseException):
    pass


def sig_term_handler(signum, stack):
    raise SIGTERMReceived


def setup_signal_handlers():  # pragma: no cover
    sigs = []
    if hasattr(signal, 'SIGUSR1'):

@@ -2058,6 +2147,7 @@ def setup_signal_handlers():  # pragma: no cover
        sigs.append(signal.SIGINFO)  # kill -INFO pid (or ctrl-t)
    for sig in sigs:
        signal.signal(sig, sig_info_handler)
    signal.signal(signal.SIGTERM, sig_term_handler)


def main():  # pragma: no cover

@@ -2089,18 +2179,22 @@ def main():  # pragma: no cover
    except KeyboardInterrupt:
        msg = 'Keyboard interrupt.\n%s\n%s' % (traceback.format_exc(), sysinfo())
        exit_code = EXIT_ERROR
    except SIGTERMReceived:
        msg = 'Received SIGTERM.'
        exit_code = EXIT_ERROR
    if msg:
        logger.error(msg)
    if args.show_rc:
        rc_logger = logging.getLogger('borg.output.show-rc')
        exit_msg = 'terminating with %s status, rc %d'
        if exit_code == EXIT_SUCCESS:
            logger.info(exit_msg % ('success', exit_code))
            rc_logger.info(exit_msg % ('success', exit_code))
        elif exit_code == EXIT_WARNING:
            logger.warning(exit_msg % ('warning', exit_code))
            rc_logger.warning(exit_msg % ('warning', exit_code))
        elif exit_code == EXIT_ERROR:
            logger.error(exit_msg % ('error', exit_code))
            rc_logger.error(exit_msg % ('error', exit_code))
        else:
            logger.error(exit_msg % ('abnormal', exit_code or 666))
            rc_logger.error(exit_msg % ('abnormal', exit_code or 666))
    sys.exit(exit_code)
borg/chunker.pyx

@@ -23,6 +23,8 @@ cdef class Chunker:
    def __cinit__(self, int seed, int chunk_min_exp, int chunk_max_exp, int hash_mask_bits, int hash_window_size):
        min_size = 1 << chunk_min_exp
        max_size = 1 << chunk_max_exp
        # see chunker_process, first while loop condition, first term must be able to get True:
        assert hash_window_size + min_size + 1 <= max_size, "too small max_size"
        hash_mask = (1 << hash_mask_bits) - 1
        self.chunker = chunker_init(hash_window_size, hash_mask, min_size, max_size, seed & 0xffffffff)
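With the default parameters from borg/constants.py (shown in the next hunk; the 4095-byte window size is an assumption about the default hash window), the new assert holds with plenty of slack:

min_size = 1 << 19        # CHUNK_MIN_EXP = 19  ->  512 kiB
max_size = 1 << 23        # CHUNK_MAX_EXP = 23  ->  8 MiB
hash_window_size = 4095   # assumed default hash window size
assert hash_window_size + min_size + 1 <= max_size   # 528_384 <= 8_388_608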
borg/constants.py

@@ -12,8 +12,14 @@ UMASK_DEFAULT = 0o077
CACHE_TAG_NAME = 'CACHEDIR.TAG'
CACHE_TAG_CONTENTS = b'Signature: 8a477f597d28d172789f06886806bc55'

DEFAULT_MAX_SEGMENT_SIZE = 5 * 1024 * 1024
DEFAULT_SEGMENTS_PER_DIR = 10000
# A large, but not unreasonably large segment size. Always less than 2 GiB (for legacy file systems). We choose
# 500 MiB which means that no indirection from the inode is needed for typical Linux file systems.
# Note that this is a soft-limit and can be exceeded (worst case) by a full maximum chunk size and some metadata
# bytes. That's why it's 500 MiB instead of 512 MiB.
DEFAULT_MAX_SEGMENT_SIZE = 500 * 1024 * 1024

# A few hundred files per directory to go easy on filesystems which don't like too many files per dir (NTFS)
DEFAULT_SEGMENTS_PER_DIR = 500

CHUNK_MIN_EXP = 19  # 2**19 == 512kiB
CHUNK_MAX_EXP = 23  # 2**23 == 8MiB
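The 500-versus-512 MiB headroom argument from the comment can be checked directly (worst case: the soft limit is hit just below the threshold and one maximum-size chunk plus a little metadata is still appended):

soft_limit = 500 * 1024 * 1024        # DEFAULT_MAX_SEGMENT_SIZE
max_chunk = 1 << 23                   # CHUNK_MAX_EXP -> 8 MiB
hard_cap = 512 * 1024 * 1024          # stay comfortably below 2 GiB legacy limits
assert soft_limit + max_chunk <= hard_cap   # 508 MiB worst case, 4 MiB spare for metadata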
borg/hashindex.pyx

@@ -1,9 +1,12 @@
# -*- coding: utf-8 -*-
from collections import namedtuple
import locale
import os

cimport cython
from libc.stdint cimport uint32_t, UINT32_MAX, uint64_t
from libc.errno cimport errno
from cpython.exc cimport PyErr_SetFromErrnoWithFilename

API_VERSION = 2

@@ -52,6 +55,7 @@ MAX_VALUE = _MAX_VALUE

assert _MAX_VALUE % 2 == 1


@cython.internal
cdef class IndexBase:
    cdef HashIndex *index

@@ -63,7 +67,10 @@ cdef class IndexBase:
            path = os.fsencode(path)
            self.index = hashindex_read(path)
            if not self.index:
                raise Exception('hashindex_read failed')
                if errno:
                    PyErr_SetFromErrnoWithFilename(OSError, path)
                    return
                raise RuntimeError('hashindex_read failed')
        else:
            self.index = hashindex_init(capacity, self.key_size, self.value_size)
            if not self.index:
borg/helpers.py (106 changed lines)

@@ -16,8 +16,6 @@ import stat
import textwrap

import re
from shutil import get_terminal_size

from string import Formatter
import platform
import time

@@ -70,6 +68,18 @@ class ErrorWithTraceback(Error):
    traceback = True


class InternalOSError(Error):
    """Error while accessing repository: [Errno {}] {}: {}"""

    def __init__(self, os_error):
        self.errno = os_error.errno
        self.strerror = os_error.strerror
        self.filename = os_error.filename

    def get_message(self):
        return self.__doc__.format(self.errno, self.strerror, self.filename)


class IntegrityError(ErrorWithTraceback):
    """Data integrity error"""

@@ -86,7 +96,7 @@ def check_extension_modules():
        raise ExtensionModuleError
    if crypto.API_VERSION != 3:
        raise ExtensionModuleError
    if platform.API_VERSION != 2:
    if platform.API_VERSION != 3:
        raise ExtensionModuleError

@@ -176,57 +186,6 @@ def prune_split(archives, pattern, n, skip=[]):
    return keep


class Statistics:

    def __init__(self):
        self.osize = self.csize = self.usize = self.nfiles = 0
        self.last_progress = 0  # timestamp when last progress was shown

    def update(self, size, csize, unique):
        self.osize += size
        self.csize += csize
        if unique:
            self.usize += csize

    summary = """\
                       Original size      Compressed size    Deduplicated size
{label:15} {stats.osize_fmt:>20s} {stats.csize_fmt:>20s} {stats.usize_fmt:>20s}"""

    def __str__(self):
        return self.summary.format(stats=self, label='This archive:')

    def __repr__(self):
        return "<{cls} object at {hash:#x} ({self.osize}, {self.csize}, {self.usize})>".format(cls=type(self).__name__, hash=id(self), self=self)

    @property
    def osize_fmt(self):
        return format_file_size(self.osize)

    @property
    def usize_fmt(self):
        return format_file_size(self.usize)

    @property
    def csize_fmt(self):
        return format_file_size(self.csize)

    def show_progress(self, item=None, final=False, stream=None, dt=None):
        now = time.time()
        if dt is None or now - self.last_progress > dt:
            self.last_progress = now
            columns, lines = get_terminal_size()
            if not final:
                msg = '{0.osize_fmt} O {0.csize_fmt} C {0.usize_fmt} D {0.nfiles} N '.format(self)
                path = remove_surrogates(item[b'path']) if item else ''
                space = columns - len(msg)
                if space < len('...') + len(path):
                    path = '%s...%s' % (path[:(space // 2) - len('...')], path[-space // 2:])
                msg += "{0:<{space}}".format(path, space=space)
            else:
                msg = ' ' * columns
            print(msg, file=stream or sys.stderr, end="\r", flush=True)


def get_home_dir():
    """Get user's home directory while preferring a possibly set HOME
    environment variable

@@ -764,11 +723,15 @@ def posix_acl_use_stored_uid_gid(acl):

def safe_decode(s, coding='utf-8', errors='surrogateescape'):
    """decode bytes to str, with round-tripping "invalid" bytes"""
    if s is None:
        return None
    return s.decode(coding, errors)


def safe_encode(s, coding='utf-8', errors='surrogateescape'):
    """encode str to bytes, with round-tripping "invalid" bytes"""
    if s is None:
        return None
    return s.encode(coding, errors)
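The surrogateescape round trip mentioned in both docstrings, spelled out (the byte string is an arbitrary example):

raw = b'caf\xe9'                  # e.g. a latin-1 file name, not valid utf-8
s = safe_decode(raw)              # 'caf\udce9': the bad byte becomes a lone surrogate
assert safe_encode(s) == raw      # encoding maps the surrogate back to the original byte
assert safe_decode(None) is None  # the None pass-through added by this hunk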
@@ -1098,7 +1061,7 @@ def yes(msg=None, false_msg=None, true_msg=None, default_msg=None,


class ProgressIndicatorPercent:
    def __init__(self, total, step=5, start=0, same_line=False, msg="%3.0f%%", file=None):
    def __init__(self, total, step=5, start=0, same_line=False, msg="%3.0f%%"):
        """
        Percentage-based progress indicator

@@ -1107,17 +1070,33 @@ class ProgressIndicatorPercent:
        :param start: at which percent value to start
        :param same_line: if True, emit output always on same line
        :param msg: output message, must contain one %f placeholder for the percentage
        :param file: output file, default: sys.stderr
        """
        self.counter = 0  # 0 .. (total-1)
        self.total = total
        self.trigger_at = start  # output next percentage value when reaching (at least) this
        self.step = step
        if file is None:
            file = sys.stderr
        self.file = file
        self.msg = msg
        self.same_line = same_line
        self.handler = None
        self.logger = logging.getLogger('borg.output.progress')

        # If there are no handlers, set one up explicitly because the
        # terminator and propagation needs to be set. If there are,
        # they must have been set up by BORG_LOGGING_CONF: skip setup.
        if not self.logger.handlers:
            self.handler = logging.StreamHandler(stream=sys.stderr)
            self.handler.setLevel(logging.INFO)
            self.handler.terminator = '\r' if self.same_line else '\n'

            self.logger.addHandler(self.handler)
            if self.logger.level == logging.NOTSET:
                self.logger.setLevel(logging.WARN)
            self.logger.propagate = False

    def __del__(self):
        if self.handler is not None:
            self.logger.removeHandler(self.handler)
            self.handler.close()

    def progress(self, current=None):
        if current is not None:

@@ -1134,11 +1113,11 @@ class ProgressIndicatorPercent:
        return self.output(pct)

    def output(self, percent):
        print(self.msg % percent, file=self.file, end='\r' if self.same_line else '\n', flush=True)
        self.logger.info(self.msg % percent)

    def finish(self):
        if self.same_line:
            print(" " * len(self.msg % 100.0), file=self.file, end='\r')
            self.logger.info(" " * len(self.msg % 100.0))


class ProgressIndicatorEndless:
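A usage sketch matching how verify_data() in borg/archive.py drives this class after the rework (the total is made up): each show() call steps the counter and emits at most one percentage line, now routed through the 'borg.output.progress' logger instead of a raw print().

pi = ProgressIndicatorPercent(total=10000, msg="Verifying data %6.2f%%", step=0.01, same_line=True)
for _ in range(10000):
    pi.show()     # emits when the next 0.01% threshold is crossed
pi.finish()       # blanks the progress line (same_line mode)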
@@ -1188,7 +1167,7 @@ def sysinfo():
    return '\n'.join(info)


def log_multi(*msgs, level=logging.INFO):
def log_multi(*msgs, level=logging.INFO, logger=logger):
    """
    log multiple lines of text, each line by a separate logging call for cosmetic reasons

@@ -1227,7 +1206,7 @@ class ItemFormatter:
        'NUL': 'NUL character for creating print0 / xargs -0 like ouput, see bpath',
    }
    KEY_GROUPS = (
        ('type', 'mode', 'uid', 'gid', 'user', 'group', 'path', 'bpath', 'source', 'linktarget'),
        ('type', 'mode', 'uid', 'gid', 'user', 'group', 'path', 'bpath', 'source', 'linktarget', 'flags'),
        ('size', 'csize', 'num_chunks', 'unique_chunks'),
        ('mtime', 'ctime', 'atime', 'isomtime', 'isoctime', 'isoatime'),
        tuple(sorted(hashlib.algorithms_guaranteed)),

@@ -1320,6 +1299,7 @@ class ItemFormatter:
        item_data['source'] = source
        item_data['linktarget'] = source
        item_data['extra'] = extra
        item_data['flags'] = item.get(b'bsdflags')
        for key in self.used_call_keys:
            item_data[key] = self.call_keys[key](item)
        return item_data
borg/item.py (new file, 141 lines)

@@ -0,0 +1,141 @@
from .constants import ITEM_KEYS
from .helpers import safe_encode, safe_decode, bigint_to_int, int_to_bigint, StableDict


class PropDict:
    """
    Manage a dictionary via properties.

    - initialization by giving a dict or kw args
    - on initialization, normalize dict keys to be str type
    - access dict via properties, like: x.key_name
    - membership check via: 'key_name' in x
    - optionally, encode when setting a value
    - optionally, decode when getting a value
    - be safe against typos in key names: check against VALID_KEYS
    - when setting a value: check type of value
    """
    VALID_KEYS = None  # override with <set of str> in child class

    __slots__ = ("_dict", )  # avoid setting attributes not supported by properties

    def __init__(self, data_dict=None, **kw):
        if data_dict is None:
            data = kw
        elif not isinstance(data_dict, dict):
            raise TypeError("data_dict must be dict")
        else:
            data = data_dict
        # internally, we want an dict with only str-typed keys
        _dict = {}
        for k, v in data.items():
            if isinstance(k, bytes):
                k = k.decode()
            elif not isinstance(k, str):
                raise TypeError("dict keys must be str or bytes, not %r" % k)
            _dict[k] = v
        unknown_keys = set(_dict) - self.VALID_KEYS
        if unknown_keys:
            raise ValueError("dict contains unknown keys %s" % ','.join(unknown_keys))
        self._dict = _dict

    def as_dict(self):
        """return the internal dictionary"""
        return StableDict(self._dict)

    def _check_key(self, key):
        """make sure key is of type str and known"""
        if not isinstance(key, str):
            raise TypeError("key must be str")
        if key not in self.VALID_KEYS:
            raise ValueError("key '%s' is not a valid key" % key)
        return key

    def __contains__(self, key):
        """do we have this key?"""
        return self._check_key(key) in self._dict

    def get(self, key, default=None):
        """get value for key, return default if key does not exist"""
        return getattr(self, self._check_key(key), default)

    @staticmethod
    def _make_property(key, value_type, value_type_name=None, encode=None, decode=None):
        """return a property that deals with self._dict[key]"""
        assert isinstance(key, str)
        if value_type_name is None:
            value_type_name = value_type.__name__
        doc = "%s (%s)" % (key, value_type_name)
        type_error_msg = "%s value must be %s" % (key, value_type_name)
        attr_error_msg = "attribute %s not found" % key

        def _get(self):
            try:
                value = self._dict[key]
            except KeyError:
                raise AttributeError(attr_error_msg) from None
            if decode is not None:
                value = decode(value)
            return value

        def _set(self, value):
            if not isinstance(value, value_type):
                raise TypeError(type_error_msg)
            if encode is not None:
                value = encode(value)
            self._dict[key] = value

        def _del(self):
            try:
                del self._dict[key]
            except KeyError:
                raise AttributeError(attr_error_msg) from None

        return property(_get, _set, _del, doc=doc)


class Item(PropDict):
    """
    Item abstraction that deals with validation and the low-level details internally:

    Items are created either from msgpack unpacker output, from another dict, from kwargs or
    built step-by-step by setting attributes.

    msgpack gives us a dict with bytes-typed keys, just give it to Item(d) and use item.key_name later.
    msgpack gives us byte-typed values for stuff that should be str, we automatically decode when getting
    such a property and encode when setting it.

    If an Item shall be serialized, give as_dict() method output to msgpack packer.
    """

    VALID_KEYS = set(key.decode() for key in ITEM_KEYS)  # we want str-typed keys

    __slots__ = ("_dict", )  # avoid setting attributes not supported by properties

    # properties statically defined, so that IDEs can know their names:

    path = PropDict._make_property('path', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
    source = PropDict._make_property('source', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
    acl_access = PropDict._make_property('acl_access', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
    acl_default = PropDict._make_property('acl_default', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
    acl_extended = PropDict._make_property('acl_extended', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)
    acl_nfs4 = PropDict._make_property('acl_nfs4', str, 'surrogate-escaped str', encode=safe_encode, decode=safe_decode)

    user = PropDict._make_property('user', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)
    group = PropDict._make_property('group', (str, type(None)), 'surrogate-escaped str or None', encode=safe_encode, decode=safe_decode)

    mode = PropDict._make_property('mode', int)
    uid = PropDict._make_property('uid', int)
    gid = PropDict._make_property('gid', int)
    rdev = PropDict._make_property('rdev', int)
    bsdflags = PropDict._make_property('bsdflags', int)

    atime = PropDict._make_property('atime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)
    ctime = PropDict._make_property('ctime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)
    mtime = PropDict._make_property('mtime', int, 'bigint', encode=int_to_bigint, decode=bigint_to_int)

    hardlink_master = PropDict._make_property('hardlink_master', bool)

    chunks = PropDict._make_property('chunks', list)

    xattrs = PropDict._make_property('xattrs', StableDict)
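A short sketch of the intended round trip (the example values are made up; the keys used are among the properties defined above):

item = Item({b'path': b'home/user/file.txt', b'mode': 0o100644})   # e.g. msgpack output
item.uid = 1000                 # type-checked by the generated property: must be int
print(item.path)                # bytes value decoded to (surrogate-escaped) str on access
packed = item.as_dict()         # StableDict, ready to hand to a msgpack packer
# item.nonsense = 1  would raise AttributeError: __slots__ admits no unknown attributes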
borg/key.py (15 changed lines)

@@ -1,4 +1,4 @@
from binascii import a2b_base64, b2a_base64
from binascii import a2b_base64, b2a_base64, hexlify
import configparser
import getpass
import os

@@ -413,16 +413,19 @@ class KeyfileKey(KeyfileKeyBase):
    FILE_ID = 'BORG_KEY'

    def sanity_check(self, filename, id):
        with open(filename, 'r') as fd:
            line = fd.readline().strip()
            if not line.startswith(self.FILE_ID):
        file_id = self.FILE_ID.encode() + b' '
        repo_id = hexlify(id)
        with open(filename, 'rb') as fd:
            # we do the magic / id check in binary mode to avoid stumbling over
            # decoding errors if somebody has binary files in the keys dir for some reason.
            if fd.read(len(file_id)) != file_id:
                raise KeyfileInvalidError(self.repository._location.canonical_path(), filename)
            if line[len(self.FILE_ID) + 1:] != id:
            if fd.read(len(repo_id)) != repo_id:
                raise KeyfileMismatchError(self.repository._location.canonical_path(), filename)
        return filename

    def find_key(self):
        id = self.repository.id_str
        id = self.repository.id
        keyfile = os.environ.get('BORG_KEY_FILE')
        if keyfile:
            return self.sanity_check(keyfile, id)
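The layout being checked, reconstructed for illustration (the repository id is hypothetical):

from binascii import hexlify

repo_id = bytes.fromhex('ab' * 32)        # hypothetical 32-byte repository id
header = b'BORG_KEY ' + hexlify(repo_id)  # first bytes of a matching key file
# sanity_check() reads len(b'BORG_KEY ') bytes, then len(hexlify(id)) bytes,
# comparing each against the expected value without ever decoding the file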
borg/logger.py

@@ -88,7 +88,7 @@ def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', lev
    logger = logging.getLogger('')
    handler = logging.StreamHandler(stream)
    if is_serve:
        fmt = '$LOG %(levelname)s Remote: %(message)s'
        fmt = '$LOG %(levelname)s %(name)s Remote: %(message)s'
    else:
        fmt = '%(message)s'
    handler.setFormatter(logging.Formatter(fmt))
borg/platform.py

@@ -1,18 +1,12 @@
import sys

from .platform_base import acl_get, acl_set, SyncFile, sync_dir, set_flags, get_flags, swidth, API_VERSION

if sys.platform.startswith('linux'):  # pragma: linux only
    from .platform_linux import acl_get, acl_set, API_VERSION
    from .platform_linux import acl_get, acl_set, SyncFile, set_flags, get_flags, swidth, API_VERSION
elif sys.platform.startswith('freebsd'):  # pragma: freebsd only
    from .platform_freebsd import acl_get, acl_set, API_VERSION
    from .platform_freebsd import acl_get, acl_set, swidth, API_VERSION
elif sys.platform == 'darwin':  # pragma: darwin only
    from .platform_darwin import acl_get, acl_set, API_VERSION
    from .platform_darwin import acl_get, acl_set, swidth, API_VERSION
elif sys.platform == 'win32':  # pragma: windows only
    from .platform_windows import acl_get, acl_set, API_VERSION, get_owner, set_owner
else:  # pragma: unknown platform only
    API_VERSION = 2

    def acl_get(path, item, st, numeric_owner=False):
        pass

    def acl_set(path, item, numeric_owner=False):
        pass
borg/platform_base.py (new file, 100 lines)

@@ -0,0 +1,100 @@
import os

API_VERSION = 3

fdatasync = getattr(os, 'fdatasync', os.fsync)


def acl_get(path, item, st, numeric_owner=False):
    """
    Saves ACL Entries

    If `numeric_owner` is True the user/group field is not preserved only uid/gid
    """


def acl_set(path, item, numeric_owner=False):
    """
    Restore ACL Entries

    If `numeric_owner` is True the stored uid/gid is used instead
    of the user/group names
    """

try:
    from os import lchflags

    def set_flags(path, bsd_flags, fd=None):
        lchflags(path, bsd_flags)
except ImportError:
    def set_flags(path, bsd_flags, fd=None):
        pass


def get_flags(path, st):
    """Return BSD-style file flags for path or stat without following symlinks."""
    return getattr(st, 'st_flags', 0)


def sync_dir(path):
    fd = os.open(path, os.O_RDONLY)
    try:
        os.fsync(fd)
    finally:
        os.close(fd)


class SyncFile:
    """
    A file class that is supposed to enable write ordering (one way or another) and data durability after close().

    The degree to which either is possible varies with operating system, file system and hardware.

    This fallback implements a naive and slow way of doing this. On some operating systems it can't actually
    guarantee any of the above, since fsync() doesn't guarantee it. Furthermore it may not be possible at all
    to satisfy the above guarantees on some hardware or operating systems. In these cases we hope that the thorough
    checksumming implemented catches any corrupted data due to misordered, delayed or partial writes.

    Note that POSIX doesn't specify *anything* about power failures (or similar failures). A system that
    routinely loses files or corrupts file on power loss is POSIX compliant.

    TODO: Use F_FULLSYNC on OSX.
    TODO: A Windows implementation should use CreateFile with FILE_FLAG_WRITE_THROUGH.
    """

    def __init__(self, path):
        self.fd = open(path, 'wb')
        self.fileno = self.fd.fileno()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def write(self, data):
        self.fd.write(data)

    def sync(self):
        """
        Synchronize file contents. Everything written prior to sync() must become durable before anything written
        after sync().
        """
        self.fd.flush()
        fdatasync(self.fileno)
        if hasattr(os, 'posix_fadvise'):
            os.posix_fadvise(self.fileno, 0, 0, os.POSIX_FADV_DONTNEED)

    def close(self):
        """sync() and close."""
        self.sync()
        self.fd.close()
        sync_dir(os.path.dirname(self.fd.name))


def swidth(s):
    """terminal output width of string <s>

    For western scripts, this is just len(s), but for cjk glyphs, 2 cells are used.
    """
    return len(s)
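Typical use of the fallback class above (the path is illustrative):

with SyncFile('/tmp/segment.tmp') as f:   # illustrative path
    f.write(b'some repository data')
# on leaving the block, close() ran: flush + fdatasync, then fsync of the
# parent directory, so both the data and the directory entry are durable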
borg/platform_darwin.pyx

@@ -1,7 +1,8 @@
import os
from .helpers import user2uid, group2gid, safe_decode, safe_encode
from .platform_posix import swidth

API_VERSION = 2
API_VERSION = 3

cdef extern from "sys/acl.h":
    ctypedef struct _acl_t:
borg/platform_freebsd.pyx

@@ -1,7 +1,8 @@
import os
from .helpers import posix_acl_use_stored_uid_gid, safe_encode, safe_decode
from .platform_posix import swidth

API_VERSION = 2
API_VERSION = 3

cdef extern from "errno.h":
    int errno
borg/platform_linux.pyx

@@ -1,9 +1,16 @@
import os
import re
from stat import S_ISLNK
from .helpers import posix_acl_use_stored_uid_gid, user2uid, group2gid, safe_decode, safe_encode
import resource
import stat

API_VERSION = 2
from .helpers import posix_acl_use_stored_uid_gid, user2uid, group2gid, safe_decode, safe_encode
from .platform_base import SyncFile as BaseSyncFile
from .platform_posix import swidth

from libc cimport errno
from libc.stdint cimport int64_t

API_VERSION = 3

cdef extern from "sys/types.h":
    int ACL_TYPE_ACCESS

@@ -23,10 +30,78 @@ cdef extern from "sys/acl.h":
cdef extern from "acl/libacl.h":
    int acl_extended_file(const char *path)

cdef extern from "fcntl.h":
    int sync_file_range(int fd, int64_t offset, int64_t nbytes, unsigned int flags)
    unsigned int SYNC_FILE_RANGE_WRITE
    unsigned int SYNC_FILE_RANGE_WAIT_BEFORE
    unsigned int SYNC_FILE_RANGE_WAIT_AFTER

cdef extern from "linux/fs.h":
    # ioctls
    int FS_IOC_SETFLAGS
    int FS_IOC_GETFLAGS

    # inode flags
    int FS_NODUMP_FL
    int FS_IMMUTABLE_FL
    int FS_APPEND_FL
    int FS_COMPR_FL

cdef extern from "sys/ioctl.h":
    int ioctl(int fildes, int request, ...)

cdef extern from "string.h":
    char *strerror(int errnum)

_comment_re = re.compile(' *#.*', re.M)


BSD_TO_LINUX_FLAGS = {
    stat.UF_NODUMP: FS_NODUMP_FL,
    stat.UF_IMMUTABLE: FS_IMMUTABLE_FL,
    stat.UF_APPEND: FS_APPEND_FL,
    stat.UF_COMPRESSED: FS_COMPR_FL,
}


def set_flags(path, bsd_flags, fd=None):
    if fd is None and stat.S_ISLNK(os.lstat(path).st_mode):
        return
    cdef int flags = 0
    for bsd_flag, linux_flag in BSD_TO_LINUX_FLAGS.items():
        if bsd_flags & bsd_flag:
            flags |= linux_flag
    open_fd = fd is None
    if open_fd:
        fd = os.open(path, os.O_RDONLY|os.O_NONBLOCK|os.O_NOFOLLOW)
    try:
        if ioctl(fd, FS_IOC_SETFLAGS, &flags) == -1:
            error_number = errno.errno
            if error_number != errno.EOPNOTSUPP:
                raise OSError(error_number, strerror(error_number).decode(), path)
    finally:
        if open_fd:
            os.close(fd)


def get_flags(path, st):
    cdef int linux_flags
    try:
        fd = os.open(path, os.O_RDONLY|os.O_NONBLOCK|os.O_NOFOLLOW)
    except OSError:
        return 0
    try:
        if ioctl(fd, FS_IOC_GETFLAGS, &linux_flags) == -1:
            return 0
    finally:
        os.close(fd)
    bsd_flags = 0
    for bsd_flag, linux_flag in BSD_TO_LINUX_FLAGS.items():
        if linux_flags & linux_flag:
            bsd_flags |= bsd_flag
    return bsd_flags


def acl_use_local_uid_gid(acl):
    """Replace the user/group field with the local uid/gid if possible
    """
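What the flag translation gives the portable call sites, sketched (the path is illustrative): on Linux the BSD-style constants from the stat module are mapped onto FS_*_FL inode flags via the ioctls above, so callers like the nodump check in borg/archiver.py stay platform-neutral.

import os, stat

set_flags('/tmp/f', stat.UF_NODUMP)       # illustrative path; sets FS_NODUMP_FL via FS_IOC_SETFLAGS
flags = get_flags('/tmp/f', os.lstat('/tmp/f'))
assert flags & stat.UF_NODUMP             # read back through FS_IOC_GETFLAGS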
@@ -77,17 +152,13 @@ cdef acl_numeric_ids(acl):


def acl_get(path, item, st, numeric_owner=False):
    """Saves ACL Entries

    If `numeric_owner` is True the user/group field is not preserved only uid/gid
    """
    cdef acl_t default_acl = NULL
    cdef acl_t access_acl = NULL
    cdef char *default_text = NULL
    cdef char *access_text = NULL

    p = <bytes>os.fsencode(path)
    if S_ISLNK(st.st_mode) or acl_extended_file(p) <= 0:
    if stat.S_ISLNK(st.st_mode) or acl_extended_file(p) <= 0:
        return
    if numeric_owner:
        converter = acl_numeric_ids

@@ -112,11 +183,6 @@ def acl_get(path, item, st, numeric_owner=False):


def acl_set(path, item, numeric_owner=False):
    """Restore ACL Entries

    If `numeric_owner` is True the stored uid/gid is used instead
    of the user/group names
    """
    cdef acl_t access_acl = NULL
    cdef acl_t default_acl = NULL

@@ -141,3 +207,45 @@ def acl_set(path, item, numeric_owner=False):
            acl_set_file(p, ACL_TYPE_DEFAULT, default_acl)
        finally:
            acl_free(default_acl)

cdef _sync_file_range(fd, offset, length, flags):
    assert offset & PAGE_MASK == 0, "offset %d not page-aligned" % offset
    assert length & PAGE_MASK == 0, "length %d not page-aligned" % length
    if sync_file_range(fd, offset, length, flags) != 0:
        raise OSError(errno, os.strerror(errno))
    os.posix_fadvise(fd, offset, length, os.POSIX_FADV_DONTNEED)

cdef unsigned PAGE_MASK = resource.getpagesize() - 1


class SyncFile(BaseSyncFile):
    """
    Implemented using sync_file_range for asynchronous write-out and fdatasync for actual durability.

    "write-out" means that dirty pages (= data that was written) are submitted to an I/O queue and will be send to
    disk in the immediate future.
    """

    def __init__(self, path):
        super().__init__(path)
        self.offset = 0
        self.write_window = (16 * 1024 ** 2) & ~PAGE_MASK
        self.last_sync = 0
        self.pending_sync = None

    def write(self, data):
        self.offset += self.fd.write(data)
        offset = self.offset & ~PAGE_MASK
        if offset >= self.last_sync + self.write_window:
            self.fd.flush()
            _sync_file_range(self.fileno, self.last_sync, offset - self.last_sync, SYNC_FILE_RANGE_WRITE)
            if self.pending_sync is not None:
                _sync_file_range(self.fileno, self.pending_sync, self.last_sync - self.pending_sync,
                                 SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WAIT_AFTER)
            self.pending_sync = self.last_sync
            self.last_sync = offset

    def sync(self):
        self.fd.flush()
        os.fdatasync(self.fileno)
        os.posix_fadvise(self.fileno, 0, 0, os.POSIX_FADV_DONTNEED)
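The write() method above maintains a simple pipeline over page-aligned regions; a timeline sketch (window size 16 MiB as in __init__):

# [0 ......... pending_sync .......... last_sync .......... offset]
#              |- write-out awaited  -|- write-out queued -|- dirty -|
# each time a full window of new pages accumulates, write() queues it with
# SYNC_FILE_RANGE_WRITE and waits for the *previous* window (WAIT_BEFORE/AFTER),
# so roughly two windows of dirty pages are outstanding at most and the final
# fdatasync() in sync() has little left to do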
borg/platform_posix.pyx (new file, 10 lines)

@@ -0,0 +1,10 @@
cdef extern from "wchar.h":
    cdef int wcswidth(const Py_UNICODE *str, size_t n)

def swidth(s):
    str_len = len(s)
    terminal_width = wcswidth(s, str_len)
    if terminal_width >= 0:
        return terminal_width
    else:
        return str_len
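The difference this makes for the progress-line padding in Statistics.show_progress() (example strings; wcswidth counts terminal cells, not code points):

assert swidth('hello') == 5    # western glyphs: one cell each, same as len()
assert swidth('你好') == 4      # cjk glyphs: two cells each, len() would say 2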
@@ -20,6 +20,8 @@ RPC_PROTOCOL_VERSION = 2

BUFSIZE = 10 * 1024 * 1024

MAX_INFLIGHT = 100


class ConnectionClosed(Error):
    """Connection closed by remote host"""

@@ -248,7 +250,6 @@ class RemoteRepository:

        calls = list(calls)
        waiting_for = []
        w_fds = [self.stdin_fd]
        while wait or calls:
            while waiting_for:
                try:

@@ -277,6 +278,10 @@ class RemoteRepository:

                        return
                    except KeyError:
                        break
            if self.to_send or ((calls or self.preload_ids) and len(waiting_for) < MAX_INFLIGHT):
                w_fds = [self.stdin_fd]
            else:
                w_fds = []
            r, w, x = select.select(self.r_fds, w_fds, self.x_fds, 1)
            if x:
                raise Exception('FD exception occurred')
@@ -300,14 +305,9 @@ class RemoteRepository:

                        raise ConnectionClosed()
                    data = data.decode('utf-8')
                    for line in data.splitlines(keepends=True):
                        if line.startswith('$LOG '):
                            _, level, msg = line.split(' ', 2)
                            level = getattr(logging, level, logging.CRITICAL)  # str -> int
                            logging.log(level, msg.rstrip())
                        else:
                            sys.stderr.write("Remote: " + line)
                        handle_remote_line(line)
                if w:
                    while not self.to_send and (calls or self.preload_ids) and len(waiting_for) < 100:
                    while not self.to_send and (calls or self.preload_ids) and len(waiting_for) < MAX_INFLIGHT:
                        if calls:
                            if is_preloaded:
                                if calls[0] in self.cache:

@@ -334,8 +334,6 @@ class RemoteRepository:

                        # that the fd should be writable
                        if e.errno != errno.EAGAIN:
                            raise
            if not self.to_send and not (calls or self.preload_ids):
                w_fds = []
        self.ignore_responses |= set(waiting_for)

    def check(self, repair=False, save_space=False):

@@ -390,6 +388,21 @@ class RemoteRepository:

        self.preload_ids += ids


def handle_remote_line(line):
    if line.startswith('$LOG '):
        _, level, msg = line.split(' ', 2)
        level = getattr(logging, level, logging.CRITICAL)  # str -> int
        if msg.startswith('Remote:'):
            # server format: '$LOG <level> Remote: <msg>'
            logging.log(level, msg.rstrip())
        else:
            # server format '$LOG <level> <logname> Remote: <msg>'
            logname, msg = msg.split(' ', 1)
            logging.getLogger(logname).log(level, msg.rstrip())
    else:
        sys.stderr.write("Remote: " + line)
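For illustration (not part of the commit), this is how the two server line formats distinguished above would be parsed; the example lines themselves are made up:

import logging

for line in ('$LOG WARNING Remote: disk space low\n',
             '$LOG INFO borg.repository Remote: compacting segments\n'):
    _, level, msg = line.split(' ', 2)
    level = getattr(logging, level, logging.CRITICAL)
    if msg.startswith('Remote:'):
        logging.log(level, msg.rstrip())                     # old format: root logger
    else:
        logname, msg = msg.split(' ', 1)
        logging.getLogger(logname).log(level, msg.rstrip())  # new format: named logger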


class RepositoryNoCache:
    """A not caching Repository wrapper, passes through to repository.

@@ -420,6 +433,9 @@ class RepositoryCache(RepositoryNoCache):

    Caches Repository GET operations using a local temporary Repository.
    """
    # maximum object size that will be cached, 64 kiB.
    THRESHOLD = 2**16

    def __init__(self, repository):
        super().__init__(repository)
        tmppath = tempfile.mkdtemp(prefix='borg-tmp')

@@ -440,7 +456,8 @@ class RepositoryCache(RepositoryNoCache):

        except Repository.ObjectNotFound:
            for key_, data in repository_iterator:
                if key_ == key:
                    self.caching_repo.put(key, data)
                    if len(data) <= self.THRESHOLD:
                        self.caching_repo.put(key, data)
                    yield data
                    break
        # Consume any pending requests
@@ -9,14 +9,18 @@ logger = logging.getLogger(__name__)

import os
import shutil
import struct
from collections import defaultdict
from functools import partial
from zlib import crc32

import msgpack
from .constants import *  # NOQA
from .helpers import Error, ErrorWithTraceback, IntegrityError, Location, ProgressIndicatorPercent, bin_to_hex
from .helpers import Error, ErrorWithTraceback, IntegrityError, InternalOSError, Location, ProgressIndicatorPercent, \
    bin_to_hex
from .hashindex import NSIndex
from .locking import UpgradableLock, LockError, LockErrorT
from .lrucache import LRUCache
from .platform import SyncFile, sync_dir

MAX_OBJECT_SIZE = 20 * 1024 * 1024
MAGIC = b'BORG_SEG'

@@ -25,17 +29,55 @@ TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2

FreeSpace = partial(defaultdict, int)


class Repository:
    """Filesystem based transactional key value store
    """
    Filesystem based transactional key value store

    Transactionality is achieved by using a log (aka journal) to record changes. The log is a series of numbered files
    called segments. Each segment is a series of log entries. The segment number together with the offset of each
    entry relative to its segment start establishes an ordering of the log entries. This is the "definition" of
    time for the purposes of the log.

    Log entries are either PUT, DELETE or COMMIT.

    A COMMIT is always the final log entry in a segment and marks all data from the beginning of the log until the
    segment ending with the COMMIT as committed and consistent. The segment number of a segment ending with a COMMIT
    is called the transaction ID of that commit, and a segment ending with a COMMIT is called committed.

    When reading from a repository it is first checked whether the last segment is committed. If it is not, then
    all segments after the last committed segment are deleted; they contain log entries whose consistency is not
    established by a COMMIT.

    Note that the COMMIT can't establish consistency by itself, but only manages to do so with proper support from
    the platform (including the hardware). See platform_base.SyncFile for details.

    A PUT inserts a key-value pair. The value is stored in the log entry, hence the repository implements
    full data logging, meaning that all data is consistent, not just metadata (which is common in file systems).

    A DELETE marks a key as deleted.

    For a given key only the last entry regarding the key, which is called current (all other entries are called
    superseded), is relevant: If there is no entry or the last entry is a DELETE then the key does not exist.
    Otherwise the last PUT defines the value of the key.

    By superseding a PUT (with either another PUT or a DELETE) the log entry becomes obsolete. A segment containing
    such obsolete entries is called sparse, while a segment containing no such entries is called compact.

    Sparse segments can be compacted and thereby disk space freed. This destroys the transaction for which the
    superseded entries were current.

    On disk layout:

    dir/README
    dir/config
    dir/data/<X / SEGMENTS_PER_DIR>/<X>
    dir/data/<X // SEGMENTS_PER_DIR>/<X>
    dir/index.X
    dir/hints.X
    """

    class DoesNotExist(Error):
        """Repository {} does not exist."""

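The PUT/DELETE/COMMIT semantics described in the new docstring can be illustrated with a toy replay (not part of the commit; real log entries are binary records in segment files, not Python tuples, and the keys and values here are made up):

log = [
    ('PUT', b'k1', b'v1'),
    ('PUT', b'k2', b'v2'),
    ('PUT', b'k1', b'v3'),    # supersedes the first PUT, which becomes obsolete
    ('DELETE', b'k2', None),  # supersedes the PUT of k2
    ('COMMIT', None, None),   # everything up to here is committed
]

index = {}
for tag, key, value in log:
    if tag == 'PUT':
        index[key] = value
    elif tag == 'DELETE':
        index.pop(key, None)

assert index == {b'k1': b'v3'}  # only the current entries survive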
@@ -137,7 +179,7 @@ class Repository:

        else:
            return None

    def get_transaction_id(self):
    def check_transaction(self):
        index_transaction_id = self.get_index_transaction_id()
        segments_transaction_id = self.io.get_segments_transaction_id()
        if index_transaction_id is not None and segments_transaction_id is None:

@@ -150,6 +192,9 @@ class Repository:

        else:
            replay_from = index_transaction_id
        self.replay_segments(replay_from, segments_transaction_id)

    def get_transaction_id(self):
        self.check_transaction()
        return self.get_index_transaction_id()

    def break_lock(self):

@@ -190,10 +235,27 @@ class Repository:

            self.write_index()
        self.rollback()

    def open_index(self, transaction_id):
    def open_index(self, transaction_id, auto_recover=True):
        if transaction_id is None:
            return NSIndex()
        return NSIndex.read((os.path.join(self.path, 'index.%d') % transaction_id).encode('utf-8'))
        index_path = os.path.join(self.path, 'index.%d' % transaction_id).encode('utf-8')
        try:
            return NSIndex.read(index_path)
        except RuntimeError as error:
            assert str(error) == 'hashindex_read failed'  # everything else means we're in *deep* trouble
            logger.warning('Repository index missing or corrupted, trying to recover')
            try:
                os.unlink(index_path)
            except OSError as e:
                raise InternalOSError(e) from None
            if not auto_recover:
                raise
            self.prepare_txn(self.get_transaction_id())
            # don't leave an open transaction around
            self.commit()
            return self.open_index(self.get_transaction_id())
        except OSError as e:
            raise InternalOSError(e) from None

    def prepare_txn(self, transaction_id, do_cleanup=True):
        self._active_txn = True

@@ -206,24 +268,51 @@ class Repository:
            self._active_txn = False
            raise
        if not self.index or transaction_id is None:
            self.index = self.open_index(transaction_id)
            try:
                self.index = self.open_index(transaction_id, False)
            except RuntimeError:
                self.check_transaction()
                self.index = self.open_index(transaction_id, False)
        if transaction_id is None:
            self.segments = {}  # XXX bad name: usage_count_of_segment_x = self.segments[x]
            self.compact = set()  # XXX bad name: segments_needing_compaction = self.compact
            self.compact = FreeSpace()  # XXX bad name: freeable_space_of_segment_x = self.compact[x]
        else:
            if do_cleanup:
                self.io.cleanup(transaction_id)
            with open(os.path.join(self.path, 'hints.%d' % transaction_id), 'rb') as fd:
                hints = msgpack.unpack(fd)
            if hints[b'version'] != 1:
                raise ValueError('Unknown hints file version: %d' % hints['version'])
            self.segments = hints[b'segments']
            self.compact = set(hints[b'compact'])
            hints_path = os.path.join(self.path, 'hints.%d' % transaction_id)
            index_path = os.path.join(self.path, 'index.%d' % transaction_id)
            try:
                with open(hints_path, 'rb') as fd:
                    hints = msgpack.unpack(fd)
            except (msgpack.UnpackException, msgpack.ExtraData, FileNotFoundError) as e:
                logger.warning('Repository hints file missing or corrupted, trying to recover')
                if not isinstance(e, FileNotFoundError):
                    os.unlink(hints_path)
                # index must exist at this point
                os.unlink(index_path)
                self.check_transaction()
                self.prepare_txn(transaction_id)
                return
            except OSError as os_error:
                raise InternalOSError(os_error) from None
            if hints[b'version'] == 1:
                logger.debug('Upgrading from v1 hints.%d', transaction_id)
                self.segments = hints[b'segments']
                self.compact = FreeSpace()
                for segment in sorted(hints[b'compact']):
                    logger.debug('Rebuilding sparse info for segment %d', segment)
                    self._rebuild_sparse(segment)
                logger.debug('Upgrade to v2 hints complete')
            elif hints[b'version'] != 2:
                raise ValueError('Unknown hints file version: %d' % hints[b'version'])
            else:
                self.segments = hints[b'segments']
                self.compact = FreeSpace(hints[b'compact'])

    def write_index(self):
        hints = {b'version': 1,
        hints = {b'version': 2,
                 b'segments': self.segments,
                 b'compact': list(self.compact)}
                 b'compact': self.compact}
        transaction_id = self.io.get_segments_transaction_id()
        hints_file = os.path.join(self.path, 'hints.%d' % transaction_id)
        with open(hints_file + '.tmp', 'wb') as fd:
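For illustration (not part of the commit), a sketch of the v2 hints payload as suggested by the code above, round-tripped with a reasonably recent msgpack-python; the segment numbers and byte counts are made up:

import msgpack

hints = {b'version': 2,
         b'segments': {0: 3, 1: 1},   # segment -> number of current entries
         b'compact': {0: 4096}}       # segment -> freeable (sparse) bytes
blob = msgpack.packb(hints)
assert msgpack.unpackb(blob, raw=True)[b'version'] == 2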
@@ -237,10 +326,10 @@ class Repository:

        if self.append_only:
            with open(os.path.join(self.path, 'transactions'), 'a') as log:
                print('transaction %d, UTC time %s' % (transaction_id, datetime.utcnow().isoformat()), file=log)
        # Remove old indices
        # Remove old auxiliary files
        current = '.%d' % transaction_id
        for name in os.listdir(self.path):
            if not name.startswith('index.') and not name.startswith('hints.'):
            if not name.startswith(('index.', 'hints.')):
                continue
            if name.endswith(current):
                continue

@@ -266,32 +355,40 @@ class Repository:
            for segment in unused:
                assert self.segments.pop(segment) == 0
                self.io.delete_segment(segment)
                del self.compact[segment]
            unused = []

        for segment in sorted(self.compact):
            if self.io.segment_exists(segment):
                for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
                    if tag == TAG_PUT and self.index.get(key, (-1, -1)) == (segment, offset):
        for segment, freeable_space in sorted(self.compact.items()):
            if not self.io.segment_exists(segment):
                del self.compact[segment]
                continue
            segment_size = self.io.segment_size(segment)
            if segment_size > 0.2 * self.max_segment_size and freeable_space < 0.15 * segment_size:
                logger.debug('not compacting segment %d for later (only %d bytes are sparse)',
                             segment, freeable_space)
                continue
            segments.setdefault(segment, 0)
            for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
                if tag == TAG_PUT and self.index.get(key, (-1, -1)) == (segment, offset):
                    try:
                        new_segment, offset = self.io.write_put(key, data, raise_full=save_space)
                    except LoggedIO.SegmentFull:
                        complete_xfer()
                        new_segment, offset = self.io.write_put(key, data)
                    self.index[key] = new_segment, offset
                    segments.setdefault(new_segment, 0)
                    segments[new_segment] += 1
                    segments[segment] -= 1
                elif tag == TAG_DELETE:
                    if index_transaction_id is None or segment > index_transaction_id:
                        try:
                            new_segment, offset = self.io.write_put(key, data, raise_full=save_space)
                            self.io.write_delete(key, raise_full=save_space)
                        except LoggedIO.SegmentFull:
                            complete_xfer()
                            new_segment, offset = self.io.write_put(key, data)
                        self.index[key] = new_segment, offset
                        segments.setdefault(new_segment, 0)
                        segments[new_segment] += 1
                        segments[segment] -= 1
                    elif tag == TAG_DELETE:
                        if index_transaction_id is None or segment > index_transaction_id:
                            try:
                                self.io.write_delete(key, raise_full=save_space)
                            except LoggedIO.SegmentFull:
                                complete_xfer()
                                self.io.write_delete(key)
            assert segments[segment] == 0
            unused.append(segment)
                                self.io.write_delete(key)
                assert segments[segment] == 0
                unused.append(segment)
        complete_xfer()
        self.compact = set()
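A worked example of the skip rule in compact_segments() above, with assumed numbers: a segment larger than 20% of max_segment_size is only compacted once at least 15% of its bytes are freeable.

max_segment_size = 5 * 1024**2   # assumed repository setting
segment_size = 2 * 1024**2       # > 0.2 * 5 MiB, so the rule applies
freeable_space = 200 * 1024      # only ~10% of the segment is sparse

skip = (segment_size > 0.2 * max_segment_size and
        freeable_space < 0.15 * segment_size)
assert skip  # 200 KiB < 0.15 * 2 MiB (~307 KiB): compaction is deferred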

    def replay_segments(self, index_transaction_id, segments_transaction_id):
        self.prepare_txn(index_transaction_id, do_cleanup=False)

@@ -314,11 +411,12 @@ class Repository:
    def _update_index(self, segment, objects, report=None):
        """some code shared between replay_segments and check"""
        self.segments[segment] = 0
        for tag, key, offset in objects:
        for tag, key, offset, size in objects:
            if tag == TAG_PUT:
                try:
                    # If this PUT supersedes an older PUT, mark the old segment for compaction and count the free space
                    s, _ = self.index[key]
                    self.compact.add(s)
                    self.compact[s] += size
                    self.segments[s] -= 1
                except KeyError:
                    pass

@@ -326,12 +424,17 @@ class Repository:

                self.segments[segment] += 1
            elif tag == TAG_DELETE:
                try:
                    s, _ = self.index.pop(key)
                    self.segments[s] -= 1
                    self.compact.add(s)
                    # if the deleted PUT is not in the index, there is nothing to clean up
                    s, offset = self.index.pop(key)
                except KeyError:
                    pass
                self.compact.add(segment)
                else:
                    if self.io.segment_exists(s):
                        # the old index is not necessarily valid for this transaction (e.g. compaction); if the segment
                        # is already gone, then it was already compacted.
                        self.segments[s] -= 1
                        size = self.io.read(s, offset, key, read_data=False)
                        self.compact[s] += size
            elif tag == TAG_COMMIT:
                continue
            else:

@@ -341,7 +444,22 @@ class Repository:
            else:
                report(msg)
        if self.segments[segment] == 0:
            self.compact.add(segment)
            self.compact[segment] += self.io.segment_size(segment)

    def _rebuild_sparse(self, segment):
        """Rebuild sparse bytes count for a single segment relative to the current index."""
        self.compact[segment] = 0
        if self.segments[segment] == 0:
            self.compact[segment] += self.io.segment_size(segment)
            return
        for tag, key, offset, size in self.io.iter_objects(segment, read_data=False):
            if tag == TAG_PUT:
                if self.index.get(key, (-1, -1)) != (segment, offset):
                    # This PUT is superseded later
                    self.compact[segment] += size
            elif tag == TAG_DELETE:
                # The outcome of the DELETE has been recorded in the PUT branch already
                self.compact[segment] += size

    def check(self, repair=False, save_space=False):
        """Check repository consistency

@@ -456,14 +574,16 @@ class Repository:
        if not self._active_txn:
            self.prepare_txn(self.get_transaction_id())
        try:
            segment, _ = self.index[id]
            self.segments[segment] -= 1
            self.compact.add(segment)
            segment = self.io.write_delete(id)
            self.segments.setdefault(segment, 0)
            self.compact.add(segment)
            segment, offset = self.index[id]
        except KeyError:
            pass
        else:
            self.segments[segment] -= 1
            size = self.io.read(segment, offset, id, read_data=False)
            self.compact[segment] += size
            segment, size = self.io.write_delete(id)
            self.compact[segment] += size
            self.segments.setdefault(segment, 0)
        segment, offset = self.io.write_put(id, data)
        self.segments.setdefault(segment, 0)
        self.segments[segment] += 1

@@ -477,9 +597,10 @@ class Repository:
        except KeyError:
            raise self.ObjectNotFound(id, self.path) from None
        self.segments[segment] -= 1
        self.compact.add(segment)
        segment = self.io.write_delete(id)
        self.compact.add(segment)
        size = self.io.read(segment, offset, id, read_data=False)
        self.compact[segment] += size
        segment, size = self.io.write_delete(id)
        self.compact[segment] += size
        self.segments.setdefault(segment, 0)

    def preload(self, ids):

@@ -507,7 +628,7 @@ class LoggedIO:
    def __init__(self, path, limit, segments_per_dir, capacity=90):
        self.path = path
        self.fds = LRUCache(capacity,
                            dispose=lambda fd: fd.close())
                            dispose=self.close_fd)
        self.segment = 0
        self.limit = limit
        self.segments_per_dir = segments_per_dir

@@ -519,6 +640,11 @@ class LoggedIO:

        self.fds.clear()
        self.fds = None  # Just to make sure we're disabled

    def close_fd(self, fd):
        if hasattr(os, 'posix_fadvise'):  # only on UNIX
            os.posix_fadvise(fd.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)
        fd.close()

    def segment_iterator(self, reverse=False):
        data_path = os.path.join(self.path, 'data')
        dirs = sorted((dir for dir in os.listdir(data_path) if dir.isdigit()), key=int, reverse=reverse)

@@ -535,10 +661,10 @@ class LoggedIO:
        return None

    def get_segments_transaction_id(self):
        """Verify that the transaction id is consistent with the index transaction id
        """Return the last committed segment.
        """
        for segment, filename in self.segment_iterator(reverse=True):
            if self.is_committed_segment(filename):
            if self.is_committed_segment(segment):
                return segment
        return None

@@ -552,10 +678,14 @@ class LoggedIO:

        else:
            break

    def is_committed_segment(self, filename):
    def is_committed_segment(self, segment):
        """Check if segment ends with a COMMIT_TAG tag
        """
        with open(filename, 'rb') as fd:
        try:
            iterator = self.iter_objects(segment)
        except IntegrityError:
            return False
        with open(self.segment_filename(segment), 'rb') as fd:
            try:
                fd.seek(-self.header_fmt.size, os.SEEK_END)
            except OSError as e:

@@ -563,7 +693,22 @@ class LoggedIO:
                if e.errno == errno.EINVAL:
                    return False
                raise e
            return fd.read(self.header_fmt.size) == self.COMMIT
            if fd.read(self.header_fmt.size) != self.COMMIT:
                return False
        seen_commit = False
        while True:
            try:
                tag, key, offset, _ = next(iterator)
            except IntegrityError:
                return False
            except StopIteration:
                break
            if tag == TAG_COMMIT:
                seen_commit = True
                continue
            if seen_commit:
                return False
        return seen_commit

    def segment_filename(self, segment):
        return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))

@@ -578,7 +723,8 @@ class LoggedIO:
            dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
            if not os.path.exists(dirname):
                os.mkdir(dirname)
            self._write_fd = open(self.segment_filename(self.segment), 'ab')
                sync_dir(os.path.join(self.path, 'data'))
            self._write_fd = SyncFile(self.segment_filename(self.segment))
            self._write_fd.write(MAGIC)
            self.offset = MAGIC_LEN
        return self._write_fd

@@ -591,6 +737,13 @@ class LoggedIO:

            self.fds[segment] = fd
        return fd

    def close_segment(self):
        if self._write_fd:
            self.segment += 1
            self.offset = 0
            self._write_fd.close()
            self._write_fd = None

    def delete_segment(self, segment):
        if segment in self.fds:
            del self.fds[segment]

@@ -602,7 +755,18 @@ class LoggedIO:
    def segment_exists(self, segment):
        return os.path.exists(self.segment_filename(segment))

    def iter_objects(self, segment, include_data=False):
    def segment_size(self, segment):
        return os.path.getsize(self.segment_filename(segment))

    def iter_objects(self, segment, include_data=False, read_data=True):
        """
        Return object iterator for *segment*.

        If read_data is False then include_data must be False as well.
        Integrity checks are skipped: all data obtained from the iterator must be considered informational.

        The iterator returns four-tuples of (tag, key, offset, data|size).
        """
        fd = self.get_fd(segment)
        fd.seek(0)
        if fd.read(MAGIC_LEN) != MAGIC:

@@ -611,11 +775,12 @@ class LoggedIO:
        header = fd.read(self.header_fmt.size)
        while header:
            size, tag, key, data = self._read(fd, self.header_fmt, header, segment, offset,
                                              (TAG_PUT, TAG_DELETE, TAG_COMMIT))
                                              (TAG_PUT, TAG_DELETE, TAG_COMMIT),
                                              read_data=read_data)
            if include_data:
                yield tag, key, offset, data
            else:
                yield tag, key, offset
                yield tag, key, offset, size
            offset += size
            header = fd.read(self.header_fmt.size)
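With read_data=False the iterator yields entry sizes instead of payloads, which is what _rebuild_sparse() and the DELETE accounting rely on. A small usage sketch (not part of the commit; io stands for a LoggedIO instance):

def segment_entry_sizes(io, segment):
    """Sum the sizes of all log entries in *segment* without reading payloads.

    Informational only: with read_data=False, CRC checks are skipped.
    """
    return sum(size for tag, key, offset, size in
               io.iter_objects(segment, read_data=False))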

@@ -639,19 +804,25 @@ class LoggedIO:
            fd.write(data[:size])
            data = data[size:]

    def read(self, segment, offset, id):
    def read(self, segment, offset, id, read_data=True):
        """
        Read entry from *segment* at *offset* with *id*.

        If read_data is False the size of the entry is returned instead and integrity checks are skipped.
        The return value should thus be considered informational.
        """
        if segment == self.segment and self._write_fd:
            self._write_fd.flush()
            self._write_fd.sync()
        fd = self.get_fd(segment)
        fd.seek(offset)
        header = fd.read(self.put_header_fmt.size)
        size, tag, key, data = self._read(fd, self.put_header_fmt, header, segment, offset, (TAG_PUT, ))
        size, tag, key, data = self._read(fd, self.put_header_fmt, header, segment, offset, (TAG_PUT, ), read_data)
        if id != key:
            raise IntegrityError('Invalid segment entry header, is not for wanted id [segment {}, offset {}]'.format(
                segment, offset))
        return data
        return data if read_data else size

    def _read(self, fd, fmt, header, segment, offset, acceptable_tags):
    def _read(self, fd, fmt, header, segment, offset, acceptable_tags, read_data=True):
        # some code shared by read() and iter_objects()
        try:
            hdr_tuple = fmt.unpack(header)

@@ -669,18 +840,32 @@ class LoggedIO:
            raise IntegrityError('Invalid segment entry size [segment {}, offset {}]'.format(
                segment, offset))
        length = size - fmt.size
        data = fd.read(length)
        if len(data) != length:
            raise IntegrityError('Segment entry data short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
                segment, offset, length, len(data)))
        if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
            raise IntegrityError('Segment entry checksum mismatch [segment {}, offset {}]'.format(
                segment, offset))
        if read_data:
            data = fd.read(length)
            if len(data) != length:
                raise IntegrityError('Segment entry data short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
                    segment, offset, length, len(data)))
            if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
                raise IntegrityError('Segment entry checksum mismatch [segment {}, offset {}]'.format(
                    segment, offset))
            if key is None and tag in (TAG_PUT, TAG_DELETE):
                key, data = data[:32], data[32:]
        else:
            if key is None and tag in (TAG_PUT, TAG_DELETE):
                key = fd.read(32)
                length -= 32
                if len(key) != 32:
                    raise IntegrityError('Segment entry key short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
                        segment, offset, 32, len(key)))
            oldpos = fd.tell()
            seeked = fd.seek(length, os.SEEK_CUR) - oldpos
            data = None
            if seeked != length:
                raise IntegrityError('Segment entry data short seek [segment {}, offset {}]: expected {}, got {} bytes'.format(
                    segment, offset, length, seeked))
        if tag not in acceptable_tags:
            raise IntegrityError('Invalid segment entry header, did not get acceptable tag [segment {}, offset {}]'.format(
                segment, offset))
        if key is None and tag in (TAG_PUT, TAG_DELETE):
            key, data = data[:32], data[32:]
        return size, tag, key, data

@@ -699,24 +884,12 @@ class LoggedIO:
|
|
@ -699,24 +884,12 @@ class LoggedIO:
|
|||
crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
|
||||
fd.write(b''.join((crc, header, id)))
|
||||
self.offset += self.put_header_fmt.size
|
||||
return self.segment
|
||||
return self.segment, self.put_header_fmt.size
|
||||
|
||||
def write_commit(self):
|
||||
fd = self.get_write_fd(no_new=True)
|
||||
self.close_segment()
|
||||
fd = self.get_write_fd()
|
||||
header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
|
||||
crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
|
||||
fd.write(b''.join((crc, header)))
|
||||
self.close_segment()
|
||||
|
||||
def close_segment(self):
|
||||
if self._write_fd:
|
||||
self.segment += 1
|
||||
self.offset = 0
|
||||
self._write_fd.flush()
|
||||
os.fsync(self._write_fd.fileno())
|
||||
if hasattr(os, 'posix_fadvise'): # only on UNIX
|
||||
# tell the OS that it does not need to cache what we just wrote,
|
||||
# avoids spoiling the cache for the OS and other processes.
|
||||
os.posix_fadvise(self._write_fd.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)
|
||||
self._write_fd.close()
|
||||
self._write_fd = None
|
||||
|
|
|
|||
|
|
@ -6,9 +6,13 @@ if sys.platform != 'win32':
|
|||
import posix
import stat
import sysconfig
import tempfile
import time
import unittest

from ..xattr import get_all
from ..platform import get_flags
from .. import platform

# Note: this is used by borg.selftest, do not use or import py.test functionality here.

@@ -24,8 +28,20 @@ try:
except ImportError:
    raises = None

has_lchflags = hasattr(os, 'lchflags')
has_lchflags = hasattr(os, 'lchflags') or sys.platform.startswith('linux')
no_lchlfags_because = '' if has_lchflags else '(not supported on this platform)'
try:
    with tempfile.NamedTemporaryFile() as file:
        platform.set_flags(file.name, stat.UF_NODUMP)
except OSError:
    has_lchflags = False
    no_lchlfags_because = '(the file system at %s does not support flags)' % tempfile.gettempdir()

try:
    import llfuse
    has_llfuse = True or llfuse  # avoids "unused import"
except ImportError:
    has_llfuse = False

# The mtime get/set precision varies on different OS and Python versions
if sys.platform != 'win32' and 'HAVE_FUTIMENS' in getattr(posix, '_have_functions', []):

@@ -76,13 +92,13 @@ class BaseTestCase(unittest.TestCase):
        # Assume path2 is on FUSE if st_dev is different
        fuse = s1.st_dev != s2.st_dev
        attrs = ['st_mode', 'st_uid', 'st_gid', 'st_rdev']
        if has_lchflags:
            attrs.append('st_flags')
        if not fuse or not os.path.isdir(path1):
            # dir nlink is always 1 on our fuse filesystem
            attrs.append('st_nlink')
        d1 = [filename] + [getattr(s1, a) for a in attrs]
        d2 = [filename] + [getattr(s2, a) for a in attrs]
        d1.append(get_flags(path1, s1))
        d2.append(get_flags(path2, s2))
        # ignore st_rdev if file is not a block/char device, fixes #203
        if not stat.S_ISCHR(d1[1]) and not stat.S_ISBLK(d1[1]):
            d1[4] = None

@@ -1,14 +1,64 @@
import os
from datetime import datetime, timezone
from io import StringIO
from unittest.mock import Mock

import pytest
import msgpack

from ..archive import Archive, CacheChunkBuffer, RobustUnpacker
from ..archive import Archive, CacheChunkBuffer, RobustUnpacker, Statistics
from ..key import PlaintextKey
from ..helpers import Manifest
from . import BaseTestCase


@pytest.fixture()
def stats():
    stats = Statistics()
    stats.update(20, 10, unique=True)
    return stats


def test_stats_basic(stats):
    assert stats.osize == 20
    assert stats.csize == stats.usize == 10
    stats.update(20, 10, unique=False)
    assert stats.osize == 40
    assert stats.csize == 20
    assert stats.usize == 10


def tests_stats_progress(stats, columns=80):
    os.environ['COLUMNS'] = str(columns)
    out = StringIO()
    stats.show_progress(stream=out)
    s = '20 B O 10 B C 10 B D 0 N '
    buf = ' ' * (columns - len(s))
    assert out.getvalue() == s + buf + "\r"

    out = StringIO()
    stats.update(10**3, 0, unique=False)
    stats.show_progress(item={b'path': 'foo'}, final=False, stream=out)
    s = '1.02 kB O 10 B C 10 B D 0 N foo'
    buf = ' ' * (columns - len(s))
    assert out.getvalue() == s + buf + "\r"
    out = StringIO()
    stats.show_progress(item={b'path': 'foo'*40}, final=False, stream=out)
    s = '1.02 kB O 10 B C 10 B D 0 N foofoofoofoofoofoofoofo...oofoofoofoofoofoofoofoofoo'
    buf = ' ' * (columns - len(s))
    assert out.getvalue() == s + buf + "\r"


def test_stats_format(stats):
    assert str(stats) == """\
                       Original size      Compressed size    Deduplicated size
This archive:                   20 B                 10 B                 10 B"""
    s = "{0.osize_fmt}".format(stats)
    assert s == "20 B"
    # kind of redundant, but id is variable so we can't match reliably
    assert repr(stats) == '<Statistics object at {:#x} (20, 10, 10)>'.format(id(stats))


class MockCache:

    def __init__(self):

@@ -3,7 +3,9 @@ import errno
import os
import inspect
from io import StringIO
import logging
import random
import socket
import stat
import subprocess
import sys

@@ -16,7 +18,7 @@ from hashlib import sha256

import pytest

from .. import xattr, helpers
from .. import xattr, helpers, platform
from ..archive import Archive, ChunkBuffer, ArchiveRecreater
from ..archiver import Archiver
from ..cache import Cache

@@ -26,15 +28,13 @@ from ..helpers import Chunk, Manifest, EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR, b
from ..key import KeyfileKeyBase
from ..remote import RemoteRepository, PathNotAllowed
from ..repository import Repository
from . import has_lchflags, has_llfuse
from . import BaseTestCase, changedir, environment_variable

try:
    import llfuse
    has_llfuse = True or llfuse  # avoids "unused import"
except ImportError:
    has_llfuse = False

has_lchflags = hasattr(os, 'lchflags')
    pass

src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

@@ -280,7 +280,7 @@ class ArchiverTestCaseBase(BaseTestCase):
        # FIFO node
        os.mkfifo(os.path.join(self.input_path, 'fifo1'))
        if has_lchflags:
            os.lchflags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
            platform.set_flags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
        try:
            # Block device
            os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))

@@ -299,9 +299,14 @@ class ArchiverTestCaseBase(BaseTestCase):
class ArchiverTestCase(ArchiverTestCaseBase):
    def test_basic_functionality(self):
        have_root = self.create_test_files()
        self.cmd('init', self.repository_location)
        # fork required to test show-rc output
        output = self.cmd('init', '--show-version', '--show-rc', self.repository_location, fork=True)
        self.assert_in('borgbackup version', output)
        self.assert_in('terminating with success status, rc 0', output)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', '--stats', self.repository_location + '::test.2', 'input')
        output = self.cmd('create', '--stats', self.repository_location + '::test.2', 'input')
        self.assert_in('Archive name: test.2', output)
        self.assert_in('This archive: ', output)
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        list_output = self.cmd('list', '--short', self.repository_location)

@@ -353,6 +358,24 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        # the interesting parts of info_output2 and info_output should be same
        self.assert_equal(filter(info_output), filter(info_output2))

    def test_unix_socket(self):
        self.cmd('init', self.repository_location)
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.bind(os.path.join(self.input_path, 'unix-socket'))
        self.cmd('create', self.repository_location + '::test', 'input')
        sock.close()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert not os.path.exists('input/unix-socket')

    def test_symlink_extract(self):
        self.create_test_files()
        self.cmd('init', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert os.readlink('input/link1') == 'somewhere'

    def test_atime(self):
        self.create_test_files()
        atime, mtime = 123456780, 234567890

@@ -639,6 +662,31 @@ class ArchiverTestCase(ArchiverTestCaseBase):
self.cmd("extract", self.repository_location + "::test", "fm:input/file1", "fm:*file33*", "input/file2")
|
||||
self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file333"])
|
||||
|
||||
def test_extract_list_output(self):
|
||||
self.cmd('init', self.repository_location)
|
||||
self.create_regular_file('file', size=1024 * 80)
|
||||
|
||||
self.cmd('create', self.repository_location + '::test', 'input')
|
||||
|
||||
with changedir('output'):
|
||||
output = self.cmd('extract', self.repository_location + '::test')
|
||||
self.assert_not_in("input/file", output)
|
||||
shutil.rmtree('output/input')
|
||||
|
||||
with changedir('output'):
|
||||
output = self.cmd('extract', '--info', self.repository_location + '::test')
|
||||
self.assert_not_in("input/file", output)
|
||||
shutil.rmtree('output/input')
|
||||
|
||||
with changedir('output'):
|
||||
output = self.cmd('extract', '--list', self.repository_location + '::test')
|
||||
self.assert_in("input/file", output)
|
||||
shutil.rmtree('output/input')
|
||||
|
||||
with changedir('output'):
|
||||
output = self.cmd('extract', '--list', '--info', self.repository_location + '::test')
|
||||
self.assert_in("input/file", output)
|
||||
|
||||
def _create_test_caches(self):
|
||||
self.cmd('init', self.repository_location)
|
||||
self.create_regular_file('file1', size=1024 * 80)
|
||||
|
|
@ -851,7 +899,8 @@ class ArchiverTestCase(ArchiverTestCaseBase):
|
|||
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('delete', self.repository_location + '::test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('delete', '--stats', self.repository_location + '::test.2')
        output = self.cmd('delete', '--stats', self.repository_location + '::test.2')
        self.assert_in('Deleted data:', output)
        # Make sure all data except the manifest has been deleted
        with Repository(self.repository_path) as repository:
            self.assert_equal(len(repository), 1)

@@ -874,12 +923,16 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        self.cmd('init', self.repository_location)
        self.create_src_archive('test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        self.cmd('check', self.repository_location)
        name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[0]
        output = self.cmd('check', '--show-version', self.repository_location)
        self.assert_in('borgbackup version', output)  # implied output even without --info given
        self.assert_not_in('Starting repository check', output)  # --info not given for root logger

        name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[1]
        with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
            fd.seek(100)
            fd.write(b'XXXX')
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '--info', self.repository_location, exit_code=1)
        self.assert_in('Starting repository check', output)  # --info given for root logger

    # we currently need to be able to create a lock directory inside the repo:
    @pytest.mark.xfail(reason="we need to be able to create the lock directory inside the repo")

@@ -927,11 +980,11 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        os.utime('input/file1', (now - 5, now - 5))  # 5 seconds ago
        self.create_regular_file('file2', size=1024 * 80)
        self.cmd('init', self.repository_location)
        output = self.cmd('create', '-v', '--list', self.repository_location + '::test', 'input')
        output = self.cmd('create', '--list', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        # should find first file as unmodified
        output = self.cmd('create', '-v', '--list', self.repository_location + '::test1', 'input')
        output = self.cmd('create', '--list', self.repository_location + '::test1', 'input')
        self.assert_in("U input/file1", output)
        # this is expected, although surprising, for why, see:
        # https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file

@@ -944,14 +997,21 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        self.create_regular_file('file1', size=1024 * 80)
        os.utime('input/file1', (now - 5, now - 5))  # 5 seconds ago
        self.create_regular_file('file2', size=1024 * 80)
        if has_lchflags:
            self.create_regular_file('file3', size=1024 * 80)
            platform.set_flags(os.path.join(self.input_path, 'file3'), stat.UF_NODUMP)
        self.cmd('init', self.repository_location)
        output = self.cmd('create', '-v', '--list', self.repository_location + '::test', 'input')
        output = self.cmd('create', '--list', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)
        # should find second file as excluded
        output = self.cmd('create', '-v', '--list', self.repository_location + '::test1', 'input', '--exclude', '*/file2')
        output = self.cmd('create', '--list', self.repository_location + '::test1', 'input', '--exclude', '*/file2')
        self.assert_in("U input/file1", output)
        self.assert_in("x input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)

    def test_create_topical(self):
        now = time.time()

@@ -966,15 +1026,15 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        output = self.cmd('create', self.repository_location + '::test0', 'input')
        self.assert_not_in('file1', output)
        # should list the file as unchanged
        output = self.cmd('create', '-v', '--list', '--filter=U', self.repository_location + '::test1', 'input')
        output = self.cmd('create', '--list', '--filter=U', self.repository_location + '::test1', 'input')
        self.assert_in('file1', output)
        # should *not* list the file as changed
        output = self.cmd('create', '-v', '--filter=AM', self.repository_location + '::test2', 'input')
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test2', 'input')
        self.assert_not_in('file1', output)
        # change the file
        self.create_regular_file('file1', size=1024 * 100)
        # should list the file as changed
        output = self.cmd('create', '-v', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
        self.assert_in('file1', output)

    # def test_cmdline_compatibility(self):

@@ -992,7 +1052,8 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        self.cmd('create', self.repository_location + '::test3.checkpoint', src_dir)
        self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
        self.cmd('create', self.repository_location + '::test4.checkpoint', src_dir)
        output = self.cmd('prune', '-v', '--list', '--dry-run', self.repository_location, '--keep-daily=2')
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2')
        self.assert_in('Keeping archive: test2', output)
        self.assert_in('Would prune: test1', output)
        # must keep the latest non-checkpoint archive:
        self.assert_in('Keeping archive: test2', output)

@@ -1026,9 +1087,10 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        self.cmd('init', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        self.cmd('create', self.repository_location + '::test2', src_dir)
        output = self.cmd('prune', '-v', '--list', '--dry-run', self.repository_location, '--keep-daily=2')
        output = self.cmd('prune', '--list', '--stats', '--dry-run', self.repository_location, '--keep-daily=2')
        self.assert_in('Keeping archive: test2', output)
        self.assert_in('Would prune: test1', output)
        self.assert_in('Deleted data:', output)
        output = self.cmd('list', self.repository_location)
        self.assert_in('test1', output)
        self.assert_in('test2', output)

@@ -1043,7 +1105,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        self.cmd('create', self.repository_location + '::foo-2015-08-12-20:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-10:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-20:00', src_dir)
        output = self.cmd('prune', '-v', '--list', '--dry-run', self.repository_location, '--keep-daily=2', '--prefix=foo-')
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2', '--prefix=foo-')
        self.assert_in('Keeping archive: foo-2015-08-12-20:00', output)
        self.assert_in('Would prune: foo-2015-08-12-10:00', output)
        output = self.cmd('list', self.repository_location)

@@ -1200,6 +1262,9 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        try:
            self.cmd('mount', self.repository_location, mountpoint, fork=True)
            self.wait_for_mount(mountpoint)
            if has_lchflags:
                # remove the file we did not backup, so input and output become equal
                os.remove(os.path.join('input', 'flagfile'))
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'))
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'))
        finally:

@@ -1221,6 +1286,9 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        try:
            self.cmd('mount', self.repository_location + '::archive', mountpoint, fork=True)
            self.wait_for_mount(mountpoint)
            if has_lchflags:
                # remove the file we did not backup, so input and output become equal
                os.remove(os.path.join('input', 'flagfile'))
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'))
        finally:
            if sys.platform.startswith('linux'):

@@ -1395,7 +1463,7 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        if interrupt_early:
            process_files = 0
        with patch.object(ArchiveRecreater, 'process_item', self._recreate_interrupt_patch(process_files)):
            self.cmd('recreate', '-sv', '--list', self.repository_location, 'input/dir2')
            self.cmd('recreate', self.repository_location, 'input/dir2')
        assert 'test.recreate' in self.cmd('list', self.repository_location)
        if change_args:
            with patch.object(sys, 'argv', sys.argv + ['non-forking tests don\'t use sys.argv']):

@@ -1447,9 +1515,9 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        self.cmd('create', self.repository_location + '::test', 'input')
        archive_before = self.cmd('list', self.repository_location + '::test', '--format', '{sha512}')
        with patch.object(Cache, 'add_chunk', self._test_recreate_chunker_interrupt_patch()):
            self.cmd('recreate', '-pv', '--chunker-params', '10,12,11,4095', self.repository_location)
            self.cmd('recreate', '-pv', '--chunker-params', '10,13,11,4095', self.repository_location)
        assert 'test.recreate' in self.cmd('list', self.repository_location)
        output = self.cmd('recreate', '-svp', '--debug', '--chunker-params', '10,12,11,4095', self.repository_location)
        output = self.cmd('recreate', '-svp', '--debug', '--chunker-params', '10,13,11,4095', self.repository_location)
        assert 'Found test.recreate, will resume' in output
        assert 'Copied 1 chunks from a partially processed item' in output
        archive_after = self.cmd('list', self.repository_location + '::test', '--format', '{sha512}')

@@ -1486,6 +1554,32 @@ class ArchiverTestCase(ArchiverTestCaseBase):
        cmd = 'python3', '-c', 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path
        self.cmd('with-lock', self.repository_location, *cmd, fork=True, exit_code=42)

    def test_recreate_list_output(self):
        self.cmd('init', self.repository_location)
        self.create_regular_file('file1', size=0)
        self.create_regular_file('file2', size=0)
        self.create_regular_file('file3', size=0)
        self.create_regular_file('file4', size=0)
        self.create_regular_file('file5', size=0)

        self.cmd('create', self.repository_location + '::test', 'input')

        output = self.cmd('recreate', '--list', '--info', self.repository_location + '::test', '-e', 'input/file2')
        self.assert_in("input/file1", output)
        self.assert_in("x input/file2", output)

        output = self.cmd('recreate', '--list', self.repository_location + '::test', '-e', 'input/file3')
        self.assert_in("input/file1", output)
        self.assert_in("x input/file3", output)

        output = self.cmd('recreate', self.repository_location + '::test', '-e', 'input/file4')
        self.assert_not_in("input/file1", output)
        self.assert_not_in("x input/file4", output)

        output = self.cmd('recreate', '--info', self.repository_location + '::test', '-e', 'input/file5')
        self.assert_not_in("input/file1", output)
        self.assert_not_in("x input/file5", output)


@unittest.skipUnless('binary' in BORG_EXES, 'no borg.exe available')
class ArchiverTestCaseBinary(ArchiverTestCase):

@@ -1526,12 +1620,16 @@ class ArchiverCheckTestCase(ArchiverTestCaseBase):
        return archive, repository

    def test_check_usage(self):
        output = self.cmd('check', '-v', self.repository_location, exit_code=0)
        output = self.cmd('check', '-v', '--progress', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        self.assert_in('Checking segments', output)
        # reset logging to new process default to avoid need for fork=True on next check
        logging.getLogger('borg.output.progress').setLevel(logging.NOTSET)
        output = self.cmd('check', '-v', '--repository-only', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_not_in('Starting archive consistency check', output)
        self.assert_not_in('Checking segments', output)
        output = self.cmd('check', '-v', '--archives-only', self.repository_location, exit_code=0)
        self.assert_not_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)

@@ -1590,6 +1688,29 @@ class ArchiverCheckTestCase(ArchiverTestCaseBase):
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)

    def _test_verify_data(self, *init_args):
        shutil.rmtree(self.repository_path)
        self.cmd('init', self.repository_location, *init_args)
        self.create_src_archive('archive1')
        archive, repository = self.open_archive('archive1')
        with repository:
            for item in archive.iter_items():
                if item[b'path'].endswith('testsuite/archiver.py'):
                    chunk = item[b'chunks'][-1]
                    data = repository.get(chunk.id) + b'1234'
                    repository.put(chunk.id, data)
                    break
            repository.commit()
        self.cmd('check', self.repository_location, exit_code=0)
        output = self.cmd('check', '--verify-data', self.repository_location, exit_code=1)
        assert bin_to_hex(chunk.id) + ', integrity error' in output

    def test_verify_data(self):
        self._test_verify_data('--encryption', 'repokey')

    def test_verify_data_unencrypted(self):
        self._test_verify_data('--encryption', 'none')


class RemoteArchiverTestCase(ArchiverTestCase):
    prefix = '__testsuite__:'

@@ -1,4 +0,0 @@
from ..logger import setup_logging

# Ensure that the loggers exist for all tests
setup_logging()

@@ -1,7 +1,8 @@
import hashlib
import io
import logging
from time import mktime, strptime
from datetime import datetime, timezone, timedelta
from io import StringIO
import os

import pytest

@@ -11,7 +12,7 @@ import msgpack.fallback
import time

from ..helpers import Location, format_file_size, format_timedelta, make_path_safe, clean_lines, \
    prune_within, prune_split, get_cache_dir, get_keys_dir, Statistics, is_slow_msgpack, \
    prune_within, prune_split, get_cache_dir, get_keys_dir, is_slow_msgpack, \
    yes, TRUISH, FALSISH, DEFAULTISH, \
    StableDict, int_to_bigint, bigint_to_int, bin_to_hex, parse_timestamp, ChunkerParams, Chunk, \
    ProgressIndicatorPercent, ProgressIndicatorEndless, load_excludes, parse_pattern, \

@@ -629,53 +630,6 @@ def test_get_keys_dir():
    os.environ['BORG_KEYS_DIR'] = old_env


@pytest.fixture()
def stats():
    stats = Statistics()
    stats.update(20, 10, unique=True)
    return stats


def test_stats_basic(stats):
    assert stats.osize == 20
    assert stats.csize == stats.usize == 10
    stats.update(20, 10, unique=False)
    assert stats.osize == 40
    assert stats.csize == 20
    assert stats.usize == 10


def tests_stats_progress(stats, columns=80):
    os.environ['COLUMNS'] = str(columns)
    out = StringIO()
    stats.show_progress(stream=out)
    s = '20 B O 10 B C 10 B D 0 N '
    buf = ' ' * (columns - len(s))
    assert out.getvalue() == s + buf + "\r"

    out = StringIO()
    stats.update(10**3, 0, unique=False)
    stats.show_progress(item={b'path': 'foo'}, final=False, stream=out)
    s = '1.02 kB O 10 B C 10 B D 0 N foo'
    buf = ' ' * (columns - len(s))
    assert out.getvalue() == s + buf + "\r"
    out = StringIO()
    stats.show_progress(item={b'path': 'foo'*40}, final=False, stream=out)
    s = '1.02 kB O 10 B C 10 B D 0 N foofoofoofoofoofoofoofo...oofoofoofoofoofoofoofoofoo'
    buf = ' ' * (columns - len(s))
    assert out.getvalue() == s + buf + "\r"


def test_stats_format(stats):
    assert str(stats) == """\
                       Original size      Compressed size    Deduplicated size
This archive:                   20 B                 10 B                 10 B"""
    s = "{0.osize_fmt}".format(stats)
    assert s == "20 B"
    # kind of redundant, but id is variable so we can't match reliably
    assert repr(stats) == '<Statistics object at {:#x} (20, 10, 10)>'.format(id(stats))


def test_file_size():
    """test the size formatting routines"""
    si_size_map = {

@@ -826,7 +780,7 @@ def test_yes_output(capfd):

def test_progress_percentage_multiline(capfd):
    pi = ProgressIndicatorPercent(1000, step=5, start=0, same_line=False, msg="%3.0f%%", file=sys.stderr)
    pi = ProgressIndicatorPercent(1000, step=5, start=0, same_line=False, msg="%3.0f%%")
    pi.show(0)
    out, err = capfd.readouterr()
    assert err == '  0%\n'

@@ -842,13 +796,14 @@ def test_progress_percentage_multiline(capfd):

def test_progress_percentage_sameline(capfd):
    pi = ProgressIndicatorPercent(1000, step=5, start=0, same_line=True, msg="%3.0f%%", file=sys.stderr)
    pi = ProgressIndicatorPercent(1000, step=5, start=0, same_line=True, msg="%3.0f%%")
    pi.show(0)
    out, err = capfd.readouterr()
    assert err == '  0%\r'
    pi.show(420)
    pi.show(680)
    out, err = capfd.readouterr()
    assert err == ' 42%\r'
    assert err == ' 42%\r 68%\r'
    pi.show(1000)
    out, err = capfd.readouterr()
    assert err == '100%\r'

@@ -858,7 +813,7 @@ def test_progress_percentage_sameline(capfd):

def test_progress_percentage_step(capfd):
    pi = ProgressIndicatorPercent(100, step=2, start=0, same_line=False, msg="%3.0f%%", file=sys.stderr)
    pi = ProgressIndicatorPercent(100, step=2, start=0, same_line=False, msg="%3.0f%%")
    pi.show()
    out, err = capfd.readouterr()
    assert err == '  0%\n'

@@ -870,6 +825,21 @@ def test_progress_percentage_step(capfd):
assert err == ' 2%\n'
|
||||
|
||||
|
||||
def test_progress_percentage_quiet(capfd):
|
||||
logging.getLogger('borg.output.progress').setLevel(logging.WARN)
|
||||
|
||||
pi = ProgressIndicatorPercent(1000, step=5, start=0, same_line=False, msg="%3.0f%%")
|
||||
pi.show(0)
|
||||
out, err = capfd.readouterr()
|
||||
assert err == ''
|
||||
pi.show(1000)
|
||||
out, err = capfd.readouterr()
|
||||
assert err == ''
|
||||
pi.finish()
|
||||
out, err = capfd.readouterr()
|
||||
assert err == ''
|
||||
|
||||
|
||||
def test_progress_endless(capfd):
|
||||
pi = ProgressIndicatorEndless(step=1, file=sys.stderr)
|
||||
pi.show()
|
||||
|
|
|
|||
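These tests encode the ProgressIndicatorPercent output contract: same_line=True overwrites with '\r', same_line=False appends '\n', output is only emitted when a step boundary is crossed, and (after this change) it goes through the 'borg.output.progress' logger rather than a file argument. A minimal usage sketch under those assumptions (import path assumed from the surrounding test module):

    from borg.helpers import ProgressIndicatorPercent

    pi = ProgressIndicatorPercent(1000,            # total number of units
                                  step=5,          # emit output every 5%
                                  start=0,         # starting percentage
                                  same_line=True,  # overwrite the line with \r
                                  msg="%3.0f%%")
    for i in range(1000):
        pi.show(i)   # prints ' 42%\r' etc. when a 5% boundary is crossed
    pi.finish()
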
borg/testsuite/item.py (new file, 147 lines)

@@ -0,0 +1,147 @@
import pytest

from ..item import Item
from ..helpers import StableDict


def test_item_empty():
    item = Item()

    assert item.as_dict() == {}

    assert 'path' not in item
    with pytest.raises(ValueError):
        'invalid-key' in item
    with pytest.raises(TypeError):
        b'path' in item
    with pytest.raises(TypeError):
        42 in item

    assert item.get('mode') is None
    assert item.get('mode', 0o666) == 0o666
    with pytest.raises(ValueError):
        item.get('invalid-key')
    with pytest.raises(TypeError):
        item.get(b'mode')
    with pytest.raises(TypeError):
        item.get(42)

    with pytest.raises(AttributeError):
        item.path

    with pytest.raises(AttributeError):
        del item.path


def test_item_from_dict():
    # does not matter whether we get str or bytes keys
    item = Item({b'path': b'/a/b/c', b'mode': 0o666})
    assert item.path == '/a/b/c'
    assert item.mode == 0o666
    assert 'path' in item

    # does not matter whether we get str or bytes keys
    item = Item({'path': b'/a/b/c', 'mode': 0o666})
    assert item.path == '/a/b/c'
    assert item.mode == 0o666
    assert 'mode' in item

    # invalid - no dict
    with pytest.raises(TypeError):
        Item(42)

    # invalid - no bytes/str key
    with pytest.raises(TypeError):
        Item({42: 23})

    # invalid - unknown key
    with pytest.raises(ValueError):
        Item({'foobar': 'baz'})


def test_item_from_kw():
    item = Item(path=b'/a/b/c', mode=0o666)
    assert item.path == '/a/b/c'
    assert item.mode == 0o666


def test_item_int_property():
    item = Item()
    item.mode = 0o666
    assert item.mode == 0o666
    assert item.as_dict() == {'mode': 0o666}
    del item.mode
    assert item.as_dict() == {}
    with pytest.raises(TypeError):
        item.mode = "invalid"


def test_item_bigint_property():
    item = Item()
    small, big = 42, 2 ** 65
    item.atime = small
    assert item.atime == small
    assert item.as_dict() == {'atime': small}
    item.atime = big
    assert item.atime == big
    assert item.as_dict() == {'atime': b'\0' * 8 + b'\x02'}

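The expected 'atime' bytes follow from a little-endian integer encoding once the value no longer fits msgpack's integer range; this can be checked with plain Python, independent of borg:

    big = 2 ** 65
    encoded = big.to_bytes((big.bit_length() + 7) // 8, 'little')
    assert encoded == b'\0' * 8 + b'\x02'   # 9 bytes: bytes 0..7 zero, byte 8 holds the 2
    assert int.from_bytes(encoded, 'little') == big
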
def test_item_user_group_none():
    item = Item()
    item.user = None
    assert item.user is None
    item.group = None
    assert item.group is None


def test_item_se_str_property():
    # start simple
    item = Item()
    item.path = '/a/b/c'
    assert item.path == '/a/b/c'
    assert item.as_dict() == {'path': b'/a/b/c'}
    del item.path
    assert item.as_dict() == {}
    with pytest.raises(TypeError):
        item.path = 42

    # non-utf-8 path, needing surrogate-escaping for latin-1 u-umlaut
    item = Item({'path': b'/a/\xfc/c'})
    assert item.path == '/a/\udcfc/c'  # getting a surrogate-escaped representation
    assert item.as_dict() == {'path': b'/a/\xfc/c'}
    del item.path
    assert 'path' not in item
    item.path = '/a/\udcfc/c'  # setting using a surrogate-escaped representation
    assert item.as_dict() == {'path': b'/a/\xfc/c'}


def test_item_list_property():
    item = Item()
    item.chunks = []
    assert item.chunks == []
    item.chunks.append(0)
    assert item.chunks == [0]
    item.chunks.append(1)
    assert item.chunks == [0, 1]
    assert item.as_dict() == {'chunks': [0, 1]}


def test_item_dict_property():
    item = Item()
    item.xattrs = StableDict()
    assert item.xattrs == StableDict()
    item.xattrs['foo'] = 'bar'
    assert item.xattrs['foo'] == 'bar'
    item.xattrs['bar'] = 'baz'
    assert item.xattrs == StableDict({'foo': 'bar', 'bar': 'baz'})
    assert item.as_dict() == {'xattrs': {'foo': 'bar', 'bar': 'baz'}}


def test_unknown_property():
    # we do not want the user to be able to set unknown attributes -
    # they won't get into the .as_dict() result dictionary.
    # also they might be just typos of known attributes.
    item = Item()
    with pytest.raises(AttributeError):
        item.unknown_attribute = None

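Taken together, these tests describe the Item contract: keys are str (bytes keys get decoded), unknown keys raise, and typed properties validate on assignment and vanish from as_dict() on deletion. A rough, hypothetical sketch of such a validating property built on a Python descriptor (the real implementation is in borg/item.py and differs in detail):

    class TypedProperty:
        """Hypothetical sketch of a validating data descriptor, simplified."""
        def __init__(self, name, value_type):
            self.name = name
            self.value_type = value_type

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            try:
                return obj._dict[self.name]
            except KeyError:
                raise AttributeError(self.name) from None

        def __set__(self, obj, value):
            if not isinstance(value, self.value_type):
                raise TypeError('%s must be %s' % (self.name, self.value_type.__name__))
            obj._dict[self.name] = value

        def __delete__(self, obj):
            try:
                del obj._dict[self.name]
            except KeyError:
                raise AttributeError(self.name) from None

    class ItemSketch:
        mode = TypedProperty('mode', int)

        def __init__(self, **kw):
            self._dict = {}
            for k, v in kw.items():
                setattr(self, k, v)

        def as_dict(self):
            return dict(self._dict)

For example, ItemSketch(mode=0o666).as_dict() yields {'mode': 0o666}, while assigning a string to mode raises TypeError, as the tests above require.
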
@@ -1,17 +1,24 @@
import getpass
import os
import re
import shutil
import tempfile
from binascii import hexlify, unhexlify

import pytest

from ..crypto import bytes_to_long, num_aes_blocks
from ..key import PlaintextKey, PassphraseKey, KeyfileKey
from ..helpers import Location, Chunk, bin_to_hex
from . import BaseTestCase, environment_variable
from ..key import PlaintextKey, PassphraseKey, KeyfileKey, Passphrase, PasswordRetriesExceeded, bin_to_hex
from ..helpers import Location, Chunk, IntegrityError


class KeyTestCase(BaseTestCase):
@pytest.fixture(autouse=True)
def clean_env(monkeypatch):
    # Workaround for some tests (testsuite/archiver) polluting the environment
    monkeypatch.delenv('BORG_PASSPHRASE', False)


class TestKey:
    class MockArgs:
        location = Location(tempfile.mkstemp()[1])

@@ -31,14 +38,10 @@ class KeyTestCase(BaseTestCase):
    """))
    keyfile2_id = unhexlify('c3fbf14bc001ebcc3cd86e696c13482ed071740927cd7cbe1b01b4bfcee49314')

    def setUp(self):
        self.tmppath = tempfile.mkdtemp()
        os.environ['BORG_KEYS_DIR'] = self.tmppath
        self.tmppath2 = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmppath)
        shutil.rmtree(self.tmppath2)
    @pytest.fixture
    def keys_dir(self, request, monkeypatch, tmpdir):
        monkeypatch.setenv('BORG_KEYS_DIR', tmpdir)
        return tmpdir

    class MockRepository:
        class _Location:

@@ -51,78 +54,144 @@ class KeyTestCase(BaseTestCase):
    def test_plaintext(self):
        key = PlaintextKey.create(None, None)
        chunk = Chunk(b'foo')
        self.assert_equal(hexlify(key.id_hash(chunk.data)), b'2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae')
        self.assert_equal(chunk, key.decrypt(key.id_hash(chunk.data), key.encrypt(chunk)))
        assert hexlify(key.id_hash(chunk.data)) == b'2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae'
        assert chunk == key.decrypt(key.id_hash(chunk.data), key.encrypt(chunk))

    def test_keyfile(self):
        os.environ['BORG_PASSPHRASE'] = 'test'
    def test_keyfile(self, monkeypatch, keys_dir):
        monkeypatch.setenv('BORG_PASSPHRASE', 'test')
        key = KeyfileKey.create(self.MockRepository(), self.MockArgs())
        self.assert_equal(bytes_to_long(key.enc_cipher.iv, 8), 0)
        assert bytes_to_long(key.enc_cipher.iv, 8) == 0
        manifest = key.encrypt(Chunk(b'XXX'))
        self.assert_equal(key.extract_nonce(manifest), 0)
        assert key.extract_nonce(manifest) == 0
        manifest2 = key.encrypt(Chunk(b'XXX'))
        self.assert_not_equal(manifest, manifest2)
        self.assert_equal(key.decrypt(None, manifest), key.decrypt(None, manifest2))
        self.assert_equal(key.extract_nonce(manifest2), 1)
        assert manifest != manifest2
        assert key.decrypt(None, manifest) == key.decrypt(None, manifest2)
        assert key.extract_nonce(manifest2) == 1
        iv = key.extract_nonce(manifest)
        key2 = KeyfileKey.detect(self.MockRepository(), manifest)
        self.assert_equal(bytes_to_long(key2.enc_cipher.iv, 8), iv + num_aes_blocks(len(manifest) - KeyfileKey.PAYLOAD_OVERHEAD))
        assert bytes_to_long(key2.enc_cipher.iv, 8) == iv + num_aes_blocks(len(manifest) - KeyfileKey.PAYLOAD_OVERHEAD)
        # Key data sanity check
        self.assert_equal(len(set([key2.id_key, key2.enc_key, key2.enc_hmac_key])), 3)
        self.assert_equal(key2.chunk_seed == 0, False)
        assert len({key2.id_key, key2.enc_key, key2.enc_hmac_key}) == 3
        assert key2.chunk_seed != 0
        chunk = Chunk(b'foo')
        self.assert_equal(chunk, key2.decrypt(key.id_hash(chunk.data), key.encrypt(chunk)))
        assert chunk == key2.decrypt(key.id_hash(chunk.data), key.encrypt(chunk))

    def test_keyfile_kfenv(self):
        keyfile = os.path.join(self.tmppath2, 'keyfile')
        with environment_variable(BORG_KEY_FILE=keyfile, BORG_PASSPHRASE='testkf'):
            assert not os.path.exists(keyfile)
            key = KeyfileKey.create(self.MockRepository(), self.MockArgs())
            assert os.path.exists(keyfile)
            chunk = Chunk(b'XXX')
            chunk_id = key.id_hash(chunk.data)
            chunk_cdata = key.encrypt(chunk)
            key = KeyfileKey.detect(self.MockRepository(), chunk_cdata)
            self.assert_equal(chunk, key.decrypt(chunk_id, chunk_cdata))
            os.unlink(keyfile)
            self.assert_raises(FileNotFoundError, KeyfileKey.detect, self.MockRepository(), chunk_cdata)
    def test_keyfile_kfenv(self, tmpdir, monkeypatch):
        keyfile = tmpdir.join('keyfile')
        monkeypatch.setenv('BORG_KEY_FILE', str(keyfile))
        monkeypatch.setenv('BORG_PASSPHRASE', 'testkf')
        assert not keyfile.exists()
        key = KeyfileKey.create(self.MockRepository(), self.MockArgs())
        assert keyfile.exists()
        chunk = Chunk(b'XXX')
        chunk_id = key.id_hash(chunk.data)
        chunk_cdata = key.encrypt(chunk)
        key = KeyfileKey.detect(self.MockRepository(), chunk_cdata)
        assert chunk == key.decrypt(chunk_id, chunk_cdata)
        keyfile.remove()
        with pytest.raises(FileNotFoundError):
            KeyfileKey.detect(self.MockRepository(), chunk_cdata)

    def test_keyfile2(self):
        with open(os.path.join(os.environ['BORG_KEYS_DIR'], 'keyfile'), 'w') as fd:
    def test_keyfile2(self, monkeypatch, keys_dir):
        with keys_dir.join('keyfile').open('w') as fd:
            fd.write(self.keyfile2_key_file)
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase')
        key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata)
        self.assert_equal(key.decrypt(self.keyfile2_id, self.keyfile2_cdata).data, b'payload')
        assert key.decrypt(self.keyfile2_id, self.keyfile2_cdata).data == b'payload'

    def test_keyfile2_kfenv(self):
        keyfile = os.path.join(self.tmppath2, 'keyfile')
        with open(keyfile, 'w') as fd:
    def test_keyfile2_kfenv(self, tmpdir, monkeypatch):
        keyfile = tmpdir.join('keyfile')
        with keyfile.open('w') as fd:
            fd.write(self.keyfile2_key_file)
        with environment_variable(BORG_KEY_FILE=keyfile, BORG_PASSPHRASE='passphrase'):
            key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata)
            self.assert_equal(key.decrypt(self.keyfile2_id, self.keyfile2_cdata).data, b'payload')
        monkeypatch.setenv('BORG_KEY_FILE', str(keyfile))
        monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase')
        key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata)
        assert key.decrypt(self.keyfile2_id, self.keyfile2_cdata).data == b'payload'

    def test_passphrase(self):
        os.environ['BORG_PASSPHRASE'] = 'test'
    def test_passphrase(self, keys_dir, monkeypatch):
        monkeypatch.setenv('BORG_PASSPHRASE', 'test')
        key = PassphraseKey.create(self.MockRepository(), None)
        self.assert_equal(bytes_to_long(key.enc_cipher.iv, 8), 0)
        self.assert_equal(hexlify(key.id_key), b'793b0717f9d8fb01c751a487e9b827897ceea62409870600013fbc6b4d8d7ca6')
        self.assert_equal(hexlify(key.enc_hmac_key), b'b885a05d329a086627412a6142aaeb9f6c54ab7950f996dd65587251f6bc0901')
        self.assert_equal(hexlify(key.enc_key), b'2ff3654c6daf7381dbbe718d2b20b4f1ea1e34caa6cc65f6bb3ac376b93fed2a')
        self.assert_equal(key.chunk_seed, -775740477)
        assert bytes_to_long(key.enc_cipher.iv, 8) == 0
        assert hexlify(key.id_key) == b'793b0717f9d8fb01c751a487e9b827897ceea62409870600013fbc6b4d8d7ca6'
        assert hexlify(key.enc_hmac_key) == b'b885a05d329a086627412a6142aaeb9f6c54ab7950f996dd65587251f6bc0901'
        assert hexlify(key.enc_key) == b'2ff3654c6daf7381dbbe718d2b20b4f1ea1e34caa6cc65f6bb3ac376b93fed2a'
        assert key.chunk_seed == -775740477
        manifest = key.encrypt(Chunk(b'XXX'))
        self.assert_equal(key.extract_nonce(manifest), 0)
        assert key.extract_nonce(manifest) == 0
        manifest2 = key.encrypt(Chunk(b'XXX'))
        self.assert_not_equal(manifest, manifest2)
        self.assert_equal(key.decrypt(None, manifest), key.decrypt(None, manifest2))
        self.assert_equal(key.extract_nonce(manifest2), 1)
        assert manifest != manifest2
        assert key.decrypt(None, manifest) == key.decrypt(None, manifest2)
        assert key.extract_nonce(manifest2) == 1
        iv = key.extract_nonce(manifest)
        key2 = PassphraseKey.detect(self.MockRepository(), manifest)
        self.assert_equal(bytes_to_long(key2.enc_cipher.iv, 8), iv + num_aes_blocks(len(manifest) - PassphraseKey.PAYLOAD_OVERHEAD))
        self.assert_equal(key.id_key, key2.id_key)
        self.assert_equal(key.enc_hmac_key, key2.enc_hmac_key)
        self.assert_equal(key.enc_key, key2.enc_key)
        self.assert_equal(key.chunk_seed, key2.chunk_seed)
        assert bytes_to_long(key2.enc_cipher.iv, 8) == iv + num_aes_blocks(len(manifest) - PassphraseKey.PAYLOAD_OVERHEAD)
        assert key.id_key == key2.id_key
        assert key.enc_hmac_key == key2.enc_hmac_key
        assert key.enc_key == key2.enc_key
        assert key.chunk_seed == key2.chunk_seed
        chunk = Chunk(b'foo')
        self.assert_equal(hexlify(key.id_hash(chunk.data)), b'818217cf07d37efad3860766dcdf1d21e401650fed2d76ed1d797d3aae925990')
        self.assert_equal(chunk, key2.decrypt(key2.id_hash(chunk.data), key.encrypt(chunk)))
        assert hexlify(key.id_hash(chunk.data)) == b'818217cf07d37efad3860766dcdf1d21e401650fed2d76ed1d797d3aae925990'
        assert chunk == key2.decrypt(key2.id_hash(chunk.data), key.encrypt(chunk))

    def _corrupt_byte(self, key, data, offset):
        data = bytearray(data)
        data[offset] += 1
        with pytest.raises(IntegrityError):
            key.decrypt("", data)

    def test_decrypt_integrity(self, monkeypatch, keys_dir):
        with keys_dir.join('keyfile').open('w') as fd:
            fd.write(self.keyfile2_key_file)
        monkeypatch.setenv('BORG_PASSPHRASE', 'passphrase')
        key = KeyfileKey.detect(self.MockRepository(), self.keyfile2_cdata)

        data = self.keyfile2_cdata
        for i in range(len(data)):
            self._corrupt_byte(key, data, i)

        with pytest.raises(IntegrityError):
            data = bytearray(self.keyfile2_cdata)
            id = bytearray(key.id_hash(data))  # corrupt chunk id
            id[12] = 0
            key.decrypt(id, data)


class TestPassphrase:
    def test_passphrase_new_verification(self, capsys, monkeypatch):
        monkeypatch.setattr(getpass, 'getpass', lambda prompt: "12aöäü")
        monkeypatch.setenv('BORG_DISPLAY_PASSPHRASE', 'no')
        Passphrase.new()
        out, err = capsys.readouterr()
        assert "12" not in out
        assert "12" not in err

        monkeypatch.setenv('BORG_DISPLAY_PASSPHRASE', 'yes')
        passphrase = Passphrase.new()
        out, err = capsys.readouterr()
        assert "313261c3b6c3a4c3bc" not in out
        assert "313261c3b6c3a4c3bc" in err
        assert passphrase == "12aöäü"

        monkeypatch.setattr(getpass, 'getpass', lambda prompt: "1234/@=")
        Passphrase.new()
        out, err = capsys.readouterr()
        assert "1234/@=" not in out
        assert "1234/@=" in err

    def test_passphrase_new_empty(self, capsys, monkeypatch):
        monkeypatch.delenv('BORG_PASSPHRASE', False)
        monkeypatch.setattr(getpass, 'getpass', lambda prompt: "")
        with pytest.raises(PasswordRetriesExceeded):
            Passphrase.new(allow_empty=False)
        out, err = capsys.readouterr()
        assert "must not be blank" in err

    def test_passphrase_new_retries(self, monkeypatch):
        monkeypatch.delenv('BORG_PASSPHRASE', False)
        ascending_numbers = iter(range(20))
        monkeypatch.setattr(getpass, 'getpass', lambda prompt: str(next(ascending_numbers)))
        with pytest.raises(PasswordRetriesExceeded):
            Passphrase.new()

    def test_passphrase_repr(self):
        assert "secret" not in repr(Passphrase("secret"))

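test_passphrase_repr requires that the secret never leaks via repr(). One way to get that behavior from a str subclass, as a sketch (the real Passphrase class in borg/key.py also handles prompting, retries and the environment):

    class PassphraseSketch(str):
        def __repr__(self):
            return '<PassphraseSketch "***hidden***">'  # never echo the secret

    assert "secret" not in repr(PassphraseSketch("secret"))
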
@@ -4,7 +4,7 @@ import sys
import tempfile
import unittest

from ..platform import acl_get, acl_set
from ..platform import acl_get, acl_set, swidth
from . import BaseTestCase


@@ -138,3 +138,16 @@ class PlatformDarwinTestCase(BaseTestCase):
        self.set_acl(file2.name, b'!#acl 1\ngroup:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:staff:0:allow:read\nuser:FFFFEEEE-DDDD-CCCC-BBBB-AAAA00000000:root:0:allow:read\n', numeric_owner=True)
        self.assert_in(b'group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000:wheel:0:allow:read', self.get_acl(file2.name)[b'acl_extended'])
        self.assert_in(b'group:ABCDEFAB-CDEF-ABCD-EFAB-CDEF00000000::0:allow:read', self.get_acl(file2.name, numeric_owner=True)[b'acl_extended'])


@unittest.skipUnless(sys.platform.startswith(('linux', 'freebsd', 'darwin')), 'POSIX only tests')
class PlatformPosixTestCase(BaseTestCase):

    def test_swidth_ascii(self):
        self.assert_equal(swidth("borg"), 4)

    def test_swidth_cjk(self):
        self.assert_equal(swidth("バックアップ"), 6 * 2)

    def test_swidth_mixed(self):
        self.assert_equal(swidth("borgバックアップ"), 4 + 6 * 2)

@@ -1,3 +1,5 @@
import io
import logging
import os
import shutil
import sys

@@ -5,10 +7,10 @@ import tempfile
from unittest.mock import patch

from ..hashindex import NSIndex
from ..helpers import Location, IntegrityError
from ..helpers import Location, IntegrityError, InternalOSError
from ..locking import UpgradableLock, LockFailed
from ..remote import RemoteRepository, InvalidRPCMethod
from ..repository import Repository
from ..remote import RemoteRepository, InvalidRPCMethod, ConnectionClosedWithHint, handle_remote_line
from ..repository import Repository, LoggedIO, MAGIC
from . import BaseTestCase


@@ -123,6 +125,46 @@ class RepositoryTestCase(RepositoryTestCaseBase):
        self.assert_equal(len(self.repository.list(limit=50)), 50)


class LocalRepositoryTestCase(RepositoryTestCaseBase):
    # test case that doesn't work with remote repositories

    def _assert_sparse(self):
        # The superseded 123456... PUT
        assert self.repository.compact[0] == 41 + 9
        # The DELETE issued by the superseding PUT (or issued directly)
        assert self.repository.compact[2] == 41
        self.repository._rebuild_sparse(0)
        assert self.repository.compact[0] == 41 + 9

    def test_sparse1(self):
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.repository.put(b'00000000000000000000000000000001', b'123456789')
        self.repository.commit()
        self.repository.put(b'00000000000000000000000000000001', b'bar')
        self._assert_sparse()

    def test_sparse2(self):
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.repository.put(b'00000000000000000000000000000001', b'123456789')
        self.repository.commit()
        self.repository.delete(b'00000000000000000000000000000001')
        self._assert_sparse()

    def test_sparse_delete(self):
        self.repository.put(b'00000000000000000000000000000000', b'1245')
        self.repository.delete(b'00000000000000000000000000000000')
        self.repository.io._write_fd.sync()

        # The on-line tracking works on a per-object basis...
        assert self.repository.compact[0] == 41 + 41 + 4
        self.repository._rebuild_sparse(0)
        # ...while _rebuild_sparse can mark whole segments as completely sparse (which then includes the segment magic)
        assert self.repository.compact[0] == 41 + 41 + 4 + len(MAGIC)

        self.repository.commit()
        assert 0 not in [segment for segment, _ in self.repository.io.segment_iterator()]

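The magic numbers in these sparse-accounting asserts follow from the segment entry layout assumed here: PUT and DELETE entries carry a fixed 41-byte overhead (4-byte CRC, 4-byte size field, 1-byte tag, 32-byte key), with PUT additionally carrying the payload. Under that assumption the expected compactable byte counts recompute as:

    # Assumed on-disk entry layout for borg 1.x segment files:
    #   crc32 (4) + size (4) + tag (1) + key (32) = 41 bytes overhead per entry
    PUT_DELETE_OVERHEAD = 4 + 4 + 1 + 32

    def compactable_put(data_len):
        return PUT_DELETE_OVERHEAD + data_len

    # test_sparse1/2: superseded PUT of b'123456789' plus its DELETE entry
    assert compactable_put(9) == 41 + 9
    assert PUT_DELETE_OVERHEAD == 41

    # test_sparse_delete: PUT of b'1245' (4 bytes) plus the DELETE entry
    assert compactable_put(4) + PUT_DELETE_OVERHEAD == 41 + 41 + 4
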
class RepositoryCommitTestCase(RepositoryTestCaseBase):

    def add_keys(self):

@@ -192,6 +234,13 @@ class RepositoryCommitTestCase(RepositoryTestCaseBase):
        self.assert_equal(self.repository.check(), True)
        self.assert_equal(len(self.repository), 3)

    def test_ignores_commit_tag_in_data(self):
        self.repository.put(b'0' * 32, LoggedIO.COMMIT)
        self.reopen()
        with self.repository:
            io = self.repository.io
            assert not io.is_committed_segment(io.get_latest_segment())


class RepositoryAppendOnlyTestCase(RepositoryTestCaseBase):
    def test_destroy_append_only(self):

@@ -207,18 +256,69 @@ class RepositoryAppendOnlyTestCase(RepositoryTestCaseBase):
        self.repository.commit()

        self.repository.append_only = False
        assert segments_in_repository() == 1
        assert segments_in_repository() == 2
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.repository.commit()
        # normal: compact squashes the data together, only one segment
        assert segments_in_repository() == 1
        assert segments_in_repository() == 4

        self.repository.append_only = True
        assert segments_in_repository() == 1
        assert segments_in_repository() == 4
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.repository.commit()
        # append only: does not compact, only new segments written
        assert segments_in_repository() == 2
        assert segments_in_repository() == 6


class RepositoryAuxiliaryCorruptionTestCase(RepositoryTestCaseBase):
    def setUp(self):
        super().setUp()
        self.repository.put(b'00000000000000000000000000000000', b'foo')
        self.repository.commit()
        self.repository.close()

    def do_commit(self):
        with self.repository:
            self.repository.put(b'00000000000000000000000000000000', b'fox')
            self.repository.commit()

    def test_corrupted_hints(self):
        with open(os.path.join(self.repository.path, 'hints.1'), 'ab') as fd:
            fd.write(b'123456789')
        self.do_commit()

    def test_deleted_hints(self):
        os.unlink(os.path.join(self.repository.path, 'hints.1'))
        self.do_commit()

    def test_deleted_index(self):
        os.unlink(os.path.join(self.repository.path, 'index.1'))
        self.do_commit()

    def test_unreadable_hints(self):
        hints = os.path.join(self.repository.path, 'hints.1')
        os.unlink(hints)
        os.mkdir(hints)
        with self.assert_raises(InternalOSError):
            self.do_commit()

    def test_index(self):
        with open(os.path.join(self.repository.path, 'index.1'), 'wb') as fd:
            fd.write(b'123456789')
        self.do_commit()

    def test_index_outside_transaction(self):
        with open(os.path.join(self.repository.path, 'index.1'), 'wb') as fd:
            fd.write(b'123456789')
        with self.repository:
            assert len(self.repository) == 1

    def test_unreadable_index(self):
        index = os.path.join(self.repository.path, 'index.1')
        os.unlink(index)
        os.mkdir(index)
        with self.assert_raises(InternalOSError):
            self.do_commit()


class RepositoryCheckTestCase(RepositoryTestCaseBase):

@@ -268,7 +368,7 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
        return set(int(key) for key in self.repository.list())

    def test_repair_corrupted_segment(self):
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.add_objects([[1, 2, 3], [4, 5], [6]])
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())
        self.check(status=True)
        self.corrupt_object(5)

@@ -287,20 +387,20 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())
        self.check(status=True)
        self.delete_segment(1)
        self.delete_segment(2)
        self.repository.rollback()
        self.check(repair=True, status=True)
        self.assert_equal(set([1, 2, 3]), self.list_objects())

    def test_repair_missing_commit_segment(self):
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.delete_segment(1)
        self.delete_segment(3)
        self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4))
        self.assert_equal(set([1, 2, 3]), self.list_objects())

    def test_repair_corrupted_commit_segment(self):
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        with open(os.path.join(self.tmppath, 'repository', 'data', '0', '1'), 'r+b') as fd:
        with open(os.path.join(self.tmppath, 'repository', 'data', '0', '3'), 'r+b') as fd:
            fd.seek(-1, os.SEEK_END)
            fd.write(b'X')
        self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4))

@@ -310,15 +410,15 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):

    def test_repair_no_commits(self):
        self.add_objects([[1, 2, 3]])
        with open(os.path.join(self.tmppath, 'repository', 'data', '0', '0'), 'r+b') as fd:
        with open(os.path.join(self.tmppath, 'repository', 'data', '0', '1'), 'r+b') as fd:
            fd.seek(-1, os.SEEK_END)
            fd.write(b'X')
        self.assert_raises(Repository.CheckNeeded, lambda: self.get_objects(4))
        self.check(status=False)
        self.check(status=False)
        self.assert_equal(self.list_indices(), ['index.0'])
        self.check(repair=True, status=True)
        self.assert_equal(self.list_indices(), ['index.1'])
        self.check(repair=True, status=True)
        self.assert_equal(self.list_indices(), ['index.3'])
        self.check(status=True)
        self.get_objects(3)
        self.assert_equal(set([1, 2, 3]), self.list_objects())

@@ -332,10 +432,10 @@ class RepositoryCheckTestCase(RepositoryTestCaseBase):

    def test_repair_index_too_new(self):
        self.add_objects([[1, 2, 3], [4, 5, 6]])
        self.assert_equal(self.list_indices(), ['index.1'])
        self.assert_equal(self.list_indices(), ['index.3'])
        self.rename_index('index.100')
        self.check(status=True)
        self.assert_equal(self.list_indices(), ['index.1'])
        self.assert_equal(self.list_indices(), ['index.3'])
        self.get_objects(4)
        self.assert_equal(set([1, 2, 3, 4, 5, 6]), self.list_objects())


@@ -389,3 +489,72 @@ class RemoteRepositoryCheckTestCase(RepositoryCheckTestCase):
    def test_crash_before_compact(self):
        # skip this test, we can't mock-patch a Repository class in another process!
        pass


class RemoteLoggerTestCase(BaseTestCase):
    def setUp(self):
        self.stream = io.StringIO()
        self.handler = logging.StreamHandler(self.stream)
        logging.getLogger().handlers[:] = [self.handler]
        logging.getLogger('borg.repository').handlers[:] = []
        logging.getLogger('borg.repository.foo').handlers[:] = []
        # capture stderr
        sys.stderr.flush()
        self.old_stderr = sys.stderr
        self.stderr = sys.stderr = io.StringIO()

    def tearDown(self):
        sys.stderr = self.old_stderr

    def test_stderr_messages(self):
        handle_remote_line("unstructured stderr message")
        self.assert_equal(self.stream.getvalue(), '')
        # stderr messages don't get an implicit newline
        self.assert_equal(self.stderr.getvalue(), 'Remote: unstructured stderr message')

    def test_pre11_format_messages(self):
        self.handler.setLevel(logging.DEBUG)
        logging.getLogger().setLevel(logging.DEBUG)

        handle_remote_line("$LOG INFO Remote: borg < 1.1 format message")
        self.assert_equal(self.stream.getvalue(), 'Remote: borg < 1.1 format message\n')
        self.assert_equal(self.stderr.getvalue(), '')

    def test_post11_format_messages(self):
        self.handler.setLevel(logging.DEBUG)
        logging.getLogger().setLevel(logging.DEBUG)

        handle_remote_line("$LOG INFO borg.repository Remote: borg >= 1.1 format message")
        self.assert_equal(self.stream.getvalue(), 'Remote: borg >= 1.1 format message\n')
        self.assert_equal(self.stderr.getvalue(), '')

    def test_remote_messages_screened(self):
        # default borg config for root logger
        self.handler.setLevel(logging.WARNING)
        logging.getLogger().setLevel(logging.WARNING)

        handle_remote_line("$LOG INFO borg.repository Remote: new format info message")
        self.assert_equal(self.stream.getvalue(), '')
        self.assert_equal(self.stderr.getvalue(), '')

    def test_info_to_correct_local_child(self):
        logging.getLogger('borg.repository').setLevel(logging.INFO)
        logging.getLogger('borg.repository.foo').setLevel(logging.INFO)
        # default borg config for root logger
        self.handler.setLevel(logging.WARNING)
        logging.getLogger().setLevel(logging.WARNING)

        child_stream = io.StringIO()
        child_handler = logging.StreamHandler(child_stream)
        child_handler.setLevel(logging.INFO)
        logging.getLogger('borg.repository').handlers[:] = [child_handler]
        foo_stream = io.StringIO()
        foo_handler = logging.StreamHandler(foo_stream)
        foo_handler.setLevel(logging.INFO)
        logging.getLogger('borg.repository.foo').handlers[:] = [foo_handler]

        handle_remote_line("$LOG INFO borg.repository Remote: new format child message")
        self.assert_equal(foo_stream.getvalue(), '')
        self.assert_equal(child_stream.getvalue(), 'Remote: new format child message\n')
        self.assert_equal(self.stream.getvalue(), '')
        self.assert_equal(self.stderr.getvalue(), '')

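These cases document the wire format accepted by handle_remote_line: unstructured lines are forwarded to stderr with a 'Remote: ' prefix and no added newline, '$LOG <LEVEL> <message>' is the borg < 1.1 form routed to the root logger, and '$LOG <LEVEL> <logger> <message>' is the borg >= 1.1 form routed to the named logger. A simplified dispatch sketch inferred from these tests (not the actual implementation in borg/remote.py):

    import logging
    import sys

    def handle_remote_line_sketch(line):
        """Simplified sketch; dispatch rules inferred from the tests above."""
        if line.startswith('$LOG '):
            _, level, rest = line.split(' ', 2)
            if rest.startswith('Remote:'):
                # borg < 1.1 format: "$LOG LEVEL message" (no logger name)
                logging.getLogger('').log(getattr(logging, level), rest)
            else:
                # borg >= 1.1 format: "$LOG LEVEL logger.name message"
                logname, msg = rest.split(' ', 1)
                logging.getLogger(logname).log(getattr(logging, level), msg)
        else:
            # unstructured remote stderr: forward as-is, no implicit newline
            sys.stderr.write('Remote: ' + line)
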
conftest.py (new file, 45 lines)

@@ -0,0 +1,45 @@
import os.path
import sys

# This is a hack to fix path problems because "borg" (the package) is in the source root.
# When importing the conftest an "import borg" can incorrectly import the borg from the
# source root instead of the one installed in the environment.
# The workaround is to remove entries pointing there from the path and check whether "borg"
# is still importable. If it is not, then it has not been installed in the environment
# and the entries are put back.
#
# TODO: After moving the package to src/: remove this.

original_path = list(sys.path)
for entry in original_path:
    if entry == '' or entry == os.path.dirname(__file__):
        sys.path.remove(entry)

try:
    import borg
except ImportError:
    sys.path = original_path

from borg.logger import setup_logging

# Ensure that the loggers exist for all tests
setup_logging()

from borg.testsuite import has_lchflags, no_lchlfags_because, has_llfuse
from borg.testsuite.platform import fakeroot_detected
from borg import xattr, constants


def pytest_configure(config):
    # no fixture-based monkey-patching since star-imports are used for the constants module
    constants.PBKDF2_ITERATIONS = 1


def pytest_report_header(config, startdir):
    yesno = ['no', 'yes']
    flags = 'Testing BSD-style flags: %s %s' % (yesno[has_lchflags], no_lchlfags_because)
    fakeroot = 'fakeroot: %s (>=1.20.2: %s)' % (
        yesno[fakeroot_detected()],
        yesno[xattr.XATTR_FAKEROOT])
    llfuse = 'Testing fuse: %s' % yesno[has_llfuse]
    return '\n'.join((flags, llfuse, fakeroot))

docs/api.rst (72 lines changed)

@@ -2,31 +2,11 @@
API Documentation
=================

.. automodule:: borg.platform
.. automodule:: borg.archiver
    :members:
    :undoc-members:

.. automodule:: borg.shellpattern
    :members:
    :undoc-members:

.. automodule:: borg.locking
    :members:
    :undoc-members:

.. automodule:: borg.hash_sizes
    :members:
    :undoc-members:

.. automodule:: borg.logger
    :members:
    :undoc-members:

.. automodule:: borg.remote
    :members:
    :undoc-members:

.. automodule:: borg.fuse
.. automodule:: borg.upgrader
    :members:
    :undoc-members:

@@ -34,7 +14,23 @@ API Documentation
    :members:
    :undoc-members:

.. automodule:: borg.helpers
.. automodule:: borg.fuse
    :members:
    :undoc-members:

.. automodule:: borg.platform
    :members:
    :undoc-members:

.. automodule:: borg.locking
    :members:
    :undoc-members:

.. automodule:: borg.shellpattern
    :members:
    :undoc-members:

.. automodule:: borg.repository
    :members:
    :undoc-members:

@@ -42,15 +38,19 @@ API Documentation
    :members:
    :undoc-members:

.. automodule:: borg.remote
    :members:
    :undoc-members:

.. automodule:: borg.hash_sizes
    :members:
    :undoc-members:

.. automodule:: borg.xattr
    :members:
    :undoc-members:

.. automodule:: borg.archiver
    :members:
    :undoc-members:

.. automodule:: borg.repository
.. automodule:: borg.helpers
    :members:
    :undoc-members:

@@ -62,7 +62,7 @@ API Documentation
    :members:
    :undoc-members:

.. automodule:: borg.upgrader
.. automodule:: borg.logger
    :members:
    :undoc-members:

@@ -70,15 +70,15 @@ API Documentation
    :members:
    :undoc-members:

.. automodule:: borg.compress
    :members:
    :undoc-members:

.. automodule:: borg.platform_linux
    :members:
    :undoc-members:

.. automodule:: borg.crypto
.. automodule:: borg.hashindex
    :members:
    :undoc-members:

.. automodule:: borg.compress
    :members:
    :undoc-members:

@@ -86,10 +86,10 @@ API Documentation
    :members:
    :undoc-members:

.. automodule:: borg.platform_freebsd
.. automodule:: borg.crypto
    :members:
    :undoc-members:

.. automodule:: borg.hashindex
.. automodule:: borg.platform_freebsd
    :members:
    :undoc-members:

@@ -6,6 +6,10 @@ Version 1.1.0 (not released yet)

New features:

- borg check: will not produce the "Checking segments" output unless
  new --progress option is passed, #824.
- options that imply output (--show-rc, --show-version, --list, --stats,
  --progress) don't need -v/--info to have that output displayed, #865
- borg recreate: re-create existing archives, #787 #686 #630 #70, also see
  #757, #770.

@@ -34,7 +38,7 @@ New features:
- borg comment: add archive comments, #842
- provide "borgfs" wrapper for borg mount, enables usage via fstab, #743
- create: add 'x' status for excluded paths, #814
- --show-version: shows/logs the borg version (use -v), #725
- --show-version: shows/logs the borg version, #725
- borg list/prune/delete: also output archive id, #731

Bug fixes:

@@ -70,22 +74,34 @@ Other changes:
- ChunkBuffer: add test for leaving partial chunk in buffer, fixes #945


Version 1.0.3 (not released yet)
--------------------------------
Version 1.0.3
-------------

Bug fixes:

- prune: ignore checkpoints, #997
- prune: fix bad validator, #942
- fix capabilities extraction on Linux (set xattrs last, after chown())
- prune: avoid that checkpoints are kept and completed archives are deleted in
  a prune run, #997
- prune: fix commandline argument validation - some valid command lines were
  considered invalid (annoying, but harmless), #942
- fix capabilities extraction on Linux (set xattrs last, after chown()), #1069
- repository: fix commit tags being seen in data
- when probing key files, do binary reads. avoids crash when non-borg binary
  files are located in borg's key files directory.
- handle SIGTERM and make a clean exit - avoids orphan lock files.
- repository cache: don't cache large objects (avoid using lots of temp. disk
  space), #1063

Other changes:

- update readthedocs URLs, #991
- add missing docs for "borg break-lock", #992
- borg create help: add some words about the archive name
- borg create help: document format tags, #894
- Vagrantfile: OS X: update osxfuse / install lzma package, #933
- setup.py: add check for platform_darwin.c
- setup.py: on freebsd, use a llfuse release that builds ok
- docs / help:

  - update readthedocs URLs, #991
  - add missing docs for "borg break-lock", #992
  - borg create help: add some words about the archive name
  - borg create help: document format tags, #894


Version 1.0.2

@@ -27,8 +27,10 @@ errors, critical for critical errors/states).
When directly talking to the user (e.g. Y/N questions), do not use logging,
but directly output to stderr (not: stdout, it could be connected to a pipe).

To control the amount and kinds of messages output to stderr or emitted at
info level, use flags like ``--stats`` or ``--list``.
To control the amount and kinds of messages emitted at info level, use
flags like ``--stats`` or ``--list``, then create a topic logger for messages
controlled by that flag. See ``_setup_implied_logging()`` in
``borg/archiver.py`` for the entry point to topic logging.

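In practice that means a flag such as ``--progress`` switches one topic logger below ``borg.output`` to INFO while the root logger stays at WARNING. A minimal sketch of the pattern (attribute and logger names here are illustrative; see ``_setup_implied_logging()`` for the real wiring)::

    import logging

    def setup_implied_logging_sketch(args):
        # the root logger stays quiet ...
        logging.getLogger('').setLevel(logging.WARNING)
        # ... but each output-implying flag enables exactly one topic logger
        if getattr(args, 'stats', False):
            logging.getLogger('borg.output.stats').setLevel(logging.INFO)
        if getattr(args, 'progress', False):
            logging.getLogger('borg.output.progress').setLevel(logging.INFO)
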
Building a development environment
----------------------------------

@@ -46,7 +46,7 @@ A step by step example

3. The next day create a new archive called *Tuesday*::

       $ borg create -v --stats /path/to/repo::Tuesday ~/src ~/Documents
       $ borg create --stats /path/to/repo::Tuesday ~/src ~/Documents

   This backup will be a lot quicker and a lot smaller since only new never
   before seen data is stored. The ``--stats`` option causes |project_name| to

@@ -93,9 +93,10 @@ A step by step example

.. Note::
    Borg is quiet by default (it works on WARNING log level).
    Add the ``-v`` (or ``--verbose`` or ``--info``) option to adjust the log
    level to INFO and also use options like ``--progress`` or ``--list`` to
    get progress reporting during command execution.
    You can use options like ``--progress`` or ``--list`` to get specific
    reports during command execution. You can also add the ``-v`` (or
    ``--verbose`` or ``--info``) option to adjust the log level to INFO to
    get other informational messages.

Automating backups
------------------

@@ -114,7 +115,7 @@ certain number of old archives::

    export BORG_PASSPHRASE=mysecret

    # Backup most important stuff:
    borg create -v --stats -C lz4 ::`hostname`-`date +%Y-%m-%d` \
    borg create --stats -C lz4 ::`hostname`-`date +%Y-%m-%d` \
        /etc \
        /home \
        /var \

@@ -16,7 +16,8 @@ Type of log output

The log level of the builtin logging configuration defaults to WARNING.
This is because we want |project_name| to be mostly silent and only output
warnings, errors and critical messages.
warnings, errors and critical messages, unless output has been requested
by supplying an option that implies output (e.g. ``--list`` or ``--progress``).

Log levels: DEBUG < INFO < WARNING < ERROR < CRITICAL

@@ -41,10 +42,6 @@ give different output on different log levels - it's just a possibility.
.. warning:: Options --critical and --error are provided for completeness,
   their usage is not recommended as you might miss important information.

.. warning:: While some options (like ``--stats`` or ``--list``) will emit more
   informational messages, you have to use INFO (or lower) log level to make
   them show up in log output. Use ``-v`` or a logging configuration.

Return codes
~~~~~~~~~~~~

@@ -170,6 +167,22 @@ Network:

In case you are interested in more details, please read the internals documentation.

File systems
~~~~~~~~~~~~

We strongly recommend against using Borg (or any other database-like
software) on non-journaling file systems like FAT, since it is not
possible to assume any consistency in case of power failures (or a
sudden disconnect of an external drive or similar failures).

While Borg uses a data store that is resilient against these failures
when used on journaling file systems, it is not possible to guarantee
this with some hardware -- independent of the software used. We are not
aware of a list of affected hardware.

If you are suspicious whether your Borg repository is still consistent
and readable after one of the failures mentioned above occurred, run
``borg check --verify-data`` to make sure it is consistent.

Units
~~~~~

@@ -220,46 +233,6 @@ Examples
    # Remote repository (store the key in your home dir)
    $ borg init --encryption=keyfile user@hostname:backup

Important notes about encryption:

It is not recommended to disable encryption. Repository encryption protects you
e.g. against the case that an attacker has access to your backup repository.

But be careful with the key / the passphrase:

If you want "passphrase-only" security, use the ``repokey`` mode. The key will
be stored inside the repository (in its "config" file). In the above mentioned
attack scenario, the attacker will have the key (but not the passphrase).

If you want "passphrase and having-the-key" security, use the ``keyfile`` mode.
The key will be stored in your home directory (in ``.config/borg/keys``). In
the attack scenario, an attacker who only has access to your repo won't have
the key (and also not the passphrase).

Make a backup copy of the key file (``keyfile`` mode) or repo config file
(``repokey`` mode) and keep it at a safe place, so you still have the key in
case it gets corrupted or lost. Also keep the passphrase at a safe place.
The backup that is encrypted with that key won't help you with that, of course.

Make sure you use a good passphrase. Not too short, not too simple. The real
encryption / decryption key is encrypted with / locked by your passphrase.
If an attacker gets your key, he can't unlock and use it without knowing the
passphrase.

Be careful with special or non-ascii characters in your passphrase:

- |project_name| processes the passphrase as unicode (and encodes it as utf-8),
  so it does not have problems dealing with even the strangest characters.
- BUT: that does not necessarily apply to your OS / VM / keyboard configuration.

So it is better to use a long passphrase made from simple ascii characters than
one that includes non-ascii characters or characters that are hard/impossible
to enter on a different keyboard layout.

You can change your passphrase for existing repos at any time, it won't affect
the encryption/decryption key or other secrets.


.. include:: usage/create.rst.inc

Examples

@@ -269,8 +242,8 @@ Examples
    # Backup ~/Documents into an archive named "my-documents"
    $ borg create /path/to/repo::my-documents ~/Documents

    # same, but verbosely list all files as we process them
    $ borg create -v --list /path/to/repo::my-documents ~/Documents
    # same, but list all files as we process them
    $ borg create --list /path/to/repo::my-documents ~/Documents

    # Backup ~/Documents and ~/src but exclude pyc files
    $ borg create /path/to/repo::my-files \

@@ -325,7 +298,7 @@ Examples
    $ borg extract /path/to/repo::my-files

    # Extract entire archive and list files while processing
    $ borg extract -v --list /path/to/repo::my-files
    $ borg extract --list /path/to/repo::my-files

    # Verify whether an archive could be successfully extracted, but do not write files to disk
    $ borg extract --dry-run /path/to/repo::my-files

@@ -504,7 +477,7 @@ Examples
    Username: root
    Time (start): Mon, 2016-02-15 19:36:29
    Time (end):   Mon, 2016-02-15 19:39:26
    Command line: /usr/local/bin/borg create -v --list -C zlib,6 /path/to/repo::root-2016-02-15 / --one-file-system
    Command line: /usr/local/bin/borg create --list -C zlib,6 /path/to/repo::root-2016-02-15 / --one-file-system
    Number of files: 38100

                       Original size      Compressed size    Deduplicated size

@@ -679,7 +652,7 @@ Here are misc. notes about topics that are maybe not covered in enough detail in
Item flags
~~~~~~~~~~

``borg create -v --list`` outputs a verbose list of all files, directories and other
``borg create --list`` outputs a list of all files, directories and other
file system items it considered (no matter whether they had content changes
or not). For each item, it prefixes a single-letter flag that indicates type
and/or status of the item.

@@ -74,6 +74,12 @@ This command creates a backup archive containing all files found while recursively
traversing all paths specified. The archive will consume almost no disk space for
files or parts of files that have already been stored in other archives.

The archive name needs to be unique. It must not end in '.checkpoint' or
'.checkpoint.N' (with N being a number), because these names are used for
checkpoints and treated in special ways.

In the archive name, you may use the following format tags:
{now}, {utcnow}, {fqdn}, {hostname}, {user}, {pid}

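The tag expansion can be approximated with plain Python string formatting; the helper below is illustrative only, not borg's actual implementation::

    import getpass
    import os
    import socket
    from datetime import datetime

    def expand_archive_name_sketch(name):
        # hypothetical helper: maps each documented tag to a plausible value
        return name.format(now=datetime.now(), utcnow=datetime.utcnow(),
                           hostname=socket.gethostname(), fqdn=socket.getfqdn(),
                           user=getpass.getuser(), pid=os.getpid())

    # e.g. '{hostname}-{now}' -> 'myhost-2016-05-20 10:11:12.131415'
    print(expand_archive_name_sketch('{hostname}-{now}'))
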
To speed up pulling backups over sshfs and similar network file systems which do
not provide correct inode information, the --ignore-inode flag can be used. This

setup.py (16 lines changed)

@@ -27,12 +27,18 @@ install_requires = ['msgpack-python>=0.4.6', ]
extras_require = {
    # llfuse 0.40 (tested, proven, ok), needs FUSE version >= 2.8.0
    # llfuse 0.41 (tested shortly, looks ok), needs FUSE version >= 2.8.0
    # llfuse 0.41.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0
    # llfuse 0.42 (tested shortly, looks ok), needs FUSE version >= 2.8.0
    # llfuse 1.0 (tested shortly, looks ok), needs FUSE version >= 2.8.0
    # llfuse 2.0 will break API
    'fuse': ['llfuse<2.0', ],
}

if sys.platform.startswith('freebsd'):
    # while llfuse 1.0 is the latest llfuse release right now,
    # llfuse 0.41.1 is the latest release that actually builds on freebsd:
    extras_require['fuse'] = ['llfuse==0.41.1', ]

from setuptools import setup, Extension
from setuptools.command.sdist import sdist

@@ -41,6 +47,7 @@ compress_source = 'borg/compress.pyx'
crypto_source = 'borg/crypto.pyx'
chunker_source = 'borg/chunker.pyx'
hashindex_source = 'borg/hashindex.pyx'
platform_posix_source = 'borg/platform_posix.pyx'
platform_linux_source = 'borg/platform_linux.pyx'
platform_darwin_source = 'borg/platform_darwin.pyx'
platform_freebsd_source = 'borg/platform_freebsd.pyx'

@@ -63,6 +70,7 @@ try:
            'borg/crypto.c',
            'borg/chunker.c', 'borg/_chunker.c',
            'borg/hashindex.c', 'borg/_hashindex.c',
            'borg/platform_posix.c',
            'borg/platform_linux.c',
            'borg/platform_freebsd.c',
            'borg/platform_darwin.c',

@@ -79,6 +87,7 @@ except ImportError:
    crypto_source = crypto_source.replace('.pyx', '.c')
    chunker_source = chunker_source.replace('.pyx', '.c')
    hashindex_source = hashindex_source.replace('.pyx', '.c')
    platform_posix_source = platform_posix_source.replace('.pyx', '.c')
    platform_linux_source = platform_linux_source.replace('.pyx', '.c')
    platform_freebsd_source = platform_freebsd_source.replace('.pyx', '.c')
    platform_darwin_source = platform_darwin_source.replace('.pyx', '.c')

@@ -86,7 +95,7 @@ except ImportError:
    from distutils.command.build_ext import build_ext
    if not on_rtd and not all(os.path.exists(path) for path in [
            compress_source, crypto_source, chunker_source, hashindex_source,
            platform_linux_source, platform_freebsd_source, platform_windows_source]):
            platform_linux_source, platform_freebsd_source, platform_darwin_source, platform_windows_source]):
        raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.')

@@ -174,7 +183,7 @@ class build_usage(Command):
        print('generating usage docs')
        # allows us to build docs without the C modules fully loaded during help generation
        from borg.archiver import Archiver
        parser = Archiver().build_parser(prog='borg')
        parser = Archiver(prog='borg').parser
        choices = {}
        for action in parser._actions:
            if action.choices is not None:

@@ -310,6 +319,9 @@ if not on_rtd:
        Extension('borg.chunker', [chunker_source]),
        Extension('borg.hashindex', [hashindex_source])
    ]
    if sys.platform.startswith(('linux', 'freebsd', 'darwin')):
        ext_modules.append(Extension('borg.platform_posix', [platform_posix_source]))

    if sys.platform == 'linux':
        ext_modules.append(Extension('borg.platform_linux', [platform_linux_source], libraries=['acl']))
    elif sys.platform.startswith('freebsd'):

tox.ini (2 lines changed)

@@ -2,7 +2,7 @@
# fakeroot -u tox --recreate

[tox]
envlist = py{34,35},flake8
envlist = py{34,35,36},flake8

[testenv]
# Change dir to avoid import problem for cython code. The directory does