https://github.com/borgbackup/borg.git
.pyx files: fix typos and grammar
parent 6602cf1679
commit a4b2bb3d76
9 changed files with 104 additions and 113 deletions
@@ -36,15 +36,15 @@ _Chunk.__doc__ = """\
 meta is always a dictionary, data depends on allocation.

 data chunk read from a DATA range of a file (not from a sparse hole):
-    meta = {'allocation' = CH_DATA, 'size' = size_of_chunk }
+    meta = {'allocation': CH_DATA, 'size': size_of_chunk}
     data = read_data [bytes or memoryview]

 all-zero chunk read from a DATA range of a file (not from a sparse hole, but detected to be all-zero):
-    meta = {'allocation' = CH_ALLOC, 'size' = size_of_chunk }
+    meta = {'allocation': CH_ALLOC, 'size': size_of_chunk}
     data = None

 all-zero chunk from a HOLE range of a file (from a sparse hole):
-    meta = {'allocation' = CH_HOLE, 'size' = size_of_chunk }
+    meta = {'allocation': CH_HOLE, 'size': size_of_chunk}
     data = None
 """
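As a side note on the semantics documented here, a minimal sketch (not borg's extract code) of how a consumer could turn these three allocation types back into a sparse output file; CH_DATA/CH_ALLOC/CH_HOLE are stand-in constants assumed to mirror borg's.

import os

CH_DATA, CH_ALLOC, CH_HOLE = 0, 1, 2   # assumed stand-ins for borg's constants

def write_chunk(out_fd, meta, data):
    if meta['allocation'] == CH_DATA:
        os.write(out_fd, data)                      # real data: write it
    else:
        # all-zero or hole chunk (data is None): seek forward instead of writing,
        # which leaves a hole that reads back as zeros; a trailing hole would
        # additionally need os.ftruncate() to fix the final file size.
        os.lseek(out_fd, meta['size'], os.SEEK_CUR)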
@@ -57,8 +57,8 @@ def dread(offset, size, fd=None, fh=-1):
     if use_fh:
         data = os.read(fh, size)
         if hasattr(os, 'posix_fadvise'):
-            # UNIX only and, in case of block sizes that are not a multiple of the
-            # system's page size, better be used with a bug fixed linux kernel > 4.6.0,
+            # UNIX-only and, in case of block sizes that are not a multiple of the
+            # system's page size, it is better used with a bug-fixed Linux kernel >= 4.6.0,
             # see comment/workaround in _chunker.c and borgbackup issue #907.
            os.posix_fadvise(fh, offset, len(data), os.POSIX_FADV_DONTNEED)
        return data
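For context on the fadvise call being reworded here, a self-contained sketch of the same pattern: read a block, then tell the kernel its pages will not be needed again, so a long backup run does not crowd more useful data out of the page cache.

import os

def read_block_uncached(fh, offset, size):
    os.lseek(fh, offset, os.SEEK_SET)
    data = os.read(fh, size)
    if hasattr(os, 'posix_fadvise'):   # not available on every platform
        os.posix_fadvise(fh, offset, len(data), os.POSIX_FADV_DONTNEED)
    return data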
@@ -86,14 +86,14 @@ def dpos_curr_end(fd=None, fh=-1):

 def sparsemap(fd=None, fh=-1):
     """
-    generator yielding a (start, length, is_data) tuple for each range.
-    is_data is indicating data ranges (True) or hole ranges (False).
+    Generator yielding a (start, length, is_data) tuple for each range.
+    is_data indicates data ranges (True) or hole ranges (False).

-    note:
-    the map is generated starting from the current seek position (it
+    Note:
+    The map is generated starting from the current seek position (it
     is not required to be 0 / to be at the start of the file) and
-    work from there up to the end of the file.
-    when the generator is finished, the file pointer position will be
+    works from there up to the end of the file.
+    When the generator is finished, the file pointer position will be
     reset to where it was before calling this function.
     """
     curr, file_len = dpos_curr_end(fd, fh)
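A standalone sketch of the idea behind sparsemap (not borg's dseek/dread based version): walk a file with os.SEEK_HOLE / os.SEEK_DATA from offset 0 and yield (start, length, is_data) tuples. It assumes a kernel and filesystem with sparse-file seek support and does not restore the file position.

import errno
import os

def sparse_ranges(fd):
    file_len = os.lseek(fd, 0, os.SEEK_END)
    start = 0
    while start < file_len:
        # everything from start up to the next hole is data
        hole = os.lseek(fd, start, os.SEEK_HOLE)
        if hole > start:
            yield (start, hole - start, True)
        if hole >= file_len:
            break
        # everything from the hole up to the next data (or EOF) is a hole
        try:
            data = os.lseek(fd, hole, os.SEEK_DATA)
        except OSError as e:
            if e.errno != errno.ENXIO:
                raise
            data = file_len        # trailing hole: SEEK_DATA finds nothing more
        yield (hole, data - hole, False)
        start = data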
@@ -107,7 +107,7 @@ def sparsemap(fd=None, fh=-1):
         except OSError as e:
             if e.errno == errno.ENXIO:
                 if not is_data and start < file_len:
-                    # if there is a hole at the end of a file, we can not find the file end by SEEK_DATA
+                    # If there is a hole at the end of a file, we cannot find the file end by SEEK_DATA
                     # (because we run into ENXIO), thus we must manually deal with this case:
                     end = file_len
                     yield (start, end - start, is_data)
@@ -120,7 +120,7 @@ def sparsemap(fd=None, fh=-1):
             start = end
             whence = os.SEEK_DATA if is_data else os.SEEK_HOLE
     finally:
-        # seek to same position as before calling this function
+        # Seek to the same position as before calling this function
         dseek(curr, os.SEEK_SET, fd, fh)
@@ -271,7 +271,7 @@ cdef class Chunker:
             got = len(data)
             # we do not have SEEK_DATA/SEEK_HOLE support in chunker_process C code,
             # but we can just check if data was all-zero (and either came from a hole
-            # or from stored zeros - we can not detect that here).
+            # or from stored zeros - we cannot detect that here).
             if zeros.startswith(data):
                 data = None
                 allocation = CH_ALLOC
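A quick illustration of the zeros.startswith(data) idiom referenced in this hunk: with a preallocated all-zero buffer at least as long as any chunk, the prefix test is True exactly when data contains only zero bytes.

zeros = bytes(4096)                       # must be >= the largest chunk checked
assert zeros.startswith(b'\0' * 100)      # all-zero data -> treated like CH_ALLOC
assert not zeros.startswith(b'\0\0x\0')   # any non-zero byte -> stays CH_DATA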
@@ -55,10 +55,10 @@ cdef class CompressorBase:
     """
     base class for all (de)compression classes,
     also handles compression format auto detection and
-    adding/stripping the ID header (which enable auto detection).
+    adding/stripping the ID header (which enables auto detection).
     """
     ID = b'\xFF\xFF'  # reserved and not used
-    # overwrite with a unique 2-bytes bytestring in child classes
+    # overwrite with a unique 2-byte byte string in child classes
     name = 'baseclass'

     @classmethod
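A minimal sketch (illustrative names, not borg's actual classes) of the 2-byte ID header scheme this docstring describes: compress() prepends the compressor's ID, and detection dispatches on the first two bytes of the stored blob.

registry = {}

class Base:
    ID = b'\xff\xff'                 # reserved; child classes use a unique 2-byte ID
    def compress(self, data):
        return self.ID + data        # a real subclass would compress `data` first
    def decompress(self, blob):
        return blob[2:]              # a real subclass would decompress blob[2:]

class NoCompression(Base):
    ID = b'\x00\x00'

registry[NoCompression.ID] = NoCompression

def detect(blob):
    return registry[blob[:2]]()      # unknown IDs would need the legacy zlib check

blob = NoCompression().compress(b'hello')
assert detect(blob).decompress(blob) == b'hello'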
@@ -147,7 +147,7 @@ cdef class DecidingCompressor(CompressorBase):

 class CNONE(CompressorBase):
     """
-    none - no compression, just pass through data
+    None - no compression; just pass through data.
     """
     ID = b'\x00\x00'
     name = 'none'
@@ -167,9 +167,9 @@ class LZ4(DecidingCompressor):
     raw LZ4 compression / decompression (liblz4).

     Features:
-        - lz4 is super fast
-        - wrapper releases CPython's GIL to support multithreaded code
-        - uses safe lz4 methods that never go beyond the end of the output buffer
+        - LZ4 is super fast
+        - The wrapper releases CPython's GIL to support multithreaded code
+        - Uses safe LZ4 methods that never go beyond the end of the output buffer
     """
     ID = b'\x01\x00'
     name = 'lz4'
@@ -196,7 +196,7 @@ class LZ4(DecidingCompressor):
         osize = LZ4_compress_default(source, dest, isize, osize)
         if not osize:
             raise Exception('lz4 compress failed')
-        # only compress if the result actually is smaller
+        # only compress if the result is actually smaller
         if osize < isize:
             return self, dest[:osize]
         else:
@@ -234,7 +234,7 @@ class LZ4(DecidingCompressor):

 class LZMA(DecidingCompressor):
     """
-    lzma compression / decompression
+    LZMA compression/decompression.
     """
     ID = b'\x02\x00'
     name = 'lzma'
@@ -251,7 +251,7 @@ class LZMA(DecidingCompressor):

         *lzma_data* is the LZMA result if *compressor* is LZMA as well, otherwise it is None.
         """
-        # we do not need integrity checks in lzma, we do that already
+        # We do not need integrity checks in LZMA; we do that already.
         lzma_data = lzma.compress(data, preset=self.level, check=lzma.CHECK_NONE)
         if len(lzma_data) < len(data):
             return self, lzma_data
@@ -267,10 +267,10 @@ class LZMA(DecidingCompressor):


 class ZSTD(DecidingCompressor):
-    """zstd compression / decompression (pypi: zstandard, gh: python-zstandard)"""
-    # This is a NOT THREAD SAFE implementation.
-    # Only ONE python context must be created at a time.
-    # It should work flawlessly as long as borg will call ONLY ONE compression job at time.
+    """Zstd compression/decompression (PyPI: zstandard, GH: python-zstandard)."""
+    # This is NOT THREAD-SAFE.
+    # Only ONE Python context must be created at a time.
+    # It should work flawlessly as long as borg calls ONLY ONE compression job at a time.
     ID = b'\x03\x00'
     name = 'zstd'
@@ -298,7 +298,7 @@ class ZSTD(DecidingCompressor):
         osize = ZSTD_compress(dest, osize, source, isize, level)
         if ZSTD_isError(osize):
             raise Exception('zstd compress failed: %s' % ZSTD_getErrorName(osize))
-        # only compress if the result actually is smaller
+        # only compress if the result is actually smaller
         if osize < isize:
             return self, dest[:osize]
         else:
@@ -334,7 +334,7 @@ class ZSTD(DecidingCompressor):

 class ZLIB(CompressorBase):
     """
-    zlib compression / decompression (python stdlib)
+    Zlib compression/decompression (Python stdlib).
     """
     ID = b'\x08\x00'  # not used here, see detect()
     # avoid all 0x.8.. IDs elsewhere!
@@ -353,11 +353,11 @@ class ZLIB(CompressorBase):
         self.level = level

     def compress(self, data):
-        # note: for compatibility no super call, do not add ID bytes
+        # Note: for compatibility, no super call; do not add ID bytes.
         return zlib.compress(data, self.level)

     def decompress(self, data):
-        # note: for compatibility no super call, do not strip ID bytes
+        # Note: for compatibility, no super call; do not strip ID bytes.
         try:
             return zlib.decompress(data)
         except zlib.error as e:
@@ -417,17 +417,17 @@ class Auto(CompressorBase):
         compressor, cheap_compressed_data = self._decide(data)
         if compressor in (LZ4_COMPRESSOR, NONE_COMPRESSOR):
             # we know that trying to compress with expensive compressor is likely pointless,
-            # so we fallback to return the cheap compressed data.
+            # so we fall back to return the cheap compressed data.
             return cheap_compressed_data
         # if we get here, the decider decided to try the expensive compressor.
-        # we also know that the compressed data returned by the decider is lz4 compressed.
+        # we also know that the compressed data returned by the decider is LZ4-compressed.
         expensive_compressed_data = compressor.compress(data)
         ratio = len(expensive_compressed_data) / len(cheap_compressed_data)
         if ratio < 0.99:
-            # the expensive compressor managed to squeeze the data significantly better than lz4.
+            # the expensive compressor managed to squeeze the data significantly better than LZ4.
             return expensive_compressed_data
         else:
-            # otherwise let's just store the lz4 data, which decompresses extremely fast.
+            # otherwise let's just store the LZ4 data, which decompresses extremely fast.
             return cheap_compressed_data

     def decompress(self, data):
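The decision rule being reworded here can be stated compactly; a hedged sketch follows, with zlib levels standing in for the cheap (lz4) and expensive compressors since lz4 is not in the stdlib.

import zlib

def auto_compress(data, cheap, expensive):
    cheap_result = cheap(data)           # in borg, the decider already produced this
    expensive_result = expensive(data)
    ratio = len(expensive_result) / len(cheap_result)
    # keep the expensive result only if it is noticeably (>1%) smaller
    return expensive_result if ratio < 0.99 else cheap_result

data = b'abcdefgh' * 1000
result = auto_compress(data, lambda d: zlib.compress(d, 1), lambda d: zlib.compress(d, 9))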
@@ -463,8 +463,7 @@ class ObfuscateSize(CompressorBase):
             self._obfuscate = self._padme_obfuscate

     def _obfuscate(self, compr_size):
-        # implementations need to return the size of obfuscation data,
-        # that the caller shall add.
+        # Implementations need to return the size of the obfuscation data that the caller shall add.
         raise NotImplementedError

     def _relative_random_reciprocal_obfuscate(self, compr_size):
@@ -538,8 +537,8 @@ LZ4_COMPRESSOR = get_compressor('lz4')

 class Compressor:
     """
-    compresses using a compressor with given name and parameters
-    decompresses everything we can handle (autodetect)
+    Compresses using a compressor with a given name and parameters.
+    Decompresses everything we can handle (autodetect).
     """
     def __init__(self, name='null', **kwargs):
         self.params = kwargs
@@ -1,4 +1,4 @@
-"""An AEAD style OpenSSL wrapper
+"""An AEAD-style OpenSSL wrapper.

 API:
@@ -15,10 +15,10 @@ Envelope layout:
 |------------- #header_len ------>|

 S means a cryptographic signature function (like HMAC or GMAC).
-E means a encryption function (like AES).
+E means an encryption function (like AES).
 iv is the initialization vector / nonce, if needed.

-The split of header into not authenticated data and aad (additional authenticated
+The split of header into unauthenticated data and AAD (additional authenticated
 data) is done to support the legacy envelope layout as used in attic and early borg
 (where the TYPE byte was not authenticated) and avoid unneeded memcpy and string
 garbage.
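A rough sketch of the envelope idea described in this docstring (not borg's exact wire format): the MAC covers the AAD part of the header plus the IV and the ciphertext; encryption itself is left out (identity) to keep the example short.

import hashlib
import hmac
import os

def seal(mac_key, data, header=b'\x42', aad_offset=0, iv=None):
    iv = os.urandom(8) if iv is None else iv
    cdata = data                        # placeholder: real code would AES-CTR-encrypt here
    mac = hmac.new(mac_key, header[aad_offset:] + iv + cdata, hashlib.sha256).digest()
    return header + mac + iv + cdata    # header | MAC | IV | ciphertext

envelope = seal(b'\x00' * 32, b'payload')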
@@ -136,7 +136,7 @@ class UNENCRYPTED:

     def encrypt(self, data, header=b'', iv=None):
         """
-        IMPORTANT: it is called encrypt to satisfy the crypto api naming convention,
+        IMPORTANT: It is called encrypt to satisfy the crypto API naming convention,
         but this does NOT encrypt and it does NOT compute and store a MAC either.
         """
         if iv is not None:
@@ -146,7 +146,7 @@ class UNENCRYPTED:

     def decrypt(self, envelope):
         """
-        IMPORTANT: it is called decrypt to satisfy the crypto api naming convention,
+        IMPORTANT: It is called decrypt to satisfy the crypto API naming convention,
         but this does NOT decrypt and it does NOT verify a MAC either, because data
         is not encrypted and there is no MAC.
         """
@@ -220,8 +220,8 @@ cdef class AES256_CTR_BASE:

     def encrypt(self, data, header=b'', iv=None):
         """
-        encrypt data, compute mac over aad + iv + cdata, prepend header.
-        aad_offset is the offset into the header where aad starts.
+        Encrypt data, compute MAC over AAD + IV + cdata, prepend header.
+        aad_offset is the offset into the header where AAD starts.
         """
         if iv is not None:
             self.set_iv(iv)
@@ -270,7 +270,7 @@ cdef class AES256_CTR_BASE:

     def decrypt(self, envelope):
         """
-        authenticate aad + iv + cdata, decrypt cdata, ignore header bytes up to aad_offset.
+        Authenticate AAD + IV + cdata, decrypt cdata, ignore header bytes up to aad_offset.
         """
         cdef int ilen = len(envelope)
         cdef int hlen = self.header_len
@@ -314,7 +314,7 @@ cdef class AES256_CTR_BASE:
         return num_cipher_blocks(length, self.cipher_blk_len)

     def set_iv(self, iv):
-        # set_iv needs to be called before each encrypt() call
+        # Call set_iv before each encrypt() call.
         if isinstance(iv, int):
             iv = iv.to_bytes(self.iv_len, byteorder='big')
         assert isinstance(iv, bytes) and len(iv) == self.iv_len
@@ -322,16 +322,16 @@ cdef class AES256_CTR_BASE:
         self.blocks = 0  # how many AES blocks got encrypted with this IV?

     def next_iv(self):
-        # call this after encrypt() to get the next iv (int) for the next encrypt() call
+        # Call this after encrypt() to get the next IV (int) for the next encrypt() call
         iv = int.from_bytes(self.iv[:self.iv_len], byteorder='big')
         return iv + self.blocks

     cdef fetch_iv(self, unsigned char * iv_in):
-        # fetch lower self.iv_len_short bytes of iv and add upper zero bytes
+        # Fetch lower self.iv_len_short bytes of IV and add upper zero bytes.
         return b'\0' * (self.iv_len - self.iv_len_short) + iv_in[0:self.iv_len_short]

     cdef store_iv(self, unsigned char * iv_out, unsigned char * iv):
-        # store only lower self.iv_len_short bytes, upper bytes are assumed to be 0
+        # Store only lower self.iv_len_short bytes, upper bytes are assumed to be 0.
         cdef int i
         for i in range(self.iv_len_short):
             iv_out[i] = iv[(self.iv_len-self.iv_len_short)+i]
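The IV bookkeeping these comments describe is simple counter arithmetic; a small sketch, assuming the usual 16-byte AES block size:

def num_cipher_blocks(length, blk_len=16):
    return (length + blk_len - 1) // blk_len     # ceil division

def next_iv(current_iv_int, bytes_encrypted):
    # after encrypting that many bytes in CTR mode, the counter advanced by this many blocks
    return current_iv_int + num_cipher_blocks(bytes_encrypted)

assert next_iv(0, 33) == 3    # 33 bytes span three 16-byte blocks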
@@ -405,7 +405,7 @@ ctypedef const EVP_CIPHER * (* CIPHER)()


 cdef class AES:
-    """A thin wrapper around the OpenSSL EVP cipher API - for legacy code, like key file encryption"""
+    """A thin wrapper around the OpenSSL EVP cipher API - for legacy code, like key file encryption."""
     cdef CIPHER cipher
     cdef EVP_CIPHER_CTX *ctx
     cdef unsigned char enc_key[32]
@@ -476,8 +476,8 @@ cdef class AES:
                 raise Exception('EVP_DecryptUpdate failed')
             offset += olen
             if EVP_DecryptFinal_ex(self.ctx, odata+offset, &olen) <= 0:
-                # this error check is very important for modes with padding or
-                # authentication. for them, a failure here means corrupted data.
+                # This error check is very important for modes with padding or
+                # authentication. For them, a failure here means corrupted data.
                 # CTR mode does not use padding nor authentication.
                 raise Exception('EVP_DecryptFinal failed')
             offset += olen
@@ -491,8 +491,8 @@ cdef class AES:
         return num_cipher_blocks(length, self.cipher_blk_len)

     def set_iv(self, iv):
-        # set_iv needs to be called before each encrypt() call,
-        # because encrypt does a full initialisation of the cipher context.
+        # Call set_iv before each encrypt() call,
+        # because encrypt() does a full initialization of the cipher context.
         if isinstance(iv, int):
             iv = iv.to_bytes(self.iv_len, byteorder='big')
         assert isinstance(iv, bytes) and len(iv) == self.iv_len
@@ -500,7 +500,7 @@ cdef class AES:
         self.blocks = 0  # number of cipher blocks encrypted with this IV

     def next_iv(self):
-        # call this after encrypt() to get the next iv (int) for the next encrypt() call
+        # Call this after encrypt() to get the next IV (int) for the next encrypt() call
         iv = int.from_bytes(self.iv[:self.iv_len], byteorder='big')
         return iv + self.blocks
@@ -60,17 +60,17 @@ cdef extern from "cache_sync/cache_sync.c":
 cdef _NoDefault = object()

 """
-The HashIndex is *not* a general purpose data structure. The value size must be at least 4 bytes, and these
-first bytes are used for in-band signalling in the data structure itself.
+The HashIndex is *not* a general-purpose data structure. The value size must be at least 4 bytes, and these
+first bytes are used for in-band signaling in the data structure itself.

-The constant MAX_VALUE defines the valid range for these 4 bytes when interpreted as an uint32_t from 0
+The constant MAX_VALUE defines the valid range for these 4 bytes when interpreted as a uint32_t from 0
 to MAX_VALUE (inclusive). The following reserved values beyond MAX_VALUE are currently in use
 (byte order is LE)::

-    0xffffffff marks empty entries in the hashtable
-    0xfffffffe marks deleted entries in the hashtable
+    0xffffffff marks empty entries in the hash table
+    0xfffffffe marks deleted entries in the hash table

-None of the publicly available classes in this module will accept nor return a reserved value;
+None of the publicly available classes in this module will neither accept nor return a reserved value;
 AssertionError is raised instead.
 """
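To make the reserved-value convention concrete, a sketch with illustrative constants (the real MAX_VALUE lives in borg's C code): values up to MAX_VALUE are ordinary refcounts, the two top values are in-band markers, and a refcount pinned at MAX_VALUE stays there.

MAX_VALUE = 0xfffffffd       # illustrative only
MARK_DELETED = 0xfffffffe    # reserved: tombstone for deleted entries
MARK_EMPTY = 0xffffffff      # reserved: never-used bucket

def incref(refcount):
    assert refcount <= MAX_VALUE
    return refcount if refcount == MAX_VALUE else refcount + 1

assert incref(5) == 6
assert incref(MAX_VALUE) == MAX_VALUE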
@@ -169,7 +169,7 @@ cdef class IndexBase:


 cdef class FuseVersionsIndex(IndexBase):
-    # 4 byte version + 16 byte file contents hash
+    # 4-byte version + 16-byte file contents hash
     value_size = 20
     _key_size = 16
@@ -276,7 +276,7 @@ ChunkIndexEntry = namedtuple('ChunkIndexEntry', 'refcount size csize')

 cdef class ChunkIndex(IndexBase):
     """
-    Mapping of 32 byte keys to (refcount, size, csize), which are all 32-bit unsigned.
+    Mapping of 32-byte keys to (refcount, size, csize), which are all 32-bit unsigned.

     The reference count cannot overflow. If an overflow would occur, the refcount
     is fixed to MAX_VALUE and will neither increase nor decrease by incref(), decref()
@@ -321,7 +321,7 @@ cdef class ChunkIndex(IndexBase):
         return data != NULL

     def incref(self, key):
-        """Increase refcount for 'key', return (refcount, size, csize)"""
+        """Increase refcount for 'key', return (refcount, size, csize)."""
         assert len(key) == self.key_size
         data = <uint32_t *>hashindex_get(self.index, <unsigned char *>key)
         if not data:
@@ -334,7 +334,7 @@ cdef class ChunkIndex(IndexBase):
         return refcount, _le32toh(data[1]), _le32toh(data[2])

     def decref(self, key):
-        """Decrease refcount for 'key', return (refcount, size, csize)"""
+        """Decrease refcount for 'key', return (refcount, size, csize)."""
         assert len(key) == self.key_size
         data = <uint32_t *>hashindex_get(self.index, <unsigned char *>key)
         if not data:
@@ -230,7 +230,7 @@ class Item(PropDict):
         try:
             master = getattr(self, 'source')
         except AttributeError:
-            # not a hardlink slave, likely a directory or special file w/o chunks
+            # not a hardlink slave, likely a directory or special file without chunks
             chunks = None
         else:
             # hardlink slave, try to fetch hardlink master's chunks list
@@ -296,12 +296,12 @@ class EncryptedKey(PropDict):
     """
     EncryptedKey abstraction that deals with validation and the low-level details internally:

-    A EncryptedKey is created either from msgpack unpacker output, from another dict, from kwargs or
+    An EncryptedKey is created either from msgpack unpacker output, from another dict, from kwargs or
     built step-by-step by setting attributes.

     msgpack gives us a dict with bytes-typed keys, just give it to EncryptedKey(d) and use enc_key.xxx later.

-    If a EncryptedKey shall be serialized, give as_dict() method output to msgpack packer.
+    If an EncryptedKey shall be serialized, give as_dict() method output to msgpack packer.
     """

     VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'}  # str-typed keys
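The construction/serialization round trip described here follows the usual PropDict pattern; a tiny self-contained sketch of that pattern (not borg's PropDict, and without its value-type validation) for illustration:

class KeyBlob:
    VALID_KEYS = {'version', 'algorithm', 'iterations', 'salt', 'hash', 'data'}

    def __init__(self, d=None, **kw):
        self._dict = {}
        for k, v in {**(d or {}), **kw}.items():
            assert k in self.VALID_KEYS, 'invalid key: %r' % k
            self._dict[k] = v

    def __getattr__(self, name):
        try:
            return self._dict[name]
        except KeyError:
            raise AttributeError(name)

    def as_dict(self):
        return dict(self._dict)      # hand this to a msgpack packer

blob = KeyBlob({'version': 1, 'algorithm': 'sha256'}, iterations=100000)
assert blob.iterations == 100000 and 'salt' not in blob.as_dict()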
@@ -362,7 +362,7 @@ class ArchiveItem(PropDict):

     msgpack gives us a dict with bytes-typed keys, just give it to ArchiveItem(d) and use arch.xxx later.

-    If a ArchiveItem shall be serialized, give as_dict() method output to msgpack packer.
+    If an ArchiveItem shall be serialized, give as_dict() method output to msgpack packer.
     """

     VALID_KEYS = ARCHIVE_KEYS  # str-typed keys
@@ -101,8 +101,7 @@ def setxattr(path, name, value, *, follow_symlinks=False):


 def _remove_numeric_id_if_possible(acl):
-    """Replace the user/group field with the local uid/gid if possible
-    """
+    """Replace the user/group field with the local uid/gid if possible."""
     entries = []
     for entry in safe_decode(acl).split('\n'):
         if entry:
@@ -118,8 +117,7 @@ def _remove_numeric_id_if_possible(acl):


 def _remove_non_numeric_identifier(acl):
-    """Remove user and group names from the acl
-    """
+    """Remove user and group names from the ACL."""
     entries = []
     for entry in safe_decode(acl).split('\n'):
         if entry:
@@ -57,18 +57,18 @@ NS_ID_MAP = {b"user": EXTATTR_NAMESPACE_USER, }


 def split_ns(ns_name, default_ns):
-    # split ns_name (which is in the form of b"namespace.name") into namespace and name.
-    # if there is no namespace given in ns_name, default to default_ns.
-    # note:
+    # Split ns_name (which is in the form of b"namespace.name") into namespace and name.
+    # If there is no namespace given in ns_name, default to default_ns.
+    # Note:
     # borg < 1.1.10 on FreeBSD did not prefix the namespace to the names, see #3952.
-    # we also need to deal with "unexpected" namespaces here, they could come
+    # We also need to deal with "unexpected" namespaces here; they could come
     # from borg archives made on other operating systems.
     ns_name_tuple = ns_name.split(b".", 1)
     if len(ns_name_tuple) == 2:
-        # we have a namespace prefix in the given name
+        # We have a namespace prefix in the given name.
         ns, name = ns_name_tuple
     else:
-        # no namespace given in ns_name (== no dot found), maybe data coming from an old borg archive.
+        # No namespace given in ns_name (no dot found), maybe data coming from an old borg archive.
         ns, name = default_ns, ns_name
     return ns, name
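For reference, the behavior described by these comments, restated as a standalone snippet (not an import of borg's function) with a tiny usage check:

def split_ns(ns_name, default_ns=b"user"):
    parts = ns_name.split(b".", 1)
    return (parts[0], parts[1]) if len(parts) == 2 else (default_ns, ns_name)

assert split_ns(b"user.comment") == (b"user", b"comment")   # prefixed name
assert split_ns(b"comment") == (b"user", b"comment")        # old borg < 1.1.10 style name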
@@ -100,7 +100,7 @@ def getxattr(path, name, *, follow_symlinks=False):
         return c_extattr_get_link(path, ns_id, name, <char *> buf, size)

     ns, name = split_ns(name, b"user")
-    ns_id = NS_ID_MAP[ns]  # this will raise a KeyError it the namespace is unsupported
+    ns_id = NS_ID_MAP[ns]  # this will raise a KeyError if the namespace is unsupported
     n, buf = _getxattr_inner(func, path, name)
     return bytes(buf[:n])
@@ -117,7 +117,7 @@ def setxattr(path, name, value, *, follow_symlinks=False):

     ns, name = split_ns(name, b"user")
     try:
-        ns_id = NS_ID_MAP[ns]  # this will raise a KeyError it the namespace is unsupported
+        ns_id = NS_ID_MAP[ns]  # this will raise a KeyError if the namespace is unsupported
     except KeyError:
         pass
     else:
@@ -142,9 +142,9 @@ cdef _get_acl(p, type, item, attribute, flags, fd=None):
     acl_free(acl)

 def acl_get(path, item, st, numeric_ids=False, fd=None):
-    """Saves ACL Entries
+    """Save ACL entries.

-    If `numeric_ids` is True the user/group field is not preserved only uid/gid
+    If numeric_ids is True, the user/group field is not preserved; only uid/gid.
     """
     cdef int flags = ACL_TEXT_APPEND_ID
     flags |= ACL_TEXT_NUMERIC_IDS if numeric_ids else 0
@@ -154,7 +154,7 @@ def acl_get(path, item, st, numeric_ids=False, fd=None):
     if ret < 0:
         raise OSError(errno.errno, os.strerror(errno.errno), os.fsdecode(path))
     if ret == 0:
-        # there is no ACL defining permissions other than those defined by the traditional file permission bits.
+        # There is no ACL defining permissions other than those defined by the traditional file permission bits.
         return
     ret = lpathconf(path, _PC_ACL_NFS4)
     if ret < 0:
@@ -192,8 +192,7 @@ cdef _set_acl(p, type, item, attribute, numeric_ids=False, fd=None):


 cdef _nfs4_use_stored_uid_gid(acl):
-    """Replace the user/group field with the stored uid/gid
-    """
+    """Replace the user/group field with the stored uid/gid."""
     entries = []
     for entry in safe_decode(acl).split('\n'):
         if entry:
@@ -206,10 +205,9 @@ cdef _nfs4_use_stored_uid_gid(acl):


 def acl_set(path, item, numeric_ids=False, fd=None):
-    """Restore ACL Entries
+    """Restore ACL entries.

-    If `numeric_ids` is True the stored uid/gid is used instead
-    of the user/group names
+    If numeric_ids is True, the stored uid/gid is used instead of the user/group names.
     """
     if isinstance(path, str):
         path = os.fsencode(path)
@@ -134,7 +134,7 @@ def set_flags(path, bsd_flags, fd=None):
     if fd is None:
         st = os.stat(path, follow_symlinks=False)
         if stat.S_ISBLK(st.st_mode) or stat.S_ISCHR(st.st_mode) or stat.S_ISLNK(st.st_mode):
-            # see comment in get_flags()
+            # See comment in get_flags().
             return
     cdef int flags = 0
     for bsd_flag, linux_flag in BSD_TO_LINUX_FLAGS.items():
@@ -155,8 +155,8 @@ def set_flags(path, bsd_flags, fd=None):

 def get_flags(path, st, fd=None):
     if stat.S_ISBLK(st.st_mode) or stat.S_ISCHR(st.st_mode) or stat.S_ISLNK(st.st_mode):
-        # avoid opening devices files - trying to open non-present devices can be rather slow.
-        # avoid opening symlinks, O_NOFOLLOW would make the open() fail anyway.
+        # Avoid opening device files - trying to open non-present devices can be rather slow.
+        # Avoid opening symlinks; O_NOFOLLOW would make the open() fail anyway.
         return 0
     cdef int linux_flags
     open_fd = fd is None
@@ -179,8 +179,7 @@ def get_flags(path, st, fd=None):


 def acl_use_local_uid_gid(acl):
-    """Replace the user/group field with the local uid/gid if possible
-    """
+    """Replace the user/group field with the local uid/gid if possible."""
     entries = []
     for entry in safe_decode(acl).split('\n'):
         if entry:
@@ -194,8 +193,7 @@ def acl_use_local_uid_gid(acl):


 cdef acl_append_numeric_ids(acl):
-    """Extend the "POSIX 1003.1e draft standard 17" format with an additional uid/gid field
-    """
+    """Extend the "POSIX 1003.1e draft standard 17" format with an additional uid/gid field."""
     entries = []
     for entry in _comment_re.sub('', safe_decode(acl)).split('\n'):
         if entry:
@@ -210,8 +208,7 @@ cdef acl_append_numeric_ids(acl):


 cdef acl_numeric_ids(acl):
-    """Replace the "POSIX 1003.1e draft standard 17" user/group field with uid/gid
-    """
+    """Replace the "POSIX 1003.1e draft standard 17" user/group field with uid/gid."""
     entries = []
     for entry in _comment_re.sub('', safe_decode(acl)).split('\n'):
         if entry:
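These helpers rewrite entries of the ACL text form; a hedged sketch of the general idea (the exact field layout is an assumption here, not taken from borg's code): with the numeric id appended as an extra field, "use the stored id" means swapping it into the qualifier position.

def use_stored_id(entry):
    # e.g. "user:joe:rw-:1000" (extended form with appended numeric id) -> "user:1000:rw-"
    fields = entry.split(':')
    if len(fields) == 4:
        return ':'.join([fields[0], fields[3], fields[2]])
    return entry

assert use_stored_id("user:joe:rw-:1000") == "user:1000:rw-"
assert use_stored_id("mask::r--") == "mask::r--"     # short entries pass through unchanged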
@@ -243,8 +240,8 @@ def acl_get(path, item, st, numeric_ids=False, fd=None):
     if ret < 0:
         raise OSError(errno.errno, os.strerror(errno.errno), os.fsdecode(path))
     if ret == 0:
-        # there is no ACL defining permissions other than those defined by the traditional file permission bits.
-        # note: this should also be the case for symlink fs objects, as they can not have ACLs.
+        # There is no ACL defining permissions other than those defined by the traditional file permission bits.
+        # Note: this should also be the case for symlink file system objects, as they cannot have ACLs.
         return
     if numeric_ids:
         converter = acl_numeric_ids
@@ -265,7 +262,7 @@ def acl_get(path, item, st, numeric_ids=False, fd=None):
     acl_free(access_text)
     acl_free(access_acl)
     if stat.S_ISDIR(st.st_mode):
-        # only directories can have a default ACL. there is no fd-based api to get it.
+        # Only directories can have a default ACL. There is no fd-based API to get it.
         try:
             default_acl = acl_get_file(path, ACL_TYPE_DEFAULT)
             if default_acl == NULL:
@@ -284,7 +281,7 @@ def acl_set(path, item, numeric_ids=False, fd=None):
     cdef acl_t default_acl = NULL

     if stat.S_ISLNK(item.get('mode', 0)):
-        # Linux does not support setting ACLs on symlinks
+        # Linux does not support setting ACLs on symlinks.
         return

     if isinstance(path, str):
@@ -313,7 +310,7 @@ def acl_set(path, item, numeric_ids=False, fd=None):
             default_acl = acl_from_text(<bytes>converter(default_text))
             if default_acl == NULL:
                 raise OSError(errno.errno, os.strerror(errno.errno), os.fsdecode(path))
-            # only directories can get a default ACL. there is no fd-based api to set it.
+            # Only directories can get a default ACL. There is no fd-based API to set it.
             if acl_set_file(path, ACL_TYPE_DEFAULT, default_acl) == -1:
                 raise OSError(errno.errno, os.strerror(errno.errno), os.fsdecode(path))
     finally:
@@ -333,9 +330,9 @@ cdef unsigned PAGE_MASK = sysconf(_SC_PAGESIZE) - 1

 if 'basesyncfile' in workarounds or not SYNC_FILE_RANGE_LOADED:
     class SyncFile(BaseSyncFile):
-        # if we are on platforms with a broken or not implemented sync_file_range,
+        # If we are on platforms with a broken or not implemented sync_file_range,
         # use the more generic BaseSyncFile to avoid issues.
-        # see basesyncfile description in our docs for details.
+        # See BaseSyncFile description in our docs for details.
         pass
 else:
     # a real Linux, so we can do better. :)
@@ -343,7 +340,7 @@ else:
         """
         Implemented using sync_file_range for asynchronous write-out and fdatasync for actual durability.

-        "write-out" means that dirty pages (= data that was written) are submitted to an I/O queue and will be send to
+        "write-out" means that dirty pages (= data that was written) are submitted to an I/O queue and will be sent to
         disk in the immediate future.
         """
@@ -369,6 +366,6 @@ else:
         def sync(self):
             self.f.flush()
             os.fdatasync(self.fd)
-            # tell the OS that it does not need to cache what we just wrote,
-            # avoids spoiling the cache for the OS and other processes.
+            # Tell the OS that it does not need to cache what we just wrote,
+            # This avoids spoiling the cache for the OS and other processes.
             safe_fadvise(self.fd, 0, 0, 'DONTNEED')
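The durability sequence shown in this hunk (flush, fdatasync, drop from cache) also works outside borg; a small sketch using plain os.posix_fadvise instead of borg's tolerant safe_fadvise wrapper:

import os

def sync_and_forget(f):
    f.flush()                          # push Python's userspace buffers to the OS
    os.fdatasync(f.fileno())           # force the data (not all metadata) to disk
    if hasattr(os, 'posix_fadvise'):
        # we will not read this back soon, so do not let it evict hotter pages
        os.posix_fadvise(f.fileno(), 0, 0, os.POSIX_FADV_DONTNEED)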
@@ -39,7 +39,7 @@ def process_alive(host, pid, thread):
     Check if the (host, pid, thread_id) combination corresponds to a potentially alive process.

     If the process is local, then this will be accurate. If the process is not local, then this
-    returns always True, since there is no real way to check.
+    always returns True, since there is no real way to check.
     """
     from . import local_pid_alive
     from . import hostid
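For the local case mentioned here, one common liveness probe (an illustration, not necessarily what borg's local_pid_alive does) is signal 0, which checks the pid without affecting the process:

import os

def pid_alive(pid):
    try:
        os.kill(pid, 0)
        return True
    except ProcessLookupError:
        return False
    except PermissionError:
        return True    # it exists, we just may not signal it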
@@ -110,8 +110,7 @@ def group2gid(group, default=None):


 def posix_acl_use_stored_uid_gid(acl):
-    """Replace the user/group field with the stored uid/gid
-    """
+    """Replace the user/group field with the stored uid/gid."""
     from ..helpers import safe_decode, safe_encode
     entries = []
     for entry in safe_decode(acl).split('\n'):