diff --git a/src/borg/archive.py b/src/borg/archive.py
index 5ddf9790dc..8909fa330c 100644
--- a/src/borg/archive.py
+++ b/src/borg/archive.py
@@ -281,7 +281,7 @@ def unpack_many(self, ids, *, filter=None, preload=False):
                 item = Item(internal_dict=_item)
                 if "chunks" in item:
                     item.chunks = [ChunkListEntry(*e) for e in item.chunks]
-                if "chunks_healthy" in item:
+                if "chunks_healthy" in item:  # legacy
                     item.chunks_healthy = [ChunkListEntry(*e) for e in item.chunks_healthy]
                 if filter and not filter(item):
                     continue
@@ -744,7 +744,6 @@ def same_item(item, st):
             # if a previous extraction was interrupted between setting the mtime and setting non-default flags.
             return True

-        has_damaged_chunks = "chunks_healthy" in item
         if dry_run or stdout:
             with self.extract_helper(item, "", hlm, dry_run=dry_run or stdout) as hardlink_set:
                 if not hardlink_set:
@@ -771,8 +770,6 @@ def same_item(item, st):
                                 item_size, item_chunks_size
                             )
                         )
-            if has_damaged_chunks:
-                raise BackupError("File has damaged (all-zero) chunks. Try running borg check --repair.")
             return

         dest = self.cwd
@@ -827,8 +824,6 @@ def make_parent(path):
                     raise BackupError(
                         f"Size inconsistency detected: size {item_size}, chunks size {item_chunks_size}"
                     )
-                if has_damaged_chunks:
-                    raise BackupError("File has damaged (all-zero) chunks. Try running borg check --repair.")
                 return
         with backup_io:
             # No repository access beyond this point.
@@ -1141,10 +1136,6 @@ def chunk_processor(chunk):
             return chunk_entry

         item.chunks = []
-        # if we rechunkify, we'll get a fundamentally different chunks list, thus we need
-        # to get rid of .chunks_healthy, as it might not correspond to .chunks any more.
-        if self.rechunkify and "chunks_healthy" in item:
-            del item.chunks_healthy
         for chunk in chunk_iter:
             chunk_entry = chunk_processor(chunk)
             item.chunks.append(chunk_entry)
@@ -1761,13 +1752,10 @@ def verify_data(self):
         if defect_chunks:
             if self.repair:
                 # if we kill the defect chunk here, subsequent actions within this "borg check"
-                # run will find missing chunks and replace them with all-zero replacement
-                # chunks and flag the files as "repaired".
-                # if another backup is done later and the missing chunks get backed up again,
-                # a "borg check" afterwards can heal all files where this chunk was missing.
+                # run will find missing chunks.
                 logger.warning(
-                    "Found defect chunks. They will be deleted now, so affected files can "
-                    "get repaired now and maybe healed later."
+                    "Found defect chunks and will delete them now. "
+                    "Reading files referencing these chunks will result in an I/O error."
                 )
                 for defect_chunk in defect_chunks:
                     # remote repo (ssh): retry might help for strange network / NIC / RAM errors
@@ -1787,10 +1775,7 @@ def verify_data(self):
                     else:
                         logger.warning("chunk %s not deleted, did not consistently fail.", bin_to_hex(defect_chunk))
             else:
-                logger.warning(
-                    "Found defect chunks. With --repair, they would get deleted, so affected "
-                    "files could get repaired then and maybe healed later."
-                )
+                logger.warning("Found defect chunks. With --repair, they would get deleted.")
                 for defect_chunk in defect_chunks:
                     logger.debug("chunk %s is defect.", bin_to_hex(defect_chunk))
         log = logger.error if errors else logger.info
@@ -1901,80 +1886,18 @@ def add_reference(id_, size, cdata):
                 self.repository.put(id_, cdata)

         def verify_file_chunks(archive_name, item):
-            """Verifies that all file chunks are present.
-
-            Missing file chunks will be replaced with new chunks of the same length containing all zeros.
-            If a previously missing file chunk re-appears, the replacement chunk is replaced by the correct one.
-            """
-
-            def replacement_chunk(size):
-                chunk = Chunk(None, allocation=CH_ALLOC, size=size)
-                chunk_id, data = cached_hash(chunk, self.key.id_hash)
-                cdata = self.repo_objs.format(chunk_id, {}, data, ro_type=ROBJ_FILE_STREAM)
-                return chunk_id, size, cdata
-
+            """Verifies that all file chunks are present. Missing file chunks will be logged."""
             offset = 0
-            chunk_list = []
-            chunks_replaced = False
-            has_chunks_healthy = "chunks_healthy" in item
-            chunks_current = item.chunks
-            chunks_healthy = item.chunks_healthy if has_chunks_healthy else chunks_current
-            if has_chunks_healthy and len(chunks_current) != len(chunks_healthy):
-                # should never happen, but there was issue #3218.
-                logger.warning(f"{archive_name}: {item.path}: Invalid chunks_healthy metadata removed!")
-                del item.chunks_healthy
-                has_chunks_healthy = False
-                chunks_healthy = chunks_current
-            for chunk_current, chunk_healthy in zip(chunks_current, chunks_healthy):
-                chunk_id, size = chunk_healthy
+            for chunk in item.chunks:
+                chunk_id, size = chunk
                 if chunk_id not in self.chunks:
-                    # a chunk of the healthy list is missing
-                    if chunk_current == chunk_healthy:
-                        logger.error(
-                            "{}: {}: New missing file chunk detected (Byte {}-{}, Chunk {}). "
-                            "Replacing with all-zero chunk.".format(
-                                archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)
-                            )
+                    logger.error(
+                        "{}: {}: Missing file chunk detected (Byte {}-{}, Chunk {}).".format(
+                            archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)
                         )
-                        self.error_found = chunks_replaced = True
-                        chunk_id, size, cdata = replacement_chunk(size)
-                        add_reference(chunk_id, size, cdata)
-                    else:
-                        logger.info(
-                            "{}: {}: Previously missing file chunk is still missing (Byte {}-{}, Chunk {}). "
-                            "It has an all-zero replacement chunk already.".format(
-                                archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)
-                            )
-                        )
-                        chunk_id, size = chunk_current
-                        if chunk_id not in self.chunks:
-                            logger.warning(
-                                "{}: {}: Missing all-zero replacement chunk detected (Byte {}-{}, Chunk {}). "
-                                "Generating new replacement chunk.".format(
-                                    archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)
-                                )
-                            )
-                            self.error_found = chunks_replaced = True
-                            chunk_id, size, cdata = replacement_chunk(size)
-                            add_reference(chunk_id, size, cdata)
-                else:
-                    if chunk_current == chunk_healthy:
-                        pass  # normal case, all fine.
-                    else:
-                        logger.info(
-                            "{}: {}: Healed previously missing file chunk! (Byte {}-{}, Chunk {}).".format(
-                                archive_name, item.path, offset, offset + size, bin_to_hex(chunk_id)
-                            )
-                        )
-                chunk_list.append([chunk_id, size])  # list-typed element as chunks_healthy is list-of-lists
+                    )
+                    self.error_found = True
                 offset += size
-            if chunks_replaced and not has_chunks_healthy:
-                # if this is first repair, remember the correct chunk IDs, so we can maybe heal the file later
-                item.chunks_healthy = item.chunks
-            if has_chunks_healthy and chunk_list == chunks_healthy:
-                logger.info(f"{archive_name}: {item.path}: Completely healed previously damaged file!")
-                del item.chunks_healthy
-            item.chunks = chunk_list
             if "size" in item:
                 item_size = item.size
                 item_chunks_size = item.get_size(from_chunks=True)
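
The check logic above is now a single pass over each item's chunk list. A minimal standalone sketch of that shape, assuming a `chunk_present` predicate and using byte offsets as the only state (the names are illustrative, not borg API):

    # Sketch: one pass over an item's chunks, yielding the byte ranges whose
    # chunk is absent from the repository's chunk index.
    def find_missing_ranges(chunks, chunk_present):
        """chunks: iterable of (chunk_id, size) pairs."""
        offset = 0
        for chunk_id, size in chunks:
            if not chunk_present(chunk_id):
                yield offset, offset + size, chunk_id
            offset += size
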
diff --git a/src/borg/archiver/check_cmd.py b/src/borg/archiver/check_cmd.py
index 0bfda9b78b..a9bd9f05b3 100644
--- a/src/borg/archiver/check_cmd.py
+++ b/src/borg/archiver/check_cmd.py
@@ -168,28 +168,7 @@ def build_parser_check(self, subparsers, common_parser, mid_common_parser):

         2. When checking the consistency and correctness of archives, repair mode might
         remove whole archives from the manifest if their archive metadata chunk is
-        corrupt or lost. On a chunk level (i.e. the contents of files), repair mode
-        will replace corrupt or lost chunks with a same-size replacement chunk of
-        zeroes. If a previously zeroed chunk reappears, repair mode will restore
-        this lost chunk using the new chunk.
-
-        Most steps taken by repair mode have a one-time effect on the repository, like
-        removing a lost archive from the repository. However, replacing a corrupt or
-        lost chunk with an all-zero replacement will have an ongoing effect on the
-        repository: When attempting to extract a file referencing an all-zero chunk,
-        the ``extract`` command will distinctly warn about it. The FUSE filesystem
-        created by the ``mount`` command will reject reading such a "zero-patched"
-        file unless a special mount option is given.
-
-        As mentioned earlier, Borg might be able to "heal" a "zero-patched" file in
-        repair mode, if all its previously lost chunks reappear (e.g. via a later
-        backup). This is achieved by Borg not only keeping track of the all-zero
-        replacement chunks, but also by keeping metadata about the lost chunks. In
-        repair mode Borg will check whether a previously lost chunk reappeared and will
-        replace the all-zero replacement chunk by the reappeared chunk. If all lost
-        chunks of a "zero-patched" file reappear, this effectively "heals" the file.
-        Consequently, if lost chunks were repaired earlier, it is advised to run
-        ``--repair`` a second time after creating some new backups.
+        corrupt or lost. Borg will also report files that reference missing chunks.

         If ``--repair --find-lost-archives`` is given, previously lost entries will
         be recreated in the archive directory. This is only possible before
diff --git a/src/borg/archiver/compact_cmd.py b/src/borg/archiver/compact_cmd.py
index c9c36f459b..04c0128ffe 100644
--- a/src/borg/archiver/compact_cmd.py
+++ b/src/borg/archiver/compact_cmd.py
@@ -6,7 +6,7 @@
 from ..cache import write_chunkindex_to_repo_cache, build_chunkindex_from_repo
 from ..constants import *  # NOQA
 from ..hashindex import ChunkIndex, ChunkIndexEntry
-from ..helpers import set_ec, EXIT_WARNING, EXIT_ERROR, format_file_size, bin_to_hex
+from ..helpers import set_ec, EXIT_ERROR, format_file_size, bin_to_hex
 from ..helpers import ProgressIndicatorPercent
 from ..manifest import Manifest
 from ..remote import RemoteRepository
@@ -39,9 +39,7 @@ def garbage_collect(self):
         logger.info("Starting compaction / garbage collection...")
         self.chunks = self.get_repository_chunks()
         logger.info("Computing object IDs used by archives...")
-        (self.missing_chunks, self.reappeared_chunks, self.total_files, self.total_size, self.archives_count) = (
-            self.analyze_archives()
-        )
+        (self.missing_chunks, self.total_files, self.total_size, self.archives_count) = self.analyze_archives()
         self.report_and_delete()
         self.save_chunk_index()
         logger.info("Finished compaction / garbage collection...")
@@ -74,27 +72,23 @@ def save_chunk_index(self):
         self.chunks = None  # nothing there (cleared!)

-    def analyze_archives(self) -> Tuple[Set, Set, int, int, int]:
-        """Iterate over all items in all archives, create the dicts id -> size of all used/wanted chunks."""
+    def analyze_archives(self) -> Tuple[Set, int, int, int]:
+        """Iterate over all items in all archives, marking used chunks and collecting missing ones."""

-        def use_it(id, *, wanted=False):
+        def use_it(id):
             entry = self.chunks.get(id)
             if entry is not None:
                 # the chunk is in the repo, mark it used.
                 self.chunks[id] = entry._replace(flags=entry.flags | ChunkIndex.F_USED)
-                if wanted:
-                    # chunk id is from chunks_healthy list: a lost chunk has re-appeared!
-                    reappeared_chunks.add(id)
             else:
                 # with --stats: we do NOT have this chunk in the repository!
                 # without --stats: we do not have this chunk or the chunks index is incomplete.
                 missing_chunks.add(id)

         missing_chunks: set[bytes] = set()
-        reappeared_chunks: set[bytes] = set()
         archive_infos = self.manifest.archives.list(sort_by=["ts"])
         num_archives = len(archive_infos)
         pi = ProgressIndicatorPercent(
-            total=num_archives, msg="Computing used/wanted chunks %3.1f%%", step=0.1, msgid="compact.analyze_archives"
+            total=num_archives, msg="Computing used chunks %3.1f%%", step=0.1, msgid="compact.analyze_archives"
         )
         total_size, total_files = 0, 0
         for i, info in enumerate(archive_infos):
@@ -114,25 +108,14 @@ def use_it(id, *, wanted=False):
             for id, size in item.chunks:
                 total_size += size  # original, uncompressed file content size
                 use_it(id)
-            if "chunks_healthy" in item:
-                # we also consider the chunks_healthy chunks as referenced - do not throw away
-                # anything that borg check --repair might still need.
-                for id, size in item.chunks_healthy:
-                    use_it(id, wanted=True)
         pi.finish()
-        return missing_chunks, reappeared_chunks, total_files, total_size, num_archives
+        return missing_chunks, total_files, total_size, num_archives

     def report_and_delete(self):
-        run_repair = " Run borg check --repair!"
-
         if self.missing_chunks:
-            logger.error(f"Repository has {len(self.missing_chunks)} missing objects." + run_repair)
+            logger.error(f"Repository has {len(self.missing_chunks)} missing objects!")
             set_ec(EXIT_ERROR)
-
-        if self.reappeared_chunks:
-            logger.warning(f"{len(self.reappeared_chunks)} previously missing objects re-appeared!" + run_repair)
-            set_ec(EXIT_WARNING)
-
         logger.info("Cleaning archives directory from soft-deleted archives...")
         archive_infos = self.manifest.archives.list(sort_by=["ts"], deleted=True)
         for archive_info in archive_infos:
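
The analyze pass is a plain mark phase: every chunk id referenced by an item is either flagged as used in the chunk index or collected as missing. A toy version of that bookkeeping, assuming a dict of int flags instead of the real ChunkIndex entries:

    # Sketch, not borg code: mark referenced chunks used, collect missing ids.
    F_USED = 1

    def mark_used(index, referenced_ids):
        missing = set()
        for cid in referenced_ids:
            flags = index.get(cid)
            if flags is not None:
                index[cid] = flags | F_USED  # chunk exists in the repo: mark it used
            else:
                missing.add(cid)  # referenced by an item, but not in the repo
        return missing
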
diff --git a/src/borg/archiver/mount_cmds.py b/src/borg/archiver/mount_cmds.py
index 7535198cc0..7dcb2eda43 100644
--- a/src/borg/archiver/mount_cmds.py
+++ b/src/borg/archiver/mount_cmds.py
@@ -104,9 +104,9 @@ def build_parser_mount_umount(self, subparsers, common_parser, mid_common_parser
         - ``versions``: when used with a repository mount, this gives a merged, versioned
           view of the files in the archives. EXPERIMENTAL, layout may change in future.
-        - ``allow_damaged_files``: by default damaged files (where missing chunks were
-          replaced with runs of zeros by ``borg check --repair``) are not readable and
-          return EIO (I/O error). Set this option to read such files.
+        - ``allow_damaged_files``: by default damaged files (where chunks are missing)
+          will return EIO (I/O error) when trying to read the affected parts of the file.
+          Set this option to replace the missing parts with all-zero bytes.
         - ``ignore_permissions``: for security reasons the ``default_permissions`` mount
           option is internally enforced by borg. ``ignore_permissions`` can be given to
           not enforce ``default_permissions``.
diff --git a/src/borg/archiver/recreate_cmd.py b/src/borg/archiver/recreate_cmd.py
index 26efe46fcd..c528d9018c 100644
--- a/src/borg/archiver/recreate_cmd.py
+++ b/src/borg/archiver/recreate_cmd.py
@@ -95,16 +95,10 @@ def build_parser_recreate(self, subparsers, common_parser, mid_common_parser):
        at least the entire deduplicated size of the archives using the previous
        chunker params.

-       If you recently ran borg check --repair and it had to fix lost chunks with all-zero
-       replacement chunks, please first run another backup for the same data and re-run
-       borg check --repair afterwards to heal any archives that had lost chunks which are
-       still generated from the input data.
-
-       Important: running borg recreate to re-chunk will remove the chunks_healthy
-       metadata of all items with replacement chunks, so healing will not be possible
-       any more after re-chunking (it is also unlikely it would ever work: due to the
-       change of chunking parameters, the missing chunk likely will never be seen again
-       even if you still have the data that produced it).
+       If your most recent borg check found missing chunks, please first run another
+       backup for the same data before doing any rechunking. If you are lucky, that
+       will re-create the missing chunks. Optionally, run another borg check to see
+       whether the chunks are still missing.
        """
    )
    subparser = subparsers.add_parser(
diff --git a/src/borg/fuse.py b/src/borg/fuse.py
index a15d4714e6..a391481473 100644
--- a/src/borg/fuse.py
+++ b/src/borg/fuse.py
@@ -10,6 +10,7 @@
 from collections import defaultdict, Counter
 from signal import SIGINT

+from . import constants
 from .constants import ROBJ_FILE_STREAM
 from .fuse_impl import llfuse, has_pyfuse3
@@ -46,6 +47,7 @@ def async_wrapper(fn):
 from .item import Item
 from .platform import uid2user, gid2group
 from .platformflags import is_darwin
+from .repository import Repository
 from .remote import RemoteRepository
@@ -652,17 +654,6 @@ def lookup(self, parent_inode, name, ctx=None):

     @async_wrapper
     def open(self, inode, flags, ctx=None):
-        if not self.allow_damaged_files:
-            item = self.get_item(inode)
-            if "chunks_healthy" in item:
-                # Processed archive items don't carry the path anymore; for converting the inode
-                # to the path we'd either have to store the inverse of the current structure,
-                # or search the entire archive. So we just don't print it. It's easy to correlate anyway.
-                logger.warning(
-                    "File has damaged (all-zero) chunks. Try running borg check --repair. "
-                    "Mount with allow_damaged_files to read damaged files."
-                )
-                raise llfuse.FUSEError(errno.EIO)
         return llfuse.FileInfo(fh=inode) if has_pyfuse3 else inode

     @async_wrapper
@@ -699,7 +690,16 @@ def read(self, fh, offset, size):
                     # evict fully read chunk from cache
                     del self.data_cache[id]
             else:
-                _, data = self.repo_objs.parse(id, self.repository_uncached.get(id), ro_type=ROBJ_FILE_STREAM)
+                try:
+                    cdata = self.repository_uncached.get(id)
+                except Repository.ObjectNotFound:
+                    if self.allow_damaged_files:
+                        data = constants.zeros[:s]
+                        assert len(data) == s
+                    else:
+                        raise llfuse.FUSEError(errno.EIO) from None
+                else:
+                    _, data = self.repo_objs.parse(id, cdata, ro_type=ROBJ_FILE_STREAM)
                 if offset + n < len(data):
                     # chunk was only partially read, cache it
                     self.data_cache[id] = data
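
The new read-side contract is: a missing chunk raises EIO, unless the filesystem was mounted with allow_damaged_files, in which case the missing range reads as zero bytes. A toy model of that assembly logic (hypothetical fetch callback returning None for lost chunks; not the actual FUSE code path):

    # Sketch: assemble file content from chunks, zero-filling lost chunks
    # when allow_damaged is set, raising EIO otherwise.
    import errno

    def read_file(chunks, fetch, allow_damaged=False):
        parts = []
        for chunk_id, size in chunks:
            data = fetch(chunk_id)  # bytes, or None if the chunk is lost
            if data is None:
                if not allow_damaged:
                    raise OSError(errno.EIO, "missing chunk")
                data = b"\0" * size  # damaged part reads as all-zero bytes
            parts.append(data)
        return b"".join(parts)
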
diff --git a/src/borg/helpers/fs.py b/src/borg/helpers/fs.py
index b3c602f90b..44db2eeb80 100644
--- a/src/borg/helpers/fs.py
+++ b/src/borg/helpers/fs.py
@@ -308,8 +308,8 @@ class HardLinkManager:
        If we encounter the same hlid again later, we hardlink to the path of the already extracted content of same hlid.

     C) When transferring from a borg1 archive, we need:
-       path -> chunks, chunks_healthy  # for borg1_hl_targets
-       If we encounter a regular file item with source == path later, we reuse chunks and chunks_healthy
+       path -> chunks_correct  # for borg1_hl_targets, chunks_correct must be either from .chunks_healthy or .chunks.
+       If we encounter a regular file item with source == path later, we reuse chunks_correct
        and create the same hlid = hardlink_id_from_path(source).

     D) When importing a tar file (simplified 1-pass way for now, not creating borg hardlink items):
@@ -353,7 +353,7 @@ def remember(self, *, id, info):
                a hlid (new borg style) [bytes]
                a (dev, inode) tuple (filesystem)
        :param info: information to remember, could be:
-               chunks / chunks_healthy list
+               chunks list
                hlid
        """
        assert isinstance(id, self.id_type), f"id is {id!r}, not of type {self.id_type}"
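
Case C above now keys a single list per hardlink id instead of a (chunks, chunks_healthy) tuple. The expected usage shape of the two methods, with made-up ids and sizes (the calls mirror those in the upgrade.py hunk below):

    # Sketch: remember the correct chunk list at the borg1 hardlink master,
    # retrieve it when a slave item referencing the same hlid shows up.
    from borg.helpers.fs import HardLinkManager

    hlm = HardLinkManager(id_type=bytes, info_type=list)
    chunks_correct = [(b"chunk-id-1", 100), (b"chunk-id-2", 200)]  # hypothetical
    hlm.remember(id=b"some-hlid", info=chunks_correct)
    assert hlm.retrieve(id=b"some-hlid") == chunks_correct
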
diff --git a/src/borg/helpers/parseformat.py b/src/borg/helpers/parseformat.py
index 0f3f397da6..e420c26e1b 100644
--- a/src/borg/helpers/parseformat.py
+++ b/src/borg/helpers/parseformat.py
@@ -814,7 +814,6 @@ class ItemFormatter(BaseFormatter):
         "isoctime": "file change time (ISO 8601 format)",
         "isoatime": "file access time (ISO 8601 format)",
         "xxh64": "XXH64 checksum of this file (note: this is NOT a cryptographic hash!)",
-        "health": 'either "healthy" (file ok) or "broken" (if file has all-zero replacement chunks)',
         "archiveid": "internal ID of the archive",
         "archivename": "name of the archive",
     }
@@ -824,7 +823,6 @@ class ItemFormatter(BaseFormatter):
         ("mtime", "ctime", "atime", "isomtime", "isoctime", "isoatime"),
         tuple(sorted(hash_algorithms)),
         ("archiveid", "archivename", "extra"),
-        ("health",),
     )

     KEYS_REQUIRING_CACHE = ()
@@ -881,10 +879,6 @@ def get_item_data(self, item, jsonline=False):
         item_data.update(text_to_json("user", item.get("user", str(item_data["uid"]))))
         item_data.update(text_to_json("group", item.get("group", str(item_data["gid"]))))

-        if jsonline:
-            item_data["healthy"] = "chunks_healthy" not in item
-        else:
-            item_data["health"] = "broken" if "chunks_healthy" in item else "healthy"
         item_data["flags"] = item.get("bsdflags")  # int if flags known, else (if flags unknown) None
         for key in self.used_call_keys:
             item_data[key] = self.call_keys[key](item)
diff --git a/src/borg/testsuite/archiver/check_cmd_test.py b/src/borg/testsuite/archiver/check_cmd_test.py
index cc2ee31e5c..f6b904de47 100644
--- a/src/borg/testsuite/archiver/check_cmd_test.py
+++ b/src/borg/testsuite/archiver/check_cmd_test.py
@@ -107,28 +107,19 @@ def test_missing_file_chunk(archivers, request):
         else:
             pytest.fail("should not happen")  # convert 'fail'

-    cmd(archiver, "check", exit_code=1)
+    output = cmd(archiver, "check", exit_code=1)
+    assert "Missing file chunk detected" in output
     output = cmd(archiver, "check", "--repair", exit_code=0)
-    assert "New missing file chunk detected" in output
-
-    cmd(archiver, "check", exit_code=0)
-    output = cmd(archiver, "list", "archive1", "--format={health}#{path}{NL}", exit_code=0)
-    assert "broken#" in output
+    assert "Missing file chunk detected" in output  # repair is not changing anything, just reporting.

-    # check that the file in the old archives has now a different chunk list without the killed chunk.
-    # also check that the correct original chunks list is preserved in item.chunks_healthy.
+    # check does not modify the chunks list.
     for archive_name in ("archive1", "archive2"):
         archive, repository = open_archive(archiver.repository_path, archive_name)
         with repository:
             for item in archive.iter_items():
                 if item.path.endswith(src_file):
                     assert len(valid_chunks) == len(item.chunks)
-                    assert killed_chunk not in item.chunks
-                    assert valid_chunks != item.chunks
-                    assert "chunks_healthy" in item
-                    assert len(valid_chunks) == len(item.chunks_healthy)
-                    assert killed_chunk in item.chunks_healthy
-                    assert valid_chunks == item.chunks_healthy
+                    assert valid_chunks == item.chunks
                     break
             else:
                 pytest.fail("should not happen")  # convert 'fail'
@@ -137,32 +128,9 @@ def test_missing_file_chunk(archivers, request):
     with patch.object(ChunkBuffer, "BUFFER_SIZE", 10):
         create_src_archive(archiver, "archive3")

-    # check should be able to heal the file now:
+    # check should not complain anymore about missing chunks:
     output = cmd(archiver, "check", "-v", "--repair", exit_code=0)
-    assert "Healed previously missing file chunk" in output
-    assert f"{src_file}: Completely healed previously damaged file!" in output
-
-    # check that the file in the old archives has the correct chunks again.
-    # also check that chunks_healthy list is removed as it is not needed any more.
-    for archive_name in ("archive1", "archive2"):
-        archive, repository = open_archive(archiver.repository_path, archive_name)
-        with repository:
-            for item in archive.iter_items():
-                if item.path.endswith(src_file):
-                    assert valid_chunks == item.chunks
-                    assert "chunks_healthy" not in item
-                    break
-            else:
-                pytest.fail("should not happen")
-
-    # list is also all-healthy again
-    output = cmd(archiver, "list", "archive1", "--format={health}#{path}{NL}", exit_code=0)
-    assert "broken#" not in output
-
-    # check should be fine now (and not show it has healed anything).
-    output = cmd(archiver, "check", "-v", "--repair", exit_code=0)
-    assert "Healed previously missing file chunk" not in output
-    assert "testsuite/archiver.py: Completely healed previously damaged file!" not in output
+    assert "Missing file chunk detected" not in output


 def test_missing_archive_item_chunk(archivers, request):
for archive_name in ("archive1", "archive2"): archive, repository = open_archive(archiver.repository_path, archive_name) with repository: for item in archive.iter_items(): if item.path.endswith(src_file): assert len(valid_chunks) == len(item.chunks) - assert killed_chunk not in item.chunks - assert valid_chunks != item.chunks - assert "chunks_healthy" in item - assert len(valid_chunks) == len(item.chunks_healthy) - assert killed_chunk in item.chunks_healthy - assert valid_chunks == item.chunks_healthy + assert valid_chunks == item.chunks break else: pytest.fail("should not happen") # convert 'fail' @@ -137,32 +128,9 @@ def test_missing_file_chunk(archivers, request): with patch.object(ChunkBuffer, "BUFFER_SIZE", 10): create_src_archive(archiver, "archive3") - # check should be able to heal the file now: + # check should not complain anymore about missing chunks: output = cmd(archiver, "check", "-v", "--repair", exit_code=0) - assert "Healed previously missing file chunk" in output - assert f"{src_file}: Completely healed previously damaged file!" in output - - # check that the file in the old archives has the correct chunks again. - # also check that chunks_healthy list is removed as it is not needed any more. - for archive_name in ("archive1", "archive2"): - archive, repository = open_archive(archiver.repository_path, archive_name) - with repository: - for item in archive.iter_items(): - if item.path.endswith(src_file): - assert valid_chunks == item.chunks - assert "chunks_healthy" not in item - break - else: - pytest.fail("should not happen") - - # list is also all-healthy again - output = cmd(archiver, "list", "archive1", "--format={health}#{path}{NL}", exit_code=0) - assert "broken#" not in output - - # check should be fine now (and not show it has healed anything). - output = cmd(archiver, "check", "-v", "--repair", exit_code=0) - assert "Healed previously missing file chunk" not in output - assert "testsuite/archiver.py: Completely healed previously damaged file!" not in output + assert "Missing file chunk detected" not in output def test_missing_archive_item_chunk(archivers, request): @@ -377,13 +345,14 @@ def fake_xxh64(data, seed=0): output = cmd(archiver, "check", "--archives-only", "--verify-data", exit_code=1) assert f"{bin_to_hex(chunk.id)}, integrity error" in output - # repair (heal is tested in another test) + # repair will find the defect chunk and remove it output = cmd(archiver, "check", "--repair", "--verify-data", exit_code=0) assert f"{bin_to_hex(chunk.id)}, integrity error" in output - assert f"{src_file}: New missing file chunk detected" in output + assert f"{src_file}: Missing file chunk detected" in output - # run with --verify-data again, all fine now (file was patched with a replacement chunk). - cmd(archiver, "check", "--archives-only", "--verify-data", exit_code=0) + # run with --verify-data again, it will notice the missing chunk. + output = cmd(archiver, "check", "--archives-only", "--verify-data", exit_code=1) + assert f"{src_file}: Missing file chunk detected" in output @pytest.mark.parametrize("init_args", [["--encryption=repokey-aes-ocb"], ["--encryption", "none"]]) @@ -409,13 +378,15 @@ def test_corrupted_file_chunk(archivers, request, init_args): output = cmd(archiver, "check", "--repository-only", exit_code=1) assert f"{bin_to_hex(chunk.id)} is corrupted: data does not match checksum." in output - # repair (heal is tested in another test) + # repair: the defect chunk will be removed by repair. 
output = cmd(archiver, "check", "--repair", exit_code=0) assert f"{bin_to_hex(chunk.id)} is corrupted: data does not match checksum." in output - assert f"{src_file}: New missing file chunk detected" in output + assert f"{src_file}: Missing file chunk detected" in output - # run normal check again, all fine now (file was patched with a replacement chunk). + # run normal check again cmd(archiver, "check", "--repository-only", exit_code=0) + output = cmd(archiver, "check", "--archives-only", exit_code=1) + assert f"{src_file}: Missing file chunk detected" in output def test_empty_repository(archivers, request): diff --git a/src/borg/testsuite/archiver/mount_cmds_test.py b/src/borg/testsuite/archiver/mount_cmds_test.py index 1a1ea12925..55a3d645f4 100644 --- a/src/borg/testsuite/archiver/mount_cmds_test.py +++ b/src/borg/testsuite/archiver/mount_cmds_test.py @@ -233,15 +233,19 @@ def test_fuse_allow_damaged_files(archivers, request): break else: assert False # missed the file - cmd(archiver, "check", "--repair", exit_code=0) mountpoint = os.path.join(archiver.tmpdir, "mountpoint") with fuse_mount(archiver, mountpoint, "-a", "archive"): - with pytest.raises(OSError) as excinfo: - open(os.path.join(mountpoint, "archive", path)) - assert excinfo.value.errno == errno.EIO + with open(os.path.join(mountpoint, "archive", path), "rb") as f: + with pytest.raises(OSError) as excinfo: + f.read() + assert excinfo.value.errno == errno.EIO + with fuse_mount(archiver, mountpoint, "-a", "archive", "-o", "allow_damaged_files"): - open(os.path.join(mountpoint, "archive", path)).close() + with open(os.path.join(mountpoint, "archive", path), "rb") as f: + # no exception raised, missing data will be all-zero + data = f.read() + assert data.endswith(b"\0\0") @pytest.mark.skipif(not llfuse, reason="llfuse not installed") diff --git a/src/borg/testsuite/archiver/transfer_cmd_test.py b/src/borg/testsuite/archiver/transfer_cmd_test.py index 3084a425c8..e5398e59cc 100644 --- a/src/borg/testsuite/archiver/transfer_cmd_test.py +++ b/src/borg/testsuite/archiver/transfer_cmd_test.py @@ -164,7 +164,6 @@ def convert_tz(local_naive, tzoffset, tzinfo): # fix expectation for size e["size"] = g["size"] # Note: size == 0 for all items without a size or chunks list (like e.g. directories) - # Note: healthy == True indicates the *absence* of the additional chunks_healthy list del g["hlid"] # borg 1 used "linktarget" and "source" for links, borg 2 uses "target" for symlinks. 
@@ -177,6 +176,9 @@ def convert_tz(local_naive, tzoffset, tzinfo):
                     # The S_IFBLK macro is broken on MINGW
                     del e["type"], g["type"]
                     del e["mode"], g["mode"]
+
+                del e["healthy"]  # not supported anymore
+
                 assert g == e

             if name == "archive1":
diff --git a/src/borg/upgrade.py b/src/borg/upgrade.py
index 6277f0172d..d4c94b6b39 100644
--- a/src/borg/upgrade.py
+++ b/src/borg/upgrade.py
@@ -48,15 +48,13 @@ def __init__(self, *, cache):

     def new_archive(self, *, archive):
         self.archive = archive
-        self.hlm = HardLinkManager(id_type=bytes, info_type=tuple)  # hlid -> (chunks, chunks_healthy)
+        self.hlm = HardLinkManager(id_type=bytes, info_type=list)  # hlid -> chunks_correct

     def upgrade_item(self, *, item):
         """upgrade item as needed, get rid of legacy crap"""
         ITEM_KEY_WHITELIST = {
             "path",
             "rdev",
-            "chunks",
-            "chunks_healthy",
             "hlid",
             "mode",
             "user",
@@ -78,22 +76,32 @@ def upgrade_item(self, *, item):
         if self.hlm.borg1_hardlink_master(item):
             item.hlid = hlid = self.hlm.hardlink_id_from_path(item.path)
-            self.hlm.remember(id=hlid, info=(item.get("chunks"), item.get("chunks_healthy")))
+            if "chunks_healthy" in item:
+                chunks_correct = item.chunks_healthy
+            elif "chunks" in item:
+                chunks_correct = item.chunks
+            else:
+                chunks_correct = None
+            self.hlm.remember(id=hlid, info=chunks_correct)
         elif self.hlm.borg1_hardlink_slave(item):
             item.hlid = hlid = self.hlm.hardlink_id_from_path(item.source)
-            chunks, chunks_healthy = self.hlm.retrieve(id=hlid, default=(None, None))
+            chunks = self.hlm.retrieve(id=hlid)
             if chunks is not None:
                 item.chunks = chunks
                 for chunk_id, chunk_size in chunks:
                     self.cache.reuse_chunk(chunk_id, chunk_size, self.archive.stats)
-                if chunks_healthy is not None:
-                    item.chunks_healthy = chunks
             del item.source  # not used for hardlinks any more, replaced by hlid

         # make sure we only have desired stuff in the new item. specifically, make sure to get rid of:
         # - 'acl' remnants of bug in attic <= 0.13
         # - 'hardlink_master' (superseded by hlid)
         item_dict = item.as_dict()
         new_item_dict = {key: value for key, value in item_dict.items() if key in ITEM_KEY_WHITELIST}
+        if "chunks_healthy" in item_dict:
+            # we want the chunks list with the CORRECT chunk ids, even if the chunk is missing.
+            new_item_dict["chunks"] = item_dict["chunks_healthy"]
+        elif "chunks" in item_dict:
+            # if there is no .chunks_healthy, the correct chunk ids are in .chunks.
+            new_item_dict["chunks"] = item_dict["chunks"]
         # symlink targets were .source for borg1, but borg2 uses .target:
         if "source" in item_dict:
             new_item_dict["target"] = item_dict["source"]
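
Taken together, the upgrader now folds the borg1 chunks_healthy list back into chunks. A self-contained sketch of just that selection rule, mirroring the whitelist post-processing above:

    # Sketch, not borg code: prefer the borg1 "correct" chunk ids when present.
    def select_chunks(item_dict):
        out = {}
        if "chunks_healthy" in item_dict:
            # borg1 kept the correct ids in chunks_healthy, even if some
            # referenced chunks are currently missing: prefer them.
            out["chunks"] = item_dict["chunks_healthy"]
        elif "chunks" in item_dict:
            out["chunks"] = item_dict["chunks"]
        return out

    assert select_chunks({"chunks": [[b"z", 1]], "chunks_healthy": [[b"a", 1]]}) == {"chunks": [[b"a", 1]]}
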