diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d3c6b0d26..9faeb3a9d 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -28,13 +28,15 @@ Checklist You do not need to check all the boxes below all at once. Feel free to take your time and add more commits. If you're done and ready for review, please check the last box. Enable a checkbox by replacing [ ] with [x]. + +Please always follow these steps: +- Read the [contribution guidelines](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#providing-patches). +- Enable [maintainer edits](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork). +- Run `gofmt` on the code in all commits. +- Format all commit messages in the same style as [the other commits in the repository](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#git-commits). --> -- [ ] I have read the [contribution guidelines](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#providing-patches). -- [ ] I have [enabled maintainer edits](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/allowing-changes-to-a-pull-request-branch-created-from-a-fork). - [ ] I have added tests for all code changes. - [ ] I have added documentation for relevant changes (in the manual). - [ ] There's a new file in `changelog/unreleased/` that describes the changes for our users (see [template](https://github.com/restic/restic/blob/master/changelog/TEMPLATE)). -- [ ] I have run `gofmt` on the code in all commits. -- [ ] All commit messages are formatted in the same style as [the other commits in the repo](https://github.com/restic/restic/blob/master/CONTRIBUTING.md#git-commits). - [ ] I'm done! This pull request is ready for review. diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index bf4e63bb7..5a41723bb 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,7 +13,7 @@ permissions: contents: read env: - latest_go: "1.22.x" + latest_go: "1.23.x" GO111MODULE: on jobs: @@ -23,27 +23,32 @@ jobs: # list of jobs to run: include: - job_name: Windows - go: 1.22.x + go: 1.23.x os: windows-latest - job_name: macOS - go: 1.22.x + go: 1.23.x os: macOS-latest test_fuse: false - job_name: Linux - go: 1.22.x + go: 1.23.x os: ubuntu-latest test_cloud_backends: true test_fuse: true check_changelog: true - job_name: Linux (race) - go: 1.22.x + go: 1.23.x os: ubuntu-latest test_fuse: true test_opts: "-race" + - job_name: Linux + go: 1.22.x + os: ubuntu-latest + test_fuse: true + - job_name: Linux go: 1.21.x os: ubuntu-latest @@ -254,7 +259,7 @@ jobs: uses: golangci/golangci-lint-action@v6 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 
- version: v1.57.1 + version: v1.61.0 args: --verbose --timeout 5m # only run golangci-lint for pull requests, otherwise ALL hints get diff --git a/CHANGELOG.md b/CHANGELOG.md index 9a5393915..7ab47f11d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,7 @@ # Table of Contents +* [Changelog for 0.17.3](#changelog-for-restic-0173-2024-11-08) +* [Changelog for 0.17.2](#changelog-for-restic-0172-2024-10-27) * [Changelog for 0.17.1](#changelog-for-restic-0171-2024-09-05) * [Changelog for 0.17.0](#changelog-for-restic-0170-2024-07-26) * [Changelog for 0.16.5](#changelog-for-restic-0165-2024-07-01) @@ -36,6 +38,160 @@ * [Changelog for 0.6.0](#changelog-for-restic-060-2017-05-29) +# Changelog for restic 0.17.3 (2024-11-08) +The following sections list the changes in restic 0.17.3 relevant to +restic users. The changes are ordered by importance. + +## Summary + + * Fix #4971: Fix unusable `mount` on macOS Sonoma + * Fix #5003: Fix metadata errors during backup of removable disks on Windows + * Fix #5101: Do not retry load/list operation if SFTP connection is broken + * Fix #5107: Fix metadata error on Windows for backups using VSS + * Enh #5096: Allow `prune --dry-run` without lock + +## Details + + * Bugfix #4971: Fix unusable `mount` on macOS Sonoma + + On macOS Sonoma when using FUSE-T, it was not possible to access files in a + mounted repository. This issue is now resolved. + + https://github.com/restic/restic/issues/4971 + https://github.com/restic/restic/pull/5048 + + * Bugfix #5003: Fix metadata errors during backup of removable disks on Windows + + Since restic 0.17.0, backing up removable disks on Windows could report errors + with retrieving metadata like shown below. + + ``` + error: incomplete metadata for d:\filename: get named security info failed with: Access is denied. + ``` + + This has now been fixed. + + https://github.com/restic/restic/issues/5003 + https://github.com/restic/restic/pull/5123 + https://forum.restic.net/t/backing-up-a-folder-from-a-veracrypt-volume-brings-up-errors-since-restic-v17-0/8444 + + * Bugfix #5101: Do not retry load/list operation if SFTP connection is broken + + When using restic with the SFTP backend, backend operations that load a file or + list files were retried even if the SFTP connection was broken. This has now + been fixed. + + https://github.com/restic/restic/pull/5101 + https://forum.restic.net/t/restic-hanging-on-backup/8559 + + * Bugfix #5107: Fix metadata error on Windows for backups using VSS + + Since restic 0.17.2, when creating a backup on Windows using + `--use-fs-snapshot`, restic would report an error like the following: + + ``` + error: incomplete metadata for C:\: get EA failed while opening file handle for path \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX\, with: The process cannot access the file because it is being used by another process. + ``` + + This has now been fixed by correctly handling paths that refer to volume shadow + copy snapshots. + + https://github.com/restic/restic/issues/5107 + https://github.com/restic/restic/pull/5110 + https://github.com/restic/restic/pull/5112 + + * Enhancement #5096: Allow `prune --dry-run` without lock + + The `prune --dry-run --no-lock` now allows performing a dry-run without locking + the repository. Note that if the repository is modified concurrently, `prune` + may return inaccurate statistics or errors. 
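As a minimal usage sketch of the combination described above (the repository path is a placeholder borrowed from the documentation examples elsewhere in this patch, not something the patch itself prescribes):

```
restic -r /srv/restic-repo prune --dry-run --no-lock
```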
+ + https://github.com/restic/restic/pull/5096 + + +# Changelog for restic 0.17.2 (2024-10-27) +The following sections list the changes in restic 0.17.2 relevant to +restic users. The changes are ordered by importance. + +## Summary + + * Fix #4004: Support container-level SAS/SAT tokens for Azure backend + * Fix #5047: Resolve potential error during concurrent cache cleanup + * Fix #5050: Return error if `tag` fails to lock repository + * Fix #5057: Exclude irregular files from backups + * Fix #5063: Correctly `backup` extended metadata when using VSS on Windows + +## Details + + * Bugfix #4004: Support container-level SAS/SAT tokens for Azure backend + + Restic previously expected SAS/SAT tokens to be generated at the account level, + which prevented tokens created at the container level from being used to + initialize a repository. This caused an error when attempting to initialize a + repository with container-level tokens. + + Restic now supports both account-level and container-level SAS/SAT tokens for + initializing a repository. + + https://github.com/restic/restic/issues/4004 + https://github.com/restic/restic/pull/5093 + + * Bugfix #5047: Resolve potential error during concurrent cache cleanup + + When multiple restic processes ran concurrently, they could compete to remove + obsolete snapshots from the local backend cache, sometimes leading to a "no such + file or directory" error. Restic now suppresses this error to prevent issues + during cache cleanup. + + https://github.com/restic/restic/pull/5047 + + * Bugfix #5050: Return error if `tag` fails to lock repository + + Since restic 0.17.0, the `tag` command did not return an error when it failed to + open or lock the repository. This issue has now been fixed. + + https://github.com/restic/restic/issues/5050 + https://github.com/restic/restic/pull/5056 + + * Bugfix #5057: Exclude irregular files from backups + + Since restic 0.17.1, files with the type `irregular` could mistakenly be + included in snapshots, especially when backing up special file types on Windows + that restic cannot process. This issue has now been fixed. + + Previously, this bug caused the `check` command to report errors like the + following one: + + ``` + tree 12345678[...]: node "example.zip" with invalid type "irregular" + ``` + + To repair affected snapshots, upgrade to restic 0.17.2 and run: + + ``` + restic repair snapshots --forget + ``` + + This will remove the `irregular` files from the snapshots (creating a new + snapshot ID for each of the affected snapshots). + + https://github.com/restic/restic/pull/5057 + https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 + + * Bugfix #5063: Correctly `backup` extended metadata when using VSS on Windows + + On Windows, when creating a backup with the `--use-fs-snapshot` option, restic + read extended metadata from the original filesystem path instead of from the + snapshot. This could result in errors if files were removed during the backup + process. + + This issue has now been resolved. + + https://github.com/restic/restic/issues/5063 + https://github.com/restic/restic/pull/5097 + https://github.com/restic/restic/pull/5099 + + # Changelog for restic 0.17.1 (2024-09-05) The following sections list the changes in restic 0.17.1 relevant to restic users. The changes are ordered by importance. 
diff --git a/VERSION b/VERSION index 21997e69a..e2d1ad6ac 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.17.1-dev +0.17.3-dev diff --git a/changelog/0.17.2_2024-10-27/issue-4004 b/changelog/0.17.2_2024-10-27/issue-4004 new file mode 100644 index 000000000..d95ad02e9 --- /dev/null +++ b/changelog/0.17.2_2024-10-27/issue-4004 @@ -0,0 +1,12 @@ +Bugfix: Support container-level SAS/SAT tokens for Azure backend + +Restic previously expected SAS/SAT tokens to be generated at the account level, +which prevented tokens created at the container level from being used to +initialize a repository. This caused an error when attempting to initialize a +repository with container-level tokens. + +Restic now supports both account-level and container-level SAS/SAT tokens for +initializing a repository. + +https://github.com/restic/restic/issues/4004 +https://github.com/restic/restic/pull/5093 diff --git a/changelog/0.17.2_2024-10-27/issue-5050 b/changelog/0.17.2_2024-10-27/issue-5050 new file mode 100644 index 000000000..34536f6da --- /dev/null +++ b/changelog/0.17.2_2024-10-27/issue-5050 @@ -0,0 +1,7 @@ +Bugfix: Return error if `tag` fails to lock repository + +Since restic 0.17.0, the `tag` command did not return an error when it failed +to open or lock the repository. This issue has now been fixed. + +https://github.com/restic/restic/issues/5050 +https://github.com/restic/restic/pull/5056 diff --git a/changelog/0.17.2_2024-10-27/issue-5063 b/changelog/0.17.2_2024-10-27/issue-5063 new file mode 100644 index 000000000..54f97f0af --- /dev/null +++ b/changelog/0.17.2_2024-10-27/issue-5063 @@ -0,0 +1,12 @@ +Bugfix: Correctly `backup` extended metadata when using VSS on Windows + +On Windows, when creating a backup with the `--use-fs-snapshot` option, restic +read extended metadata from the original filesystem path instead of from the +snapshot. This could result in errors if files were removed during the backup +process. + +This issue has now been resolved. + +https://github.com/restic/restic/issues/5063 +https://github.com/restic/restic/pull/5097 +https://github.com/restic/restic/pull/5099 diff --git a/changelog/0.17.2_2024-10-27/pull-5047 b/changelog/0.17.2_2024-10-27/pull-5047 new file mode 100644 index 000000000..ace02c3b4 --- /dev/null +++ b/changelog/0.17.2_2024-10-27/pull-5047 @@ -0,0 +1,8 @@ +Bugfix: Resolve potential error during concurrent cache cleanup + +When multiple restic processes ran concurrently, they could compete to remove +obsolete snapshots from the local backend cache, sometimes leading to a "no +such file or directory" error. Restic now suppresses this error to prevent +issues during cache cleanup. + +https://github.com/restic/restic/pull/5047 diff --git a/changelog/0.17.2_2024-10-27/pull-5057 b/changelog/0.17.2_2024-10-27/pull-5057 new file mode 100644 index 000000000..aba2992b7 --- /dev/null +++ b/changelog/0.17.2_2024-10-27/pull-5057 @@ -0,0 +1,24 @@ +Bugfix: Exclude irregular files from backups + +Since restic 0.17.1, files with the type `irregular` could mistakenly be included +in snapshots, especially when backing up special file types on Windows that +restic cannot process. This issue has now been fixed. 
+ +Previously, this bug caused the `check` command to report errors like the +following one: + +``` + tree 12345678[...]: node "example.zip" with invalid type "irregular" +``` + +To repair affected snapshots, upgrade to restic 0.17.2 and run: + +``` +restic repair snapshots --forget +``` + +This will remove the `irregular` files from the snapshots (creating +a new snapshot ID for each of the affected snapshots). + +https://github.com/restic/restic/pull/5057 +https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 diff --git a/changelog/0.17.3_2024-11-08/issue-4971 b/changelog/0.17.3_2024-11-08/issue-4971 new file mode 100644 index 000000000..235d18cb5 --- /dev/null +++ b/changelog/0.17.3_2024-11-08/issue-4971 @@ -0,0 +1,7 @@ +Bugfix: Fix unusable `mount` on macOS Sonoma + +On macOS Sonoma when using FUSE-T, it was not possible to access files in +a mounted repository. This issue is now resolved. + +https://github.com/restic/restic/issues/4971 +https://github.com/restic/restic/pull/5048 diff --git a/changelog/0.17.3_2024-11-08/issue-5003 b/changelog/0.17.3_2024-11-08/issue-5003 new file mode 100644 index 000000000..f88ed3113 --- /dev/null +++ b/changelog/0.17.3_2024-11-08/issue-5003 @@ -0,0 +1,14 @@ +Bugfix: Fix metadata errors during backup of removable disks on Windows + +Since restic 0.17.0, backing up removable disks on Windows could report +errors with retrieving metadata like shown below. + +``` +error: incomplete metadata for d:\filename: get named security info failed with: Access is denied. +``` + +This has now been fixed. + +https://github.com/restic/restic/issues/5003 +https://github.com/restic/restic/pull/5123 +https://forum.restic.net/t/backing-up-a-folder-from-a-veracrypt-volume-brings-up-errors-since-restic-v17-0/8444 diff --git a/changelog/0.17.3_2024-11-08/issue-5107 b/changelog/0.17.3_2024-11-08/issue-5107 new file mode 100644 index 000000000..13bb380e4 --- /dev/null +++ b/changelog/0.17.3_2024-11-08/issue-5107 @@ -0,0 +1,15 @@ +Bugfix: Fix metadata error on Windows for backups using VSS + +Since restic 0.17.2, when creating a backup on Windows using `--use-fs-snapshot`, +restic would report an error like the following: + +``` +error: incomplete metadata for C:\: get EA failed while opening file handle for path \\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX\, with: The process cannot access the file because it is being used by another process. +``` + +This has now been fixed by correctly handling paths that refer to volume +shadow copy snapshots. + +https://github.com/restic/restic/issues/5107 +https://github.com/restic/restic/pull/5110 +https://github.com/restic/restic/pull/5112 diff --git a/changelog/0.17.3_2024-11-08/pull-5096 b/changelog/0.17.3_2024-11-08/pull-5096 new file mode 100644 index 000000000..b1cc6edd3 --- /dev/null +++ b/changelog/0.17.3_2024-11-08/pull-5096 @@ -0,0 +1,8 @@ +Enhancement: Allow `prune --dry-run` without lock + +The `prune --dry-run --no-lock` now allows performing a dry-run +without locking the repository. Note that if the repository is +modified concurrently, `prune` may return inaccurate statistics +or errors. 
+ +https://github.com/restic/restic/pull/5096 diff --git a/changelog/0.17.3_2024-11-08/pull-5101 b/changelog/0.17.3_2024-11-08/pull-5101 new file mode 100644 index 000000000..4152eb185 --- /dev/null +++ b/changelog/0.17.3_2024-11-08/pull-5101 @@ -0,0 +1,8 @@ +Bugfix: Do not retry load/list operation if SFTP connection is broken + +When using restic with the SFTP backend, backend operations that load a +file or list files were retried even if the SFTP connection was broken. +This has now been fixed. + +https://github.com/restic/restic/pull/5101 +https://forum.restic.net/t/restic-hanging-on-backup/8559 diff --git a/changelog/unreleased/issue-2165 b/changelog/unreleased/issue-2165 new file mode 100644 index 000000000..12bc9dfd9 --- /dev/null +++ b/changelog/unreleased/issue-2165 @@ -0,0 +1,16 @@ +Bugfix: Ignore disappeared backup source files + +If during a backup files were removed between restic listing the directory +content and backing up the file in question, the following error could occur: + +``` +error: lstat /some/file/name: no such file or directory +``` + +The backup command now ignores this particular error and silently skips the +removed file. + +https://github.com/restic/restic/issues/2165 +https://github.com/restic/restic/issues/3098 +https://github.com/restic/restic/pull/5143 +https://github.com/restic/restic/pull/5145 diff --git a/changelog/unreleased/issue-4004 b/changelog/unreleased/issue-4004 deleted file mode 100644 index ca23af26f..000000000 --- a/changelog/unreleased/issue-4004 +++ /dev/null @@ -1,12 +0,0 @@ -Bugfix: Allow use of container level SAS/SAT tokens with Azure backend - -When using a SAS/SAT token for authentication with Azure, restic was expecting -the provided token to be generated at the account level, granting permissions -to the storage account and all its containers. This caused an error that did -not allow tokens that were generated at the container level to be used to -initalize a repository. -Restic now allows SAS/SAT tokens that were generated at the account or -container level to be used to initalize a repository. - -https://github.com/restic/restic/issues/4004 -https://github.com/restic/restic/pull/5093 diff --git a/changelog/unreleased/issue-4521 b/changelog/unreleased/issue-4521 new file mode 100644 index 000000000..709741d11 --- /dev/null +++ b/changelog/unreleased/issue-4521 @@ -0,0 +1,21 @@ +Enhancement: Add config option to set Microsoft Blob Storage Access Tier + +The `azure.access-tier` option can be passed to Restic (using `-o`) to +specify the access tier for Microsoft Blob Storage objects created by Restic. + +The access tier is passed as-is to Microsoft Blob Storage, so it needs to be +understood by the API. The allowed values are `Hot`, `Cool`, or `Cold`. + +If unspecified, the default is inferred from the default configured on the +storage account. + +You can mix access tiers in the same container, and the setting isn't +stored in the restic repository, so be sure to specify it with each +command that writes to Microsoft Blob Storage. + +There is no official `Archive` storage support in restic, use this option at +your own risk. To restore any data, it is still necessary to manually warm up +the required data in the `Archive` tier. 
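A minimal sketch of how the access-tier option described above could be passed on the command line; the Azure container name `foo` and the backup path are placeholders, only the `-o azure.access-tier=...` switch comes from the patch:

```
restic -o azure.access-tier=Cool -r azure:foo:/ backup ~/data
```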
+ +https://github.com/restic/restic/issues/4521 +https://github.com/restic/restic/pull/5046 \ No newline at end of file diff --git a/changelog/unreleased/issue-5050 b/changelog/unreleased/issue-5050 deleted file mode 100644 index 9604fc857..000000000 --- a/changelog/unreleased/issue-5050 +++ /dev/null @@ -1,7 +0,0 @@ -Bugfix: Missing error if `tag` fails to lock repository - -Since restic 0.17.0, the `tag` command did not return an error if it failed to -open or lock the repository. This has been fixed. - -https://github.com/restic/restic/issues/5050 -https://github.com/restic/restic/pull/5056 diff --git a/changelog/unreleased/issue-5063 b/changelog/unreleased/issue-5063 deleted file mode 100644 index 95048ec58..000000000 --- a/changelog/unreleased/issue-5063 +++ /dev/null @@ -1,10 +0,0 @@ -Bugfix: Correctly `backup` extended metadata when using VSS on Windows - -On Windows, when creating a backup using the `--use-fs-snapshot` option, -then the extended metadata was not read from the filesystem snapshot. This -could result in errors when files have been removed in the meantime. - -This issue has been resolved. - -https://github.com/restic/restic/issues/5063 -https://github.com/restic/restic/pull/5097 diff --git a/changelog/unreleased/issue-5081 b/changelog/unreleased/issue-5081 new file mode 100644 index 000000000..6cf1bf592 --- /dev/null +++ b/changelog/unreleased/issue-5081 @@ -0,0 +1,7 @@ +Enhancement: Retry loading repository config + +Restic now retries loading the repository config file when opening a repository. +In addition, the `init` command now also retries backend operations. + +https://github.com/restic/restic/issues/5081 +https://github.com/restic/restic/pull/5095 diff --git a/changelog/unreleased/issue-5092 b/changelog/unreleased/issue-5092 new file mode 100644 index 000000000..b6a32b68b --- /dev/null +++ b/changelog/unreleased/issue-5092 @@ -0,0 +1,8 @@ +Enhancement: Indicate the number of deleted files/directories during restore + +Restic now indicates the number of deleted files/directories during restore. +The `--json` output now includes a `files_deleted` field that shows the number +of files and directories that were deleted during restore. + +https://github.com/restic/restic/issues/5092 +https://github.com/restic/restic/pull/5100 diff --git a/changelog/unreleased/issue-5131 b/changelog/unreleased/issue-5131 new file mode 100644 index 000000000..fd38a216d --- /dev/null +++ b/changelog/unreleased/issue-5131 @@ -0,0 +1,6 @@ +Enhancement: Add DragonflyBSD support + +Restic can now be compiled on DragonflyBSD. + +https://github.com/restic/restic/issues/5131 +https://github.com/restic/restic/pull/5138 diff --git a/changelog/unreleased/pull-5047 b/changelog/unreleased/pull-5047 deleted file mode 100644 index ee50c6ec7..000000000 --- a/changelog/unreleased/pull-5047 +++ /dev/null @@ -1,7 +0,0 @@ -Bugfix: Fix possible error on concurrent cache cleanup - -Fix for multiple restic processes executing concurrently and racing to -remove obsolete snapshots from the local backend cache. Restic now suppresses the `no -such file or directory` error. - -https://github.com/restic/restic/pull/5047 diff --git a/changelog/unreleased/pull-5057 b/changelog/unreleased/pull-5057 deleted file mode 100644 index c34436044..000000000 --- a/changelog/unreleased/pull-5057 +++ /dev/null @@ -1,21 +0,0 @@ -Bugfix: Do not include irregular files in backup - -Since restic 0.17.1, files with type `irregular` could incorrectly be included -in snapshots.
This is most likely to occur when backing up special file types -on Windows that cannot be handled by restic. - -This has been fixed. - -When running the `check` command this bug resulted in an error like the -following: - -``` - tree 12345678[...]: node "example.zip" with invalid type "irregular" -``` - -Repairing the affected snapshots requires upgrading to restic 0.17.2 and then -manually running `restic repair snapshots --forget`. This will remove the -`irregular` files from the snapshots. - -https://github.com/restic/restic/pull/5057 -https://forum.restic.net/t/errors-found-by-check-1-invalid-type-irregular-2-ciphertext-verification-failed/8447/2 diff --git a/changelog/unreleased/pull-5119 b/changelog/unreleased/pull-5119 new file mode 100644 index 000000000..731e3ecd7 --- /dev/null +++ b/changelog/unreleased/pull-5119 @@ -0,0 +1,6 @@ +Enhancement: Include backup start and end in JSON output + +The JSON output of the backup command now also includes the timestamps +of the `backup_start` and `backup_end` times. + +https://github.com/restic/restic/pull/5119 diff --git a/changelog/unreleased/pull-5141 b/changelog/unreleased/pull-5141 new file mode 100644 index 000000000..7f71f2269 --- /dev/null +++ b/changelog/unreleased/pull-5141 @@ -0,0 +1,7 @@ +Enhancement: Provide clear error message if AZURE_ACCOUNT_NAME is not set + +If AZURE_ACCOUNT_NAME is not set, any command related to an Azure repository +would result in a misleading networking error. Restic will now detect this and +provide a clear warning that the variable is not defined. + +https://github.com/restic/restic/pull/5141 diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 5926fdd54..06d71e345 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -31,7 +31,7 @@ func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) { err := testRunBackupAssumeFailure(t, dir, target, opts, gopts) - rtest.Assert(t, err == nil, "Error while backing up") + rtest.Assert(t, err == nil, "Error while backing up: %v", err) } func TestBackup(t *testing.T) { @@ -52,14 +52,14 @@ func testBackup(t *testing.T, useFsSnapshot bool) { opts := BackupOptions{UseFsSnapshot: useFsSnapshot} // first backup - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) testListSnapshots(t, env.gopts, 1) testRunCheck(t, env.gopts) stat1 := dirStats(env.repo) // second backup, implicit incremental - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs := testListSnapshots(t, env.gopts, 2) stat2 := dirStats(env.repo) @@ -71,7 +71,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { testRunCheck(t, env.gopts) // third backup, explicit incremental opts.Parent = snapshotIDs[0].String() - testRunBackup(t, filepath.Dir(env.testdata), []string{"testdata"}, opts, env.gopts) + testRunBackup(t, "", []string{env.testdata}, opts, env.gopts) snapshotIDs = testListSnapshots(t, env.gopts, 3) stat3 := dirStats(env.repo) @@ -84,7 +84,7 @@ func testBackup(t *testing.T, useFsSnapshot bool) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - 
testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()+":"+toPathInSnapshot(filepath.Dir(env.testdata))) diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) rtest.Assert(t, diff == "", "directories are not equal: %v", diff) } @@ -92,6 +92,20 @@ func testBackup(t *testing.T, useFsSnapshot bool) { testRunCheck(t, env.gopts) } +func toPathInSnapshot(path string) string { + // use path as is on most platforms, but convert it on windows + if runtime.GOOS == "windows" { + // the path generated by the test is always local so take the shortcut + vol := filepath.VolumeName(path) + if vol[len(vol)-1] != ':' { + panic(fmt.Sprintf("unexpected path: %q", path)) + } + path = vol[:len(vol)-1] + string(filepath.Separator) + path[len(vol)+1:] + path = filepath.ToSlash(path) + } + return path +} + func TestBackupWithRelativePath(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -118,7 +132,7 @@ type vssDeleteOriginalFS struct { hasRemoved bool } -func (f *vssDeleteOriginalFS) Lstat(name string) (os.FileInfo, error) { +func (f *vssDeleteOriginalFS) Lstat(name string) (*fs.ExtendedFileInfo, error) { if !f.hasRemoved { // call Lstat to trigger snapshot creation _, _ = f.FS.Lstat(name) @@ -351,12 +365,7 @@ func TestBackupExclude(t *testing.T) { for _, filename := range backupExcludeFilenames { fp := filepath.Join(datadir, filename) rtest.OK(t, os.MkdirAll(filepath.Dir(fp), 0755)) - - f, err := os.Create(fp) - rtest.OK(t, err) - - fmt.Fprint(f, filename) - rtest.OK(t, f.Close()) + rtest.OK(t, os.WriteFile(fp, []byte(filename), 0o666)) } snapshots := make(map[string]struct{}) @@ -557,7 +566,7 @@ func TestHardLink(t *testing.T) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir) - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()) diff := directoriesContentsDiff(env.testdata, filepath.Join(restoredir, "testdata")) rtest.Assert(t, diff == "", "directories are not equal %v", diff) diff --git a/cmd/restic/cmd_backup_test.go b/cmd/restic/cmd_backup_test.go index 5cbc42436..44e08ff96 100644 --- a/cmd/restic/cmd_backup_test.go +++ b/cmd/restic/cmd_backup_test.go @@ -39,21 +39,24 @@ func TestCollectTargets(t *testing.T) { f1, err := os.Create(filepath.Join(dir, "fromfile")) rtest.OK(t, err) // Empty lines should be ignored. A line starting with '#' is a comment. - fmt.Fprintf(f1, "\n%s*\n # here's a comment\n", f1.Name()) + _, err = fmt.Fprintf(f1, "\n%s*\n # here's a comment\n", f1.Name()) + rtest.OK(t, err) rtest.OK(t, f1.Close()) f2, err := os.Create(filepath.Join(dir, "fromfile-verbatim")) rtest.OK(t, err) for _, filename := range []string{fooSpace, barStar} { // Empty lines should be ignored. CR+LF is allowed. 
- fmt.Fprintf(f2, "%s\r\n\n", filepath.Join(dir, filename)) + _, err = fmt.Fprintf(f2, "%s\r\n\n", filepath.Join(dir, filename)) + rtest.OK(t, err) } rtest.OK(t, f2.Close()) f3, err := os.Create(filepath.Join(dir, "fromfile-raw")) rtest.OK(t, err) for _, filename := range []string{"baz", "quux"} { - fmt.Fprintf(f3, "%s\x00", filepath.Join(dir, filename)) + _, err = fmt.Fprintf(f3, "%s\x00", filepath.Join(dir, filename)) + rtest.OK(t, err) } rtest.OK(t, err) rtest.OK(t, f3.Close()) diff --git a/cmd/restic/cmd_copy_integration_test.go b/cmd/restic/cmd_copy_integration_test.go index 704615870..9ae78ba50 100644 --- a/cmd/restic/cmd_copy_integration_test.go +++ b/cmd/restic/cmd_copy_integration_test.go @@ -62,11 +62,11 @@ func TestCopy(t *testing.T) { for i, snapshotID := range snapshotIDs { restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i)) origRestores[restoredir] = struct{}{} - testRunRestore(t, env.gopts, restoredir, snapshotID) + testRunRestore(t, env.gopts, restoredir, snapshotID.String()) } for i, snapshotID := range copiedSnapshotIDs { restoredir := filepath.Join(env2.base, fmt.Sprintf("restore%d", i)) - testRunRestore(t, env2.gopts, restoredir, snapshotID) + testRunRestore(t, env2.gopts, restoredir, snapshotID.String()) foundMatch := false for cmpdir := range origRestores { diff := directoriesContentsDiff(restoredir, cmpdir) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index 2213d8e7a..06ae6cc20 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -75,17 +75,17 @@ func init() { } type lsPrinter interface { - Snapshot(sn *restic.Snapshot) - Node(path string, node *restic.Node, isPrefixDirectory bool) - LeaveDir(path string) - Close() + Snapshot(sn *restic.Snapshot) error + Node(path string, node *restic.Node, isPrefixDirectory bool) error + LeaveDir(path string) error + Close() error } type jsonLsPrinter struct { enc *json.Encoder } -func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) { +func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) error { type lsSnapshot struct { *restic.Snapshot ID *restic.ID `json:"id"` @@ -94,27 +94,21 @@ func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) { StructType string `json:"struct_type"` // "snapshot", deprecated } - err := p.enc.Encode(lsSnapshot{ + return p.enc.Encode(lsSnapshot{ Snapshot: sn, ID: sn.ID(), ShortID: sn.ID().Str(), MessageType: "snapshot", StructType: "snapshot", }) - if err != nil { - Warnf("JSON encode failed: %v\n", err) - } } // Print node in our custom JSON format, followed by a newline. -func (p *jsonLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) { +func (p *jsonLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) error { if isPrefixDirectory { - return - } - err := lsNodeJSON(p.enc, path, node) - if err != nil { - Warnf("JSON encode failed: %v\n", err) + return nil } + return lsNodeJSON(p.enc, path, node) } func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { @@ -160,8 +154,8 @@ func lsNodeJSON(enc *json.Encoder, path string, node *restic.Node) error { return enc.Encode(n) } -func (p *jsonLsPrinter) LeaveDir(_ string) {} -func (p *jsonLsPrinter) Close() {} +func (p *jsonLsPrinter) LeaveDir(_ string) error { return nil } +func (p *jsonLsPrinter) Close() error { return nil } type ncduLsPrinter struct { out io.Writer @@ -171,16 +165,17 @@ type ncduLsPrinter struct { // lsSnapshotNcdu prints a restic snapshot in Ncdu save format. // It opens the JSON list. 
Nodes are added with lsNodeNcdu and the list is closed by lsCloseNcdu. // Format documentation: https://dev.yorhel.nl/ncdu/jsonfmt -func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) { +func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) error { const NcduMajorVer = 1 const NcduMinorVer = 2 snapshotBytes, err := json.Marshal(sn) if err != nil { - Warnf("JSON encode failed: %v\n", err) + return err } p.depth++ - fmt.Fprintf(p.out, "[%d, %d, %s, [{\"name\":\"/\"}", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) + _, err = fmt.Fprintf(p.out, "[%d, %d, %s, [{\"name\":\"/\"}", NcduMajorVer, NcduMinorVer, string(snapshotBytes)) + return err } func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { @@ -232,27 +227,30 @@ func lsNcduNode(_ string, node *restic.Node) ([]byte, error) { return json.Marshal(outNode) } -func (p *ncduLsPrinter) Node(path string, node *restic.Node, _ bool) { +func (p *ncduLsPrinter) Node(path string, node *restic.Node, _ bool) error { out, err := lsNcduNode(path, node) if err != nil { - Warnf("JSON encode failed: %v\n", err) + return err } if node.Type == restic.NodeTypeDir { - fmt.Fprintf(p.out, ",\n%s[\n%s%s", strings.Repeat(" ", p.depth), strings.Repeat(" ", p.depth+1), string(out)) + _, err = fmt.Fprintf(p.out, ",\n%s[\n%s%s", strings.Repeat(" ", p.depth), strings.Repeat(" ", p.depth+1), string(out)) p.depth++ } else { - fmt.Fprintf(p.out, ",\n%s%s", strings.Repeat(" ", p.depth), string(out)) + _, err = fmt.Fprintf(p.out, ",\n%s%s", strings.Repeat(" ", p.depth), string(out)) } + return err } -func (p *ncduLsPrinter) LeaveDir(_ string) { +func (p *ncduLsPrinter) LeaveDir(_ string) error { p.depth-- - fmt.Fprintf(p.out, "\n%s]", strings.Repeat(" ", p.depth)) + _, err := fmt.Fprintf(p.out, "\n%s]", strings.Repeat(" ", p.depth)) + return err } -func (p *ncduLsPrinter) Close() { - fmt.Fprint(p.out, "\n]\n]\n") +func (p *ncduLsPrinter) Close() error { + _, err := fmt.Fprint(p.out, "\n]\n]\n") + return err } type textLsPrinter struct { @@ -261,17 +259,23 @@ type textLsPrinter struct { HumanReadable bool } -func (p *textLsPrinter) Snapshot(sn *restic.Snapshot) { +func (p *textLsPrinter) Snapshot(sn *restic.Snapshot) error { Verbosef("%v filtered by %v:\n", sn, p.dirs) + return nil } -func (p *textLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) { +func (p *textLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) error { if !isPrefixDirectory { Printf("%s\n", formatNode(path, node, p.ListLong, p.HumanReadable)) } + return nil } -func (p *textLsPrinter) LeaveDir(_ string) {} -func (p *textLsPrinter) Close() {} +func (p *textLsPrinter) LeaveDir(_ string) error { + return nil +} +func (p *textLsPrinter) Close() error { + return nil +} func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []string) error { if len(args) == 0 { @@ -374,7 +378,9 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return err } - printer.Snapshot(sn) + if err := printer.Snapshot(sn); err != nil { + return err + } processNode := func(_ restic.ID, nodepath string, node *restic.Node, err error) error { if err != nil { @@ -387,7 +393,9 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri printedDir := false if withinDir(nodepath) { // if we're within a target path, print the node - printer.Node(nodepath, node, false) + if err := printer.Node(nodepath, node, false); err != nil { + return err + } printedDir = true // if recursive listing is requested, signal the walker 
that it @@ -402,7 +410,7 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri if approachingMatchingTree(nodepath) { // print node leading up to the target paths if !printedDir { - printer.Node(nodepath, node, true) + return printer.Node(nodepath, node, true) } return nil } @@ -412,7 +420,9 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri if node.Type == restic.NodeTypeDir { // immediately generate leaveDir if the directory is skipped if printedDir { - printer.LeaveDir(nodepath) + if err := printer.LeaveDir(nodepath); err != nil { + return err + } } return walker.ErrSkipNode } @@ -421,11 +431,12 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri err = walker.Walk(ctx, repo, *sn.Tree, walker.WalkVisitor{ ProcessNode: processNode, - LeaveDir: func(path string) { + LeaveDir: func(path string) error { // the root path `/` has no corresponding node and is thus also skipped by processNode if path != "/" { - printer.LeaveDir(path) + return printer.LeaveDir(path) } + return nil }, }) @@ -433,6 +444,5 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return err } - printer.Close() - return nil + return printer.Close() } diff --git a/cmd/restic/cmd_ls_test.go b/cmd/restic/cmd_ls_test.go index b8b074242..3d4e1dbc7 100644 --- a/cmd/restic/cmd_ls_test.go +++ b/cmd/restic/cmd_ls_test.go @@ -134,29 +134,29 @@ func TestLsNcdu(t *testing.T) { } modTime := time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC) - printer.Snapshot(&restic.Snapshot{ + rtest.OK(t, printer.Snapshot(&restic.Snapshot{ Hostname: "host", Paths: []string{"/example"}, - }) - printer.Node("/directory", &restic.Node{ + })) + rtest.OK(t, printer.Node("/directory", &restic.Node{ Type: restic.NodeTypeDir, Name: "directory", ModTime: modTime, - }, false) - printer.Node("/directory/data", &restic.Node{ + }, false)) + rtest.OK(t, printer.Node("/directory/data", &restic.Node{ Type: restic.NodeTypeFile, Name: "data", Size: 42, ModTime: modTime, - }, false) - printer.LeaveDir("/directory") - printer.Node("/file", &restic.Node{ + }, false)) + rtest.OK(t, printer.LeaveDir("/directory")) + rtest.OK(t, printer.Node("/file", &restic.Node{ Type: restic.NodeTypeFile, Name: "file", Size: 12345, ModTime: modTime, - }, false) - printer.Close() + }, false)) + rtest.OK(t, printer.Close()) rtest.Equals(t, `[1, 2, {"time":"0001-01-01T00:00:00Z","tree":null,"paths":["/example"],"hostname":"host"}, [{"name":"/"}, [ diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index e8473bd6f..213714799 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -74,7 +74,7 @@ func init() { func addPruneOptions(c *cobra.Command, pruneOptions *PruneOptions) { f := c.Flags() f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')") - f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)") + f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "stop after repacking this much data in total (allowed suffixes for `size`: k/K, m/M, g/G, t/T)") f.BoolVar(&pruneOptions.RepackCacheableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") f.BoolVar(&pruneOptions.RepackSmall, "repack-small", false, "repack pack files below 80% of target pack size") f.BoolVar(&pruneOptions.RepackUncompressed, 
"repack-uncompressed", false, "repack all uncompressed data") @@ -149,7 +149,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") } - ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) + if gopts.NoLock && !opts.DryRun { + return errors.Fatal("--no-lock is only applicable in combination with --dry-run for prune command") + } + + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock) if err != nil { return err } diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 42cd1f87d..945c24a37 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -17,17 +17,17 @@ import ( "github.com/restic/restic/internal/ui/termstatus" ) -func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) { +func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID string) { testRunRestoreExcludes(t, opts, dir, snapshotID, nil) } -func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, excludes []string) { +func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID string, excludes []string) { opts := RestoreOptions{ Target: dir, } opts.Excludes = excludes - rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts)) + rtest.OK(t, testRunRestoreAssumeFailure(snapshotID, opts, gopts)) } func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error { @@ -197,7 +197,7 @@ func TestRestoreFilter(t *testing.T) { snapshotID := testListSnapshots(t, env.gopts, 1)[0] // no restore filter should restore all files - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore0"), snapshotID.String()) for _, testFile := range testfiles { rtest.OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", testFile.name), int64(testFile.size))) } @@ -219,7 +219,7 @@ func TestRestoreFilter(t *testing.T) { // restore with excludes restoredir := filepath.Join(env.base, "restore-with-excludes") - testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID, excludePatterns) + testRunRestoreExcludes(t, env.gopts, restoredir, snapshotID.String(), excludePatterns) testRestoredFileExclusions(t, restoredir) // Create an exclude file with some patterns @@ -339,7 +339,7 @@ func TestRestoreWithPermissionFailure(t *testing.T) { _ = withRestoreGlobalOptions(func() error { globalOptions.stderr = io.Discard - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0]) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshots[0].String()) return nil }) diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index a9f664110..b62d1ed95 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -2,7 +2,6 @@ package main import ( "context" - "fmt" "time" "github.com/spf13/cobra" @@ -141,7 +140,7 @@ func rewriteSnapshot(ctx context.Context, repo *repository.Repository, sn *resti if selectByName(path) { return node } - Verbosef(fmt.Sprintf("excluding %s\n", path)) + Verbosef("excluding %s\n", path) return nil } diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 466f536e0..f935cec86 100644 --- a/cmd/restic/cmd_snapshots.go +++ 
b/cmd/restic/cmd_snapshots.go @@ -296,7 +296,9 @@ func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error { } // Info - fmt.Fprintf(stdout, "snapshots") + if _, err := fmt.Fprintf(stdout, "snapshots"); err != nil { + return err + } var infoStrings []string if key.Hostname != "" { infoStrings = append(infoStrings, "host ["+key.Hostname+"]") @@ -308,11 +310,13 @@ func PrintSnapshotGroupHeader(stdout io.Writer, groupKeyJSON string) error { infoStrings = append(infoStrings, "paths ["+strings.Join(key.Paths, ", ")+"]") } if infoStrings != nil { - fmt.Fprintf(stdout, " for (%s)", strings.Join(infoStrings, ", ")) + if _, err := fmt.Fprintf(stdout, " for (%s)", strings.Join(infoStrings, ", ")); err != nil { + return err + } } - fmt.Fprintf(stdout, ":\n") + _, err = fmt.Fprintf(stdout, ":\n") - return nil + return err } // Snapshot helps to print Snapshots as JSON with their ID included. diff --git a/cmd/restic/global.go b/cmd/restic/global.go index ff54321bb..bea09837f 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -46,7 +46,7 @@ import ( // to a missing backend storage location or config file var ErrNoRepository = errors.New("repository does not exist") -var version = "0.17.1-dev (compiled manually)" +var version = "0.17.3-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" @@ -308,7 +308,7 @@ func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt fd := int(out.Fd()) state, err := term.GetState(fd) if err != nil { - fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) + _, _ = fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) return "", err } @@ -317,16 +317,22 @@ func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt go func() { defer close(done) - fmt.Fprint(out, prompt) + _, err = fmt.Fprint(out, prompt) + if err != nil { + return + } buf, err = term.ReadPassword(int(in.Fd())) - fmt.Fprintln(out) + if err != nil { + return + } + _, err = fmt.Fprintln(out) }() select { case <-ctx.Done(): err := term.Restore(fd, state) if err != nil { - fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) + _, _ = fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) } return "", ctx.Err() case <-done: @@ -439,26 +445,6 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi return nil, err } - report := func(msg string, err error, d time.Duration) { - if d >= 0 { - Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) - } else { - Warnf("%v failed: %v\n", msg, err) - } - } - success := func(msg string, retries int) { - Warnf("%v operation successful after %d retries\n", msg, retries) - } - be = retry.New(be, 15*time.Minute, report, success) - - // wrap backend if a test specified a hook - if opts.backendTestHook != nil { - be, err = opts.backendTestHook(be) - if err != nil { - return nil, err - } - } - s, err := repository.New(be, repository.Options{ Compression: opts.Compression, PackSize: opts.PackSize * 1024 * 1024, @@ -629,12 +615,31 @@ func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options. 
} } + report := func(msg string, err error, d time.Duration) { + if d >= 0 { + Warnf("%v returned error, retrying after %v: %v\n", msg, d, err) + } else { + Warnf("%v failed: %v\n", msg, err) + } + } + success := func(msg string, retries int) { + Warnf("%v operation successful after %d retries\n", msg, retries) + } + be = retry.New(be, 15*time.Minute, report, success) + + // wrap backend if a test specified a hook + if gopts.backendTestHook != nil { + be, err = gopts.backendTestHook(be) + if err != nil { + return nil, err + } + } + return be, nil } // Open the backend specified by a location config. func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { - be, err := innerOpen(ctx, s, gopts, opts, false) if err != nil { return nil, err diff --git a/cmd/restic/integration_helpers_unix_test.go b/cmd/restic/integration_helpers_unix_test.go index df0c4fe63..30852a753 100644 --- a/cmd/restic/integration_helpers_unix_test.go +++ b/cmd/restic/integration_helpers_unix_test.go @@ -13,17 +13,17 @@ import ( func (e *dirEntry) equals(out io.Writer, other *dirEntry) bool { if e.path != other.path { - fmt.Fprintf(out, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path) + _, _ = fmt.Fprintf(out, "%v: path does not match (%v != %v)\n", e.path, e.path, other.path) return false } if e.fi.Mode() != other.fi.Mode() { - fmt.Fprintf(out, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode()) + _, _ = fmt.Fprintf(out, "%v: mode does not match (%v != %v)\n", e.path, e.fi.Mode(), other.fi.Mode()) return false } if !sameModTime(e.fi, other.fi) { - fmt.Fprintf(out, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime()) + _, _ = fmt.Fprintf(out, "%v: ModTime does not match (%v != %v)\n", e.path, e.fi.ModTime(), other.fi.ModTime()) return false } @@ -31,17 +31,17 @@ func (e *dirEntry) equals(out io.Writer, other *dirEntry) bool { stat2, _ := other.fi.Sys().(*syscall.Stat_t) if stat.Uid != stat2.Uid { - fmt.Fprintf(out, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid) + _, _ = fmt.Fprintf(out, "%v: UID does not match (%v != %v)\n", e.path, stat.Uid, stat2.Uid) return false } if stat.Gid != stat2.Gid { - fmt.Fprintf(out, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid) + _, _ = fmt.Fprintf(out, "%v: GID does not match (%v != %v)\n", e.path, stat.Gid, stat2.Gid) return false } if stat.Nlink != stat2.Nlink { - fmt.Fprintf(out, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink) + _, _ = fmt.Fprintf(out, "%v: Number of links do not match (%v != %v)\n", e.path, stat.Nlink, stat2.Nlink) return false } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index df95031dc..777573f26 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -35,7 +35,7 @@ func TestCheckRestoreNoLock(t *testing.T) { testRunCheck(t, env.gopts) snapshotIDs := testListSnapshots(t, env.gopts, 4) - testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0]) + testRunRestore(t, env.gopts, filepath.Join(env.base, "restore"), snapshotIDs[0].String()) } // a listOnceBackend only allows listing once per filetype @@ -177,3 +177,47 @@ func TestFindListOnce(t *testing.T) { // the snapshots can only be listed once, if both lists match then the there has been only a single List() call rtest.Equals(t, thirdSnapshot, snapshotIDs) } + +type failConfigOnceBackend struct { + backend.Backend + failedOnce 
bool +} + +func (be *failConfigOnceBackend) Load(ctx context.Context, h backend.Handle, + length int, offset int64, fn func(rd io.Reader) error) error { + + if !be.failedOnce && h.Type == restic.ConfigFile { + be.failedOnce = true + return fmt.Errorf("oops") + } + return be.Backend.Load(ctx, h, length, offset, fn) +} + +func (be *failConfigOnceBackend) Stat(ctx context.Context, h backend.Handle) (backend.FileInfo, error) { + if !be.failedOnce && h.Type == restic.ConfigFile { + be.failedOnce = true + return backend.FileInfo{}, fmt.Errorf("oops") + } + return be.Backend.Stat(ctx, h) +} + +func TestBackendRetryConfig(t *testing.T) { + env, cleanup := withTestEnvironment(t) + defer cleanup() + + var wrappedBackend *failConfigOnceBackend + // cause config loading to fail once + env.gopts.backendInnerTestHook = func(r backend.Backend) (backend.Backend, error) { + wrappedBackend = &failConfigOnceBackend{Backend: r} + return wrappedBackend, nil + } + + testSetupBackupData(t, env) + rtest.Assert(t, wrappedBackend != nil, "backend not wrapped on init") + rtest.Assert(t, wrappedBackend != nil && wrappedBackend.failedOnce, "config loading was not retried on init") + wrappedBackend = nil + + testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9")}, BackupOptions{}, env.gopts) + rtest.Assert(t, wrappedBackend != nil, "backend not wrapped on backup") + rtest.Assert(t, wrappedBackend != nil && wrappedBackend.failedOnce, "config loading was not retried on init") +} diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 4cb135c48..096c5695c 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -140,7 +140,7 @@ func printExitError(code int, message string) { return } } else { - fmt.Fprintf(globalOptions.stderr, "%v\n", message) + _, _ = fmt.Fprintf(globalOptions.stderr, "%v\n", message) } } @@ -152,10 +152,10 @@ func main() { log.SetOutput(logBuffer) err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) { - fmt.Fprintln(os.Stderr, s) + _, _ = fmt.Fprintln(os.Stderr, s) }) if err != nil { - fmt.Fprintln(os.Stderr, err) + _, _ = fmt.Fprintln(os.Stderr, err) Exit(1) } diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 0b35d1a1e..720bfc11d 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -568,6 +568,10 @@ The number of concurrent connections to the Azure Blob Storage service can be se ``-o azure.connections=10`` switch. By default, at most five parallel connections are established. +The access tier of the blobs uploaded to the Azure Blob Storage service can be set with the +``-o azure.access-tier=Cool`` switch. The allowed values are ``Hot``, ``Cool`` or ``Cold``. +If unspecified, the default is inferred from the default configured on the storage account. + Google Cloud Storage ******************** diff --git a/doc/050_restore.rst b/doc/050_restore.rst index 1a920fad4..9558ab1d4 100644 --- a/doc/050_restore.rst +++ b/doc/050_restore.rst @@ -132,6 +132,10 @@ options will be deleted. For example, the command ``restic -r /srv/restic-repo restore 79766175:/work --target /tmp/restore-work --include /foo --delete`` would only delete files within ``/tmp/restore-work/foo``. +When using ``--target / --delete``, the ``restore`` command only works if either an ``--include`` +or ``--exclude`` option is also specified. This ensures that one cannot accidentally delete +the whole system.
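A sketch of an invocation that satisfies the rule described above, reusing the placeholder repository from the surrounding examples; the snapshot reference and include path are illustrative only:

```
restic -r /srv/restic-repo restore latest --target / --include /home --delete
```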
+ Dry run ------- diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index c619ead7c..39a6dbc7f 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -214,9 +214,9 @@ Summary is the last output line in a successful backup. +---------------------------+---------------------------------------------------------+ | ``dirs_unmodified`` | Number of directories that did not change | +---------------------------+---------------------------------------------------------+ -| ``data_blobs`` | Number of data blobs | +| ``data_blobs`` | Number of data blobs added | +---------------------------+---------------------------------------------------------+ -| ``tree_blobs`` | Number of tree blobs | +| ``tree_blobs`` | Number of tree blobs added | +---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ @@ -226,6 +226,10 @@ Summary is the last output line in a successful backup. +---------------------------+---------------------------------------------------------+ | ``total_bytes_processed`` | Total number of bytes processed | +---------------------------+---------------------------------------------------------+ +| ``backup_start`` | Time at which the backup was started | ++---------------------------+---------------------------------------------------------+ +| ``backup_end`` | Time at which the backup was completed | ++---------------------------+---------------------------------------------------------+ | ``total_duration`` | Total time it took for the operation to complete | +---------------------------+---------------------------------------------------------+ | ``snapshot_id`` | ID of the new snapshot. Field is omitted if snapshot | @@ -559,6 +563,8 @@ Status +----------------------+------------------------------------------------------------+ |``files_skipped`` | Files skipped due to overwrite setting | +----------------------+------------------------------------------------------------+ +|``files_deleted`` | Files deleted | ++----------------------+------------------------------------------------------------+ |``total_bytes`` | Total number of bytes in restore set | +----------------------+------------------------------------------------------------+ |``bytes_restored`` | Number of bytes restored | @@ -611,6 +617,8 @@ Summary +----------------------+------------------------------------------------------------+ |``files_skipped`` | Files skipped due to overwrite setting | +----------------------+------------------------------------------------------------+ +|``files_deleted`` | Files deleted | ++----------------------+------------------------------------------------------------+ |``total_bytes`` | Total number of bytes in restore set | +----------------------+------------------------------------------------------------+ |``bytes_restored`` | Number of bytes restored | @@ -676,9 +684,9 @@ was created. 
+---------------------------+---------------------------------------------------------+ | ``dirs_unmodified`` | Number of directories that did not change | +---------------------------+---------------------------------------------------------+ -| ``data_blobs`` | Number of data blobs | +| ``data_blobs`` | Number of data blobs added | +---------------------------+---------------------------------------------------------+ -| ``tree_blobs`` | Number of tree blobs | +| ``tree_blobs`` | Number of tree blobs added | +---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ diff --git a/doc/bash-completion.sh b/doc/bash-completion.sh index 0517fdf7c..985d0e369 100644 --- a/doc/bash-completion.sh +++ b/doc/bash-completion.sh @@ -2177,6 +2177,12 @@ _restic_list() must_have_one_flag=() must_have_one_noun=() + must_have_one_noun+=("blobs") + must_have_one_noun+=("index") + must_have_one_noun+=("keys") + must_have_one_noun+=("locks") + must_have_one_noun+=("packs") + must_have_one_noun+=("snapshots") noun_aliases=() } diff --git a/go.mod b/go.mod index ae00c92bf..54462ea9b 100644 --- a/go.mod +++ b/go.mod @@ -2,10 +2,10 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.43.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 - github.com/Backblaze/blazer v0.6.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 + github.com/Backblaze/blazer v0.7.1 github.com/anacrolix/fuse v0.3.1 github.com/cenkalti/backoff/v4 v4.3.0 github.com/cespare/xxhash/v2 v2.3.0 @@ -19,29 +19,29 @@ require ( github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 - github.com/pkg/sftp v1.13.6 + github.com/pkg/sftp v1.13.7 github.com/pkg/xattr v0.4.10 github.com/restic/chunker v0.4.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.6.0 - golang.org/x/crypto v0.27.0 - golang.org/x/net v0.29.0 + golang.org/x/crypto v0.28.0 + golang.org/x/net v0.30.0 golang.org/x/oauth2 v0.23.0 - golang.org/x/sync v0.8.0 - golang.org/x/sys v0.25.0 - golang.org/x/term v0.24.0 - golang.org/x/text v0.18.0 - golang.org/x/time v0.6.0 - google.golang.org/api v0.199.0 + golang.org/x/sync v0.9.0 + golang.org/x/sys v0.27.0 + golang.org/x/term v0.25.0 + golang.org/x/text v0.20.0 + golang.org/x/time v0.7.0 + google.golang.org/api v0.204.0 ) require ( - cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/auth v0.9.5 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect - cloud.google.com/go/iam v1.2.0 // indirect + cloud.google.com/go/iam v1.2.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect @@ -73,11 +73,11 @@ require ( go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/metric v1.29.0 // indirect 
go.opentelemetry.io/otel/trace v1.29.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/grpc v1.67.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index fdbb820ea..bb125f0c0 100644 --- a/go.sum +++ b/go.sum @@ -1,32 +1,36 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= -cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= -cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= -cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= -cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= -cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= 
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 h1:Be6KInmFEKV81c0pOAEbRYehLMwmmGI1exuFj248AMk= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfBVnivScjs2ypRfimjEW0qPVLGgJkZlrIOA= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= -github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY= +github.com/Backblaze/blazer v0.7.1 h1:J43PbFj6hXLg1jvCNr+rQoAsxzKK0IP7ftl1ReCwpcQ= +github.com/Backblaze/blazer v0.7.1/go.mod h1:MhntL1nMpIuoqrPP6TnZu/xTydMgOAe/Xm6KongbjKs= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= github.com/anacrolix/envpprof v1.3.0 h1:WJt9bpuT7A/CDCxPOv/eeZqHWlle/Y0keJUvc6tcJDk= @@ -54,6 +58,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvyukov/go-fuzz 
v0.0.0-20220726122315-1d375ef9f9f6/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= @@ -126,6 +132,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -162,8 +170,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/pkg/sftp v1.13.7 h1:uv+I3nNJvlKZIQGSr8JVQLNHFU9YhhNpvC14Y6KgmSM= +github.com/pkg/sftp v1.13.7/go.mod h1:KMKI0t3T6hfA+lTR/ssZdunHo+uwq7ghoN09/FSu3DY= github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -171,6 +179,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= +github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/restic/chunker v0.4.0 h1:YUPYCUn70MYP7VO4yllypp2SjmsRhRJaad3xKu1QFRw= github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanDHItzyw= github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s= @@ -212,8 +222,8 @@ go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= 
go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= @@ -222,9 +232,9 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= @@ -235,6 +245,7 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -245,10 +256,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= 
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -258,8 +270,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -275,24 +287,31 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 
h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -302,30 +321,31 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= -google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto 
v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= +google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= +google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= -google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -335,8 +355,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/helpers/build-release-binaries/main.go b/helpers/build-release-binaries/main.go index 81d126b00..8fe8c24fb 100644 --- a/helpers/build-release-binaries/main.go +++ b/helpers/build-release-binaries/main.go @@ -243,14 +243,15 @@ func buildTargets(sourceDir, outputDir string, targets map[string][]string) { } var defaultBuildTargets = map[string][]string{ - "aix": {"ppc64"}, - "darwin": {"amd64", "arm64"}, - "freebsd": {"386", "amd64", "arm"}, - "linux": {"386", "amd64", "arm", "arm64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "riscv64", "s390x"}, - "netbsd": {"386", "amd64"}, - "openbsd": {"386", "amd64"}, - "windows": 
{"386", "amd64"}, - "solaris": {"amd64"}, + "aix": {"ppc64"}, + "darwin": {"amd64", "arm64"}, + "dragonfly": {"amd64"}, + "freebsd": {"386", "amd64", "arm"}, + "linux": {"386", "amd64", "arm", "arm64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "riscv64", "s390x"}, + "netbsd": {"386", "amd64"}, + "openbsd": {"386", "amd64"}, + "windows": {"386", "amd64"}, + "solaris": {"amd64"}, } func downloadModules(sourceDir string) { diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go index ba3de38a5..607d16936 100644 --- a/helpers/prepare-release/main.go +++ b/helpers/prepare-release/main.go @@ -31,7 +31,7 @@ var opts = struct { var versionRegex = regexp.MustCompile(`^\d+\.\d+\.\d+$`) func init() { - pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches as 'master'") + pflag.BoolVar(&opts.IgnoreBranchName, "ignore-branch-name", false, "allow releasing from other branches than 'master'") pflag.BoolVar(&opts.IgnoreUncommittedChanges, "ignore-uncommitted-changes", false, "allow uncommitted changes") pflag.BoolVar(&opts.IgnoreChangelogVersion, "ignore-changelog-version", false, "ignore missing entry in CHANGELOG.md") pflag.BoolVar(&opts.IgnoreChangelogReleaseDate, "ignore-changelog-release-date", false, "ignore missing subdir with date in changelog/") @@ -128,17 +128,22 @@ func uncommittedChanges(dirs ...string) string { return string(changes) } -func preCheckBranchMaster() { - if opts.IgnoreBranchName { - return - } - +func getBranchName() string { branch, err := exec.Command("git", "rev-parse", "--abbrev-ref", "HEAD").Output() if err != nil { die("error running 'git': %v", err) } - if strings.TrimSpace(string(branch)) != "master" { + return strings.TrimSpace(string(branch)) +} + +func preCheckBranchMaster() { + if opts.IgnoreBranchName { + return + } + + branch := getBranchName() + if branch != "master" { die("wrong branch: %s", branch) } } @@ -449,6 +454,7 @@ func main() { } preCheckBranchMaster() + branch := getBranchName() preCheckUncommittedChanges() preCheckVersionExists() preCheckDockerBuilderGoVersion() @@ -485,5 +491,5 @@ func main() { msg("done, output dir is %v", opts.OutputDir) - msg("now run:\n\ngit push --tags origin master\n%s\n\nrm -rf %q", dockerCmds, sourceDir) + msg("now run:\n\ngit push --tags origin %s\n%s\n\nrm -rf %q", branch, dockerCmds, sourceDir) } diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index d8f0157b1..55b6ee4b3 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -25,7 +25,7 @@ type SelectByNameFunc func(item string) bool // SelectFunc returns true for all items that should be included (files and // dirs). If false is returned, files are ignored and dirs are not even walked. -type SelectFunc func(item string, fi os.FileInfo, fs fs.FS) bool +type SelectFunc func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool // ErrorFunc is called when an error during archiving occurs. When nil is // returned, the archiver continues, otherwise it aborts and passes the error @@ -49,6 +49,8 @@ type ChangeStats struct { } type Summary struct { + BackupStart time.Time + BackupEnd time.Time Files, Dirs ChangeStats ProcessedBytes uint64 ItemStats @@ -64,6 +66,11 @@ func (s *ItemStats) Add(other ItemStats) { s.TreeSizeInRepo += other.TreeSizeInRepo } +// ToNoder returns a restic.Node for a File. 
+type ToNoder interface { + ToNode(ignoreXattrListError bool) (*restic.Node, error) +} + type archiverRepo interface { restic.Loader restic.BlobSaver @@ -182,7 +189,7 @@ func New(repo archiverRepo, filesystem fs.FS, opts Options) *Archiver { arch := &Archiver{ Repo: repo, SelectByName: func(_ string) bool { return true }, - Select: func(_ string, _ os.FileInfo, _ fs.FS) bool { return true }, + Select: func(_ string, _ *fs.ExtendedFileInfo, _ fs.FS) bool { return true }, FS: filesystem, Options: opts.ApplyDefaults(), @@ -255,8 +262,8 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I } // nodeFromFileInfo returns the restic node from an os.FileInfo. -func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - node, err := arch.FS.NodeFromFileInfo(filename, fi, ignoreXattrListError) +func (arch *Archiver) nodeFromFileInfo(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*restic.Node, error) { + node, err := meta.ToNode(ignoreXattrListError) if !arch.WithAtime { node.AccessTime = node.ModTime } @@ -306,20 +313,14 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { // saveDir stores a directory in the repo and returns the node. snPath is the // path within the current snapshot. -func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete fileCompleteFunc) (d futureNode, err error) { +func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, meta fs.File, previous *restic.Tree, complete fileCompleteFunc) (d futureNode, err error) { debug.Log("%v %v", snPath, dir) - treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi, false) + treeNode, names, err := arch.dirToNodeAndEntries(snPath, dir, meta) if err != nil { return futureNode{}, err } - names, err := fs.Readdirnames(arch.FS, dir, fs.O_NOFOLLOW) - if err != nil { - return futureNode{}, err - } - sort.Strings(names) - nodes := make([]futureNode, 0, len(names)) for _, name := range names { @@ -357,6 +358,29 @@ func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi return fn, nil } +func (arch *Archiver) dirToNodeAndEntries(snPath, dir string, meta fs.File) (node *restic.Node, names []string, err error) { + err = meta.MakeReadable() + if err != nil { + return nil, nil, fmt.Errorf("openfile for readdirnames failed: %w", err) + } + + node, err = arch.nodeFromFileInfo(snPath, dir, meta, false) + if err != nil { + return nil, nil, err + } + if node.Type != restic.NodeTypeDir { + return nil, nil, fmt.Errorf("directory %q changed type, refusing to archive", snPath) + } + + names, err = meta.Readdirnames(-1) + if err != nil { + return nil, nil, fmt.Errorf("readdirnames %v failed: %w", dir, err) + } + sort.Strings(names) + + return node, names, nil +} + // futureNode holds a reference to a channel that returns a FutureNodeResult // or a reference to an already existing result. 
If the result is available // immediately, then storing a reference directly requires less memory than @@ -433,21 +457,47 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous return futureNode{}, false, err } + filterError := func(err error) (futureNode, bool, error) { + err = arch.error(abstarget, err) + if err != nil { + return futureNode{}, false, errors.WithStack(err) + } + return futureNode{}, true, nil + } + filterNotExist := func(err error) error { + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } // exclude files by path before running Lstat to reduce number of lstat calls if !arch.SelectByName(abstarget) { debug.Log("%v is excluded by path", target) return futureNode{}, true, nil } + meta, err := arch.FS.OpenFile(target, fs.O_NOFOLLOW, true) + if err != nil { + debug.Log("open metadata for %v returned error: %v", target, err) + // ignore if file disappeared since it was returned by readdir + return filterError(filterNotExist(err)) + } + closeFile := true + defer func() { + if closeFile { + cerr := meta.Close() + if err == nil { + err = cerr + } + } + }() + // get file info and run remaining select functions that require file information - fi, err := arch.FS.Lstat(target) + fi, err := meta.Stat() if err != nil { debug.Log("lstat() for %v returned error: %v", target, err) - err = arch.error(abstarget, err) - if err != nil { - return futureNode{}, false, errors.WithStack(err) - } - return futureNode{}, true, nil + // ignore if file disappeared since it was returned by readdir + return filterError(filterNotExist(err)) } if !arch.Select(abstarget, fi, arch.FS) { debug.Log("%v is excluded", target) @@ -455,17 +505,17 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous } switch { - case fi.Mode().IsRegular(): + case fi.Mode.IsRegular(): debug.Log(" %v regular file", target) // check if the file has not changed before performing a fopen operation (more expensive, specially // in network filesystems) - if previous != nil && !fileChanged(arch.FS, fi, previous, arch.ChangeIgnoreFlags) { + if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) { if arch.allBlobsPresent(previous) { debug.Log("%v hasn't changed, using old list of blobs", target) arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) arch.CompleteBlob(previous.Size) - node, err := arch.nodeFromFileInfo(snPath, target, fi, false) + node, err := arch.nodeFromFileInfo(snPath, target, meta, false) if err != nil { return futureNode{}, false, err } @@ -492,40 +542,28 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // reopen file and do an fstat() on the open file to check it is still // a file (and has not been exchanged for e.g. 
a symlink) - file, err := arch.FS.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + err := meta.MakeReadable() if err != nil { - debug.Log("Openfile() for %v returned error: %v", target, err) - err = arch.error(abstarget, err) - if err != nil { - return futureNode{}, false, errors.WithStack(err) - } - return futureNode{}, true, nil + debug.Log("MakeReadable() for %v returned error: %v", target, err) + return filterError(err) } - fi, err = file.Stat() + fi, err := meta.Stat() if err != nil { debug.Log("stat() on opened file %v returned error: %v", target, err) - _ = file.Close() - err = arch.error(abstarget, err) - if err != nil { - return futureNode{}, false, errors.WithStack(err) - } - return futureNode{}, true, nil + return filterError(err) } // make sure it's still a file - if !fi.Mode().IsRegular() { - err = errors.Errorf("file %v changed type, refusing to archive", fi.Name()) - _ = file.Close() - err = arch.error(abstarget, err) - if err != nil { - return futureNode{}, false, err - } - return futureNode{}, true, nil + if !fi.Mode.IsRegular() { + err = errors.Errorf("file %q changed type, refusing to archive", target) + return filterError(err) } + closeFile = false + // Save will close the file, we don't need to do that - fn = arch.fileSaver.Save(ctx, snPath, target, file, fi, func() { + fn = arch.fileSaver.Save(ctx, snPath, target, meta, func() { arch.StartFile(snPath) }, func() { arch.trackItem(snPath, nil, nil, ItemStats{}, 0) @@ -533,7 +571,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous arch.trackItem(snPath, previous, node, stats, time.Since(start)) }) - case fi.IsDir(): + case fi.Mode.IsDir(): debug.Log(" %v dir", target) snItem := snPath + "/" @@ -545,7 +583,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous return futureNode{}, false, err } - fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree, + fn, err = arch.saveDir(ctx, snPath, target, meta, oldSubtree, func(node *restic.Node, stats ItemStats) { arch.trackItem(snItem, previous, node, stats, time.Since(start)) }) @@ -554,14 +592,14 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous return futureNode{}, false, err } - case fi.Mode()&os.ModeSocket > 0: + case fi.Mode&os.ModeSocket > 0: debug.Log(" %v is a socket, ignoring", target) return futureNode{}, true, nil default: debug.Log(" %v other", target) - node, err := arch.nodeFromFileInfo(snPath, target, fi, false) + node, err := arch.nodeFromFileInfo(snPath, target, meta, false) if err != nil { return futureNode{}, false, err } @@ -580,27 +618,26 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous // fileChanged tries to detect whether a file's content has changed compared // to the contents of node, which describes the same path in the parent backup. // It should only be run for regular files. -func fileChanged(fs fs.FS, fi os.FileInfo, node *restic.Node, ignoreFlags uint) bool { +func fileChanged(fi *fs.ExtendedFileInfo, node *restic.Node, ignoreFlags uint) bool { switch { case node == nil: return true case node.Type != restic.NodeTypeFile: // We're only called for regular files, so this is a type change. 
return true - case uint64(fi.Size()) != node.Size: + case uint64(fi.Size) != node.Size: return true - case !fi.ModTime().Equal(node.ModTime): + case !fi.ModTime.Equal(node.ModTime): return true } checkCtime := ignoreFlags&ChangeIgnoreCtime == 0 checkInode := ignoreFlags&ChangeIgnoreInode == 0 - extFI := fs.ExtendedStat(fi) switch { - case checkCtime && !extFI.ChangeTime.Equal(node.ChangeTime): + case checkCtime && !fi.ChangeTime.Equal(node.ChangeTime): return true - case checkInode && node.Inode != extFI.Inode: + case checkInode && node.Inode != fi.Inode: return true } @@ -612,22 +649,6 @@ func join(elem ...string) string { return path.Join(elem...) } -// statDir returns the file info for the directory. Symbolic links are -// resolved. If the target directory is not a directory, an error is returned. -func (arch *Archiver) statDir(dir string) (os.FileInfo, error) { - fi, err := arch.FS.Stat(dir) - if err != nil { - return nil, errors.WithStack(err) - } - - tpe := fi.Mode() & (os.ModeType | os.ModeCharDevice) - if tpe != os.ModeDir { - return fi, errors.Errorf("path is not a directory: %v", dir) - } - - return fi, nil -} - // saveTree stores a Tree in the repo, returned is the tree. snPath is the path // within the current snapshot. func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, previous *restic.Tree, complete fileCompleteFunc) (futureNode, int, error) { @@ -638,15 +659,8 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, return futureNode{}, 0, errors.Errorf("FileInfoPath for %v is empty", snPath) } - fi, err := arch.statDir(atree.FileInfoPath) - if err != nil { - return futureNode{}, 0, err - } - - debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath) - // in some cases reading xattrs for directories above the backup source is not allowed - // thus ignore errors for such folders. - node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true) + var err error + node, err = arch.dirPathToNode(snPath, atree.FileInfoPath) if err != nil { return futureNode{}, 0, err } @@ -717,6 +731,31 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree, return fn, len(nodes), nil } +func (arch *Archiver) dirPathToNode(snPath, target string) (node *restic.Node, err error) { + meta, err := arch.FS.OpenFile(target, 0, true) + if err != nil { + return nil, err + } + defer func() { + cerr := meta.Close() + if err == nil { + err = cerr + } + }() + + debug.Log("%v, reading dir node data from %v", snPath, target) + // in some cases reading xattrs for directories above the backup source is not allowed + // thus ignore errors for such folders. + node, err = arch.nodeFromFileInfo(snPath, target, meta, true) + if err != nil { + return nil, err + } + if node.Type != restic.NodeTypeDir { + return nil, errors.Errorf("path is not a directory: %v", target) + } + return node, err +} + // resolveRelativeTargets replaces targets that only contain relative // directories ("." or "../../") with the contents of the directory. Each // element of target is processed with fs.Clean(). @@ -811,7 +850,9 @@ func (arch *Archiver) stopWorkers() { // Snapshot saves several targets and returns a snapshot. 
func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, *Summary, error) { - arch.summary = &Summary{} + arch.summary = &Summary{ + BackupStart: opts.BackupStart, + } cleanTargets, err := resolveRelativeTargets(arch.FS, targets) if err != nil { @@ -894,9 +935,10 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps sn.Parent = opts.ParentSnapshot.ID() } sn.Tree = &rootTreeID + arch.summary.BackupEnd = time.Now() sn.Summary = &restic.SnapshotSummary{ - BackupStart: opts.BackupStart, - BackupEnd: time.Now(), + BackupStart: arch.summary.BackupStart, + BackupEnd: arch.summary.BackupEnd, FilesNew: arch.summary.Files.New, FilesChanged: arch.summary.Files.Changed, diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index d4f15c80b..fcc3d465d 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -76,17 +76,12 @@ func saveFile(t testing.TB, repo archiverRepo, filename string, filesystem fs.FS startCallback = true } - file, err := arch.FS.OpenFile(filename, fs.O_RDONLY|fs.O_NOFOLLOW, 0) + file, err := arch.FS.OpenFile(filename, fs.O_NOFOLLOW, false) if err != nil { t.Fatal(err) } - fi, err := file.Stat() - if err != nil { - t.Fatal(err) - } - - res := arch.fileSaver.Save(ctx, "/", filename, file, fi, start, completeReading, complete) + res := arch.fileSaver.Save(ctx, "/", filename, file, start, completeReading, complete) fnr := res.take(ctx) if fnr.err != nil { @@ -521,13 +516,13 @@ func chmodTwice(t testing.TB, name string) { rtest.OK(t, err) } -func lstat(t testing.TB, name string) os.FileInfo { +func lstat(t testing.TB, name string) *fs.ExtendedFileInfo { fi, err := os.Lstat(name) if err != nil { t.Fatal(err) } - return fi + return fs.ExtendedStat(fi) } func setTimestamp(t testing.TB, filename string, atime, mtime time.Time) { @@ -556,11 +551,12 @@ func rename(t testing.TB, oldname, newname string) { } } -func nodeFromFI(t testing.TB, fs fs.FS, filename string, fi os.FileInfo) *restic.Node { - node, err := fs.NodeFromFileInfo(filename, fi, false) - if err != nil { - t.Fatal(err) - } +func nodeFromFile(t testing.TB, localFs fs.FS, filename string) *restic.Node { + meta, err := localFs.OpenFile(filename, fs.O_NOFOLLOW, true) + rtest.OK(t, err) + node, err := meta.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) return node } @@ -664,7 +660,7 @@ func TestFileChanged(t *testing.T) { rename(t, filename, tempname) save(t, filename, defaultContent) remove(t, tempname) - setTimestamp(t, filename, fi.ModTime(), fi.ModTime()) + setTimestamp(t, filename, fi.ModTime, fi.ModTime) }, ChangeIgnore: ChangeIgnoreCtime | ChangeIgnoreInode, SameFile: true, @@ -687,10 +683,11 @@ func TestFileChanged(t *testing.T) { save(t, filename, content) fs := &fs.Local{} - fiBefore := lstat(t, filename) - node := nodeFromFI(t, fs, filename, fiBefore) + fiBefore, err := fs.Lstat(filename) + rtest.OK(t, err) + node := nodeFromFile(t, fs, filename) - if fileChanged(fs, fiBefore, node, 0) { + if fileChanged(fiBefore, node, 0) { t.Fatalf("unchanged file detected as changed") } @@ -700,12 +697,12 @@ func TestFileChanged(t *testing.T) { if test.SameFile { // file should be detected as unchanged - if fileChanged(fs, fiAfter, node, test.ChangeIgnore) { + if fileChanged(fiAfter, node, test.ChangeIgnore) { t.Fatalf("unmodified file detected as changed") } } else { // file should be detected as changed - if !fileChanged(fs, fiAfter, node, test.ChangeIgnore) && 
!test.SameFile { + if !fileChanged(fiAfter, node, test.ChangeIgnore) && !test.SameFile { t.Fatalf("modified file detected as unchanged") } } @@ -722,16 +719,16 @@ func TestFilChangedSpecialCases(t *testing.T) { t.Run("nil-node", func(t *testing.T) { fi := lstat(t, filename) - if !fileChanged(&fs.Local{}, fi, nil, 0) { + if !fileChanged(fi, nil, 0) { t.Fatal("nil node detected as unchanged") } }) t.Run("type-change", func(t *testing.T) { fi := lstat(t, filename) - node := nodeFromFI(t, &fs.Local{}, filename, fi) - node.Type = "restic.NodeTypeSymlink" - if !fileChanged(&fs.Local{}, fi, node, 0) { + node := nodeFromFile(t, &fs.Local{}, filename) + node.Type = restic.NodeTypeSymlink + if !fileChanged(fi, node, 0) { t.Fatal("node with changed type detected as unchanged") } }) @@ -834,7 +831,8 @@ func TestArchiverSaveDir(t *testing.T) { wg, ctx := errgroup.WithContext(context.Background()) repo.StartPackUploader(ctx, wg) - arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + testFS := fs.Track{FS: fs.Local{}} + arch := New(repo, testFS, Options{}) arch.runWorkers(ctx, wg) arch.summary = &Summary{} @@ -846,15 +844,11 @@ func TestArchiverSaveDir(t *testing.T) { back := rtest.Chdir(t, chdir) defer back() - fi, err := os.Lstat(test.target) - if err != nil { - t.Fatal(err) - } - - ft, err := arch.saveDir(ctx, "/", test.target, fi, nil, nil) - if err != nil { - t.Fatal(err) - } + meta, err := testFS.OpenFile(test.target, fs.O_NOFOLLOW, true) + rtest.OK(t, err) + ft, err := arch.saveDir(ctx, "/", test.target, meta, nil, nil) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) fnr := ft.take(ctx) node, stats := fnr.node, fnr.stats @@ -916,19 +910,16 @@ func TestArchiverSaveDirIncremental(t *testing.T) { wg, ctx := errgroup.WithContext(context.TODO()) repo.StartPackUploader(ctx, wg) - arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) + testFS := fs.Track{FS: fs.Local{}} + arch := New(repo, testFS, Options{}) arch.runWorkers(ctx, wg) arch.summary = &Summary{} - fi, err := os.Lstat(tempdir) - if err != nil { - t.Fatal(err) - } - - ft, err := arch.saveDir(ctx, "/", tempdir, fi, nil, nil) - if err != nil { - t.Fatal(err) - } + meta, err := testFS.OpenFile(tempdir, fs.O_NOFOLLOW, true) + rtest.OK(t, err) + ft, err := arch.saveDir(ctx, "/", tempdir, meta, nil, nil) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) fnr := ft.take(ctx) node, stats := fnr.node, fnr.stats @@ -1530,7 +1521,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo, _ fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { return true }, }, @@ -1547,7 +1538,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo, _ fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { return false }, err: "snapshot is empty", @@ -1574,7 +1565,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo, _ fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { return filepath.Ext(item) != ".txt" }, }, @@ -1598,7 +1589,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { }, "other": TestFile{Content: "another file"}, }, - selFn: func(item string, fi os.FileInfo, fs fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { return fs.Base(item) != "subdir" }, }, @@ -1607,7 +1598,7 @@ func 
TestArchiverSnapshotSelect(t *testing.T) { src: TestDir{ "foo": TestFile{Content: "foo"}, }, - selFn: func(item string, fi os.FileInfo, fs fs.FS) bool { + selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { return fs.IsAbs(item) }, }, @@ -1665,8 +1656,8 @@ type MockFS struct { bytesRead map[string]int // tracks bytes read from all opened files } -func (m *MockFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { - f, err := m.FS.OpenFile(name, flag, perm) +func (m *MockFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { + f, err := m.FS.OpenFile(name, flag, metadataOnly) if err != nil { return f, err } @@ -1692,14 +1683,17 @@ func (f MockFile) Read(p []byte) (int, error) { } func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { - rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew) - rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged) - rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified) - rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew) - rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged) - rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified) - rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed) - rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed) + t.Helper() + rtest.Equals(t, stat.BackupStart, sn.Summary.BackupStart, "BackupStart") + // BackupEnd is set to time.Now() and can't be compared to a fixed value + rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew, "FilesNew") + rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged, "FilesChanged") + rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified, "FilesUnmodified") + rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew, "DirsNew") + rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged, "DirsChanged") + rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified, "DirsUnmodified") + rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed, "TotalBytesProcessed") + rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed, "TotalFilesProcessed") bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) @@ -2053,12 +2047,12 @@ type TrackFS struct { m sync.Mutex } -func (m *TrackFS) OpenFile(name string, flag int, perm os.FileMode) (fs.File, error) { +func (m *TrackFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { m.m.Lock() m.opened[name]++ m.m.Unlock() - return m.FS.OpenFile(name, flag, perm) + return m.FS.OpenFile(name, flag, metadataOnly) } type failSaveRepo struct { @@ -2207,48 +2201,51 @@ func snapshot(t testing.TB, repo archiverRepo, fs fs.FS, parent *restic.Snapshot return snapshot, node } -// StatFS allows overwriting what is returned by the Lstat function. 
-type StatFS struct { +type overrideFS struct { fs.FS - - OverrideLstat map[string]os.FileInfo - OnlyOverrideStat bool + overrideFI *fs.ExtendedFileInfo + resetFIOnRead bool + overrideNode *restic.Node + overrideErr error } -func (fs *StatFS) Lstat(name string) (os.FileInfo, error) { - if !fs.OnlyOverrideStat { - if fi, ok := fs.OverrideLstat[fixpath(name)]; ok { - return fi, nil - } +func (m *overrideFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { + f, err := m.FS.OpenFile(name, flag, metadataOnly) + if err != nil { + return f, err } - return fs.FS.Lstat(name) -} - -func (fs *StatFS) OpenFile(name string, flags int, perm os.FileMode) (fs.File, error) { - if fi, ok := fs.OverrideLstat[fixpath(name)]; ok { - f, err := fs.FS.OpenFile(name, flags, perm) - if err != nil { - return nil, err - } - - wrappedFile := fileStat{ - File: f, - fi: fi, - } - return wrappedFile, nil + if filepath.Base(name) == "testfile" || filepath.Base(name) == "testdir" { + return &overrideFile{f, m}, nil } - - return fs.FS.OpenFile(name, flags, perm) + return f, nil } -type fileStat struct { +type overrideFile struct { fs.File - fi os.FileInfo + ofs *overrideFS } -func (f fileStat) Stat() (os.FileInfo, error) { - return f.fi, nil +func (f overrideFile) Stat() (*fs.ExtendedFileInfo, error) { + if f.ofs.overrideFI == nil { + return f.File.Stat() + } + return f.ofs.overrideFI, nil + +} + +func (f overrideFile) MakeReadable() error { + if f.ofs.resetFIOnRead { + f.ofs.overrideFI = nil + } + return f.File.MakeReadable() +} + +func (f overrideFile) ToNode(ignoreXattrListError bool) (*restic.Node, error) { + if f.ofs.overrideNode == nil { + return f.File.ToNode(ignoreXattrListError) + } + return f.ofs.overrideNode, f.ofs.overrideErr } // used by wrapFileInfo, use untyped const in order to avoid having a version @@ -2276,17 +2273,18 @@ func TestMetadataChanged(t *testing.T) { // get metadata fi := lstat(t, "testfile") localFS := &fs.Local{} - want, err := localFS.NodeFromFileInfo("testfile", fi, false) - if err != nil { - t.Fatal(err) - } + meta, err := localFS.OpenFile("testfile", fs.O_NOFOLLOW, true) + rtest.OK(t, err) + want, err := meta.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) - fs := &StatFS{ - FS: localFS, - OverrideLstat: map[string]os.FileInfo{ - "testfile": fi, - }, + fs := &overrideFS{ + FS: localFS, + overrideFI: fi, + overrideNode: &restic.Node{}, } + *fs.overrideNode = *want sn, node2 := snapshot(t, repo, fs, nil, "testfile") @@ -2305,26 +2303,31 @@ func TestMetadataChanged(t *testing.T) { t.Fatalf("metadata does not match:\n%v", cmp.Diff(want, node2)) } - // modify the mode by wrapping it in a new struct, uses the consts defined above - fs.OverrideLstat["testfile"] = wrapFileInfo(fi) + // modify the mode and UID/GID + modFI := *fi + modFI.Mode = mockFileInfoMode + if runtime.GOOS != "windows" { + modFI.UID = mockFileInfoUID + modFI.GID = mockFileInfoGID + } + + fs.overrideFI = &modFI + rtest.Assert(t, !fileChanged(fs.overrideFI, node2, 0), "testfile must not be considered as changed") // set the override values in the 'want' node which - want.Mode = 0400 + want.Mode = mockFileInfoMode // ignore UID and GID on Windows if runtime.GOOS != "windows" { - want.UID = 51234 - want.GID = 51235 + want.UID = mockFileInfoUID + want.GID = mockFileInfoGID } - // no user and group name - want.User = "" - want.Group = "" + // update mock node accordingly + fs.overrideNode.Mode = want.Mode + fs.overrideNode.UID = want.UID + fs.overrideNode.GID = want.GID // make another snapshot _, 
node3 := snapshot(t, repo, fs, sn, "testfile") - // Override username and group to empty string - in case underlying system has user with UID 51234 - // See https://github.com/restic/restic/issues/2372 - node3.User = "" - node3.Group = "" // make sure that metadata was recorded successfully if !cmp.Equal(want, node3) { @@ -2337,62 +2340,83 @@ func TestMetadataChanged(t *testing.T) { checker.TestCheckRepo(t, repo, false) } -func TestRacyFileSwap(t *testing.T) { +func TestRacyFileTypeSwap(t *testing.T) { files := TestDir{ - "file": TestFile{ + "testfile": TestFile{ Content: "foo bar test file", }, + "testdir": TestDir{}, } - tempdir, repo := prepareTempdirRepoSrc(t, files) + for _, dirError := range []bool{false, true} { + desc := "file changed type" + if dirError { + desc = "dir changed type" + } + t.Run(desc, func(t *testing.T) { + tempdir, repo := prepareTempdirRepoSrc(t, files) - back := rtest.Chdir(t, tempdir) - defer back() + back := rtest.Chdir(t, tempdir) + defer back() - // get metadata of current folder - fi := lstat(t, ".") - tempfile := filepath.Join(tempdir, "file") + // get metadata of current folder + var fakeName, realName string + if dirError { + // lstat claims this is a directory, but it's actually a file + fakeName = "testdir" + realName = "testfile" + } else { + fakeName = "testfile" + realName = "testdir" + } + fakeFI := lstat(t, fakeName) + tempfile := filepath.Join(tempdir, realName) - statfs := &StatFS{ - FS: fs.Local{}, - OverrideLstat: map[string]os.FileInfo{ - tempfile: fi, - }, - OnlyOverrideStat: true, + statfs := &overrideFS{ + FS: fs.Local{}, + overrideFI: fakeFI, + resetFIOnRead: true, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wg, ctx := errgroup.WithContext(ctx) + repo.StartPackUploader(ctx, wg) + + arch := New(repo, fs.Track{FS: statfs}, Options{}) + arch.Error = func(item string, err error) error { + t.Logf("archiver error as expected for %v: %v", item, err) + return err + } + arch.runWorkers(ctx, wg) + + // fs.Track will panic if the file was not closed + _, excluded, err := arch.save(ctx, "/", tempfile, nil) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "changed type, refusing to archive"), "save() returned wrong error: %v", err) + tpe := "file" + if dirError { + tpe = "directory" + } + rtest.Assert(t, strings.Contains(err.Error(), tpe+" "), "unexpected item type in error: %v", err) + rtest.Assert(t, !excluded, "Save() excluded the node, that's unexpected") + }) } +} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() +type mockToNoder struct { + node *restic.Node + err error +} - wg, ctx := errgroup.WithContext(ctx) - repo.StartPackUploader(ctx, wg) - - arch := New(repo, fs.Track{FS: statfs}, Options{}) - arch.Error = func(item string, err error) error { - t.Logf("archiver error as expected for %v: %v", item, err) - return err - } - arch.runWorkers(ctx, wg) - - // fs.Track will panic if the file was not closed - _, excluded, err := arch.save(ctx, "/", tempfile, nil) - if err == nil { - t.Errorf("Save() should have failed") - } - - if excluded { - t.Errorf("Save() excluded the node, that's unexpected") - } +func (m *mockToNoder) ToNode(_ bool) (*restic.Node, error) { + return m.node, m.err } func TestMetadataBackupErrorFiltering(t *testing.T) { tempdir := t.TempDir() - repo := repository.TestRepository(t) - filename := filepath.Join(tempdir, "file") - rtest.OK(t, os.WriteFile(filename, []byte("example"), 0o600)) - fi, err := os.Stat(filename) - rtest.OK(t, err) + repo := 
repository.TestRepository(t) arch := New(repo, fs.Local{}, Options{}) @@ -2403,15 +2427,24 @@ func TestMetadataBackupErrorFiltering(t *testing.T) { return replacementErr } + nonExistNoder := &mockToNoder{ + node: &restic.Node{Type: restic.NodeTypeFile}, + err: fmt.Errorf("not found"), + } + // check that errors from reading extended metadata are properly filtered - node, err := arch.nodeFromFileInfo("file", filename+"invalid", fi, false) + node, err := arch.nodeFromFileInfo("file", filename+"invalid", nonExistNoder, false) rtest.Assert(t, node != nil, "node is missing") rtest.Assert(t, err == replacementErr, "expected %v got %v", replacementErr, err) rtest.Assert(t, filteredErr != nil, "missing inner error") // check that errors from reading irregular file are not filtered filteredErr = nil - node, err = arch.nodeFromFileInfo("file", filename, wrapIrregularFileInfo(fi), false) + nonExistNoder = &mockToNoder{ + node: &restic.Node{Type: restic.NodeTypeIrregular}, + err: fmt.Errorf(`unsupported file type "irregular"`), + } + node, err = arch.nodeFromFileInfo("file", filename, nonExistNoder, false) rtest.Assert(t, node != nil, "node is missing") rtest.Assert(t, filteredErr == nil, "error for irregular node should not have been filtered") rtest.Assert(t, strings.Contains(err.Error(), "irregular"), "unexpected error %q does not warn about irregular file mode", err) @@ -2430,18 +2463,22 @@ func TestIrregularFile(t *testing.T) { tempfile := filepath.Join(tempdir, "testfile") fi := lstat(t, "testfile") + // patch mode to irregular + fi.Mode = (fi.Mode &^ os.ModeType) | os.ModeIrregular - statfs := &StatFS{ - FS: fs.Local{}, - OverrideLstat: map[string]os.FileInfo{ - tempfile: wrapIrregularFileInfo(fi), + override := &overrideFS{ + FS: fs.Local{}, + overrideFI: fi, + overrideNode: &restic.Node{ + Type: restic.NodeTypeIrregular, }, + overrideErr: fmt.Errorf(`unsupported file type "irregular"`), } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - arch := New(repo, fs.Track{FS: statfs}, Options{}) + arch := New(repo, fs.Track{FS: override}, Options{}) _, excluded, err := arch.save(ctx, "/", tempfile, nil) if err == nil { t.Fatalf("Save() should have failed") @@ -2452,3 +2489,48 @@ func TestIrregularFile(t *testing.T) { t.Errorf("Save() excluded the node, that's unexpected") } } + +type missingFS struct { + fs.FS + errorOnOpen bool +} + +func (fs *missingFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) { + if fs.errorOnOpen { + return nil, os.ErrNotExist + } + + return &missingFile{}, nil +} + +type missingFile struct { + fs.File +} + +func (f *missingFile) Stat() (*fs.ExtendedFileInfo, error) { + return nil, os.ErrNotExist +} + +func (f *missingFile) Close() error { + // prevent segfault in test + return nil +} + +func TestDisappearedFile(t *testing.T) { + tempdir, repo := prepareTempdirRepoSrc(t, TestDir{}) + + back := rtest.Chdir(t, tempdir) + defer back() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // depending on the underlying FS implementation a missing file may be detected by OpenFile or + // the subsequent file.Stat() call. Thus test both cases. 
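To make the intent of this test concrete, here is a hedged sketch (invented helper name, simplified control flow, not the archiver's actual save path) of how a caller can treat both failure points identically, so a vanished file is skipped rather than reported as an error:

```go
package archiver

import (
	"errors"
	"os"

	"github.com/restic/restic/internal/fs"
)

// saveIfPresent is a hedged sketch, not the real archiver code: it skips a
// file that disappears either at OpenFile time or at the subsequent Stat
// call, mirroring what TestDisappearedFile asserts.
func saveIfPresent(fsys fs.FS, name string) (skipped bool, err error) {
	f, err := fsys.OpenFile(name, fs.O_NOFOLLOW, true)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return true, nil // vanished before it could be opened
		}
		return false, err
	}
	defer func() { _ = f.Close() }()

	if _, err := f.Stat(); err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return true, nil // vanished between OpenFile and Stat
		}
		return false, err
	}
	// ... the real code would go on to read metadata and content here ...
	return false, nil
}
```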
+ for _, errorOnOpen := range []bool{false, true} { + arch := New(repo, fs.Track{FS: &missingFS{FS: &fs.Local{}, errorOnOpen: errorOnOpen}}, Options{}) + _, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "testdir"), nil) + rtest.OK(t, err) + rtest.Assert(t, excluded, "testfile should have been excluded") + } +} diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 621f84826..b6cc1ba4e 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -4,8 +4,6 @@ package archiver import ( - "os" - "syscall" "testing" "github.com/restic/restic/internal/feature" @@ -14,55 +12,9 @@ import ( rtest "github.com/restic/restic/internal/test" ) -type wrappedFileInfo struct { - os.FileInfo - sys interface{} - mode os.FileMode -} - -func (fi wrappedFileInfo) Sys() interface{} { - return fi.sys -} - -func (fi wrappedFileInfo) Mode() os.FileMode { - return fi.mode -} - -// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed. -func wrapFileInfo(fi os.FileInfo) os.FileInfo { - // get the underlying stat_t and modify the values - stat := fi.Sys().(*syscall.Stat_t) - stat.Mode = mockFileInfoMode - stat.Uid = mockFileInfoUID - stat.Gid = mockFileInfoGID - - // wrap the os.FileInfo so we can return a modified stat_t - res := wrappedFileInfo{ - FileInfo: fi, - sys: stat, - mode: mockFileInfoMode, - } - - return res -} - -// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file -func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { - // wrap the os.FileInfo so we can return a modified stat_t - return wrappedFileInfo{ - FileInfo: fi, - sys: fi.Sys().(*syscall.Stat_t), - mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, - } -} - func statAndSnapshot(t *testing.T, repo archiverRepo, name string) (*restic.Node, *restic.Node) { - fi := lstat(t, name) - fs := &fs.Local{} - want, err := fs.NodeFromFileInfo(name, fi, false) - rtest.OK(t, err) - - _, node := snapshot(t, repo, fs, nil, name) + want := nodeFromFile(t, &fs.Local{}, name) + _, node := snapshot(t, repo, &fs.Local{}, nil, name) return want, node } diff --git a/internal/archiver/archiver_windows_test.go b/internal/archiver/archiver_windows_test.go deleted file mode 100644 index ac8a67f2b..000000000 --- a/internal/archiver/archiver_windows_test.go +++ /dev/null @@ -1,36 +0,0 @@ -//go:build windows -// +build windows - -package archiver - -import ( - "os" -) - -type wrappedFileInfo struct { - os.FileInfo - mode os.FileMode -} - -func (fi wrappedFileInfo) Mode() os.FileMode { - return fi.mode -} - -// wrapFileInfo returns a new os.FileInfo with the mode, owner, and group fields changed. 
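The statAndSnapshot helper above now obtains the expected node through a nodeFromFile test helper that is not shown in this diff; a hypothetical equivalent built on the metadata-only OpenFile mode could look like the following (the real helper may differ):

```go
package archiver

import (
	"testing"

	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/restic"
	rtest "github.com/restic/restic/internal/test"
)

// nodeFromFile (hypothetical sketch): build a node for name without opening
// the file for reading, using the metadata-only mode of the FS interface.
func nodeFromFile(t testing.TB, fsys fs.FS, name string) *restic.Node {
	meta, err := fsys.OpenFile(name, fs.O_NOFOLLOW, true)
	rtest.OK(t, err)
	node, err := meta.ToNode(false)
	rtest.OK(t, err)
	rtest.OK(t, meta.Close())
	return node
}
```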
-func wrapFileInfo(fi os.FileInfo) os.FileInfo { - // wrap the os.FileInfo and return the modified mode, uid and gid are ignored on Windows - res := wrappedFileInfo{ - FileInfo: fi, - mode: mockFileInfoMode, - } - - return res -} - -// wrapIrregularFileInfo returns a new os.FileInfo with the mode changed to irregular file -func wrapIrregularFileInfo(fi os.FileInfo) os.FileInfo { - return wrappedFileInfo{ - FileInfo: fi, - mode: (fi.Mode() &^ os.ModeType) | os.ModeIrregular, - } -} diff --git a/internal/archiver/exclude.go b/internal/archiver/exclude.go index 1e855fc3a..6db62aa20 100644 --- a/internal/archiver/exclude.go +++ b/internal/archiver/exclude.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "runtime" "strings" "sync" @@ -21,7 +22,7 @@ type RejectByNameFunc func(path string) bool // RejectFunc is a function that takes a filename and os.FileInfo of a // file that would be included in the backup. The function returns true if it // should be excluded (rejected) from the backup. -type RejectFunc func(path string, fi os.FileInfo, fs fs.FS) bool +type RejectFunc func(path string, fi *fs.ExtendedFileInfo, fs fs.FS) bool func CombineRejectByNames(funcs []RejectByNameFunc) SelectByNameFunc { return func(item string) bool { @@ -35,7 +36,7 @@ func CombineRejectByNames(funcs []RejectByNameFunc) SelectByNameFunc { } func CombineRejects(funcs []RejectFunc) SelectFunc { - return func(item string, fi os.FileInfo, fs fs.FS) bool { + return func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { for _, reject := range funcs { if reject(item, fi, fs) { return false @@ -104,7 +105,7 @@ func RejectIfPresent(excludeFileSpec string, warnf func(msg string, args ...inte } debug.Log("using %q as exclusion tagfile", tf) rc := newRejectionCache() - return func(filename string, _ os.FileInfo, fs fs.FS) bool { + return func(filename string, _ *fs.ExtendedFileInfo, fs fs.FS) bool { return isExcludedByFile(filename, tf, tc, rc, fs, warnf) }, nil } @@ -135,9 +136,9 @@ func isExcludedByFile(filename, tagFilename, header string, rc *rejectionCache, return rejected } -func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS, warnf func(msg string, args ...interface{})) bool { - tf := fs.Join(dir, tagFilename) - _, err := fs.Lstat(tf) +func isDirExcludedByFile(dir, tagFilename, header string, fsInst fs.FS, warnf func(msg string, args ...interface{})) bool { + tf := fsInst.Join(dir, tagFilename) + _, err := fsInst.Lstat(tf) if errors.Is(err, os.ErrNotExist) { return false } @@ -153,7 +154,7 @@ func isDirExcludedByFile(dir, tagFilename, header string, fs fs.FS, warnf func(m // From this stage, errors mean tagFilename exists but it is malformed. // Warnings will be generated so that the user is informed that the // indented ignore-action is not performed. - f, err := fs.OpenFile(tf, os.O_RDONLY, 0) + f, err := fsInst.OpenFile(tf, fs.O_RDONLY, false) if err != nil { warnf("could not open exclusion tagfile: %v", err) return false @@ -186,6 +187,10 @@ type deviceMap map[string]uint64 // newDeviceMap creates a new device map from the list of source paths. 
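Since RejectFunc now receives a *fs.ExtendedFileInfo instead of an os.FileInfo, a custom filter written against the new signature could look like this hedged example (not part of this change):

```go
package archiver

import "github.com/restic/restic/internal/fs"

// rejectEmptyFiles is an illustrative example of the new RejectFunc
// signature: it rejects empty regular files and keeps everything else.
func rejectEmptyFiles() RejectFunc {
	return func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool {
		return fi.Mode.IsRegular() && fi.Size == 0
	}
}
```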
func newDeviceMap(allowedSourcePaths []string, fs fs.FS) (deviceMap, error) { + if runtime.GOOS == "windows" { + return nil, errors.New("Device IDs are not supported on Windows") + } + deviceMap := make(map[string]uint64) for _, item := range allowedSourcePaths { @@ -199,12 +204,7 @@ func newDeviceMap(allowedSourcePaths []string, fs fs.FS) (deviceMap, error) { return nil, err } - id, err := fs.DeviceID(fi) - if err != nil { - return nil, err - } - - deviceMap[item] = id + deviceMap[item] = fi.DeviceID } if len(deviceMap) == 0 { @@ -254,15 +254,8 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { } debug.Log("allowed devices: %v\n", deviceMap) - return func(item string, fi os.FileInfo, fs fs.FS) bool { - id, err := fs.DeviceID(fi) - if err != nil { - // This should never happen because gatherDevices() would have - // errored out earlier. If it still does that's a reason to panic. - panic(err) - } - - allowed, err := deviceMap.IsAllowed(fs.Clean(item), id, fs) + return func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { + allowed, err := deviceMap.IsAllowed(fs.Clean(item), fi.DeviceID, fs) if err != nil { // this should not happen panic(fmt.Sprintf("error checking device ID of %v: %v", item, err)) @@ -274,7 +267,7 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { } // reject everything except directories - if !fi.IsDir() { + if !fi.Mode.IsDir() { return true } @@ -290,14 +283,7 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { return true } - parentDeviceID, err := fs.DeviceID(parentFI) - if err != nil { - debug.Log("item %v: getting device ID of parent directory: %v", item, err) - // if in doubt, reject - return true - } - - parentAllowed, err := deviceMap.IsAllowed(parentDir, parentDeviceID, fs) + parentAllowed, err := deviceMap.IsAllowed(parentDir, parentFI.DeviceID, fs) if err != nil { debug.Log("item %v: error checking parent directory: %v", item, err) // if in doubt, reject @@ -315,13 +301,13 @@ func RejectByDevice(samples []string, filesystem fs.FS) (RejectFunc, error) { } func RejectBySize(maxSize int64) (RejectFunc, error) { - return func(item string, fi os.FileInfo, _ fs.FS) bool { + return func(item string, fi *fs.ExtendedFileInfo, _ fs.FS) bool { // directory will be ignored - if fi.IsDir() { + if fi.Mode.IsDir() { return false } - filesize := fi.Size() + filesize := fi.Size if filesize > maxSize { debug.Log("file %s is oversize: %d", item, filesize) return true diff --git a/internal/archiver/exclude_test.go b/internal/archiver/exclude_test.go index 7eb24b08b..9bfa5d83f 100644 --- a/internal/archiver/exclude_test.go +++ b/internal/archiver/exclude_test.go @@ -193,7 +193,7 @@ func TestIsExcludedByFileSize(t *testing.T) { return err } - excluded := sizeExclude(p, fi, nil) + excluded := sizeExclude(p, fs.ExtendedStat(fi), nil) // the log message helps debugging in case the test fails t.Logf("%q: dir:%t; size:%d; excluded:%v", p, fi.IsDir(), fi.Size(), excluded) m[p] = !excluded diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index dccaa9442..ca8ec2fbb 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "os" "sync" "github.com/restic/chunker" @@ -29,7 +28,7 @@ type fileSaver struct { CompleteBlob func(bytes uint64) - NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) + NodeFromFileInfo func(snPath, filename string, 
meta ToNoder, ignoreXattrListError bool) (*restic.Node, error) } // newFileSaver returns a new file saver. A worker pool with fileWorkers is @@ -71,13 +70,12 @@ type fileCompleteFunc func(*restic.Node, ItemStats) // file is closed by Save. completeReading is only called if the file was read // successfully. complete is always called. If completeReading is called, then // this will always happen before calling complete. -func (s *fileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, fi os.FileInfo, start func(), completeReading func(), complete fileCompleteFunc) futureNode { +func (s *fileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, start func(), completeReading func(), complete fileCompleteFunc) futureNode { fn, ch := newFutureNode() job := saveFileJob{ snPath: snPath, target: target, file: file, - fi: fi, ch: ch, start: start, @@ -100,7 +98,6 @@ type saveFileJob struct { snPath string target string file fs.File - fi os.FileInfo ch chan<- futureNodeResult start func() @@ -109,7 +106,7 @@ type saveFileJob struct { } // saveFile stores the file f in the repo, then closes it. -func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, fi os.FileInfo, start func(), finishReading func(), finish func(res futureNodeResult)) { +func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, start func(), finishReading func(), finish func(res futureNodeResult)) { start() fnr := futureNodeResult{ @@ -156,7 +153,7 @@ func (s *fileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat debug.Log("%v", snPath) - node, err := s.NodeFromFileInfo(snPath, target, fi, false) + node, err := s.NodeFromFileInfo(snPath, target, f, false) if err != nil { _ = f.Close() completeError(err) @@ -262,7 +259,7 @@ func (s *fileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) { } } - s.saveFile(ctx, chnker, job.snPath, job.target, job.file, job.fi, job.start, func() { + s.saveFile(ctx, chnker, job.snPath, job.target, job.file, job.start, func() { if job.completeReading != nil { job.completeReading() } diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index 5b17eca37..ce862f6fe 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -30,7 +30,7 @@ func createTestFiles(t testing.TB, num int) (files []string) { return files } -func startFileSaver(ctx context.Context, t testing.TB, fs fs.FS) (*fileSaver, context.Context, *errgroup.Group) { +func startFileSaver(ctx context.Context, t testing.TB, fsInst fs.FS) (*fileSaver, context.Context, *errgroup.Group) { wg, ctx := errgroup.WithContext(ctx) saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *buffer, _ string, cb func(saveBlobResponse)) { @@ -49,8 +49,8 @@ func startFileSaver(ctx context.Context, t testing.TB, fs fs.FS) (*fileSaver, co } s := newFileSaver(ctx, wg, saveBlob, pol, workers, workers) - s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - return fs.NodeFromFileInfo(filename, fi, ignoreXattrListError) + s.NodeFromFileInfo = func(snPath, filename string, meta ToNoder, ignoreXattrListError bool) (*restic.Node, error) { + return meta.ToNode(ignoreXattrListError) } return s, ctx, wg @@ -72,17 +72,12 @@ func TestFileSaver(t *testing.T) { var results []futureNode for _, filename := range files { - f, err := 
testFs.OpenFile(filename, os.O_RDONLY, 0) + f, err := testFs.OpenFile(filename, os.O_RDONLY, false) if err != nil { t.Fatal(err) } - fi, err := f.Stat() - if err != nil { - t.Fatal(err) - } - - ff := s.Save(ctx, filename, filename, f, fi, startFn, completeReadingFn, completeFn) + ff := s.Save(ctx, filename, filename, f, startFn, completeReadingFn, completeFn) results = append(results, ff) } diff --git a/internal/archiver/scanner.go b/internal/archiver/scanner.go index debd09aa3..2e6b7210c 100644 --- a/internal/archiver/scanner.go +++ b/internal/archiver/scanner.go @@ -2,7 +2,6 @@ package archiver import ( "context" - "os" "sort" "github.com/restic/restic/internal/debug" @@ -25,7 +24,7 @@ func NewScanner(filesystem fs.FS) *Scanner { return &Scanner{ FS: filesystem, SelectByName: func(_ string) bool { return true }, - Select: func(_ string, _ os.FileInfo, _ fs.FS) bool { return true }, + Select: func(_ string, _ *fs.ExtendedFileInfo, _ fs.FS) bool { return true }, Error: func(_ string, err error) error { return err }, Result: func(_ string, _ ScanStats) {}, } @@ -119,10 +118,10 @@ func (s *Scanner) scan(ctx context.Context, stats ScanStats, target string) (Sca } switch { - case fi.Mode().IsRegular(): + case fi.Mode.IsRegular(): stats.Files++ - stats.Bytes += uint64(fi.Size()) - case fi.Mode().IsDir(): + stats.Bytes += uint64(fi.Size) + case fi.Mode.IsDir(): names, err := fs.Readdirnames(s.FS, target, fs.O_NOFOLLOW) if err != nil { return stats, s.Error(target, err) diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go index e4e2c9f59..a47952388 100644 --- a/internal/archiver/scanner_test.go +++ b/internal/archiver/scanner_test.go @@ -56,8 +56,8 @@ func TestScanner(t *testing.T) { }, }, }, - selFn: func(item string, fi os.FileInfo, fs fs.FS) bool { - if fi.IsDir() { + selFn: func(item string, fi *fs.ExtendedFileInfo, fs fs.FS) bool { + if fi.Mode.IsDir() { return true } diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 8f5ee9f00..27390ee13 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -37,6 +37,8 @@ type Backend struct { prefix string listMaxItems int layout.Layout + + accessTier blob.AccessTier } const saveLargeSize = 256 * 1024 * 1024 @@ -60,6 +62,11 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { } else { endpointSuffix = "core.windows.net" } + + if cfg.AccountName == "" { + return nil, errors.Fatalf("unable to open Azure backend: Account name ($AZURE_ACCOUNT_NAME) is empty") + } + url := fmt.Sprintf("https://%s.blob.%s/%s", cfg.AccountName, endpointSuffix, cfg.Container) opts := &azContainer.ClientOptions{ ClientOptions: azcore.ClientOptions{ @@ -124,17 +131,33 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) { } } + var accessTier blob.AccessTier + // if the access tier is not supported, then we will not set the access tier; during the upload process, + // the value will be inferred from the default configured on the storage account. 
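The tier matching that follows can be read as the standalone helper sketched below (a hypothetical refactoring, not the code in this change): any unknown or empty access-tier value resolves to no explicit tier, so uploads fall back to the storage account default.

```go
package azure

import (
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// resolveAccessTier is a hedged sketch of the selection logic: it maps the
// user-supplied option value to one of the supported tiers, compared
// case-insensitively. A nil result means "let the account default apply".
func resolveAccessTier(configured string) *blob.AccessTier {
	for _, tier := range []blob.AccessTier{
		blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive,
	} {
		if strings.EqualFold(string(tier), configured) {
			t := tier
			return &t
		}
	}
	return nil
}
```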
+ for _, tier := range supportedAccessTiers() { + if strings.EqualFold(string(tier), cfg.AccessTier) { + accessTier = tier + debug.Log(" - using access tier %v", accessTier) + break + } + } + be := &Backend{ container: client, cfg: cfg, connections: cfg.Connections, Layout: layout.NewDefaultLayout(cfg.Prefix, path.Join), listMaxItems: defaultListMaxItems, + accessTier: accessTier, } return be, nil } +func supportedAccessTiers() []blob.AccessTier { + return []blob.AccessTier{blob.AccessTierHot, blob.AccessTierCool, blob.AccessTierCold, blob.AccessTierArchive} +} + // Open opens the Azure backend at specified container. func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) { return open(cfg, rt) @@ -213,25 +236,39 @@ func (be *Backend) Path() string { return be.prefix } +// useAccessTier determines whether to apply the configured access tier to a given file. +// For archive access tier, only data files are stored using that class; metadata +// must remain instantly accessible. +func (be *Backend) useAccessTier(h backend.Handle) bool { + notArchiveClass := !strings.EqualFold(be.cfg.AccessTier, "archive") + isDataFile := h.Type == backend.PackFile && !h.IsMetadata + return isDataFile || notArchiveClass +} + // Save stores data in the backend at the handle. func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error { objName := be.Filename(h) debug.Log("InsertObject(%v, %v)", be.cfg.AccountName, objName) + var accessTier blob.AccessTier + if be.useAccessTier(h) { + accessTier = be.accessTier + } + var err error if rd.Length() < saveLargeSize { // if it's smaller than 256miB, then just create the file directly from the reader - err = be.saveSmall(ctx, objName, rd) + err = be.saveSmall(ctx, objName, rd, accessTier) } else { // otherwise use the more complicated method - err = be.saveLarge(ctx, objName, rd) + err = be.saveLarge(ctx, objName, rd, accessTier) } return err } -func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader) error { +func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error { blockBlobClient := be.container.NewBlockBlobClient(objName) // upload it as a new "block", use the base64 hash for the ID @@ -252,11 +289,13 @@ func (be *Backend) saveSmall(ctx context.Context, objName string, rd backend.Rew } blocks := []string{id} - _, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{}) + _, err = blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{ + Tier: &accessTier, + }) return errors.Wrap(err, "CommitBlockList") } -func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader) error { +func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.RewindReader, accessTier blob.AccessTier) error { blockBlobClient := be.container.NewBlockBlobClient(objName) buf := make([]byte, 100*1024*1024) @@ -303,7 +342,9 @@ func (be *Backend) saveLarge(ctx context.Context, objName string, rd backend.Rew return errors.Errorf("wrote %d bytes instead of the expected %d bytes", uploadedBytes, rd.Length()) } - _, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{}) + _, err := blockBlobClient.CommitBlockList(ctx, blocks, &blockblob.CommitBlockListOptions{ + Tier: &accessTier, + }) debug.Log("uploaded %d parts: %v", len(blocks), blocks) return errors.Wrap(err, "CommitBlockList") diff --git 
a/internal/backend/azure/config.go b/internal/backend/azure/config.go index 7d69719ef..ee7ac51d8 100644 --- a/internal/backend/azure/config.go +++ b/internal/backend/azure/config.go @@ -22,7 +22,8 @@ type Config struct { Container string Prefix string - Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` + Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` + AccessTier string `option:"access-tier" help:"set the access tier for the blob storage (default: inferred from the storage account defaults)"` } // NewConfig returns a new Config with the default values filled in. diff --git a/internal/backend/rest/rest_test.go b/internal/backend/rest/rest_test.go index 891f60a87..50560f66d 100644 --- a/internal/backend/rest/rest_test.go +++ b/internal/backend/rest/rest_test.go @@ -106,7 +106,7 @@ func runRESTServer(ctx context.Context, t testing.TB, dir, reqListenAddr string) matched = true } } - fmt.Fprintln(os.Stdout, line) // print all output to console + _, _ = fmt.Fprintln(os.Stdout, line) // print all output to console } }() diff --git a/internal/backend/retry/backend_retry.go b/internal/backend/retry/backend_retry.go index 92c285c4b..de8a520ec 100644 --- a/internal/backend/retry/backend_retry.go +++ b/internal/backend/retry/backend_retry.go @@ -221,12 +221,19 @@ func (be *Backend) Load(ctx context.Context, h backend.Handle, length int, offse // Stat returns information about the File identified by h. func (be *Backend) Stat(ctx context.Context, h backend.Handle) (fi backend.FileInfo, err error) { - err = be.retry(ctx, fmt.Sprintf("Stat(%v)", h), + // see the call to `cancel()` below for why this context exists + statCtx, cancel := context.WithCancel(ctx) + defer cancel() + + err = be.retry(statCtx, fmt.Sprintf("Stat(%v)", h), func() error { var innerError error fi, innerError = be.Backend.Stat(ctx, h) if be.Backend.IsNotExist(innerError) { + // stat is only used to check the existence of the config file. + // cancel the context to suppress the final error message if the file is not found. + cancel() // do not retry if file is not found, as stat is usually used to check whether a file exists return backoff.Permanent(innerError) } diff --git a/internal/backend/retry/backend_retry_test.go b/internal/backend/retry/backend_retry_test.go index ffb8ae186..9259144d4 100644 --- a/internal/backend/retry/backend_retry_test.go +++ b/internal/backend/retry/backend_retry_test.go @@ -400,7 +400,11 @@ func TestBackendStatNotExists(t *testing.T) { } TestFastRetries(t) - retryBackend := New(be, 10, nil, nil) + retryBackend := New(be, 10, func(s string, err error, d time.Duration) { + t.Fatalf("unexpected error output %v", s) + }, func(s string, i int) { + t.Fatalf("unexpected log output %v", s) + }) _, err := retryBackend.Stat(context.TODO(), backend.Handle{}) test.Assert(t, be.IsNotExistFn(err), "unexpected error %v", err) diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index 0ecf7ae62..14819a2df 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -391,6 +391,10 @@ func (r *SFTP) checkNoSpace(dir string, size int64, origErr error) error { // Load runs fn with a reader that yields the contents of the file at h at the // given offset. 
func (r *SFTP) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error { + if err := r.clientError(); err != nil { + return err + } + return util.DefaultLoad(ctx, h, length, offset, r.openReader, func(rd io.Reader) error { if length == 0 || !feature.Flag.Enabled(feature.BackendErrorRedesign) { return fn(rd) @@ -460,6 +464,10 @@ func (r *SFTP) Remove(_ context.Context, h backend.Handle) error { // List runs fn for each file in the backend which has the type t. When an // error occurs (or fn returns an error), List stops and returns it. func (r *SFTP) List(ctx context.Context, t backend.FileType, fn func(backend.FileInfo) error) error { + if err := r.clientError(); err != nil { + return err + } + basedir, subdirs := r.Basedir(t) walker := r.c.Walk(basedir) for { diff --git a/internal/debug/debug.go b/internal/debug/debug.go index 7bc3291d1..a09d6e74a 100644 --- a/internal/debug/debug.go +++ b/internal/debug/debug.go @@ -120,7 +120,7 @@ func goroutineNum() int { runtime.Stack(b, false) var num int - fmt.Sscanf(string(b), "goroutine %d ", &num) + _, _ = fmt.Sscanf(string(b), "goroutine %d ", &num) return num } diff --git a/internal/debug/round_tripper.go b/internal/debug/round_tripper.go index 9dced95c6..4afab7298 100644 --- a/internal/debug/round_tripper.go +++ b/internal/debug/round_tripper.go @@ -42,7 +42,7 @@ func (rd *eofDetectReader) Close() error { msg += fmt.Sprintf(", body: %q", buf) } - fmt.Fprintln(os.Stderr, msg) + _, _ = fmt.Fprintln(os.Stderr, msg) Log("%s: %+v", msg, errors.New("Close()")) } return rd.rd.Close() diff --git a/internal/fs/const_unix.go b/internal/fs/const_unix.go index fe84cda17..e570c2553 100644 --- a/internal/fs/const_unix.go +++ b/internal/fs/const_unix.go @@ -7,3 +7,6 @@ import "syscall" // O_NOFOLLOW instructs the kernel to not follow symlinks when opening a file. const O_NOFOLLOW int = syscall.O_NOFOLLOW + +// O_DIRECTORY instructs the kernel to only open directories. +const O_DIRECTORY int = syscall.O_DIRECTORY diff --git a/internal/fs/const_windows.go b/internal/fs/const_windows.go index f1b263a54..b2b1bab86 100644 --- a/internal/fs/const_windows.go +++ b/internal/fs/const_windows.go @@ -3,5 +3,12 @@ package fs -// O_NOFOLLOW is a noop on Windows. -const O_NOFOLLOW int = 0 +// TODO honor flags when opening files + +// O_NOFOLLOW is currently only interpreted by FS.OpenFile in metadataOnly mode and ignored by OpenFile. +// The value of the constant is invented and only for use within this fs package. It must not be used in other contexts. +// It must not conflict with the other O_* values from go/src/syscall/types_windows.go +const O_NOFOLLOW int = 0x40000000 + +// O_DIRECTORY is a noop on Windows. +const O_DIRECTORY int = 0 diff --git a/internal/fs/deviceid_unix.go b/internal/fs/deviceid_unix.go deleted file mode 100644 index 4d5593335..000000000 --- a/internal/fs/deviceid_unix.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !windows -// +build !windows - -package fs - -import ( - "os" - "syscall" - - "github.com/restic/restic/internal/errors" -) - -// deviceID extracts the device ID from an os.FileInfo object by casting it -// to syscall.Stat_t -func deviceID(fi os.FileInfo) (deviceID uint64, err error) { - if fi == nil { - return 0, errors.New("unable to determine device: fi is nil") - } - - if fi.Sys() == nil { - return 0, errors.New("unable to determine device: fi.Sys() is nil") - } - - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - // st.Dev is uint32 on Darwin and uint64 on Linux. 
Just cast - // everything to uint64. - return uint64(st.Dev), nil - } - - return 0, errors.New("Could not cast to syscall.Stat_t") -} diff --git a/internal/fs/deviceid_windows.go b/internal/fs/deviceid_windows.go deleted file mode 100644 index bfb22dc9a..000000000 --- a/internal/fs/deviceid_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build windows -// +build windows - -package fs - -import ( - "os" - - "github.com/restic/restic/internal/errors" -) - -// deviceID extracts the device ID from an os.FileInfo object by casting it -// to syscall.Stat_t -func deviceID(_ os.FileInfo) (deviceID uint64, err error) { - return 0, errors.New("Device IDs are not supported on Windows") -} diff --git a/internal/fs/ea_windows.go b/internal/fs/ea_windows.go index 6bfe20209..fe9a3c42a 100644 --- a/internal/fs/ea_windows.go +++ b/internal/fs/ea_windows.go @@ -8,7 +8,6 @@ import ( "encoding/binary" "errors" "fmt" - "strings" "syscall" "unsafe" @@ -299,20 +298,3 @@ func pathSupportsExtendedAttributes(path string) (supported bool, err error) { supported = (fileSystemFlags & windows.FILE_SUPPORTS_EXTENDED_ATTRIBUTES) != 0 return supported, nil } - -// getVolumePathName returns the volume path name for the given path. -func getVolumePathName(path string) (volumeName string, err error) { - utf16Path, err := windows.UTF16PtrFromString(path) - if err != nil { - return "", err - } - // Get the volume path (e.g., "D:") - var volumePath [windows.MAX_PATH + 1]uint16 - err = windows.GetVolumePathName(utf16Path, &volumePath[0], windows.MAX_PATH+1) - if err != nil { - return "", err - } - // Trim any trailing backslashes - volumeName = strings.TrimRight(windows.UTF16ToString(volumePath[:]), "\\") - return volumeName, nil -} diff --git a/internal/fs/ea_windows_test.go b/internal/fs/ea_windows_test.go index 64bc7f7b6..00cbe97f8 100644 --- a/internal/fs/ea_windows_test.go +++ b/internal/fs/ea_windows_test.go @@ -10,7 +10,6 @@ import ( "os" "path/filepath" "reflect" - "strings" "syscall" "testing" "unsafe" @@ -278,46 +277,3 @@ func TestPathSupportsExtendedAttributes(t *testing.T) { t.Error("Expected an error for non-existent path, but got nil") } } - -func TestGetVolumePathName(t *testing.T) { - tempDirVolume := filepath.VolumeName(os.TempDir()) - testCases := []struct { - name string - path string - expectedPrefix string - }{ - { - name: "Root directory", - path: os.Getenv("SystemDrive") + `\`, - expectedPrefix: os.Getenv("SystemDrive"), - }, - { - name: "Nested directory", - path: os.Getenv("SystemDrive") + `\Windows\System32`, - expectedPrefix: os.Getenv("SystemDrive"), - }, - { - name: "Temp directory", - path: os.TempDir() + `\`, - expectedPrefix: tempDirVolume, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - volumeName, err := getVolumePathName(tc.path) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if !strings.HasPrefix(volumeName, tc.expectedPrefix) { - t.Errorf("Expected volume name to start with %s, but got %s", tc.expectedPrefix, volumeName) - } - }) - } - - // Test with an invalid path - _, err := getVolumePathName("Z:\\NonExistentPath") - if err == nil { - t.Error("Expected an error for non-existent path, but got nil") - } -} diff --git a/internal/fs/file.go b/internal/fs/file.go index 8d60ed159..57f1a996a 100644 --- a/internal/fs/file.go +++ b/internal/fs/file.go @@ -3,6 +3,7 @@ package fs import ( "fmt" "os" + "runtime" ) // MkdirAll creates a directory named path, along with any necessary parents, @@ -47,6 +48,9 @@ func Lstat(name string) (os.FileInfo, 
error) { // methods on the returned File can be used for I/O. // If there is an error, it will be of type *PathError. func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { + if runtime.GOOS == "windows" { + flag &^= O_NOFOLLOW + } return os.OpenFile(fixpath(name), flag, perm) } @@ -64,9 +68,10 @@ func ResetPermissions(path string) error { return nil } -// Readdirnames returns a list of file in a directory. Flags are passed to fs.OpenFile. O_RDONLY is implied. +// Readdirnames returns a list of file in a directory. Flags are passed to fs.OpenFile. +// O_RDONLY and O_DIRECTORY are implied. func Readdirnames(filesystem FS, dir string, flags int) ([]string, error) { - f, err := filesystem.OpenFile(dir, O_RDONLY|flags, 0) + f, err := filesystem.OpenFile(dir, O_RDONLY|O_DIRECTORY|flags, false) if err != nil { return nil, fmt.Errorf("openfile for readdirnames failed: %w", err) } diff --git a/internal/fs/file_unix_test.go b/internal/fs/file_unix_test.go new file mode 100644 index 000000000..00d68abb8 --- /dev/null +++ b/internal/fs/file_unix_test.go @@ -0,0 +1,22 @@ +//go:build unix + +package fs + +import ( + "path/filepath" + "syscall" + "testing" + + "github.com/restic/restic/internal/errors" + rtest "github.com/restic/restic/internal/test" +) + +func TestReaddirnamesFifo(t *testing.T) { + // should not block when reading from a fifo instead of a directory + tempdir := t.TempDir() + fifoFn := filepath.Join(tempdir, "fifo") + rtest.OK(t, mkfifo(fifoFn, 0o600)) + + _, err := Readdirnames(&Local{}, fifoFn, 0) + rtest.Assert(t, errors.Is(err, syscall.ENOTDIR), "unexpected error %v", err) +} diff --git a/internal/fs/file_windows.go b/internal/fs/file_windows.go index 3d011f719..d7aabf360 100644 --- a/internal/fs/file_windows.go +++ b/internal/fs/file_windows.go @@ -18,19 +18,28 @@ func fixpath(name string) string { abspath, err := filepath.Abs(name) if err == nil { // Check if \\?\UNC\ already exist - if strings.HasPrefix(abspath, `\\?\UNC\`) { + if strings.HasPrefix(abspath, uncPathPrefix) { + return abspath + } + // Check if \\?\GLOBALROOT exists which marks volume shadow copy snapshots + if strings.HasPrefix(abspath, globalRootPrefix) { + if strings.Count(abspath, `\`) == 5 { + // Append slash if this just a volume name, e.g. `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopyXX` + // Without the trailing slash any access to the volume itself will fail. + return abspath + string(filepath.Separator) + } return abspath } // Check if \\?\ already exist - if strings.HasPrefix(abspath, `\\?\`) { + if strings.HasPrefix(abspath, extendedPathPrefix) { return abspath } // Check if path starts with \\ if strings.HasPrefix(abspath, `\\`) { - return strings.Replace(abspath, `\\`, `\\?\UNC\`, 1) + return strings.Replace(abspath, `\\`, uncPathPrefix, 1) } // Normal path - return `\\?\` + abspath + return extendedPathPrefix + abspath } return name } diff --git a/internal/fs/fs_local.go b/internal/fs/fs_local.go index 5fac88dbb..fc6c69cf2 100644 --- a/internal/fs/fs_local.go +++ b/internal/fs/fs_local.go @@ -20,47 +20,28 @@ func (fs Local) VolumeName(path string) string { return filepath.VolumeName(path) } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. 
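The rewritten Local filesystem below replaces the permission-based OpenFile with a metadataOnly mode plus a MakeReadable upgrade path; the following hedged usage sketch (helper name and control flow invented) shows how a caller is expected to combine them:

```go
package fs

import (
	"io"

	"github.com/restic/restic/internal/restic"
)

// statThenMaybeRead is a usage sketch, not part of this change: open a path
// metadata-only, build its node, and only upgrade to a readable handle when
// the content is actually needed.
func statThenMaybeRead(fsys FS, name string, w io.Writer) (*restic.Node, error) {
	f, err := fsys.OpenFile(name, O_NOFOLLOW, true) // metadataOnly
	if err != nil {
		return nil, err
	}
	defer func() { _ = f.Close() }()

	node, err := f.ToNode(false)
	if err != nil {
		return nil, err
	}

	if node.Type == restic.NodeTypeFile {
		if err := f.MakeReadable(); err != nil { // reopen for reading
			return nil, err
		}
		if _, err := io.Copy(w, f); err != nil {
			return nil, err
		}
	}
	return node, nil
}
```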
-func (fs Local) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, err := os.OpenFile(fixpath(name), flag, perm) - if err != nil { - return nil, err - } - _ = setFlags(f) - return f, nil -} - -// Stat returns a FileInfo describing the named file. If there is an error, it -// will be of type *PathError. -func (fs Local) Stat(name string) (os.FileInfo, error) { - return os.Stat(fixpath(name)) +// OpenFile opens a file or directory for reading. +// +// If metadataOnly is set, an implementation MUST return a File object for +// arbitrary file types including symlinks. The implementation may internally use +// the given file path or a file handle. In particular, an implementation may +// delay actually accessing the underlying filesystem. +// +// Only the O_NOFOLLOW and O_DIRECTORY flags are supported. +func (fs Local) OpenFile(name string, flag int, metadataOnly bool) (File, error) { + return newLocalFile(name, flag, metadataOnly) } // Lstat returns the FileInfo structure describing the named file. // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. Lstat makes no attempt to follow the link. // If there is an error, it will be of type *PathError. -func (fs Local) Lstat(name string) (os.FileInfo, error) { - return os.Lstat(fixpath(name)) -} - -// DeviceID extracts the DeviceID from the given FileInfo. If the fs does -// not support a DeviceID, it returns an error instead -func (fs Local) DeviceID(fi os.FileInfo) (id uint64, err error) { - return deviceID(fi) -} - -// ExtendedStat converts the give FileInfo into ExtendedFileInfo. -func (fs Local) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { - return ExtendedStat(fi) -} - -func (fs Local) NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - return nodeFromFileInfo(path, fi, ignoreXattrListError) +func (fs Local) Lstat(name string) (*ExtendedFileInfo, error) { + fi, err := os.Lstat(fixpath(name)) + if err != nil { + return nil, err + } + return extendedStat(fi), nil } // Join joins any number of path elements into a single path, adding a @@ -103,3 +84,92 @@ func (fs Local) Base(path string) string { func (fs Local) Dir(path string) string { return filepath.Dir(path) } + +type localFile struct { + name string + flag int + f *os.File + fi *ExtendedFileInfo +} + +// See the File interface for a description of each method +var _ File = &localFile{} + +func newLocalFile(name string, flag int, metadataOnly bool) (*localFile, error) { + var f *os.File + if !metadataOnly { + var err error + f, err = os.OpenFile(fixpath(name), flag, 0) + if err != nil { + return nil, err + } + _ = setFlags(f) + } + return &localFile{ + name: name, + flag: flag, + f: f, + }, nil +} + +func (f *localFile) MakeReadable() error { + if f.f != nil { + panic("file is already readable") + } + + newF, err := newLocalFile(f.name, f.flag, false) + if err != nil { + return err + } + // replace state and also reset cached FileInfo + *f = *newF + return nil +} + +func (f *localFile) cacheFI() error { + if f.fi != nil { + return nil + } + var fi os.FileInfo + var err error + if f.f != nil { + fi, err = f.f.Stat() + } else if f.flag&O_NOFOLLOW != 0 { + fi, err = os.Lstat(f.name) + } else { + fi, err = os.Stat(f.name) + } + if err != nil { + return err + } + f.fi = extendedStat(fi) + return nil +} + +func (f *localFile) Stat() (*ExtendedFileInfo, error) { + err := f.cacheFI() + // the call to cacheFI MUST happen before reading from f.fi + return f.fi, err +} + +func (f *localFile) 
ToNode(ignoreXattrListError bool) (*restic.Node, error) { + if err := f.cacheFI(); err != nil { + return nil, err + } + return nodeFromFileInfo(f.name, f.fi, ignoreXattrListError) +} + +func (f *localFile) Read(p []byte) (n int, err error) { + return f.f.Read(p) +} + +func (f *localFile) Readdirnames(n int) ([]string, error) { + return f.f.Readdirnames(n) +} + +func (f *localFile) Close() error { + if f.f != nil { + return f.f.Close() + } + return nil +} diff --git a/internal/fs/fs_local_test.go b/internal/fs/fs_local_test.go new file mode 100644 index 000000000..8fd8eb136 --- /dev/null +++ b/internal/fs/fs_local_test.go @@ -0,0 +1,221 @@ +package fs + +import ( + "io" + "os" + "path/filepath" + "slices" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +type fsLocalMetadataTestcase struct { + name string + follow bool + setup func(t *testing.T, path string) + nodeType restic.NodeType +} + +func TestFSLocalMetadata(t *testing.T) { + for _, test := range []fsLocalMetadataTestcase{ + { + name: "file", + setup: func(t *testing.T, path string) { + rtest.OK(t, os.WriteFile(path, []byte("example"), 0o600)) + }, + nodeType: restic.NodeTypeFile, + }, + { + name: "directory", + setup: func(t *testing.T, path string) { + rtest.OK(t, os.Mkdir(path, 0o600)) + }, + nodeType: restic.NodeTypeDir, + }, + { + name: "symlink", + setup: func(t *testing.T, path string) { + rtest.OK(t, os.Symlink(path+"old", path)) + }, + nodeType: restic.NodeTypeSymlink, + }, + { + name: "symlink file", + follow: true, + setup: func(t *testing.T, path string) { + rtest.OK(t, os.WriteFile(path+"file", []byte("example"), 0o600)) + rtest.OK(t, os.Symlink(path+"file", path)) + }, + nodeType: restic.NodeTypeFile, + }, + } { + runFSLocalTestcase(t, test) + } +} + +func runFSLocalTestcase(t *testing.T, test fsLocalMetadataTestcase) { + t.Run(test.name, func(t *testing.T) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + test.setup(t, path) + + testFs := &Local{} + flags := 0 + if !test.follow { + flags |= O_NOFOLLOW + } + f, err := testFs.OpenFile(path, flags, true) + rtest.OK(t, err) + checkMetadata(t, f, path, test.follow, test.nodeType) + rtest.OK(t, f.Close()) + }) + +} + +func checkMetadata(t *testing.T, f File, path string, follow bool, nodeType restic.NodeType) { + fi, err := f.Stat() + rtest.OK(t, err) + var fi2 os.FileInfo + if follow { + fi2, err = os.Stat(path) + } else { + fi2, err = os.Lstat(path) + } + rtest.OK(t, err) + assertFIEqual(t, fi2, fi) + + node, err := f.ToNode(false) + rtest.OK(t, err) + + // ModTime is likely unique per file, thus it provides a good indication that it is from the correct file + rtest.Equals(t, fi.ModTime, node.ModTime, "node ModTime") + rtest.Equals(t, nodeType, node.Type, "node Type") +} + +func assertFIEqual(t *testing.T, want os.FileInfo, got *ExtendedFileInfo) { + t.Helper() + rtest.Equals(t, want.Name(), got.Name, "Name") + rtest.Equals(t, want.ModTime(), got.ModTime, "ModTime") + rtest.Equals(t, want.Mode(), got.Mode, "Mode") + rtest.Equals(t, want.Size(), got.Size, "Size") +} + +func TestFSLocalRead(t *testing.T) { + testFSLocalRead(t, false) + testFSLocalRead(t, true) +} + +func testFSLocalRead(t *testing.T, makeReadable bool) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + testdata := "example" + rtest.OK(t, os.WriteFile(path, []byte(testdata), 0o600)) + + f := openReadable(t, path, makeReadable) + checkMetadata(t, f, path, false, restic.NodeTypeFile) + + data, err := io.ReadAll(f) + 
rtest.OK(t, err) + rtest.Equals(t, testdata, string(data), "file content mismatch") + + rtest.OK(t, f.Close()) +} + +func openReadable(t *testing.T, path string, useMakeReadable bool) File { + testFs := &Local{} + f, err := testFs.OpenFile(path, O_NOFOLLOW, useMakeReadable) + rtest.OK(t, err) + if useMakeReadable { + // file was opened as metadataOnly. open for reading + rtest.OK(t, f.MakeReadable()) + } + return f +} + +func TestFSLocalReaddir(t *testing.T) { + testFSLocalReaddir(t, false) + testFSLocalReaddir(t, true) +} + +func testFSLocalReaddir(t *testing.T, makeReadable bool) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + rtest.OK(t, os.Mkdir(path, 0o700)) + entries := []string{"testfile"} + rtest.OK(t, os.WriteFile(filepath.Join(path, entries[0]), []byte("example"), 0o600)) + + f := openReadable(t, path, makeReadable) + checkMetadata(t, f, path, false, restic.NodeTypeDir) + + names, err := f.Readdirnames(-1) + rtest.OK(t, err) + slices.Sort(names) + rtest.Equals(t, entries, names, "directory content mismatch") + + rtest.OK(t, f.Close()) +} + +func TestFSLocalReadableRace(t *testing.T) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + testdata := "example" + rtest.OK(t, os.WriteFile(path, []byte(testdata), 0o600)) + + testFs := &Local{} + f, err := testFs.OpenFile(path, O_NOFOLLOW, true) + rtest.OK(t, err) + + pathNew := path + "new" + rtest.OK(t, os.Rename(path, pathNew)) + + err = f.MakeReadable() + if err == nil { + // a file handle based implementation should still work + checkMetadata(t, f, pathNew, false, restic.NodeTypeFile) + + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, testdata, string(data), "file content mismatch") + } + + rtest.OK(t, f.Close()) +} + +func TestFSLocalTypeChange(t *testing.T) { + tmp := t.TempDir() + path := filepath.Join(tmp, "item") + testdata := "example" + rtest.OK(t, os.WriteFile(path, []byte(testdata), 0o600)) + + testFs := &Local{} + f, err := testFs.OpenFile(path, O_NOFOLLOW, true) + rtest.OK(t, err) + // cache metadata + _, err = f.Stat() + rtest.OK(t, err) + + pathNew := path + "new" + // rename instead of unlink to let the test also work on windows + rtest.OK(t, os.Rename(path, pathNew)) + + rtest.OK(t, os.Mkdir(path, 0o700)) + rtest.OK(t, f.MakeReadable()) + + fi, err := f.Stat() + rtest.OK(t, err) + if !fi.Mode.IsDir() { + // a file handle based implementation should still reference the file + checkMetadata(t, f, pathNew, false, restic.NodeTypeFile) + + data, err := io.ReadAll(f) + rtest.OK(t, err) + rtest.Equals(t, testdata, string(data), "file content mismatch") + } + // else: + // path-based implementation + // nothing to test here. 
stat returned the new file type + + rtest.OK(t, f.Close()) +} diff --git a/internal/fs/fs_local_unix_test.go b/internal/fs/fs_local_unix_test.go new file mode 100644 index 000000000..5bcb5efd0 --- /dev/null +++ b/internal/fs/fs_local_unix_test.go @@ -0,0 +1,40 @@ +//go:build unix + +package fs + +import ( + "syscall" + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" +) + +func TestFSLocalMetadataUnix(t *testing.T) { + for _, test := range []fsLocalMetadataTestcase{ + { + name: "socket", + setup: func(t *testing.T, path string) { + fd, err := syscall.Socket(syscall.AF_UNIX, syscall.SOCK_STREAM, 0) + rtest.OK(t, err) + defer func() { + _ = syscall.Close(fd) + }() + + addr := &syscall.SockaddrUnix{Name: path} + rtest.OK(t, syscall.Bind(fd, addr)) + }, + nodeType: restic.NodeTypeSocket, + }, + { + name: "fifo", + setup: func(t *testing.T, path string) { + rtest.OK(t, mkfifo(path, 0o600)) + }, + nodeType: restic.NodeTypeFifo, + }, + // device files can only be created as root + } { + runFSLocalTestcase(t, test) + } +} diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 1915e2a7c..dfee31779 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -1,7 +1,6 @@ package fs import ( - "os" "path/filepath" "runtime" "strings" @@ -10,7 +9,6 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" - "github.com/restic/restic/internal/restic" ) // VSSConfig holds extended options of windows volume shadow copy service. @@ -126,25 +124,16 @@ func (fs *LocalVss) DeleteSnapshots() { fs.snapshots = activeSnapshots } -// OpenFile wraps the Open method of the underlying file system. -func (fs *LocalVss) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return fs.FS.OpenFile(fs.snapshotPath(name), flag, perm) -} - -// Stat wraps the Stat method of the underlying file system. -func (fs *LocalVss) Stat(name string) (os.FileInfo, error) { - return fs.FS.Stat(fs.snapshotPath(name)) +// OpenFile wraps the OpenFile method of the underlying file system. +func (fs *LocalVss) OpenFile(name string, flag int, metadataOnly bool) (File, error) { + return fs.FS.OpenFile(fs.snapshotPath(name), flag, metadataOnly) } // Lstat wraps the Lstat method of the underlying file system. -func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { +func (fs *LocalVss) Lstat(name string) (*ExtendedFileInfo, error) { return fs.FS.Lstat(fs.snapshotPath(name)) } -func (fs *LocalVss) NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { - return fs.FS.NodeFromFileInfo(fs.snapshotPath(path), fi, ignoreXattrListError) -} - // isMountPointIncluded is true if given mountpoint included by user. 
func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { if fs.excludeVolumes == nil { @@ -176,7 +165,7 @@ func (fs *LocalVss) snapshotPath(path string) string { return path } - fixPath = strings.TrimPrefix(fixpath(path), `\\?\`) + fixPath = strings.TrimPrefix(fixPath, `\\?\`) fixPathLower := strings.ToLower(fixPath) volumeName := filepath.VolumeName(fixPath) volumeNameLower := strings.ToLower(volumeName) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index f1a043118..b64897d1c 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -317,28 +317,25 @@ func TestVSSFS(t *testing.T) { // trigger snapshot creation and // capture FI while file still exists (should already be within the snapshot) - origFi, err := localVss.Stat(tempfile) + origFi, err := localVss.Lstat(tempfile) rtest.OK(t, err) // remove original file rtest.OK(t, os.Remove(tempfile)) - statFi, err := localVss.Stat(tempfile) - rtest.OK(t, err) - rtest.Equals(t, origFi.Mode(), statFi.Mode()) - lstatFi, err := localVss.Lstat(tempfile) rtest.OK(t, err) - rtest.Equals(t, origFi.Mode(), lstatFi.Mode()) + rtest.Equals(t, origFi.Mode, lstatFi.Mode) - f, err := localVss.OpenFile(tempfile, os.O_RDONLY, 0) + f, err := localVss.OpenFile(tempfile, os.O_RDONLY, false) rtest.OK(t, err) data, err := io.ReadAll(f) rtest.OK(t, err) rtest.Equals(t, "example", string(data), "unexpected file content") - rtest.OK(t, f.Close()) - node, err := localVss.NodeFromFileInfo(tempfile, statFi, false) + node, err := f.ToNode(false) rtest.OK(t, err) - rtest.Equals(t, node.Mode, statFi.Mode()) + rtest.Equals(t, node.Mode, lstatFi.Mode) + + rtest.OK(t, f.Close()) } diff --git a/internal/fs/fs_reader.go b/internal/fs/fs_reader.go index 97d4e1660..bbe5c95ab 100644 --- a/internal/fs/fs_reader.go +++ b/internal/fs/fs_reader.go @@ -5,6 +5,7 @@ import ( "io" "os" "path" + "slices" "sync" "syscall" "time" @@ -40,21 +41,16 @@ func (fs *Reader) VolumeName(_ string) string { return "" } -func (fs *Reader) fi() os.FileInfo { - return fakeFileInfo{ - name: fs.Name, - size: fs.Size, - mode: fs.Mode, - modtime: fs.ModTime, +func (fs *Reader) fi() *ExtendedFileInfo { + return &ExtendedFileInfo{ + Name: fs.Name, + Mode: fs.Mode, + ModTime: fs.ModTime, + Size: fs.Size, } } -// OpenFile is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *os.PathError. -func (fs *Reader) OpenFile(name string, flag int, _ os.FileMode) (f File, err error) { +func (fs *Reader) OpenFile(name string, flag int, _ bool) (f File, err error) { if flag & ^(O_RDONLY|O_NOFOLLOW) != 0 { return nil, pathError("open", name, fmt.Errorf("invalid combination of flags 0x%x", flag)) @@ -73,7 +69,7 @@ func (fs *Reader) OpenFile(name string, flag int, _ os.FileMode) (f File, err er return f, nil case "/", ".": f = fakeDir{ - entries: []os.FileInfo{fs.fi()}, + entries: []string{fs.fi().Name}, } return f, nil } @@ -81,25 +77,18 @@ func (fs *Reader) OpenFile(name string, flag int, _ os.FileMode) (f File, err er return nil, pathError("open", name, syscall.ENOENT) } -// Stat returns a FileInfo describing the named file. If there is an error, it -// will be of type *os.PathError. 
-func (fs *Reader) Stat(name string) (os.FileInfo, error) { - return fs.Lstat(name) -} - // Lstat returns the FileInfo structure describing the named file. // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. Lstat makes no attempt to follow the link. // If there is an error, it will be of type *os.PathError. -func (fs *Reader) Lstat(name string) (os.FileInfo, error) { - getDirInfo := func(name string) os.FileInfo { - fi := fakeFileInfo{ - name: fs.Base(name), - size: 0, - mode: os.ModeDir | 0755, - modtime: time.Now(), +func (fs *Reader) Lstat(name string) (*ExtendedFileInfo, error) { + getDirInfo := func(name string) *ExtendedFileInfo { + return &ExtendedFileInfo{ + Name: fs.Base(name), + Size: 0, + Mode: os.ModeDir | 0755, + ModTime: time.Now(), } - return fi } switch name { @@ -123,27 +112,6 @@ func (fs *Reader) Lstat(name string) (os.FileInfo, error) { return nil, pathError("lstat", name, os.ErrNotExist) } -func (fs *Reader) DeviceID(_ os.FileInfo) (deviceID uint64, err error) { - return 0, errors.New("Device IDs are not supported") -} - -func (fs *Reader) ExtendedStat(fi os.FileInfo) ExtendedFileInfo { - return ExtendedFileInfo{ - FileInfo: fi, - } -} - -func (fs *Reader) NodeFromFileInfo(path string, fi os.FileInfo, _ bool) (*restic.Node, error) { - node := buildBasicNode(path, fi) - - // fill minimal info with current values for uid, gid - node.UID = uint32(os.Getuid()) - node.GID = uint32(os.Getgid()) - node.ChangeTime = node.ModTime - - return node, nil -} - // Join joins any number of path elements into a single path, adding a // Separator if necessary. Join calls Clean on the result; in particular, all // empty strings are ignored. On Windows, the result is a UNC path if and only @@ -187,13 +155,13 @@ func (fs *Reader) Dir(p string) string { return path.Dir(p) } -func newReaderFile(rd io.ReadCloser, fi os.FileInfo, allowEmptyFile bool) *readerFile { +func newReaderFile(rd io.ReadCloser, fi *ExtendedFileInfo, allowEmptyFile bool) *readerFile { return &readerFile{ ReadCloser: rd, AllowEmptyFile: allowEmptyFile, fakeFile: fakeFile{ - FileInfo: fi, - name: fi.Name(), + fi: fi, + name: fi.Name, }, } } @@ -235,12 +203,16 @@ var _ File = &readerFile{} // except Stat() type fakeFile struct { name string - os.FileInfo + fi *ExtendedFileInfo } // ensure that fakeFile implements File var _ File = fakeFile{} +func (f fakeFile) MakeReadable() error { + return nil +} + func (f fakeFile) Readdirnames(_ int) ([]string, error) { return nil, pathError("readdirnames", f.name, os.ErrInvalid) } @@ -253,13 +225,24 @@ func (f fakeFile) Close() error { return nil } -func (f fakeFile) Stat() (os.FileInfo, error) { - return f.FileInfo, nil +func (f fakeFile) Stat() (*ExtendedFileInfo, error) { + return f.fi, nil +} + +func (f fakeFile) ToNode(_ bool) (*restic.Node, error) { + node := buildBasicNode(f.name, f.fi) + + // fill minimal info with current values for uid, gid + node.UID = uint32(os.Getuid()) + node.GID = uint32(os.Getgid()) + node.ChangeTime = node.ModTime + + return node, nil } // fakeDir implements Readdirnames and Readdir, everything else is delegated to fakeFile. 
type fakeDir struct { - entries []os.FileInfo + entries []string fakeFile } @@ -267,44 +250,7 @@ func (d fakeDir) Readdirnames(n int) ([]string, error) { if n > 0 { return nil, pathError("readdirnames", d.name, errors.New("not implemented")) } - names := make([]string, 0, len(d.entries)) - for _, entry := range d.entries { - names = append(names, entry.Name()) - } - - return names, nil -} - -// fakeFileInfo implements the bare minimum of os.FileInfo. -type fakeFileInfo struct { - name string - size int64 - mode os.FileMode - modtime time.Time -} - -func (fi fakeFileInfo) Name() string { - return fi.name -} - -func (fi fakeFileInfo) Size() int64 { - return fi.size -} - -func (fi fakeFileInfo) Mode() os.FileMode { - return fi.mode -} - -func (fi fakeFileInfo) ModTime() time.Time { - return fi.modtime -} - -func (fi fakeFileInfo) IsDir() bool { - return fi.mode&os.ModeDir > 0 -} - -func (fi fakeFileInfo) Sys() interface{} { - return nil + return slices.Clone(d.entries), nil } func pathError(op, name string, err error) *os.PathError { diff --git a/internal/fs/fs_reader_test.go b/internal/fs/fs_reader_test.go index 442912fe3..257bfbbac 100644 --- a/internal/fs/fs_reader_test.go +++ b/internal/fs/fs_reader_test.go @@ -16,7 +16,7 @@ import ( ) func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte) { - f, err := fs.OpenFile(filename, O_RDONLY, 0) + f, err := fs.OpenFile(filename, O_RDONLY, false) if err != nil { t.Fatal(err) } @@ -37,7 +37,7 @@ func verifyFileContentOpenFile(t testing.TB, fs FS, filename string, want []byte } func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { - f, err := fs.OpenFile(dir, os.O_RDONLY, 0) + f, err := fs.OpenFile(dir, O_RDONLY, false) if err != nil { t.Fatal(err) } @@ -60,25 +60,25 @@ func verifyDirectoryContents(t testing.TB, fs FS, dir string, want []string) { } } -func checkFileInfo(t testing.TB, fi os.FileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) { - if fi.IsDir() != isdir { - t.Errorf("IsDir returned %t, want %t", fi.IsDir(), isdir) +func checkFileInfo(t testing.TB, fi *ExtendedFileInfo, filename string, modtime time.Time, mode os.FileMode, isdir bool) { + if fi.Mode.IsDir() != isdir { + t.Errorf("IsDir returned %t, want %t", fi.Mode.IsDir(), isdir) } - if fi.Mode() != mode { - t.Errorf("Mode() returned wrong value, want 0%o, got 0%o", mode, fi.Mode()) + if fi.Mode != mode { + t.Errorf("Mode has wrong value, want 0%o, got 0%o", mode, fi.Mode) } - if !modtime.Equal(time.Time{}) && !fi.ModTime().Equal(modtime) { - t.Errorf("ModTime() returned wrong value, want %v, got %v", modtime, fi.ModTime()) + if !modtime.Equal(time.Time{}) && !fi.ModTime.Equal(modtime) { + t.Errorf("ModTime has wrong value, want %v, got %v", modtime, fi.ModTime) } - if path.Base(fi.Name()) != fi.Name() { - t.Errorf("Name() returned is not base, want %q, got %q", path.Base(fi.Name()), fi.Name()) + if path.Base(fi.Name) != fi.Name { + t.Errorf("Name is not base, want %q, got %q", path.Base(fi.Name), fi.Name) } - if fi.Name() != path.Base(filename) { - t.Errorf("Name() returned wrong value, want %q, got %q", path.Base(filename), fi.Name()) + if fi.Name != path.Base(filename) { + t.Errorf("Name has wrong value, want %q, got %q", path.Base(filename), fi.Name) } } @@ -123,7 +123,7 @@ func TestFSReader(t *testing.T) { { name: "file/Stat", f: func(t *testing.T, fs FS) { - f, err := fs.OpenFile(filename, os.O_RDONLY, 0) + f, err := fs.OpenFile(filename, O_RDONLY, true) if err != nil { t.Fatal(err) } @@ -295,7 +295,7 
@@ func TestFSReaderMinFileSize(t *testing.T) { AllowEmptyFile: test.allowEmpty, } - f, err := fs.OpenFile("testfile", os.O_RDONLY, 0) + f, err := fs.OpenFile("testfile", O_RDONLY, false) if err != nil { t.Fatal(err) } diff --git a/internal/fs/fs_track.go b/internal/fs/fs_track.go index 366bbee76..9ebdbb8c4 100644 --- a/internal/fs/fs_track.go +++ b/internal/fs/fs_track.go @@ -16,8 +16,8 @@ type Track struct { } // OpenFile wraps the OpenFile method of the underlying file system. -func (fs Track) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, err := fs.FS.OpenFile(fixpath(name), flag, perm) +func (fs Track) OpenFile(name string, flag int, metadataOnly bool) (File, error) { + f, err := fs.FS.OpenFile(name, flag, metadataOnly) if err != nil { return nil, err } @@ -31,7 +31,7 @@ type trackFile struct { func newTrackFile(stack []byte, filename string, file File) *trackFile { f := &trackFile{file} - runtime.SetFinalizer(f, func(_ *trackFile) { + runtime.SetFinalizer(f, func(_ any) { fmt.Fprintf(os.Stderr, "file %s not closed\n\nStacktrack:\n%s\n", filename, stack) panic("file " + filename + " not closed") }) diff --git a/internal/fs/interface.go b/internal/fs/interface.go index 2967429c0..d75b0a91d 100644 --- a/internal/fs/interface.go +++ b/internal/fs/interface.go @@ -2,19 +2,22 @@ package fs import ( "io" - "os" "github.com/restic/restic/internal/restic" ) // FS bundles all methods needed for a file system. type FS interface { - OpenFile(name string, flag int, perm os.FileMode) (File, error) - Stat(name string) (os.FileInfo, error) - Lstat(name string) (os.FileInfo, error) - DeviceID(fi os.FileInfo) (deviceID uint64, err error) - ExtendedStat(fi os.FileInfo) ExtendedFileInfo - NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) + // OpenFile opens a file or directory for reading. + // + // If metadataOnly is set, an implementation MUST return a File object for + // arbitrary file types including symlinks. The implementation may internally use + // the given file path or a file handle. In particular, an implementation may + // delay actually accessing the underlying filesystem. + // + // Only the O_NOFOLLOW and O_DIRECTORY flags are supported. + OpenFile(name string, flag int, metadataOnly bool) (File, error) + Lstat(name string) (*ExtendedFileInfo, error) Join(elem ...string) string Separator() string @@ -27,11 +30,23 @@ type FS interface { Base(path string) string } -// File is an open file on a file system. +// File is an open file on a file system. When opened as metadataOnly, an +// implementation may opt to perform filesystem operations using the filepath +// instead of actually opening the file. type File interface { + // MakeReadable reopens a File that was opened metadataOnly for reading. + // The method must not be called for files that are opened for reading. + // If possible, the underlying file should be reopened atomically. + // MakeReadable must work for files and directories. + MakeReadable() error + io.Reader io.Closer Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) + Stat() (*ExtendedFileInfo, error) + // ToNode returns a restic.Node for the File. The internally used os.FileInfo + // must be consistent with that returned by Stat(). In particular, the metadata + // returned by consecutive calls to Stat() and ToNode() must match. 
+ ToNode(ignoreXattrListError bool) (*restic.Node, error) } diff --git a/internal/fs/node.go b/internal/fs/node.go index d36194322..058d9cc7b 100644 --- a/internal/fs/node.go +++ b/internal/fs/node.go @@ -1,6 +1,7 @@ package fs import ( + "fmt" "os" "os/user" "strconv" @@ -14,40 +15,36 @@ import ( // nodeFromFileInfo returns a new node from the given path and FileInfo. It // returns the first error that is encountered, together with a node. -func nodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { +func nodeFromFileInfo(path string, fi *ExtendedFileInfo, ignoreXattrListError bool) (*restic.Node, error) { node := buildBasicNode(path, fi) - stat := ExtendedStat(fi) - if err := nodeFillExtendedStat(node, path, &stat); err != nil { + if err := nodeFillExtendedStat(node, path, fi); err != nil { return node, err } - allowExtended, err := nodeFillGenericAttributes(node, path, &stat) - if allowExtended { - // Skip processing ExtendedAttributes if allowExtended is false. - err = errors.Join(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) - } + err := nodeFillGenericAttributes(node, path, fi) + err = errors.Join(err, nodeFillExtendedAttributes(node, path, ignoreXattrListError)) return node, err } -func buildBasicNode(path string, fi os.FileInfo) *restic.Node { +func buildBasicNode(path string, fi *ExtendedFileInfo) *restic.Node { mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky node := &restic.Node{ Path: path, - Name: fi.Name(), - Mode: fi.Mode() & mask, - ModTime: fi.ModTime(), + Name: fi.Name, + Mode: fi.Mode & mask, + ModTime: fi.ModTime, } - node.Type = nodeTypeFromFileInfo(fi) + node.Type = nodeTypeFromFileInfo(fi.Mode) if node.Type == restic.NodeTypeFile { - node.Size = uint64(fi.Size()) + node.Size = uint64(fi.Size) } return node } -func nodeTypeFromFileInfo(fi os.FileInfo) restic.NodeType { - switch fi.Mode() & os.ModeType { +func nodeTypeFromFileInfo(mode os.FileMode) restic.NodeType { + switch mode & os.ModeType { case 0: return restic.NodeTypeFile case os.ModeDir: @@ -296,7 +293,7 @@ func nodeRestoreTimestamps(node *restic.Node, path string) error { mtime := node.ModTime.UnixNano() if err := utimesNano(fixpath(path), atime, mtime, node.Type); err != nil { - return &os.PathError{Op: "UtimesNano", Path: path, Err: err} + return fmt.Errorf("failed to restore timestamp of %q: %w", path, err) } return nil } diff --git a/internal/fs/node_aix.go b/internal/fs/node_aix.go deleted file mode 100644 index fd185724f..000000000 --- a/internal/fs/node_aix.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build aix -// +build aix - -package fs - -import "github.com/restic/restic/internal/restic" - -// nodeRestoreExtendedAttributes is a no-op on AIX. -func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { - return nil -} - -// nodeFillExtendedAttributes is a no-op on AIX. -func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { - return nil -} - -// nodeRestoreGenericAttributes is no-op on AIX. -func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { - return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) -} - -// nodeFillGenericAttributes is a no-op on AIX. 
-func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { - return true, nil -} diff --git a/internal/fs/node_netbsd.go b/internal/fs/node_netbsd.go deleted file mode 100644 index d295bf579..000000000 --- a/internal/fs/node_netbsd.go +++ /dev/null @@ -1,23 +0,0 @@ -package fs - -import "github.com/restic/restic/internal/restic" - -// nodeRestoreExtendedAttributes is a no-op on netbsd. -func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { - return nil -} - -// nodeFillExtendedAttributes is a no-op on netbsd. -func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { - return nil -} - -// nodeRestoreGenericAttributes is no-op on netbsd. -func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { - return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) -} - -// nodeFillGenericAttributes is a no-op on netbsd. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { - return true, nil -} diff --git a/internal/fs/node_noxattr.go b/internal/fs/node_noxattr.go new file mode 100644 index 000000000..281a16dbd --- /dev/null +++ b/internal/fs/node_noxattr.go @@ -0,0 +1,18 @@ +//go:build aix || dragonfly || netbsd || openbsd +// +build aix dragonfly netbsd openbsd + +package fs + +import ( + "github.com/restic/restic/internal/restic" +) + +// nodeRestoreExtendedAttributes is a no-op +func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { + return nil +} + +// nodeFillExtendedAttributes is a no-op +func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { + return nil +} diff --git a/internal/fs/node_openbsd.go b/internal/fs/node_openbsd.go deleted file mode 100644 index 712b144b4..000000000 --- a/internal/fs/node_openbsd.go +++ /dev/null @@ -1,23 +0,0 @@ -package fs - -import "github.com/restic/restic/internal/restic" - -// nodeRestoreExtendedAttributes is a no-op on openbsd. -func nodeRestoreExtendedAttributes(_ *restic.Node, _ string) error { - return nil -} - -// nodeFillExtendedAttributes is a no-op on openbsd. -func nodeFillExtendedAttributes(_ *restic.Node, _ string, _ bool) error { - return nil -} - -// nodeRestoreGenericAttributes is no-op on openbsd. -func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { - return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) -} - -// fillGenericAttributes is a no-op on openbsd. 
-func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { - return true, nil -} diff --git a/internal/fs/node_test.go b/internal/fs/node_test.go index 58facceb1..65098e304 100644 --- a/internal/fs/node_test.go +++ b/internal/fs/node_test.go @@ -17,56 +17,26 @@ import ( rtest "github.com/restic/restic/internal/test" ) -func BenchmarkNodeFillUser(t *testing.B) { - tempfile, err := os.CreateTemp("", "restic-test-temp-") - if err != nil { - t.Fatal(err) - } - - fi, err := tempfile.Stat() - if err != nil { - t.Fatal(err) - } - +func BenchmarkNodeFromFileInfo(t *testing.B) { + tempfile, err := os.CreateTemp(t.TempDir(), "restic-test-temp-") + rtest.OK(t, err) path := tempfile.Name() + rtest.OK(t, tempfile.Close()) + fs := Local{} + f, err := fs.OpenFile(path, O_NOFOLLOW, true) + rtest.OK(t, err) + _, err = f.Stat() + rtest.OK(t, err) t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := fs.NodeFromFileInfo(path, fi, false) + _, err := f.ToNode(false) rtest.OK(t, err) } - rtest.OK(t, tempfile.Close()) - rtest.RemoveAll(t, tempfile.Name()) -} - -func BenchmarkNodeFromFileInfo(t *testing.B) { - tempfile, err := os.CreateTemp("", "restic-test-temp-") - if err != nil { - t.Fatal(err) - } - - fi, err := tempfile.Stat() - if err != nil { - t.Fatal(err) - } - - path := tempfile.Name() - fs := Local{} - - t.ResetTimer() - - for i := 0; i < t.N; i++ { - _, err := fs.NodeFromFileInfo(path, fi, false) - if err != nil { - t.Fatal(err) - } - } - - rtest.OK(t, tempfile.Close()) - rtest.RemoveAll(t, tempfile.Name()) + rtest.OK(t, f.Close()) } func parseTime(s string) time.Time { @@ -249,14 +219,14 @@ func TestNodeRestoreAt(t *testing.T) { rtest.OK(t, NodeCreateAt(&test, nodePath)) rtest.OK(t, NodeRestoreMetadata(&test, nodePath, func(msg string) { rtest.OK(t, fmt.Errorf("Warning triggered for path: %s: %s", nodePath, msg)) })) - fi, err := os.Lstat(nodePath) - rtest.OK(t, err) - fs := &Local{} - n2, err := fs.NodeFromFileInfo(nodePath, fi, false) + meta, err := fs.OpenFile(nodePath, O_NOFOLLOW, true) rtest.OK(t, err) - n3, err := fs.NodeFromFileInfo(nodePath, fi, true) + n2, err := meta.ToNode(false) rtest.OK(t, err) + n3, err := meta.ToNode(true) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) rtest.Assert(t, n2.Equals(*n3), "unexpected node info mismatch %v", cmp.Diff(n2, n3)) rtest.Assert(t, test.Name == n2.Name, diff --git a/internal/fs/node_unix.go b/internal/fs/node_unix.go index 5f08f3623..e88e54251 100644 --- a/internal/fs/node_unix.go +++ b/internal/fs/node_unix.go @@ -5,8 +5,20 @@ package fs import ( "os" + + "github.com/restic/restic/internal/restic" ) func lchown(name string, uid, gid int) error { return os.Lchown(name, uid, gid) } + +// nodeRestoreGenericAttributes is no-op. +func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { + return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) +} + +// nodeFillGenericAttributes is a no-op. 
+func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) error { + return nil +} diff --git a/internal/fs/node_unix_test.go b/internal/fs/node_unix_test.go index 6b47eafba..1eb1ee506 100644 --- a/internal/fs/node_unix_test.go +++ b/internal/fs/node_unix_test.go @@ -114,16 +114,14 @@ func TestNodeFromFileInfo(t *testing.T) { return } - if fi.Sys() == nil { - t.Skip("fi.Sys() is nil") - return - } - fs := &Local{} - node, err := fs.NodeFromFileInfo(test.filename, fi, false) - if err != nil { - t.Fatal(err) - } + meta, err := fs.OpenFile(test.filename, O_NOFOLLOW, true) + rtest.OK(t, err) + node, err := meta.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, meta.Close()) + + rtest.OK(t, err) switch node.Type { case restic.NodeTypeFile, restic.NodeTypeSymlink: diff --git a/internal/fs/node_windows.go b/internal/fs/node_windows.go index 9ea813eb1..74cf6c0e5 100644 --- a/internal/fs/node_windows.go +++ b/internal/fs/node_windows.go @@ -83,8 +83,28 @@ func nodeRestoreExtendedAttributes(node *restic.Node, path string) (err error) { return nil } -// fill extended attributes in the node. This also includes the Generic attributes for windows. +// fill extended attributes in the node +// It also checks if the volume supports extended attributes and stores the result in a map +// so that it does not have to be checked again for subsequent calls for paths in the same volume. func nodeFillExtendedAttributes(node *restic.Node, path string, _ bool) (err error) { + if strings.Contains(filepath.Base(path), ":") { + // Do not process for Alternate Data Streams in Windows + return nil + } + + // only capture xattrs for file/dir + if node.Type != restic.NodeTypeFile && node.Type != restic.NodeTypeDir { + return nil + } + + allowExtended, err := checkAndStoreEASupport(path) + if err != nil { + return err + } + if !allowExtended { + return nil + } + var fileHandle windows.Handle if fileHandle, err = openHandleForEA(node.Type, path, false); fileHandle == 0 { return nil @@ -316,41 +336,32 @@ func decryptFile(pathPointer *uint16) error { // nodeFillGenericAttributes fills in the generic attributes for windows like File Attributes, // Created time and Security Descriptors. -// It also checks if the volume supports extended attributes and stores the result in a map -// so that it does not have to be checked again for subsequent calls for paths in the same volume. -func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFileInfo) (allowExtended bool, err error) { +func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFileInfo) error { if strings.Contains(filepath.Base(path), ":") { // Do not process for Alternate Data Streams in Windows - // Also do not allow processing of extended attributes for ADS. - return false, nil + return nil } - if strings.HasSuffix(filepath.Clean(path), `\`) { - // filepath.Clean(path) ends with '\' for Windows root volume paths only + isVolume, err := isVolumePath(path) + if err != nil { + return err + } + if isVolume { // Do not process file attributes, created time and sd for windows root volume paths // Security descriptors are not supported for root volume paths. // Though file attributes and created time are supported for root volume paths, // we ignore them and we do not want to replace them during every restore. 
- allowExtended, err = checkAndStoreEASupport(path) - if err != nil { - return false, err - } - return allowExtended, nil + return nil } var sd *[]byte if node.Type == restic.NodeTypeFile || node.Type == restic.NodeTypeDir { - // Check EA support and get security descriptor for file/dir only - allowExtended, err = checkAndStoreEASupport(path) - if err != nil { - return false, err - } if sd, err = getSecurityDescriptor(path); err != nil { - return allowExtended, err + return err } } - winFI := stat.Sys().(*syscall.Win32FileAttributeData) + winFI := stat.sys.(*syscall.Win32FileAttributeData) // Add Windows attributes node.GenericAttributes, err = restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{ @@ -358,7 +369,7 @@ func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFil FileAttributes: &winFI.FileAttributes, SecurityDescriptor: sd, }) - return allowExtended, err + return err } // checkAndStoreEASupport checks if the volume of the path supports extended attributes and stores the result in a map @@ -420,6 +431,35 @@ func checkAndStoreEASupport(path string) (isEASupportedVolume bool, err error) { return isEASupportedVolume, err } +// getVolumePathName returns the volume path name for the given path. +func getVolumePathName(path string) (volumeName string, err error) { + utf16Path, err := windows.UTF16PtrFromString(path) + if err != nil { + return "", err + } + // Get the volume path (e.g., "D:") + var volumePath [windows.MAX_PATH + 1]uint16 + err = windows.GetVolumePathName(utf16Path, &volumePath[0], windows.MAX_PATH+1) + if err != nil { + return "", err + } + // Trim any trailing backslashes + volumeName = strings.TrimRight(windows.UTF16ToString(volumePath[:]), "\\") + return volumeName, nil +} + +// isVolumePath returns whether a path refers to a volume +func isVolumePath(path string) (bool, error) { + volName, err := prepareVolumeName(path) + if err != nil { + return false, err + } + + cleanPath := filepath.Clean(path) + cleanVolume := filepath.Clean(volName + `\`) + return cleanPath == cleanVolume, nil +} + // prepareVolumeName prepares the volume name for different cases in Windows func prepareVolumeName(path string) (volumeName string, err error) { // Check if it's an extended length path diff --git a/internal/fs/node_windows_test.go b/internal/fs/node_windows_test.go index 730740fe0..f75df54d3 100644 --- a/internal/fs/node_windows_test.go +++ b/internal/fs/node_windows_test.go @@ -222,11 +222,11 @@ func restoreAndGetNode(t *testing.T, tempDir string, testNode *restic.Node, warn test.OK(t, errors.Wrapf(err, "Failed to restore metadata for: %s", testPath)) fs := &Local{} - fi, err := fs.Lstat(testPath) - test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", testPath)) - - nodeFromFileInfo, err := fs.NodeFromFileInfo(testPath, fi, false) + meta, err := fs.OpenFile(testPath, O_NOFOLLOW, true) + test.OK(t, err) + nodeFromFileInfo, err := meta.ToNode(false) test.OK(t, errors.Wrapf(err, "Could not get NodeFromFileInfo for path: %s", testPath)) + test.OK(t, meta.Close()) return testPath, nodeFromFileInfo } @@ -451,10 +451,17 @@ func TestPrepareVolumeName(t *testing.T) { expectError: false, expectedEASupported: false, }, + { + name: "Volume Shadow Copy root", + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555`, + expectError: false, + expectedEASupported: false, + }, { name: "Volume Shadow Copy path", - path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1\Users\test`, 
- expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy1`, + path: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555\Users\test`, + expectedVolume: `\\?\GLOBALROOT\Device\HarddiskVolumeShadowCopy5555`, expectError: false, expectedEASupported: false, }, @@ -526,3 +533,46 @@ func getOSVolumeGUIDPath(t *testing.T) string { return windows.UTF16ToString(volumeGUID[:]) } + +func TestGetVolumePathName(t *testing.T) { + tempDirVolume := filepath.VolumeName(os.TempDir()) + testCases := []struct { + name string + path string + expectedPrefix string + }{ + { + name: "Root directory", + path: os.Getenv("SystemDrive") + `\`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Nested directory", + path: os.Getenv("SystemDrive") + `\Windows\System32`, + expectedPrefix: os.Getenv("SystemDrive"), + }, + { + name: "Temp directory", + path: os.TempDir() + `\`, + expectedPrefix: tempDirVolume, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + volumeName, err := getVolumePathName(tc.path) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if !strings.HasPrefix(volumeName, tc.expectedPrefix) { + t.Errorf("Expected volume name to start with %s, but got %s", tc.expectedPrefix, volumeName) + } + }) + } + + // Test with an invalid path + _, err := getVolumePathName("Z:\\NonExistentPath") + if err == nil { + t.Error("Expected an error for non-existent path, but got nil") + } +} diff --git a/internal/fs/node_xattr.go b/internal/fs/node_xattr.go index 1781452f7..e1ddf9826 100644 --- a/internal/fs/node_xattr.go +++ b/internal/fs/node_xattr.go @@ -65,16 +65,6 @@ func handleXattrErr(err error) error { } } -// nodeRestoreGenericAttributes is no-op. -func nodeRestoreGenericAttributes(node *restic.Node, _ string, warn func(msg string)) error { - return restic.HandleAllUnknownGenericAttributesFound(node.GenericAttributes, warn) -} - -// nodeFillGenericAttributes is a no-op. -func nodeFillGenericAttributes(_ *restic.Node, _ string, _ *ExtendedFileInfo) (allowExtended bool, err error) { - return true, nil -} - func nodeRestoreExtendedAttributes(node *restic.Node, path string) error { expectedAttrs := map[string]struct{}{} for _, attr := range node.ExtendedAttributes { diff --git a/internal/fs/sd_windows.go b/internal/fs/sd_windows.go index 66d9bcb54..6bffa4fe2 100644 --- a/internal/fs/sd_windows.go +++ b/internal/fs/sd_windows.go @@ -54,6 +54,15 @@ func getSecurityDescriptor(filePath string) (securityDescriptor *[]byte, err err sd, err = getNamedSecurityInfoLow(filePath) } else { sd, err = getNamedSecurityInfoHigh(filePath) + // Fallback to the low privilege version when receiving an access denied error. + // For some reason the ERROR_PRIVILEGE_NOT_HELD error is not returned for removable media + // but instead an access denied error is returned. Workaround that by just retrying with + // the low privilege version, but don't switch privileges as we cannot distinguish this + // case from actual access denied errors. 
+ // see https://github.com/restic/restic/issues/5003#issuecomment-2452314191 for details + if err != nil && isAccessDeniedError(err) { + sd, err = getNamedSecurityInfoLow(filePath) + } } if err != nil { if !useLowerPrivileges && isHandlePrivilegeNotHeldError(err) { @@ -114,6 +123,10 @@ func setSecurityDescriptor(filePath string, securityDescriptor *[]byte) error { err = setNamedSecurityInfoLow(filePath, dacl) } else { err = setNamedSecurityInfoHigh(filePath, owner, group, dacl, sacl) + // See corresponding fallback in getSecurityDescriptor for an explanation + if err != nil && isAccessDeniedError(err) { + err = setNamedSecurityInfoLow(filePath, dacl) + } } if err != nil { @@ -174,6 +187,15 @@ func isHandlePrivilegeNotHeldError(err error) bool { return false } +// isAccessDeniedError checks if the error is ERROR_ACCESS_DENIED +func isAccessDeniedError(err error) bool { + if errno, ok := err.(syscall.Errno); ok { + // Compare the error code to the expected value + return errno == windows.ERROR_ACCESS_DENIED + } + return false +} + // securityDescriptorBytesToStruct converts the security descriptor bytes representation // into a pointer to windows SECURITY_DESCRIPTOR. func securityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, error) { diff --git a/internal/fs/stat.go b/internal/fs/stat.go index e1006fd61..bd3993f41 100644 --- a/internal/fs/stat.go +++ b/internal/fs/stat.go @@ -8,7 +8,8 @@ import ( // ExtendedFileInfo is an extended stat_t, filled with attributes that are // supported by most operating systems. The original FileInfo is embedded. type ExtendedFileInfo struct { - os.FileInfo + Name string + Mode os.FileMode DeviceID uint64 // ID of device containing the file Inode uint64 // Inode number @@ -23,10 +24,13 @@ type ExtendedFileInfo struct { AccessTime time.Time // last access time stamp ModTime time.Time // last (content) modification time stamp ChangeTime time.Time // last status change time stamp + + // nolint:unused // only used on Windows + sys any // Value returned by os.FileInfo.Sys() } // ExtendedStat returns an ExtendedFileInfo constructed from the os.FileInfo. -func ExtendedStat(fi os.FileInfo) ExtendedFileInfo { +func ExtendedStat(fi os.FileInfo) *ExtendedFileInfo { if fi == nil { panic("os.FileInfo is nil") } diff --git a/internal/fs/stat_bsd.go b/internal/fs/stat_bsd.go index 11e075b50..165064153 100644 --- a/internal/fs/stat_bsd.go +++ b/internal/fs/stat_bsd.go @@ -10,11 +10,13 @@ import ( ) // extendedStat extracts info into an ExtendedFileInfo for unix based operating systems. -func extendedStat(fi os.FileInfo) ExtendedFileInfo { +func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s := fi.Sys().(*syscall.Stat_t) - extFI := ExtendedFileInfo{ - FileInfo: fi, + return &ExtendedFileInfo{ + Name: fi.Name(), + Mode: fi.Mode(), + DeviceID: uint64(s.Dev), Inode: uint64(s.Ino), Links: uint64(s.Nlink), @@ -29,6 +31,4 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { ModTime: time.Unix(s.Mtimespec.Unix()), ChangeTime: time.Unix(s.Ctimespec.Unix()), } - - return extFI } diff --git a/internal/fs/stat_unix.go b/internal/fs/stat_unix.go index c55571031..723ac8b19 100644 --- a/internal/fs/stat_unix.go +++ b/internal/fs/stat_unix.go @@ -10,11 +10,13 @@ import ( ) // extendedStat extracts info into an ExtendedFileInfo for unix based operating systems. 
-func extendedStat(fi os.FileInfo) ExtendedFileInfo { +func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s := fi.Sys().(*syscall.Stat_t) - extFI := ExtendedFileInfo{ - FileInfo: fi, + return &ExtendedFileInfo{ + Name: fi.Name(), + Mode: fi.Mode(), + DeviceID: uint64(s.Dev), Inode: s.Ino, Links: uint64(s.Nlink), @@ -29,6 +31,4 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { ModTime: time.Unix(s.Mtim.Unix()), ChangeTime: time.Unix(s.Ctim.Unix()), } - - return extFI } diff --git a/internal/fs/stat_windows.go b/internal/fs/stat_windows.go index 57f330fb5..a2dfa5f6d 100644 --- a/internal/fs/stat_windows.go +++ b/internal/fs/stat_windows.go @@ -11,15 +11,18 @@ import ( ) // extendedStat extracts info into an ExtendedFileInfo for Windows. -func extendedStat(fi os.FileInfo) ExtendedFileInfo { +func extendedStat(fi os.FileInfo) *ExtendedFileInfo { s, ok := fi.Sys().(*syscall.Win32FileAttributeData) if !ok { panic(fmt.Sprintf("conversion to syscall.Win32FileAttributeData failed, type is %T", fi.Sys())) } extFI := ExtendedFileInfo{ - FileInfo: fi, - Size: int64(s.FileSizeLow) | (int64(s.FileSizeHigh) << 32), + Name: fi.Name(), + Mode: fi.Mode(), + + Size: int64(s.FileSizeLow) | (int64(s.FileSizeHigh) << 32), + sys: fi.Sys(), } atime := syscall.NsecToTimespec(s.LastAccessTime.Nanoseconds()) @@ -31,5 +34,5 @@ func extendedStat(fi os.FileInfo) ExtendedFileInfo { // Windows does not have the concept of a "change time" in the sense Unix uses it, so we're using the LastWriteTime here. extFI.ChangeTime = extFI.ModTime - return extFI + return &extFI } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 7281e0210..840e97107 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -171,6 +171,11 @@ func (h HRESULT) Str() string { return "UNKNOWN" } +// Error implements the error interface +func (h HRESULT) Error() string { + return h.Str() +} + // VssError encapsulates errors returned from calling VSS api. type vssError struct { text string @@ -195,6 +200,11 @@ func (e *vssError) Error() string { return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult) } +// Unwrap returns the underlying HRESULT error +func (e *vssError) Unwrap() error { + return e.hresult +} + // vssTextError encapsulates errors returned from calling VSS api. 
type vssTextError struct { text string @@ -943,10 +953,23 @@ func NewVssSnapshot(provider string, "%s", volume)) } - snapshotSetID, err := iVssBackupComponents.StartSnapshotSet() - if err != nil { - iVssBackupComponents.Release() - return VssSnapshot{}, err + const retryStartSnapshotSetSleep = 5 * time.Second + var snapshotSetID ole.GUID + for { + var err error + snapshotSetID, err = iVssBackupComponents.StartSnapshotSet() + if errors.Is(err, VSS_E_SNAPSHOT_SET_IN_PROGRESS) && time.Now().Add(-retryStartSnapshotSetSleep).Before(deadline) { + // retry snapshot set creation while deadline is not reached + time.Sleep(retryStartSnapshotSetSleep) + continue + } + + if err != nil { + iVssBackupComponents.Release() + return VssSnapshot{}, err + } else { + break + } } if err := iVssBackupComponents.AddToSnapshotSet(volume, providerID, &snapshotSetID); err != nil { diff --git a/internal/fuse/dir.go b/internal/fuse/dir.go index 62298cf24..a0317a757 100644 --- a/internal/fuse/dir.go +++ b/internal/fuse/dir.go @@ -20,29 +20,36 @@ import ( // Statically ensure that *dir implement those interface var _ = fs.HandleReadDirAller(&dir{}) +var _ = fs.NodeForgetter(&dir{}) +var _ = fs.NodeGetxattrer(&dir{}) +var _ = fs.NodeListxattrer(&dir{}) var _ = fs.NodeStringLookuper(&dir{}) type dir struct { root *Root + forget forgetFn items map[string]*restic.Node inode uint64 parentInode uint64 node *restic.Node m sync.Mutex + cache treeCache } func cleanupNodeName(name string) string { return filepath.Base(name) } -func newDir(root *Root, inode, parentInode uint64, node *restic.Node) (*dir, error) { +func newDir(root *Root, forget forgetFn, inode, parentInode uint64, node *restic.Node) (*dir, error) { debug.Log("new dir for %v (%v)", node.Name, node.Subtree) return &dir{ root: root, + forget: forget, node: node, inode: inode, parentInode: parentInode, + cache: *newTreeCache(), }, nil } @@ -75,10 +82,11 @@ func replaceSpecialNodes(ctx context.Context, repo restic.BlobLoader, node *rest return tree.Nodes, nil } -func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*dir, error) { +func newDirFromSnapshot(root *Root, forget forgetFn, inode uint64, snapshot *restic.Snapshot) (*dir, error) { debug.Log("new dir for snapshot %v (%v)", snapshot.ID(), snapshot.Tree) return &dir{ - root: root, + root: root, + forget: forget, node: &restic.Node{ AccessTime: snapshot.Time, ModTime: snapshot.Time, @@ -87,6 +95,7 @@ func newDirFromSnapshot(root *Root, inode uint64, snapshot *restic.Snapshot) (*d Subtree: snapshot.Tree, }, inode: inode, + cache: *newTreeCache(), }, nil } @@ -208,25 +217,27 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) { return nil, err } - node, ok := d.items[name] - if !ok { - debug.Log(" Lookup(%v) -> not found", name) - return nil, syscall.ENOENT - } - inode := inodeFromNode(d.inode, node) - switch node.Type { - case restic.NodeTypeDir: - return newDir(d.root, inode, d.inode, node) - case restic.NodeTypeFile: - return newFile(d.root, inode, node) - case restic.NodeTypeSymlink: - return newLink(d.root, inode, node) - case restic.NodeTypeDev, restic.NodeTypeCharDev, restic.NodeTypeFifo, restic.NodeTypeSocket: - return newOther(d.root, inode, node) - default: - debug.Log(" node %v has unknown type %v", name, node.Type) - return nil, syscall.ENOENT - } + return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) { + node, ok := d.items[name] + if !ok { + debug.Log(" Lookup(%v) -> not found", name) + return nil, syscall.ENOENT + } + inode := 
inodeFromNode(d.inode, node) + switch node.Type { + case restic.NodeTypeDir: + return newDir(d.root, forget, inode, d.inode, node) + case restic.NodeTypeFile: + return newFile(d.root, forget, inode, node) + case restic.NodeTypeSymlink: + return newLink(d.root, forget, inode, node) + case restic.NodeTypeDev, restic.NodeTypeCharDev, restic.NodeTypeFifo, restic.NodeTypeSocket: + return newOther(d.root, forget, inode, node) + default: + debug.Log(" node %v has unknown type %v", name, node.Type) + return nil, syscall.ENOENT + } + }) } func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error { @@ -237,3 +248,7 @@ func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fus func (d *dir) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(d.node, req, resp) } + +func (d *dir) Forget() { + d.forget() +} diff --git a/internal/fuse/file.go b/internal/fuse/file.go index 494fca283..a69471f83 100644 --- a/internal/fuse/file.go +++ b/internal/fuse/file.go @@ -20,14 +20,16 @@ const blockSize = 512 // Statically ensure that *file and *openFile implement the given interfaces var _ = fs.HandleReader(&openFile{}) -var _ = fs.NodeListxattrer(&file{}) +var _ = fs.NodeForgetter(&file{}) var _ = fs.NodeGetxattrer(&file{}) +var _ = fs.NodeListxattrer(&file{}) var _ = fs.NodeOpener(&file{}) type file struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } type openFile struct { @@ -36,12 +38,13 @@ type openFile struct { cumsize []uint64 } -func newFile(root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) { +func newFile(root *Root, forget forgetFn, inode uint64, node *restic.Node) (fusefile *file, err error) { debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content)) return &file{ - inode: inode, - root: root, - node: node, + inode: inode, + forget: forget, + root: root, + node: node, }, nil } @@ -172,3 +175,7 @@ func (f *file) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu func (f *file) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(f.node, req, resp) } + +func (f *file) Forget() { + f.forget() +} diff --git a/internal/fuse/fuse_test.go b/internal/fuse/fuse_test.go index bbdfe6b3f..3c0648bc6 100644 --- a/internal/fuse/fuse_test.go +++ b/internal/fuse/fuse_test.go @@ -119,7 +119,7 @@ func TestFuseFile(t *testing.T) { root := &Root{repo: repo, blobCache: bloblru.New(blobCacheSize)} inode := inodeFromNode(1, node) - f, err := newFile(root, inode, node) + f, err := newFile(root, func() {}, inode, node) rtest.OK(t, err) of, err := f.Open(context.TODO(), nil, nil) rtest.OK(t, err) @@ -162,7 +162,7 @@ func TestFuseDir(t *testing.T) { } parentInode := inodeFromName(0, "parent") inode := inodeFromName(1, "foo") - d, err := newDir(root, inode, parentInode, node) + d, err := newDir(root, func() {}, inode, parentInode, node) rtest.OK(t, err) // don't open the directory as that would require setting up a proper tree blob @@ -217,6 +217,34 @@ func testTopUIDGID(t *testing.T, cfg Config, repo restic.Repository, uid, gid ui rtest.Equals(t, uint32(0), attr.Gid) } +// The Lookup method must return the same Node object unless it was forgotten in the meantime +func testStableLookup(t *testing.T, node fs.Node, path string) fs.Node { + t.Helper() + result, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), 
path) + rtest.OK(t, err) + result2, err := node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + rtest.Assert(t, result == result2, "%v are not the same object", path) + + result2.(fs.NodeForgetter).Forget() + result2, err = node.(fs.NodeStringLookuper).Lookup(context.TODO(), path) + rtest.OK(t, err) + rtest.Assert(t, result != result2, "object for %v should change after forget", path) + return result +} + +func TestStableNodeObjects(t *testing.T) { + repo := repository.TestRepository(t) + restic.TestCreateSnapshot(t, repo, time.Unix(1460289341, 207401672), 2) + root := NewRoot(repo, Config{}) + + idsdir := testStableLookup(t, root, "ids") + snapID := loadFirstSnapshot(t, repo).ID().Str() + snapshotdir := testStableLookup(t, idsdir, snapID) + dir := testStableLookup(t, snapshotdir, "dir-0") + testStableLookup(t, dir, "file-2") +} + // Test reporting of fuse.Attr.Blocks in multiples of 512. func TestBlocks(t *testing.T) { root := &Root{} @@ -276,7 +304,7 @@ func TestLink(t *testing.T) { {Name: "foo", Value: []byte("bar")}, }} - lnk, err := newLink(&Root{}, 42, node) + lnk, err := newLink(&Root{}, func() {}, 42, node) rtest.OK(t, err) target, err := lnk.Readlink(context.TODO(), nil) rtest.OK(t, err) diff --git a/internal/fuse/link.go b/internal/fuse/link.go index 3aea8b06e..f8bf8d3ee 100644 --- a/internal/fuse/link.go +++ b/internal/fuse/link.go @@ -12,16 +12,20 @@ import ( ) // Statically ensure that *link implements the given interface +var _ = fs.NodeForgetter(&link{}) +var _ = fs.NodeGetxattrer(&link{}) +var _ = fs.NodeListxattrer(&link{}) var _ = fs.NodeReadlinker(&link{}) type link struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } -func newLink(root *Root, inode uint64, node *restic.Node) (*link, error) { - return &link{root: root, inode: inode, node: node}, nil +func newLink(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*link, error) { + return &link{root: root, forget: forget, inode: inode, node: node}, nil } func (l *link) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) { @@ -55,3 +59,7 @@ func (l *link) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fu func (l *link) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error { return nodeGetXattr(l.node, req, resp) } + +func (l *link) Forget() { + l.forget() +} diff --git a/internal/fuse/other.go b/internal/fuse/other.go index f536de5c1..cbd9667cc 100644 --- a/internal/fuse/other.go +++ b/internal/fuse/other.go @@ -7,17 +7,23 @@ import ( "context" "github.com/anacrolix/fuse" + "github.com/anacrolix/fuse/fs" "github.com/restic/restic/internal/restic" ) +// Statically ensure that *other implements the given interface +var _ = fs.NodeForgetter(&other{}) +var _ = fs.NodeReadlinker(&other{}) + type other struct { - root *Root - node *restic.Node - inode uint64 + root *Root + forget forgetFn + node *restic.Node + inode uint64 } -func newOther(root *Root, inode uint64, node *restic.Node) (*other, error) { - return &other{root: root, inode: inode, node: node}, nil +func newOther(root *Root, forget forgetFn, inode uint64, node *restic.Node) (*other, error) { + return &other{root: root, forget: forget, inode: inode, node: node}, nil } func (l *other) Readlink(_ context.Context, _ *fuse.ReadlinkRequest) (string, error) { @@ -40,3 +46,7 @@ func (l *other) Attr(_ context.Context, a *fuse.Attr) error { return nil } + +func (l *other) Forget() { + l.forget() +} 
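The reworked `fs.FS` and `fs.File` interfaces earlier in this diff (`internal/fs/interface.go`) change how callers obtain metadata: `OpenFile` takes a `metadataOnly` flag, metadata comes from `Stat()`/`ToNode()` on the returned handle, and `MakeReadable()` reopens the handle when the content is actually needed. The following is a minimal usage sketch, not part of this PR: it assumes it is compiled inside the restic module (the `internal/` packages are not importable from outside), and the helper name `statThenRead` and the placeholder path are invented for illustration.

```go
// Sketch only: roughly how a caller such as the archiver might use the
// reworked interface. Error handling is kept deliberately simple.
package example

import (
	"io"

	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/restic"
)

func statThenRead(filesystem fs.FS, path string) (*restic.Node, error) {
	// Open in metadataOnly mode; per the interface comment this must work
	// for arbitrary file types, including symlinks.
	f, err := filesystem.OpenFile(path, fs.O_NOFOLLOW, true)
	if err != nil {
		return nil, err
	}
	defer func() { _ = f.Close() }()

	// Metadata first: Stat() and ToNode() are documented to be consistent
	// with each other.
	if _, err := f.Stat(); err != nil {
		return nil, err
	}
	node, err := f.ToNode(false)
	if err != nil {
		return nil, err
	}

	// Reopen for reading only if the file content is actually needed.
	if node.Type == restic.NodeTypeFile {
		if err := f.MakeReadable(); err != nil {
			return nil, err
		}
		if _, err := io.Copy(io.Discard, f); err != nil {
			return nil, err
		}
	}
	return node, nil
}
```

The tests touched in this diff (for example `BenchmarkNodeFromFileInfo` and `TestNodeRestoreAt`) follow the same open-metadata-only, `Stat`/`ToNode`, then `Close` pattern.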
diff --git a/internal/fuse/root.go b/internal/fuse/root.go index ab6116f0d..72a0634fc 100644 --- a/internal/fuse/root.go +++ b/internal/fuse/root.go @@ -66,7 +66,7 @@ func NewRoot(repo restic.Repository, cfg Config) *Root { } } - root.SnapshotsDir = NewSnapshotsDir(root, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "") + root.SnapshotsDir = NewSnapshotsDir(root, func() {}, rootInode, rootInode, NewSnapshotsDirStructure(root, cfg.PathTemplates, cfg.TimeTemplate), "") return root } diff --git a/internal/fuse/snapshots_dir.go b/internal/fuse/snapshots_dir.go index 4cae7106c..bcab16084 100644 --- a/internal/fuse/snapshots_dir.go +++ b/internal/fuse/snapshots_dir.go @@ -19,25 +19,30 @@ import ( // It uses the saved prefix to select the corresponding MetaDirData. type SnapshotsDir struct { root *Root + forget forgetFn inode uint64 parentInode uint64 dirStruct *SnapshotsDirStructure prefix string + cache treeCache } // ensure that *SnapshotsDir implements these interfaces var _ = fs.HandleReadDirAller(&SnapshotsDir{}) +var _ = fs.NodeForgetter(&SnapshotsDir{}) var _ = fs.NodeStringLookuper(&SnapshotsDir{}) // NewSnapshotsDir returns a new directory structure containing snapshots and "latest" links -func NewSnapshotsDir(root *Root, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir { +func NewSnapshotsDir(root *Root, forget forgetFn, inode, parentInode uint64, dirStruct *SnapshotsDirStructure, prefix string) *SnapshotsDir { debug.Log("create snapshots dir, inode %d", inode) return &SnapshotsDir{ root: root, + forget: forget, inode: inode, parentInode: parentInode, dirStruct: dirStruct, prefix: prefix, + cache: *newTreeCache(), } } @@ -107,33 +112,41 @@ func (d *SnapshotsDir) Lookup(ctx context.Context, name string) (fs.Node, error) return nil, syscall.ENOENT } - entry := meta.names[name] - if entry != nil { + return d.cache.lookupOrCreate(name, func(forget forgetFn) (fs.Node, error) { + entry := meta.names[name] + if entry == nil { + return nil, syscall.ENOENT + } + inode := inodeFromName(d.inode, name) if entry.linkTarget != "" { - return newSnapshotLink(d.root, inode, entry.linkTarget, entry.snapshot) + return newSnapshotLink(d.root, forget, inode, entry.linkTarget, entry.snapshot) } else if entry.snapshot != nil { - return newDirFromSnapshot(d.root, inode, entry.snapshot) + return newDirFromSnapshot(d.root, forget, inode, entry.snapshot) } - return NewSnapshotsDir(d.root, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil - } + return NewSnapshotsDir(d.root, forget, inode, d.inode, d.dirStruct, d.prefix+"/"+name), nil + }) +} - return nil, syscall.ENOENT +func (d *SnapshotsDir) Forget() { + d.forget() } // SnapshotLink type snapshotLink struct { root *Root + forget forgetFn inode uint64 target string snapshot *restic.Snapshot } +var _ = fs.NodeForgetter(&snapshotLink{}) var _ = fs.NodeReadlinker(&snapshotLink{}) // newSnapshotLink -func newSnapshotLink(root *Root, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) { - return &snapshotLink{root: root, inode: inode, target: target, snapshot: snapshot}, nil +func newSnapshotLink(root *Root, forget forgetFn, inode uint64, target string, snapshot *restic.Snapshot) (*snapshotLink, error) { + return &snapshotLink{root: root, forget: forget, inode: inode, target: target, snapshot: snapshot}, nil } // Readlink @@ -157,3 +170,7 @@ func (l *snapshotLink) Attr(_ context.Context, a *fuse.Attr) error { return nil } + +func (l *snapshotLink) 
Forget() { + l.forget() +} diff --git a/internal/fuse/tree_cache.go b/internal/fuse/tree_cache.go new file mode 100644 index 000000000..d913f9b81 --- /dev/null +++ b/internal/fuse/tree_cache.go @@ -0,0 +1,45 @@ +//go:build darwin || freebsd || linux +// +build darwin freebsd linux + +package fuse + +import ( + "sync" + + "github.com/anacrolix/fuse/fs" +) + +type treeCache struct { + nodes map[string]fs.Node + m sync.Mutex +} + +type forgetFn func() + +func newTreeCache() *treeCache { + return &treeCache{ + nodes: map[string]fs.Node{}, + } +} + +func (t *treeCache) lookupOrCreate(name string, create func(forget forgetFn) (fs.Node, error)) (fs.Node, error) { + t.m.Lock() + defer t.m.Unlock() + + if node, ok := t.nodes[name]; ok { + return node, nil + } + + node, err := create(func() { + t.m.Lock() + defer t.m.Unlock() + + delete(t.nodes, name) + }) + if err != nil { + return nil, err + } + + t.nodes[name] = node + return node, nil +} diff --git a/internal/restic/tree.go b/internal/restic/tree.go index c4125653b..f406b489f 100644 --- a/internal/restic/tree.go +++ b/internal/restic/tree.go @@ -162,7 +162,7 @@ func NewTreeJSONBuilder() *TreeJSONBuilder { func (builder *TreeJSONBuilder) AddNode(node *Node) error { if node.Name <= builder.lastName { - return fmt.Errorf("node %q, last%q: %w", node.Name, builder.lastName, ErrTreeNotOrdered) + return fmt.Errorf("node %q, last %q: %w", node.Name, builder.lastName, ErrTreeNotOrdered) } if builder.lastName != "" { _ = builder.buf.WriteByte(',') diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index f1979f135..07ca254f1 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -83,13 +83,17 @@ func TestNodeMarshal(t *testing.T) { } } -func TestNodeComparison(t *testing.T) { - fs := &fs.Local{} - fi, err := fs.Lstat("tree_test.go") +func nodeForFile(t *testing.T, name string) *restic.Node { + f, err := (&fs.Local{}).OpenFile(name, fs.O_NOFOLLOW, true) rtest.OK(t, err) + node, err := f.ToNode(false) + rtest.OK(t, err) + rtest.OK(t, f.Close()) + return node +} - node, err := fs.NodeFromFileInfo("tree_test.go", fi, false) - rtest.OK(t, err) +func TestNodeComparison(t *testing.T) { + node := nodeForFile(t, "tree_test.go") n2 := *node rtest.Assert(t, node.Equals(n2), "nodes aren't equal") @@ -127,11 +131,7 @@ func TestTreeEqualSerialization(t *testing.T) { builder := restic.NewTreeJSONBuilder() for _, fn := range files[:i] { - fs := &fs.Local{} - fi, err := fs.Lstat(fn) - rtest.OK(t, err) - node, err := fs.NodeFromFileInfo(fn, fi, false) - rtest.OK(t, err) + node := nodeForFile(t, fn) rtest.OK(t, tree.Insert(node)) rtest.OK(t, builder.AddNode(node)) diff --git a/internal/restorer/restorer.go b/internal/restorer/restorer.go index f28cd0ba3..14a8edeac 100644 --- a/internal/restorer/restorer.go +++ b/internal/restorer/restorer.go @@ -511,12 +511,30 @@ func (res *Restorer) removeUnexpectedFiles(ctx context.Context, target, location selectedForRestore, _ := res.SelectFilter(nodeLocation, false) // only delete files that were selected for restore if selectedForRestore { - res.opts.Progress.ReportDeletedFile(nodeLocation) + // First collect all files that will be deleted + var filesToDelete []string + err := filepath.Walk(nodeTarget, func(path string, _ os.FileInfo, err error) error { + if err != nil { + return err + } + filesToDelete = append(filesToDelete, path) + return nil + }) + if err != nil { + return err + } + if !res.opts.DryRun { + // Perform the deletion if err := fs.RemoveAll(nodeTarget); err != nil { return 
err } } + + // Report paths as deleted only after successful removal + for i := len(filesToDelete) - 1; i >= 0; i-- { + res.opts.Progress.ReportDeletion(filesToDelete[i]) + } } } diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index f4a76afd7..79da353eb 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -162,7 +162,7 @@ func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) { } // Finish prints the finishing messages. -func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { +func (b *JSONProgress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool) { id := "" // empty if snapshot creation was skipped if !snapshotID.IsNull() { @@ -182,7 +182,9 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *ar DataAddedPacked: summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo, TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged, TotalBytesProcessed: summary.ProcessedBytes, - TotalDuration: time.Since(start).Seconds(), + BackupStart: summary.BackupStart, + BackupEnd: summary.BackupEnd, + TotalDuration: summary.BackupEnd.Sub(summary.BackupStart).Seconds(), SnapshotID: id, DryRun: dryRun, }) @@ -229,20 +231,22 @@ type verboseUpdate struct { } type summaryOutput struct { - MessageType string `json:"message_type"` // "summary" - FilesNew uint `json:"files_new"` - FilesChanged uint `json:"files_changed"` - FilesUnmodified uint `json:"files_unmodified"` - DirsNew uint `json:"dirs_new"` - DirsChanged uint `json:"dirs_changed"` - DirsUnmodified uint `json:"dirs_unmodified"` - DataBlobs int `json:"data_blobs"` - TreeBlobs int `json:"tree_blobs"` - DataAdded uint64 `json:"data_added"` - DataAddedPacked uint64 `json:"data_added_packed"` - TotalFilesProcessed uint `json:"total_files_processed"` - TotalBytesProcessed uint64 `json:"total_bytes_processed"` - TotalDuration float64 `json:"total_duration"` // in seconds - SnapshotID string `json:"snapshot_id,omitempty"` - DryRun bool `json:"dry_run,omitempty"` + MessageType string `json:"message_type"` // "summary" + FilesNew uint `json:"files_new"` + FilesChanged uint `json:"files_changed"` + FilesUnmodified uint `json:"files_unmodified"` + DirsNew uint `json:"dirs_new"` + DirsChanged uint `json:"dirs_changed"` + DirsUnmodified uint `json:"dirs_unmodified"` + DataBlobs int `json:"data_blobs"` + TreeBlobs int `json:"tree_blobs"` + DataAdded uint64 `json:"data_added"` + DataAddedPacked uint64 `json:"data_added_packed"` + TotalFilesProcessed uint `json:"total_files_processed"` + TotalBytesProcessed uint64 `json:"total_bytes_processed"` + TotalDuration float64 `json:"total_duration"` // in seconds + BackupStart time.Time `json:"backup_start"` + BackupEnd time.Time `json:"backup_end"` + SnapshotID string `json:"snapshot_id,omitempty"` + DryRun bool `json:"dry_run,omitempty"` } diff --git a/internal/ui/backup/progress.go b/internal/ui/backup/progress.go index 24640d71f..318d30435 100644 --- a/internal/ui/backup/progress.go +++ b/internal/ui/backup/progress.go @@ -17,7 +17,7 @@ type ProgressPrinter interface { ScannerError(item string, err error) error CompleteItem(messageType string, item string, s archiver.ItemStats, d time.Duration) ReportTotal(start time.Time, s archiver.ScanStats) - Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) + Finish(snapshotID restic.ID, summary *archiver.Summary, dryRun bool) Reset() P(msg 
string, args ...interface{}) @@ -173,5 +173,5 @@ func (p *Progress) ReportTotal(item string, s archiver.ScanStats) { func (p *Progress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryrun bool) { // wait for the status update goroutine to shut down p.Updater.Done() - p.printer.Finish(snapshotID, p.start, summary, dryrun) + p.printer.Finish(snapshotID, summary, dryrun) } diff --git a/internal/ui/backup/progress_test.go b/internal/ui/backup/progress_test.go index 512fbab26..60e754b4a 100644 --- a/internal/ui/backup/progress_test.go +++ b/internal/ui/backup/progress_test.go @@ -33,7 +33,7 @@ func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.Item } func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {} -func (p *mockPrinter) Finish(id restic.ID, _ time.Time, _ *archiver.Summary, _ bool) { +func (p *mockPrinter) Finish(id restic.ID, _ *archiver.Summary, _ bool) { p.Lock() defer p.Unlock() diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go index 097f0d0d8..efd7ffdfe 100644 --- a/internal/ui/backup/text.go +++ b/internal/ui/backup/text.go @@ -130,7 +130,7 @@ func (b *TextProgress) Reset() { } // Finish prints the finishing messages. -func (b *TextProgress) Finish(id restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { +func (b *TextProgress) Finish(id restic.ID, summary *archiver.Summary, dryRun bool) { b.P("\n") b.P("Files: %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged) b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged) @@ -147,7 +147,7 @@ func (b *TextProgress) Finish(id restic.ID, start time.Time, summary *archiver.S b.P("processed %v files, %v in %s", summary.Files.New+summary.Files.Changed+summary.Files.Unchanged, ui.FormatBytes(summary.ProcessedBytes), - ui.FormatDuration(time.Since(start)), + ui.FormatDuration(summary.BackupEnd.Sub(summary.BackupStart)), ) if !dryRun { diff --git a/internal/ui/restore/json.go b/internal/ui/restore/json.go index 72cc38a6e..f7f7bdd1f 100644 --- a/internal/ui/restore/json.go +++ b/internal/ui/restore/json.go @@ -33,6 +33,7 @@ func (t *jsonPrinter) Update(p State, duration time.Duration) { TotalFiles: p.FilesTotal, FilesRestored: p.FilesFinished, FilesSkipped: p.FilesSkipped, + FilesDeleted: p.FilesDeleted, TotalBytes: p.AllBytesTotal, BytesRestored: p.AllBytesWritten, BytesSkipped: p.AllBytesSkipped, @@ -94,6 +95,7 @@ func (t *jsonPrinter) Finish(p State, duration time.Duration) { TotalFiles: p.FilesTotal, FilesRestored: p.FilesFinished, FilesSkipped: p.FilesSkipped, + FilesDeleted: p.FilesDeleted, TotalBytes: p.AllBytesTotal, BytesRestored: p.AllBytesWritten, BytesSkipped: p.AllBytesSkipped, @@ -108,6 +110,7 @@ type statusUpdate struct { TotalFiles uint64 `json:"total_files,omitempty"` FilesRestored uint64 `json:"files_restored,omitempty"` FilesSkipped uint64 `json:"files_skipped,omitempty"` + FilesDeleted uint64 `json:"files_deleted,omitempty"` TotalBytes uint64 `json:"total_bytes,omitempty"` BytesRestored uint64 `json:"bytes_restored,omitempty"` BytesSkipped uint64 `json:"bytes_skipped,omitempty"` @@ -137,6 +140,7 @@ type summaryOutput struct { TotalFiles uint64 `json:"total_files,omitempty"` FilesRestored uint64 `json:"files_restored,omitempty"` FilesSkipped uint64 `json:"files_skipped,omitempty"` + FilesDeleted uint64 `json:"files_deleted,omitempty"` TotalBytes uint64 `json:"total_bytes,omitempty"` BytesRestored uint64 
`json:"bytes_restored,omitempty"` BytesSkipped uint64 `json:"bytes_skipped,omitempty"` diff --git a/internal/ui/restore/json_test.go b/internal/ui/restore/json_test.go index 917a48070..c7096c246 100644 --- a/internal/ui/restore/json_test.go +++ b/internal/ui/restore/json_test.go @@ -17,31 +17,31 @@ func createJSONProgress() (*ui.MockTerminal, ProgressPrinter) { func TestJSONPrintUpdate(t *testing.T) { term, printer := createJSONProgress() - printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second) + printer.Update(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output) } func TestJSONPrintUpdateWithSkipped(t *testing.T) { term, printer := createJSONProgress() - printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second) + printer.Update(State{3, 11, 2, 0, 29, 47, 59}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"status\",\"seconds_elapsed\":5,\"percent_done\":0.6170212765957447,\"total_files\":11,\"files_restored\":3,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":29,\"bytes_skipped\":59}\n"}, term.Output) } func TestJSONPrintSummaryOnSuccess(t *testing.T) { term, printer := createJSONProgress() - printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second) + printer.Finish(State{11, 11, 0, 0, 47, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"total_bytes\":47,\"bytes_restored\":47}\n"}, term.Output) } func TestJSONPrintSummaryOnErrors(t *testing.T) { term, printer := createJSONProgress() - printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second) + printer.Finish(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":3,\"total_bytes\":47,\"bytes_restored\":29}\n"}, term.Output) } func TestJSONPrintSummaryOnSuccessWithSkipped(t *testing.T) { term, printer := createJSONProgress() - printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second) + printer.Finish(State{11, 11, 2, 0, 47, 47, 59}, 5*time.Second) test.Equals(t, []string{"{\"message_type\":\"summary\",\"seconds_elapsed\":5,\"total_files\":11,\"files_restored\":11,\"files_skipped\":2,\"total_bytes\":47,\"bytes_restored\":47,\"bytes_skipped\":59}\n"}, term.Output) } diff --git a/internal/ui/restore/progress.go b/internal/ui/restore/progress.go index 06f4c86aa..41367f346 100644 --- a/internal/ui/restore/progress.go +++ b/internal/ui/restore/progress.go @@ -11,6 +11,7 @@ type State struct { FilesFinished uint64 FilesTotal uint64 FilesSkipped uint64 + FilesDeleted uint64 AllBytesWritten uint64 AllBytesTotal uint64 AllBytesSkipped uint64 @@ -124,11 +125,13 @@ func (p *Progress) AddSkippedFile(name string, size uint64) { p.printer.CompleteItem(ActionFileUnchanged, name, size) } -func (p *Progress) ReportDeletedFile(name string) { +func (p *Progress) ReportDeletion(name string) { if p == nil { return } + p.s.FilesDeleted++ + p.m.Lock() defer p.m.Unlock() diff --git a/internal/ui/restore/progress_test.go b/internal/ui/restore/progress_test.go index b01440bee..b6f72726c 100644 --- a/internal/ui/restore/progress_test.go +++ b/internal/ui/restore/progress_test.go @@ -72,7 +72,7 @@ func TestNew(t *testing.T) { return false }) test.Equals(t, printerTrace{ - printerTraceEntry{State{0, 0, 0, 0, 0, 0}, 0, false}, + 
printerTraceEntry{State{0, 0, 0, 0, 0, 0, 0}, 0, false},
 	}, result)
 	test.Equals(t, itemTrace{}, items)
 }
@@ -85,7 +85,7 @@ func TestAddFile(t *testing.T) {
 		return false
 	})
 	test.Equals(t, printerTrace{
-		printerTraceEntry{State{0, 1, 0, 0, fileSize, 0}, 0, false},
+		printerTraceEntry{State{0, 1, 0, 0, 0, fileSize, 0}, 0, false},
 	}, result)
 	test.Equals(t, itemTrace{}, items)
 }
@@ -100,7 +100,7 @@ func TestFirstProgressOnAFile(t *testing.T) {
 		return false
 	})
 	test.Equals(t, printerTrace{
-		printerTraceEntry{State{0, 1, 0, expectedBytesWritten, expectedBytesTotal, 0}, 0, false},
+		printerTraceEntry{State{0, 1, 0, 0, expectedBytesWritten, expectedBytesTotal, 0}, 0, false},
 	}, result)
 	test.Equals(t, itemTrace{}, items)
 }
@@ -116,7 +116,7 @@ func TestLastProgressOnAFile(t *testing.T) {
 		return false
 	})
 	test.Equals(t, printerTrace{
-		printerTraceEntry{State{1, 1, 0, fileSize, fileSize, 0}, 0, false},
+		printerTraceEntry{State{1, 1, 0, 0, fileSize, fileSize, 0}, 0, false},
 	}, result)
 	test.Equals(t, itemTrace{
 		itemTraceEntry{action: ActionFileUpdated, item: "test", size: fileSize},
@@ -135,7 +135,7 @@ func TestLastProgressOnLastFile(t *testing.T) {
 		return false
 	})
 	test.Equals(t, printerTrace{
-		printerTraceEntry{State{2, 2, 0, 50 + fileSize, 50 + fileSize, 0}, 0, false},
+		printerTraceEntry{State{2, 2, 0, 0, 50 + fileSize, 50 + fileSize, 0}, 0, false},
 	}, result)
 	test.Equals(t, itemTrace{
 		itemTraceEntry{action: ActionFileUpdated, item: "test1", size: 50},
@@ -154,7 +154,7 @@ func TestSummaryOnSuccess(t *testing.T) {
 		return true
 	})
 	test.Equals(t, printerTrace{
-		printerTraceEntry{State{2, 2, 0, 50 + fileSize, 50 + fileSize, 0}, mockFinishDuration, true},
+		printerTraceEntry{State{2, 2, 0, 0, 50 + fileSize, 50 + fileSize, 0}, mockFinishDuration, true},
 	}, result)
 }
 
@@ -169,7 +169,7 @@ func TestSummaryOnErrors(t *testing.T) {
 		return true
 	})
 	test.Equals(t, printerTrace{
-		printerTraceEntry{State{1, 2, 0, 50 + fileSize/2, 50 + fileSize, 0}, mockFinishDuration, true},
+		printerTraceEntry{State{1, 2, 0, 0, 50 + fileSize/2, 50 + fileSize, 0}, mockFinishDuration, true},
 	}, result)
 }
 
@@ -181,7 +181,7 @@ func TestSkipFile(t *testing.T) {
 		return true
 	})
 	test.Equals(t, printerTrace{
-		printerTraceEntry{State{0, 0, 1, 0, 0, fileSize}, mockFinishDuration, true},
+		printerTraceEntry{State{0, 0, 1, 0, 0, 0, fileSize}, mockFinishDuration, true},
 	}, result)
 	test.Equals(t, itemTrace{
 		itemTraceEntry{ActionFileUnchanged, "test", fileSize},
@@ -196,7 +196,7 @@ func TestProgressTypes(t *testing.T) {
 		progress.AddFile(0)
 		progress.AddProgress("dir", ActionDirRestored, fileSize, fileSize)
 		progress.AddProgress("new", ActionFileRestored, 0, 0)
-		progress.ReportDeletedFile("del")
+		progress.ReportDeletion("del")
 		return true
 	})
 	test.Equals(t, itemTrace{
diff --git a/internal/ui/restore/text.go b/internal/ui/restore/text.go
index ba0dcd007..35c9db029 100644
--- a/internal/ui/restore/text.go
+++ b/internal/ui/restore/text.go
@@ -30,6 +30,9 @@ func (t *textPrinter) Update(p State, duration time.Duration) {
 	if p.FilesSkipped > 0 {
 		progress += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, ui.FormatBytes(p.AllBytesSkipped))
 	}
+	if p.FilesDeleted > 0 {
+		progress += fmt.Sprintf(", deleted %v files/dirs", p.FilesDeleted)
+	}
 
 	t.terminal.SetStatus([]string{progress})
 }
@@ -82,6 +82,9 @@ func (t *textPrinter) Finish(p State, duration time.Duration) {
 	if p.FilesSkipped > 0 {
 		summary += fmt.Sprintf(", skipped %v files/dirs %v", p.FilesSkipped, ui.FormatBytes(p.AllBytesSkipped))
 	}
+	if p.FilesDeleted > 0 {
+		summary += fmt.Sprintf(", deleted %v files/dirs", p.FilesDeleted)
+	}
 
 	t.terminal.Print(summary)
 }
diff --git a/internal/ui/restore/text_test.go b/internal/ui/restore/text_test.go
index 4ffb1615d..746700cd8 100644
--- a/internal/ui/restore/text_test.go
+++ b/internal/ui/restore/text_test.go
@@ -17,31 +17,31 @@ func createTextProgress() (*ui.MockTerminal, ProgressPrinter) {
 
 func TestPrintUpdate(t *testing.T) {
 	term, printer := createTextProgress()
-	printer.Update(State{3, 11, 0, 29, 47, 0}, 5*time.Second)
+	printer.Update(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second)
 	test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B"}, term.Output)
 }
 
 func TestPrintUpdateWithSkipped(t *testing.T) {
 	term, printer := createTextProgress()
-	printer.Update(State{3, 11, 2, 29, 47, 59}, 5*time.Second)
+	printer.Update(State{3, 11, 2, 0, 29, 47, 59}, 5*time.Second)
 	test.Equals(t, []string{"[0:05] 61.70% 3 files/dirs 29 B, total 11 files/dirs 47 B, skipped 2 files/dirs 59 B"}, term.Output)
 }
 
 func TestPrintSummaryOnSuccess(t *testing.T) {
 	term, printer := createTextProgress()
-	printer.Finish(State{11, 11, 0, 47, 47, 0}, 5*time.Second)
+	printer.Finish(State{11, 11, 0, 0, 47, 47, 0}, 5*time.Second)
 	test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05"}, term.Output)
 }
 
 func TestPrintSummaryOnErrors(t *testing.T) {
 	term, printer := createTextProgress()
-	printer.Finish(State{3, 11, 0, 29, 47, 0}, 5*time.Second)
+	printer.Finish(State{3, 11, 0, 0, 29, 47, 0}, 5*time.Second)
 	test.Equals(t, []string{"Summary: Restored 3 / 11 files/dirs (29 B / 47 B) in 0:05"}, term.Output)
 }
 
 func TestPrintSummaryOnSuccessWithSkipped(t *testing.T) {
 	term, printer := createTextProgress()
-	printer.Finish(State{11, 11, 2, 47, 47, 59}, 5*time.Second)
+	printer.Finish(State{11, 11, 2, 0, 47, 47, 59}, 5*time.Second)
 	test.Equals(t, []string{"Summary: Restored 11 files/dirs (47 B) in 0:05, skipped 2 files/dirs 59 B"}, term.Output)
 }
 
diff --git a/internal/ui/termstatus/status.go b/internal/ui/termstatus/status.go
index 39654cc8c..e65330958 100644
--- a/internal/ui/termstatus/status.go
+++ b/internal/ui/termstatus/status.go
@@ -212,7 +212,7 @@ func (t *Terminal) runWithoutStatus(ctx context.Context) {
 			}
 
 			if _, err := io.WriteString(dst, msg.line); err != nil {
-				fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
+				_, _ = fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
 			}
 
 			if flush == nil {
@@ -220,16 +220,18 @@
 			}
 
 			if err := flush(); err != nil {
-				fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
+				_, _ = fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
 			}
 
 		case stat := <-t.status:
 			for _, line := range stat.lines {
 				// Ensure that each message ends with exactly one newline.
-				fmt.Fprintln(t.wr, strings.TrimRight(line, "\n"))
+				if _, err := fmt.Fprintln(t.wr, strings.TrimRight(line, "\n")); err != nil {
+					_, _ = fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
+				}
 			}
 			if err := t.wr.Flush(); err != nil {
-				fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
+				_, _ = fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
 			}
 		}
 	}
diff --git a/internal/walker/walker.go b/internal/walker/walker.go
index 8acfed2f2..252bc3530 100644
--- a/internal/walker/walker.go
+++ b/internal/walker/walker.go
@@ -28,7 +28,7 @@ type WalkVisitor struct {
 	// was returned. This function is mandatory
 	ProcessNode WalkFunc
 	// Optional callback
-	LeaveDir func(path string)
+	LeaveDir func(path string) error
 }
 
 // Walk calls walkFn recursively for each node in root. If walkFn returns an
@@ -100,7 +100,7 @@ func walk(ctx context.Context, repo restic.BlobLoader, prefix string, parentTree
 	}
 
 	if visitor.LeaveDir != nil {
-		visitor.LeaveDir(prefix)
+		return visitor.LeaveDir(prefix)
 	}
 
 	return nil
diff --git a/internal/walker/walker_test.go b/internal/walker/walker_test.go
index fa377bb8f..3614a2397 100644
--- a/internal/walker/walker_test.go
+++ b/internal/walker/walker_test.go
@@ -93,12 +93,12 @@ func (t TreeMap) Connections() uint {
 
 // checkFunc returns a function suitable for walking the tree to check
 // something, and a function which will check the final result.
-type checkFunc func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB))
+type checkFunc func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB))
 
 // checkItemOrder ensures that the order of the 'path' arguments is the one passed in as 'want'.
 func checkItemOrder(want []string) checkFunc {
 	pos := 0
-	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) {
+	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) {
 		walker = func(treeID restic.ID, path string, node *restic.Node, err error) error {
 			if err != nil {
 				t.Errorf("error walking %v: %v", path, err)
@@ -117,8 +117,8 @@ func checkItemOrder(want []string) checkFunc {
 			return nil
 		}
 
-		leaveDir = func(path string) {
-			_ = walker(restic.ID{}, "leave: "+path, nil, nil)
+		leaveDir = func(path string) error {
+			return walker(restic.ID{}, "leave: "+path, nil, nil)
 		}
 
 		final = func(t testing.TB) {
@@ -134,7 +134,7 @@
 // checkParentTreeOrder ensures that the order of the 'parentID' arguments is the one passed in as 'want'.
 func checkParentTreeOrder(want []string) checkFunc {
 	pos := 0
-	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) {
+	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) {
 		walker = func(treeID restic.ID, path string, node *restic.Node, err error) error {
 			if err != nil {
 				t.Errorf("error walking %v: %v", path, err)
@@ -168,7 +168,7 @@
 func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc {
 	var pos int
 
-	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string), final func(testing.TB)) {
+	return func(t testing.TB) (walker WalkFunc, leaveDir func(path string) error, final func(testing.TB)) {
 		walker = func(treeID restic.ID, path string, node *restic.Node, err error) error {
 			if err != nil {
 				t.Errorf("error walking %v: %v", path, err)
@@ -192,8 +192,8 @@ func checkSkipFor(skipFor map[string]struct{}, wantPaths []string) checkFunc {
 			return nil
 		}
 
-		leaveDir = func(path string) {
-			_ = walker(restic.ID{}, "leave: "+path, nil, nil)
+		leaveDir = func(path string) error {
+			return walker(restic.ID{}, "leave: "+path, nil, nil)
 		}
 
 		final = func(t testing.TB) {
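The `LeaveDir` hunks above change the callback from a fire-and-forget hook into one whose error `walk` now propagates (`return visitor.LeaveDir(prefix)`), so callers no longer have to discard it with `_ =`. The following standalone sketch is not part of the patch and does not use restic's actual walker API; `miniVisitor` and `walkDirs` are made-up names that only illustrate the error-propagation pattern.

```
// Illustrative sketch only: a visitor whose directory-leave callback returns
// an error, mirroring the change from `LeaveDir func(path string)` to
// `LeaveDir func(path string) error`. miniVisitor/walkDirs are hypothetical.
package main

import (
	"errors"
	"fmt"
)

type miniVisitor struct {
	ProcessNode func(path string) error
	LeaveDir    func(path string) error // error is now returned to the caller
}

// walkDirs visits each path and, after finishing, calls LeaveDir.
// Returning LeaveDir's error directly is the effect of the
// `return visitor.LeaveDir(prefix)` change in the diff.
func walkDirs(paths []string, v miniVisitor) error {
	for _, p := range paths {
		if err := v.ProcessNode(p); err != nil {
			return err
		}
	}
	if v.LeaveDir != nil {
		return v.LeaveDir("/")
	}
	return nil
}

func main() {
	err := walkDirs([]string{"/a", "/b"}, miniVisitor{
		ProcessNode: func(path string) error { fmt.Println("visit", path); return nil },
		LeaveDir:    func(path string) error { return errors.New("cleanup failed in " + path) },
	})
	// The LeaveDir error now reaches the caller instead of being swallowed.
	fmt.Println("walk result:", err)
}
```

This matches what the test helpers above do after the change: `leaveDir` returns `walker(...)` instead of assigning its result to `_`.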