Merge remote-tracking branch 'old-origin/master' into add-smb-backend
Commit bc3331082c
113 changed files with 2628 additions and 1545 deletions

@@ -1,12 +0,0 @@
# Folders
.git/
.github/
changelog/
doc/
docker/
helpers/

# Files
.gitignore
.golangci.yml
*.md
59
.github/workflows/docker.yml
vendored
Normal file
|
@ -0,0 +1,59 @@
|
|||
|
||||
name: Create and publish a Docker image
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
branches:
|
||||
- 'master'
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
build-and-push-image:
|
||||
if: github.repository == 'restic/restic'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
type=ref,event=branch
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@e81a89b1732b9c48d79cd809d8d81d79c4647a18
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@4b4e9c3e2d4531116a6f8ba8e71fc6e2cb6e6c8c
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
|
||||
with:
|
||||
push: true
|
||||
context: .
|
||||
file: docker/Dockerfile.release
|
||||
platforms: linux/386,linux/amd64,linux/arm,linux/arm64
|
||||
pull: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
44
.github/workflows/tests.yml
vendored
|
@ -7,6 +7,7 @@ on:
|
|||
|
||||
# run tests for all pull requests
|
||||
pull_request:
|
||||
merge_group:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
@ -257,6 +258,14 @@ jobs:
|
|||
run: |
|
||||
go run build.go
|
||||
|
||||
- name: Minimal test
|
||||
run: |
|
||||
./restic init
|
||||
./restic backup .
|
||||
env:
|
||||
RESTIC_REPOSITORY: ../testrepo
|
||||
RESTIC_PASSWORD: password
|
||||
|
||||
- name: Run local Tests
|
||||
env:
|
||||
RESTIC_TEST_FUSE: ${{ matrix.test_fuse }}
|
||||
|
@ -305,7 +314,7 @@ jobs:
|
|||
# own repo, otherwise the secrets are not available
|
||||
# Skip for Dependabot pull requests as these are run without secrets
|
||||
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#responding-to-events
|
||||
if: (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && (github.actor != 'dependabot[bot]') && matrix.test_cloud_backends
|
||||
if: ((github.repository == 'restic/restic' && github.event_name == 'push') || github.event.pull_request.head.repo.full_name == github.repository) && (github.actor != 'dependabot[bot]') && matrix.test_cloud_backends
|
||||
|
||||
- name: Check changelog files with calens
|
||||
run: |
|
||||
|
@ -319,27 +328,19 @@ jobs:
|
|||
cross_compile:
|
||||
strategy:
|
||||
|
||||
# ATTENTION: the list of architectures must be in sync with helpers/build-release-binaries/main.go!
|
||||
matrix:
|
||||
# run cross-compile in three batches parallel so the overall tests run faster
|
||||
targets:
|
||||
- "linux/386 linux/amd64 linux/arm linux/arm64 linux/ppc64le linux/mips linux/mipsle linux/mips64 linux/mips64le linux/riscv64 linux/s390x"
|
||||
|
||||
- "openbsd/386 openbsd/amd64 \
|
||||
freebsd/386 freebsd/amd64 freebsd/arm \
|
||||
aix/ppc64 \
|
||||
darwin/amd64 darwin/arm64"
|
||||
|
||||
- "netbsd/386 netbsd/amd64 \
|
||||
windows/386 windows/amd64 \
|
||||
solaris/amd64"
|
||||
subset:
|
||||
- "0/3"
|
||||
- "1/3"
|
||||
- "2/3"
|
||||
|
||||
env:
|
||||
GOPROXY: https://proxy.golang.org
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
name: Cross Compile for ${{ matrix.targets }}
|
||||
name: Cross Compile for subset ${{ matrix.subset }}
|
||||
|
||||
steps:
|
||||
- name: Set up Go ${{ env.latest_go }}
|
||||
|
@ -347,21 +348,14 @@ jobs:
|
|||
with:
|
||||
go-version: ${{ env.latest_go }}
|
||||
|
||||
- name: Install gox
|
||||
run: |
|
||||
go install github.com/mitchellh/gox@latest
|
||||
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Cross-compile with gox for ${{ matrix.targets }}
|
||||
env:
|
||||
GOFLAGS: "-trimpath"
|
||||
GOX_ARCHS: "${{ matrix.targets }}"
|
||||
- name: Cross-compile for subset ${{ matrix.subset }}
|
||||
run: |
|
||||
mkdir build-output
|
||||
gox -parallel 2 -verbose -osarch "$GOX_ARCHS" -output "build-output/{{.Dir}}_{{.OS}}_{{.Arch}}" ./cmd/restic
|
||||
gox -parallel 2 -verbose -osarch "$GOX_ARCHS" -tags debug -output "build-output/{{.Dir}}_{{.OS}}_{{.Arch}}_debug" ./cmd/restic
|
||||
mkdir build-output build-output-debug
|
||||
go run ./helpers/build-release-binaries/main.go -o build-output -s . --platform-subset ${{ matrix.subset }}
|
||||
go run ./helpers/build-release-binaries/main.go -o build-output-debug -s . --platform-subset ${{ matrix.subset }} --tags debug
|
||||
|
||||
lint:
|
||||
name: lint
|
||||
|
|
|
@@ -68,6 +68,9 @@ it might be necessary to manually clean up stale lock files using
On Windows, please set the environment variable `RESTIC_DEBUG_STACKTRACE_SIGINT`
to `true` and press `Ctrl-C` to create a stacktrace.

If you think restic uses too much memory or a too large cache directory, then
please include the output of `restic stats --mode debug`.


Development Environment
=======================

@@ -88,10 +91,40 @@ Then use the `go` tool to build restic:
    $ ./restic version
    restic 0.14.0-dev (compiled manually) compiled with go1.19 on linux/amd64

To create a debug build use:

    $ go build -tags debug ./cmd/restic

You can run all tests with the following command:

    $ go test ./...


Performance and Memory Usage Issues
===================================

Debug builds of restic support the `--block-profile`, `--cpu-profile`,
`--mem-profile`, and `--trace-profile` options, which collect performance data
that can later be analyzed using the Go tools:

    $ restic --cpu-profile . [...]
    $ go tool pprof -http localhost:12345 cpu.pprof

To analyze a trace profile use `go tool trace -http=localhost:12345 trace.out`.

As the memory usage of restic changes over time, it may be useful to capture a
snapshot of the current heap. This is possible using the `--listen-profile`
option. While restic runs, you can then query and afterwards analyze the heap
statistics.

    $ restic --listen-profile localhost:12345 [...]
    $ curl http://localhost:12345/debug/pprof/heap -o heap.pprof
    $ go tool pprof -http localhost:12345 heap.pprof

Another useful tool is the environment variable `GODEBUG=gctrace=1`, which makes
the Go runtime print information about garbage collector runs. For a graphical
view, combine this with gcvis.
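
As a small illustrative sketch (the backup target path is only a placeholder), the
trace can be captured to a file while running a backup:

    $ GODEBUG=gctrace=1 ./restic backup ~/work 2> gctrace.log

The garbage collector trace is printed to stderr, so redirecting stderr keeps it
separate from the normal restic output.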


Providing Patches
=================

8 changelog/unreleased/issue-1926 Normal file
@@ -0,0 +1,8 @@
Enhancement: Certificates can be passed through environment variables

Restic will now read the paths to the certificates from the environment
variables `RESTIC_CACERT` or `RESTIC_TLS_CLIENT_CERT` if `--cacert` or
`--tls-client-cert` are not specified.
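
For example (the paths are placeholders), setting the environment variables has the
same effect as passing the corresponding flags:

    export RESTIC_CACERT=/path/to/root-ca.pem
    export RESTIC_TLS_CLIENT_CERT=/path/to/client-cert-with-key.pem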

https://github.com/restic/restic/issues/1926
https://github.com/restic/restic/pull/4384
11 changelog/unreleased/issue-2359 Normal file
@@ -0,0 +1,11 @@
Enhancement: Provide multi-platform Docker containers

The official Docker containers are now built for the architectures linux/386,
linux/amd64, linux/arm and linux/arm64.

As an alternative to the Docker Hub, the Docker containers are now also
available on ghcr.io, the GitHub Container Registry.

https://github.com/restic/restic/issues/2359
https://github.com/restic/restic/issues/4269
https://github.com/restic/restic/pull/4364
10 changelog/unreleased/issue-2468 Normal file
@@ -0,0 +1,10 @@
Enhancement: Add support for non-global Azure clouds

Restic backups on Azure only supported storages using the global domain
`core.windows.net`. This meant that backups to other domains such as Azure
China (`core.chinacloudapi.cn`) or Azure Germany (`core.cloudapi.de`) were
not supported. Restic now allows overriding the global domain using the
environment variable `AZURE_ENDPOINT_SUFFIX`.

https://github.com/restic/restic/issues/2468
https://github.com/restic/restic/pull/4387
5 changelog/unreleased/issue-3328 Normal file
@@ -0,0 +1,5 @@
Enhancement: Reduce memory usage by up to 25%

https://github.com/restic/restic/issues/3328
https://github.com/restic/restic/pull/4352
https://github.com/restic/restic/pull/4353
9 changelog/unreleased/issue-3624 Normal file
@@ -0,0 +1,9 @@
Enhancement: Keep oldest snapshot when there are not enough snapshots

The `forget` command now additionally preserves the oldest snapshot if fewer
snapshots are kept than allowed by the `--keep-*` parameters. This maximizes
the amount of history kept while the specified limits are not yet reached.

https://github.com/restic/restic/issues/3624
https://github.com/restic/restic/pull/4366
https://forum.restic.net/t/keeping-yearly-snapshots-policy-when-backup-began-during-the-year/4670/2
8 changelog/unreleased/issue-3698 Normal file
@@ -0,0 +1,8 @@
Enhancement: Add support for Managed / Workload Identity to the Azure backend

Restic now additionally supports authenticating to Azure using Workload
Identity or Managed Identity credentials, which are automatically injected in
several environments such as a managed Kubernetes cluster.

https://github.com/restic/restic/issues/3698
https://github.com/restic/restic/pull/4029
8 changelog/unreleased/issue-4188 Normal file
@@ -0,0 +1,8 @@
Enhancement: `backup` includes restic version in snapshot metadata

The restic version used to back up the snapshot is now included in its metadata.
The program version is shown when inspecting a snapshot using `restic cat
snapshot <snapshotID>` or `restic snapshots --json`.

https://github.com/restic/restic/issues/4188
https://github.com/restic/restic/pull/4378
@@ -4,6 +4,11 @@ The `restore` command now shows a progress report while restoring files.

Example: [0:42] 5.76%  23 files 12.98 MiB, total 3456 files 23.54 GiB

JSON output is now also supported.

https://github.com/restic/restic/issues/426
https://github.com/restic/restic/issues/3413
https://github.com/restic/restic/issues/3627
https://github.com/restic/restic/pull/3991
https://github.com/restic/restic/pull/4314
https://forum.restic.net/t/progress-bar-for-restore/5210
8 changelog/unreleased/issue-4375 Normal file
@@ -0,0 +1,8 @@
Enhancement: Add support for extended attributes on symlinks

Restic now supports extended attributes on symlinks when backing up,
restoring, or FUSE-mounting snapshots. This includes, for example, the
`security.selinux` xattr on Linux distributions that use SELinux.

https://github.com/restic/restic/issues/4375
https://github.com/restic/restic/pull/4379
6 changelog/unreleased/pull-4365 Normal file
@@ -0,0 +1,6 @@
Change: Building restic on AIX is temporarily unsupported

As the current version of the library used for the Azure backend does not
compile on AIX, there are currently no restic builds available for AIX.

https://github.com/restic/restic/pull/4365

@ -645,6 +645,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter
|
|||
Time: timeStamp,
|
||||
Hostname: opts.Host,
|
||||
ParentSnapshot: parentSnapshot,
|
||||
ProgramVersion: "restic " + version,
|
||||
}
|
||||
|
||||
if !gopts.JSON {
|
||||
|
|
|
@ -13,34 +13,19 @@ import (
|
|||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
"github.com/restic/restic/internal/ui/termstatus"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
func testRunBackupAssumeFailure(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) error {
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||
t.Logf("backing up %v in %v", target, dir)
|
||||
if dir != "" {
|
||||
cleanup := rtest.Chdir(t, dir)
|
||||
defer cleanup()
|
||||
}
|
||||
|
||||
var wg errgroup.Group
|
||||
term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet)
|
||||
wg.Go(func() error { term.Run(ctx); return nil })
|
||||
|
||||
t.Logf("backing up %v in %v", target, dir)
|
||||
if dir != "" {
|
||||
cleanup := rtest.Chdir(t, dir)
|
||||
defer cleanup()
|
||||
}
|
||||
|
||||
opts.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
|
||||
backupErr := runBackup(ctx, opts, gopts, term, target)
|
||||
|
||||
cancel()
|
||||
|
||||
err := wg.Wait()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return backupErr
|
||||
opts.GroupBy = restic.SnapshotGroupByOptions{Host: true, Path: true}
|
||||
return runBackup(ctx, opts, gopts, term, target)
|
||||
})
|
||||
}
|
||||
|
||||
func testRunBackup(t testing.TB, dir string, target []string, opts BackupOptions, gopts GlobalOptions) {
|
||||
|
@ -455,6 +440,22 @@ func TestBackupTags(t *testing.T) {
|
|||
"expected parent to be %v, got %v", parent.ID, newest.Parent)
|
||||
}
|
||||
|
||||
func TestBackupProgramVersion(t *testing.T) {
|
||||
env, cleanup := withTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
||||
testSetupBackupData(t, env)
|
||||
testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts)
|
||||
newest, _ := testRunSnapshots(t, env.gopts)
|
||||
|
||||
if newest == nil {
|
||||
t.Fatal("expected a backup, got nil")
|
||||
}
|
||||
resticVersion := "restic " + version
|
||||
rtest.Assert(t, newest.ProgramVersion == resticVersion,
|
||||
"expected %v, got %v", resticVersion, newest.ProgramVersion)
|
||||
}
|
||||
|
||||
func TestQuietBackup(t *testing.T) {
|
||||
env, cleanup := withTestEnvironment(t)
|
||||
defer cleanup()
|
||||
|
|
|
@ -16,6 +16,7 @@ import (
|
|||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/ui"
|
||||
)
|
||||
|
||||
var cmdCheck = &cobra.Command{
|
||||
|
@ -97,7 +98,7 @@ func checkFlags(opts CheckOptions) error {
|
|||
}
|
||||
|
||||
} else {
|
||||
fileSize, err := parseSizeStr(opts.ReadDataSubset)
|
||||
fileSize, err := ui.ParseBytes(opts.ReadDataSubset)
|
||||
if err != nil {
|
||||
return argumentError
|
||||
}
|
||||
|
@ -363,7 +364,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
|
|||
if repoSize == 0 {
|
||||
return errors.Fatal("Cannot read from a repository having size 0")
|
||||
}
|
||||
subsetSize, _ := parseSizeStr(opts.ReadDataSubset)
|
||||
subsetSize, _ := ui.ParseBytes(opts.ReadDataSubset)
|
||||
if subsetSize > repoSize {
|
||||
subsetSize = repoSize
|
||||
}
|
||||
|
|
|
@ -87,9 +87,9 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
|
|||
return err
|
||||
}
|
||||
|
||||
be, err := create(ctx, repo, gopts.extended)
|
||||
be, err := create(ctx, repo, gopts, gopts.extended)
|
||||
if err != nil {
|
||||
return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
|
||||
return errors.Fatalf("create repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
|
||||
}
|
||||
|
||||
s, err := repository.New(be, repository.Options{
|
||||
|
@ -102,11 +102,11 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
|
|||
|
||||
err = s.Init(ctx, version, gopts.password, chunkerPolynomial)
|
||||
if err != nil {
|
||||
return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.Repo), err)
|
||||
return errors.Fatalf("create key in repository at %s failed: %v\n", location.StripPassword(gopts.backends, gopts.Repo), err)
|
||||
}
|
||||
|
||||
if !gopts.JSON {
|
||||
Verbosef("created restic repository %v at %s", s.Config().ID[:10], location.StripPassword(gopts.Repo))
|
||||
Verbosef("created restic repository %v at %s", s.Config().ID[:10], location.StripPassword(gopts.backends, gopts.Repo))
|
||||
if opts.CopyChunkerParameters && chunkerPolynomial != nil {
|
||||
Verbosef(" with chunker parameters copied from secondary repository\n")
|
||||
} else {
|
||||
|
@ -121,7 +121,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args []
|
|||
status := initSuccess{
|
||||
MessageType: "initialized",
|
||||
ID: s.Config().ID,
|
||||
Repository: location.StripPassword(gopts.Repo),
|
||||
Repository: location.StripPassword(gopts.backends, gopts.Repo),
|
||||
}
|
||||
return json.NewEncoder(globalOptions.stdout).Encode(status)
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package main
|
|||
import (
|
||||
"context"
|
||||
"math"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -80,7 +81,7 @@ func addPruneOptions(c *cobra.Command) {
|
|||
func verifyPruneOptions(opts *PruneOptions) error {
|
||||
opts.MaxRepackBytes = math.MaxUint64
|
||||
if len(opts.MaxRepackSize) > 0 {
|
||||
size, err := parseSizeStr(opts.MaxRepackSize)
|
||||
size, err := ui.ParseBytes(opts.MaxRepackSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -123,7 +124,7 @@ func verifyPruneOptions(opts *PruneOptions) error {
|
|||
}
|
||||
|
||||
default:
|
||||
size, err := parseSizeStr(maxUnused)
|
||||
size, err := ui.ParseBytes(maxUnused)
|
||||
if err != nil {
|
||||
return errors.Fatalf("invalid number of bytes %q for --max-unused: %v", opts.MaxUnused, err)
|
||||
}
|
||||
|
@ -205,6 +206,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
|
|||
return err
|
||||
}
|
||||
|
||||
// Trigger GC to reset garbage collection threshold
|
||||
runtime.GC()
|
||||
|
||||
return doPrune(ctx, opts, gopts, repo, plan)
|
||||
}
|
||||
|
||||
|
|
|
@ -175,16 +175,20 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
|
|||
return err
|
||||
}
|
||||
|
||||
var progress *restoreui.Progress
|
||||
if !gopts.Quiet && !gopts.JSON {
|
||||
progress = restoreui.NewProgress(restoreui.NewProgressPrinter(term), calculateProgressInterval(!gopts.Quiet, gopts.JSON))
|
||||
msg := ui.NewMessage(term, gopts.verbosity)
|
||||
var printer restoreui.ProgressPrinter
|
||||
if gopts.JSON {
|
||||
printer = restoreui.NewJSONProgress(term)
|
||||
} else {
|
||||
printer = restoreui.NewTextProgress(term)
|
||||
}
|
||||
|
||||
progress := restoreui.NewProgress(printer, calculateProgressInterval(!gopts.Quiet, gopts.JSON))
|
||||
res := restorer.NewRestorer(repo, sn, opts.Sparse, progress)
|
||||
|
||||
totalErrors := 0
|
||||
res.Error = func(location string, err error) error {
|
||||
Warnf("ignoring error for %s: %s\n", location, err)
|
||||
msg.E("ignoring error for %s: %s\n", location, err)
|
||||
totalErrors++
|
||||
return nil
|
||||
}
|
||||
|
@ -194,12 +198,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
|
|||
selectExcludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
|
||||
matched, err := filter.List(excludePatterns, item)
|
||||
if err != nil {
|
||||
Warnf("error for exclude pattern: %v", err)
|
||||
msg.E("error for exclude pattern: %v", err)
|
||||
}
|
||||
|
||||
matchedInsensitive, err := filter.List(insensitiveExcludePatterns, strings.ToLower(item))
|
||||
if err != nil {
|
||||
Warnf("error for iexclude pattern: %v", err)
|
||||
msg.E("error for iexclude pattern: %v", err)
|
||||
}
|
||||
|
||||
// An exclude filter is basically a 'wildcard but foo',
|
||||
|
@ -217,12 +221,12 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
|
|||
selectIncludeFilter := func(item string, dstpath string, node *restic.Node) (selectedForRestore bool, childMayBeSelected bool) {
|
||||
matched, childMayMatch, err := filter.ListWithChild(includePatterns, item)
|
||||
if err != nil {
|
||||
Warnf("error for include pattern: %v", err)
|
||||
msg.E("error for include pattern: %v", err)
|
||||
}
|
||||
|
||||
matchedInsensitive, childMayMatchInsensitive, err := filter.ListWithChild(insensitiveIncludePatterns, strings.ToLower(item))
|
||||
if err != nil {
|
||||
Warnf("error for iexclude pattern: %v", err)
|
||||
msg.E("error for iexclude pattern: %v", err)
|
||||
}
|
||||
|
||||
selectedForRestore = matched || matchedInsensitive
|
||||
|
@ -237,23 +241,25 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
|
|||
res.SelectFilter = selectIncludeFilter
|
||||
}
|
||||
|
||||
Verbosef("restoring %s to %s\n", res.Snapshot(), opts.Target)
|
||||
if !gopts.JSON {
|
||||
msg.P("restoring %s to %s\n", res.Snapshot(), opts.Target)
|
||||
}
|
||||
|
||||
err = res.RestoreTo(ctx, opts.Target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if progress != nil {
|
||||
progress.Finish()
|
||||
}
|
||||
progress.Finish()
|
||||
|
||||
if totalErrors > 0 {
|
||||
return errors.Fatalf("There were %d errors\n", totalErrors)
|
||||
}
|
||||
|
||||
if opts.Verify {
|
||||
Verbosef("verifying files in %s\n", opts.Target)
|
||||
if !gopts.JSON {
|
||||
msg.P("verifying files in %s\n", opts.Target)
|
||||
}
|
||||
var count int
|
||||
t0 := time.Now()
|
||||
count, err = res.VerifyFiles(ctx, opts.Target)
|
||||
|
@ -263,8 +269,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions,
|
|||
if totalErrors > 0 {
|
||||
return errors.Fatalf("There were %d errors\n", totalErrors)
|
||||
}
|
||||
Verbosef("finished verifying %d files in %s (took %s)\n", count, opts.Target,
|
||||
time.Since(t0).Round(time.Millisecond))
|
||||
|
||||
if !gopts.JSON {
|
||||
msg.P("finished verifying %d files in %s (took %s)\n", count, opts.Target,
|
||||
time.Since(t0).Round(time.Millisecond))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -14,6 +14,7 @@ import (
|
|||
"github.com/restic/restic/internal/filter"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
"github.com/restic/restic/internal/ui/termstatus"
|
||||
)
|
||||
|
||||
func testRunRestore(t testing.TB, opts GlobalOptions, dir string, snapshotID restic.ID) {
|
||||
|
@ -26,11 +27,13 @@ func testRunRestoreExcludes(t testing.TB, gopts GlobalOptions, dir string, snaps
|
|||
Exclude: excludes,
|
||||
}
|
||||
|
||||
rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID.String()}))
|
||||
rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts))
|
||||
}
|
||||
|
||||
func testRunRestoreAssumeFailure(snapshotID string, opts RestoreOptions, gopts GlobalOptions) error {
|
||||
return runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID})
|
||||
return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error {
|
||||
return runRestore(ctx, opts, gopts, term, []string{snapshotID})
|
||||
})
|
||||
}
|
||||
|
||||
func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths []string, hosts []string) {
|
||||
|
@ -42,7 +45,7 @@ func testRunRestoreLatest(t testing.TB, gopts GlobalOptions, dir string, paths [
|
|||
},
|
||||
}
|
||||
|
||||
rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{"latest"}))
|
||||
rtest.OK(t, testRunRestoreAssumeFailure("latest", opts, gopts))
|
||||
}
|
||||
|
||||
func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
|
||||
|
@ -51,7 +54,7 @@ func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snaps
|
|||
Include: includes,
|
||||
}
|
||||
|
||||
rtest.OK(t, runRestore(context.TODO(), opts, gopts, nil, []string{snapshotID.String()}))
|
||||
rtest.OK(t, testRunRestoreAssumeFailure(snapshotID.String(), opts, gopts))
|
||||
}
|
||||
|
||||
func TestRestoreFilter(t *testing.T) {
|
||||
|
|
|
@ -5,11 +5,15 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/restic/chunker"
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/crypto"
|
||||
"github.com/restic/restic/internal/repository"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/ui"
|
||||
"github.com/restic/restic/internal/ui/table"
|
||||
"github.com/restic/restic/internal/walker"
|
||||
|
||||
"github.com/minio/sha256-simd"
|
||||
|
@ -99,6 +103,10 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
|
|||
return err
|
||||
}
|
||||
|
||||
if opts.countMode == countModeDebug {
|
||||
return statsDebug(ctx, repo)
|
||||
}
|
||||
|
||||
if !gopts.JSON {
|
||||
Printf("scanning...\n")
|
||||
}
|
||||
|
@ -291,6 +299,7 @@ func verifyStatsInput(opts StatsOptions) error {
|
|||
case countModeUniqueFilesByContents:
|
||||
case countModeBlobsPerFile:
|
||||
case countModeRawData:
|
||||
case countModeDebug:
|
||||
default:
|
||||
return fmt.Errorf("unknown counting mode: %s (use the -h flag to get a list of supported modes)", opts.countMode)
|
||||
}
|
||||
|
@ -335,4 +344,149 @@ const (
|
|||
countModeUniqueFilesByContents = "files-by-contents"
|
||||
countModeBlobsPerFile = "blobs-per-file"
|
||||
countModeRawData = "raw-data"
|
||||
countModeDebug = "debug"
|
||||
)
|
||||
|
||||
func statsDebug(ctx context.Context, repo restic.Repository) error {
|
||||
Warnf("Collecting size statistics\n\n")
|
||||
for _, t := range []restic.FileType{restic.KeyFile, restic.LockFile, restic.IndexFile, restic.PackFile} {
|
||||
hist, err := statsDebugFileType(ctx, repo, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
Warnf("File Type: %v\n%v\n", t, hist)
|
||||
}
|
||||
|
||||
hist := statsDebugBlobs(ctx, repo)
|
||||
for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
|
||||
Warnf("Blob Type: %v\n%v\n\n", t, hist[t])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func statsDebugFileType(ctx context.Context, repo restic.Repository, tpe restic.FileType) (*sizeHistogram, error) {
|
||||
hist := newSizeHistogram(2 * repository.MaxPackSize)
|
||||
err := repo.List(ctx, tpe, func(id restic.ID, size int64) error {
|
||||
hist.Add(uint64(size))
|
||||
return nil
|
||||
})
|
||||
|
||||
return hist, err
|
||||
}
|
||||
|
||||
func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram {
|
||||
var hist [restic.NumBlobTypes]*sizeHistogram
|
||||
for i := 0; i < len(hist); i++ {
|
||||
hist[i] = newSizeHistogram(2 * chunker.MaxSize)
|
||||
}
|
||||
|
||||
repo.Index().Each(ctx, func(pb restic.PackedBlob) {
|
||||
hist[pb.Type].Add(uint64(pb.Length))
|
||||
})
|
||||
|
||||
return hist
|
||||
}
|
||||
|
||||
type sizeClass struct {
|
||||
lower, upper uint64
|
||||
count int64
|
||||
}
|
||||
|
||||
type sizeHistogram struct {
|
||||
count int64
|
||||
totalSize uint64
|
||||
buckets []sizeClass
|
||||
oversized []uint64
|
||||
}
|
||||
|
||||
func newSizeHistogram(sizeLimit uint64) *sizeHistogram {
|
||||
h := &sizeHistogram{}
|
||||
h.buckets = append(h.buckets, sizeClass{0, 0, 0})
|
||||
|
||||
lowerBound := uint64(1)
|
||||
growthFactor := uint64(10)
|
||||
|
||||
for lowerBound < sizeLimit {
|
||||
upperBound := lowerBound*growthFactor - 1
|
||||
if upperBound > sizeLimit {
|
||||
upperBound = sizeLimit
|
||||
}
|
||||
h.buckets = append(h.buckets, sizeClass{lowerBound, upperBound, 0})
|
||||
lowerBound *= growthFactor
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func (s *sizeHistogram) Add(size uint64) {
|
||||
s.count++
|
||||
s.totalSize += size
|
||||
|
||||
for i, bucket := range s.buckets {
|
||||
if size >= bucket.lower && size <= bucket.upper {
|
||||
s.buckets[i].count++
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
s.oversized = append(s.oversized, size)
|
||||
}
|
||||
|
||||
func (s sizeHistogram) String() string {
|
||||
var out strings.Builder
|
||||
|
||||
out.WriteString(fmt.Sprintf("Count: %d\n", s.count))
|
||||
out.WriteString(fmt.Sprintf("Total Size: %s\n", ui.FormatBytes(s.totalSize)))
|
||||
|
||||
t := table.New()
|
||||
t.AddColumn("Size", "{{.SizeRange}}")
|
||||
t.AddColumn("Count", "{{.Count}}")
|
||||
type line struct {
|
||||
SizeRange string
|
||||
Count int64
|
||||
}
|
||||
|
||||
// only print up to the highest used bucket size
|
||||
lastFilledIdx := 0
|
||||
for i := 0; i < len(s.buckets); i++ {
|
||||
if s.buckets[i].count != 0 {
|
||||
lastFilledIdx = i
|
||||
}
|
||||
}
|
||||
|
||||
var lines []line
|
||||
hasStarted := false
|
||||
for i, b := range s.buckets {
|
||||
if i > lastFilledIdx {
|
||||
break
|
||||
}
|
||||
|
||||
if b.count > 0 {
|
||||
hasStarted = true
|
||||
}
|
||||
if hasStarted {
|
||||
lines = append(lines, line{
|
||||
SizeRange: fmt.Sprintf("%d - %d Byte", b.lower, b.upper),
|
||||
Count: b.count,
|
||||
})
|
||||
}
|
||||
}
|
||||
longestRange := 0
|
||||
for _, l := range lines {
|
||||
if longestRange < len(l.SizeRange) {
|
||||
longestRange = len(l.SizeRange)
|
||||
}
|
||||
}
|
||||
for i := range lines {
|
||||
lines[i].SizeRange = strings.Repeat(" ", longestRange-len(lines[i].SizeRange)) + lines[i].SizeRange
|
||||
t.AddRow(lines[i])
|
||||
}
|
||||
|
||||
_ = t.Write(&out)
|
||||
|
||||
if len(s.oversized) > 0 {
|
||||
out.WriteString(fmt.Sprintf("Oversized: %v\n", s.oversized))
|
||||
}
|
||||
return out.String()
|
||||
}
|
||||
|
|
62
cmd/restic/cmd_stats_test.go
Normal file
|
@ -0,0 +1,62 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func TestSizeHistogramNew(t *testing.T) {
|
||||
h := newSizeHistogram(42)
|
||||
|
||||
exp := &sizeHistogram{
|
||||
count: 0,
|
||||
totalSize: 0,
|
||||
buckets: []sizeClass{
|
||||
{0, 0, 0},
|
||||
{1, 9, 0},
|
||||
{10, 42, 0},
|
||||
},
|
||||
}
|
||||
|
||||
rtest.Equals(t, exp, h)
|
||||
}
|
||||
|
||||
func TestSizeHistogramAdd(t *testing.T) {
|
||||
h := newSizeHistogram(42)
|
||||
for i := uint64(0); i < 45; i++ {
|
||||
h.Add(i)
|
||||
}
|
||||
|
||||
exp := &sizeHistogram{
|
||||
count: 45,
|
||||
totalSize: 990,
|
||||
buckets: []sizeClass{
|
||||
{0, 0, 1},
|
||||
{1, 9, 9},
|
||||
{10, 42, 33},
|
||||
},
|
||||
oversized: []uint64{43, 44},
|
||||
}
|
||||
|
||||
rtest.Equals(t, exp, h)
|
||||
}
|
||||
|
||||
func TestSizeHistogramString(t *testing.T) {
|
||||
t.Run("overflow", func(t *testing.T) {
|
||||
h := newSizeHistogram(42)
|
||||
h.Add(8)
|
||||
h.Add(50)
|
||||
|
||||
rtest.Equals(t, "Count: 2\nTotal Size: 58 B\nSize Count\n-----------------\n1 - 9 Byte 1\n-----------------\nOversized: [50]\n", h.String())
|
||||
})
|
||||
|
||||
t.Run("withZero", func(t *testing.T) {
|
||||
h := newSizeHistogram(42)
|
||||
h.Add(0)
|
||||
h.Add(1)
|
||||
h.Add(10)
|
||||
|
||||
rtest.Equals(t, "Count: 3\nTotal Size: 11 B\nSize Count\n-------------------\n 0 - 0 Byte 1\n 1 - 9 Byte 1\n10 - 42 Byte 1\n-------------------\n", h.String())
|
||||
})
|
||||
}
|
|
@ -7,7 +7,6 @@ import (
|
|||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
|
@ -17,6 +16,7 @@ import (
|
|||
"github.com/restic/restic/internal/fs"
|
||||
"github.com/restic/restic/internal/repository"
|
||||
"github.com/restic/restic/internal/textfile"
|
||||
"github.com/restic/restic/internal/ui"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
|
@ -364,7 +364,7 @@ func rejectResticCache(repo *repository.Repository) (RejectByNameFunc, error) {
|
|||
}
|
||||
|
||||
func rejectBySize(maxSizeStr string) (RejectFunc, error) {
|
||||
maxSize, err := parseSizeStr(maxSizeStr)
|
||||
maxSize, err := ui.ParseBytes(maxSizeStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -385,35 +385,6 @@ func rejectBySize(maxSizeStr string) (RejectFunc, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
func parseSizeStr(sizeStr string) (int64, error) {
|
||||
if sizeStr == "" {
|
||||
return 0, errors.New("expected size, got empty string")
|
||||
}
|
||||
|
||||
numStr := sizeStr[:len(sizeStr)-1]
|
||||
var unit int64 = 1
|
||||
|
||||
switch sizeStr[len(sizeStr)-1] {
|
||||
case 'b', 'B':
|
||||
// use initialized values, do nothing here
|
||||
case 'k', 'K':
|
||||
unit = 1024
|
||||
case 'm', 'M':
|
||||
unit = 1024 * 1024
|
||||
case 'g', 'G':
|
||||
unit = 1024 * 1024 * 1024
|
||||
case 't', 'T':
|
||||
unit = 1024 * 1024 * 1024 * 1024
|
||||
default:
|
||||
numStr = sizeStr
|
||||
}
|
||||
value, err := strconv.ParseInt(numStr, 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return value * unit, nil
|
||||
}
|
||||
|
||||
// readExcludePatternsFromFiles reads all exclude files and returns the list of
|
||||
// exclude patterns. For each line, leading and trailing white space is removed
|
||||
// and comment lines are ignored. For each remaining pattern, environment
|
||||
|
|
|
@ -187,54 +187,6 @@ func TestMultipleIsExcludedByFile(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestParseSizeStr(t *testing.T) {
|
||||
sizeStrTests := []struct {
|
||||
in string
|
||||
expected int64
|
||||
}{
|
||||
{"1024", 1024},
|
||||
{"1024b", 1024},
|
||||
{"1024B", 1024},
|
||||
{"1k", 1024},
|
||||
{"100k", 102400},
|
||||
{"100K", 102400},
|
||||
{"10M", 10485760},
|
||||
{"100m", 104857600},
|
||||
{"20G", 21474836480},
|
||||
{"10g", 10737418240},
|
||||
{"2T", 2199023255552},
|
||||
{"2t", 2199023255552},
|
||||
}
|
||||
|
||||
for _, tt := range sizeStrTests {
|
||||
actual, err := parseSizeStr(tt.in)
|
||||
test.OK(t, err)
|
||||
|
||||
if actual != tt.expected {
|
||||
t.Errorf("parseSizeStr(%s) = %d; expected %d", tt.in, actual, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInvalidSizeStr(t *testing.T) {
|
||||
invalidSizes := []string{
|
||||
"",
|
||||
" ",
|
||||
"foobar",
|
||||
"zzz",
|
||||
}
|
||||
|
||||
for _, s := range invalidSizes {
|
||||
v, err := parseSizeStr(s)
|
||||
if err == nil {
|
||||
t.Errorf("wanted error for invalid value %q, got nil", s)
|
||||
}
|
||||
if v != 0 {
|
||||
t.Errorf("wanted zero for invalid value %q, got: %v", s, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestIsExcludedByFileSize is for testing the instance of
|
||||
// --exclude-larger-than parameters
|
||||
func TestIsExcludedByFileSize(t *testing.T) {
|
||||
|
|
|
@ -76,6 +76,7 @@ type GlobalOptions struct {
|
|||
stdout io.Writer
|
||||
stderr io.Writer
|
||||
|
||||
backends *location.Registry
|
||||
backendTestHook, backendInnerTestHook backendWrapper
|
||||
|
||||
// verbosity is set as follows:
|
||||
|
@ -99,6 +100,19 @@ var isReadingPassword bool
|
|||
var internalGlobalCtx context.Context
|
||||
|
||||
func init() {
|
||||
backends := location.NewRegistry()
|
||||
backends.Register(azure.NewFactory())
|
||||
backends.Register(b2.NewFactory())
|
||||
backends.Register(gs.NewFactory())
|
||||
backends.Register(local.NewFactory())
|
||||
backends.Register(rclone.NewFactory())
|
||||
backends.Register(rest.NewFactory())
|
||||
backends.Register(s3.NewFactory())
|
||||
backends.Register(sftp.NewFactory())
|
||||
backends.Register(swift.NewFactory())
|
||||
backends.Register(smb.NewFactory())
|
||||
globalOptions.backends = backends
|
||||
|
||||
var cancel context.CancelFunc
|
||||
internalGlobalCtx, cancel = context.WithCancel(context.Background())
|
||||
AddCleanupHandler(func(code int) (int, error) {
|
||||
|
@ -122,8 +136,8 @@ func init() {
|
|||
f.BoolVarP(&globalOptions.JSON, "json", "", false, "set output mode to JSON for commands that support it")
|
||||
f.StringVar(&globalOptions.CacheDir, "cache-dir", "", "set the cache `directory`. (default: use system default cache directory)")
|
||||
f.BoolVar(&globalOptions.NoCache, "no-cache", false, "do not use a local cache")
|
||||
f.StringSliceVar(&globalOptions.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates)")
|
||||
f.StringVar(&globalOptions.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key")
|
||||
f.StringSliceVar(&globalOptions.RootCertFilenames, "cacert", nil, "`file` to load root certificates from (default: use system certificates or $RESTIC_CACERT)")
|
||||
f.StringVar(&globalOptions.TLSClientCertKeyFilename, "tls-client-cert", "", "path to a `file` containing PEM encoded TLS client certificate and private key (default: $RESTIC_TLS_CLIENT_CERT)")
|
||||
f.BoolVar(&globalOptions.InsecureTLS, "insecure-tls", false, "skip TLS certificate verification when connecting to the repository (insecure)")
|
||||
f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories")
|
||||
f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)")
|
||||
|
@ -139,6 +153,10 @@ func init() {
|
|||
globalOptions.PasswordFile = os.Getenv("RESTIC_PASSWORD_FILE")
|
||||
globalOptions.KeyHint = os.Getenv("RESTIC_KEY_HINT")
|
||||
globalOptions.PasswordCommand = os.Getenv("RESTIC_PASSWORD_COMMAND")
|
||||
if os.Getenv("RESTIC_CACERT") != "" {
|
||||
globalOptions.RootCertFilenames = strings.Split(os.Getenv("RESTIC_CACERT"), ",")
|
||||
}
|
||||
globalOptions.TLSClientCertKeyFilename = os.Getenv("RESTIC_TLS_CLIENT_CERT")
|
||||
comp := os.Getenv("RESTIC_COMPRESSION")
|
||||
if comp != "" {
|
||||
// ignore error as there's no good way to handle it
|
||||
|
@ -538,9 +556,7 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
|
|||
func parseConfig(loc location.Location, opts options.Options) (interface{}, error) {
|
||||
cfg := loc.Config
|
||||
if cfg, ok := cfg.(restic.ApplyEnvironmenter); ok {
|
||||
if err := cfg.ApplyEnvironment(""); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg.ApplyEnvironment("")
|
||||
}
|
||||
|
||||
// only apply options for a particular backend here
|
||||
|
@ -555,8 +571,8 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro
|
|||
|
||||
// Open the backend specified by a location config.
|
||||
func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
|
||||
debug.Log("parsing location %v", location.StripPassword(s))
|
||||
loc, err := location.Parse(s)
|
||||
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
|
||||
loc, err := location.Parse(gopts.backends, s)
|
||||
if err != nil {
|
||||
return nil, errors.Fatalf("parsing repository location failed: %v", err)
|
||||
}
|
||||
|
@ -570,41 +586,21 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
|
|||
|
||||
rt, err := backend.Transport(globalOptions.TransportOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Fatal(err.Error())
|
||||
}
|
||||
|
||||
// wrap the transport so that the throughput via HTTP is limited
|
||||
lim := limiter.NewStaticLimiter(gopts.Limits)
|
||||
rt = lim.Transport(rt)
|
||||
|
||||
switch loc.Scheme {
|
||||
case "local":
|
||||
be, err = local.Open(ctx, *cfg.(*local.Config))
|
||||
case "sftp":
|
||||
be, err = sftp.Open(ctx, *cfg.(*sftp.Config))
|
||||
case "s3":
|
||||
be, err = s3.Open(ctx, *cfg.(*s3.Config), rt)
|
||||
case "gs":
|
||||
be, err = gs.Open(*cfg.(*gs.Config), rt)
|
||||
case "azure":
|
||||
be, err = azure.Open(ctx, *cfg.(*azure.Config), rt)
|
||||
case "swift":
|
||||
be, err = swift.Open(ctx, *cfg.(*swift.Config), rt)
|
||||
case "b2":
|
||||
be, err = b2.Open(ctx, *cfg.(*b2.Config), rt)
|
||||
case "rest":
|
||||
be, err = rest.Open(*cfg.(*rest.Config), rt)
|
||||
case "rclone":
|
||||
be, err = rclone.Open(*cfg.(*rclone.Config), lim)
|
||||
case "smb":
|
||||
be, err = smb.Open(ctx, *cfg.(*smb.Config))
|
||||
|
||||
default:
|
||||
factory := gopts.backends.Lookup(loc.Scheme)
|
||||
if factory == nil {
|
||||
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
|
||||
}
|
||||
|
||||
be, err = factory.Open(ctx, cfg, rt, lim)
|
||||
if err != nil {
|
||||
return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(s), err)
|
||||
return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err)
|
||||
}
|
||||
|
||||
// wrap with debug logging and connection limiting
|
||||
|
@ -618,15 +614,10 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
|
|||
}
|
||||
}
|
||||
|
||||
if loc.Scheme == "local" || loc.Scheme == "sftp" || loc.Scheme == "smb" {
|
||||
// wrap the backend in a LimitBackend so that the throughput is limited
|
||||
be = limiter.LimitBackend(be, lim)
|
||||
}
|
||||
|
||||
// check if config is there
|
||||
fi, err := be.Stat(ctx, restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil {
|
||||
return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(s))
|
||||
return nil, errors.Fatalf("unable to open config file: %v\nIs there a repository at the following location?\n%v", err, location.StripPassword(gopts.backends, s))
|
||||
}
|
||||
|
||||
if fi.Size == 0 {
|
||||
|
@ -637,9 +628,9 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio
|
|||
}
|
||||
|
||||
// Create the backend specified by URI.
|
||||
func create(ctx context.Context, s string, opts options.Options) (restic.Backend, error) {
|
||||
debug.Log("parsing location %v", s)
|
||||
loc, err := location.Parse(s)
|
||||
func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (restic.Backend, error) {
|
||||
debug.Log("parsing location %v", location.StripPassword(gopts.backends, s))
|
||||
loc, err := location.Parse(gopts.backends, s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -651,36 +642,15 @@ func create(ctx context.Context, s string, opts options.Options) (restic.Backend
|
|||
|
||||
rt, err := backend.Transport(globalOptions.TransportOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Fatal(err.Error())
|
||||
}
|
||||
|
||||
var be restic.Backend
|
||||
switch loc.Scheme {
|
||||
case "local":
|
||||
be, err = local.Create(ctx, *cfg.(*local.Config))
|
||||
case "sftp":
|
||||
be, err = sftp.Create(ctx, *cfg.(*sftp.Config))
|
||||
case "s3":
|
||||
be, err = s3.Create(ctx, *cfg.(*s3.Config), rt)
|
||||
case "gs":
|
||||
be, err = gs.Create(ctx, *cfg.(*gs.Config), rt)
|
||||
case "azure":
|
||||
be, err = azure.Create(ctx, *cfg.(*azure.Config), rt)
|
||||
case "swift":
|
||||
be, err = swift.Open(ctx, *cfg.(*swift.Config), rt)
|
||||
case "b2":
|
||||
be, err = b2.Create(ctx, *cfg.(*b2.Config), rt)
|
||||
case "rest":
|
||||
be, err = rest.Create(ctx, *cfg.(*rest.Config), rt)
|
||||
case "rclone":
|
||||
be, err = rclone.Create(ctx, *cfg.(*rclone.Config))
|
||||
case "smb":
|
||||
be, err = smb.Create(ctx, *cfg.(*smb.Config))
|
||||
default:
|
||||
debug.Log("invalid repository scheme: %v", s)
|
||||
return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
|
||||
factory := gopts.backends.Lookup(loc.Scheme)
|
||||
if factory == nil {
|
||||
return nil, errors.Fatalf("invalid backend: %q", loc.Scheme)
|
||||
}
|
||||
|
||||
be, err := factory.Create(ctx, cfg, rt, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/restic/restic/internal/backend/retry"
|
||||
|
@ -17,6 +18,7 @@ import (
|
|||
"github.com/restic/restic/internal/repository"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
"github.com/restic/restic/internal/ui/termstatus"
|
||||
)
|
||||
|
||||
type dirEntry struct {
|
||||
|
@ -204,6 +206,8 @@ func withTestEnvironment(t testing.TB) (env *testEnvironment, cleanup func()) {
|
|||
|
||||
// replace this hook with "nil" if listing a filetype more than once is necessary
|
||||
backendTestHook: func(r restic.Backend) (restic.Backend, error) { return newOrderedListOnceBackend(r), nil },
|
||||
// start with default set of backends
|
||||
backends: globalOptions.backends,
|
||||
}
|
||||
|
||||
// always overwrite global options
|
||||
|
@ -356,3 +360,20 @@ func withCaptureStdout(inner func() error) (*bytes.Buffer, error) {
|
|||
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func withTermStatus(gopts GlobalOptions, callback func(ctx context.Context, term *termstatus.Terminal) error) error {
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
var wg sync.WaitGroup
|
||||
|
||||
term := termstatus.New(gopts.stdout, gopts.stderr, gopts.Quiet)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
term.Run(ctx)
|
||||
}()
|
||||
|
||||
defer wg.Wait()
|
||||
defer cancel()
|
||||
|
||||
return callback(ctx, term)
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
godebug "runtime/debug"
|
||||
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/options"
|
||||
|
@ -81,7 +82,16 @@ func needsPassword(cmd string) bool {
|
|||
|
||||
var logBuffer = bytes.NewBuffer(nil)
|
||||
|
||||
func tweakGoGC() {
|
||||
// lower GOGC from 100 to 50, unless it was manually overwritten by the user
|
||||
oldValue := godebug.SetGCPercent(50)
|
||||
if oldValue != 100 {
|
||||
godebug.SetGCPercent(oldValue)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
tweakGoGC()
|
||||
// install custom global logger into a buffer, if an error occurs
|
||||
// we can show the logs
|
||||
log.SetOutput(logBuffer)
|
||||
|
|
|
@@ -265,6 +265,16 @@ binary, you can get it with `docker pull` like this:

    $ docker pull restic/restic

The container is also available on the GitHub Container Registry:

.. code-block:: console

    $ docker pull ghcr.io/restic/restic

Restic relies on the hostname for various operations. Make sure to set a static
hostname using `--hostname` when creating a Docker container, otherwise Docker
will assign a random hostname each time.
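
A sketch of such an invocation (volume paths, repository location and password
handling are placeholders, not a recommended setup) might look like:

.. code-block:: console

    $ docker run --rm --hostname my-backup-host \
        -v /srv/restic-repo:/repo -v /srv/data:/data \
        -e RESTIC_REPOSITORY=/repo -e RESTIC_PASSWORD=changeme \
        restic/restic backup /data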

From Source
***********

@@ -523,19 +523,30 @@ Microsoft Azure Blob Storage
****************************

You can also store backups on Microsoft Azure Blob Storage. Export the Azure
Blob Storage account name and key as follows:
Blob Storage account name:

.. code-block:: console

    $ export AZURE_ACCOUNT_NAME=<ACCOUNT_NAME>

For authentication export one of the following variables:

.. code-block:: console

    # For storage account key
    $ export AZURE_ACCOUNT_KEY=<SECRET_KEY>
    # For SAS
    $ export AZURE_ACCOUNT_SAS=<SAS_TOKEN>

or
Alternatively, if run on Azure, restic will automatically use service accounts configured
via the standard environment variables or Workload / Managed Identities.

Restic will by default use Azure's global domain ``core.windows.net`` as endpoint suffix.
You can specify other suffixes as follows:

.. code-block:: console

    $ export AZURE_ACCOUNT_NAME=<ACCOUNT_NAME>
    $ export AZURE_ACCOUNT_SAS=<SAS_TOKEN>
    $ export AZURE_ENDPOINT_SUFFIX=<ENDPOINT_SUFFIX>

Afterwards you can initialize a repository in a container called ``foo`` in the
root path like this:

@@ -567,6 +567,8 @@ environment variables. The following lists these environment variables:
RESTIC_PASSWORD                 The actual password for the repository
RESTIC_PASSWORD_COMMAND         Command printing the password for the repository to stdout
RESTIC_KEY_HINT                 ID of key to try decrypting first, before other keys
RESTIC_CACERT                   Location(s) of certificate file(s), comma separated if multiple (replaces --cacert)
RESTIC_TLS_CLIENT_CERT          Location of TLS client certificate and private key (replaces --tls-client-cert)
RESTIC_CACHE_DIR                Location of the cache directory
RESTIC_COMPRESSION              Compression mode (only available for repository format version 2)
RESTIC_PROGRESS_FPS             Frames per second by which the progress bar is updated

@@ -614,6 +616,7 @@ environment variables. The following lists these environment variables:
AZURE_ACCOUNT_NAME              Account name for Azure
AZURE_ACCOUNT_KEY               Account key for Azure
AZURE_ACCOUNT_SAS               Shared access signatures (SAS) for Azure
AZURE_ENDPOINT_SUFFIX           Endpoint suffix for Azure Storage (default: core.windows.net)

GOOGLE_PROJECT_ID               Project ID for Google Cloud Storage
GOOGLE_APPLICATION_CREDENTIALS  Application Credentials for Google Cloud Storage (e.g. $HOME/.config/gs-secret-restic-key.json)

@@ -232,6 +232,8 @@ modifying the repository. Instead restic will only print the actions it would
perform.


.. _checking-integrity:

Checking integrity and consistency
==================================

@@ -284,6 +286,14 @@ If the repository structure is intact, restic will show that no errors were foun
    check snapshots, trees and blobs
    no errors were found

By default, check creates a new temporary cache directory to verify that the
data stored in the repository is intact. To reuse the existing cache, you can
use the ``--with-cache`` flag.
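
For example (the repository path is only illustrative):

.. code-block:: console

    $ restic -r /srv/restic-repo check --with-cache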

If the cache directory is not explicitly set, then ``check`` creates its
temporary cache directory in the temporary directory, see :ref:`temporary_files`.
Otherwise, the specified cache directory is used, as described in :ref:`caching`.

By default, the ``check`` command does not verify that the actual pack files
on disk in the repository are unmodified, because doing so requires reading
a copy of every pack file in the repository. To tell restic to also verify the

@@ -37,3 +37,538 @@ exit code if a different error is encountered (e.g.: incorrect password
to ``cat config``) and it may print a different error message. If there
are no errors, restic will return a zero exit code and print the repository
metadata.

JSON output
***********

Restic outputs JSON data to ``stdout`` if requested with the ``--json`` flag.
The structure of that data varies depending on the circumstance. The
JSON output of most restic commands is documented here.

.. note::
    Not all commands support JSON output. If a command does not support JSON output,
    feel free to submit a pull request!

.. warning::
    We try to keep the JSON output backwards compatible. However, new message types
    or fields may be added at any time. Similarly, enum-like fields for which a fixed
    list of allowed values is documented may be extended at any time.


Output formats
--------------

Currently only the output on ``stdout`` is JSON formatted. Errors printed on ``stderr``
are still printed as plain text messages. The generated JSON output uses one of the
following two formats.

Single JSON document
^^^^^^^^^^^^^^^^^^^^

Several commands output a single JSON document that can be parsed in its entirety.
Depending on the command, the output consists of either a single or multiple lines.

JSON lines
^^^^^^^^^^

Several commands, in particular long-running ones or those that generate a large output,
use a format also known as JSON lines. It consists of a stream of new-line separated JSON
messages. You can determine the nature of the message using the ``message_type`` field.

As an exception, the ``ls`` command uses the field ``struct_type`` instead.
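
Because each line is a complete JSON message, the stream can be processed with
standard tools. As a minimal sketch (``jq`` is an external tool, not part of restic),
the final summary message of a backup could be extracted like this:

.. code-block:: console

    $ restic backup --json ~/work | jq -c 'select(.message_type == "summary")'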
|
||||
|
||||
|
||||
backup
|
||||
------
|
||||
|
||||
The ``backup`` command uses the JSON lines format with the following message types.
|
||||
|
||||
Status
|
||||
^^^^^^
|
||||
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``message_type`` | Always "status" |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``seconds_elapsed`` | Time since backup started |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``seconds_remaining`` | Estimated time remaining |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``percent_done`` | Percentage of data backed up (bytes_done/total_bytes) |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``total_files`` | Total number of files detected |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``files_done`` | Files completed (backed up to repo) |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``total_bytes`` | Total number of bytes in backup set |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``bytes_done`` | Number of bytes completed (backed up to repo) |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``error_count`` | Number of errors |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``current_files`` | List of files currently being backed up |
|
||||
+----------------------+------------------------------------------------------------+
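
As an illustration of the message shape only (the values below are made-up
placeholders, not captured restic output), a single status line could look like:

.. code-block:: json

    {"message_type": "status", "seconds_elapsed": 12, "percent_done": 0.25,
     "total_files": 4096, "files_done": 1024, "total_bytes": 104857600,
     "bytes_done": 26214400, "current_files": ["/home/user/file.txt"]}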
|
||||
|
||||
Error
|
||||
^^^^^
|
||||
|
||||
+----------------------+-------------------------------------------+
|
||||
| ``message_type`` | Always "error" |
|
||||
+----------------------+-------------------------------------------+
|
||||
| ``error`` | Error message |
|
||||
+----------------------+-------------------------------------------+
|
||||
| ``during`` | What restic was trying to do |
|
||||
+----------------------+-------------------------------------------+
|
||||
| ``item`` | Usually, the path of the problematic file |
|
||||
+----------------------+-------------------------------------------+
|
||||
|
||||
Verbose Status
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
Verbose status provides details about the progress, including details about backed up files.
|
||||
|
||||
+----------------------+-----------------------------------------------------------+
|
||||
| ``message_type`` | Always "verbose_status" |
|
||||
+----------------------+-----------------------------------------------------------+
|
||||
| ``action`` | Either "new", "unchanged", "modified" or "scan_finished" |
|
||||
+----------------------+-----------------------------------------------------------+
|
||||
| ``item`` | The item in question |
|
||||
+----------------------+-----------------------------------------------------------+
|
||||
| ``duration`` | How long it took, in seconds |
|
||||
+----------------------+-----------------------------------------------------------+
|
||||
| ``data_size`` | How big the item is |
|
||||
+----------------------+-----------------------------------------------------------+
|
||||
| ``metadata_size`` | How big the metadata is |
|
||||
+----------------------+-----------------------------------------------------------+
|
||||
| ``total_files`` | Total number of files |
|
||||
+----------------------+-----------------------------------------------------------+
|
||||
|
||||
Summary
|
||||
^^^^^^^
|
||||
|
||||
Summary is the last output line in a successful backup.
|
||||
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``message_type`` | Always "summary" |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``files_new`` | Number of new files |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``files_changed`` | Number of files that changed |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``files_unmodified`` | Number of files that did not change |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``dirs_new`` | Number of new directories |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``dirs_changed`` | Number of directories that changed |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``dirs_unmodified`` | Number of directories that did not change |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``data_blobs`` | Number of data blobs |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``tree_blobs`` | Number of tree blobs |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``data_added`` | Amount of data added, in bytes |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``total_files_processed`` | Total number of files processed |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``total_bytes_processed`` | Total number of bytes processed |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``total_duration`` | Total time it took for the operation to complete |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
| ``snapshot_id`` | ID of the new snapshot |
|
||||
+---------------------------+---------------------------------------------------------+
|
||||
|
||||
|
||||
cat
|
||||
---
|
||||
|
||||
The ``cat`` command returns data about various objects in the repository, which
|
||||
are stored in JSON form. Specifying ``--json`` or ``--quiet`` will suppress any
|
||||
non-JSON messages the command generates.
|
||||
|
||||
|
||||
diff
|
||||
----
|
||||
|
||||
The ``diff`` command uses the JSON lines format with the following message types.
|
||||
|
||||
change
|
||||
^^^^^^
|
||||
|
||||
+------------------+--------------------------------------------------------------+
|
||||
| ``message_type`` | Always "change" |
|
||||
+------------------+--------------------------------------------------------------+
|
||||
| ``path`` | Path that has changed |
|
||||
+------------------+--------------------------------------------------------------+
|
||||
| ``modifier`` | Type of change, a concatenation of the following characters: |
|
||||
| | "+" = added, "-" = removed, "T" = entry type changed, |
|
||||
| | "M" = file content changed, "U" = metadata changed |
|
||||
+------------------+--------------------------------------------------------------+
|
||||
|
||||
statistics
|
||||
^^^^^^^^^^
|
||||
|
||||
+---------------------+----------------------------+
|
||||
| ``message_type`` | Always "statistics" |
|
||||
+---------------------+----------------------------+
|
||||
| ``source_snapshot`` | ID of first snapshot |
|
||||
+---------------------+----------------------------+
|
||||
| ``target_snapshot`` | ID of second snapshot |
|
||||
+---------------------+----------------------------+
|
||||
| ``changed_files`` | Number of changed files |
|
||||
+---------------------+----------------------------+
|
||||
| ``added`` | DiffStat object, see below |
|
||||
+---------------------+----------------------------+
|
||||
| ``removed`` | DiffStat object, see below |
|
||||
+---------------------+----------------------------+
|
||||
|
||||
DiffStat object
|
||||
|
||||
+----------------+-------------------------------------------+
|
||||
| ``files`` | Number of changed files |
|
||||
+----------------+-------------------------------------------+
|
||||
| ``dirs`` | Number of changed directories |
|
||||
+----------------+-------------------------------------------+
|
||||
| ``others`` | Number of changed other directory entries |
|
||||
+----------------+-------------------------------------------+
|
||||
| ``data_blobs`` | Number of data blobs |
|
||||
+----------------+-------------------------------------------+
|
||||
| ``tree_blobs`` | Number of tree blobs |
|
||||
+----------------+-------------------------------------------+
|
||||
| ``bytes`` | Number of bytes |
|
||||
+----------------+-------------------------------------------+
|
||||
|
||||
|
||||
find
|
||||
----
|
||||
|
||||
The ``find`` command outputs a single JSON document containing an array of JSON
|
||||
objects with matches for your search term. These matches are organized by snapshot.
|
||||
|
||||
If the ``--blob`` or ``--tree`` option is passed, then the output is an array of
|
||||
Blob objects.
|
||||
|
||||
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``hits`` | Number of matches in the snapshot |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``snapshot`` | ID of the snapshot |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``matches`` | Array of Match objects detailing a match |
|
||||
+-----------------+----------------------------------------------+
|
||||
|
||||
Match object
|
||||
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``path`` | Object path |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``permissions`` | UNIX permissions |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``type`` | Object type e.g. file, dir, etc... |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``atime`` | Access time |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``mtime`` | Modification time |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``ctime`` | Change time |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``name`` | Object name |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``user`` | Name of owner |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``group`` | Name of group |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``mode`` | UNIX file mode, shorthand of ``permissions`` |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``device_id`` | OS specific device identifier |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``links`` | Number of hardlinks |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``uid`` | ID of owner |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``gid`` | ID of group |
|
||||
+-----------------+----------------------------------------------+
|
||||
| ``size`` | Size of object in bytes |
|
||||
+-----------------+----------------------------------------------+
|
||||
|
||||
Blob object
|
||||
|
||||
+-----------------+--------------------------------------------+
|
||||
| ``object_type`` | Either "blob" or "tree" |
|
||||
+-----------------+--------------------------------------------+
|
||||
| ``id`` | ID of found blob |
|
||||
+-----------------+--------------------------------------------+
|
||||
| ``path`` | Path in snapshot |
|
||||
+-----------------+--------------------------------------------+
|
||||
| ``parent_tree`` | Parent tree blob, only set for type "blob" |
|
||||
+-----------------+--------------------------------------------+
|
||||
| ``snapshot`` | Snapshot ID |
|
||||
+-----------------+--------------------------------------------+
|
||||
| ``time`` | Snapshot timestamp |
|
||||
+-----------------+--------------------------------------------+
|
||||
|
||||
|
||||
forget
|
||||
------
|
||||
|
||||
The ``forget`` command prints a single JSON document containing an array of
|
||||
ForgetGroups. If specific snapshot IDs are specified, then no output is generated.
|
||||
|
||||
The ``prune`` command does not yet support JSON such that ``forget --prune``
|
||||
results in a mix of JSON and text output.
|
||||
|
||||
ForgetGroup
|
||||
^^^^^^^^^^^
|
||||
|
||||
+-------------+-----------------------------------------------------------+
|
||||
| ``tags`` | Tags identifying the snapshot group |
|
||||
+-------------+-----------------------------------------------------------+
|
||||
| ``host`` | Host identifying the snapshot group |
|
||||
+-------------+-----------------------------------------------------------+
|
||||
| ``paths`` | Paths identifying the snapshot group |
|
||||
+-------------+-----------------------------------------------------------+
|
||||
| ``keep`` | Array of Snapshot objects that are kept |
|
||||
+-------------+-----------------------------------------------------------+
|
||||
| ``remove`` | Array of Snapshot objects that were removed |
|
||||
+-------------+-----------------------------------------------------------+
|
||||
| ``reasons`` | Array of Reason objects describing why a snapshot is kept |
|
||||
+-------------+-----------------------------------------------------------+
|
||||
|
||||
Snapshot object
|
||||
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``time`` | Timestamp of when the backup was started |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``parent`` | ID of the parent snapshot |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``tree`` | ID of the root tree blob |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``paths`` | List of paths included in the backup |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``hostname`` | Hostname of the backed up machine |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``username`` | Username the backup command was run as |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``uid`` | ID of owner |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``gid`` | ID of group |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``excludes`` | List of paths and globs excluded from the backup |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``tags`` | List of tags for the snapshot in question |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``program_version`` | restic version used to create snapshot |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``id`` | Snapshot ID |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``short_id`` | Snapshot ID, short form |
|
||||
+---------------------+--------------------------------------------------+
|
||||
|
||||
Reason object
|
||||
|
||||
+----------------+---------------------------------------------------------+
|
||||
| ``snapshot`` | Snapshot object, without ``id`` and ``short_id`` fields |
|
||||
+----------------+---------------------------------------------------------+
|
||||
| ``matches`` | Array containing descriptions of the matching criteria |
|
||||
+----------------+---------------------------------------------------------+
|
||||
| ``counters`` | Object containing counters used by the policies |
|
||||
+----------------+---------------------------------------------------------+
|
||||
|
||||
|
||||
init
|
||||
----
|
||||
|
||||
The ``init`` command uses the JSON lines format, but only outputs a single message.
|
||||
|
||||
+------------------+--------------------------------+
|
||||
| ``message_type`` | Always "initialized" |
|
||||
+------------------+--------------------------------+
|
||||
| ``id`` | ID of the created repository |
|
||||
+------------------+--------------------------------+
|
||||
| ``repository`` | URL of the repository |
|
||||
+------------------+--------------------------------+
|
||||
|
||||
|
||||
key list
|
||||
--------
|
||||
|
||||
The ``key list`` command returns an array of objects with the following structure.
|
||||
|
||||
+--------------+------------------------------------+
|
||||
| ``current`` | Is currently used key? |
|
||||
+--------------+------------------------------------+
|
||||
| ``id`` | Unique key ID |
|
||||
+--------------+------------------------------------+
|
||||
| ``userName`` | User who created it |
|
||||
+--------------+------------------------------------+
|
||||
| ``hostName`` | Name of machine it was created on |
|
||||
+--------------+------------------------------------+
|
||||
| ``created`` | Timestamp when it was created |
|
||||
+--------------+------------------------------------+
|
||||
|
||||
|
||||
ls
|
||||
--
|
||||
|
||||
The ``ls`` command uses the JSON lines format with the following message types.
|
||||
As an exception, the ``struct_type`` field is used to determine the message type.
|
||||
|
||||
snapshot
|
||||
^^^^^^^^
|
||||
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``struct_type``| Always "snapshot" |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``time`` | Timestamp of when the backup was started |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``parent`` | ID of the parent snapshot |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``tree`` | ID of the root tree blob |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``paths`` | List of paths included in the backup |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``hostname`` | Hostname of the backed up machine |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``username`` | Username the backup command was run as |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``uid`` | ID of owner |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``gid`` | ID of group |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``excludes`` | List of paths and globs excluded from the backup |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``tags`` | List of tags for the snapshot in question |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``id`` | Snapshot ID |
|
||||
+----------------+--------------------------------------------------+
|
||||
| ``short_id`` | Snapshot ID, short form |
|
||||
+----------------+--------------------------------------------------+
|
||||
|
||||
|
||||
node
|
||||
^^^^
|
||||
|
||||
+-----------------+--------------------------+
|
||||
| ``struct_type`` | Always "node" |
|
||||
+-----------------+--------------------------+
|
||||
| ``name`` | Node name |
|
||||
+-----------------+--------------------------+
|
||||
| ``type`` | Node type |
|
||||
+-----------------+--------------------------+
|
||||
| ``path`` | Node path |
|
||||
+-----------------+--------------------------+
|
||||
| ``uid`` | UID of node |
|
||||
+-----------------+--------------------------+
|
||||
| ``gid`` | GID of node |
|
||||
+-----------------+--------------------------+
|
||||
| ``size`` | Size in bytes |
|
||||
+-----------------+--------------------------+
|
||||
| ``mode`` | Node mode |
|
||||
+-----------------+--------------------------+
|
||||
| ``atime`` | Node access time |
|
||||
+-----------------+--------------------------+
|
||||
| ``mtime`` | Node modification time |
|
||||
+-----------------+--------------------------+
|
||||
| ``ctime`` | Node creation time |
|
||||
+-----------------+--------------------------+
|
||||
|
||||
|
||||
restore
|
||||
-------
|
||||
|
||||
The ``restore`` command uses the JSON lines format with the following message types.
|
||||
|
||||
Status
|
||||
^^^^^^
|
||||
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``message_type`` | Always "status" |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``seconds_elapsed`` | Time since restore started |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``percent_done`` | Percentage of data backed up (bytes_restored/total_bytes) |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``total_files`` | Total number of files detected |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``files_restored`` | Files restored |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``total_bytes`` | Total number of bytes in restore set |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``bytes_restored`` | Number of bytes restored |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|
||||
|
||||
Summary
|
||||
^^^^^^^
|
||||
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``message_type`` | Always "summary" |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``seconds_elapsed`` | Time since restore started |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``total_files`` | Total number of files detected |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``files_restored`` | Files restored |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``total_bytes`` | Total number of bytes in restore set |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|``bytes_restored`` | Number of bytes restored |
|
||||
+----------------------+------------------------------------------------------------+
|
||||
|
||||
|
||||
snapshots
|
||||
---------
|
||||
|
||||
The snapshots command returns a single JSON object, an array with objects of the structure outlined below.
|
||||
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``time`` | Timestamp of when the backup was started |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``parent`` | ID of the parent snapshot |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``tree`` | ID of the root tree blob |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``paths`` | List of paths included in the backup |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``hostname`` | Hostname of the backed up machine |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``username`` | Username the backup command was run as |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``uid`` | ID of owner |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``gid`` | ID of group |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``excludes`` | List of paths and globs excluded from the backup |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``tags`` | List of tags for the snapshot in question |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``program_version`` | restic version used to create snapshot |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``id`` | Snapshot ID |
|
||||
+---------------------+--------------------------------------------------+
|
||||
| ``short_id`` | Snapshot ID, short form |
|
||||
+---------------------+--------------------------------------------------+
|
||||
|
||||
|
||||
stats
|
||||
-----
|
||||
|
||||
The snapshots command returns a single JSON object.
|
||||
|
||||
+------------------------------+-----------------------------------------------------+
|
||||
| ``total_size`` | Repository size in bytes |
|
||||
+------------------------------+-----------------------------------------------------+
|
||||
| ``total_file_count`` | Number of files backed up in the repository |
|
||||
+------------------------------+-----------------------------------------------------+
|
||||
| ``total_blob_count`` | Number of blobs in the repository |
|
||||
+------------------------------+-----------------------------------------------------+
|
||||
| ``snapshots_count`` | Number of processed snapshots |
|
||||
+------------------------------+-----------------------------------------------------+
|
||||
| ``total_uncompressed_size`` | Repository size in bytes if blobs were uncompressed |
|
||||
+------------------------------+-----------------------------------------------------+
|
||||
| ``compression_ratio`` | Factor by which the already compressed data |
|
||||
| | has shrunk due to compression |
|
||||
+------------------------------+-----------------------------------------------------+
|
||||
| ``compression_progress`` | Percentage of already compressed data |
|
||||
+------------------------------+-----------------------------------------------------+
|
||||
| ``compression_space_saving`` | Overall space saving due to compression |
|
||||
+------------------------------+-----------------------------------------------------+
|
||||
|
|
|
@ -127,3 +127,5 @@ required argument is the new version number (in `Semantic Versioning
|
|||
go run helpers/prepare-release/main.go 0.14.0
|
||||
|
||||
Checks can be skipped on demand via flags, please see ``--help`` for details.
|
||||
|
||||
The build process requires ``docker``, ``docker-buildx`` and ``qemu-user-static-binfmt``.
|
||||
|
|
|
@ -418,7 +418,6 @@ instead of the default, set the environment variable like this:
|
|||
$ restic -r /srv/restic-repo backup ~/work
|
||||
|
||||
|
||||
|
||||
.. _caching:
|
||||
|
||||
Caching
|
||||
|
@ -442,6 +441,10 @@ The command line parameter ``--cache-dir`` or the environment variable
|
|||
parameter ``--no-cache`` disables the cache entirely. In this case, all data
|
||||
is loaded from the repository.
|
||||
|
||||
If a cache location is explicitly specified, then the ``check`` command will use
|
||||
that location to store its temporary cache. See :ref:`checking-integrity` for
|
||||
more details.
|
||||
|
||||
The cache is ephemeral: When a file cannot be read from the cache, it is loaded
|
||||
from the repository.
|
||||
|
||||
|
@ -451,4 +454,3 @@ time it is used, so by looking at the timestamps of the sub directories of the
|
|||
cache directory it can decide which sub directories are old and probably not
|
||||
needed any more. You can either remove these directories manually, or run a
|
||||
restic command with the ``--cleanup-cache`` flag.
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM golang:1.19-alpine AS builder
|
||||
FROM golang:1.20-alpine AS builder
|
||||
|
||||
WORKDIR /go/src/github.com/restic/restic
|
||||
|
||||
|
|
18
docker/Dockerfile.release
Normal file
18
docker/Dockerfile.release
Normal file
|
@ -0,0 +1,18 @@
|
|||
# the official binaries are cross-built from Linux running on an AMD64 host
|
||||
# other architectures also seem to generate identical binaries but stay on the safe side
|
||||
FROM --platform=linux/amd64 restic/builder:latest as helper
|
||||
|
||||
ARG TARGETOS
|
||||
ARG TARGETARCH
|
||||
|
||||
COPY --chown=build . /restic
|
||||
RUN go run helpers/build-release-binaries/main.go --platform $TARGETOS/$TARGETARCH --skip-compress
|
||||
RUN mv /output/restic_${TARGETOS}_${TARGETARCH} /output/restic
|
||||
|
||||
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=helper /output/restic /usr/bin
|
||||
RUN apk add --update --no-cache ca-certificates fuse openssh-client tzdata jq
|
||||
|
||||
ENTRYPOINT ["/usr/bin/restic"]
|
|
@ -16,9 +16,13 @@ Set environment variable `RESTIC_REPOSITORY` and map volume to directories and
|
|||
files like:
|
||||
|
||||
```
|
||||
docker run --rm -ti \
|
||||
docker run --rm --hostname my-host -ti \
|
||||
-v $HOME/.restic/passfile:/pass \
|
||||
-v $HOME/importantdirectory:/data \
|
||||
-e RESTIC_REPOSITORY=rest:https://user:pass@hostname/ \
|
||||
restic/restic -p /pass backup /data
|
||||
```
|
||||
|
||||
Restic relies on the hostname for various operations. Make sure to set a static
|
||||
hostname using `--hostname` when creating a Docker container, otherwise Docker
|
||||
will assign a random hostname each time.
|
||||
|
|
39
go.mod
39
go.mod
|
@ -2,7 +2,8 @@ module github.com/restic/restic
|
|||
|
||||
require (
|
||||
cloud.google.com/go/storage v1.30.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0
|
||||
github.com/anacrolix/fuse v0.2.0
|
||||
github.com/cenkalti/backoff/v4 v4.2.0
|
||||
|
@ -26,48 +27,54 @@ require (
|
|||
github.com/restic/chunker v0.4.0
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
golang.org/x/crypto v0.9.0
|
||||
golang.org/x/net v0.10.0
|
||||
golang.org/x/oauth2 v0.8.0
|
||||
golang.org/x/sync v0.2.0
|
||||
golang.org/x/sys v0.8.0
|
||||
golang.org/x/term v0.8.0
|
||||
golang.org/x/text v0.9.0
|
||||
google.golang.org/api v0.116.0
|
||||
golang.org/x/crypto v0.10.0
|
||||
golang.org/x/net v0.11.0
|
||||
golang.org/x/oauth2 v0.9.0
|
||||
golang.org/x/sync v0.3.0
|
||||
golang.org/x/sys v0.9.0
|
||||
golang.org/x/term v0.9.0
|
||||
golang.org/x/text v0.10.0
|
||||
google.golang.org/api v0.129.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.110.0 // indirect
|
||||
cloud.google.com/go/compute v1.19.0 // indirect
|
||||
cloud.google.com/go/compute v1.19.3 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
cloud.google.com/go/iam v0.13.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/dnaeon/go-vcr v1.2.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/fgprof v0.9.3 // indirect
|
||||
github.com/geoffgarside/ber v1.1.0 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.8.0 // indirect
|
||||
github.com/google/s2a-go v0.1.4 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.11.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
|
||||
github.com/rs/xid v1.5.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.2 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633 // indirect
|
||||
google.golang.org/grpc v1.54.0 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
|
||||
google.golang.org/grpc v1.56.1 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
|
127
go.sum
127
go.sum
|
@ -1,30 +1,34 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
|
||||
cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
|
||||
cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
|
||||
cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
|
||||
cloud.google.com/go/compute v1.19.3 h1:DcTwsFgGev/wV5+q8o2fzgcHOaac+DKGC91ZlvpsQds=
|
||||
cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
|
||||
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
|
||||
cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
|
||||
cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
|
||||
cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
|
||||
cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
|
||||
github.com/anacrolix/fuse v0.2.0 h1:pc+To78kI2d/WUjIyrsdqeJQAesuwpGxlI3h1nAv3Do=
|
||||
github.com/anacrolix/fuse v0.2.0/go.mod h1:Kfu02xBwnySDpH3N23BmrP3MDfwAQGRLUCj6XyeOvBQ=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
|
||||
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
|
@ -32,13 +36,17 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
|||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
|
@ -48,14 +56,18 @@ github.com/elithrar/simple-scrypt v1.3.0/go.mod h1:U2XQRI95XHY0St410VE3UjT7vuKb1
|
|||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
|
||||
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
|
||||
github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
|
||||
github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
|
@ -64,14 +76,17 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
|||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
|
@ -88,13 +103,16 @@ github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdf
|
|||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc=
|
||||
github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
|
||||
github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
|
||||
github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc=
|
||||
github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w=
|
||||
github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
|
||||
github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
|
||||
|
@ -116,6 +134,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
|||
github.com/kurin/blazer v0.5.4-0.20230113224640-3887e1ec64b5 h1:OUlGa6AAolmjyPtILbMJ8vHayz5wd4wBUloheGcMhfA=
|
||||
github.com/kurin/blazer v0.5.4-0.20230113224640-3887e1ec64b5/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.56 h1:pkZplIEHu8vinjkmhsexcXpWth2tjVLphrTZx6fBVZY=
|
||||
|
@ -127,10 +146,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
|
|||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
|
||||
github.com/ncw/swift/v2 v2.0.1 h1:q1IN8hNViXEv8Zvg3Xdis4a3c4IlIGezkYz09zQL5J0=
|
||||
github.com/ncw/swift/v2 v2.0.1/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
|
||||
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
|
@ -145,6 +164,7 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
|
|||
github.com/restic/chunker v0.4.0 h1:YUPYCUn70MYP7VO4yllypp2SjmsRhRJaad3xKu1QFRw=
|
||||
github.com/restic/chunker v0.4.0/go.mod h1:z0cH2BejpW636LXw0R/BGyv+Ey8+m9QGiOanDHItzyw=
|
||||
github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
|
@ -160,6 +180,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
|||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
|
@ -168,66 +189,85 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
|||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
|
||||
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
|
||||
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
|
||||
golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
|
||||
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.9.0 h1:BPpt2kU7oMRq3kCHAA1tbSEshXRw1LpG2ztgDwrzuAs=
|
||||
golang.org/x/oauth2 v0.9.0/go.mod h1:qYgFZaFiu6Wg24azG8bdV52QJXJGbZzIIsRCdVKzbLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -235,29 +275,39 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.116.0 h1:09tOPVufPwfm5W4aA8EizGHJ7BcoRDsIareM2a15gO4=
google.golang.org/api v0.116.0/go.mod h1:9cD4/t6uvd9naoEJFA+M96d0IuB6BqFuyhpw68+mRGg=
google.golang.org/api v0.129.0 h1:2XbdjjNfFPXQyufzQVwPf1RRnHH8Den2pfNE2jw7L8w=
google.golang.org/api v0.129.0/go.mod h1:dFjiXlanKwWE3612X97llhsoI36FAoIiRj3aTl5b/zE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633 h1:0BOZf6qNozI3pkN3fJLwNubheHJYHhMh91GRFOWWK08=
google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao=
google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ=
google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -269,13 +319,14 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

@ -1,11 +1,14 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -14,16 +17,24 @@ import (
|
|||
)
|
||||
|
||||
var opts = struct {
|
||||
Verbose bool
|
||||
SourceDir string
|
||||
OutputDir string
|
||||
Version string
|
||||
Verbose bool
|
||||
SourceDir string
|
||||
OutputDir string
|
||||
Tags string
|
||||
PlatformSubset string
|
||||
Platform string
|
||||
SkipCompress bool
|
||||
Version string
|
||||
}{}
|
||||
|
||||
func init() {
|
||||
pflag.BoolVarP(&opts.Verbose, "verbose", "v", false, "be verbose")
|
||||
pflag.StringVarP(&opts.SourceDir, "source", "s", "/restic", "path to the source code `directory`")
|
||||
pflag.StringVarP(&opts.OutputDir, "output", "o", "/output", "path to the output `directory`")
|
||||
pflag.StringVar(&opts.Tags, "tags", "", "additional build `tags`")
|
||||
pflag.StringVar(&opts.PlatformSubset, "platform-subset", "", "specify `n/t` to only build this subset")
|
||||
pflag.StringVarP(&opts.Platform, "platform", "p", "", "specify `os/arch` to only build this specific platform")
|
||||
pflag.BoolVar(&opts.SkipCompress, "skip-compress", false, "skip binary compression step")
|
||||
pflag.StringVar(&opts.Version, "version", "", "use `x.y.z` as the version for output files")
|
||||
pflag.Parse()
|
||||
}
|
||||
|
@ -95,10 +106,15 @@ func build(sourceDir, outputDir, goos, goarch string) (filename string) {
|
|||
}
|
||||
outputFile := filepath.Join(outputDir, filename)
|
||||
|
||||
tags := "selfupdate"
|
||||
if opts.Tags != "" {
|
||||
tags += "," + opts.Tags
|
||||
}
|
||||
|
||||
c := exec.Command("go", "build",
|
||||
"-o", outputFile,
|
||||
"-ldflags", "-s -w",
|
||||
"-tags", "selfupdate",
|
||||
"-tags", tags,
|
||||
"./cmd/restic",
|
||||
)
|
||||
c.Stdout = os.Stdout
|
||||
|
@ -176,7 +192,9 @@ func buildForTarget(sourceDir, outputDir, goos, goarch string) (filename string)
|
|||
filename = build(sourceDir, outputDir, goos, goarch)
|
||||
touch(filepath.Join(outputDir, filename), mtime)
|
||||
chmod(filepath.Join(outputDir, filename), 0755)
|
||||
filename = compress(goos, outputDir, filename)
|
||||
if !opts.SkipCompress {
|
||||
filename = compress(goos, outputDir, filename)
|
||||
}
|
||||
return filename
|
||||
}
|
||||
|
||||
|
@ -220,9 +238,7 @@ func buildTargets(sourceDir, outputDir string, targets map[string][]string) {
|
|||
msg("build finished in %.3fs", time.Since(start).Seconds())
|
||||
}
|
||||
|
||||
// ATTENTION: the list of architectures must be in sync with .github/workflows/tests.yml!
|
||||
var defaultBuildTargets = map[string][]string{
|
||||
"aix": {"ppc64"},
|
||||
"darwin": {"amd64", "arm64"},
|
||||
"freebsd": {"386", "amd64", "arm"},
|
||||
"linux": {"386", "amd64", "arm", "arm64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "riscv64", "s390x"},
|
||||
|
@ -244,15 +260,71 @@ func downloadModules(sourceDir string) {
|
|||
}
|
||||
}
|
||||
|
||||
func selectSubset(subset string, target map[string][]string) (map[string][]string, error) {
|
||||
t, n, _ := strings.Cut(subset, "/")
|
||||
part, err := strconv.ParseInt(t, 10, 8)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse platform subset %q", subset)
|
||||
}
|
||||
total, err := strconv.ParseInt(n, 10, 8)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse platform subset %q", subset)
|
||||
}
|
||||
if total < 0 || part < 0 {
|
||||
return nil, errors.New("platform subset out of range")
|
||||
}
|
||||
if part >= total {
|
||||
return nil, errors.New("t must be in 0 <= t < n")
|
||||
}
|
||||
|
||||
// flatten platform list
|
||||
platforms := []string{}
|
||||
for os, archs := range target {
|
||||
for _, arch := range archs {
|
||||
platforms = append(platforms, os+"/"+arch)
|
||||
}
|
||||
}
|
||||
sort.Strings(platforms)
|
||||
|
||||
// select subset
|
||||
lower := len(platforms) * int(part) / int(total)
|
||||
upper := len(platforms) * int(part+1) / int(total)
|
||||
platforms = platforms[lower:upper]
|
||||
|
||||
return buildPlatformList(platforms), nil
|
||||
}
|
||||
|
||||
func buildPlatformList(platforms []string) map[string][]string {
|
||||
fmt.Printf("Building for %v\n", platforms)
|
||||
|
||||
targets := make(map[string][]string)
|
||||
for _, platform := range platforms {
|
||||
os, arch, _ := strings.Cut(platform, "/")
|
||||
targets[os] = append(targets[os], arch)
|
||||
}
|
||||
return targets
|
||||
}
|
||||
|
||||
func main() {
|
||||
if len(pflag.Args()) != 0 {
|
||||
die("USAGE: build-release-binaries [OPTIONS]")
|
||||
}
|
||||
|
||||
targets := defaultBuildTargets
|
||||
if opts.PlatformSubset != "" {
|
||||
var err error
|
||||
targets, err = selectSubset(opts.PlatformSubset, targets)
|
||||
if err != nil {
|
||||
die("%s", err)
|
||||
}
|
||||
} else if opts.Platform != "" {
|
||||
targets = buildPlatformList([]string{opts.Platform})
|
||||
}
|
||||
|
||||
sourceDir := abs(opts.SourceDir)
|
||||
outputDir := abs(opts.OutputDir)
|
||||
mkdir(outputDir)
|
||||
|
||||
downloadModules(sourceDir)
|
||||
buildTargets(sourceDir, outputDir, defaultBuildTargets)
|
||||
buildTargets(sourceDir, outputDir, targets)
|
||||
}
|
||||
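The new --platform-subset flag takes a value of the form part/total (shown as n/t in the flag help): the sorted list of os/arch platforms is split into total contiguous slices and only slice part is built. Below is a minimal standalone sketch of the index arithmetic used by selectSubset above; the toy platform list and the helper name subset are illustrative only, not restic's code.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// subset mirrors the lower/upper slice computation in selectSubset: the
// boundaries are len*part/total and len*(part+1)/total, so the total slices
// cover the whole list without gaps or overlaps.
func subset(platforms []string, part, total int) []string {
	sort.Strings(platforms)
	lower := len(platforms) * part / total
	upper := len(platforms) * (part + 1) / total
	return platforms[lower:upper]
}

func main() {
	platforms := []string{
		"linux/amd64", "linux/arm64", "darwin/amd64",
		"darwin/arm64", "windows/amd64", "freebsd/amd64",
	}
	// "0/3" builds the first third of the sorted list, "1/3" the second, and so on.
	for part := 0; part < 3; part++ {
		fmt.Printf("%d/3: %s\n", part, strings.Join(subset(platforms, part, 3), " "))
	}
}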
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
@ -409,13 +410,19 @@ func signFiles(filenames ...string) {
|
|||
}
|
||||
}
|
||||
|
||||
func updateDocker(outputDir, version string) {
|
||||
cmd := fmt.Sprintf("bzcat %s/restic_%s_linux_amd64.bz2 > restic", outputDir, version)
|
||||
run("sh", "-c", cmd)
|
||||
run("chmod", "+x", "restic")
|
||||
run("docker", "pull", "alpine:latest")
|
||||
run("docker", "build", "--rm", "--tag", "restic/restic:latest", "-f", "docker/Dockerfile", ".")
|
||||
run("docker", "tag", "restic/restic:latest", "restic/restic:"+version)
|
||||
func updateDocker(sourceDir, version string) string {
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
builderName := fmt.Sprintf("restic-release-builder-%d", r.Int())
|
||||
run("docker", "buildx", "create", "--name", builderName, "--driver", "docker-container", "--bootstrap")
|
||||
|
||||
buildCmd := fmt.Sprintf("docker buildx build --builder %s --platform linux/386,linux/amd64,linux/arm,linux/arm64 --pull -f docker/Dockerfile.release %q", builderName, sourceDir)
|
||||
run("sh", "-c", buildCmd+" --no-cache")
|
||||
|
||||
publishCmds := ""
|
||||
for _, tag := range []string{"restic/restic:latest", "restic/restic:" + version} {
|
||||
publishCmds += buildCmd + fmt.Sprintf(" --tag %q --push\n", tag)
|
||||
}
|
||||
return publishCmds + "\ndocker buildx rm " + builderName
|
||||
}
|
||||
|
||||
func tempdir(prefix string) string {
|
||||
|
@ -464,15 +471,14 @@ func main() {
|
|||
|
||||
extractTar(tarFilename, sourceDir)
|
||||
runBuild(sourceDir, opts.OutputDir, opts.Version)
|
||||
rmdir(sourceDir)
|
||||
|
||||
sha256sums(opts.OutputDir, filepath.Join(opts.OutputDir, "SHA256SUMS"))
|
||||
|
||||
signFiles(filepath.Join(opts.OutputDir, "SHA256SUMS"), tarFilename)
|
||||
|
||||
updateDocker(opts.OutputDir, opts.Version)
|
||||
dockerCmds := updateDocker(sourceDir, opts.Version)
|
||||
|
||||
msg("done, output dir is %v", opts.OutputDir)
|
||||
|
||||
msg("now run:\n\ngit push --tags origin master\ndocker push restic/restic:latest\ndocker push restic/restic:%s\n", opts.Version)
|
||||
msg("now run:\n\ngit push --tags origin master\n%s\n\nrm -rf %q", dockerCmds, sourceDir)
|
||||
}
|
||||
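updateDocker no longer builds and tags images on the local Docker daemon; it creates a throwaway buildx builder, runs one --no-cache build as a smoke test, and returns the push commands the maintainer is expected to run by hand. The following hedged sketch only prints what that returned command block looks like; the builder name, directory, and version are made-up placeholders.

package main

import "fmt"

func main() {
	// Placeholder values; the real helper derives builderName from a random
	// number and takes sourceDir and version from the release options.
	builderName := "restic-release-builder-12345"
	sourceDir := "/tmp/restic-release/restic-0.16.0"
	version := "0.16.0"

	buildCmd := fmt.Sprintf("docker buildx build --builder %s --platform linux/386,linux/amd64,linux/arm,linux/arm64 --pull -f docker/Dockerfile.release %q", builderName, sourceDir)

	// One push command per tag, followed by removal of the temporary builder.
	publishCmds := ""
	for _, tag := range []string{"restic/restic:latest", "restic/restic:" + version} {
		publishCmds += buildCmd + fmt.Sprintf(" --tag %q --push\n", tag)
	}
	fmt.Print(publishCmds + "\ndocker buildx rm " + builderName + "\n")
}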
|
|
|
@ -680,6 +680,7 @@ type SnapshotOptions struct {
|
|||
Excludes []string
|
||||
Time time.Time
|
||||
ParentSnapshot *restic.Snapshot
|
||||
ProgramVersion string
|
||||
}
|
||||
|
||||
// loadParentTree loads a tree referenced by snapshot id. If id is null, nil is returned.
|
||||
|
@ -796,6 +797,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps
|
|||
return nil, restic.ID{}, err
|
||||
}
|
||||
|
||||
sn.ProgramVersion = opts.ProgramVersion
|
||||
sn.Excludes = opts.Excludes
|
||||
if opts.ParentSnapshot != nil {
|
||||
sn.Parent = opts.ParentSnapshot.ID()
|
||||
|
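The snapshot now records which program created it: Snapshot copies the new ProgramVersion option onto the resulting snapshot, as the hunk above shows. A minimal sketch of a caller passing it through follows; it assumes an already initialized *archiver.Archiver, and the version string and exclude pattern are placeholders.

package example

import (
	"context"
	"time"

	"github.com/restic/restic/internal/archiver"
	"github.com/restic/restic/internal/restic"
)

// takeSnapshot is an illustrative wrapper, not code from this change.
func takeSnapshot(ctx context.Context, arch *archiver.Archiver, targets []string, parent *restic.Snapshot) (restic.ID, error) {
	opts := archiver.SnapshotOptions{
		Time:           time.Now(),
		Excludes:       []string{"*.tmp"},
		ParentSnapshot: parent,
		ProgramVersion: "restic 0.16.0", // stored on the snapshot by Snapshot()
	}
	_, id, err := arch.Snapshot(ctx, targets, opts)
	return id, err
}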
|
|
@ -14,12 +14,14 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
|
||||
|
@ -43,12 +45,22 @@ const defaultListMaxItems = 5000
|
|||
// make sure that *Backend implements backend.Backend
|
||||
var _ restic.Backend = &Backend{}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewHTTPBackendFactory("azure", ParseConfig, location.NoPassword, Create, Open)
|
||||
}
|
||||
|
||||
func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
||||
debug.Log("open, config %#v", cfg)
|
||||
var client *azContainer.Client
|
||||
var err error
|
||||
|
||||
url := fmt.Sprintf("https://%s.blob.core.windows.net/%s", cfg.AccountName, cfg.Container)
|
||||
var endpointSuffix string
|
||||
if cfg.EndpointSuffix != "" {
|
||||
endpointSuffix = cfg.EndpointSuffix
|
||||
} else {
|
||||
endpointSuffix = "core.windows.net"
|
||||
}
|
||||
url := fmt.Sprintf("https://%s.blob.%s/%s", cfg.AccountName, endpointSuffix, cfg.Container)
|
||||
opts := &azContainer.ClientOptions{
|
||||
ClientOptions: azcore.ClientOptions{
|
||||
Transport: &http.Client{Transport: rt},
|
||||
|
@ -90,7 +102,16 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
|||
return nil, errors.Wrap(err, "NewAccountSASClientFromEndpointToken")
|
||||
}
|
||||
} else {
|
||||
return nil, errors.New("no azure authentication information found")
|
||||
debug.Log(" - using DefaultAzureCredential")
|
||||
cred, err := azidentity.NewDefaultAzureCredential(nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewDefaultAzureCredential")
|
||||
}
|
||||
|
||||
client, err = azContainer.NewClient(url, cred, opts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewClient")
|
||||
}
|
||||
}
|
||||
|
||||
be := &Backend{
|
||||
|
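With the new EndpointSuffix option the container URL is no longer hard-coded to core.windows.net; it is assembled from account name, endpoint suffix, and container, with core.windows.net as the default. A tiny standalone sketch of that construction (the helper name and the example sovereign-cloud suffix are illustrative):

package main

import "fmt"

// containerURL mirrors the URL construction in open() above: a configurable
// endpoint suffix with "core.windows.net" as the fallback.
func containerURL(accountName, endpointSuffix, container string) string {
	if endpointSuffix == "" {
		endpointSuffix = "core.windows.net"
	}
	return fmt.Sprintf("https://%s.blob.%s/%s", accountName, endpointSuffix, container)
}

func main() {
	fmt.Println(containerURL("myaccount", "", "restic"))                      // public cloud default
	fmt.Println(containerURL("myaccount", "core.chinacloudapi.cn", "restic")) // custom suffix
}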
|
|
@ -12,18 +12,12 @@ import (
|
|||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/azure"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/options"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func newAzureTestSuite(t testing.TB) *test.Suite[azure.Config] {
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
func newAzureTestSuite() *test.Suite[azure.Config] {
|
||||
return &test.Suite[azure.Config]{
|
||||
// do not use excessive data
|
||||
MinimalData: true,
|
||||
|
@ -35,51 +29,12 @@ func newAzureTestSuite(t testing.TB) *test.Suite[azure.Config] {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
err = cfg.ApplyEnvironment("RESTIC_TEST_")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg.ApplyEnvironment("RESTIC_TEST_")
|
||||
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
|
||||
return cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg azure.Config) (restic.Backend, error) {
|
||||
ctx := context.TODO()
|
||||
be, err := azure.Create(ctx, cfg, tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil && !be.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return nil, errors.New("config already exists")
|
||||
}
|
||||
|
||||
return be, nil
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg azure.Config) (restic.Backend, error) {
|
||||
ctx := context.TODO()
|
||||
return azure.Open(ctx, cfg, tr)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg azure.Config) error {
|
||||
ctx := context.TODO()
|
||||
be, err := azure.Open(ctx, cfg, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return be.Delete(context.TODO())
|
||||
},
|
||||
Factory: azure.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -104,7 +59,7 @@ func TestBackendAzure(t *testing.T) {
|
|||
}
|
||||
|
||||
t.Logf("run tests")
|
||||
newAzureTestSuite(t).RunTests(t)
|
||||
newAzureTestSuite().RunTests(t)
|
||||
}
|
||||
|
||||
func BenchmarkBackendAzure(t *testing.B) {
|
||||
|
@ -122,7 +77,7 @@ func BenchmarkBackendAzure(t *testing.B) {
|
|||
}
|
||||
|
||||
t.Logf("run tests")
|
||||
newAzureTestSuite(t).RunBenchmarks(t)
|
||||
newAzureTestSuite().RunBenchmarks(t)
|
||||
}
|
||||
|
||||
func TestUploadLargeFile(t *testing.T) {
|
||||
|
|
|
@ -13,11 +13,12 @@ import (
|
|||
// Config contains all configuration necessary to connect to an azure compatible
|
||||
// server.
|
||||
type Config struct {
|
||||
AccountName string
|
||||
AccountSAS options.SecretString
|
||||
AccountKey options.SecretString
|
||||
Container string
|
||||
Prefix string
|
||||
AccountName string
|
||||
AccountSAS options.SecretString
|
||||
AccountKey options.SecretString
|
||||
EndpointSuffix string
|
||||
Container string
|
||||
Prefix string
|
||||
|
||||
Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"`
|
||||
}
|
||||
|
@ -59,7 +60,7 @@ func ParseConfig(s string) (*Config, error) {
|
|||
var _ restic.ApplyEnvironmenter = &Config{}
|
||||
|
||||
// ApplyEnvironment saves values from the environment to the config.
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) error {
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) {
|
||||
if cfg.AccountName == "" {
|
||||
cfg.AccountName = os.Getenv(prefix + "AZURE_ACCOUNT_NAME")
|
||||
}
|
||||
|
@ -71,5 +72,8 @@ func (cfg *Config) ApplyEnvironment(prefix string) error {
|
|||
if cfg.AccountSAS.String() == "" {
|
||||
cfg.AccountSAS = options.NewSecretString(os.Getenv(prefix + "AZURE_ACCOUNT_SAS"))
|
||||
}
|
||||
return nil
|
||||
|
||||
if cfg.EndpointSuffix == "" {
|
||||
cfg.EndpointSuffix = os.Getenv(prefix + "AZURE_ENDPOINT_SUFFIX")
|
||||
}
|
||||
}
|
||||
|
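ApplyEnvironment now only copies values from the environment into fields that are still unset and no longer returns an error. A short sketch of filling the new endpoint suffix this way; the environment values are set inline purely for the demonstration, and the empty prefix simply means the plain AZURE_* variable names are read.

package main

import (
	"fmt"
	"os"

	"github.com/restic/restic/internal/backend/azure"
)

func main() {
	// Demonstration values; normally these come from the user's environment.
	os.Setenv("AZURE_ACCOUNT_NAME", "myaccount")
	os.Setenv("AZURE_ENDPOINT_SUFFIX", "core.chinacloudapi.cn")

	var cfg azure.Config
	cfg.ApplyEnvironment("") // fills AccountName, key/SAS and EndpointSuffix if unset
	fmt.Println(cfg.AccountName, cfg.EndpointSuffix)
}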
|
|
@ -11,6 +11,7 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
@ -36,6 +37,10 @@ const defaultListMaxItems = 10 * 1000
|
|||
// ensure statically that *b2Backend implements restic.Backend.
|
||||
var _ restic.Backend = &b2Backend{}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewHTTPBackendFactory("b2", ParseConfig, location.NoPassword, Create, Open)
|
||||
}
|
||||
|
||||
type sniffingRoundTripper struct {
|
||||
sync.Mutex
|
||||
lastErr error
|
||||
|
@ -53,6 +58,13 @@ func (s *sniffingRoundTripper) RoundTrip(req *http.Request) (*http.Response, err
|
|||
}
|
||||
|
||||
func newClient(ctx context.Context, cfg Config, rt http.RoundTripper) (*b2.Client, error) {
|
||||
if cfg.AccountID == "" {
|
||||
return nil, errors.Fatalf("unable to open B2 backend: Account ID ($B2_ACCOUNT_ID) is empty")
|
||||
}
|
||||
if cfg.Key.String() == "" {
|
||||
return nil, errors.Fatalf("unable to open B2 backend: Key ($B2_ACCOUNT_KEY) is empty")
|
||||
}
|
||||
|
||||
sniffer := &sniffingRoundTripper{RoundTripper: rt}
|
||||
opts := []b2.ClientOption{b2.Transport(sniffer)}
|
||||
|
||||
|
@ -135,16 +147,6 @@ func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backe
|
|||
},
|
||||
listMaxItems: defaultListMaxItems,
|
||||
}
|
||||
|
||||
_, err = be.Stat(ctx, restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil && !be.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return nil, errors.New("config already exists")
|
||||
}
|
||||
|
||||
return be, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -1,26 +1,18 @@
|
|||
package b2_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/b2"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func newB2TestSuite(t testing.TB) *test.Suite[b2.Config] {
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
func newB2TestSuite() *test.Suite[b2.Config] {
|
||||
return &test.Suite[b2.Config]{
|
||||
// do not use excessive data
|
||||
MinimalData: true,
|
||||
|
@ -35,34 +27,12 @@ func newB2TestSuite(t testing.TB) *test.Suite[b2.Config] {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
err = cfg.ApplyEnvironment("RESTIC_TEST_")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg.ApplyEnvironment("RESTIC_TEST_")
|
||||
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
|
||||
return cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg b2.Config) (restic.Backend, error) {
|
||||
return b2.Create(context.Background(), cfg, tr)
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg b2.Config) (restic.Backend, error) {
|
||||
return b2.Open(context.Background(), cfg, tr)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg b2.Config) error {
|
||||
be, err := b2.Open(context.Background(), cfg, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return be.Delete(context.TODO())
|
||||
},
|
||||
Factory: b2.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -89,10 +59,10 @@ func TestBackendB2(t *testing.T) {
|
|||
}()
|
||||
|
||||
testVars(t)
|
||||
newB2TestSuite(t).RunTests(t)
|
||||
newB2TestSuite().RunTests(t)
|
||||
}
|
||||
|
||||
func BenchmarkBackendb2(t *testing.B) {
|
||||
testVars(t)
|
||||
newB2TestSuite(t).RunBenchmarks(t)
|
||||
newB2TestSuite().RunBenchmarks(t)
|
||||
}
|
||||
|
|
|
@ -85,21 +85,11 @@ func ParseConfig(s string) (*Config, error) {
|
|||
var _ restic.ApplyEnvironmenter = &Config{}
|
||||
|
||||
// ApplyEnvironment saves values from the environment to the config.
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) error {
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) {
|
||||
if cfg.AccountID == "" {
|
||||
cfg.AccountID = os.Getenv(prefix + "B2_ACCOUNT_ID")
|
||||
}
|
||||
|
||||
if cfg.AccountID == "" {
|
||||
return errors.Fatalf("unable to open B2 backend: Account ID ($B2_ACCOUNT_ID) is empty")
|
||||
}
|
||||
|
||||
if cfg.Key.String() == "" {
|
||||
cfg.Key = options.NewSecretString(os.Getenv(prefix + "B2_ACCOUNT_KEY"))
|
||||
}
|
||||
|
||||
if cfg.Key.String() == "" {
|
||||
return errors.Fatalf("unable to open B2 backend: Key ($B2_ACCOUNT_KEY) is empty")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -62,9 +62,8 @@ func ParseConfig(s string) (*Config, error) {
|
|||
var _ restic.ApplyEnvironmenter = &Config{}
|
||||
|
||||
// ApplyEnvironment saves values from the environment to the config.
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) error {
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) {
|
||||
if cfg.ProjectID == "" {
|
||||
cfg.ProjectID = os.Getenv(prefix + "GOOGLE_PROJECT_ID")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"github.com/pkg/errors"
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
||||
|
@ -47,6 +48,10 @@ type Backend struct {
|
|||
// Ensure that *Backend implements restic.Backend.
|
||||
var _ restic.Backend = &Backend{}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewHTTPBackendFactory("gs", ParseConfig, location.NoPassword, Create, Open)
|
||||
}
|
||||
|
||||
func getStorageClient(rt http.RoundTripper) (*storage.Client, error) {
|
||||
// create a new HTTP client
|
||||
httpClient := &http.Client{
|
||||
|
@ -117,7 +122,7 @@ func open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
|||
}
|
||||
|
||||
// Open opens the gs backend at the specified bucket.
|
||||
func Open(cfg Config, rt http.RoundTripper) (restic.Backend, error) {
|
||||
func Open(_ context.Context, cfg Config, rt http.RoundTripper) (restic.Backend, error) {
|
||||
return open(cfg, rt)
|
||||
}
|
||||
|
||||
|
|
|
@ -1,26 +1,17 @@
|
|||
package gs_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/gs"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func newGSTestSuite(t testing.TB) *test.Suite[gs.Config] {
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
func newGSTestSuite() *test.Suite[gs.Config] {
|
||||
return &test.Suite[gs.Config]{
|
||||
// do not use excessive data
|
||||
MinimalData: true,
|
||||
|
@ -37,39 +28,7 @@ func newGSTestSuite(t testing.TB) *test.Suite[gs.Config] {
|
|||
return cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg gs.Config) (restic.Backend, error) {
|
||||
be, err := gs.Create(context.Background(), cfg, tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil && !be.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return nil, errors.New("config already exists")
|
||||
}
|
||||
|
||||
return be, nil
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg gs.Config) (restic.Backend, error) {
|
||||
return gs.Open(cfg, tr)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg gs.Config) error {
|
||||
be, err := gs.Open(cfg, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return be.Delete(context.TODO())
|
||||
},
|
||||
Factory: gs.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -97,7 +56,7 @@ func TestBackendGS(t *testing.T) {
|
|||
}
|
||||
|
||||
t.Logf("run tests")
|
||||
newGSTestSuite(t).RunTests(t)
|
||||
newGSTestSuite().RunTests(t)
|
||||
}
|
||||
|
||||
func BenchmarkBackendGS(t *testing.B) {
|
||||
|
@ -118,5 +77,5 @@ func BenchmarkBackendGS(t *testing.B) {
|
|||
}
|
||||
|
||||
t.Logf("run tests")
|
||||
newGSTestSuite(t).RunBenchmarks(t)
|
||||
newGSTestSuite().RunBenchmarks(t)
|
||||
}
|
||||
|
|
|
@ -7,6 +7,21 @@ import (
|
|||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
func WrapBackendConstructor[B restic.Backend, C any](constructor func(ctx context.Context, cfg C) (B, error)) func(ctx context.Context, cfg C, lim Limiter) (restic.Backend, error) {
|
||||
return func(ctx context.Context, cfg C, lim Limiter) (restic.Backend, error) {
|
||||
var be restic.Backend
|
||||
be, err := constructor(ctx, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if lim != nil {
|
||||
be = LimitBackend(be, lim)
|
||||
}
|
||||
return be, nil
|
||||
}
|
||||
}
|
||||
|
||||
// LimitBackend wraps a Backend and applies rate limiting to Load() and Save()
|
||||
// calls on the backend.
|
||||
func LimitBackend(be restic.Backend, l Limiter) restic.Backend {
|
||||
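WrapBackendConstructor lifts a plain func(ctx, cfg) (Backend, error) constructor into the limiter-aware signature that the new backend factories expect; the local backend registers Create and Open through it, as the local.NewFactory hunk further below shows. The standalone toy below illustrates the same closure-over-generics pattern; the Backend and Limiter stand-ins and all names here are invented for the sketch and are not restic's types.

package main

import (
	"context"
	"fmt"
)

// Toy stand-ins for restic.Backend and limiter.Limiter.
type Backend interface{ Name() string }
type Limiter interface{ Describe() string }

type fileBackend struct{ path string }

func (b *fileBackend) Name() string { return "file:" + b.path }

// limited wraps a backend and records the limiter applied to it.
type limited struct {
	Backend
	lim Limiter
}

func (l *limited) Name() string { return l.Backend.Name() + " (" + l.lim.Describe() + ")" }

type fixedRate struct{ kbps int }

func (r fixedRate) Describe() string { return fmt.Sprintf("%d KiB/s", r.kbps) }

// wrap mirrors WrapBackendConstructor: it adapts a limiter-unaware constructor
// to the limiter-aware signature and applies the limiter when one is given.
func wrap[B Backend, C any](constructor func(ctx context.Context, cfg C) (B, error)) func(ctx context.Context, cfg C, lim Limiter) (Backend, error) {
	return func(ctx context.Context, cfg C, lim Limiter) (Backend, error) {
		be, err := constructor(ctx, cfg)
		if err != nil {
			return nil, err
		}
		if lim != nil {
			return &limited{Backend: be, lim: lim}, nil
		}
		return be, nil
	}
}

func main() {
	open := wrap(func(_ context.Context, path string) (*fileBackend, error) {
		return &fileBackend{path: path}, nil
	})

	be, _ := open(context.Background(), "/srv/repo", fixedRate{kbps: 1024})
	fmt.Println(be.Name()) // file:/srv/repo (1024 KiB/s)
}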
|
|
|
@ -11,6 +11,34 @@ var configTests = []test.ConfigTestData[Config]{
|
|||
Path: "/some/path",
|
||||
Connections: 2,
|
||||
}},
|
||||
{S: "local:dir1/dir2", Cfg: Config{
|
||||
Path: "dir1/dir2",
|
||||
Connections: 2,
|
||||
}},
|
||||
{S: "local:../dir1/dir2", Cfg: Config{
|
||||
Path: "../dir1/dir2",
|
||||
Connections: 2,
|
||||
}},
|
||||
{S: "local:/dir1:foobar/dir2", Cfg: Config{
|
||||
Path: "/dir1:foobar/dir2",
|
||||
Connections: 2,
|
||||
}},
|
||||
{S: `local:\dir1\foobar\dir2`, Cfg: Config{
|
||||
Path: `\dir1\foobar\dir2`,
|
||||
Connections: 2,
|
||||
}},
|
||||
{S: `local:c:\dir1\foobar\dir2`, Cfg: Config{
|
||||
Path: `c:\dir1\foobar\dir2`,
|
||||
Connections: 2,
|
||||
}},
|
||||
{S: `local:C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`, Cfg: Config{
|
||||
Path: `C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
|
||||
Connections: 2,
|
||||
}},
|
||||
{S: `local:c:/dir1/foobar/dir2`, Cfg: Config{
|
||||
Path: `c:/dir1/foobar/dir2`,
|
||||
Connections: 2,
|
||||
}},
|
||||
}
|
||||
|
||||
func TestParseConfig(t *testing.T) {
|
||||
|
|
|
@ -10,6 +10,8 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/limiter"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/fs"
|
||||
|
@ -28,6 +30,10 @@ type Local struct {
|
|||
// ensure statically that *Local implements restic.Backend.
|
||||
var _ restic.Backend = &Local{}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewLimitedBackendFactory("local", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open))
|
||||
}
|
||||
|
||||
const defaultLayout = "default"
|
||||
|
||||
func open(ctx context.Context, cfg Config) (*Local, error) {
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/backend/local"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
|
@ -16,11 +15,7 @@ func newTestSuite(t testing.TB) *test.Suite[local.Config] {
|
|||
return &test.Suite[local.Config]{
|
||||
// NewConfig returns a config for a new temporary backend that will be used in tests.
|
||||
NewConfig: func() (*local.Config, error) {
|
||||
dir, err := os.MkdirTemp(rtest.TestTempDir, "restic-test-local-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dir := rtest.TempDir(t)
|
||||
t.Logf("create new backend at %v", dir)
|
||||
|
||||
cfg := &local.Config{
|
||||
|
@ -30,25 +25,7 @@ func newTestSuite(t testing.TB) *test.Suite[local.Config] {
|
|||
return cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg local.Config) (restic.Backend, error) {
|
||||
return local.Create(context.TODO(), cfg)
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg local.Config) (restic.Backend, error) {
|
||||
return local.Open(context.TODO(), cfg)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg local.Config) error {
|
||||
if !rtest.TestCleanupTempDirs {
|
||||
t.Logf("leaving test backend dir at %v", cfg.Path)
|
||||
}
|
||||
|
||||
rtest.RemoveAll(t, cfg.Path)
|
||||
return nil
|
||||
},
|
||||
Factory: local.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,96 +1,29 @@
|
|||
package location
|
||||
package location_test
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"testing"
|
||||
|
||||
var passwordTests = []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
"local:/srv/repo",
|
||||
"local:/srv/repo",
|
||||
},
|
||||
{
|
||||
"/dir1/dir2",
|
||||
"/dir1/dir2",
|
||||
},
|
||||
{
|
||||
`c:\dir1\foobar\dir2`,
|
||||
`c:\dir1\foobar\dir2`,
|
||||
},
|
||||
{
|
||||
"sftp:user@host:/srv/repo",
|
||||
"sftp:user@host:/srv/repo",
|
||||
},
|
||||
{
|
||||
"s3://eu-central-1/bucketname",
|
||||
"s3://eu-central-1/bucketname",
|
||||
},
|
||||
{
|
||||
"swift:container17:/prefix97",
|
||||
"swift:container17:/prefix97",
|
||||
},
|
||||
{
|
||||
"b2:bucketname:/prefix",
|
||||
"b2:bucketname:/prefix",
|
||||
},
|
||||
{
|
||||
"rest:",
|
||||
"rest:/",
|
||||
},
|
||||
{
|
||||
"rest:localhost/",
|
||||
"rest:localhost/",
|
||||
},
|
||||
{
|
||||
"rest::123/",
|
||||
"rest::123/",
|
||||
},
|
||||
{
|
||||
"rest:http://",
|
||||
"rest:http://",
|
||||
},
|
||||
{
|
||||
"rest:http://hostname.foo:1234/",
|
||||
"rest:http://hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user@hostname.foo:1234/",
|
||||
"rest:http://user@hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:@hostname.foo:1234/",
|
||||
"rest:http://user:***@hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:p@hostname.foo:1234/",
|
||||
"rest:http://user:***@hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:pppppaaafhhfuuwiiehhthhghhdkjaoowpprooghjjjdhhwuuhgjsjhhfdjhruuhsjsdhhfhshhsppwufhhsjjsjs@hostname.foo:1234/",
|
||||
"rest:http://user:***@hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:password@hostname",
|
||||
"rest:http://user:***@hostname/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:password@:123",
|
||||
"rest:http://user:***@:123/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:password@",
|
||||
"rest:http://user:***@/",
|
||||
},
|
||||
}
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func TestStripPassword(t *testing.T) {
|
||||
for i, test := range passwordTests {
|
||||
t.Run(test.input, func(t *testing.T) {
|
||||
result := StripPassword(test.input)
|
||||
if result != test.expected {
|
||||
t.Errorf("test %d: expected '%s' but got '%s'", i, test.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
registry := location.NewRegistry()
|
||||
registry.Register(
|
||||
location.NewHTTPBackendFactory[any, restic.Backend]("test", nil,
|
||||
func(s string) string {
|
||||
return "cleaned"
|
||||
}, nil, nil,
|
||||
),
|
||||
)
|
||||
|
||||
t.Run("valid", func(t *testing.T) {
|
||||
clean := location.StripPassword(registry, "test:secret")
|
||||
test.Equals(t, "cleaned", clean)
|
||||
})
|
||||
t.Run("unknown", func(t *testing.T) {
|
||||
clean := location.StripPassword(registry, "invalid:secret")
|
||||
test.Equals(t, "invalid:secret", clean)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -4,16 +4,6 @@ package location
|
|||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/restic/restic/internal/backend/azure"
|
||||
"github.com/restic/restic/internal/backend/b2"
|
||||
"github.com/restic/restic/internal/backend/gs"
|
||||
"github.com/restic/restic/internal/backend/local"
|
||||
"github.com/restic/restic/internal/backend/rclone"
|
||||
"github.com/restic/restic/internal/backend/rest"
|
||||
"github.com/restic/restic/internal/backend/s3"
|
||||
"github.com/restic/restic/internal/backend/sftp"
|
||||
"github.com/restic/restic/internal/backend/smb"
|
||||
"github.com/restic/restic/internal/backend/swift"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
)
|
||||
|
||||
|
@ -24,35 +14,8 @@ type Location struct {
|
|||
Config interface{}
|
||||
}
|
||||
|
||||
type parser struct {
|
||||
scheme string
|
||||
parse func(string) (interface{}, error)
|
||||
stripPassword func(string) string
|
||||
}
|
||||
|
||||
func configToAny[C any](parser func(string) (*C, error)) func(string) (interface{}, error) {
|
||||
return func(s string) (interface{}, error) {
|
||||
return parser(s)
|
||||
}
|
||||
}
|
||||
|
||||
// parsers is a list of valid config parsers for the backends. The first parser
|
||||
// is the fallback and should always be set to the local backend.
|
||||
var parsers = []parser{
|
||||
{"b2", configToAny(b2.ParseConfig), noPassword},
|
||||
{"local", configToAny(local.ParseConfig), noPassword},
|
||||
{"sftp", configToAny(sftp.ParseConfig), noPassword},
|
||||
{"s3", configToAny(s3.ParseConfig), noPassword},
|
||||
{"gs", configToAny(gs.ParseConfig), noPassword},
|
||||
{"azure", configToAny(azure.ParseConfig), noPassword},
|
||||
{"swift", configToAny(swift.ParseConfig), noPassword},
|
||||
{"rest", configToAny(rest.ParseConfig), rest.StripPassword},
|
||||
{"rclone", configToAny(rclone.ParseConfig), noPassword},
|
||||
{"smb", configToAny(smb.ParseConfig), noPassword},
|
||||
}
|
||||
|
||||
// noPassword returns the repository location unchanged (there's no sensitive information there)
|
||||
func noPassword(s string) string {
|
||||
// NoPassword returns the repository location unchanged (there's no sensitive information there)
|
||||
func NoPassword(s string) string {
|
||||
return s
|
||||
}
|
||||
|
||||
|
@ -90,16 +53,13 @@ func isPath(s string) bool {
|
|||
// starts with a backend name followed by a colon, that backend's Parse()
|
||||
// function is called. Otherwise, the local backend is used which interprets s
|
||||
// as the name of a directory.
|
||||
func Parse(s string) (u Location, err error) {
|
||||
func Parse(registry *Registry, s string) (u Location, err error) {
|
||||
scheme := extractScheme(s)
|
||||
u.Scheme = scheme
|
||||
|
||||
for _, parser := range parsers {
|
||||
if parser.scheme != scheme {
|
||||
continue
|
||||
}
|
||||
|
||||
u.Config, err = parser.parse(s)
|
||||
factory := registry.Lookup(scheme)
|
||||
if factory != nil {
|
||||
u.Config, err = factory.ParseConfig(s)
|
||||
if err != nil {
|
||||
return Location{}, err
|
||||
}
|
||||
|
@ -113,7 +73,12 @@ func Parse(s string) (u Location, err error) {
|
|||
}
|
||||
|
||||
u.Scheme = "local"
|
||||
u.Config, err = local.ParseConfig("local:" + s)
|
||||
factory = registry.Lookup(u.Scheme)
|
||||
if factory == nil {
|
||||
return Location{}, errors.New("local backend not available")
|
||||
}
|
||||
|
||||
u.Config, err = factory.ParseConfig("local:" + s)
|
||||
if err != nil {
|
||||
return Location{}, err
|
||||
}
|
||||
|
@ -122,14 +87,12 @@ func Parse(s string) (u Location, err error) {
|
|||
}
|
||||
|
||||
// StripPassword returns a displayable version of a repository location (with any sensitive information removed)
|
||||
func StripPassword(s string) string {
|
||||
func StripPassword(registry *Registry, s string) string {
|
||||
scheme := extractScheme(s)
|
||||
|
||||
for _, parser := range parsers {
|
||||
if parser.scheme != scheme {
|
||||
continue
|
||||
}
|
||||
return parser.stripPassword(s)
|
||||
factory := registry.Lookup(scheme)
|
||||
if factory != nil {
|
||||
return factory.StripPassword(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
|
|
@ -1,344 +1,65 @@
|
|||
package location
|
||||
package location_test
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/restic/restic/internal/backend/b2"
|
||||
"github.com/restic/restic/internal/backend/local"
|
||||
"github.com/restic/restic/internal/backend/rest"
|
||||
"github.com/restic/restic/internal/backend/s3"
|
||||
"github.com/restic/restic/internal/backend/sftp"
|
||||
"github.com/restic/restic/internal/backend/swift"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func parseURL(s string) *url.URL {
|
||||
u, err := url.Parse(s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return u
|
||||
type testConfig struct {
|
||||
loc string
|
||||
}
|
||||
|
||||
var parseTests = []struct {
|
||||
s string
|
||||
u Location
|
||||
}{
|
||||
{
|
||||
"local:/srv/repo",
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: "/srv/repo",
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"local:dir1/dir2",
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: "dir1/dir2",
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"local:dir1/dir2",
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: "dir1/dir2",
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"dir1/dir2",
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: "dir1/dir2",
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"/dir1/dir2",
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: "/dir1/dir2",
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"local:../dir1/dir2",
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: "../dir1/dir2",
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"/dir1/dir2",
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: "/dir1/dir2",
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"/dir1:foobar/dir2",
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: "/dir1:foobar/dir2",
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
`\dir1\foobar\dir2`,
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: `\dir1\foobar\dir2`,
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
`c:\dir1\foobar\dir2`,
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: `c:\dir1\foobar\dir2`,
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
`C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: `C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
`c:/dir1/foobar/dir2`,
|
||||
Location{Scheme: "local",
|
||||
Config: &local.Config{
|
||||
Path: `c:/dir1/foobar/dir2`,
|
||||
Connections: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"sftp:user@host:/srv/repo",
|
||||
Location{Scheme: "sftp",
|
||||
Config: &sftp.Config{
|
||||
User: "user",
|
||||
Host: "host",
|
||||
Path: "/srv/repo",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"sftp:host:/srv/repo",
|
||||
Location{Scheme: "sftp",
|
||||
Config: &sftp.Config{
|
||||
User: "",
|
||||
Host: "host",
|
||||
Path: "/srv/repo",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"sftp://user@host/srv/repo",
|
||||
Location{Scheme: "sftp",
|
||||
Config: &sftp.Config{
|
||||
User: "user",
|
||||
Host: "host",
|
||||
Path: "srv/repo",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"sftp://user@host//srv/repo",
|
||||
Location{Scheme: "sftp",
|
||||
Config: &sftp.Config{
|
||||
User: "user",
|
||||
Host: "host",
|
||||
Path: "/srv/repo",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
"s3://eu-central-1/bucketname",
|
||||
Location{Scheme: "s3",
|
||||
Config: &s3.Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"s3://hostname.foo/bucketname",
|
||||
Location{Scheme: "s3",
|
||||
Config: &s3.Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"s3://hostname.foo/bucketname/prefix/directory",
|
||||
Location{Scheme: "s3",
|
||||
Config: &s3.Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "bucketname",
|
||||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"s3:eu-central-1/repo",
|
||||
Location{Scheme: "s3",
|
||||
Config: &s3.Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "repo",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"s3:eu-central-1/repo/prefix/directory",
|
||||
Location{Scheme: "s3",
|
||||
Config: &s3.Config{
|
||||
Endpoint: "eu-central-1",
|
||||
Bucket: "repo",
|
||||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"s3:https://hostname.foo/repo",
|
||||
Location{Scheme: "s3",
|
||||
Config: &s3.Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "repo",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"s3:https://hostname.foo/repo/prefix/directory",
|
||||
Location{Scheme: "s3",
|
||||
Config: &s3.Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "repo",
|
||||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"s3:http://hostname.foo/repo",
|
||||
Location{Scheme: "s3",
|
||||
Config: &s3.Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "repo",
|
||||
Prefix: "",
|
||||
UseHTTP: true,
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"swift:container17:/",
|
||||
Location{Scheme: "swift",
|
||||
Config: &swift.Config{
|
||||
Container: "container17",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"swift:container17:/prefix97",
|
||||
Location{Scheme: "swift",
|
||||
Config: &swift.Config{
|
||||
Container: "container17",
|
||||
Prefix: "prefix97",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"rest:http://hostname.foo:1234/",
|
||||
Location{Scheme: "rest",
|
||||
Config: &rest.Config{
|
||||
URL: parseURL("http://hostname.foo:1234/"),
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"b2:bucketname:/prefix", Location{Scheme: "b2",
|
||||
Config: &b2.Config{
|
||||
Bucket: "bucketname",
|
||||
Prefix: "prefix",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"b2:bucketname", Location{Scheme: "b2",
|
||||
Config: &b2.Config{
|
||||
Bucket: "bucketname",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
func testFactory() location.Factory {
|
||||
return location.NewHTTPBackendFactory[testConfig, restic.Backend](
|
||||
"local",
|
||||
func(s string) (*testConfig, error) {
|
||||
return &testConfig{loc: s}, nil
|
||||
}, nil, nil, nil,
|
||||
)
|
||||
}
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
for i, test := range parseTests {
|
||||
t.Run(test.s, func(t *testing.T) {
|
||||
u, err := Parse(test.s)
|
||||
registry := location.NewRegistry()
|
||||
registry.Register(testFactory())
|
||||
|
||||
path := "local:example"
|
||||
u, err := location.Parse(registry, path)
|
||||
test.OK(t, err)
|
||||
test.Equals(t, "local", u.Scheme)
|
||||
test.Equals(t, &testConfig{loc: path}, u.Config)
|
||||
}
|
||||
|
||||
func TestParseFallback(t *testing.T) {
|
||||
fallbackTests := []string{
|
||||
"dir1/dir2",
|
||||
"/dir1/dir2",
|
||||
"/dir1:foobar/dir2",
|
||||
`\dir1\foobar\dir2`,
|
||||
`c:\dir1\foobar\dir2`,
|
||||
`C:\Users\appveyor\AppData\Local\Temp\1\restic-test-879453535\repo`,
|
||||
`c:/dir1/foobar/dir2`,
|
||||
}
|
||||
|
||||
registry := location.NewRegistry()
|
||||
registry.Register(testFactory())
|
||||
|
||||
for _, path := range fallbackTests {
|
||||
t.Run(path, func(t *testing.T) {
|
||||
u, err := location.Parse(registry, path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if test.u.Scheme != u.Scheme {
|
||||
t.Errorf("test %d: scheme does not match, want %q, got %q",
|
||||
i, test.u.Scheme, u.Scheme)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.u.Config, u.Config) {
|
||||
t.Errorf("test %d: cfg map does not match, want:\n %#v\ngot: \n %#v",
|
||||
i, test.u.Config, u.Config)
|
||||
}
|
||||
test.Equals(t, "local", u.Scheme)
|
||||
test.Equals(t, "local:"+path, u.Config.(*testConfig).loc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidScheme(t *testing.T) {
|
||||
registry := location.NewRegistry()
|
||||
var invalidSchemes = []string{
|
||||
"foobar:xxx",
|
||||
"foobar:/dir/dir2",
|
||||
|
@ -346,7 +67,7 @@ func TestInvalidScheme(t *testing.T) {
|
|||
|
||||
for _, s := range invalidSchemes {
|
||||
t.Run(s, func(t *testing.T) {
|
||||
_, err := Parse(s)
|
||||
_, err := location.Parse(registry, s)
|
||||
if err == nil {
|
||||
t.Fatalf("error for invalid location %q not found", s)
|
||||
}
|
||||
|
|
internal/backend/location/registry.go (new file, 106 lines)
|
@ -0,0 +1,106 @@
|
|||
package location
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/restic/restic/internal/backend/limiter"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
type Registry struct {
|
||||
factories map[string]Factory
|
||||
}
|
||||
|
||||
func NewRegistry() *Registry {
|
||||
return &Registry{
|
||||
factories: make(map[string]Factory),
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Registry) Register(factory Factory) {
	if r.factories[factory.Scheme()] != nil {
		panic("duplicate backend")
	}
	r.factories[factory.Scheme()] = factory
}

func (r *Registry) Lookup(scheme string) Factory {
	return r.factories[scheme]
}

type Factory interface {
	Scheme() string
	ParseConfig(s string) (interface{}, error)
	StripPassword(s string) string
	Create(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error)
	Open(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error)
}

type genericBackendFactory[C any, T restic.Backend] struct {
	scheme          string
	parseConfigFn   func(s string) (*C, error)
	stripPasswordFn func(s string) string
	createFn        func(ctx context.Context, cfg C, rt http.RoundTripper, lim limiter.Limiter) (T, error)
	openFn          func(ctx context.Context, cfg C, rt http.RoundTripper, lim limiter.Limiter) (T, error)
}

func (f *genericBackendFactory[C, T]) Scheme() string {
	return f.scheme
}

func (f *genericBackendFactory[C, T]) ParseConfig(s string) (interface{}, error) {
	return f.parseConfigFn(s)
}

func (f *genericBackendFactory[C, T]) StripPassword(s string) string {
	if f.stripPasswordFn != nil {
		return f.stripPasswordFn(s)
	}
	return s
}

func (f *genericBackendFactory[C, T]) Create(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error) {
	return f.createFn(ctx, *cfg.(*C), rt, lim)
}

func (f *genericBackendFactory[C, T]) Open(ctx context.Context, cfg interface{}, rt http.RoundTripper, lim limiter.Limiter) (restic.Backend, error) {
	return f.openFn(ctx, *cfg.(*C), rt, lim)
}

func NewHTTPBackendFactory[C any, T restic.Backend](
	scheme string,
	parseConfigFn func(s string) (*C, error),
	stripPasswordFn func(s string) string,
	createFn func(ctx context.Context, cfg C, rt http.RoundTripper) (T, error),
	openFn func(ctx context.Context, cfg C, rt http.RoundTripper) (T, error)) Factory {

	return &genericBackendFactory[C, T]{
		scheme:          scheme,
		parseConfigFn:   parseConfigFn,
		stripPasswordFn: stripPasswordFn,
		createFn: func(ctx context.Context, cfg C, rt http.RoundTripper, _ limiter.Limiter) (T, error) {
			return createFn(ctx, cfg, rt)
		},
		openFn: func(ctx context.Context, cfg C, rt http.RoundTripper, _ limiter.Limiter) (T, error) {
			return openFn(ctx, cfg, rt)
		},
	}
}

func NewLimitedBackendFactory[C any, T restic.Backend](
	scheme string,
	parseConfigFn func(s string) (*C, error),
	stripPasswordFn func(s string) string,
	createFn func(ctx context.Context, cfg C, lim limiter.Limiter) (T, error),
	openFn func(ctx context.Context, cfg C, lim limiter.Limiter) (T, error)) Factory {

	return &genericBackendFactory[C, T]{
		scheme:          scheme,
		parseConfigFn:   parseConfigFn,
		stripPasswordFn: stripPasswordFn,
		createFn: func(ctx context.Context, cfg C, _ http.RoundTripper, lim limiter.Limiter) (T, error) {
			return createFn(ctx, cfg, lim)
		},
		openFn: func(ctx context.Context, cfg C, _ http.RoundTripper, lim limiter.Limiter) (T, error) {
			return openFn(ctx, cfg, lim)
		},
	}
}
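Taken together, Register, Lookup and the two generic factory constructors give each backend a single registration point while keeping the typed Create/Open functions. A rough usage sketch follows; it is hedged: the NewRegistry constructor and the error handling are assumptions for illustration, not part of this diff.

package main

import (
	"context"
	"net/http"

	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/backend/mem"
)

func main() {
	// Assumption: the location package exposes a NewRegistry constructor for
	// the Registry type shown above.
	registry := location.NewRegistry()
	registry.Register(mem.NewFactory())

	factory := registry.Lookup("mem")
	cfg, err := factory.ParseConfig("")
	if err != nil {
		panic(err)
	}

	// The generic factory unwraps the *C returned by ParseConfig and calls the
	// typed Open function; the limiter argument is optional and may be nil.
	be, err := factory.Open(context.TODO(), cfg, http.DefaultTransport, nil)
	if err != nil {
		panic(err)
	}
	defer func() { _ = be.Close() }()
}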
@@ -6,10 +6,12 @@ import (
	"encoding/base64"
	"hash"
	"io"
	"net/http"
	"sync"

	"github.com/cespare/xxhash/v2"
	"github.com/restic/restic/internal/backend"
	"github.com/restic/restic/internal/backend/location"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"

@@ -20,6 +22,25 @@ type memMap map[restic.Handle][]byte
// make sure that MemoryBackend implements backend.Backend
var _ restic.Backend = &MemoryBackend{}

// NewFactory creates a persistent mem backend
func NewFactory() location.Factory {
	be := New()

	return location.NewHTTPBackendFactory[struct{}, *MemoryBackend](
		"mem",
		func(s string) (*struct{}, error) {
			return &struct{}{}, nil
		},
		location.NoPassword,
		func(_ context.Context, _ struct{}, _ http.RoundTripper) (*MemoryBackend, error) {
			return be, nil
		},
		func(_ context.Context, _ struct{}, _ http.RoundTripper) (*MemoryBackend, error) {
			return be, nil
		},
	)
}

var errNotFound = errors.New("not found")

const connectionCount = 2

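The closure over be is what "persistent" means here: the factory hands out a single MemoryBackend instance, so Create and Open during a test round-trip to the same in-memory store. A small hedged illustration (not part of the diff):

package main

import (
	"context"
	"fmt"

	"github.com/restic/restic/internal/backend/mem"
)

func main() {
	f := mem.NewFactory()
	cfg, _ := f.ParseConfig("")

	be1, _ := f.Create(context.TODO(), cfg, nil, nil)
	be2, _ := f.Open(context.TODO(), cfg, nil, nil)

	// Both values wrap the same *MemoryBackend, so data written through one
	// is visible when the test suite later "reopens" the repository.
	fmt.Println(be1 == be2) // true
}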
@ -1,58 +1,20 @@
|
|||
package mem_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
||||
"github.com/restic/restic/internal/backend/mem"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
)
|
||||
|
||||
type memConfig struct {
|
||||
be restic.Backend
|
||||
}
|
||||
|
||||
func newTestSuite() *test.Suite[*memConfig] {
|
||||
return &test.Suite[*memConfig]{
|
||||
func newTestSuite() *test.Suite[struct{}] {
|
||||
return &test.Suite[struct{}]{
|
||||
// NewConfig returns a config for a new temporary backend that will be used in tests.
|
||||
NewConfig: func() (**memConfig, error) {
|
||||
cfg := &memConfig{}
|
||||
return &cfg, nil
|
||||
NewConfig: func() (*struct{}, error) {
|
||||
return &struct{}{}, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg *memConfig) (restic.Backend, error) {
|
||||
if cfg.be != nil {
|
||||
_, err := cfg.be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil && !cfg.be.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return nil, errors.New("config already exists")
|
||||
}
|
||||
}
|
||||
|
||||
cfg.be = mem.New()
|
||||
return cfg.be, nil
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg *memConfig) (restic.Backend, error) {
|
||||
if cfg.be == nil {
|
||||
cfg.be = mem.New()
|
||||
}
|
||||
return cfg.be, nil
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg *memConfig) error {
|
||||
// no cleanup needed
|
||||
return nil
|
||||
},
|
||||
Factory: mem.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@ import (
|
|||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/limiter"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/backend/rest"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
|
@ -36,6 +37,10 @@ type Backend struct {
|
|||
conn *StdioConn
|
||||
}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewLimitedBackendFactory("rclone", ParseConfig, location.NoPassword, Create, Open)
|
||||
}
|
||||
|
||||
// run starts command with args and initializes the StdioConn.
|
||||
func run(command string, args ...string) (*StdioConn, *sync.WaitGroup, chan struct{}, func() error, error) {
|
||||
cmd := exec.Command(command, args...)
|
||||
|
@ -134,7 +139,7 @@ func wrapConn(c *StdioConn, lim limiter.Limiter) *wrappedConn {
|
|||
}
|
||||
|
||||
// New initializes a Backend and starts the process.
|
||||
func newBackend(cfg Config, lim limiter.Limiter) (*Backend, error) {
|
||||
func newBackend(ctx context.Context, cfg Config, lim limiter.Limiter) (*Backend, error) {
|
||||
var (
|
||||
args []string
|
||||
err error
|
||||
|
@ -197,7 +202,7 @@ func newBackend(cfg Config, lim limiter.Limiter) (*Backend, error) {
|
|||
wg: wg,
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
wg.Add(1)
|
||||
|
@ -256,8 +261,8 @@ func newBackend(cfg Config, lim limiter.Limiter) (*Backend, error) {
|
|||
}
|
||||
|
||||
// Open starts an rclone process with the given config.
|
||||
func Open(cfg Config, lim limiter.Limiter) (*Backend, error) {
|
||||
be, err := newBackend(cfg, lim)
|
||||
func Open(ctx context.Context, cfg Config, lim limiter.Limiter) (*Backend, error) {
|
||||
be, err := newBackend(ctx, cfg, lim)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -272,7 +277,7 @@ func Open(cfg Config, lim limiter.Limiter) (*Backend, error) {
|
|||
URL: url,
|
||||
}
|
||||
|
||||
restBackend, err := rest.Open(restConfig, debug.RoundTripper(be.tr))
|
||||
restBackend, err := rest.Open(ctx, restConfig, debug.RoundTripper(be.tr))
|
||||
if err != nil {
|
||||
_ = be.Close()
|
||||
return nil, err
|
||||
|
@ -283,8 +288,8 @@ func Open(cfg Config, lim limiter.Limiter) (*Backend, error) {
|
|||
}
|
||||
|
||||
// Create initializes a new restic repo with rclone.
|
||||
func Create(ctx context.Context, cfg Config) (*Backend, error) {
|
||||
be, err := newBackend(cfg, nil)
|
||||
func Create(ctx context.Context, cfg Config, lim limiter.Limiter) (*Backend, error) {
|
||||
be, err := newBackend(ctx, cfg, lim)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
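The signature changes above thread the caller's context into newBackend instead of hard-wiring context.Background, and Create now accepts the limiter that the factory passes through. A hedged sketch of the effect on a caller (the remote name and timeout are illustrative; presumably cancelling the context while the rclone child process is still starting up now aborts Open):

package main

import (
	"context"
	"time"

	"github.com/restic/restic/internal/backend/rclone"
)

func main() {
	cfg := rclone.NewConfig()
	cfg.Remote = "remote:restic-repo" // illustrative remote name

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	be, err := rclone.Open(ctx, cfg, nil) // nil limiter: no bandwidth limiting
	if err != nil {
		panic(err)
	}
	defer func() { _ = be.Close() }()
}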
|
||||
|
|
|
@ -1,14 +1,11 @@
|
|||
package rclone_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os/exec"
|
||||
"testing"
|
||||
|
||||
"github.com/restic/restic/internal/backend/rclone"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
|
@ -24,23 +21,15 @@ func newTestSuite(t testing.TB) *test.Suite[rclone.Config] {
|
|||
return &cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg rclone.Config) (restic.Backend, error) {
|
||||
t.Logf("Create()")
|
||||
be, err := rclone.Create(context.TODO(), cfg)
|
||||
var e *exec.Error
|
||||
if errors.As(err, &e) && e.Err == exec.ErrNotFound {
|
||||
t.Skipf("program %q not found", e.Name)
|
||||
return nil, nil
|
||||
}
|
||||
return be, err
|
||||
},
|
||||
Factory: rclone.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg rclone.Config) (restic.Backend, error) {
|
||||
t.Logf("Open()")
|
||||
return rclone.Open(cfg, nil)
|
||||
},
|
||||
func findRclone(t testing.TB) {
|
||||
// try to find a rclone binary
|
||||
_, err := exec.LookPath("rclone")
|
||||
if err != nil {
|
||||
t.Skip(err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -51,9 +40,11 @@ func TestBackendRclone(t *testing.T) {
|
|||
}
|
||||
}()
|
||||
|
||||
findRclone(t)
|
||||
newTestSuite(t).RunTests(t)
|
||||
}
|
||||
|
||||
func BenchmarkBackendREST(t *testing.B) {
|
||||
findRclone(t)
|
||||
newTestSuite(t).RunBenchmarks(t)
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@ func TestRcloneExit(t *testing.T) {
|
|||
dir := rtest.TempDir(t)
|
||||
cfg := NewConfig()
|
||||
cfg.Remote = dir
|
||||
be, err := Open(cfg, nil)
|
||||
be, err := Open(context.TODO(), cfg, nil)
|
||||
var e *exec.Error
|
||||
if errors.As(err, &e) && e.Err == exec.ErrNotFound {
|
||||
t.Skipf("program %q not found", e.Name)
|
||||
|
@ -45,7 +45,7 @@ func TestRcloneFailedStart(t *testing.T) {
|
|||
cfg := NewConfig()
|
||||
// exits with exit code 1
|
||||
cfg.Program = "false"
|
||||
_, err := Open(cfg, nil)
|
||||
_, err := Open(context.TODO(), cfg, nil)
|
||||
var e *exec.ExitError
|
||||
if !errors.As(err, &e) {
|
||||
// unexpected error
|
||||
|
|
|
@ -36,3 +36,71 @@ var configTests = []test.ConfigTestData[Config]{
|
|||
func TestParseConfig(t *testing.T) {
|
||||
test.ParseConfigTester(t, ParseConfig, configTests)
|
||||
}
|
||||
|
||||
var passwordTests = []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
"rest:",
|
||||
"rest:/",
|
||||
},
|
||||
{
|
||||
"rest:localhost/",
|
||||
"rest:localhost/",
|
||||
},
|
||||
{
|
||||
"rest::123/",
|
||||
"rest::123/",
|
||||
},
|
||||
{
|
||||
"rest:http://",
|
||||
"rest:http://",
|
||||
},
|
||||
{
|
||||
"rest:http://hostname.foo:1234/",
|
||||
"rest:http://hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user@hostname.foo:1234/",
|
||||
"rest:http://user@hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:@hostname.foo:1234/",
|
||||
"rest:http://user:***@hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:p@hostname.foo:1234/",
|
||||
"rest:http://user:***@hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:pppppaaafhhfuuwiiehhthhghhdkjaoowpprooghjjjdhhwuuhgjsjhhfdjhruuhsjsdhhfhshhsppwufhhsjjsjs@hostname.foo:1234/",
|
||||
"rest:http://user:***@hostname.foo:1234/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:password@hostname",
|
||||
"rest:http://user:***@hostname/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:password@:123",
|
||||
"rest:http://user:***@:123/",
|
||||
},
|
||||
{
|
||||
"rest:http://user:password@",
|
||||
"rest:http://user:***@/",
|
||||
},
|
||||
}
|
||||
|
||||
func TestStripPassword(t *testing.T) {
|
||||
// Make sure that the factory uses the correct method
|
||||
StripPassword := NewFactory().StripPassword
|
||||
|
||||
for i, test := range passwordTests {
|
||||
t.Run(test.input, func(t *testing.T) {
|
||||
result := StripPassword(test.input)
|
||||
if result != test.expected {
|
||||
t.Errorf("test %d: expected '%s' but got '%s'", i, test.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
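The table above pins down the expected redaction, including the normalisation to a trailing slash. For intuition, here is a minimal, hedged sketch of URL password redaction built on net/url; it is illustrative only, not the rest backend's actual StripPassword implementation, and it does not reproduce every normalisation in the table.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// redactPassword hides the password component of a "rest:" location string.
func redactPassword(loc string) string {
	const prefix = "rest:"
	u, err := url.Parse(strings.TrimPrefix(loc, prefix))
	if err != nil {
		return loc // leave unparseable locations untouched
	}
	if _, hasPassword := u.User.Password(); hasPassword {
		u.User = url.UserPassword(u.User.Username(), "***")
	}
	return prefix + u.String()
}

func main() {
	fmt.Println(redactPassword("rest:http://user:secret@hostname.foo:1234/"))
	// rest:http://user:***@hostname.foo:1234/
}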
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
@ -29,6 +30,10 @@ type Backend struct {
|
|||
layout.Layout
|
||||
}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewHTTPBackendFactory("rest", ParseConfig, StripPassword, Create, Open)
|
||||
}
|
||||
|
||||
// the REST API protocol version is decided by HTTP request headers, these are the constants.
|
||||
const (
|
||||
ContentTypeV1 = "application/vnd.x.restic.rest.v1"
|
||||
|
@ -36,7 +41,7 @@ const (
|
|||
)
|
||||
|
||||
// Open opens the REST backend with the given config.
|
||||
func Open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
||||
func Open(_ context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
|
||||
// use url without trailing slash for layout
|
||||
url := cfg.URL.String()
|
||||
if url[len(url)-1] == '/' {
|
||||
|
@ -55,7 +60,7 @@ func Open(cfg Config, rt http.RoundTripper) (*Backend, error) {
|
|||
|
||||
// Create creates a new repository on the REST server configured in config.
|
||||
func Create(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
|
||||
be, err := Open(cfg, rt)
|
||||
be, err := Open(ctx, cfg, rt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -117,7 +117,7 @@ func TestListAPI(t *testing.T) {
|
|||
URL: srvURL,
|
||||
}
|
||||
|
||||
be, err := rest.Open(cfg, http.DefaultTransport)
|
||||
be, err := rest.Open(context.TODO(), cfg, http.DefaultTransport)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
|
@ -9,10 +9,8 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/rest"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
|
@ -67,12 +65,7 @@ func runRESTServer(ctx context.Context, t testing.TB, dir string) (*url.URL, fun
|
|||
return url, cleanup
|
||||
}
|
||||
|
||||
func newTestSuite(_ context.Context, t testing.TB, url *url.URL, minimalData bool) *test.Suite[rest.Config] {
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
func newTestSuite(url *url.URL, minimalData bool) *test.Suite[rest.Config] {
|
||||
return &test.Suite[rest.Config]{
|
||||
MinimalData: minimalData,
|
||||
|
||||
|
@ -83,20 +76,7 @@ func newTestSuite(_ context.Context, t testing.TB, url *url.URL, minimalData boo
|
|||
return &cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg rest.Config) (restic.Backend, error) {
|
||||
return rest.Create(context.TODO(), cfg, tr)
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg rest.Config) (restic.Backend, error) {
|
||||
return rest.Open(cfg, tr)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg rest.Config) error {
|
||||
return nil
|
||||
},
|
||||
Factory: rest.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -114,7 +94,7 @@ func TestBackendREST(t *testing.T) {
|
|||
serverURL, cleanup := runRESTServer(ctx, t, dir)
|
||||
defer cleanup()
|
||||
|
||||
newTestSuite(ctx, t, serverURL, false).RunTests(t)
|
||||
newTestSuite(serverURL, false).RunTests(t)
|
||||
}
|
||||
|
||||
func TestBackendRESTExternalServer(t *testing.T) {
|
||||
|
@ -128,10 +108,7 @@ func TestBackendRESTExternalServer(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
newTestSuite(ctx, t, cfg.URL, true).RunTests(t)
|
||||
newTestSuite(cfg.URL, true).RunTests(t)
|
||||
}
|
||||
|
||||
func BenchmarkBackendREST(t *testing.B) {
|
||||
|
@ -142,5 +119,5 @@ func BenchmarkBackendREST(t *testing.B) {
|
|||
serverURL, cleanup := runRESTServer(ctx, t, dir)
|
||||
defer cleanup()
|
||||
|
||||
newTestSuite(ctx, t, serverURL, false).RunBenchmarks(t)
|
||||
newTestSuite(serverURL, false).RunBenchmarks(t)
|
||||
}
|
||||
|
|
|
@ -97,24 +97,14 @@ func createConfig(endpoint, bucket, prefix string, useHTTP bool) (*Config, error
|
|||
var _ restic.ApplyEnvironmenter = &Config{}
|
||||
|
||||
// ApplyEnvironment saves values from the environment to the config.
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) error {
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) {
|
||||
if cfg.KeyID == "" {
|
||||
cfg.KeyID = os.Getenv(prefix + "AWS_ACCESS_KEY_ID")
|
||||
}
|
||||
|
||||
if cfg.Secret.String() == "" {
|
||||
cfg.Secret = options.NewSecretString(os.Getenv(prefix + "AWS_SECRET_ACCESS_KEY"))
|
||||
}
|
||||
|
||||
if cfg.KeyID == "" && cfg.Secret.String() != "" {
|
||||
return errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty")
|
||||
} else if cfg.KeyID != "" && cfg.Secret.String() == "" {
|
||||
return errors.Fatalf("unable to open S3 backend: Secret ($AWS_SECRET_ACCESS_KEY) is empty")
|
||||
}
|
||||
|
||||
if cfg.Region == "" {
|
||||
cfg.Region = os.Getenv(prefix + "AWS_DEFAULT_REGION")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -56,6 +56,24 @@ var configTests = []test.ConfigTestData[Config]{
|
|||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:hostname.foo/foobar", Cfg: Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:hostname.foo/foobar/prefix/directory", Cfg: Config{
|
||||
Endpoint: "hostname.foo",
|
||||
Bucket: "foobar",
|
||||
Prefix: "prefix/directory",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:https://hostname/foobar", Cfg: Config{
|
||||
Endpoint: "hostname",
|
||||
Bucket: "foobar",
|
||||
Prefix: "",
|
||||
Connections: 5,
|
||||
}},
|
||||
{S: "s3:https://hostname:9999/foobar", Cfg: Config{
|
||||
Endpoint: "hostname:9999",
|
||||
Bucket: "foobar",
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
@ -31,11 +32,21 @@ type Backend struct {
|
|||
// make sure that *Backend implements backend.Backend
|
||||
var _ restic.Backend = &Backend{}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewHTTPBackendFactory("s3", ParseConfig, location.NoPassword, Create, Open)
|
||||
}
|
||||
|
||||
const defaultLayout = "default"
|
||||
|
||||
func open(ctx context.Context, cfg Config, rt http.RoundTripper) (*Backend, error) {
|
||||
debug.Log("open, config %#v", cfg)
|
||||
|
||||
if cfg.KeyID == "" && cfg.Secret.String() != "" {
|
||||
return nil, errors.Fatalf("unable to open S3 backend: Key ID ($AWS_ACCESS_KEY_ID) is empty")
|
||||
} else if cfg.KeyID != "" && cfg.Secret.String() == "" {
|
||||
return nil, errors.Fatalf("unable to open S3 backend: Secret ($AWS_SECRET_ACCESS_KEY) is empty")
|
||||
}
|
||||
|
||||
if cfg.MaxRetries > 0 {
|
||||
minio.MaxRetry = int(cfg.MaxRetries)
|
||||
}
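With ApplyEnvironment reduced to plain assignment (see the config change above), the "key without secret" / "secret without key" consistency check now lives in open(), so it covers credentials coming from flags and from the environment alike. A hedged usage fragment wrapped in a small program (the location string is illustrative):

package main

import (
	"fmt"

	"github.com/restic/restic/internal/backend/s3"
)

func main() {
	cfg, err := s3.ParseConfig("s3:s3.amazonaws.com/bucket-name/restic")
	if err != nil {
		panic(err)
	}

	// ApplyEnvironment can no longer fail; it only copies AWS_* variables into
	// empty fields. Inconsistent credentials are rejected later by open().
	cfg.ApplyEnvironment("")

	fmt.Printf("endpoint=%s bucket=%s prefix=%q\n", cfg.Endpoint, cfg.Bucket, cfg.Prefix)
}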
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
|
@ -15,7 +14,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/backend/s3"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/options"
|
||||
|
@ -98,85 +97,42 @@ func newRandomCredentials(t testing.TB) (key, secret string) {
|
|||
return key, secret
|
||||
}
|
||||
|
||||
type MinioTestConfig struct {
|
||||
s3.Config
|
||||
func newMinioTestSuite(t testing.TB) (*test.Suite[s3.Config], func()) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
tempdir string
|
||||
stopServer func()
|
||||
}
|
||||
tempdir := rtest.TempDir(t)
|
||||
key, secret := newRandomCredentials(t)
|
||||
cleanup := runMinio(ctx, t, tempdir, key, secret)
|
||||
|
||||
func createS3(t testing.TB, cfg MinioTestConfig, tr http.RoundTripper) (be restic.Backend, err error) {
|
||||
for i := 0; i < 10; i++ {
|
||||
be, err = s3.Create(context.TODO(), cfg.Config, tr)
|
||||
if err != nil {
|
||||
t.Logf("s3 open: try %d: error %v", i, err)
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
continue
|
||||
return &test.Suite[s3.Config]{
|
||||
// NewConfig returns a config for a new temporary backend that will be used in tests.
|
||||
NewConfig: func() (*s3.Config, error) {
|
||||
cfg := s3.NewConfig()
|
||||
cfg.Endpoint = "localhost:9000"
|
||||
cfg.Bucket = "restictestbucket"
|
||||
cfg.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
|
||||
cfg.UseHTTP = true
|
||||
cfg.KeyID = key
|
||||
cfg.Secret = options.NewSecretString(secret)
|
||||
return &cfg, nil
|
||||
},
|
||||
|
||||
Factory: location.NewHTTPBackendFactory("s3", s3.ParseConfig, location.NoPassword, func(ctx context.Context, cfg s3.Config, rt http.RoundTripper) (be restic.Backend, err error) {
|
||||
for i := 0; i < 10; i++ {
|
||||
be, err = s3.Create(ctx, cfg, rt)
|
||||
if err != nil {
|
||||
t.Logf("s3 open: try %d: error %v", i, err)
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
return be, err
|
||||
}, s3.Open),
|
||||
}, func() {
|
||||
defer cancel()
|
||||
defer cleanup()
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
return be, err
|
||||
}
|
||||
|
||||
func newMinioTestSuite(ctx context.Context, t testing.TB) *test.Suite[MinioTestConfig] {
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
return &test.Suite[MinioTestConfig]{
|
||||
// NewConfig returns a config for a new temporary backend that will be used in tests.
|
||||
NewConfig: func() (*MinioTestConfig, error) {
|
||||
cfg := MinioTestConfig{}
|
||||
|
||||
cfg.tempdir = rtest.TempDir(t)
|
||||
key, secret := newRandomCredentials(t)
|
||||
cfg.stopServer = runMinio(ctx, t, cfg.tempdir, key, secret)
|
||||
|
||||
cfg.Config = s3.NewConfig()
|
||||
cfg.Config.Endpoint = "localhost:9000"
|
||||
cfg.Config.Bucket = "restictestbucket"
|
||||
cfg.Config.Prefix = fmt.Sprintf("test-%d", time.Now().UnixNano())
|
||||
cfg.Config.UseHTTP = true
|
||||
cfg.Config.KeyID = key
|
||||
cfg.Config.Secret = options.NewSecretString(secret)
|
||||
return &cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg MinioTestConfig) (restic.Backend, error) {
|
||||
be, err := createS3(t, cfg, tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil && !be.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return nil, errors.New("config already exists")
|
||||
}
|
||||
|
||||
return be, nil
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg MinioTestConfig) (restic.Backend, error) {
|
||||
return s3.Open(ctx, cfg.Config, tr)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg MinioTestConfig) error {
|
||||
if cfg.stopServer != nil {
|
||||
cfg.stopServer()
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendMinio(t *testing.T) {
|
||||
|
@ -193,10 +149,10 @@ func TestBackendMinio(t *testing.T) {
|
|||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
suite, cleanup := newMinioTestSuite(t)
|
||||
defer cleanup()
|
||||
|
||||
newMinioTestSuite(ctx, t).RunTests(t)
|
||||
suite.RunTests(t)
|
||||
}
|
||||
|
||||
func BenchmarkBackendMinio(t *testing.B) {
|
||||
|
@ -207,18 +163,13 @@ func BenchmarkBackendMinio(t *testing.B) {
|
|||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
suite, cleanup := newMinioTestSuite(t)
|
||||
defer cleanup()
|
||||
|
||||
newMinioTestSuite(ctx, t).RunBenchmarks(t)
|
||||
suite.RunBenchmarks(t)
|
||||
}
|
||||
|
||||
func newS3TestSuite(t testing.TB) *test.Suite[s3.Config] {
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
func newS3TestSuite() *test.Suite[s3.Config] {
|
||||
return &test.Suite[s3.Config]{
|
||||
// do not use excessive data
|
||||
MinimalData: true,
|
||||
|
@ -236,39 +187,7 @@ func newS3TestSuite(t testing.TB) *test.Suite[s3.Config] {
|
|||
return cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg s3.Config) (restic.Backend, error) {
|
||||
be, err := s3.Create(context.TODO(), cfg, tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil && !be.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return nil, errors.New("config already exists")
|
||||
}
|
||||
|
||||
return be, nil
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg s3.Config) (restic.Backend, error) {
|
||||
return s3.Open(context.TODO(), cfg, tr)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg s3.Config) error {
|
||||
be, err := s3.Open(context.TODO(), cfg, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return be.Delete(context.TODO())
|
||||
},
|
||||
Factory: s3.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -293,7 +212,7 @@ func TestBackendS3(t *testing.T) {
|
|||
}
|
||||
|
||||
t.Logf("run tests")
|
||||
newS3TestSuite(t).RunTests(t)
|
||||
newS3TestSuite().RunTests(t)
|
||||
}
|
||||
|
||||
func BenchmarkBackendS3(t *testing.B) {
|
||||
|
@ -311,5 +230,5 @@ func BenchmarkBackendS3(t *testing.B) {
|
|||
}
|
||||
|
||||
t.Logf("run tests")
|
||||
newS3TestSuite(t).RunBenchmarks(t)
|
||||
newS3TestSuite().RunBenchmarks(t)
|
||||
}
|
||||
|
|
|
@ -15,6 +15,8 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/limiter"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
@ -41,6 +43,10 @@ type SFTP struct {
|
|||
|
||||
var _ restic.Backend = &SFTP{}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewLimitedBackendFactory("sftp", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open))
|
||||
}
|
||||
|
||||
const defaultLayout = "default"
|
||||
|
||||
func startClient(cfg Config) (*SFTP, error) {
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package sftp_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
@ -11,7 +10,6 @@ import (
|
|||
"github.com/restic/restic/internal/backend/sftp"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
|
@ -33,11 +31,7 @@ func newTestSuite(t testing.TB) *test.Suite[sftp.Config] {
|
|||
return &test.Suite[sftp.Config]{
|
||||
// NewConfig returns a config for a new temporary backend that will be used in tests.
|
||||
NewConfig: func() (*sftp.Config, error) {
|
||||
dir, err := os.MkdirTemp(rtest.TestTempDir, "restic-test-sftp-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dir := rtest.TempDir(t)
|
||||
t.Logf("create new backend at %v", dir)
|
||||
|
||||
cfg := &sftp.Config{
|
||||
|
@ -48,25 +42,7 @@ func newTestSuite(t testing.TB) *test.Suite[sftp.Config] {
|
|||
return cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg sftp.Config) (restic.Backend, error) {
|
||||
return sftp.Create(context.TODO(), cfg)
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg sftp.Config) (restic.Backend, error) {
|
||||
return sftp.Open(context.TODO(), cfg)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg sftp.Config) error {
|
||||
if !rtest.TestCleanupTempDirs {
|
||||
t.Logf("leaving test backend dir at %v", cfg.Path)
|
||||
}
|
||||
|
||||
rtest.RemoveAll(t, cfg.Path)
|
||||
return nil
|
||||
},
|
||||
Factory: sftp.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -10,7 +10,6 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/options"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
)
|
||||
|
||||
// Config contains all configuration necessary to connect to an SMB server
|
||||
|
@ -126,18 +125,14 @@ func createConfig(user string, host string, port int, sharename, directory strin
|
|||
return &cfg, nil
|
||||
}
|
||||
|
||||
var _ restic.ApplyEnvironmenter = &Config{}
|
||||
|
||||
// ApplyEnvironment saves values from the environment to the config.
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) error {
|
||||
if cfg.User == "" {
|
||||
cfg.User = os.Getenv("RESTIC_SMB_USER")
|
||||
}
|
||||
|
||||
if cfg.Password.String() == "" {
|
||||
cfg.Password = options.NewSecretString(os.Getenv("RESTIC_SMB_PASSWORD"))
|
||||
}
|
||||
|
||||
if cfg.Domain == "" {
|
||||
cfg.Domain = os.Getenv("RESTIC_SMB_DOMAIN")
|
||||
}
|
||||
|
|
|
@ -17,6 +17,8 @@ import (
|
|||
"github.com/hirochachacha/go-smb2"
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/limiter"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
@ -58,6 +60,10 @@ type Backend struct {
|
|||
// make sure that *Backend implements backend.Backend
|
||||
var _ restic.Backend = &Backend{}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewLimitedBackendFactory("smb", ParseConfig, location.NoPassword, limiter.WrapBackendConstructor(Create), limiter.WrapBackendConstructor(Open))
|
||||
}
|
||||
|
||||
const (
|
||||
defaultLayout = "default"
|
||||
)
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package smb_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
|
@ -9,7 +8,6 @@ import (
|
|||
"github.com/restic/restic/internal/backend/smb"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/options"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
|
@ -18,7 +16,7 @@ func newTestSuite(t testing.TB) *test.Suite[smb.Config] {
|
|||
// NewConfig returns a config for a new temporary backend that will be used in tests.
|
||||
NewConfig: func() (*smb.Config, error) {
|
||||
|
||||
cfg := &smb.Config{}
|
||||
cfg := smb.NewConfig()
|
||||
cfg.Host = "127.0.0.1"
|
||||
cfg.User = "smbuser"
|
||||
cfg.ShareName = cfg.User
|
||||
|
@ -34,28 +32,10 @@ func newTestSuite(t testing.TB) *test.Suite[smb.Config] {
|
|||
|
||||
t.Logf("create new backend at %v", cfg.Host+"/"+cfg.ShareName)
|
||||
|
||||
return cfg, nil
|
||||
return &cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg smb.Config) (restic.Backend, error) {
|
||||
return smb.Create(context.TODO(), cfg)
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg smb.Config) (restic.Backend, error) {
|
||||
return smb.Open(context.TODO(), cfg)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg smb.Config) error {
|
||||
if !rtest.TestCleanupTempDirs {
|
||||
t.Logf("leaving test backend dir at %v", cfg.Path)
|
||||
}
|
||||
|
||||
rtest.RemoveAll(t, cfg.Path)
|
||||
return nil
|
||||
},
|
||||
Factory: smb.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -77,7 +77,7 @@ func ParseConfig(s string) (*Config, error) {
|
|||
var _ restic.ApplyEnvironmenter = &Config{}
|
||||
|
||||
// ApplyEnvironment saves values from the environment to the config.
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) error {
|
||||
func (cfg *Config) ApplyEnvironment(prefix string) {
|
||||
for _, val := range []struct {
|
||||
s *string
|
||||
env string
|
||||
|
@ -130,5 +130,4 @@ func (cfg *Config) ApplyEnvironment(prefix string) error {
|
|||
*val.s = options.NewSecretString(os.Getenv(val.env))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/layout"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/debug"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
|
@ -34,6 +35,10 @@ type beSwift struct {
|
|||
// ensure statically that *beSwift implements restic.Backend.
|
||||
var _ restic.Backend = &beSwift{}
|
||||
|
||||
func NewFactory() location.Factory {
|
||||
return location.NewHTTPBackendFactory("swift", ParseConfig, location.NoPassword, Open, Open)
|
||||
}
|
||||
|
||||
// Open opens the swift backend at a container in region. The container is
|
||||
// created if it does not exist yet.
|
||||
func Open(ctx context.Context, cfg Config, rt http.RoundTripper) (restic.Backend, error) {
|
||||
|
|
|
@ -1,26 +1,18 @@
|
|||
package swift_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/swift"
|
||||
"github.com/restic/restic/internal/backend/test"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
rtest "github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
||||
func newSwiftTestSuite(t testing.TB) *test.Suite[swift.Config] {
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
return &test.Suite[swift.Config]{
|
||||
// do not use excessive data
|
||||
MinimalData: true,
|
||||
|
@ -48,47 +40,13 @@ func newSwiftTestSuite(t testing.TB) *test.Suite[swift.Config] {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if err = cfg.ApplyEnvironment("RESTIC_TEST_"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg.ApplyEnvironment("RESTIC_TEST_")
|
||||
cfg.Prefix += fmt.Sprintf("/test-%d", time.Now().UnixNano())
|
||||
t.Logf("using prefix %v", cfg.Prefix)
|
||||
return cfg, nil
|
||||
},
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create: func(cfg swift.Config) (restic.Backend, error) {
|
||||
be, err := swift.Open(context.TODO(), cfg, tr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil && !be.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return nil, errors.New("config already exists")
|
||||
}
|
||||
|
||||
return be, nil
|
||||
},
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open: func(cfg swift.Config) (restic.Backend, error) {
|
||||
return swift.Open(context.TODO(), cfg, tr)
|
||||
},
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup: func(cfg swift.Config) error {
|
||||
be, err := swift.Open(context.TODO(), cfg, tr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return be.Delete(context.TODO())
|
||||
},
|
||||
Factory: swift.NewFactory(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,11 +1,16 @@
|
|||
package test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/restic/restic/internal/backend"
|
||||
"github.com/restic/restic/internal/backend/location"
|
||||
"github.com/restic/restic/internal/errors"
|
||||
"github.com/restic/restic/internal/restic"
|
||||
"github.com/restic/restic/internal/test"
|
||||
)
|
||||
|
@ -18,14 +23,8 @@ type Suite[C any] struct {
|
|||
// NewConfig returns a config for a new temporary backend that will be used in tests.
|
||||
NewConfig func() (*C, error)
|
||||
|
||||
// CreateFn is a function that creates a temporary repository for the tests.
|
||||
Create func(cfg C) (restic.Backend, error)
|
||||
|
||||
// OpenFn is a function that opens a previously created temporary repository.
|
||||
Open func(cfg C) (restic.Backend, error)
|
||||
|
||||
// CleanupFn removes data created during the tests.
|
||||
Cleanup func(cfg C) error
|
||||
// Factory contains a factory that can be used to create or open a repository for the tests.
|
||||
Factory location.Factory
|
||||
|
||||
// MinimalData instructs the tests to not use excessive data.
|
||||
MinimalData bool
|
||||
|
@ -60,11 +59,7 @@ func (s *Suite[C]) RunTests(t *testing.T) {
|
|||
return
|
||||
}
|
||||
|
||||
if s.Cleanup != nil {
|
||||
if err = s.Cleanup(*s.Config); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
s.cleanup(t)
|
||||
}
|
||||
|
||||
type testFunction struct {
|
||||
|
@ -158,13 +153,34 @@ func (s *Suite[C]) RunBenchmarks(b *testing.B) {
|
|||
return
|
||||
}
|
||||
|
||||
if err = s.Cleanup(*s.Config); err != nil {
|
||||
b.Fatal(err)
|
||||
s.cleanup(b)
|
||||
}
|
||||
|
||||
func (s *Suite[C]) createOrError() (restic.Backend, error) {
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
be, err := s.Factory.Create(context.TODO(), s.Config, tr, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = be.Stat(context.TODO(), restic.Handle{Type: restic.ConfigFile})
|
||||
if err != nil && !be.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return nil, errors.New("config already exists")
|
||||
}
|
||||
|
||||
return be, nil
|
||||
}
|
||||
|
||||
func (s *Suite[C]) create(t testing.TB) restic.Backend {
|
||||
be, err := s.Create(*s.Config)
|
||||
be, err := s.createOrError()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -172,13 +188,26 @@ func (s *Suite[C]) create(t testing.TB) restic.Backend {
|
|||
}
|
||||
|
||||
func (s *Suite[C]) open(t testing.TB) restic.Backend {
|
||||
be, err := s.Open(*s.Config)
|
||||
tr, err := backend.Transport(backend.TransportOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create transport for tests: %v", err)
|
||||
}
|
||||
|
||||
be, err := s.Factory.Open(context.TODO(), s.Config, tr, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return be
|
||||
}
|
||||
|
||||
func (s *Suite[C]) cleanup(t testing.TB) {
|
||||
be := s.open(t)
|
||||
if err := be.Delete(context.TODO()); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s.close(t, be)
|
||||
}
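After this refactor a backend's test file shrinks to a config constructor plus the backend's Factory; create, open and cleanup are all derived from the factory, and cleanup simply deletes the repository through the backend itself. A minimal sketch, modelled on the mem test shown earlier (the package name is hypothetical):

package mybackend_test // hypothetical package name

import (
	"testing"

	"github.com/restic/restic/internal/backend/mem"
	"github.com/restic/restic/internal/backend/test"
)

func newTestSuite() *test.Suite[struct{}] {
	return &test.Suite[struct{}]{
		// NewConfig returns a config for a new temporary backend.
		NewConfig: func() (*struct{}, error) { return &struct{}{}, nil },
		// Factory supplies Create/Open/StripPassword for the suite.
		Factory: mem.NewFactory(),
	}
}

func TestBackend(t *testing.T)      { newTestSuite().RunTests(t) }
func BenchmarkBackend(b *testing.B) { newTestSuite().RunBenchmarks(b) }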
|
||||
|
||||
func (s *Suite[C]) close(t testing.TB, be restic.Backend) {
|
||||
err := be.Close()
|
||||
if err != nil {
|
||||
|
|
|
@ -36,6 +36,12 @@ func beTest(ctx context.Context, be restic.Backend, h restic.Handle) (bool, erro
|
|||
return err == nil, err
|
||||
}
|
||||
|
||||
// TestStripPasswordCall tests that the StripPassword method of a factory can be called without crashing.
|
||||
// It does not verify whether passwords are removed correctly
|
||||
func (s *Suite[C]) TestStripPasswordCall(_ *testing.T) {
|
||||
s.Factory.StripPassword("some random string")
|
||||
}
|
||||
|
||||
// TestCreateWithConfig tests that creating a backend in a location which already
|
||||
// has a config file fails.
|
||||
func (s *Suite[C]) TestCreateWithConfig(t *testing.T) {
|
||||
|
@ -57,7 +63,7 @@ func (s *Suite[C]) TestCreateWithConfig(t *testing.T) {
|
|||
store(t, b, restic.ConfigFile, []byte("test config"))
|
||||
|
||||
// now create the backend again, this must fail
|
||||
_, err = s.Create(*s.Config)
|
||||
_, err = s.createOrError()
|
||||
if err == nil {
|
||||
t.Fatalf("expected error not found for creating a backend with an existing config file")
|
||||
}
|
||||
|
|
|
@ -222,19 +222,10 @@ func (d *dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
|
|||
}
|
||||
|
||||
func (d *dir) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
|
||||
debug.Log("Listxattr(%v, %v)", d.node.Name, req.Size)
|
||||
for _, attr := range d.node.ExtendedAttributes {
|
||||
resp.Append(attr.Name)
|
||||
}
|
||||
nodeToXattrList(d.node, req, resp)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dir) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
|
||||
debug.Log("Getxattr(%v, %v, %v)", d.node.Name, req.Name, req.Size)
|
||||
attrval := d.node.GetExtendedAttribute(req.Name)
|
||||
if attrval != nil {
|
||||
resp.Xattr = attrval
|
||||
return nil
|
||||
}
|
||||
return fuse.ErrNoXattr
|
||||
return nodeGetXattr(d.node, req, resp)
|
||||
}
|
||||
|
|
|
@ -167,19 +167,10 @@ func (f *openFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.R
|
|||
}
|
||||
|
||||
func (f *file) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
|
||||
debug.Log("Listxattr(%v, %v)", f.node.Name, req.Size)
|
||||
for _, attr := range f.node.ExtendedAttributes {
|
||||
resp.Append(attr.Name)
|
||||
}
|
||||
nodeToXattrList(f.node, req, resp)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *file) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
|
||||
debug.Log("Getxattr(%v, %v, %v)", f.node.Name, req.Name, req.Size)
|
||||
attrval := f.node.GetExtendedAttribute(req.Name)
|
||||
if attrval != nil {
|
||||
resp.Xattr = attrval
|
||||
return nil
|
||||
}
|
||||
return fuse.ErrNoXattr
|
||||
return nodeGetXattr(f.node, req, resp)
|
||||
}
|
||||
|
|
|
@ -271,6 +271,31 @@ func TestInodeFromNode(t *testing.T) {
|
|||
rtest.Assert(t, inoA != inoAbb, "inode(a/b/b) = inode(a)")
|
||||
}
|
||||
|
||||
func TestLink(t *testing.T) {
|
||||
node := &restic.Node{Name: "foo.txt", Type: "symlink", Links: 1, LinkTarget: "dst", ExtendedAttributes: []restic.ExtendedAttribute{
|
||||
{Name: "foo", Value: []byte("bar")},
|
||||
}}
|
||||
|
||||
lnk, err := newLink(&Root{}, 42, node)
|
||||
rtest.OK(t, err)
|
||||
target, err := lnk.Readlink(context.TODO(), nil)
|
||||
rtest.OK(t, err)
|
||||
rtest.Equals(t, node.LinkTarget, target)
|
||||
|
||||
exp := &fuse.ListxattrResponse{}
|
||||
exp.Append("foo")
|
||||
resp := &fuse.ListxattrResponse{}
|
||||
rtest.OK(t, lnk.Listxattr(context.TODO(), &fuse.ListxattrRequest{}, resp))
|
||||
rtest.Equals(t, exp.Xattr, resp.Xattr)
|
||||
|
||||
getResp := &fuse.GetxattrResponse{}
|
||||
rtest.OK(t, lnk.Getxattr(context.TODO(), &fuse.GetxattrRequest{Name: "foo"}, getResp))
|
||||
rtest.Equals(t, node.ExtendedAttributes[0].Value, getResp.Xattr)
|
||||
|
||||
err = lnk.Getxattr(context.TODO(), &fuse.GetxattrRequest{Name: "invalid"}, nil)
|
||||
rtest.Assert(t, err != nil, "missing error on reading invalid xattr")
|
||||
}
|
||||
|
||||
var sink uint64
|
||||
|
||||
func BenchmarkInode(b *testing.B) {
|
||||
|
|
|
@ -46,3 +46,12 @@ func (l *link) Attr(_ context.Context, a *fuse.Attr) error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *link) Listxattr(_ context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
|
||||
nodeToXattrList(l.node, req, resp)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *link) Getxattr(_ context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
|
||||
return nodeGetXattr(l.node, req, resp)
|
||||
}
|
||||
|
|
internal/fuse/xattr.go (new file, 27 lines)

@@ -0,0 +1,27 @@
//go:build darwin || freebsd || linux
// +build darwin freebsd linux

package fuse

import (
	"github.com/anacrolix/fuse"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/restic"
)

func nodeToXattrList(node *restic.Node, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) {
	debug.Log("Listxattr(%v, %v)", node.Name, req.Size)
	for _, attr := range node.ExtendedAttributes {
		resp.Append(attr.Name)
	}
}

func nodeGetXattr(node *restic.Node, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
	debug.Log("Getxattr(%v, %v, %v)", node.Name, req.Name, req.Size)
	attrval := node.GetExtendedAttribute(req.Name)
	if attrval != nil {
		resp.Xattr = attrval
		return nil
	}
	return fuse.ErrNoXattr
}

@ -17,12 +17,12 @@ import (
|
|||
// needs to be resized when the table grows, preventing memory usage spikes.
|
||||
type indexMap struct {
|
||||
// The number of buckets is always a power of two and never zero.
|
||||
buckets []*indexEntry
|
||||
buckets []uint
|
||||
numentries uint
|
||||
|
||||
mh maphash.Hash
|
||||
|
||||
free *indexEntry // Free list.
|
||||
blockList hashedArrayTree
|
||||
}
|
||||
|
||||
const (
|
||||
|
@ -41,7 +41,7 @@ func (m *indexMap) add(id restic.ID, packIdx int, offset, length uint32, uncompr
|
|||
}
|
||||
|
||||
h := m.hash(id)
|
||||
e := m.newEntry()
|
||||
e, idx := m.newEntry()
|
||||
e.id = id
|
||||
e.next = m.buckets[h] // Prepend to existing chain.
|
||||
e.packIndex = packIdx
|
||||
|
@ -49,18 +49,16 @@ func (m *indexMap) add(id restic.ID, packIdx int, offset, length uint32, uncompr
|
|||
e.length = length
|
||||
e.uncompressedLength = uncompressedLength
|
||||
|
||||
m.buckets[h] = e
|
||||
m.buckets[h] = idx
|
||||
m.numentries++
|
||||
}
|
||||
|
||||
// foreach calls fn for all entries in the map, until fn returns false.
|
||||
func (m *indexMap) foreach(fn func(*indexEntry) bool) {
|
||||
for _, e := range m.buckets {
|
||||
for e != nil {
|
||||
if !fn(e) {
|
||||
return
|
||||
}
|
||||
e = e.next
|
||||
blockCount := m.blockList.Size()
|
||||
for i := uint(1); i < blockCount; i++ {
|
||||
if !fn(m.resolve(i)) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -72,7 +70,10 @@ func (m *indexMap) foreachWithID(id restic.ID, fn func(*indexEntry)) {
|
|||
}
|
||||
|
||||
h := m.hash(id)
|
||||
for e := m.buckets[h]; e != nil; e = e.next {
|
||||
ei := m.buckets[h]
|
||||
for ei != 0 {
|
||||
e := m.resolve(ei)
|
||||
ei = e.next
|
||||
if e.id != id {
|
||||
continue
|
||||
}
|
||||
|
@ -87,26 +88,27 @@ func (m *indexMap) get(id restic.ID) *indexEntry {
|
|||
}
|
||||
|
||||
h := m.hash(id)
|
||||
for e := m.buckets[h]; e != nil; e = e.next {
|
||||
ei := m.buckets[h]
|
||||
for ei != 0 {
|
||||
e := m.resolve(ei)
|
||||
if e.id == id {
|
||||
return e
|
||||
}
|
||||
ei = e.next
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *indexMap) grow() {
|
||||
old := m.buckets
|
||||
m.buckets = make([]*indexEntry, growthFactor*len(m.buckets))
|
||||
m.buckets = make([]uint, growthFactor*len(m.buckets))
|
||||
|
||||
for _, e := range old {
|
||||
for e != nil {
|
||||
h := m.hash(e.id)
|
||||
next := e.next
|
||||
e.next = m.buckets[h]
|
||||
m.buckets[h] = e
|
||||
e = next
|
||||
}
|
||||
blockCount := m.blockList.Size()
|
||||
for i := uint(1); i < blockCount; i++ {
|
||||
e := m.resolve(i)
|
||||
|
||||
h := m.hash(e.id)
|
||||
e.next = m.buckets[h]
|
||||
m.buckets[h] = i
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -124,47 +126,106 @@ func (m *indexMap) hash(id restic.ID) uint {
|
|||
|
||||
func (m *indexMap) init() {
|
||||
const initialBuckets = 64
|
||||
m.buckets = make([]*indexEntry, initialBuckets)
|
||||
m.buckets = make([]uint, initialBuckets)
|
||||
// first entry in blockList serves as null byte
|
||||
m.blockList = *newHAT()
|
||||
m.newEntry()
|
||||
}
|
||||
|
||||
func (m *indexMap) len() uint { return m.numentries }
|
||||
|
||||
func (m *indexMap) newEntry() *indexEntry {
|
||||
// We keep a free list of objects to speed up allocation and GC.
|
||||
// There's an obvious trade-off here: allocating in larger batches
|
||||
// means we allocate faster and the GC has to keep fewer bits to track
|
||||
// what we have in use, but it means we waste some space.
|
||||
//
|
||||
// Then again, allocating each indexEntry separately also wastes space
|
||||
// on 32-bit platforms, because the Go malloc has no size class for
|
||||
// exactly 52 bytes, so it puts the indexEntry in a 64-byte slot instead.
|
||||
// See src/runtime/sizeclasses.go in the Go source repo.
|
||||
//
|
||||
// The batch size of 4 means we hit the size classes for 4×64=256 bytes
|
||||
// (64-bit) and 4×52=208 bytes (32-bit), wasting nothing in malloc on
|
||||
// 64-bit and relatively little on 32-bit.
|
||||
const entryAllocBatch = 4
|
||||
func (m *indexMap) newEntry() (*indexEntry, uint) {
|
||||
return m.blockList.Alloc()
|
||||
}
|
||||
|
||||
e := m.free
|
||||
if e != nil {
|
||||
m.free = e.next
|
||||
} else {
|
||||
free := new([entryAllocBatch]indexEntry)
|
||||
e = &free[0]
|
||||
for i := 1; i < len(free)-1; i++ {
|
||||
free[i].next = &free[i+1]
|
||||
}
|
||||
m.free = &free[1]
|
||||
}
|
||||
|
||||
return e
|
||||
func (m *indexMap) resolve(idx uint) *indexEntry {
|
||||
return m.blockList.Ref(idx)
|
||||
}
|
||||
|
||||
type indexEntry struct {
|
||||
id restic.ID
|
||||
next *indexEntry
|
||||
next uint
|
||||
packIndex int // Position in containing Index's packs field.
|
||||
offset uint32
|
||||
length uint32
|
||||
uncompressedLength uint32
|
||||
}
|
||||
|
||||
type hashedArrayTree struct {
|
||||
mask uint
|
||||
maskShift uint
|
||||
blockSize uint
|
||||
|
||||
size uint
|
||||
blockList [][]indexEntry
|
||||
}
|
||||
|
||||
func newHAT() *hashedArrayTree {
|
||||
// start with a small block size
|
||||
blockSizePower := uint(2)
|
||||
blockSize := uint(1 << blockSizePower)
|
||||
|
||||
return &hashedArrayTree{
|
||||
mask: blockSize - 1,
|
||||
maskShift: blockSizePower,
|
||||
blockSize: blockSize,
|
||||
size: 0,
|
||||
blockList: make([][]indexEntry, blockSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hashedArrayTree) Alloc() (*indexEntry, uint) {
|
||||
h.grow()
|
||||
size := h.size
|
||||
idx, subIdx := h.index(size)
|
||||
h.size++
|
||||
return &h.blockList[idx][subIdx], size
|
||||
}
|
||||
|
||||
func (h *hashedArrayTree) index(pos uint) (idx uint, subIdx uint) {
|
||||
subIdx = pos & h.mask
|
||||
idx = pos >> h.maskShift
|
||||
return
|
||||
}
|
||||
|
||||
func (h *hashedArrayTree) Ref(pos uint) *indexEntry {
|
||||
if pos >= h.size {
|
||||
panic("array index out of bounds")
|
||||
}
|
||||
|
||||
idx, subIdx := h.index(pos)
|
||||
return &h.blockList[idx][subIdx]
|
||||
}
|
||||
|
||||
func (h *hashedArrayTree) Size() uint {
|
||||
return h.size
|
||||
}
|
||||
|
||||
func (h *hashedArrayTree) grow() {
|
||||
idx, subIdx := h.index(h.size)
|
||||
if int(idx) == len(h.blockList) {
|
||||
// blockList is too small -> double list and block size
|
||||
h.blockSize *= 2
|
||||
h.mask = h.mask*2 + 1
|
||||
h.maskShift++
|
||||
idx = idx / 2
|
||||
|
||||
oldBlocks := h.blockList
|
||||
h.blockList = make([][]indexEntry, h.blockSize)
|
||||
|
||||
// pairwise merging of blocks
|
||||
for i := 0; i < len(oldBlocks); i += 2 {
|
||||
block := make([]indexEntry, 0, h.blockSize)
|
||||
block = append(block, oldBlocks[i]...)
|
||||
block = append(block, oldBlocks[i+1]...)
|
||||
h.blockList[i/2] = block
|
||||
// allow GC
|
||||
oldBlocks[i] = nil
|
||||
oldBlocks[i+1] = nil
|
||||
}
|
||||
}
|
||||
if subIdx == 0 {
|
||||
// new index entry batch
|
||||
h.blockList[idx] = make([]indexEntry, h.blockSize)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -108,6 +108,21 @@ func TestIndexMapForeachWithID(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestHashedArrayTree(t *testing.T) {
|
||||
hat := newHAT()
|
||||
const testSize = 1024
|
||||
for i := uint(0); i < testSize; i++ {
|
||||
rtest.Assert(t, hat.Size() == i, "expected hat size %v got %v", i, hat.Size())
|
||||
e, idx := hat.Alloc()
|
||||
rtest.Assert(t, idx == i, "expected entry at idx %v got %v", i, idx)
|
||||
e.length = uint32(i)
|
||||
}
|
||||
for i := uint(0); i < testSize; i++ {
|
||||
e := hat.Ref(i)
|
||||
rtest.Assert(t, e.length == uint32(i), "expected entry to contain %v got %v", uint32(i), e.length)
|
||||
}
|
||||
}
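The motivation behind hashedArrayTree and the switch from *indexEntry to uint handles is garbage-collector pressure: with entries stored in a few large []indexEntry blocks and addressed by integer indices, the GC no longer has to trace one pointer per blob, which is what BenchmarkMasterIndexGC below measures. The position-to-block mapping is plain bit arithmetic; a worked example for the initial block size of 4 (blockSizePower == 2), written as an in-package test fragment (index is unexported, so this only compiles next to the code above, with "testing" imported):

// Fragment for the package's own tests.
func TestHATIndexArithmetic(t *testing.T) {
	h := newHAT() // blockSize 4, mask 0b11, maskShift 2

	idx, subIdx := h.index(6) // 6 = 0b110 -> block 1, offset 2
	if idx != 1 || subIdx != 2 {
		t.Fatalf("expected block 1 offset 2, got block %d offset %d", idx, subIdx)
	}
}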
|
||||
|
||||
func BenchmarkIndexMapHash(b *testing.B) {
|
||||
var m indexMap
|
||||
m.add(restic.ID{}, 0, 0, 0, 0) // Trigger lazy initialization.
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -323,6 +324,17 @@ func BenchmarkMasterIndexEach(b *testing.B) {
|
|||
}
|
||||
}
|
||||
|
||||
func BenchmarkMasterIndexGC(b *testing.B) {
|
||||
mIdx, _ := createRandomMasterIndex(b, rand.New(rand.NewSource(0)), 100, 10000)
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
runtime.GC()
|
||||
}
|
||||
runtime.KeepAlive(mIdx)
|
||||
}
|
||||
|
||||
var (
|
||||
snapshotTime = time.Unix(1470492820, 207401672)
|
||||
depth = 3
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
|
@ -601,6 +602,9 @@ func (r *Repository) LoadIndex(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Trigger GC to reset garbage collection threshold
|
||||
runtime.GC()
|
||||
|
||||
if r.cfg.Version < 2 {
|
||||
// sanity check
|
||||
ctx, cancel := context.WithCancel(ctx)
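Decoding all index files churns through a large volume of short-lived allocations; the explicit runtime.GC() presumably frees that garbage right away and lets the pacer re-base its next-GC target on the much smaller live heap rather than on the post-load peak. A generic, stand-alone illustration of the effect (not restic code):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Simulate a load phase that produces a large amount of temporary garbage.
	for i := 0; i < 1000; i++ {
		_ = make([]byte, 1<<20)
	}

	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	runtime.GC()
	runtime.ReadMemStats(&after)

	fmt.Printf("heap in use: %d -> %d bytes\n", before.HeapInuse, after.HeapInuse)
}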
|
||||
|
|
|
@ -346,6 +346,7 @@ func benchmarkLoadIndex(b *testing.B, version uint) {
|
|||
},
|
||||
})
|
||||
}
|
||||
idx.Finalize()
|
||||
|
||||
id, err := index.SaveIndex(context.TODO(), repo, idx)
|
||||
rtest.OK(b, err)
|
||||
|
|
|
@ -83,5 +83,5 @@ type FileInfo struct {
|
|||
|
||||
// ApplyEnvironmenter fills in a backend configuration from the environment
|
||||
type ApplyEnvironmenter interface {
|
||||
ApplyEnvironment(prefix string) error
|
||||
ApplyEnvironment(prefix string)
|
||||
}
|
||||
|
|
|
@ -609,10 +609,6 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error {
|
|||
}
|
||||
|
||||
func (node *Node) fillExtendedAttributes(path string) error {
|
||||
if node.Type == "symlink" {
|
||||
return nil
|
||||
}
|
||||
|
||||
xattrs, err := Listxattr(path)
|
||||
debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err)
|
||||
if err != nil {
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -163,58 +164,99 @@ var nodeTests = []restic.Node{
|
|||
AccessTime: parseTime("2005-05-14 21:07:04.222"),
|
||||
ChangeTime: parseTime("2005-05-14 21:07:05.333"),
|
||||
},
|
||||
{
|
||||
Name: "testXattrFile",
|
||||
Type: "file",
|
||||
Content: restic.IDs{},
|
||||
UID: uint32(os.Getuid()),
|
||||
GID: uint32(os.Getgid()),
|
||||
Mode: 0604,
|
||||
ModTime: parseTime("2005-05-14 21:07:03.111"),
|
||||
AccessTime: parseTime("2005-05-14 21:07:04.222"),
|
||||
ChangeTime: parseTime("2005-05-14 21:07:05.333"),
|
||||
ExtendedAttributes: []restic.ExtendedAttribute{
|
||||
{"user.foo", []byte("bar")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "testXattrDir",
|
||||
Type: "dir",
|
||||
Subtree: nil,
|
||||
UID: uint32(os.Getuid()),
|
||||
GID: uint32(os.Getgid()),
|
||||
Mode: 0750 | os.ModeDir,
|
||||
ModTime: parseTime("2005-05-14 21:07:03.111"),
|
||||
AccessTime: parseTime("2005-05-14 21:07:04.222"),
|
||||
ChangeTime: parseTime("2005-05-14 21:07:05.333"),
|
||||
ExtendedAttributes: []restic.ExtendedAttribute{
|
||||
{"user.foo", []byte("bar")},
|
||||
},
|
||||
},
|
||||
}
|
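The two new xattr-bearing entries are created relative to the working directory rather than under the shared tempdir, since the tempdir may sit on a filesystem without extended-attribute support; the test removes them explicitly afterwards. A hedged probe one could use to detect support up front — the helper name is hypothetical and it assumes the package's exported Setxattr function, which only exists on xattr-capable platforms:

// Hypothetical helper, not part of this change.
func supportsXattr(t *testing.T, dir string) bool {
	f, err := os.CreateTemp(dir, "xattr-probe-")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = f.Close()
		_ = os.Remove(f.Name())
	}()
	// Assumption: restic.Setxattr(path, name, data) is available on this platform.
	return restic.Setxattr(f.Name(), "user.probe", []byte("1")) == nil
}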
||||
|
||||
func TestNodeRestoreAt(t *testing.T) {
|
||||
tempdir, err := os.MkdirTemp(rtest.TestTempDir, "restic-test-")
|
||||
rtest.OK(t, err)
|
||||
|
||||
defer func() {
|
||||
if rtest.TestCleanupTempDirs {
|
||||
rtest.RemoveAll(t, tempdir)
|
||||
} else {
|
||||
t.Logf("leaving tempdir at %v", tempdir)
|
||||
}
|
||||
}()
|
||||
tempdir := t.TempDir()
|
||||
|
||||
for _, test := range nodeTests {
|
||||
nodePath := filepath.Join(tempdir, test.Name)
|
||||
rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil))
|
||||
rtest.OK(t, test.RestoreMetadata(nodePath))
|
||||
t.Run("", func(t *testing.T) {
|
||||
var nodePath string
|
||||
if test.ExtendedAttributes != nil {
|
||||
if runtime.GOOS == "windows" {
|
||||
// restic does not support xattrs on windows
|
||||
return
|
||||
}
|
||||
|
||||
if test.Type == "dir" {
|
||||
rtest.OK(t, test.RestoreTimestamps(nodePath))
|
||||
}
|
||||
// tempdir might be backed by a filesystem that does not support
|
||||
// extended attributes
|
||||
nodePath = test.Name
|
||||
defer func() {
|
||||
_ = os.Remove(nodePath)
|
||||
}()
|
||||
} else {
|
||||
nodePath = filepath.Join(tempdir, test.Name)
|
||||
}
|
||||
rtest.OK(t, test.CreateAt(context.TODO(), nodePath, nil))
|
||||
rtest.OK(t, test.RestoreMetadata(nodePath))
|
||||
|
||||
fi, err := os.Lstat(nodePath)
|
||||
rtest.OK(t, err)
|
||||
if test.Type == "dir" {
|
||||
rtest.OK(t, test.RestoreTimestamps(nodePath))
|
||||
}
|
||||
|
||||
n2, err := restic.NodeFromFileInfo(nodePath, fi)
|
||||
rtest.OK(t, err)
|
||||
fi, err := os.Lstat(nodePath)
|
||||
rtest.OK(t, err)
|
||||
|
||||
rtest.Assert(t, test.Name == n2.Name,
|
||||
"%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name)
|
||||
rtest.Assert(t, test.Type == n2.Type,
|
||||
"%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type)
|
||||
rtest.Assert(t, test.Size == n2.Size,
|
||||
"%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size)
|
||||
n2, err := restic.NodeFromFileInfo(nodePath, fi)
|
||||
rtest.OK(t, err)
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
rtest.Assert(t, test.UID == n2.UID,
|
||||
"%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID)
|
||||
rtest.Assert(t, test.GID == n2.GID,
|
||||
"%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID)
|
||||
if test.Type != "symlink" {
|
||||
// On OpenBSD only root can set sticky bit (see sticky(8)).
|
||||
if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && runtime.GOOS != "solaris" && test.Name == "testSticky" {
|
||||
rtest.Assert(t, test.Mode == n2.Mode,
|
||||
"%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode)
|
||||
rtest.Assert(t, test.Name == n2.Name,
|
||||
"%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name)
|
||||
rtest.Assert(t, test.Type == n2.Type,
|
||||
"%v: type doesn't match (%v != %v)", test.Type, test.Type, n2.Type)
|
||||
rtest.Assert(t, test.Size == n2.Size,
|
||||
"%v: size doesn't match (%v != %v)", test.Size, test.Size, n2.Size)
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
rtest.Assert(t, test.UID == n2.UID,
|
||||
"%v: UID doesn't match (%v != %v)", test.Type, test.UID, n2.UID)
|
||||
rtest.Assert(t, test.GID == n2.GID,
|
||||
"%v: GID doesn't match (%v != %v)", test.Type, test.GID, n2.GID)
|
||||
if test.Type != "symlink" {
|
||||
// On OpenBSD only root can set sticky bit (see sticky(8)).
|
||||
if runtime.GOOS != "openbsd" && runtime.GOOS != "netbsd" && runtime.GOOS != "solaris" && test.Name == "testSticky" {
|
||||
rtest.Assert(t, test.Mode == n2.Mode,
|
||||
"%v: mode doesn't match (0%o != 0%o)", test.Type, test.Mode, n2.Mode)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime)
|
||||
AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime)
|
||||
AssertFsTimeEqual(t, "AccessTime", test.Type, test.AccessTime, n2.AccessTime)
|
||||
AssertFsTimeEqual(t, "ModTime", test.Type, test.ModTime, n2.ModTime)
|
||||
if len(n2.ExtendedAttributes) == 0 {
|
||||
n2.ExtendedAttributes = nil
|
||||
}
|
||||
rtest.Assert(t, reflect.DeepEqual(test.ExtendedAttributes, n2.ExtendedAttributes),
|
||||
"%v: xattrs don't match (%v != %v)", test.Name, test.ExtendedAttributes, n2.ExtendedAttributes)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -5,10 +5,13 @@ package restic

import (
"os"
"path/filepath"
"runtime"
"syscall"
"testing"
"time"

rtest "github.com/restic/restic/internal/test"
)

func stat(t testing.TB, filename string) (fi os.FileInfo, ok bool) {

@ -25,6 +28,7 @@ func stat(t testing.TB, filename string) (fi os.FileInfo, ok bool) {
}

func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) {
t.Helper()
if uint32(node.Mode.Perm()) != uint32(stat.Mode&0777) {
t.Errorf("Mode does not match, want %v, got %v", stat.Mode&0777, node.Mode)
}

@ -37,7 +41,7 @@ func checkFile(t testing.TB, stat *syscall.Stat_t, node *Node) {
t.Errorf("Dev does not match, want %v, got %v", stat.Dev, node.DeviceID)
}

if node.Size != uint64(stat.Size) {
if node.Size != uint64(stat.Size) && node.Type != "symlink" {
t.Errorf("Size does not match, want %v, got %v", stat.Size, node.Size)
}

@ -83,6 +87,10 @@ func checkDevice(t testing.TB, stat *syscall.Stat_t, node *Node) {
}

func TestNodeFromFileInfo(t *testing.T) {
tmp := t.TempDir()
symlink := filepath.Join(tmp, "symlink")
rtest.OK(t, os.Symlink("target", symlink))

type Test struct {
filename string
canSkip bool

@ -90,6 +98,7 @@ func TestNodeFromFileInfo(t *testing.T) {
var tests = []Test{
{"node_test.go", false},
{"/dev/sda", true},
{symlink, false},
}

// on darwin, users are not permitted to list the extended attributes of

@ -125,7 +134,7 @@ func TestNodeFromFileInfo(t *testing.T) {
}

switch node.Type {
case "file":
case "file", "symlink":
checkFile(t, s, node)
case "dev", "chardev":
checkFile(t, s, node)

@ -13,20 +13,20 @@ import (

// Getxattr retrieves extended attribute data associated with path.
func Getxattr(path, name string) ([]byte, error) {
b, err := xattr.Get(path, name)
b, err := xattr.LGet(path, name)
return b, handleXattrErr(err)
}

// Listxattr retrieves a list of names of extended attributes associated with the
// given path in the file system.
func Listxattr(path string) ([]string, error) {
l, err := xattr.List(path)
l, err := xattr.LList(path)
return l, handleXattrErr(err)
}

// Setxattr associates name and data together as an attribute of path.
func Setxattr(path, name string, data []byte) error {
return handleXattrErr(xattr.Set(path, name, data))
return handleXattrErr(xattr.LSet(path, name, data))
}

func handleXattrErr(err error) error {

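Switching to the L-variants of github.com/pkg/xattr (LGet, LList, LSet) makes these wrappers operate on a symlink itself rather than following it to its target. A small sketch of how the wrappers compose, assuming it lives in the same package; the copyXattrs helper and the idea of copying attributes between two paths are purely illustrative:

// copyXattrs is a hypothetical helper: it reads every extended attribute of
// src and writes it onto dst, using only the wrappers defined above, so
// symlinks are never dereferenced.
func copyXattrs(src, dst string) error {
	names, err := Listxattr(src)
	if err != nil {
		return err
	}
	for _, name := range names {
		data, err := Getxattr(src, name)
		if err != nil {
			return err
		}
		if err := Setxattr(dst, name, data); err != nil {
			return err
		}
	}
	return nil
}
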
@ -25,6 +25,8 @@ type Snapshot struct {
Tags []string `json:"tags,omitempty"`
Original *ID `json:"original,omitempty"`

ProgramVersion string `json:"program_version,omitempty"`

id *ID // plaintext ID, used during restore
}

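ProgramVersion is a new optional field; with omitempty it only appears in the snapshot JSON when set. A sketch of how a writer might populate it, assuming an in-package caller and the existing NewSnapshot constructor (the paths, hostname, and version string are placeholders, and the diff itself does not show where restic fills the field in):

// newSnapshotWithVersion is a hypothetical helper showing the new field in use.
func newSnapshotWithVersion(version string) (*Snapshot, error) {
	sn, err := NewSnapshot([]string{"/home/user"}, nil, "examplehost", time.Now())
	if err != nil {
		return nil, err
	}
	// Record which program wrote the snapshot; omitted from JSON when empty.
	sn.ProgramVersion = version
	return sn, nil
}
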
@ -183,6 +183,7 @@ type KeepReason struct {
// according to the policy p. list is sorted in the process. reasons contains
// the reasons to keep each snapshot, it is in the same order as keep.
func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reasons []KeepReason) {
// sort newest snapshots first
sort.Stable(list)

if p.Empty() {

@ -256,7 +257,9 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason
// -1 means "keep all"
if b.Count > 0 || b.Count == -1 {
val := b.bucker(cur.Time, nr)
if val != b.Last {
// also keep the oldest snapshot if the bucket has some counts left. This maximizes
// the history length kept while some counts are left.
if val != b.Last || nr == len(list)-1 {
debug.Log("keep %v %v, bucker %v, val %v\n", cur.Time, cur.id.Str(), i, val)
keepSnap = true
buckets[i].Last = val

@ -275,7 +278,7 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reason

if cur.Time.After(t) {
val := b.bucker(cur.Time, nr)
if val != b.Last {
if val != b.Last || nr == len(list)-1 {
debug.Log("keep %v, time %v, ID %v, bucker %v, val %v %v\n", b.reason, cur.Time, cur.id.Str(), i, val, b.Last)
keepSnap = true
bucketsWithin[i].Last = val

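The added || nr == len(list)-1 condition keeps the oldest snapshot in the (newest-first) list as long as the bucket still has counts left, which lengthens the kept history when there are fewer distinct buckets than the configured count. A self-contained sketch of the rule; this is a simplified re-implementation for illustration, not restic's actual types:

package main

import "fmt"

// keepMonthly mimics the bucket rule: months holds one entry per snapshot,
// newest first, and count is the configured number of monthly snapshots to keep.
func keepMonthly(months []int, count int) []int {
	var keep []int
	last := -1
	for i, m := range months {
		if count == 0 {
			break
		}
		// keep the first snapshot seen for each month, and additionally the
		// oldest snapshot overall while counts remain (the new condition)
		if m != last || i == len(months)-1 {
			keep = append(keep, m)
			last = m
			count--
		}
	}
	return keep
}

func main() {
	// six snapshots spanning three months with a policy of twelve monthly
	// snapshots: the oldest snapshot is now kept as well
	fmt.Println(keepMonthly([]int{3, 3, 2, 2, 1, 1}, 12)) // [3 2 1 1]
}
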
Some files were not shown because too many files have changed in this diff.