Mirror of https://github.com/restic/restic.git, synced 2025-03-09 00:00:02 +01:00

Merge cb746265c1 into de9a040d27
Commit: 14c3d9c183
46 changed files with 93 additions and 95 deletions

@@ -465,8 +465,8 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args []
 		Printf("Others: %5d new, %5d removed\n", stats.Added.Others, stats.Removed.Others)
 		Printf("Data Blobs: %5d new, %5d removed\n", stats.Added.DataBlobs, stats.Removed.DataBlobs)
 		Printf("Tree Blobs: %5d new, %5d removed\n", stats.Added.TreeBlobs, stats.Removed.TreeBlobs)
-		Printf("  Added: %-5s\n", ui.FormatBytes(uint64(stats.Added.Bytes)))
-		Printf("  Removed: %-5s\n", ui.FormatBytes(uint64(stats.Removed.Bytes)))
+		Printf("  Added: %-5s\n", ui.FormatBytes(stats.Added.Bytes))
+		Printf("  Removed: %-5s\n", ui.FormatBytes(stats.Removed.Bytes))
 	}

 	return nil

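Note: the hunk above removes conversions of values that are already uint64. A minimal standalone sketch (the type and field names below are invented for illustration, not restic's definitions) of why such a conversion is a no-op that linters like unconvert flag:

package main

import "fmt"

// countStats is a stand-in for the diff statistics struct; Bytes is
// already uint64, so wrapping it in uint64(...) changes nothing.
type countStats struct {
	Bytes uint64
}

func formatBytes(n uint64) string { return fmt.Sprintf("%d B", n) }

func main() {
	stats := countStats{Bytes: 4096}
	fmt.Println(formatBytes(uint64(stats.Bytes))) // redundant conversion
	fmt.Println(formatBytes(stats.Bytes))         // identical result
}
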
@@ -116,7 +116,7 @@ func (p *jsonLsPrinter) Snapshot(sn *restic.Snapshot) error {
 	})
 }

-// Print node in our custom JSON format, followed by a newline.
+// Node formats node in our custom JSON format, followed by a newline.
 func (p *jsonLsPrinter) Node(path string, node *restic.Node, isPrefixDirectory bool) error {
 	if isPrefixDirectory {
 		return nil

@@ -175,7 +175,7 @@ type ncduLsPrinter struct {
 	depth int
 }

-// lsSnapshotNcdu prints a restic snapshot in Ncdu save format.
+// Snapshot prints a restic snapshot in Ncdu save format.
 // It opens the JSON list. Nodes are added with lsNodeNcdu and the list is closed by lsCloseNcdu.
 // Format documentation: https://dev.yorhel.nl/ncdu/jsonfmt
 func (p *ncduLsPrinter) Snapshot(sn *restic.Snapshot) error {

@@ -104,7 +104,7 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
 		}

 		ok := true
-		var newContent restic.IDs = restic.IDs{}
+		var newContent = restic.IDs{}
 		var newSize uint64
 		// check all contents and remove if not available
 		for _, id := range node.Content {

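The hunk above drops an explicit type from a var declaration whose initializer already fixes the type. A small sketch (the IDs slice type here is a stand-in, not restic.IDs) of the equivalent forms:

package main

import "fmt"

// IDs is a stand-in for a named slice type such as restic.IDs.
type IDs []string

func main() {
	var a IDs = IDs{} // explicit type repeats what the initializer already says
	var b = IDs{}     // type inferred from the initializer
	c := IDs{}        // short form, same type again

	fmt.Printf("%T %T %T\n", a, b, c) // all three print main.IDs
}
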
@@ -299,7 +299,7 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer,
 func makeFileIDByContents(node *restic.Node) fileID {
 	var bb []byte
 	for _, c := range node.Content {
-		bb = append(bb, []byte(c[:])...)
+		bb = append(bb, c[:]...)
 	}
 	return sha256.Sum256(bb)
 }

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"runtime"
 	"strconv"

@@ -38,8 +39,6 @@ import (

 	"github.com/restic/restic/internal/errors"

-	"os/exec"
-
 	"golang.org/x/term"
 )

@@ -28,11 +28,11 @@ func (e *dirEntry) equals(out io.Writer, other *dirEntry) bool {
 	return true
 }

-func nlink(info os.FileInfo) uint64 {
+func nlink(_ os.FileInfo) uint64 {
 	return 1
 }

-func inode(info os.FileInfo) uint64 {
+func inode(_ os.FileInfo) uint64 {
 	return uint64(0)
 }

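Several hunks in this commit rename unused parameters to the blank identifier. A minimal sketch of the pattern (the helper below is illustrative, not the function from the diff):

package main

import (
	"fmt"
	"os"
)

// nlinkStub must match a platform-specific signature, but this
// implementation never looks at the file info, so the parameter is
// named _ to make that explicit and to satisfy unused-parameter linters.
func nlinkStub(_ os.FileInfo) uint64 { return 1 }

func main() {
	fi, err := os.Stat(".")
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Println("hard links reported:", nlinkStub(fi))
}
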
@@ -82,10 +82,10 @@ func printProgress(status string, final bool) {
 		}
 	}

-	var carriageControl, clear string
+	var carriageControl, cl string

 	if canUpdateStatus {
-		clear = clearLine(w)
+		cl = clearLine(w)
 	}

 	if !(strings.HasSuffix(status, "\r") || strings.HasSuffix(status, "\n")) {

@@ -96,7 +96,7 @@ func printProgress(status string, final bool) {
 		}
 	}

-	_, _ = os.Stdout.Write([]byte(clear + status + carriageControl))
+	_, _ = os.Stdout.Write([]byte(cl + status + carriageControl))
 	if final {
 		_, _ = os.Stdout.Write([]byte("\n"))
 	}

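The clear → cl rename above avoids giving a local variable the same name as the predeclared clear built-in added in Go 1.21. A short sketch of the problem the rename sidesteps (the escape sequence is only a stand-in for clearLine):

package main

import "fmt"

func main() {
	// Using "cl" instead of "clear" keeps the Go 1.21 built-in clear()
	// usable in the same scope; a variable named clear would shadow it.
	cl := "\x1b[2K" // stand-in for the terminal "erase line" sequence

	seen := map[string]int{"a": 1, "b": 2}
	clear(seen) // built-in still available because nothing shadows it

	fmt.Printf("%sstatus: %d entries left\n", cl, len(seen))
}
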
@@ -1006,11 +1006,11 @@ func TestArchiverSaveTree(t *testing.T) {
 	}{
 		{
 			src: TestDir{
-				"targetfile": TestFile{Content: string("foobar")},
+				"targetfile": TestFile{Content: "foobar"},
 			},
 			targets: []string{"targetfile"},
 			want: TestDir{
-				"targetfile": TestFile{Content: string("foobar")},
+				"targetfile": TestFile{Content: "foobar"},
 			},
 			stat: Summary{
 				ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0},

@@ -1021,12 +1021,12 @@ func TestArchiverSaveTree(t *testing.T) {
 		},
 		{
 			src: TestDir{
-				"targetfile": TestFile{Content: string("foobar")},
+				"targetfile": TestFile{Content: "foobar"},
 			},
 			prepare: symlink("targetfile", "filesymlink"),
 			targets: []string{"targetfile", "filesymlink"},
 			want: TestDir{
-				"targetfile": TestFile{Content: string("foobar")},
+				"targetfile": TestFile{Content: "foobar"},
 				"filesymlink": TestSymlink{Target: "targetfile"},
 			},
 			stat: Summary{

@@ -1041,10 +1041,10 @@ func TestArchiverSaveTree(t *testing.T) {
 				"dir": TestDir{
 					"subdir": TestDir{
 						"subsubdir": TestDir{
-							"targetfile": TestFile{Content: string("foobar")},
+							"targetfile": TestFile{Content: "foobar"},
 						},
 					},
-					"otherfile": TestFile{Content: string("xxx")},
+					"otherfile": TestFile{Content: "xxx"},
 				},
 			},
 			prepare: symlink("subdir", filepath.FromSlash("dir/symlink")),

@@ -1066,10 +1066,10 @@ func TestArchiverSaveTree(t *testing.T) {
 				"dir": TestDir{
 					"subdir": TestDir{
 						"subsubdir": TestDir{
-							"targetfile": TestFile{Content: string("foobar")},
+							"targetfile": TestFile{Content: "foobar"},
 						},
 					},
-					"otherfile": TestFile{Content: string("xxx")},
+					"otherfile": TestFile{Content: "xxx"},
 				},
 			},
 			prepare: symlink("subdir", filepath.FromSlash("dir/symlink")),

@@ -1078,7 +1078,7 @@ func TestArchiverSaveTree(t *testing.T) {
 				"dir": TestDir{
 					"symlink": TestDir{
 						"subsubdir": TestDir{
-							"targetfile": TestFile{Content: string("foobar")},
+							"targetfile": TestFile{Content: "foobar"},
 						},
 					},
 				},

@@ -1696,8 +1696,8 @@ func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) {
 	rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed, "TotalFilesProcessed")
 	bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs))
 	bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs))
-	bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded))
-	bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedPacked))
+	bothZeroOrNeither(t, stat.DataSize+stat.TreeSize, sn.Summary.DataAdded)
+	bothZeroOrNeither(t, stat.DataSizeInRepo+stat.TreeSizeInRepo, sn.Summary.DataAddedPacked)
 }

 func TestArchiverParent(t *testing.T) {

@@ -2495,7 +2495,7 @@ type missingFS struct {
 	errorOnOpen bool
 }

-func (fs *missingFS) OpenFile(name string, flag int, metadataOnly bool) (fs.File, error) {
+func (fs *missingFS) OpenFile(_ string, _ int, _ bool) (fs.File, error) {
 	if fs.errorOnOpen {
 		return nil, os.ErrNotExist
 	}

@@ -30,7 +30,7 @@ func createTestFiles(t testing.TB, num int) (files []string) {
 	return files
 }

-func startFileSaver(ctx context.Context, t testing.TB, fsInst fs.FS) (*fileSaver, context.Context, *errgroup.Group) {
+func startFileSaver(ctx context.Context, t testing.TB, _ fs.FS) (*fileSaver, context.Context, *errgroup.Group) {
 	wg, ctx := errgroup.WithContext(ctx)

 	saveBlob := func(ctx context.Context, tpe restic.BlobType, buf *buffer, _ string, cb func(saveBlobResponse)) {

@@ -421,10 +421,10 @@ func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend
 		prefix += "/"
 	}

-	max := int32(be.listMaxItems)
+	maxI := int32(be.listMaxItems)

 	opts := &azContainer.ListBlobsFlatOptions{
-		MaxResults: &max,
+		MaxResults: &maxI,
 		Prefix: &prefix,
 	}
 	lister := be.container.NewListBlobsFlatPager(opts)

@@ -340,7 +340,7 @@ func (be *Backend) List(ctx context.Context, t backend.FileType, fn func(backend

 		fi := backend.FileInfo{
 			Name: path.Base(m),
-			Size: int64(attrs.Size),
+			Size: attrs.Size,
 		}

 		err = fn(fi)

@@ -20,7 +20,7 @@ type Limiter interface {
 	// for downloads.
 	Downstream(r io.Reader) io.Reader

-	// Downstream returns a rate limited reader that is intended to be used
+	// DownstreamWriter returns a rate limited reader that is intended to be used
 	// for downloads.
 	DownstreamWriter(r io.Writer) io.Writer

@@ -36,7 +36,7 @@ func TestLimiterWrapping(t *testing.T) {

 func TestReadLimiter(t *testing.T) {
 	reader := bytes.NewReader(make([]byte, 300))
-	limiter := rate.NewLimiter(rate.Limit(10000), int(100))
+	limiter := rate.NewLimiter(rate.Limit(10000), 100)
 	limReader := rateLimitedReader{reader, limiter}

 	n, err := limReader.Read([]byte{})

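The int(100) → 100 change works because an untyped constant already adapts to the parameter type; rate.NewLimiter's burst parameter is a plain int, so the conversion adds nothing. A small sketch of that rule (assumes golang.org/x/time is available in the module):

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// 100 is an untyped constant; it is implicitly converted to the int
	// burst parameter, so writing int(100) is redundant.
	a := rate.NewLimiter(rate.Limit(10000), int(100))
	b := rate.NewLimiter(rate.Limit(10000), 100)

	fmt.Println(a.Burst() == b.Burst()) // true
}
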
@@ -54,7 +54,7 @@ func TestReadLimiter(t *testing.T) {

 func TestWriteLimiter(t *testing.T) {
 	writer := &bytes.Buffer{}
-	limiter := rate.NewLimiter(rate.Limit(10000), int(100))
+	limiter := rate.NewLimiter(rate.Limit(10000), 100)
 	limReader := rateLimitedWriter{writer, limiter}

 	n, err := limReader.Write([]byte{})

@@ -5,14 +5,14 @@ import (
 )

 // Can't explicitly flush directory changes on Windows.
-func fsyncDir(dir string) error { return nil }
+func fsyncDir(_ string) error { return nil }

 // Windows is not macOS.
-func isMacENOTTY(err error) bool { return false }
+func isMacENOTTY(_ error) bool { return false }

 // We don't modify read-only on windows,
 // since it will make us unable to delete the file,
 // and this isn't common practice on this platform.
-func setFileReadonly(f string, mode os.FileMode) error {
+func setFileReadonly(_ string, _ os.FileMode) error {
 	return nil
 }

@@ -304,7 +304,7 @@ func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.Rewind
 		opts.StorageClass = be.cfg.StorageClass
 	}

-	info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), int64(rd.Length()), opts)
+	info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), rd.Length(), opts)

 	// sanity check
 	if err == nil && info.Size != rd.Length() {

@@ -101,7 +101,7 @@ func countingBlocker() (func(), func(int) int) {
 }

 func concurrencyTester(t *testing.T, setup func(m *mock.Backend), handler func(be backend.Backend) func() error, unblock func(int) int, isUnlimited bool) {
-	expectBlocked := int(2)
+	expectBlocked := 2
 	workerCount := expectBlocked + 1

 	m := mock.NewBackend()

@@ -47,8 +47,8 @@ func SplitShellStrings(data string) (strs []string, err error) {

 	// derived from strings.SplitFunc
 	fieldStart := -1 // Set to -1 when looking for start of field.
-	for i, rune := range data {
-		if s.isSplitChar(rune) {
+	for i, r := range data {
+		if s.isSplitChar(r) {
 			if fieldStart >= 0 {
 				strs = append(strs, data[fieldStart:i])
 				fieldStart = -1

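The rename above stops the loop variable from shadowing the predeclared rune type. A compact, self-contained sketch of ranging over a string with the conventional name r (the splitting logic is simplified from the original):

package main

import "fmt"

func main() {
	data := "foo  bar\tbaz"
	// Ranging over a string yields byte offsets and runes; naming the
	// value "rune" would shadow the predeclared rune type, so "r" is used.
	fieldStart := -1
	var fields []string
	for i, r := range data {
		if r == ' ' || r == '\t' {
			if fieldStart >= 0 {
				fields = append(fields, data[fieldStart:i])
				fieldStart = -1
			}
		} else if fieldStart < 0 {
			fieldStart = i
		}
	}
	if fieldStart >= 0 {
		fields = append(fields, data[fieldStart:])
	}
	fmt.Printf("%q\n", fields) // ["foo" "bar" "baz"]
}
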
@@ -696,7 +696,7 @@ var testStrings = []struct {
 func store(t testing.TB, b backend.Backend, tpe backend.FileType, data []byte) backend.Handle {
 	id := restic.Hash(data)
 	h := backend.Handle{Name: id.String(), Type: tpe}
-	err := b.Save(context.TODO(), h, backend.NewByteReader([]byte(data), b.Hasher()))
+	err := b.Save(context.TODO(), h, backend.NewByteReader(data, b.Hasher()))
 	test.OK(t, err)
 	return h
 }

@@ -32,7 +32,7 @@ func TestDefaultLoad(t *testing.T) {
 	// happy case, assert correct parameters are passed around and content stream is closed
 	err := util.DefaultLoad(context.TODO(), h, 10, 11, func(ctx context.Context, ih backend.Handle, length int, offset int64) (io.ReadCloser, error) {
 		rtest.Equals(t, h, ih)
-		rtest.Equals(t, int(10), length)
+		rtest.Equals(t, 10, length)
 		rtest.Equals(t, int64(11), offset)

 		return rd, nil

@@ -12,7 +12,7 @@ import (
 // ```
 // defer TestSetFlag(t, features.Flags, features.ExampleFlag, true)()
 // ```
-func TestSetFlag(t *testing.T, f *FlagSet, flag FlagName, value bool) func() {
+func TestSetFlag(_ *testing.T, f *FlagSet, flag FlagName, value bool) func() {
 	current := f.Enabled(flag)

 	panicIfCalled := func(msg string) {

@@ -150,7 +150,7 @@ func getFileEA(handle windows.Handle, iosb *ioStatusBlock, buf *uint8, bufLen ui
 	if restartScan {
 		_p1 = 1
 	}
-	r0, _, _ := syscall.SyscallN(procNtQueryEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(_p0), uintptr(eaList), uintptr(eaListLen), uintptr(unsafe.Pointer(eaIndex)), uintptr(_p1))
+	r0, _, _ := syscall.SyscallN(procNtQueryEaFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(buf)), uintptr(bufLen), uintptr(_p0), eaList, uintptr(eaListLen), uintptr(unsafe.Pointer(eaIndex)), uintptr(_p1))
 	status = ntStatus(r0)
 	return
 }

@@ -105,7 +105,7 @@ func clearAttribute(path string, attribute uint32) error {
 	}
 	if fileAttributes&attribute != 0 {
 		// Clear the attribute
-		fileAttributes &= ^uint32(attribute)
+		fileAttributes &= ^attribute
 		err = windows.SetFileAttributes(ptr, fileAttributes)
 		if err != nil {
 			return err

@@ -14,7 +14,7 @@ import (
 	"testing"
 	"time"

-	ole "github.com/go-ole/go-ole"
+	"github.com/go-ole/go-ole"
 	"github.com/restic/restic/internal/options"
 	rtest "github.com/restic/restic/internal/test"
 )

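The alias dropped above is redundant because the go-ole module's package already names itself ole. The same rule shown with a standard-library package (purely illustrative):

package main

// An import alias that repeats the package's own name adds nothing:
// `ole "github.com/go-ole/go-ole"` is equivalent to the plain import,
// just as the alias below is equivalent to importing "fmt" directly.
import fmt "fmt"

func main() {
	fmt.Println("alias equals package name, so it is redundant")
}
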
@@ -211,6 +211,6 @@ func securityDescriptorBytesToStruct(sd []byte) (*windows.SECURITY_DESCRIPTOR, e
 // securityDescriptorStructToBytes converts the pointer to windows SECURITY_DESCRIPTOR
 // into a security descriptor bytes representation.
 func securityDescriptorStructToBytes(sd *windows.SECURITY_DESCRIPTOR) ([]byte, error) {
-	b := unsafe.Slice((*byte)(unsafe.Pointer(sd)), sd.Length())
+	b := unsafe.Slice(unsafe.Pointer(sd), sd.Length())
 	return b, nil
 }

@@ -7,6 +7,6 @@ import "os"

 // OS-specific replacements of setFlags can set file status flags
 // that improve I/O performance.
-func setFlags(*os.File) error {
+func setFlags(_ *os.File) error {
 	return nil
 }

@@ -13,7 +13,7 @@ import (
 	"time"
 	"unsafe"

-	ole "github.com/go-ole/go-ole"
+	"github.com/go-ole/go-ole"
 	"github.com/restic/restic/internal/errors"
 	"golang.org/x/sys/windows"
 )

@@ -19,6 +19,6 @@ type Migration interface {
 	// Name returns a short name.
 	Name() string

-	// Descr returns a description what the migration does.
+	// Desc returns a description what the migration does.
 	Desc() string
 }

@@ -14,7 +14,7 @@ type noopSaver struct{}
 func (n *noopSaver) Connections() uint {
 	return 2
 }
-func (n *noopSaver) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (restic.ID, error) {
+func (n *noopSaver) SaveUnpacked(_ context.Context, _ restic.FileType, buf []byte) (restic.ID, error) {
 	return restic.Hash(buf), nil
 }

@@ -62,7 +62,7 @@ func TestPackerManager(t *testing.T) {
 func testPackerManager(t testing.TB) int64 {
 	rnd := rand.New(rand.NewSource(randomSeed))

-	savedBytes := int(0)
+	savedBytes := 0
 	pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, DefaultPackSize, func(ctx context.Context, tp restic.BlobType, p *packer) error {
 		err := p.Finalize()
 		if err != nil {

@@ -83,7 +83,7 @@ func testPackerManager(t testing.TB) int64 {
 }

 func TestPackerManagerWithOversizeBlob(t *testing.T) {
-	packFiles := int(0)
+	packFiles := 0
 	sizeLimit := uint(512 * 1024)
 	pm := newPackerManager(crypto.NewRandomKey(), restic.DataBlob, sizeLimit, func(ctx context.Context, tp restic.BlobType, p *packer) error {
 		packFiles++

@@ -272,7 +272,7 @@ func (r *Repository) loadBlob(ctx context.Context, blobs []restic.PackedBlob, bu
 			continue
 		}

-		it := newPackBlobIterator(blob.PackID, newByteReader(buf), uint(blob.Offset), []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder())
+		it := newPackBlobIterator(blob.PackID, newByteReader(buf), blob.Offset, []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder())
 		pbv, err := it.Next()

 		if err == nil {

@@ -158,7 +158,7 @@ func BenchmarkAllVersions(b *testing.B, bench VersionedBenchmark) {
 	}
 }

-func TestNewLock(t *testing.T, repo *Repository, exclusive bool) (*restic.Lock, error) {
+func TestNewLock(_ *testing.T, repo *Repository, exclusive bool) (*restic.Lock, error) {
 	// TODO get rid of this test helper
 	return restic.NewLock(context.TODO(), &internalRepository{repo}, exclusive)
 }

@@ -18,7 +18,7 @@ import (

 // UnlockCancelDelay bounds the duration how long lock cleanup operations will wait
 // if the passed in context was canceled.
-const UnlockCancelDelay time.Duration = 1 * time.Minute
+const UnlockCancelDelay = 1 * time.Minute

 // Lock represents a process locking the repository for an operation.
 //

@@ -8,7 +8,7 @@ import (
 )

 // uidGidInt always returns 0 on Windows, since uid isn't numbers
-func uidGidInt(u *user.User) (uid, gid uint32, err error) {
+func uidGidInt(_ *user.User) (uid, gid uint32, err error) {
 	return 0, 0, nil
 }

@@ -1,6 +1,7 @@
 package restic

 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"os"

@@ -13,8 +14,6 @@ import (

 	"github.com/restic/restic/internal/errors"

-	"bytes"
-
 	"github.com/restic/restic/internal/debug"
 )

@@ -495,7 +494,7 @@ func OSAttrsToGenericAttributes(attributeType reflect.Type, attributeValuePtr *r
 		}

 		// Insert the field into the map
-		attrs[getFQKey(field, keyPrefix)] = json.RawMessage(fieldBytes)
+		attrs[getFQKey(field, keyPrefix)] = fieldBytes
 	}
 	return attrs, nil
 }

@@ -18,7 +18,7 @@ type WindowsAttributes struct {
 	SecurityDescriptor *[]byte `generic:"security_descriptor"`
 }

-// windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection
+// WindowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection
 func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs map[GenericAttributeType]json.RawMessage, err error) {
 	// Get the value of the WindowsAttributes
 	windowsAttributesValue := reflect.ValueOf(windowsAttributes)

@@ -71,19 +71,19 @@ type FileType = backend.FileType
 // in the `WriteableFileType` subset can be modified via the Repository interface.
 // All other filetypes are considered internal datastructures of the Repository.
 const (
-	PackFile FileType = backend.PackFile
-	KeyFile FileType = backend.KeyFile
-	LockFile FileType = backend.LockFile
-	SnapshotFile FileType = backend.SnapshotFile
-	IndexFile FileType = backend.IndexFile
-	ConfigFile FileType = backend.ConfigFile
+	PackFile = backend.PackFile
+	KeyFile = backend.KeyFile
+	LockFile = backend.LockFile
+	SnapshotFile = backend.SnapshotFile
+	IndexFile = backend.IndexFile
+	ConfigFile = backend.ConfigFile
 )

 // WriteableFileType defines the different data types that can be modified via SaveUnpacked or RemoveUnpacked.
 type WriteableFileType backend.FileType

 // These are the different data types that can be modified via SaveUnpacked or RemoveUnpacked.
 const (
-	WriteableSnapshotFile WriteableFileType = WriteableFileType(SnapshotFile)
+	WriteableSnapshotFile = WriteableFileType(SnapshotFile)
 )

 func (w *WriteableFileType) ToFileType() FileType {

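The constants above keep their FileType type even without the explicit annotation, because the right-hand sides such as backend.PackFile are already typed and FileType aliases backend.FileType. A self-contained sketch of that inference (the types below are invented for illustration):

package main

import "fmt"

type fileType uint8

const sourcePackFile fileType = 1 // stands in for backend.PackFile

const (
	packFileAnnotated fileType = sourcePackFile // explicit type, but redundant
	packFileInferred           = sourcePackFile // type comes from the right-hand side
)

func main() {
	// Both constants have the same concrete type.
	fmt.Printf("%T %T\n", packFileAnnotated, packFileInferred)
}
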
@@ -1,8 +1,9 @@
 package restic

 import (
-	rtest "github.com/restic/restic/internal/test"
 	"testing"
+
+	rtest "github.com/restic/restic/internal/test"
 )

 func TestTagLists_Flatten(t *testing.T) {

@@ -399,5 +399,5 @@ func (r *fileRestorer) reportBlobProgress(file *fileInfo, blobSize uint64) {
 	if file.state == nil {
 		action = restore.ActionFileRestored
 	}
-	r.progress.AddProgress(file.location, action, uint64(blobSize), uint64(file.size))
+	r.progress.AddProgress(file.location, action, blobSize, uint64(file.size))
 }

@@ -43,7 +43,7 @@ type TestRepo struct {
 	loader blobsLoaderFn
 }

-func (i *TestRepo) Lookup(tpe restic.BlobType, id restic.ID) []restic.PackedBlob {
+func (i *TestRepo) Lookup(_ restic.BlobType, id restic.ID) []restic.PackedBlob {
 	packs := i.blobs[id]
 	return packs
 }

@@ -52,7 +52,7 @@ func (i *TestRepo) fileContent(file *fileInfo) string {
 	return i.filesPathToContent[file.location]
 }

-func (i *TestRepo) StartWarmup(ctx context.Context, packs restic.IDSet) (restic.WarmupJob, error) {
+func (i *TestRepo) StartWarmup(_ context.Context, packs restic.IDSet) (restic.WarmupJob, error) {
 	job := TestWarmupJob{handlesCount: len(packs)}
 	i.warmupJobs = append(i.warmupJobs, &job)
 	return &job, nil

@@ -1004,10 +1004,10 @@ type printerMock struct {

 func (p *printerMock) Update(_ restoreui.State, _ time.Duration) {
 }
-func (p *printerMock) Error(item string, err error) error {
+func (p *printerMock) Error(_ string, _ error) error {
 	return nil
 }
-func (p *printerMock) CompleteItem(action restoreui.ItemAction, item string, size uint64) {
+func (p *printerMock) CompleteItem(_ restoreui.ItemAction, _ string, _ uint64) {
 }
 func (p *printerMock) Finish(s restoreui.State, _ time.Duration) {
 	p.s = s

@@ -53,12 +53,12 @@ func FormatDuration(d time.Duration) string {
 func FormatSeconds(sec uint64) string {
 	hours := sec / 3600
 	sec -= hours * 3600
-	min := sec / 60
-	sec -= min * 60
+	mins := sec / 60
+	sec -= mins * 60
 	if hours > 0 {
-		return fmt.Sprintf("%d:%02d:%02d", hours, min, sec)
+		return fmt.Sprintf("%d:%02d:%02d", hours, mins, sec)
 	}
-	return fmt.Sprintf("%d:%02d", min, sec)
+	return fmt.Sprintf("%d:%02d", mins, sec)
 }

 // ParseBytes parses a size in bytes from s. It understands the suffixes

@@ -29,8 +29,8 @@ func NewCounter(interval time.Duration, total uint64, report Func) *Counter {
 		max: total,
 	}
 	c.Updater = *NewUpdater(interval, func(runtime time.Duration, final bool) {
-		v, max := c.Get()
-		report(v, max, runtime, final)
+		v, maxV := c.Get()
+		report(v, maxV, runtime, final)
 	})
 	return c
 }

@@ -54,7 +54,7 @@ func TestCounter(t *testing.T) {
 	test.Assert(t, increasing, "values not increasing")
 	test.Equals(t, uint64(N), last)
 	test.Equals(t, uint64(42), lastTotal)
-	test.Equals(t, int(1), nmaxChange)
+	test.Equals(t, 1, nmaxChange)

 	t.Log("number of calls:", ncalls)
 }

@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"io"
-	"strings"

 	"text/template"

 	"github.com/restic/restic/internal/ui"

@@ -5,6 +5,6 @@ package termstatus

 // IsProcessBackground reports whether the current process is running in the
 // background. Not implemented for this platform.
-func IsProcessBackground(uintptr) bool {
+func IsProcessBackground(_ uintptr) bool {
 	return false
 }

@@ -24,34 +24,34 @@ func TestSetStatus(t *testing.T) {
 	go term.Run(ctx)

 	const (
-		clear = posixControlClearLine
-		home = posixControlMoveCursorHome
-		up = posixControlMoveCursorUp
+		cl = posixControlClearLine
+		home = posixControlMoveCursorHome
+		up = posixControlMoveCursorUp
 	)

 	term.SetStatus([]string{"first"})
-	exp := home + clear + "first" + home
+	exp := home + cl + "first" + home

 	term.SetStatus([]string{""})
-	exp += home + clear + "" + home
+	exp += home + cl + "" + home

 	term.SetStatus([]string{})
-	exp += home + clear + "" + home
+	exp += home + cl + "" + home

 	// already empty status
 	term.SetStatus([]string{})

 	term.SetStatus([]string{"foo", "bar", "baz"})
-	exp += home + clear + "foo\n" + home + clear + "bar\n" +
-		home + clear + "baz" + home + up + up
+	exp += home + cl + "foo\n" + home + cl + "bar\n" +
+		home + cl + "baz" + home + up + up

 	term.SetStatus([]string{"quux", "needs\nquote"})
-	exp += home + clear + "quux\n" +
-		home + clear + "\"needs\\nquote\"\n" +
-		home + clear + home + up + up // Clear third line
+	exp += home + cl + "quux\n" +
+		home + cl + "\"needs\\nquote\"\n" +
+		home + cl + home + up + up // Clear third line

 	cancel()
-	exp += home + clear + "\n" + home + clear + home + up // Status cleared
+	exp += home + cl + "\n" + home + cl + home + up // Status cleared

 	<-term.closed
 	rtest.Equals(t, exp, buf.String())