1
0
Fork 0
mirror of https://github.com/restic/restic.git synced 2025-03-09 00:00:02 +01:00
This commit is contained in:
Aneesh N 2025-02-24 04:22:33 +00:00 committed by GitHub
commit a735354394
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
32 changed files with 2026 additions and 448 deletions

View file

@ -0,0 +1,9 @@
Enhancement: Back up and restore Windows Alternate Data Streams
Restic previously did not back up Alternate Data Streams (ADS) on Windows. Restic now backs up Alternate Data Streams and restores them to their original main files.
The Alternate Data Streams are backed up like any other normal files, and the full name of the stream is stored as the name of the file.
During restore, the ADS are restored and attached to the original files as Alternate Data Streams.
For progress and summary, the ADS are not counted in the file counts, but the sizes of the ADS files are counted.
https://github.com/restic/restic/pull/5171
https://github.com/restic/restic/issues/1401

View file

@ -242,21 +242,21 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I
case restic.NodeTypeDir:
switch {
case previous == nil:
arch.summary.Dirs.New++
arch.summary.Dirs.incrementNewFiles(current)
case previous.Equals(*current):
arch.summary.Dirs.Unchanged++
arch.summary.Dirs.incrementUnchangedFiles(current)
default:
arch.summary.Dirs.Changed++
arch.summary.Dirs.incrementChangedFiles(current)
}
case restic.NodeTypeFile:
switch {
case previous == nil:
arch.summary.Files.New++
arch.summary.Files.incrementNewFiles(current)
case previous.Equals(*current):
arch.summary.Files.Unchanged++
arch.summary.Files.incrementUnchangedFiles(current)
default:
arch.summary.Files.Changed++
arch.summary.Files.incrementChangedFiles(current)
}
}
}
@ -320,17 +320,20 @@ func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, me
if err != nil {
return futureNode{}, err
}
pathnames := arch.preProcessPaths(dir, names)
sort.Strings(pathnames)
nodes := make([]futureNode, 0, len(names))
nodes := make([]futureNode, 0, len(pathnames))
for _, name := range names {
for _, pathname := range pathnames {
// test if context has been cancelled
if ctx.Err() != nil {
debug.Log("context has been cancelled, aborting")
return futureNode{}, ctx.Err()
}
pathname := arch.FS.Join(dir, name)
name := getNameFromPathname(pathname)
pathname := arch.processPath(dir, pathname)
oldNode := previous.Find(name)
snItem := join(snPath, name)
fn, excluded, err := arch.save(ctx, snItem, pathname, oldNode)
@ -343,7 +346,7 @@ func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, me
continue
}
return futureNode{}, err
return futureNode{}, errors.Wrap(err, "error saving a target (file or directory)")
}
if excluded {
@ -456,7 +459,11 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
if err != nil {
return futureNode{}, false, err
}
//In case of windows ADS files for checking include and excludes we use the main file which has the ADS files attached.
//For Unix, the main file is the same as there is no ADS. So targetMain is always the same as target.
//After checking the exclusion for actually processing the file, we use the full file name including ads portion if any.
targetMain := fs.SanitizeMainFileName(target)
abstargetMain := fs.SanitizeMainFileName(abstarget)
filterError := func(err error) (futureNode, bool, error) {
err = arch.error(abstarget, err)
if err != nil {
@ -493,16 +500,21 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous
}()
// get file info and run remaining select functions that require file information
fi, err := meta.Stat()
fiMain, err := meta.Stat()
if err != nil {
debug.Log("lstat() for %v returned error: %v", target, err)
// ignore if file disappeared since it was returned by readdir
return filterError(filterNotExist(err))
}
if !arch.Select(abstarget, fi, arch.FS) {
if !arch.Select(abstargetMain, fiMain, arch.FS) {
debug.Log("%v is excluded", target)
return futureNode{}, true, nil
}
var fi *fs.ExtendedFileInfo
fi, shouldReturn, fn, excluded, err := arch.processTargets(target, targetMain, abstarget, fiMain)
if shouldReturn {
return fn, excluded, err
}
switch {
case fi.Mode.IsRegular():
@ -694,11 +706,6 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *tree,
}
return futureNode{}, 0, err
}
if err != nil {
return futureNode{}, 0, err
}
if !excluded {
nodes = append(nodes, fn)
}
@ -762,13 +769,9 @@ func (arch *Archiver) dirPathToNode(snPath, target string) (node *restic.Node, e
func resolveRelativeTargets(filesys fs.FS, targets []string) ([]string, error) {
debug.Log("targets before resolving: %v", targets)
result := make([]string, 0, len(targets))
preProcessTargets(filesys, &targets)
for _, target := range targets {
if target != "" && filesys.VolumeName(target) == target {
// special case to allow users to also specify a volume name "C:" instead of a path "C:\"
target = target + filesys.Separator()
} else {
target = filesys.Clean(target)
}
target = processTarget(filesys, target)
pc, _ := pathComponents(filesys, target, false)
if len(pc) > 0 {
result = append(result, target)

View file

@ -1847,7 +1847,7 @@ func TestArchiverParent(t *testing.T) {
}
func TestArchiverErrorReporting(t *testing.T) {
ignoreErrorForBasename := func(basename string) ErrorFunc {
ignoreErrorForBasename := func(_ string) ErrorFunc {
return func(item string, err error) error {
if filepath.Base(item) == "targetfile" {
t.Logf("ignoring error for targetfile: %v", err)

View file

@ -0,0 +1,68 @@
//go:build !windows
// +build !windows
package archiver
import (
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
)
// preProcessTargets prepares the targets before the main loop runs.
// On non-Windows systems there is nothing to prepare up front — each
// target is handled individually inside the loop — so this is a no-op.
func preProcessTargets(_ fs.FS, _ *[]string) {
	// no-op on non-Windows systems
}
// processTarget normalizes a single target path inside the loop.
// On non-Windows systems the target is cleaned via the passed filesystem.
// A bare volume name (e.g. "C:") is special-cased and turned into a root
// path by appending the separator.
func processTarget(filesys fs.FS, target string) string {
	if target != "" && filesys.VolumeName(target) == target {
		// allow specifying a volume name "C:" as shorthand for "C:\"
		return target + filesys.Separator()
	}
	return filesys.Clean(target)
}
// preProcessPaths prepares the directory entries before the main loop.
// On non-Windows systems the names are returned unchanged; all per-entry
// work happens inside the loop, avoiding an extra pass up front.
func (arch *Archiver) preProcessPaths(_ string, names []string) []string {
	return names
}
// processPath builds the full path for a directory entry inside the loop.
// On non-Windows systems this is a plain join of directory and entry name.
func (arch *Archiver) processPath(dir string, name string) string {
	return arch.FS.Join(dir, name)
}
// getNameFromPathname derives the entry name from a path name.
// On non-Windows systems the two are identical, so the input is returned
// unchanged.
func getNameFromPathname(pathname string) string {
	return pathname
}
// processTargets is a no-op on non-Windows systems: the file info gathered
// by the caller is passed through unchanged and processing always continues.
func (arch *Archiver) processTargets(_ string, _ string, _ string, fiMain *fs.ExtendedFileInfo) (*fs.ExtendedFileInfo, bool, futureNode, bool, error) {
	return fiMain, false, futureNode{}, false, nil
}
// incrementNewFiles increments the new files count.
// The node argument is unused on non-Windows systems (on Windows it is used
// to skip counting ADS entries).
func (c *ChangeStats) incrementNewFiles(_ *restic.Node) {
	c.New++
}
// incrementUnchangedFiles increments the unchanged files count.
// The node argument is unused on non-Windows systems.
func (c *ChangeStats) incrementUnchangedFiles(_ *restic.Node) {
	c.Unchanged++
}
// incrementChangedFiles increments the changed files count.
// The node argument is unused on non-Windows systems.
func (c *ChangeStats) incrementChangedFiles(_ *restic.Node) {
	c.Changed++
}

View file

@ -0,0 +1,132 @@
package archiver
import (
"path/filepath"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
)
// preProcessTargets performs preprocessing of the targets before the loop.
// For Windows, it cleans each target and discovers the Alternate Data
// Streams (ADS) of each target, appending every stream as an additional
// entry to the targets slice. Each ADS is backed up as an independent node
// whose name is the full ADS name; restoring a node under that name
// automatically re-attaches the stream to its main file.
//
// NOTE(review): the cleaned target is only used for the ADS lookup below;
// it is not written back into *targets, so the original (uncleaned) entries
// remain in the slice — confirm this is intentional.
// Appending to *targets while ranging is safe here: range evaluates the
// slice once, so newly appended ADS entries are not iterated again.
func preProcessTargets(filesys fs.FS, targets *[]string) {
	for _, target := range *targets {
		if target != "" && filesys.VolumeName(target) == target {
			// special case to allow users to also specify a volume name "C:" instead of a path "C:\"
			target = target + filesys.Separator()
		} else {
			target = filesys.Clean(target)
		}
		addADSStreams(target, targets)
	}
}
// processTarget processes a single target inside the loop.
// On Windows the targets were already cleaned by preProcessTargets, so the
// only remaining work is the volume-name shorthand: a bare volume name such
// as "C:" is expanded to a root path by appending the separator.
func processTarget(filesys fs.FS, target string) string {
	if target == "" || filesys.VolumeName(target) != target {
		return target
	}
	// expand the volume-name shorthand "C:" to "C:\"
	return target + filesys.Separator()
}
// getNameFromPathname extracts the entry name from a path name.
// On Windows the pathname passed in is a full path, so the final path
// component is returned as the name.
func getNameFromPathname(pathname string) string {
	return filepath.Base(pathname)
}
// preProcessPaths prepares the directory entries before the main loop.
// On Windows the ADS entries must be present before sorting, so the full
// set of paths including all ADS paths is computed here.
func (arch *Archiver) preProcessPaths(dir string, names []string) []string {
	return arch.getPathsIncludingADS(dir, names)
}
// processPath returns the path for a directory entry inside the loop.
// On Windows the full paths were already built by preProcessPaths, so the
// value is passed through unchanged.
func (arch *Archiver) processPath(_ string, name string) string {
	return name
}
// getPathsIncludingADS builds the full path for every entry in names and,
// for each one, appends the paths of any Alternate Data Streams attached to
// it. The returned slice therefore contains all full entry paths plus their
// ADS paths.
func (arch *Archiver) getPathsIncludingADS(dir string, names []string) []string {
	paths := make([]string, 0, len(names))
	for _, entry := range names {
		full := arch.FS.Join(dir, entry)
		paths = append(paths, full)
		addADSStreams(full, &paths)
	}
	return paths
}
// addADSStreams looks up the Alternate Data Streams of pathname and appends
// the full path of each stream (pathname + stream name) to paths.
func addADSStreams(pathname string, paths *[]string) {
	success, adsStreams, err := restic.GetADStreamNames(pathname)
	if !success {
		if err != nil {
			debug.Log("No ADS found for path: %s, err: %v", pathname, err)
		}
		return
	}
	if len(adsStreams) == 0 {
		return
	}
	debug.Log("ADS Streams for file: %s, streams: %v", pathname, adsStreams)
	for _, stream := range adsStreams {
		*paths = append(*paths, pathname+stream)
	}
}
// processTargets performs the Windows-specific per-target processing.
// For an ADS entry (target differs from targetMain) the file info passed in
// belongs to the main file, so an extra Lstat is required to obtain the
// stream's own info. For the main file itself the passed-in fiMain is
// reused as-is.
//
// shouldReturn reports whether the caller must return immediately with the
// accompanying fn/excluded/err values instead of continuing to process the
// target.
func (arch *Archiver) processTargets(target string, targetMain string, abstarget string, fiMain *fs.ExtendedFileInfo) (fi *fs.ExtendedFileInfo, shouldReturn bool, fn futureNode, excluded bool, err error) {
	if target != targetMain {
		// This is an ADS entry; Lstat it to get the stream's own file info.
		fi, err = arch.FS.Lstat(target)
		if err != nil {
			debug.Log("lstat() for %v returned error: %v", target, err)
			// Report the error; arch.error may decide to ignore it (nil).
			err = arch.error(abstarget, err)
			if err != nil {
				return nil, true, futureNode{}, false, errors.WithStack(err)
			}
			// The error was ignored: mark the ADS entry as excluded so the
			// caller skips the remaining processing of this target.
			return nil, true, futureNode{}, true, nil
		}
	} else {
		fi = fiMain
	}
	return fi, false, futureNode{}, false, nil
}
// incrementNewFiles increments the new files count, but only for main
// files: ADS entries are not counted as separate files.
func (c *ChangeStats) incrementNewFiles(node *restic.Node) {
	if node.IsMainFile() {
		c.New++
	}
}
// incrementUnchangedFiles increments the unchanged files count, but only
// for main files: ADS entries are not counted as separate files.
func (c *ChangeStats) incrementUnchangedFiles(node *restic.Node) {
	if node.IsMainFile() {
		c.Unchanged++
	}
}
// incrementChangedFiles increments the changed files count, but only for
// main files: ADS entries are not counted as separate files.
func (c *ChangeStats) incrementChangedFiles(node *restic.Node) {
	if node.IsMainFile() {
		c.Changed++
	}
}

View file

@ -0,0 +1,223 @@
//go:build windows
// +build windows
package archiver
import (
"context"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/restic/restic/internal/checker"
"github.com/restic/restic/internal/filter"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
rtest "github.com/restic/restic/internal/test"
)
// TestArchiverSnapshotWithAds verifies that Windows Alternate Data Streams
// (ADS) are backed up correctly: ADS on files and directories are archived
// as independent entries named after the full stream name, exclusion
// patterns apply to the main file of each stream, and the resulting
// ItemStats match the expected values.
//
// Fixes applied in review: removed a stray "//if" comment, replaced the
// else-after-return closure body with a single boolean expression, replaced
// the non-idiomatic "var stat *ItemStats = &ItemStats{}" with ":=", and
// blanked the unused parameters of the CompleteItem callback.
func TestArchiverSnapshotWithAds(t *testing.T) {
	// The toplevel directory is not counted in the ItemStats
	var tests = []struct {
		name    string
		src     TestDir
		targets []string
		want    TestDir
		stat    ItemStats
		exclude []string
	}{
		{
			name: "Ads_directory_Basic",
			src: TestDir{
				"dir": TestDir{
					"targetfile.txt":               TestFile{Content: string("foobar")},
					"targetfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
					"targetfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
				},
			},
			targets: []string{"dir"},
			stat:    ItemStats{3, 22, 246 + 22, 2, 0, 768},
		},
		{
			name: "Ads_folder_with_dir_streams",
			src: TestDir{
				"dir": TestDir{
					":Stream1:$DATA": TestFile{Content: string("stream 1")},
					":Stream2:$DATA": TestFile{Content: string("stream 2")},
				},
			},
			targets: []string{"dir"},
			want: TestDir{
				"dir":                TestDir{},
				"dir:Stream1:$DATA":  TestFile{Content: string("stream 1")},
				"dir:Stream2:$DATA":  TestFile{Content: string("stream 2")},
			},
			stat: ItemStats{2, 16, 164 + 16, 2, 0, 563},
		},
		{
			name: "single_Ads_file",
			src: TestDir{
				"targetfile.txt":               TestFile{Content: string("foobar")},
				"targetfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
				"targetfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
			},
			targets: []string{"targetfile.txt"},
			stat:    ItemStats{3, 22, 246 + 22, 1, 0, 457},
		},
		{
			name: "Ads_all_types",
			src: TestDir{
				"dir": TestDir{
					"adsfile.txt":               TestFile{Content: string("foobar")},
					"adsfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
					"adsfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
					":dirstream1:$DATA":         TestFile{Content: string("stream 3")},
					":dirstream2:$DATA":         TestFile{Content: string("stream 4")},
				},
				"targetfile.txt":               TestFile{Content: string("foobar")},
				"targetfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
				"targetfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
			},
			want: TestDir{
				"dir": TestDir{
					"adsfile.txt":               TestFile{Content: string("foobar")},
					"adsfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
					"adsfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
				},
				"dir:dirstream1:$DATA":         TestFile{Content: string("stream 3")},
				"dir:dirstream2:$DATA":         TestFile{Content: string("stream 4")},
				"targetfile.txt":               TestFile{Content: string("foobar")},
				"targetfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
				"targetfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
			},
			targets: []string{"targetfile.txt", "dir"},
			stat:    ItemStats{5, 38, 410 + 38, 2, 0, 1133},
		},
		{
			name: "Ads_directory_exclusion",
			src: TestDir{
				"dir": TestDir{
					"adsfile.txt":               TestFile{Content: string("foobar")},
					"adsfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
					"adsfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
					":dirstream1:$DATA":         TestFile{Content: string("stream 3")},
					":dirstream2:$DATA":         TestFile{Content: string("stream 4")},
				},
				"targetfile.txt":               TestFile{Content: string("foobar")},
				"targetfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
				"targetfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
			},
			want: TestDir{
				"targetfile.txt":               TestFile{Content: string("foobar")},
				"targetfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
				"targetfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
			},
			targets: []string{"targetfile.txt", "dir"},
			exclude: []string{"*\\dir*"},
			stat:    ItemStats{3, 22, 268, 1, 0, 1133},
		},
		{
			name: "Ads_backup_file_exclusion",
			src: TestDir{
				"dir": TestDir{
					"adsfile.txt":               TestFile{Content: string("foobar")},
					"adsfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
					"adsfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
					":dirstream1:$DATA":         TestFile{Content: string("stream 3")},
					":dirstream2:$DATA":         TestFile{Content: string("stream 4")},
				},
				"targetfile.txt":               TestFile{Content: string("foobar")},
				"targetfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
				"targetfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
			},
			want: TestDir{
				"dir":                          TestDir{},
				"dir:dirstream1:$DATA":         TestFile{Content: string("stream 3")},
				"dir:dirstream2:$DATA":         TestFile{Content: string("stream 4")},
				"targetfile.txt":               TestFile{Content: string("foobar")},
				"targetfile.txt:Stream1:$DATA": TestFile{Content: string("stream 1")},
				"targetfile.txt:Stream2:$DATA": TestFile{Content: string("stream 2")},
			},
			targets: []string{"targetfile.txt", "dir"},
			exclude: []string{"*\\dir\\adsfile.txt"},
			stat:    ItemStats{5, 38, 448, 2, 0, 2150},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			tempdir, repo := prepareTempdirRepoSrc(t, test.src)

			testFS := fs.Track{FS: fs.Local{}}

			arch := New(repo, testFS, Options{})
			if len(test.exclude) != 0 {
				parsedPatterns := filter.ParsePatterns(test.exclude)
				arch.SelectByName = func(item string) bool {
					// select the item unless it matches an exclude pattern
					matched, err := filter.List(parsedPatterns, item)
					return err != nil || !matched
				}
			}

			// accumulate the per-item stats reported by the archiver
			stat := &ItemStats{}
			lock := &sync.Mutex{}
			arch.CompleteItem = func(_ string, _, _ *restic.Node, s ItemStats, _ time.Duration) {
				lock.Lock()
				defer lock.Unlock()
				stat.Add(s)
			}

			back := rtest.Chdir(t, tempdir)
			defer back()

			var targets []string
			for _, target := range test.targets {
				targets = append(targets, os.ExpandEnv(target))
			}

			sn, snapshotID, _, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now(), Excludes: test.exclude})
			if err != nil {
				t.Fatal(err)
			}

			t.Logf("saved as %v", snapshotID.Str())

			want := test.want
			if want == nil {
				want = test.src
			}

			TestEnsureSnapshot(t, repo, snapshotID, want)

			checker.TestCheckRepo(t, repo, false)

			// check that the snapshot contains the targets with absolute paths
			for i, target := range sn.Paths {
				atarget, err := filepath.Abs(test.targets[i])
				if err != nil {
					t.Fatal(err)
				}

				if target != atarget {
					t.Errorf("wrong path in snapshot: want %v, got %v", atarget, target)
				}
			}

			rtest.Equals(t, uint64(test.stat.DataBlobs), uint64(stat.DataBlobs))
			rtest.Equals(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs))
			rtest.Equals(t, test.stat.DataSize, stat.DataSize)
			rtest.Equals(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo)
		})
	}
}

View file

@ -86,11 +86,11 @@ func TestCreateFiles(t testing.TB, target string, dir TestDir) {
for _, name := range names {
item := dir[name]
targetPath := filepath.Join(target, name)
targetPath := getTargetPath(target, name)
switch it := item.(type) {
case TestFile:
err := os.WriteFile(targetPath, []byte(it.Content), 0644)
err := writeFile(t, targetPath, it.Content)
if err != nil {
t.Fatal(err)
}

View file

@ -0,0 +1,20 @@
//go:build !windows
// +build !windows
package archiver
import (
"os"
"path/filepath"
"testing"
)
// getTargetPath builds the on-disk path for a test entry name under target.
// On non-Windows systems this is a plain path join.
func getTargetPath(target string, name string) string {
	return filepath.Join(target, name)
}
// writeFile writes content to the file at targetPath, creating or
// truncating it with permissions 0644. The testing handle is unused on
// non-Windows systems.
func writeFile(_ testing.TB, targetPath string, content string) error {
	return os.WriteFile(targetPath, []byte(content), 0644)
}

View file

@ -0,0 +1,43 @@
package archiver
import (
"os"
"path/filepath"
"testing"
"github.com/restic/restic/internal/fs"
)
// getTargetPath builds the on-disk path for a test entry name under target.
// A name starting with ':' denotes an Alternate Data Stream of target
// itself (e.g. "dir" plus ":dirstream1:$DATA"), so the name is appended
// directly to target instead of being joined as a child path. Without this,
// creating "dir" and "dir:dirstream1:$DATA" at top level in arbitrary order
// could first create an empty *file* called "dir" carrying the stream,
// making the later directory creation fail.
//
// Fix: guard the index access so an empty name no longer panics.
func getTargetPath(target string, name string) string {
	if len(name) > 0 && name[0] == ':' {
		return target + name
	}
	return filepath.Join(target, name)
}
// writeFile writes content to the file at targetPath and returns any write
// or close error. On Windows the file is first opened without a create flag
// so that an already-existing file — and thus any ADS streams already
// created on it — is reused rather than replaced; only when the file does
// not exist yet is it created. Note that O_TRUNC still truncates the main
// data stream of an existing file. Open failures abort the test via t.Fatal.
func writeFile(t testing.TB, targetPath string, content string) (err error) {
	f, err := os.OpenFile(targetPath, os.O_WRONLY|os.O_TRUNC, 0644)
	if os.IsNotExist(err) {
		// the file does not exist yet: retry with the create flag
		f, err = os.OpenFile(targetPath, os.O_WRONLY|fs.O_CREATE|os.O_TRUNC, 0644)
	}
	if err != nil {
		t.Fatal(err)
	}
	_, err = f.Write([]byte(content))
	// preserve the write error; report the close error only if writing succeeded
	if err1 := f.Close(); err1 != nil && err == nil {
		err = err1
	}
	return err
}

View file

@ -5,6 +5,7 @@ import (
"strings"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
)
// ErrBadString is returned when Match is called with the empty string as the
@ -200,9 +201,9 @@ func match(pattern Pattern, strs []string) (matched bool, err error) {
for i := len(pattern.parts) - 1; i >= 0; i-- {
var ok bool
if pattern.parts[i].isSimple {
ok = pattern.parts[i].pattern == strs[offset+i]
ok = pattern.parts[i].pattern == fs.SanitizeMainFileName(strs[offset+i])
} else {
ok, err = filepath.Match(pattern.parts[i].pattern, strs[offset+i])
ok, err = filepath.Match(pattern.parts[i].pattern, fs.SanitizeMainFileName(strs[offset+i]))
if err != nil {
return false, errors.Wrap(err, "Match")
}

View file

@ -48,3 +48,9 @@ func chmod(name string, mode os.FileMode) error {
return err
}
// SanitizeMainFileName returns the name of the main file for str.
// On non-Windows systems there are no secondary streams, so the input is
// returned unchanged.
func SanitizeMainFileName(str string) string {
	return str
}

View file

@ -134,3 +134,11 @@ func openHandleForEA(nodeType restic.NodeType, path string, writeAccess bool) (h
}
return handle, err
}
// SanitizeMainFileName strips any secondary-stream suffix (such as an ADS
// name) from str and returns the name of the main file. An ADS is
// essentially part of its main file, so functionality that operates on the
// main file — filtering, for example — must derive the main file name from
// the ADS name.
func SanitizeMainFileName(str string) string {
	return restic.TrimAds(str)
}

View file

@ -192,29 +192,33 @@ func nodeRestoreGenericAttributes(node *restic.Node, path string, warn func(msg
if len(node.GenericAttributes) == 0 {
return nil
}
var errs []error
windowsAttributes, unknownAttribs, err := genericAttributesToWindowsAttrs(node.GenericAttributes)
if err != nil {
return fmt.Errorf("error parsing generic attribute for: %s : %v", path, err)
}
if windowsAttributes.CreationTime != nil {
if err := restoreCreationTime(path, windowsAttributes.CreationTime); err != nil {
errs = append(errs, fmt.Errorf("error restoring creation time for: %s : %v", path, err))
if node.IsMainFile() {
var errs []error
windowsAttributes, unknownAttribs, err := genericAttributesToWindowsAttrs(node.GenericAttributes)
if err != nil {
return fmt.Errorf("error parsing generic attribute for: %s : %v", path, err)
}
}
if windowsAttributes.FileAttributes != nil {
if err := restoreFileAttributes(path, windowsAttributes.FileAttributes); err != nil {
errs = append(errs, fmt.Errorf("error restoring file attributes for: %s : %v", path, err))
if windowsAttributes.CreationTime != nil {
if err := restoreCreationTime(path, windowsAttributes.CreationTime); err != nil {
errs = append(errs, fmt.Errorf("error restoring creation time for: %s : %v", path, err))
}
}
}
if windowsAttributes.SecurityDescriptor != nil {
if err := setSecurityDescriptor(path, windowsAttributes.SecurityDescriptor); err != nil {
errs = append(errs, fmt.Errorf("error restoring security descriptor for: %s : %v", path, err))
if windowsAttributes.FileAttributes != nil {
if err := restoreFileAttributes(path, windowsAttributes.FileAttributes); err != nil {
errs = append(errs, fmt.Errorf("error restoring file attributes for: %s : %v", path, err))
}
}
if windowsAttributes.SecurityDescriptor != nil {
if err := setSecurityDescriptor(path, windowsAttributes.SecurityDescriptor); err != nil {
errs = append(errs, fmt.Errorf("error restoring security descriptor for: %s : %v", path, err))
}
}
}
restic.HandleUnknownGenericAttributesFound(unknownAttribs, warn)
return errors.Join(errs...)
node.RemoveExtraStreams(path)
restic.HandleUnknownGenericAttributesFound(unknownAttribs, warn)
return errors.Join(errs...)
}
return nil
}
// genericAttributesToWindowsAttrs converts the generic attributes map to a WindowsAttributes and also returns a string of unknown attributes that it could not convert.
@ -341,10 +345,18 @@ func decryptFile(pathPointer *uint16) error {
// nodeFillGenericAttributes fills in the generic attributes for windows like File Attributes,
// Created time and Security Descriptors.
func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFileInfo) error {
if strings.Contains(filepath.Base(path), ":") {
// Do not process for Alternate Data Streams in Windows
return nil
func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFileInfo) (err error) {
isAds := restic.IsAds(path)
var attrs restic.WindowsAttributes
if isAds {
attrs, err = getWindowsAttributesForAds(stat, &isAds)
if err != nil {
return err
}
node.GenericAttributes, err = restic.WindowsAttrsToGenericAttributes(attrs)
// Do not process remaining generic attributes for Alternate Data Streams in Windows
// Also do not allow to process extended attributes for ADS.
return err
}
isVolume, err := isVolumePath(path)
@ -366,15 +378,49 @@ func nodeFillGenericAttributes(node *restic.Node, path string, stat *ExtendedFil
}
}
winFI := stat.sys.(*syscall.Win32FileAttributeData)
attrs, err = getWindowsAttributes(stat, sd, path, isAds)
if err != nil {
return err
}
node.GenericAttributes, err = restic.WindowsAttrsToGenericAttributes(attrs)
return err
}
// Add Windows attributes
node.GenericAttributes, err = restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{
// getWindowsAttributesForAds builds the WindowsAttributes stored for an
// Alternate Data Stream node: only creation time, file attributes and the
// IsADS marker are recorded (no security descriptor or HasADS list).
// NOTE(review): the type assertion panics if stat.sys is not a
// *syscall.Win32FileAttributeData — confirm callers always guarantee this.
func getWindowsAttributesForAds(stat *ExtendedFileInfo, isAds *bool) (restic.WindowsAttributes, error) {
	winFI := stat.sys.(*syscall.Win32FileAttributeData)
	return restic.WindowsAttributes{
		CreationTime:   &winFI.CreationTime,
		FileAttributes: &winFI.FileAttributes,
		IsADS:          isAds,
	}, nil
}
func getWindowsAttributes(stat *ExtendedFileInfo, sd *[]byte, path string, isAds bool) (restic.WindowsAttributes, error) {
winFI := stat.sys.(*syscall.Win32FileAttributeData)
attrs := restic.WindowsAttributes{
CreationTime: &winFI.CreationTime,
FileAttributes: &winFI.FileAttributes,
SecurityDescriptor: sd,
})
return err
}
if isAds {
attrs.IsADS = &isAds
} else {
hasAds := getHasAds(path)
if len(hasAds) > 0 {
attrs.HasADS = &hasAds
}
}
return attrs, nil
}
// getHasAds returns the names of the Alternate Data Streams attached to
// path, or nil when there are none or they could not be determined.
func getHasAds(path string) []string {
	ok, names, err := restic.GetADStreamNames(path)
	if ok {
		return names
	}
	if err != nil {
		debug.Log("Could not fetch ads information for %v %v.", path, err)
	}
	return nil
}
// checkAndStoreEASupport checks if the volume of the path supports extended attributes and stores the result in a map

View file

@ -0,0 +1,144 @@
//go:build windows
// +build windows
package restic
import (
"path/filepath"
"strings"
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var (
kernel32dll = syscall.NewLazyDLL("kernel32.dll")
findFirstStreamW = kernel32dll.NewProc("FindFirstStreamW")
findNextStreamW = kernel32dll.NewProc("FindNextStreamW")
findClose = kernel32dll.NewProc("FindClose")
)
type (
HANDLE uintptr
)
const (
maxPath = 296
streamInfoLevelStandard = 0
invalidFileHandle = ^HANDLE(0)
)
type Win32FindStreamData struct {
size int64
name [maxPath]uint16
}
/*
HANDLE WINAPI FindFirstStreamW(
__in LPCWSTR lpFileName,
__in STREAM_INFO_LEVELS InfoLevel, (0 standard, 1 max infos)
__out LPVOID lpFindStreamData, (return information about file in a WIN32_FIND_STREAM_DATA if 0 is given in infos_level
__reserved DWORD dwFlags (Reserved for future use. This parameter must be zero.) cf: doc
);
https://msdn.microsoft.com/en-us/library/aa364424(v=vs.85).aspx
*/
// GetADStreamNames returns the Alternate Data Stream (ADS) names of
// fileName. success is true when a stream-search handle could be opened and
// the file was enumerated; streamNames then holds the ADS names found.
// The Windows API returns an error object even on success; the expected
// ERROR_HANDLE_EOF marking the end of the enumeration is filtered out
// before returning.
func GetADStreamNames(fileName string) (success bool, streamNames []string, err error) {
	h, success, firstname, err := findFirstStream(fileName)
	// release the search handle once enumeration is done
	defer closeHandle(h)
	if success {
		if !strings.Contains(firstname, "::") {
			//If fileName is a directory which has ADS, the ADS name comes in the first stream itself between the two :
			//file ads firstname comes as ::$DATA (the unnamed main stream), which is skipped here
			streamNames = append(streamNames, firstname)
		}
		for {
			endStream, name, err2 := findNextStream(h)
			err = err2
			if endStream {
				break
			}
			streamNames = append(streamNames, name)
		}
	}
	// If the handle is found successfully, success is true, but the windows api
	// still returns an error object. It doesn't mean that an error occurred.
	if isHandleEOFError(err) {
		// This error is expected at the end of enumeration; don't expose it.
		err = nil
	}
	return success, streamNames, err
}
// findFirstStream opens a stream search for fileName via FindFirstStreamW
// and returns the search handle and the name of the first stream found.
// success is true when a valid (non-invalid) handle was obtained. Note that
// the Windows API call returns a non-nil error object even on success, so
// err alone does not indicate failure.
func findFirstStream(fileName string) (handle HANDLE, success bool, streamType string, err error) {
	fsd := &Win32FindStreamData{}

	ptr, err := syscall.UTF16PtrFromString(fileName)
	if err != nil {
		// could not convert the name to UTF-16; no handle was opened
		return invalidFileHandle, false, "<nil>", err
	}
	ret, _, err := findFirstStreamW.Call(
		uintptr(unsafe.Pointer(ptr)),
		streamInfoLevelStandard,
		uintptr(unsafe.Pointer(fsd)),
		0,
	)
	h := HANDLE(ret)
	streamType = windows.UTF16ToString(fsd.name[:])
	return h, h != invalidFileHandle, streamType, err
}
// findNextStream advances the stream search and returns the next ADS name.
// endStream indicates that no further stream is available (the enumeration
// is finished). As with findFirstStream, a non-nil err does not necessarily
// mean a failure occurred; ERROR_HANDLE_EOF is expected at the end.
func findNextStream(handle HANDLE) (endStream bool, name string, err error) {
	fsd := &Win32FindStreamData{}
	ret, _, err := findNextStreamW.Call(
		uintptr(handle),
		uintptr(unsafe.Pointer(fsd)),
	)
	name = windows.UTF16ToString(fsd.name[:])
	// NOTE(review): this treats any return value other than exactly 1 as the
	// end of the enumeration; the API contract is "nonzero on success" —
	// confirm it always returns exactly 1.
	return ret != 1, name, err
}
// closeHandle releases a stream-search handle and reports whether the
// underlying FindClose call succeeded.
func closeHandle(handle HANDLE) bool {
	ret, _, _ := findClose.Call(uintptr(handle))
	return ret != 0
}
// TrimAds strips an Alternate Data Stream suffix from the file-name part of
// str and returns the path of the main file. Paths whose file name contains
// no stream suffix are returned unchanged.
func TrimAds(str string) string {
	dir, filename := filepath.Split(str)
	base, _, found := strings.Cut(filename, ":")
	if !found {
		return str
	}
	return filepath.Join(dir, base)
}
// IsAds reports whether str names an Alternate Data Stream. On Windows a
// colon can only appear in a file name as part of an ADS name.
func IsAds(str string) bool {
	return strings.Contains(filepath.Base(str), ":")
}
// isHandleEOFError reports whether err is the Windows ERROR_HANDLE_EOF
// errno, which signals the normal end of a stream enumeration.
func isHandleEOFError(err error) bool {
	errno, ok := err.(syscall.Errno)
	return ok && errno == syscall.ERROR_HANDLE_EOF
}

View file

@ -0,0 +1,187 @@
//go:build windows
// +build windows
package restic
import (
"math/rand"
"os"
"strconv"
"sync"
"testing"
rtest "github.com/restic/restic/internal/test"
)
var (
testFileName = "TestingAds.txt"
testFilePath string
adsFileName = ":AdsName"
testData = "This is the main data stream."
testDataAds = "This is an alternate data stream "
goWG sync.WaitGroup
dataSize int
)
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
// TestAdsFile exercises creating and reading back files with 0 through 4
// Alternate Data Streams. The package-level testData/testDataAds payloads
// keep growing across iterations, so later rounds write progressively
// larger main and stream contents.
func TestAdsFile(t *testing.T) {
	// create a temp test file
	for i := 0; i < 5; i++ {
		// Append i*10MB of random data so both tiny and large payloads are covered.
		dataSize = 10000000 * i
		testData = testData + randStringBytesRmndr(dataSize)
		testDataAds = testDataAds + randStringBytesRmndr(dataSize)
		//Testing with multiple ads streams in sequence.
		testAdsForCount(i, t)
	}
}
// testAdsForCount writes a test file with adsTestCount streams, verifies that
// GetADStreamNames reports exactly that many streams, then reads the main
// file and every stream back concurrently to check the written contents.
func testAdsForCount(adsTestCount int, t *testing.T) {
	makeTestFile(adsTestCount)
	defer os.Remove(testFilePath)
	success, streams, errGA := GetADStreamNames(testFilePath)
	rtest.Assert(t, success, "GetADStreamNames status. error: %v", errGA)
	rtest.Assert(t, len(streams) == adsTestCount, "Stream found: %v", streams)
	adsCount := len(streams)
	goWG.Add(1)
	go ReadMain(t)
	goWG.Add(adsCount)
	for i := 0; i < adsCount; i++ {
		//Reading the ADS from the file concurrently with the main stream read.
		go ReadAds(i, t)
	}
	goWG.Wait()
	os.Remove(testFilePath)
}
// ReadMain reads the main data stream of the test file and verifies that it
// matches testData, signalling completion on goWG.
func ReadMain(t *testing.T) {
	defer goWG.Done()
	content, readErr := os.ReadFile(testFilePath)
	rtest.OK(t, readErr)
	got := string(content)
	rtest.Assert(t, got == testData, "Data read: %v", len(got))
}
// ReadAds reads ads stream i of the test file and verifies its content,
// signalling completion on goWG.
func ReadAds(i int, t *testing.T) {
	defer goWG.Done()
	dataAds, errAds := os.ReadFile(testFilePath + adsFileName + strconv.Itoa(i))
	// rtest.OK already fails the test on a non-nil error; the previous
	// additional `errAds == nil` assertion (with a copy-pasted
	// "GetADStreamNames status" message) was unreachable and is removed.
	rtest.OK(t, errAds)
	dataStringAds := string(dataAds)
	rtest.Assert(t, dataStringAds == testDataAds+strconv.Itoa(i)+".\n", "Ads Data read: %v", len(dataStringAds))
}
// makeTestFile creates a temp file (recorded in testFilePath) and writes the
// main stream plus adsCount ads streams concurrently, deliberately varying
// whether the main-stream write starts before or after the stream writes.
// It panics if the temp file cannot be created; the error return is always nil.
func makeTestFile(adsCount int) error {
	f, err := os.CreateTemp("", testFileName)
	if err != nil {
		panic(err)
	}
	testFilePath = f.Name()
	defer f.Close()
	if adsCount == 0 || adsCount == 1 {
		goWG.Add(1)
		//Writing main file
		go WriteMain(err, f)
	}
	goWG.Add(adsCount)
	for i := 0; i < adsCount; i++ {
		//Writing ADS to the file concurrently while main file also gets written
		go WriteADS(i)
		if i == 1 {
			//Testing some cases where the main file writing may start after the ads streams writing has started.
			//These cases are tested when adsCount > 1. In this case we start writing the main file after starting to write ads.
			goWG.Add(1)
			go WriteMain(err, f)
		}
	}
	goWG.Wait()
	return nil
}
// WriteMain writes the main data stream to f and signals completion on goWG.
// It returns (true, err) when the write failed and (false, nil) on success.
// The leading error parameter is kept only for call-site compatibility and is
// ignored; previously the actual write error was discarded and that stale
// parameter returned instead.
func WriteMain(_ error, f *os.File) (bool, error) {
	defer goWG.Done()
	if _, err := f.Write([]byte(testData)); err != nil {
		return true, err
	}
	return false, nil
}
// WriteADS creates ads stream i on the test file, writes its payload, and
// signals completion on goWG. It reports failure together with the error.
func WriteADS(i int) (bool, error) {
	defer goWG.Done()
	stream, err := os.Create(testFilePath + adsFileName + strconv.Itoa(i))
	if err != nil {
		return true, err
	}
	defer stream.Close()
	if _, err = stream.Write([]byte(testDataAds + strconv.Itoa(i) + ".\n")); err != nil {
		return true, err
	}
	return false, nil
}
// randStringBytesRmndr returns a random string of length n drawn from
// letterBytes, using the remainder of rand.Int63 to pick each character.
func randStringBytesRmndr(n int) string {
	out := make([]byte, n)
	for i := 0; i < n; i++ {
		out[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
	}
	return string(out)
}
func TestTrimAds(t *testing.T) {
tests := []struct {
input string
output string
}{
{input: "d:\\test.txt:stream1:$DATA", output: "d:\\test.txt"},
{input: "test.txt:stream1:$DATA", output: "test.txt"},
{input: "test.txt", output: "test.txt"},
{input: "\\abc\\test.txt:stream1:$DATA", output: "\\abc\\test.txt"},
{input: "\\abc\\", output: "\\abc\\"},
{input: "\\", output: "\\"},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
output := TrimAds(test.input)
rtest.Equals(t, test.output, output)
})
}
}
func TestIsAds(t *testing.T) {
tests := []struct {
input string
result bool
}{
{input: "d:\\test.txt:stream1:$DATA", result: true},
{input: "test.txt:stream1:$DATA", result: true},
{input: "test.txt", result: false},
{input: "\\abc\\test.txt:stream1:$DATA", result: true},
{input: "\\abc\\", result: false},
{input: "\\", result: false},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
output := IsAds(test.input)
rtest.Equals(t, test.result, output)
})
}
}

View file

@ -46,13 +46,18 @@ const (
TypeFileAttributes GenericAttributeType = "windows.file_attributes"
// TypeSecurityDescriptor is the GenericAttributeType used for storing security descriptors including owner, group, discretionary access control list (DACL), system access control list (SACL)) for windows files within the generic attributes map.
TypeSecurityDescriptor GenericAttributeType = "windows.security_descriptor"
// TypeHasADS is the GenericAttributeType used to indicate that a file has Alternate Data Streams attached to it.
// The value will have a array of the ADS attached to the file. Those files will have a generic attribute TypeIsADS.
TypeHasADS GenericAttributeType = "windows.has_ads"
// TypeIsADS is the GenericAttributeType with a boolean value used to indicate that the file represents an Alternate Data Stream.
TypeIsADS GenericAttributeType = "windows.is_ads"
// Generic Attributes for other OS types should be defined here.
)
// init is called when the package is initialized. Any new GenericAttributeTypes being created must be added here as well.
func init() {
storeGenericAttributeType(TypeCreationTime, TypeFileAttributes, TypeSecurityDescriptor)
storeGenericAttributeType(TypeCreationTime, TypeFileAttributes, TypeSecurityDescriptor, TypeHasADS, TypeIsADS)
}
// genericAttributesForOS maintains a map of known genericAttributesForOS to the OSType

View file

@ -2,9 +2,12 @@ package restic
import (
"encoding/json"
"os"
"reflect"
"runtime"
"syscall"
"github.com/restic/restic/internal/debug"
)
// WindowsAttributes are the genericAttributes for Windows OS
@ -16,6 +19,11 @@ type WindowsAttributes struct {
// SecurityDescriptor is used for storing security descriptors which includes
// owner, group, discretionary access control list (DACL), system access control list (SACL)
SecurityDescriptor *[]byte `generic:"security_descriptor"`
// HasADS is used to indicate that a file has Alternate Data Streams attached to it.
// The value will have a array of the ADS attached to the file. Those files will have a generic attribute TypeIsADS.
HasADS *[]string `generic:"has_ads"`
// IsADS is used to indicate that the file represents an Alternate Data Stream.
IsADS *bool `generic:"is_ads"`
}
// windowsAttrsToGenericAttributes converts the WindowsAttributes to a generic attributes map using reflection
@ -24,3 +32,56 @@ func WindowsAttrsToGenericAttributes(windowsAttributes WindowsAttributes) (attrs
windowsAttributesValue := reflect.ValueOf(windowsAttributes)
return OSAttrsToGenericAttributes(reflect.TypeOf(windowsAttributes), &windowsAttributesValue, runtime.GOOS)
}
// IsMainFile indicates if this is the main file and not a secondary file like an ads stream.
// This is used for functionalities we want to skip for secondary (ads) files.
// Eg. For Windows we do not want to count the secondary files.
// A node is an ADS entry exactly when its TypeIsADS generic attribute holds
// the raw value "true"; any other value, including a missing attribute,
// means this is the main file.
func (node Node) IsMainFile() bool {
	return string(node.GenericAttributes[TypeIsADS]) != "true"
}
// RemoveExtraStreams removes any extra streams on the file which are not present in the
// backed up state in the generic attribute TypeHasADS.
// Enumeration or removal failures are logged/ignored (best effort): if
// GetADStreamNames does not succeed, nothing is removed.
func (node Node) RemoveExtraStreams(path string) {
	success, existingStreams, _ := GetADStreamNames(path)
	if success {
		// Decode the list of stream names recorded at backup time; on any
		// unmarshal error adsValues stays nil, so every existing stream is
		// considered extra.
		var adsValues []string
		hasAdsBytes := node.GenericAttributes[TypeHasADS]
		if hasAdsBytes != nil {
			var adsArray []string
			err := json.Unmarshal(hasAdsBytes, &adsArray)
			if err == nil {
				adsValues = adsArray
			}
		}
		// Streams present on disk but absent from the backup must be removed.
		extraStreams := filterItems(adsValues, existingStreams)
		for _, extraStream := range extraStreams {
			// Stream names include the leading ":" separator — TODO confirm
			// against GetADStreamNames' output format.
			streamToRemove := path + extraStream
			err := os.Remove(streamToRemove)
			if err != nil {
				debug.Log("Error removing stream: %s : %s", streamToRemove, err)
			}
		}
	}
}
// filterItems returns the elements of evalArray that do not occur in
// referenceArray, preserving evalArray's order.
func filterItems(referenceArray, evalArray []string) (result []string) {
	// Index the reference values for O(1) membership checks.
	known := make(map[string]bool, len(referenceArray))
	for _, ref := range referenceArray {
		known[ref] = true
	}
	// Keep only the candidates that are absent from the reference set.
	for _, candidate := range evalArray {
		if !known[candidate] {
			result = append(result, candidate)
		}
	}
	return result
}

View file

@ -6,6 +6,8 @@ import (
"path/filepath"
"sync"
"encoding/json"
"golang.org/x/sync/errgroup"
"github.com/restic/restic/internal/debug"
@ -29,6 +31,7 @@ type fileInfo struct {
location string // file on local filesystem relative to restorer basedir
blobs interface{} // blobs of the file
state *fileState
attrs map[restic.GenericAttributeType]json.RawMessage
}
type fileBlobInfo struct {
@ -94,8 +97,8 @@ func newFileRestorer(dst string,
}
}
func (r *fileRestorer) addFile(location string, content restic.IDs, size int64, state *fileState) {
r.files = append(r.files, &fileInfo{location: location, blobs: content, size: size, state: state})
func (r *fileRestorer) addFile(location string, content restic.IDs, size int64, state *fileState, attrs map[restic.GenericAttributeType]json.RawMessage) {
r.files = append(r.files, &fileInfo{location: location, blobs: content, size: size, state: state, attrs: attrs})
}
func (r *fileRestorer) targetPath(location string) string {
@ -187,7 +190,7 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
// empty file or one with already uptodate content. Make sure that the file size is correct
if !restoredBlobs {
err := r.truncateFileToSize(file.location, file.size)
err := r.truncateFileToSize(file)
if errFile := r.sanitizeError(file, err); errFile != nil {
return errFile
}
@ -249,8 +252,8 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
return wg.Wait()
}
func (r *fileRestorer) truncateFileToSize(location string, size int64) error {
f, err := createFile(r.targetPath(location), size, false, r.allowRecursiveDelete)
func (r *fileRestorer) truncateFileToSize(file *fileInfo) error {
f, err := createOrOpenFile(r.targetPath(file.location), file.size, file, r.allowRecursiveDelete)
if err != nil {
return err
}
@ -380,7 +383,7 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID,
file.inProgress = true
createSize = file.size
}
writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse)
writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file)
r.reportBlobProgress(file, uint64(len(blobData)))
return writeErr
}
@ -399,5 +402,5 @@ func (r *fileRestorer) reportBlobProgress(file *fileInfo, blobSize uint64) {
if file.state == nil {
action = restore.ActionFileRestored
}
r.progress.AddProgress(file.location, action, uint64(blobSize), uint64(file.size))
r.progress.AddProgress(file.location, action, uint64(blobSize), uint64(file.size), file.attrs)
}

View file

@ -4,11 +4,9 @@ import (
"fmt"
"os"
"sync"
"syscall"
"github.com/cespare/xxhash/v2"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
)
@ -61,27 +59,10 @@ func openFile(path string) (*os.File, error) {
return f, nil
}
func createFile(path string, createSize int64, sparse bool, allowRecursiveDelete bool) (*os.File, error) {
f, err := fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600)
if err != nil && fs.IsAccessDenied(err) {
// If file is readonly, clear the readonly flag by resetting the
// permissions of the file and try again
// as the metadata will be set again in the second pass and the
// readonly flag will be applied again if needed.
if err = fs.ResetPermissions(path); err != nil {
return nil, err
}
if f, err = fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600); err != nil {
return nil, err
}
} else if err != nil && (errors.Is(err, syscall.ELOOP) || errors.Is(err, syscall.EISDIR)) {
// symlink or directory, try to remove it later on
f = nil
} else if err != nil {
return nil, err
}
func postCreateFile(f *os.File, path string, createSize int64, allowRecursiveDelete, sparse bool) (*os.File, error) {
var fi os.FileInfo
var err error
if f != nil {
// stat to check that we've opened a regular file
fi, err = f.Stat()
@ -162,7 +143,7 @@ func ensureSize(f *os.File, fi os.FileInfo, createSize int64, sparse bool) (*os.
return f, nil
}
func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, createSize int64, sparse bool) error {
func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, createSize int64, fileInfo *fileInfo) error {
bucket := &w.buckets[uint(xxhash.Sum64String(path))%uint(len(w.buckets))]
acquireWriter := func() (*partialFile, error) {
@ -173,18 +154,12 @@ func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, create
bucket.files[path].users++
return wr, nil
}
var f *os.File
var err error
if createSize >= 0 {
f, err = createFile(path, createSize, sparse, w.allowRecursiveDelete)
if err != nil {
return nil, err
}
} else if f, err = openFile(path); err != nil {
f, err := createOrOpenFile(path, createSize, fileInfo, w.allowRecursiveDelete)
if err != nil {
return nil, err
}
wr := &partialFile{File: f, users: 1, sparse: sparse}
wr := &partialFile{File: f, users: 1, sparse: fileInfo.sparse}
bucket.files[path] = wr
return wr, nil
@ -196,6 +171,8 @@ func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, create
if bucket.files[path].users == 1 {
delete(bucket.files, path)
//Clean up for the path
CleanupPath(path)
return wr.Close()
}
bucket.files[path].users--

View file

@ -18,16 +18,16 @@ func TestFilesWriterBasic(t *testing.T) {
f1 := dir + "/f1"
f2 := dir + "/f2"
rtest.OK(t, w.writeToFile(f1, []byte{1}, 0, 2, false))
rtest.OK(t, w.writeToFile(f1, []byte{1}, 0, 2, &fileInfo{}))
rtest.Equals(t, 0, len(w.buckets[0].files))
rtest.OK(t, w.writeToFile(f2, []byte{2}, 0, 2, false))
rtest.OK(t, w.writeToFile(f2, []byte{2}, 0, 2, &fileInfo{}))
rtest.Equals(t, 0, len(w.buckets[0].files))
rtest.OK(t, w.writeToFile(f1, []byte{1}, 1, -1, false))
rtest.OK(t, w.writeToFile(f1, []byte{1}, 1, -1, &fileInfo{}))
rtest.Equals(t, 0, len(w.buckets[0].files))
rtest.OK(t, w.writeToFile(f2, []byte{2}, 1, -1, false))
rtest.OK(t, w.writeToFile(f2, []byte{2}, 1, -1, &fileInfo{}))
rtest.Equals(t, 0, len(w.buckets[0].files))
buf, err := os.ReadFile(f1)
@ -48,13 +48,13 @@ func TestFilesWriterRecursiveOverwrite(t *testing.T) {
// must error if recursive delete is not allowed
w := newFilesWriter(1, false)
err := w.writeToFile(path, []byte{1}, 0, 2, false)
err := w.writeToFile(path, []byte{1}, 0, 2, &fileInfo{})
rtest.Assert(t, errors.Is(err, notEmptyDirError()), "unexpected error got %v", err)
rtest.Equals(t, 0, len(w.buckets[0].files))
// must replace directory
w = newFilesWriter(1, true)
rtest.OK(t, w.writeToFile(path, []byte{1, 1}, 0, 2, false))
rtest.OK(t, w.writeToFile(path, []byte{1, 1}, 0, 2, &fileInfo{}))
rtest.Equals(t, 0, len(w.buckets[0].files))
buf, err := os.ReadFile(path)
@ -133,7 +133,7 @@ func TestCreateFile(t *testing.T) {
for j, test := range tests {
path := basepath + fmt.Sprintf("%v%v", i, j)
sc.create(t, path)
f, err := createFile(path, test.size, test.isSparse, false)
f, err := createOrOpenFile(path, test.size, &fileInfo{sparse: test.isSparse}, false)
if sc.err == nil {
rtest.OK(t, err)
fi, err := f.Stat()
@ -161,7 +161,7 @@ func TestCreateFileRecursiveDelete(t *testing.T) {
rtest.OK(t, os.WriteFile(filepath.Join(path, "file"), []byte("data"), 0o400))
// replace it
f, err := createFile(path, 42, false, true)
f, err := createOrOpenFile(path, 42, &fileInfo{sparse: false}, true)
rtest.OK(t, err)
fi, err := f.Stat()
rtest.OK(t, err)

View file

@ -0,0 +1,82 @@
//go:build !windows
// +build !windows
package restorer
import (
"os"
"syscall"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/fs"
)
// OpenFile opens the file with create, truncate and write only options if
// createSize is specified greater than 0 i.e. if the file hasn't already
// been created. Otherwise it opens the file with only write only option.
// NOTE(review): the underlying check is createSize >= 0, so a createSize of
// exactly 0 also takes the creation path — confirm the intended contract.
// It is an exported wrapper around openFile; fileInfo is unused on
// non-Windows platforms.
func (fw *filesWriter) OpenFile(createSize int64, path string, fileInfo *fileInfo) (file *os.File, err error) {
	return fw.openFile(createSize, path, fileInfo)
}
// openFile opens the file with create and write only options if createSize
// is non-negative (the file has not been created yet); otherwise it opens
// the existing file write only. On access-denied errors it clears the
// readonly flag and retries once without the create flag.
func (fw *filesWriter) openFile(createSize int64, path string, _ *fileInfo) (file *os.File, err error) {
	if createSize >= 0 {
		file, err = fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600)
		if fs.IsAccessDenied(err) {
			// If file is readonly, clear the readonly flag by resetting the
			// permissions of the file and try again
			// as the metadata will be set again in the second pass and the
			// readonly flag will be applied again if needed.
			if err = fs.ResetPermissions(path); err != nil {
				return nil, err
			}
			if file, err = fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600); err != nil {
				return nil, err
			}
		} else if err != nil && (errors.Is(err, syscall.ELOOP) || errors.Is(err, syscall.EISDIR)) {
			// symlink or directory, try to remove it later on
			file = nil
		} else if err != nil {
			return nil, err
		}
	} else {
		file, err = openFile(path)
	}
	return file, err
}
// CleanupPath performs clean up for the specified path.
// On non-Windows platforms there is no per-path state (such as the Windows
// ADS path mutexes) to release, so this is a no-op.
func CleanupPath(_ string) {
	// no-op
}
// createOrOpenFile creates the file (non-negative createSize means it has
// not been created yet) or opens the existing file write only for
// subsequent writes.
func createOrOpenFile(path string, createSize int64, fileInfo *fileInfo, allowRecursiveDelete bool) (*os.File, error) {
	if createSize >= 0 {
		return createFile(path, createSize, fileInfo, allowRecursiveDelete)
	}
	return openFile(path)
}
// createFile opens path for writing, creating it if needed, and handles the
// two recoverable failure modes: readonly files (permissions are reset and
// the open retried once) and symlinks/directories in the way (deferred to
// postCreateFile for removal). The opened handle is passed to postCreateFile
// for the remaining validation and sizing.
func createFile(path string, createSize int64, fileInfo *fileInfo, allowRecursiveDelete bool) (*os.File, error) {
	f, err := fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600)
	if err != nil && fs.IsAccessDenied(err) {
		// If file is readonly, clear the readonly flag by resetting the
		// permissions of the file and try again
		// as the metadata will be set again in the second pass and the
		// readonly flag will be applied again if needed.
		if err = fs.ResetPermissions(path); err != nil {
			return nil, err
		}
		if f, err = fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600); err != nil {
			return nil, err
		}
	} else if err != nil && (errors.Is(err, syscall.ELOOP) || errors.Is(err, syscall.EISDIR)) {
		// symlink or directory, try to remove it later on
		f = nil
	} else if err != nil {
		return nil, err
	}
	return postCreateFile(f, path, createSize, allowRecursiveDelete, fileInfo.sparse)
}

View file

@ -0,0 +1,248 @@
package restorer
import (
"encoding/json"
"errors"
"os"
"sync"
"syscall"
"github.com/restic/restic/internal/fs"
"github.com/restic/restic/internal/restic"
)
// createOrOpenFile opens the file and handles the readonly attribute and ads related logic during file creation.
//
// Readonly files -
// If an existing file is detected as readonly we clear the flag because otherwise we cannot
// make changes to the file. The readonly attribute would be set again in the second pass when the attributes
// are set if the file version being restored has the readonly bit.
//
// ADS files need special handling -
// Each stream is treated as a separate file in restic. This function is called for the main file which has the
// streams and for each stream.
// If the ads stream calls this function first and the main file doesn't already exist, then creating the file
// for the streams causes the main file to automatically get created with 0 size. Hence we need to be careful
// while creating the main file after that. If we blindly create the main file with the os.O_CREATE option,
// it could overwrite the file already created when the stream was created. However creating the stream with
// os.O_CREATE option does not overwrite the mainfile if it already exists.
// It will simply attach the new stream to the main file if the main file existed, otherwise it will
// create the 0 size main file.
// So we need to sync on the main file path if the file either has ads or is an ads file and then while creating
// the main file, we need to check if the file already exists and if it does, we should not overwrite it
// but instead we should simply return the file that we already created.
//
// Reduction in number of streams when restoring an old mainfile version -
// Another case to handle is if the mainfile already had more streams and the file version being restored has
// less streams, then the extra streams need to be removed from the main file. The stream names are present
// as the value in the generic attribute TypeHasADS.
func createOrOpenFile(path string, createSize int64, fileInfo *fileInfo, allowRecursiveDelete bool) (*os.File, error) {
	if createSize >= 0 {
		var mainPath string
		mainPath, f, err := openFileImpl(path, createSize, fileInfo)
		if err != nil && fs.IsAccessDenied(err) {
			// If file is readonly, clear the readonly flag by resetting the
			// permissions of the file and try again
			// as the metadata will be set again in the second pass and the
			// readonly flag will be applied again if needed.
			// Note: permissions are reset on mainPath (the file that carries
			// the readonly bit), while the reopen targets the requested path.
			if err = fs.ResetPermissions(mainPath); err != nil {
				return nil, err
			}
			if f, err = fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600); err != nil {
				return nil, err
			}
		} else if err != nil && (errors.Is(err, syscall.ELOOP) || errors.Is(err, syscall.EISDIR)) {
			// symlink or directory, try to remove it later on
			f = nil
		} else if err != nil {
			return nil, err
		}
		return postCreateFile(f, path, createSize, allowRecursiveDelete, fileInfo.sparse)
	} else {
		return openFile(path)
	}
}
// openFileImpl is the actual open file implementation.
//
// It returns mainPath — the path with any ADS stream suffix trimmed — so the
// caller can reset permissions on the correct file when the open is denied,
// together with the opened file (nil when a symlink/directory is in the way)
// and any error.
//
// Fixes over the previous version: an inner `var mainPath string` shadowed
// the named return, so the fall-through return handed "" back to the caller
// (which would then call fs.ResetPermissions("")); and a dead `if err != nil`
// check (err was provably nil at that point) is removed.
func openFileImpl(path string, createSize int64, fileInfo *fileInfo) (mainPath string, file *os.File, err error) {
	if createSize < 0 {
		// File is already created. For subsequent writes, only use the
		// write-only open path.
		file, err = openFile(path)
		return mainPath, file, err
	}

	// File needs to be created or replaced.
	isAdsRelated, hasAds, isAds := getAdsAttributes(fileInfo.attrs)
	if isAds {
		mainPath = restic.TrimAds(path)
	} else {
		mainPath = path
	}
	if isAdsRelated {
		// Serialize creation between the main file and all of its ads
		// streams: creating a stream implicitly creates the main file, so
		// the code below must not run concurrently for them.
		mutex := GetOrCreateMutex(mainPath)
		mutex.Lock()
		defer mutex.Unlock()
	}

	// First check if the file already exists.
	var isAlreadyExists bool
	file, err = openFile(path)
	if err == nil {
		isAlreadyExists = true
	} else if !os.IsNotExist(err) {
		// Any error other than IsNotExist: do not continue. If the error is
		// access denied, the caller checks for a readonly file, removes the
		// readonly attribute and retries once; if that fails again it gives up.
		return mainPath, nil, err
	}
	// At this point the readonly flag is already handled and we need not consider it anymore.
	file, err = handleCreateFile(path, file, isAdsRelated, hasAds, isAds, isAlreadyExists)
	return mainPath, file, err
}
// handleCreateFile handles all the various combination of states while creating the file if needed.
func handleCreateFile(path string, fileIn *os.File, isAdsRelated, hasAds, isAds, isAlreadyExists bool) (file *os.File, err error) {
if !isAdsRelated {
// This is the simplest case where ADS files are not involved.
file, err = handleCreateFileNonAds(path, fileIn, isAlreadyExists)
} else {
// This is a complex case needing coordination between the main file and the ads files.
file, err = handleCreateFileAds(path, fileIn, hasAds, isAds, isAlreadyExists)
}
return file, err
}
// handleCreateFileNonAds handles creation of a plain (non-ads) file.
func handleCreateFileNonAds(path string, fileIn *os.File, isAlreadyExists bool) (file *os.File, err error) {
	if isAlreadyExists {
		// The file exists: reuse the handle that was opened without the create flag.
		return fileIn, nil
	}
	// The file does not exist yet: create it.
	return fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600)
}
// handleCreateFileAds handles all the various combination of states while creating the ads related file if needed.
// The caller holds the per-main-path mutex, so checks here cannot race with
// the sibling streams of the same main file.
func handleCreateFileAds(path string, fileIn *os.File, hasAds, isAds, isAlreadyExists bool) (file *os.File, err error) {
	if isAlreadyExists {
		// If the ads related file already exists, return the file that we already created without create option.
		return fileIn, nil
	} else {
		// If the ads related file did not exist, first check if it is an ads file or a file which has ads files attached.
		if isAds {
			// If it is an ads file, then we can simply open it with create options without worrying about overwriting:
			// creating a stream never truncates an existing main file.
			return fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600)
		}
		if hasAds {
			// If it is the main file which has ads files attached, we will check again if the main file wasn't created
			// since we synced (an ads stream creation implicitly creates the main file).
			file, err = openFile(path)
			if err != nil {
				if os.IsNotExist(err) {
					// We confirmed that the main file still doesn't exist after syncing.
					// Hence creating the file with the create flag.
					return fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600)
				} else {
					// Some other error occurred so stop processing and return it.
					return nil, err
				}
			} else {
				// This means that the main file exists now and we should simply return it,
				// it was opened without the create flag.
				return file, err
			}
		}
		// Neither hasAds nor isAds despite being called for an ads-related file.
		return nil, errors.New("invalid case for ads")
	}
}
// Helper methods

// pathMutexMap is the package-level registry of per-path mutexes used to
// serialize creation of a main file and its ads streams.
var pathMutexMap = PathMutexMap{
	mutex: make(map[string]*sync.Mutex),
}

// PathMutexMap represents a map of mutexes, where each path maps to a unique mutex.
type PathMutexMap struct {
	// mu guards access to the mutex map itself.
	mu    sync.RWMutex
	mutex map[string]*sync.Mutex
}
// CleanupPath performs clean up for the specified path.
// On Windows this releases the per-path mutex used to coordinate ads stream
// creation for the path's main file.
func CleanupPath(path string) {
	removeMutex(path)
}
// removeMutex removes the mutex for the specified path. The path is first
// normalized to the main file path so that an ads stream path releases the
// same mutex as its main file.
func removeMutex(path string) {
	mainPath := restic.TrimAds(path)
	pathMutexMap.mu.Lock()
	defer pathMutexMap.mu.Unlock()
	delete(pathMutexMap.mutex, mainPath)
}
// Cleanup performs cleanup for all paths: it empties the per-path mutex map.
func Cleanup() {
	pathMutexMap.mu.Lock()
	defer pathMutexMap.mu.Unlock()
	for path, mutex := range pathMutexMap.mutex {
		// Briefly acquire each mutex so an in-flight holder finishes before
		// its entry is dropped from the map.
		mutex.Lock()
		delete(pathMutexMap.mutex, path)
		mutex.Unlock()
	}
}
// GetOrCreateMutex returns the mutex associated with the given path,
// creating and registering a new one on first use.
func GetOrCreateMutex(path string) *sync.Mutex {
	pathMutexMap.mu.RLock()
	m, found := pathMutexMap.mutex[path]
	pathMutexMap.mu.RUnlock()
	if found {
		return m
	}
	// Upgrade to a write lock and re-check, since another goroutine may have
	// registered a mutex for this path between RUnlock and Lock.
	pathMutexMap.mu.Lock()
	defer pathMutexMap.mu.Unlock()
	if m, found = pathMutexMap.mutex[path]; !found {
		m = &sync.Mutex{}
		pathMutexMap.mutex[path] = m
	}
	return m
}
// getAdsAttributes gets all the ads related attributes from the node's
// generic attributes.
// hasAds reports that the file has one or more alternate data streams
// attached; isAds reports that the file itself represents an alternate data
// stream; isAdsRelated is true when either holds.
func getAdsAttributes(attrs map[restic.GenericAttributeType]json.RawMessage) (isAdsRelated, hasAds, isAds bool) {
	if len(attrs) > 0 {
		adsBytes := attrs[restic.TypeHasADS]
		hasAds = adsBytes != nil
		// The TypeIsADS attribute carries the raw value "true" for an ADS
		// entry (matching Node.IsMainFile). The previous comparison was
		// inverted (!= "true"), which classified every node that carried any
		// other generic attribute as an ADS stream.
		isAds = string(attrs[restic.TypeIsADS]) == "true"
	}
	isAdsRelated = hasAds || isAds
	return isAdsRelated, hasAds, isAds
}

View file

@ -283,7 +283,7 @@ func (res *Restorer) restoreNodeTo(node *restic.Node, target, location string) e
}
}
res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0)
res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0, node.GenericAttributes)
return res.restoreNodeMetadataTo(node, target, location)
}
@ -310,7 +310,7 @@ func (res *Restorer) restoreHardlinkAt(node *restic.Node, target, path, location
}
}
res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0)
res.opts.Progress.AddProgress(location, restoreui.ActionOtherRestored, 0, 0, node.GenericAttributes)
// TODO investigate if hardlinks have separate metadata on any supported system
return res.restoreNodeMetadataTo(node, path, location)
}
@ -368,10 +368,10 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error)
// first tree pass: create directories and collect all files to restore
err = res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{
enterDir: func(_ *restic.Node, target, location string) error {
enterDir: func(node *restic.Node, target, location string) error {
debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location)
if location != string(filepath.Separator) {
res.opts.Progress.AddFile(0)
res.addFile(node, 0)
}
return res.ensureDir(target)
},
@ -383,14 +383,14 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error)
}
if node.Type != restic.NodeTypeFile {
res.opts.Progress.AddFile(0)
res.addFile(node, 0)
return nil
}
if node.Links > 1 {
if idx.Has(node.Inode, node.DeviceID) {
// a hardlinked file does not increase the restore size
res.opts.Progress.AddFile(0)
res.addFile(node, 0)
return nil
}
idx.Add(node.Inode, node.DeviceID, location)
@ -398,18 +398,18 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error)
buf, err = res.withOverwriteCheck(ctx, node, target, location, false, buf, func(updateMetadataOnly bool, matches *fileState) error {
if updateMetadataOnly {
res.opts.Progress.AddSkippedFile(location, node.Size)
res.addSkippedFile(node, location, node.Size)
} else {
res.opts.Progress.AddFile(node.Size)
res.addFile(node, node.Size)
if !res.opts.DryRun {
filerestorer.addFile(location, node.Content, int64(node.Size), matches)
filerestorer.addFile(location, node.Content, int64(node.Size), matches, node.GenericAttributes)
} else {
action := restoreui.ActionFileUpdated
if matches == nil {
action = restoreui.ActionFileRestored
}
// immediately mark as completed
res.opts.Progress.AddProgress(location, action, node.Size, node.Size)
res.opts.Progress.AddProgress(location, action, node.Size, node.Size, node.GenericAttributes)
}
}
res.trackFile(location, updateMetadataOnly)
@ -471,7 +471,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) (uint64, error)
err := res.restoreNodeMetadataTo(node, target, location)
if err == nil {
res.opts.Progress.AddProgress(location, restoreui.ActionDirRestored, 0, 0)
res.opts.Progress.AddProgress(location, restoreui.ActionDirRestored, 0, 0, node.GenericAttributes)
}
return err
},
@ -564,7 +564,7 @@ func (res *Restorer) withOverwriteCheck(ctx context.Context, node *restic.Node,
if isHardlink {
size = 0
}
res.opts.Progress.AddSkippedFile(location, size)
res.addSkippedFile(node, location, size)
return buf, nil
}

View file

@ -27,7 +27,11 @@ import (
"golang.org/x/sync/errgroup"
)
type Node interface{}
type Node interface {
IsAds() bool
HasAds() bool
Attributes() *FileAttributes
}
type Snapshot struct {
Nodes map[string]Node
@ -41,6 +45,20 @@ type File struct {
Mode os.FileMode
ModTime time.Time
attributes *FileAttributes
isAds bool
hasAds bool
}
func (f File) IsAds() bool {
return f.isAds
}
func (f File) HasAds() bool {
return f.hasAds
}
func (f File) Attributes() *FileAttributes {
return f.attributes
}
type Symlink struct {
@ -48,11 +66,37 @@ type Symlink struct {
ModTime time.Time
}
func (s Symlink) IsAds() bool {
return false
}
func (s Symlink) HasAds() bool {
return false
}
func (s Symlink) Attributes() *FileAttributes {
return nil
}
type Dir struct {
Nodes map[string]Node
Mode os.FileMode
ModTime time.Time
attributes *FileAttributes
hasAds bool
}
func (Dir) IsAds() bool {
// Dir itself can not be an ADS
return false
}
func (d Dir) HasAds() bool {
return d.hasAds
}
func (d Dir) Attributes() *FileAttributes {
return d.attributes
}
type FileAttributes struct {
@ -75,7 +119,9 @@ func saveFile(t testing.TB, repo restic.BlobSaver, data string) restic.ID {
return id
}
func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode uint64, getGenericAttributes func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage)) restic.ID {
func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode uint64,
getFileAttributes func(attr *FileAttributes, isDir bool) (fileAttributes map[restic.GenericAttributeType]json.RawMessage),
getAdsAttributes func(path string, hasAds bool, isAds bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage)) restic.ID {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -107,6 +153,8 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u
if mode == 0 {
mode = 0644
}
genericAttributes := getGenericAttributes(name, node, getFileAttributes, getAdsAttributes)
err := tree.Insert(&restic.Node{
Type: restic.NodeTypeFile,
Mode: mode,
@ -118,7 +166,7 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u
Size: uint64(size),
Inode: fi,
Links: lc,
GenericAttributes: getGenericAttributes(node.attributes, false),
GenericAttributes: genericAttributes,
})
rtest.OK(t, err)
case Symlink:
@ -135,7 +183,7 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u
})
rtest.OK(t, err)
case Dir:
id := saveDir(t, repo, node.Nodes, inode, getGenericAttributes)
id := saveDir(t, repo, node.Nodes, inode, getFileAttributes, getAdsAttributes)
mode := node.Mode
if mode == 0 {
@ -150,7 +198,7 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u
UID: uint32(os.Getuid()),
GID: uint32(os.Getgid()),
Subtree: &id,
GenericAttributes: getGenericAttributes(node.attributes, false),
GenericAttributes: getGenericAttributes(name, node, getFileAttributes, getAdsAttributes),
})
rtest.OK(t, err)
default:
@ -166,13 +214,29 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u
return id
}
func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot, getGenericAttributes func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage)) (*restic.Snapshot, restic.ID) {
func getGenericAttributes(name string, node Node,
getFileAttributes func(attr *FileAttributes, isDir bool) map[restic.GenericAttributeType]json.RawMessage,
getAdsAttributes func(path string, hasAds bool, isAds bool) map[restic.GenericAttributeType]json.RawMessage) map[restic.GenericAttributeType]json.RawMessage {
genericAttributes := getFileAttributes(node.Attributes(), false)
if node.HasAds() || node.IsAds() {
if genericAttributes == nil {
genericAttributes = map[restic.GenericAttributeType]json.RawMessage{}
}
for k, v := range getAdsAttributes(name, node.HasAds(), node.IsAds()) {
genericAttributes[k] = v
}
}
return genericAttributes
}
func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot, getFileAttributes func(attr *FileAttributes, isDir bool) map[restic.GenericAttributeType]json.RawMessage,
getAdsAttributes func(path string, hasAds bool, isAds bool) map[restic.GenericAttributeType]json.RawMessage) (*restic.Snapshot, restic.ID) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wg, wgCtx := errgroup.WithContext(ctx)
repo.StartPackUploader(wgCtx, wg)
treeID := saveDir(t, repo, snapshot.Nodes, 1000, getGenericAttributes)
treeID := saveDir(t, repo, snapshot.Nodes, 1000, getFileAttributes, getAdsAttributes)
err := repo.Flush(ctx)
if err != nil {
t.Fatal(err)
@ -192,7 +256,12 @@ func saveSnapshot(t testing.TB, repo restic.Repository, snapshot Snapshot, getGe
return sn, id
}
var noopGetGenericAttributes = func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage) {
var noopGetFileAttributes = func(_ *FileAttributes, _ bool) (fileAttributes map[restic.GenericAttributeType]json.RawMessage) {
// No-op
return nil
}
var noopGetAdsAttributes = func(_ string, _ bool, _ bool) (adsAttribute map[restic.GenericAttributeType]json.RawMessage) {
// No-op
return nil
}
@ -372,7 +441,7 @@ func TestRestorer(t *testing.T) {
for _, test := range tests {
t.Run("", func(t *testing.T) {
repo := repository.TestRepository(t)
sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes)
sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetFileAttributes, noopGetAdsAttributes)
t.Logf("snapshot saved as %v", id.Str())
res := NewRestorer(repo, sn, Options{})
@ -483,7 +552,7 @@ func TestRestorerRelative(t *testing.T) {
t.Run("", func(t *testing.T) {
repo := repository.TestRepository(t)
sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes)
sn, id := saveSnapshot(t, repo, test.Snapshot, noopGetFileAttributes, noopGetAdsAttributes)
t.Logf("snapshot saved as %v", id.Str())
res := NewRestorer(repo, sn, Options{})
@ -746,7 +815,7 @@ func TestRestorerTraverseTree(t *testing.T) {
for _, test := range tests {
t.Run("", func(t *testing.T) {
repo := repository.TestRepository(t)
sn, _ := saveSnapshot(t, repo, test.Snapshot, noopGetGenericAttributes)
sn, _ := saveSnapshot(t, repo, test.Snapshot, noopGetFileAttributes, noopGetAdsAttributes)
// set Delete option to enable tracking filenames in a directory
res := NewRestorer(repo, sn, Options{Delete: true})
@ -823,7 +892,7 @@ func TestRestorerConsistentTimestampsAndPermissions(t *testing.T) {
},
},
},
}, noopGetGenericAttributes)
}, noopGetFileAttributes, noopGetAdsAttributes)
res := NewRestorer(repo, sn, Options{})
@ -878,7 +947,7 @@ func TestVerifyCancel(t *testing.T) {
}
repo := repository.TestRepository(t)
sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes)
sn, _ := saveSnapshot(t, repo, snapshot, noopGetFileAttributes, noopGetAdsAttributes)
res := NewRestorer(repo, sn, Options{})
@ -961,7 +1030,7 @@ func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSna
defer cancel()
// base snapshot
sn, id := saveSnapshot(t, repo, baseSnapshot, noopGetGenericAttributes)
sn, id := saveSnapshot(t, repo, baseSnapshot, noopGetFileAttributes, noopGetAdsAttributes)
t.Logf("base snapshot saved as %v", id.Str())
res := NewRestorer(repo, sn, baseOptions)
@ -969,7 +1038,7 @@ func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSna
rtest.OK(t, err)
// overwrite snapshot
sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetGenericAttributes)
sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetFileAttributes, noopGetAdsAttributes)
t.Logf("overwrite snapshot saved as %v", id.Str())
res = NewRestorer(repo, sn, overwriteOptions)
countRestoredFiles, err := res.RestoreTo(ctx, tempdir)
@ -1252,7 +1321,7 @@ func TestRestoreModified(t *testing.T) {
defer cancel()
for _, snapshot := range snapshots {
sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes)
sn, id := saveSnapshot(t, repo, snapshot, noopGetFileAttributes, noopGetAdsAttributes)
t.Logf("snapshot saved as %v", id.Str())
res := NewRestorer(repo, sn, Options{Overwrite: OverwriteIfChanged})
@ -1279,7 +1348,7 @@ func TestRestoreIfChanged(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes)
sn, id := saveSnapshot(t, repo, snapshot, noopGetFileAttributes, noopGetAdsAttributes)
t.Logf("snapshot saved as %v", id.Str())
res := NewRestorer(repo, sn, Options{})
@ -1336,7 +1405,7 @@ func TestRestoreDryRun(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes)
sn, id := saveSnapshot(t, repo, snapshot, noopGetFileAttributes, noopGetAdsAttributes)
t.Logf("snapshot saved as %v", id.Str())
res := NewRestorer(repo, sn, Options{DryRun: true})
@ -1365,7 +1434,7 @@ func TestRestoreDryRunDelete(t *testing.T) {
rtest.OK(t, err)
rtest.OK(t, f.Close())
sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes)
sn, _ := saveSnapshot(t, repo, snapshot, noopGetFileAttributes, noopGetAdsAttributes)
res := NewRestorer(repo, sn, Options{DryRun: true, Delete: true})
_, err = res.RestoreTo(ctx, tempdir)
rtest.OK(t, err)
@ -1417,7 +1486,7 @@ func TestRestoreDelete(t *testing.T) {
},
"anotherfile": File{Data: "content: file\n"},
},
}, noopGetGenericAttributes)
}, noopGetFileAttributes, noopGetAdsAttributes)
// should delete files that no longer exist in the snapshot
deleteSn, _ := saveSnapshot(t, repo, Snapshot{
@ -1429,7 +1498,7 @@ func TestRestoreDelete(t *testing.T) {
},
},
},
}, noopGetGenericAttributes)
}, noopGetFileAttributes, noopGetAdsAttributes)
tests := []struct {
selectFilter func(item string, isDir bool) (selectedForRestore bool, childMayBeSelected bool)
@ -1524,7 +1593,7 @@ func TestRestoreToFile(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sn, _ := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes)
sn, _ := saveSnapshot(t, repo, snapshot, noopGetFileAttributes, noopGetAdsAttributes)
res := NewRestorer(repo, sn, Options{})
_, err := res.RestoreTo(ctx, tempdir)
rtest.Assert(t, strings.Contains(err.Error(), "cannot create target directory"), "unexpected error %v", err)

View file

@ -3,8 +3,21 @@
package restorer
import "github.com/restic/restic/internal/restic"
// toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the
// uppercase version of the string. On all other systems, it returns the unmodified filename.
func toComparableFilename(path string) string {
return path
}
// addFile adds the file to restorer's progress tracker
func (res *Restorer) addFile(_ *restic.Node, size uint64) {
res.opts.Progress.AddFile(size)
}
// addSkippedFile adds the skipped file to restorer's progress tracker.
// If the node represents an ads file, it skips the file count.
func (res *Restorer) addSkippedFile(_ *restic.Node, location string, size uint64) {
res.opts.Progress.AddSkippedFile(location, size)
}

View file

@ -29,7 +29,7 @@ func TestRestorerRestoreEmptyHardlinkedFields(t *testing.T) {
},
},
},
}, noopGetGenericAttributes)
}, noopGetFileAttributes, noopGetAdsAttributes)
res := NewRestorer(repo, sn, Options{})
@ -86,7 +86,7 @@ func testRestorerProgressBar(t *testing.T, dryRun bool) {
},
"file2": File{Links: 1, Inode: 2, Data: "example"},
},
}, noopGetGenericAttributes)
}, noopGetFileAttributes, noopGetAdsAttributes)
mock := &printerMock{}
progress := restoreui.NewProgress(mock, 0)
@ -122,7 +122,7 @@ func TestRestorePermissions(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sn, id := saveSnapshot(t, repo, snapshot, noopGetGenericAttributes)
sn, id := saveSnapshot(t, repo, snapshot, noopGetFileAttributes, noopGetAdsAttributes)
t.Logf("snapshot saved as %v", id.Str())
res := NewRestorer(repo, sn, Options{})

View file

@ -3,7 +3,11 @@
package restorer
import "strings"
import (
"strings"
"github.com/restic/restic/internal/restic"
)
// toComparableFilename returns a filename suitable for equality checks. On Windows, it returns the
// uppercase version of the string. On all other systems, it returns the unmodified filename.
@ -11,3 +15,22 @@ func toComparableFilename(path string) string {
// apparently NTFS internally uppercases filenames for comparison
return strings.ToUpper(path)
}
// addFile adds the file to restorer's progress tracker.
// If the node represents an ads file, it only adds the size without counting the ads file.
func (res *Restorer) addFile(node *restic.Node, size uint64) {
if node.IsMainFile() {
res.opts.Progress.AddFile(size)
} else {
// If this is not the main file, we just want to update the size and not the count.
res.opts.Progress.AddSize(size)
}
}
// addSkippedFile adds the skipped file to restorer's progress tracker.
// If the node represents an ads file, it skips the file count.
func (res *Restorer) addSkippedFile(node *restic.Node, location string, size uint64) {
if node.IsMainFile() {
res.opts.Progress.AddSkippedFile(location, size)
}
}

View file

@ -9,7 +9,7 @@ import (
"math"
"os"
"path"
"path/filepath"
"strings"
"syscall"
"testing"
"time"
@ -18,11 +18,17 @@ import (
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
restoreui "github.com/restic/restic/internal/ui/restore"
"golang.org/x/sys/windows"
)
// Index of the main file stream for testing streams when the restoration order is different.
// This is mainly used to test scenarios in which the main file stream is restored after restoring another stream.
// Handling this scenario is important as creating the main file will usually replace the existing streams.
// '-1' is used because this will allow us to handle the ads stream indexes as they are. We just need to insert the main file index in the place we want it to be restored.
const MAIN_STREAM_ORDER_INDEX = -1
func getBlockCount(t *testing.T, filename string) int64 {
libkernel32 := windows.NewLazySystemDLL("kernel32.dll")
err := libkernel32.Load()
@ -49,12 +55,15 @@ type DataStreamInfo struct {
data string
}
type NodeInfo struct {
DataStreamInfo
parentDir string
attributes FileAttributes
Exists bool
IsDirectory bool
type NodeTestInfo struct {
DataStreamInfo //The main data stream of the file
parentDir string
attributes *FileAttributes
IsDirectory bool
//The order for restoration of streams in Ads streams
//We also include the main stream index (-1) in the order to indicate when the main file should be restored.
StreamRestoreOrder []int
AdsStreams []DataStreamInfo //Alternate streams of the node
}
func TestFileAttributeCombination(t *testing.T) {
@ -71,27 +80,7 @@ func testFileAttributeCombination(t *testing.T, isEmpty bool) {
attributeCombinations := generateCombinations(5, []bool{})
fileName := "TestFile.txt"
// Iterate through each attribute combination
for _, attr1 := range attributeCombinations {
//Set up the required file information
fileInfo := NodeInfo{
DataStreamInfo: getDataStreamInfo(isEmpty, fileName),
parentDir: "dir",
attributes: getFileAttributes(attr1),
Exists: false,
}
//Get the current test name
testName := getCombinationTestName(fileInfo, fileName, fileInfo.attributes)
//Run test
t.Run(testName, func(t *testing.T) {
mainFilePath := runAttributeTests(t, fileInfo, fileInfo.attributes)
verifyFileRestores(isEmpty, mainFilePath, t, fileInfo)
})
}
testAttributeCombinations(t, attributeCombinations, fileName, isEmpty, false, false, NodeTestInfo{})
}
func generateCombinations(n int, prefix []bool) [][]bool {
@ -112,23 +101,68 @@ func generateCombinations(n int, prefix []bool) [][]bool {
return append(permsTrue, permsFalse...)
}
func getDataStreamInfo(isEmpty bool, fileName string) DataStreamInfo {
func testAttributeCombinations(t *testing.T, attributeCombinations [][]bool, nodeName string, isEmpty, isDirectory, createExisting bool, existingNode NodeTestInfo) {
// Iterate through each attribute combination
for _, attr1 := range attributeCombinations {
//Set up the node that needs to be restored
nodeInfo := NodeTestInfo{
DataStreamInfo: getDummyDataStream(isEmpty || isDirectory, nodeName, false),
parentDir: "dir",
attributes: convertToFileAttributes(attr1, isDirectory),
IsDirectory: isDirectory,
}
//Get the current test name
testName := getCombinationTestName(nodeInfo, nodeName, createExisting, existingNode)
//Run test
t.Run(testName, func(t *testing.T) {
// run the test and verify attributes
mainPath := runAttributeTests(t, nodeInfo, createExisting, existingNode)
//verify node restoration
verifyRestores(t, isEmpty || isDirectory, mainPath, nodeInfo.DataStreamInfo)
})
}
}
func getDummyDataStream(isEmptyOrDirectory bool, mainStreamName string, isExisting bool) DataStreamInfo {
var dataStreamInfo DataStreamInfo
if isEmpty {
// Set only the name if the node is empty or is a directory.
if isEmptyOrDirectory {
dataStreamInfo = DataStreamInfo{
name: fileName,
name: mainStreamName,
}
} else {
data := "Main file data stream."
if isExisting {
//Use different data for existing files
data = "Existing file data"
}
dataStreamInfo = DataStreamInfo{
name: fileName,
data: "Main file data stream.",
name: mainStreamName,
data: data,
}
}
return dataStreamInfo
}
func getFileAttributes(values []bool) FileAttributes {
return FileAttributes{
// Convert boolean values to file attributes
func convertToFileAttributes(values []bool, isDirectory bool) *FileAttributes {
if isDirectory {
return &FileAttributes{
// readonly not valid for directories
Hidden: values[0],
System: values[1],
Archive: values[2],
Encrypted: values[3],
}
}
return &FileAttributes{
ReadOnly: values[0],
Hidden: values[1],
System: values[2],
@ -137,7 +171,8 @@ func getFileAttributes(values []bool) FileAttributes {
}
}
func getCombinationTestName(fi NodeInfo, fileName string, overwriteAttr FileAttributes) string {
// generate the test name for the provided attribute combination
func getCombinationTestName(fi NodeTestInfo, fileName string, createExisiting bool, existingNode NodeTestInfo) string {
if fi.attributes.ReadOnly {
fileName += "-ReadOnly"
}
@ -153,105 +188,61 @@ func getCombinationTestName(fi NodeInfo, fileName string, overwriteAttr FileAttr
if fi.attributes.Encrypted {
fileName += "-Encrypted"
}
if fi.Exists {
fileName += "-Overwrite"
if overwriteAttr.ReadOnly {
fileName += "-R"
}
if overwriteAttr.Hidden {
fileName += "-H"
}
if overwriteAttr.System {
fileName += "-S"
}
if overwriteAttr.Archive {
fileName += "-A"
}
if overwriteAttr.Encrypted {
fileName += "-E"
}
if !createExisiting {
return fileName
}
// Additional name for the existing file attributes test
fileName += "-Overwrite"
if existingNode.attributes.ReadOnly {
fileName += "-R"
}
if existingNode.attributes.Hidden {
fileName += "-H"
}
if existingNode.attributes.System {
fileName += "-S"
}
if existingNode.attributes.Archive {
fileName += "-A"
}
if existingNode.attributes.Encrypted {
fileName += "-E"
}
return fileName
}
func runAttributeTests(t *testing.T, fileInfo NodeInfo, existingFileAttr FileAttributes) string {
func runAttributeTests(t *testing.T, fileInfo NodeTestInfo, createExisting bool, existingNodeInfo NodeTestInfo) string {
testDir := t.TempDir()
res, _ := setupWithFileAttributes(t, fileInfo, testDir, existingFileAttr)
runRestorerTest(t, fileInfo, testDir, createExisting, existingNodeInfo)
mainFilePath := path.Join(testDir, fileInfo.parentDir, fileInfo.name)
verifyAttributes(t, mainFilePath, fileInfo.attributes)
return mainFilePath
}
func runRestorerTest(t *testing.T, nodeInfo NodeTestInfo, testDir string, createExisting bool, existingNodeInfo NodeTestInfo) {
res := setup(t, nodeInfo, testDir, createExisting, existingNodeInfo)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err := res.RestoreTo(ctx, testDir)
rtest.OK(t, err)
mainFilePath := path.Join(testDir, fileInfo.parentDir, fileInfo.name)
//Verify restore
verifyFileAttributes(t, mainFilePath, fileInfo.attributes)
return mainFilePath
}
func setupWithFileAttributes(t *testing.T, nodeInfo NodeInfo, testDir string, existingFileAttr FileAttributes) (*Restorer, []int) {
func setup(t *testing.T, nodeInfo NodeTestInfo, testDir string, createExisitingFile bool, existingNodeInfo NodeTestInfo) *Restorer {
t.Helper()
if nodeInfo.Exists {
if !nodeInfo.IsDirectory {
err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir), os.ModeDir)
rtest.OK(t, err)
filepath := path.Join(testDir, nodeInfo.parentDir, nodeInfo.name)
if existingFileAttr.Encrypted {
err := createEncryptedFileWriteData(filepath, nodeInfo)
rtest.OK(t, err)
} else {
// Write the data to the file
file, err := os.OpenFile(path.Clean(filepath), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
rtest.OK(t, err)
_, err = file.Write([]byte(nodeInfo.data))
rtest.OK(t, err)
err = file.Close()
rtest.OK(t, err)
}
} else {
err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name), os.ModeDir)
rtest.OK(t, err)
}
pathPointer, err := syscall.UTF16PtrFromString(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name))
rtest.OK(t, err)
syscall.SetFileAttributes(pathPointer, getAttributeValue(&existingFileAttr))
if createExisitingFile {
createExisting(t, testDir, existingNodeInfo)
}
index := 0
if !nodeInfo.IsDirectory && nodeInfo.StreamRestoreOrder == nil {
nodeInfo.StreamRestoreOrder = []int{MAIN_STREAM_ORDER_INDEX}
}
order := []int{}
streams := []DataStreamInfo{}
if !nodeInfo.IsDirectory {
order = append(order, index)
index++
streams = append(streams, nodeInfo.DataStreamInfo)
}
return setup(t, getNodes(nodeInfo.parentDir, nodeInfo.name, order, streams, nodeInfo.IsDirectory, &nodeInfo.attributes)), order
}
nodesMap := getNodes(nodeInfo.parentDir, nodeInfo)
func createEncryptedFileWriteData(filepath string, fileInfo NodeInfo) (err error) {
var ptr *uint16
if ptr, err = windows.UTF16PtrFromString(filepath); err != nil {
return err
}
var handle windows.Handle
//Create the file with encrypted flag
if handle, err = windows.CreateFile(ptr, uint32(windows.GENERIC_READ|windows.GENERIC_WRITE), uint32(windows.FILE_SHARE_READ), nil, uint32(windows.CREATE_ALWAYS), windows.FILE_ATTRIBUTE_ENCRYPTED, 0); err != nil {
return err
}
//Write data to file
if _, err = windows.Write(handle, []byte(fileInfo.data)); err != nil {
return err
}
//Close handle
return windows.CloseHandle(handle)
}
func setup(t *testing.T, nodesMap map[string]Node) *Restorer {
repo := repository.TestRepository(t)
getFileAttributes := func(attr *FileAttributes, isDir bool) (genericAttributes map[restic.GenericAttributeType]json.RawMessage) {
if attr == nil {
return
@ -263,14 +254,43 @@ func setup(t *testing.T, nodesMap map[string]Node) *Restorer {
//If the node is a directory add FILE_ATTRIBUTE_DIRECTORY to attributes
fileattr |= windows.FILE_ATTRIBUTE_DIRECTORY
}
attrs, err := restic.WindowsAttrsToGenericAttributes(restic.WindowsAttributes{FileAttributes: &fileattr})
test.OK(t, err)
rtest.OK(t, err)
return attrs
}
getAdsAttributes := func(path string, hasAds, isAds bool) map[restic.GenericAttributeType]json.RawMessage {
if isAds {
windowsAttr := restic.WindowsAttributes{
IsADS: &isAds,
}
attrs, err := restic.WindowsAttrsToGenericAttributes(windowsAttr)
rtest.OK(t, err)
return attrs
} else if hasAds {
//Find ads names by recursively searching through nodes
//This is needed when multiple levels of parent directories are defined for ads file
adsNames := findAdsNamesRecursively(nodesMap, path, []string{})
windowsAttr := restic.WindowsAttributes{
HasADS: &adsNames,
}
attrs, err := restic.WindowsAttrsToGenericAttributes(windowsAttr)
rtest.OK(t, err)
return attrs
} else {
return map[restic.GenericAttributeType]json.RawMessage{}
}
}
repo := repository.TestRepository(t)
sn, _ := saveSnapshot(t, repo, Snapshot{
Nodes: nodesMap,
}, getFileAttributes)
res := NewRestorer(repo, sn, Options{})
}, getFileAttributes, getAdsAttributes)
mock := &printerMock{}
progress := restoreui.NewProgress(mock, 0)
res := NewRestorer(repo, sn, Options{Progress: progress})
return res
}
@ -294,12 +314,63 @@ func getAttributeValue(attr *FileAttributes) uint32 {
return fileattr
}
func getNodes(dir string, mainNodeName string, order []int, streams []DataStreamInfo, isDirectory bool, attributes *FileAttributes) map[string]Node {
func createExisting(t *testing.T, testDir string, nodeInfo NodeTestInfo) {
//Create a directory or file for testing when the node already exists in the folder.
if !nodeInfo.IsDirectory {
err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir), os.ModeDir)
rtest.OK(t, err)
filepath := path.Join(testDir, nodeInfo.parentDir, nodeInfo.name)
createTestFile(t, nodeInfo.attributes.Encrypted, filepath, nodeInfo.DataStreamInfo)
} else {
err := os.MkdirAll(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name), os.ModeDir)
rtest.OK(t, err)
}
//Create ads streams if any
if len(nodeInfo.AdsStreams) > 0 {
for _, stream := range nodeInfo.AdsStreams {
filepath := path.Join(testDir, nodeInfo.parentDir, stream.name)
createTestFile(t, nodeInfo.attributes.Encrypted, filepath, stream)
}
}
//Set attributes
pathPointer, err := syscall.UTF16PtrFromString(path.Join(testDir, nodeInfo.parentDir, nodeInfo.name))
rtest.OK(t, err)
syscall.SetFileAttributes(pathPointer, getAttributeValue(nodeInfo.attributes))
}
func createTestFile(t *testing.T, isEncrypted bool, filepath string, stream DataStreamInfo) {
var attribute uint32 = windows.FILE_ATTRIBUTE_NORMAL
if isEncrypted {
attribute = windows.FILE_ATTRIBUTE_ENCRYPTED
}
var ptr *uint16
ptr, err := windows.UTF16PtrFromString(filepath)
rtest.OK(t, err)
//Create the file with attribute flag
handle, err := windows.CreateFile(ptr, uint32(windows.GENERIC_READ|windows.GENERIC_WRITE), uint32(windows.FILE_SHARE_READ), nil, uint32(windows.CREATE_ALWAYS), attribute, 0)
rtest.OK(t, err)
//Write data to file
_, err = windows.Write(handle, []byte(stream.data))
rtest.OK(t, err)
//Close handle
rtest.OK(t, windows.CloseHandle(handle))
}
func getNodes(dir string, node NodeTestInfo) map[string]Node {
var mode os.FileMode
if isDirectory {
if node.IsDirectory {
mode = os.FileMode(2147484159)
} else {
if attributes != nil && attributes.ReadOnly {
if node.attributes != nil && node.attributes.ReadOnly {
mode = os.FileMode(0o444)
} else {
mode = os.FileMode(0o666)
@ -308,32 +379,45 @@ func getNodes(dir string, mainNodeName string, order []int, streams []DataStream
getFileNodes := func() map[string]Node {
nodes := map[string]Node{}
if isDirectory {
if node.IsDirectory {
//Add a directory node at the same level as the other streams
nodes[mainNodeName] = Dir{
nodes[node.name] = Dir{
ModTime: time.Now(),
attributes: attributes,
hasAds: len(node.AdsStreams) > 1,
attributes: node.attributes,
Mode: mode,
}
}
if len(streams) > 0 {
for _, index := range order {
stream := streams[index]
var attr *FileAttributes = nil
if mainNodeName == stream.name {
attr = attributes
} else if attributes != nil && attributes.Encrypted {
//Set encrypted attribute
attr = &FileAttributes{Encrypted: true}
// Add nodes to the node map in the order we want.
// This ensures the restoration of nodes in the specific order.
for _, index := range node.StreamRestoreOrder {
if index == MAIN_STREAM_ORDER_INDEX && !node.IsDirectory {
//If main file then use the data stream from nodeinfo
nodes[node.DataStreamInfo.name] = File{
ModTime: time.Now(),
Data: node.DataStreamInfo.data,
Mode: mode,
attributes: node.attributes,
hasAds: len(node.AdsStreams) > 1,
isAds: false,
}
} else {
//Else take the node from the AdsStreams of the node
attr := &FileAttributes{}
if node.attributes != nil && node.attributes.Encrypted {
//Setting the encrypted attribute for ads streams.
//This is needed when an encrypted ads stream is restored first, we need to create the file with encrypted attribute.
attr.Encrypted = true
}
nodes[stream.name] = File{
nodes[node.AdsStreams[index].name] = File{
ModTime: time.Now(),
Data: stream.data,
Data: node.AdsStreams[index].data,
Mode: mode,
attributes: attr,
hasAds: false,
isAds: true,
}
}
}
@ -349,103 +433,72 @@ func getNodes(dir string, mainNodeName string, order []int, streams []DataStream
}
}
func verifyFileAttributes(t *testing.T, mainFilePath string, attr FileAttributes) {
func findAdsNamesRecursively(nodesMap map[string]Node, path string, adsNames []string) []string {
for name, node := range nodesMap {
if restic.TrimAds(name) == path && name != path {
adsNames = append(adsNames, strings.Replace(name, path, "", -1))
} else if dir, ok := node.(Dir); ok && len(dir.Nodes) > 0 {
adsNames = findAdsNamesRecursively(dir.Nodes, path, adsNames)
}
}
return adsNames
}
func verifyAttributes(t *testing.T, mainFilePath string, attr *FileAttributes) {
ptr, err := windows.UTF16PtrFromString(mainFilePath)
rtest.OK(t, err)
//Get file attributes using syscall
fileAttributes, err := syscall.GetFileAttributes(ptr)
rtest.OK(t, err)
//Test positive and negative scenarios
if attr.ReadOnly {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY != 0, "Expected read only attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY != 0, "Expected read only attibute.")
} else {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY == 0, "Unexpected read only attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_READONLY == 0, "Unexpected read only attibute.")
}
if attr.Hidden {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN != 0, "Expected hidden attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN != 0, "Expected hidden attibute.")
} else {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN == 0, "Unexpected hidden attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_HIDDEN == 0, "Unexpected hidden attibute.")
}
if attr.System {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM != 0, "Expected system attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM != 0, "Expected system attibute.")
} else {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM == 0, "Unexpected system attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_SYSTEM == 0, "Unexpected system attibute.")
}
if attr.Archive {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE != 0, "Expected archive attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE != 0, "Expected archive attibute.")
} else {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE == 0, "Unexpected archive attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ARCHIVE == 0, "Unexpected archive attibute.")
}
if attr.Encrypted {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED != 0, "Expected encrypted attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED != 0, "Expected encrypted attibute.")
} else {
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED == 0, "Unexpected encrypted attribute.")
rtest.Assert(t, fileAttributes&windows.FILE_ATTRIBUTE_ENCRYPTED == 0, "Unexpected encrypted attibute.")
}
}
func verifyFileRestores(isEmpty bool, mainFilePath string, t *testing.T, fileInfo NodeInfo) {
if isEmpty {
_, err1 := os.Stat(mainFilePath)
rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The file "+fileInfo.name+" does not exist")
} else {
func verifyRestores(t *testing.T, isEmptyOrDirectory bool, path string, dsInfo DataStreamInfo) {
fi, err1 := os.Stat(path)
rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The node "+dsInfo.name+" does not exist")
verifyMainFileRestore(t, mainFilePath, fileInfo)
//If the node is not a directory or should not be empty, check its contents.
if !isEmptyOrDirectory {
size := fi.Size()
rtest.Assert(t, size > 0, "The file "+dsInfo.name+" exists but is empty")
content, err := os.ReadFile(path)
rtest.OK(t, err)
rtest.Assert(t, string(content) == dsInfo.data, "The file "+dsInfo.name+" exists but the content is not overwritten")
}
}
func verifyMainFileRestore(t *testing.T, mainFilePath string, fileInfo NodeInfo) {
fi, err1 := os.Stat(mainFilePath)
rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The file "+fileInfo.name+" does not exist")
size := fi.Size()
rtest.Assert(t, size > 0, "The file "+fileInfo.name+" exists but is empty")
content, err := os.ReadFile(mainFilePath)
rtest.OK(t, err)
rtest.Assert(t, string(content) == fileInfo.data, "The file "+fileInfo.name+" exists but the content is not overwritten")
}
func TestDirAttributeCombination(t *testing.T) {
t.Parallel()
attributeCombinations := generateCombinations(4, []bool{})
dirName := "TestDir"
// Iterate through each attribute combination
for _, attr1 := range attributeCombinations {
//Set up the required directory information
dirInfo := NodeInfo{
DataStreamInfo: DataStreamInfo{
name: dirName,
},
parentDir: "dir",
attributes: getDirFileAttributes(attr1),
Exists: false,
IsDirectory: true,
}
//Get the current test name
testName := getCombinationTestName(dirInfo, dirName, dirInfo.attributes)
//Run test
t.Run(testName, func(t *testing.T) {
mainDirPath := runAttributeTests(t, dirInfo, dirInfo.attributes)
//Check directory exists
_, err1 := os.Stat(mainDirPath)
rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The directory "+dirInfo.name+" does not exist")
})
}
}
// getDirFileAttributes converts a slice of four booleans into the
// FileAttributes used for directories. ReadOnly is intentionally omitted
// because it is not a valid attribute for directories.
func getDirFileAttributes(values []bool) FileAttributes {
	return FileAttributes{
		// readonly not valid for directories
		Hidden:    values[0],
		System:    values[1],
		Archive:   values[2],
		Encrypted: values[3],
	}
}
func TestFileAttributeCombinationsOverwrite(t *testing.T) {
@ -460,39 +513,31 @@ func testFileAttributeCombinationsOverwrite(t *testing.T, isEmpty bool) {
t.Parallel()
//Get attribute combinations
attributeCombinations := generateCombinations(5, []bool{})
//Get overwrite file attribute combinations
//Get existing file attribute combinations
overwriteCombinations := generateCombinations(5, []bool{})
fileName := "TestOverwriteFile"
//Iterate through each attribute combination
for _, attr1 := range attributeCombinations {
testAttributeCombinationsOverwrite(t, attributeCombinations, overwriteCombinations, isEmpty, fileName, false)
}
fileInfo := NodeInfo{
DataStreamInfo: getDataStreamInfo(isEmpty, fileName),
func testAttributeCombinationsOverwrite(t *testing.T, attributeCombinations [][]bool, overwriteCombinations [][]bool, isEmpty bool, nodeName string, isDirectory bool) {
// Convert existing attributes boolean value combinations to FileAttributes list
existingFileAttribute := []FileAttributes{}
for _, overwrite := range overwriteCombinations {
existingFileAttribute = append(existingFileAttribute, *convertToFileAttributes(overwrite, isDirectory))
}
//Iterate through each existing attribute combination
for _, existingFileAttr := range existingFileAttribute {
exisitngNodeInfo := NodeTestInfo{
DataStreamInfo: getDummyDataStream(isEmpty || isDirectory, nodeName, true),
parentDir: "dir",
attributes: getFileAttributes(attr1),
Exists: true,
attributes: &existingFileAttr,
IsDirectory: isDirectory,
}
overwriteFileAttributes := []FileAttributes{}
for _, overwrite := range overwriteCombinations {
overwriteFileAttributes = append(overwriteFileAttributes, getFileAttributes(overwrite))
}
//Iterate through each overwrite attribute combination
for _, overwriteFileAttr := range overwriteFileAttributes {
//Get the test name
testName := getCombinationTestName(fileInfo, fileName, overwriteFileAttr)
//Run test
t.Run(testName, func(t *testing.T) {
mainFilePath := runAttributeTests(t, fileInfo, overwriteFileAttr)
verifyFileRestores(isEmpty, mainFilePath, t, fileInfo)
})
}
testAttributeCombinations(t, attributeCombinations, nodeName, isEmpty, isDirectory, true, exisitngNodeInfo)
}
}
@ -500,76 +545,182 @@ func TestDirAttributeCombinationsOverwrite(t *testing.T) {
t.Parallel()
//Get attribute combinations
attributeCombinations := generateCombinations(4, []bool{})
//Get overwrite dir attribute combinations
//Get existing dir attribute combinations
overwriteCombinations := generateCombinations(4, []bool{})
dirName := "TestOverwriteDir"
//Iterate through each attribute combination
for _, attr1 := range attributeCombinations {
testAttributeCombinationsOverwrite(t, attributeCombinations, overwriteCombinations, true, dirName, true)
}
dirInfo := NodeInfo{
DataStreamInfo: DataStreamInfo{
name: dirName,
},
parentDir: "dir",
attributes: getDirFileAttributes(attr1),
Exists: true,
IsDirectory: true,
}
func TestOrderedAdsFile(t *testing.T) {
dataStreams := []DataStreamInfo{
{"OrderedAdsFile.text:datastream1:$DATA", "First data stream."},
{"OrderedAdsFile.text:datastream2:$DATA", "Second data stream."},
}
overwriteDirFileAttributes := []FileAttributes{}
var tests = map[string]struct {
fileOrder []int
Exists bool
}{
"main-stream-first": {
fileOrder: []int{MAIN_STREAM_ORDER_INDEX, 0, 1},
},
"second-stream-first": {
fileOrder: []int{0, MAIN_STREAM_ORDER_INDEX, 1},
},
"main-stream-first-already-exists": {
fileOrder: []int{MAIN_STREAM_ORDER_INDEX, 0, 1},
Exists: true,
},
"second-stream-first-already-exists": {
fileOrder: []int{0, MAIN_STREAM_ORDER_INDEX, 1},
Exists: true,
},
}
for _, overwrite := range overwriteCombinations {
overwriteDirFileAttributes = append(overwriteDirFileAttributes, getDirFileAttributes(overwrite))
}
mainStreamName := "OrderedAdsFile.text"
dir := "dir"
//Iterate through each overwrite attribute combinations
for _, overwriteDirAttr := range overwriteDirFileAttributes {
//Get the test name
testName := getCombinationTestName(dirInfo, dirName, overwriteDirAttr)
for name, test := range tests {
t.Run(name, func(t *testing.T) {
tempdir := rtest.TempDir(t)
//Run test
t.Run(testName, func(t *testing.T) {
mainDirPath := runAttributeTests(t, dirInfo, dirInfo.attributes)
nodeInfo := NodeTestInfo{
parentDir: dir,
attributes: &FileAttributes{},
DataStreamInfo: getDummyDataStream(false, mainStreamName, false),
StreamRestoreOrder: test.fileOrder,
AdsStreams: dataStreams,
}
//Check directory exists
_, err1 := os.Stat(mainDirPath)
rtest.Assert(t, !errors.Is(err1, os.ErrNotExist), "The directory "+dirInfo.name+" does not exist")
})
}
exisitingNode := NodeTestInfo{}
if test.Exists {
exisitingNode = NodeTestInfo{
parentDir: dir,
attributes: &FileAttributes{},
DataStreamInfo: getDummyDataStream(false, mainStreamName, true),
StreamRestoreOrder: test.fileOrder,
AdsStreams: dataStreams,
}
}
runRestorerTest(t, nodeInfo, tempdir, test.Exists, exisitingNode)
verifyRestoreOrder(t, nodeInfo, tempdir)
})
}
}
func TestRestoreDeleteCaseInsensitive(t *testing.T) {
repo := repository.TestRepository(t)
tempdir := rtest.TempDir(t)
func verifyRestoreOrder(t *testing.T, nodeInfo NodeTestInfo, tempdir string) {
for _, fileIndex := range nodeInfo.StreamRestoreOrder {
sn, _ := saveSnapshot(t, repo, Snapshot{
Nodes: map[string]Node{
"anotherfile": File{Data: "content: file\n"},
},
}, noopGetGenericAttributes)
var stream DataStreamInfo
if fileIndex == MAIN_STREAM_ORDER_INDEX {
stream = nodeInfo.DataStreamInfo
} else {
stream = nodeInfo.AdsStreams[fileIndex]
}
// should delete files that no longer exist in the snapshot
deleteSn, _ := saveSnapshot(t, repo, Snapshot{
Nodes: map[string]Node{
"AnotherfilE": File{Data: "content: file\n"},
},
}, noopGetGenericAttributes)
res := NewRestorer(repo, sn, Options{})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err := res.RestoreTo(ctx, tempdir)
rtest.OK(t, err)
res = NewRestorer(repo, deleteSn, Options{Delete: true})
_, err = res.RestoreTo(ctx, tempdir)
rtest.OK(t, err)
// anotherfile must still exist
_, err = os.Stat(filepath.Join(tempdir, "anotherfile"))
rtest.OK(t, err)
fp := path.Join(tempdir, nodeInfo.parentDir, stream.name)
verifyRestores(t, false, fp, stream)
}
}
// TestExistingStreamRemoval restores a file over an existing on-disk file
// that carries four ADS entries while the snapshot only contains two.
// The two streams present in the snapshot must be rewritten with the
// restored content and the two stale streams must be removed.
func TestExistingStreamRemoval(t *testing.T) {
	tempdir := rtest.TempDir(t)
	dirName := "dir"
	mainFileName := "TestExistingStream.text"

	// Streams already present on disk before the restore runs.
	existingFileStreams := []DataStreamInfo{
		{"TestExistingStream.text:datastream1:$DATA", "Existing stream 1."},
		{"TestExistingStream.text:datastream2:$DATA", "Existing stream 2."},
		{"TestExistingStream.text:datastream3:$DATA", "Existing stream 3."},
		{"TestExistingStream.text:datastream4:$DATA", "Existing stream 4."},
	}
	// Streams contained in the snapshot being restored.
	restoringStreams := []DataStreamInfo{
		{"TestExistingStream.text:datastream1:$DATA", "First data stream."},
		{"TestExistingStream.text:datastream2:$DATA", "Second data stream."},
	}

	nodeInfo := NodeTestInfo{
		parentDir:  dirName,
		attributes: &FileAttributes{},
		DataStreamInfo: DataStreamInfo{
			name: mainFileName,
			data: "Main file data.",
		},
		// MAIN_STREAM_ORDER_INDEX: restore the main stream before the ADS.
		StreamRestoreOrder: []int{MAIN_STREAM_ORDER_INDEX, 0, 1},
		AdsStreams:         restoringStreams,
	}

	// Pre-existing node with the larger stream set that must be pruned.
	existingNodeInfo := NodeTestInfo{
		parentDir:  dirName,
		attributes: &FileAttributes{},
		DataStreamInfo: DataStreamInfo{
			name: mainFileName,
			data: "Existing main stream.",
		},
		StreamRestoreOrder: []int{MAIN_STREAM_ORDER_INDEX, 0, 1, 2, 3, 4},
		AdsStreams:         existingFileStreams}

	runRestorerTest(t, nodeInfo, tempdir, true, existingNodeInfo)
	// Stale streams must be gone; restored streams must match their content.
	verifyExistingStreamRemoval(t, existingFileStreams, tempdir, dirName, restoringStreams)

	dirPath := path.Join(tempdir, nodeInfo.parentDir, nodeInfo.name)
	verifyRestores(t, true, dirPath, nodeInfo.DataStreamInfo)
}
// verifyExistingStreamRemoval checks that pre-existing ADS entries which are
// not part of the restored stream set were deleted, and that every restored
// stream exists with the expected content.
func verifyExistingStreamRemoval(t *testing.T, existingFileStreams []DataStreamInfo, tempdir string, dirName string, restoredStreams []DataStreamInfo) {
	for _, existing := range existingFileStreams {
		if existsInStreamList(existing.name, restoredStreams) {
			continue
		}
		//Stream that doesn't exist in the restored stream list must have been removed.
		streamPath := path.Join(tempdir, dirName, existing.name)
		_, statErr := os.Stat(streamPath)
		rtest.Assert(t, errors.Is(statErr, os.ErrNotExist), "The file "+existing.name+" should not exist")
	}

	for _, restored := range restoredStreams {
		verifyRestores(t, false, path.Join(tempdir, dirName, restored.name), restored)
	}
}
// existsInStreamList reports whether a stream with the given name is
// present in streams.
func existsInStreamList(name string, streams []DataStreamInfo) bool {
	for i := range streams {
		if streams[i].name == name {
			return true
		}
	}
	return false
}
// TestAdsDirectory restores a directory that carries two Alternate Data
// Streams and verifies the directory itself plus both streams afterwards.
func TestAdsDirectory(t *testing.T) {
	adsStreams := []DataStreamInfo{
		{"TestDirStream:datastream1:$DATA", "First dir stream."},
		{"TestDirStream:datastream2:$DATA", "Second dir stream."},
	}
	dirNode := NodeTestInfo{
		parentDir:          "dir",
		attributes:         &FileAttributes{},
		DataStreamInfo:     DataStreamInfo{name: "TestDirStream"},
		IsDirectory:        true,
		StreamRestoreOrder: []int{0, 1},
		AdsStreams:         adsStreams,
	}

	tempDir := t.TempDir()
	runRestorerTest(t, dirNode, tempDir, false, NodeTestInfo{})

	// Each ADS must have been restored with its own content.
	for _, adsStream := range adsStreams {
		verifyRestores(t, false, path.Join(tempDir, dirNode.parentDir, adsStream.name), adsStream)
	}

	// The directory itself must exist (checked in empty/directory mode).
	verifyRestores(t, true, path.Join(tempDir, dirNode.parentDir, dirNode.name), dirNode.DataStreamInfo)
}

View file

@ -4,6 +4,9 @@ import (
"sync"
"time"
"encoding/json"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
)
@ -86,8 +89,15 @@ func (p *Progress) AddFile(size uint64) {
p.s.AllBytesTotal += size
}
// AddSize starts tracking a new file with the given size
func (p *Progress) AddSize(size uint64) {
p.m.Lock()
defer p.m.Unlock()
p.s.AllBytesTotal += size
}
// AddProgress accumulates the number of bytes written for a file
func (p *Progress) AddProgress(name string, action ItemAction, bytesWrittenPortion uint64, bytesTotal uint64) {
func (p *Progress) AddProgress(name string, action ItemAction, bytesWrittenPortion uint64, bytesTotal uint64, attrs map[restic.GenericAttributeType]json.RawMessage) {
if p == nil {
return
}
@ -105,7 +115,7 @@ func (p *Progress) AddProgress(name string, action ItemAction, bytesWrittenPorti
p.s.AllBytesWritten += bytesWrittenPortion
if entry.bytesWritten == entry.bytesTotal {
delete(p.progressInfoMap, name)
p.s.FilesFinished++
p.incrementFilesFinished(attrs)
p.printer.CompleteItem(action, name, bytesTotal)
}

View file

@ -4,7 +4,10 @@ import (
"testing"
"time"
"encoding/json"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
)
@ -94,9 +97,12 @@ func TestFirstProgressOnAFile(t *testing.T) {
expectedBytesWritten := uint64(5)
expectedBytesTotal := uint64(100)
attrs := map[restic.GenericAttributeType]json.RawMessage{
restic.TypeIsADS: json.RawMessage(`false`),
}
result, items, _ := testProgress(func(progress *Progress) bool {
progress.AddFile(expectedBytesTotal)
progress.AddProgress("test", ActionFileUpdated, expectedBytesWritten, expectedBytesTotal)
progress.AddProgress("test", ActionFileUpdated, expectedBytesWritten, expectedBytesTotal, attrs)
return false
})
test.Equals(t, printerTrace{
@ -107,12 +113,14 @@ func TestFirstProgressOnAFile(t *testing.T) {
func TestLastProgressOnAFile(t *testing.T) {
fileSize := uint64(100)
attrs := map[restic.GenericAttributeType]json.RawMessage{
restic.TypeIsADS: json.RawMessage(`false`),
}
result, items, _ := testProgress(func(progress *Progress) bool {
progress.AddFile(fileSize)
progress.AddProgress("test", ActionFileUpdated, 30, fileSize)
progress.AddProgress("test", ActionFileUpdated, 35, fileSize)
progress.AddProgress("test", ActionFileUpdated, 35, fileSize)
progress.AddProgress("test", ActionFileUpdated, 30, fileSize, attrs)
progress.AddProgress("test", ActionFileUpdated, 35, fileSize, attrs)
progress.AddProgress("test", ActionFileUpdated, 35, fileSize, attrs)
return false
})
test.Equals(t, printerTrace{
@ -125,13 +133,15 @@ func TestLastProgressOnAFile(t *testing.T) {
func TestLastProgressOnLastFile(t *testing.T) {
fileSize := uint64(100)
attrs := map[restic.GenericAttributeType]json.RawMessage{
restic.TypeIsADS: json.RawMessage(`false`),
}
result, items, _ := testProgress(func(progress *Progress) bool {
progress.AddFile(fileSize)
progress.AddFile(50)
progress.AddProgress("test1", ActionFileUpdated, 50, 50)
progress.AddProgress("test2", ActionFileUpdated, 50, fileSize)
progress.AddProgress("test2", ActionFileUpdated, 50, fileSize)
progress.AddProgress("test1", ActionFileUpdated, 50, 50, attrs)
progress.AddProgress("test2", ActionFileUpdated, 50, fileSize, attrs)
progress.AddProgress("test2", ActionFileUpdated, 50, fileSize, attrs)
return false
})
test.Equals(t, printerTrace{
@ -145,12 +155,14 @@ func TestLastProgressOnLastFile(t *testing.T) {
func TestSummaryOnSuccess(t *testing.T) {
fileSize := uint64(100)
attrs := map[restic.GenericAttributeType]json.RawMessage{
restic.TypeIsADS: json.RawMessage(`false`),
}
result, _, _ := testProgress(func(progress *Progress) bool {
progress.AddFile(fileSize)
progress.AddFile(50)
progress.AddProgress("test1", ActionFileUpdated, 50, 50)
progress.AddProgress("test2", ActionFileUpdated, fileSize, fileSize)
progress.AddProgress("test1", ActionFileUpdated, 50, 50, attrs)
progress.AddProgress("test2", ActionFileUpdated, fileSize, fileSize, attrs)
return true
})
test.Equals(t, printerTrace{
@ -160,12 +172,14 @@ func TestSummaryOnSuccess(t *testing.T) {
func TestSummaryOnErrors(t *testing.T) {
fileSize := uint64(100)
attrs := map[restic.GenericAttributeType]json.RawMessage{
restic.TypeIsADS: json.RawMessage(`false`),
}
result, _, _ := testProgress(func(progress *Progress) bool {
progress.AddFile(fileSize)
progress.AddFile(50)
progress.AddProgress("test1", ActionFileUpdated, 50, 50)
progress.AddProgress("test2", ActionFileUpdated, fileSize/2, fileSize)
progress.AddProgress("test1", ActionFileUpdated, 50, 50, attrs)
progress.AddProgress("test2", ActionFileUpdated, fileSize/2, fileSize, attrs)
return true
})
test.Equals(t, printerTrace{
@ -194,8 +208,11 @@ func TestProgressTypes(t *testing.T) {
_, items, _ := testProgress(func(progress *Progress) bool {
progress.AddFile(fileSize)
progress.AddFile(0)
progress.AddProgress("dir", ActionDirRestored, fileSize, fileSize)
progress.AddProgress("new", ActionFileRestored, 0, 0)
attrs := map[restic.GenericAttributeType]json.RawMessage{
restic.TypeIsADS: json.RawMessage(`false`),
}
progress.AddProgress("dir", ActionDirRestored, fileSize, fileSize, attrs)
progress.AddProgress("new", ActionFileRestored, 0, 0, attrs)
progress.ReportDeletion("del")
return true
})

View file

@ -0,0 +1,15 @@
//go:build !windows
// +build !windows
package restore
import (
"encoding/json"
"github.com/restic/restic/internal/restic"
)
// incrementFilesFinished increments the files finished count.
// On non-Windows platforms there are no Alternate Data Streams, so the
// generic attributes are ignored and every finished file is counted.
func (p *Progress) incrementFilesFinished(_ map[restic.GenericAttributeType]json.RawMessage) {
	p.s.FilesFinished++
}

View file

@ -0,0 +1,14 @@
package restore
import (
"encoding/json"
"github.com/restic/restic/internal/restic"
)
// incrementFilesFinished increments the files finished count if it is a main file.
// ADS entries (TypeIsADS == true) are excluded so each logical file is counted once.
func (p *Progress) incrementFilesFinished(attrs map[restic.GenericAttributeType]json.RawMessage) {
	// NOTE(review): compares the raw JSON bytes of the attribute; assumes the
	// TypeIsADS value is marshalled exactly as `true` with no surrounding
	// whitespace — TODO confirm against the code that writes this attribute.
	if string(attrs[restic.TypeIsADS]) != "true" {
		p.s.FilesFinished++
	}
}