From 6ba38e9a38c863bb58e9624ce1869050d404d33b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 11 May 2016 21:35:57 +0200 Subject: [PATCH 01/98] Add tests for Repack() --- src/restic/repository/prune.go | 14 ++ src/restic/repository/prune_test.go | 211 ++++++++++++++++++++++++++++ 2 files changed, 225 insertions(+) create mode 100644 src/restic/repository/prune.go create mode 100644 src/restic/repository/prune_test.go diff --git a/src/restic/repository/prune.go b/src/restic/repository/prune.go new file mode 100644 index 00000000..a5fd0898 --- /dev/null +++ b/src/restic/repository/prune.go @@ -0,0 +1,14 @@ +package repository + +import ( + "restic/backend" + "restic/debug" +) + +// Repack takes a list of packs together with a list of blobs contained in +// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved +// into a new pack. Afterwards, the packs are removed. +func Repack(repo *Repository, packs, keepBlobs backend.IDSet) error { + debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) + return nil +} diff --git a/src/restic/repository/prune_test.go b/src/restic/repository/prune_test.go new file mode 100644 index 00000000..227f1c73 --- /dev/null +++ b/src/restic/repository/prune_test.go @@ -0,0 +1,211 @@ +package repository + +import ( + "io" + "math/rand" + "restic/backend" + "restic/pack" + "testing" +) + +func randomSize(min, max int) int { + return rand.Intn(max-min) + min +} + +func random(t *testing.T, length int) []byte { + src := rand.New(rand.NewSource(int64(length))) + buf := make([]byte, length) + _, err := io.ReadFull(src, buf) + if err != nil { + t.Fatalf("unable to read %d random bytes: %v", length, err) + } + + return buf +} + +func createRandomBlobs(t *testing.T, repo *Repository, blobs int, pData float32) { + for i := 0; i < blobs; i++ { + var ( + tpe pack.BlobType + length int + ) + + if rand.Float32() < pData { + tpe = pack.Data + length = randomSize(50*1024, 2*1024*1024) 
// 50KiB to 2MiB of data + } else { + tpe = pack.Tree + length = randomSize(5*1024, 50*1024) // 5KiB to 50KiB + } + + _, err := repo.SaveAndEncrypt(tpe, random(t, length), nil) + if err != nil { + t.Fatalf("SaveFrom() error %v", err) + } + + if rand.Float32() < 0.2 { + if err = repo.Flush(); err != nil { + t.Fatalf("repo.Flush() returned error %v", err) + } + } + } + + if err := repo.Flush(); err != nil { + t.Fatalf("repo.Flush() returned error %v", err) + } +} + +// redundancy returns the amount of duplicate data in the repo. It only looks +// at all pack files. +func redundancy(t *testing.T, repo *Repository) float32 { + done := make(chan struct{}) + defer close(done) + + type redEntry struct { + count int + size int + } + red := make(map[backend.ID]redEntry) + + for id := range repo.List(backend.Data, done) { + entries, err := repo.ListPack(id) + if err != nil { + t.Fatalf("error listing pack %v: %v", id.Str(), err) + } + + for _, e := range entries { + updatedEntry := redEntry{ + count: 1, + size: int(e.Length), + } + + if oldEntry, ok := red[e.ID]; ok { + updatedEntry.count += oldEntry.count + + if updatedEntry.size != oldEntry.size { + t.Fatalf("sizes do not match: %v != %v", updatedEntry.size, oldEntry.size) + } + } + + red[e.ID] = updatedEntry + } + } + + totalBytes := 0 + redundantBytes := 0 + for _, v := range red { + totalBytes += v.count * v.size + + if v.count > 1 { + redundantBytes += (v.count - 1) * v.size + } + } + + return float32(redundantBytes) / float32(totalBytes) +} + +// selectBlobs returns a list of random blobs from the repository with probability p. 
+func selectBlobs(t *testing.T, repo *Repository, p float32) backend.IDSet { + done := make(chan struct{}) + defer close(done) + + blobs := backend.NewIDSet() + + for id := range repo.List(backend.Data, done) { + entries, err := repo.ListPack(id) + if err != nil { + t.Fatalf("error listing pack %v: %v", id, err) + } + + for _, entry := range entries { + if rand.Float32() <= p { + blobs.Insert(entry.ID) + } + } + } + + return blobs +} + +func listPacks(t *testing.T, repo *Repository) backend.IDSet { + done := make(chan struct{}) + defer close(done) + + list := backend.NewIDSet() + for id := range repo.List(backend.Data, done) { + list.Insert(id) + } + + return list +} + +func findPacksForBlobs(t *testing.T, repo *Repository, blobs backend.IDSet) backend.IDSet { + packs := backend.NewIDSet() + + idx := repo.Index() + for id := range blobs { + pb, err := idx.Lookup(id) + if err != nil { + t.Fatal(err) + } + + packs.Insert(pb.PackID) + } + + return packs +} + +func TestRepack(t *testing.T) { + repo, cleanup := TestRepository(t) + defer cleanup() + + createRandomBlobs(t, repo, rand.Intn(400), 0.7) + + packsBefore := listPacks(t, repo) + + // Running repack on empty ID sets should not do anything at all. + err := Repack(repo, nil, nil) + if err != nil { + t.Fatal(err) + } + + packsAfter := listPacks(t, repo) + + if !packsAfter.Equals(packsBefore) { + t.Fatalf("packs are not equal, Repack modified something. 
Before:\n %v\nAfter:\n %v", + packsBefore, packsAfter) + } + + if err := repo.SaveIndex(); err != nil { + t.Fatalf("repo.SaveIndex() %v", err) + } + + blobs := selectBlobs(t, repo, 0.2) + t.Logf("selected %d blobs: %v", len(blobs), blobs) + + packs := findPacksForBlobs(t, repo, blobs) + + err = Repack(repo, packs, blobs) + if err != nil { + t.Fatalf("Repack() error %v", err) + } + + packsAfter = listPacks(t, repo) + for id := range packs { + if packsAfter.Has(id) { + t.Errorf("pack %v still present although it should have been repacked and removed", id.Str()) + } + } + + idx := repo.Index() + for id := range blobs { + pb, err := idx.Lookup(id) + if err != nil { + t.Errorf("unable to find blob %v in repo", id.Str()) + } + + if packs.Has(pb.PackID) { + t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID) + } + } +} From 00139648a01facf085dd94a789ddd9d9f6a69138 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 11 May 2016 22:28:56 +0200 Subject: [PATCH 02/98] Implement Repack() --- src/restic/repository/prune.go | 132 ++++++++++++++++++++++++++++ src/restic/repository/prune_test.go | 93 +++++++------------- 2 files changed, 163 insertions(+), 62 deletions(-) diff --git a/src/restic/repository/prune.go b/src/restic/repository/prune.go index a5fd0898..dd02782b 100644 --- a/src/restic/repository/prune.go +++ b/src/restic/repository/prune.go @@ -1,8 +1,12 @@ package repository import ( + "fmt" + "os" "restic/backend" "restic/debug" + "restic/pack" + "restic/worker" ) // Repack takes a list of packs together with a list of blobs contained in @@ -10,5 +14,133 @@ import ( // into a new pack. Afterwards, the packs are removed. 
func Repack(repo *Repository, packs, keepBlobs backend.IDSet) error { debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) + + var buf []byte + for packID := range packs { + list, err := repo.ListPack(packID) + if err != nil { + return err + } + + debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), list) + + for _, blob := range list { + buf, err = repo.LoadBlob(blob.Type, blob.ID, buf) + if err != nil { + return err + } + debug.Log("Repack", " loaded blob %v", blob.ID.Str()) + + _, err = repo.SaveAndEncrypt(blob.Type, buf, &blob.ID) + if err != nil { + return err + } + + debug.Log("Repack", " saved blob %v", blob.ID.Str()) + } + } + + if err := repo.Flush(); err != nil { + return err + } + + for packID := range packs { + err := repo.Backend().Remove(backend.Data, packID.String()) + if err != nil { + debug.Log("Repack", "error removing pack %v: %v", packID.Str(), err) + return err + } + debug.Log("Repack", "removed pack %v", packID.Str()) + } + + return nil +} + +const rebuildIndexWorkers = 10 + +type loadBlobsResult struct { + packID backend.ID + entries []pack.Blob +} + +// loadBlobsFromAllPacks sends the contents of all packs to ch. +func loadBlobsFromAllPacks(repo *Repository, ch chan<- worker.Job, done <-chan struct{}) { + f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { + packID := job.Data.(backend.ID) + entries, err := repo.ListPack(packID) + return loadBlobsResult{ + packID: packID, + entries: entries, + }, err + } + + jobCh := make(chan worker.Job) + wp := worker.New(rebuildIndexWorkers, f, jobCh, ch) + + go func() { + for id := range repo.List(backend.Data, done) { + jobCh <- worker.Job{Data: id} + } + close(jobCh) + }() + + wp.Wait() +} + +// RebuildIndex lists all packs in the repo, writes a new index and removes all +// old indexes. This operation should only be done with an exclusive lock in +// place. 
+func RebuildIndex(repo *Repository) error { + debug.Log("RebuildIndex", "start rebuilding index") + + done := make(chan struct{}) + defer close(done) + + ch := make(chan worker.Job) + go loadBlobsFromAllPacks(repo, ch, done) + + idx := NewIndex() + for job := range ch { + id := job.Data.(backend.ID) + + if job.Error != nil { + fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error) + continue + } + + res := job.Result.(loadBlobsResult) + + for _, entry := range res.entries { + pb := PackedBlob{ + ID: entry.ID, + Type: entry.Type, + Length: entry.Length, + Offset: entry.Offset, + PackID: res.packID, + } + idx.Store(pb) + } + } + + oldIndexes := backend.NewIDSet() + for id := range repo.List(backend.Index, done) { + idx.AddToSupersedes(id) + oldIndexes.Insert(id) + } + + id, err := SaveIndex(repo, idx) + if err != nil { + debug.Log("RebuildIndex.RebuildIndex", "error saving index: %v", err) + return err + } + debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str()) + + for indexID := range oldIndexes { + err := repo.Backend().Remove(backend.Index, indexID.String()) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", indexID.Str(), err) + } + } + return nil } diff --git a/src/restic/repository/prune_test.go b/src/restic/repository/prune_test.go index 227f1c73..cfcc3aa7 100644 --- a/src/restic/repository/prune_test.go +++ b/src/restic/repository/prune_test.go @@ -55,55 +55,6 @@ func createRandomBlobs(t *testing.T, repo *Repository, blobs int, pData float32) } } -// redundancy returns the amount of duplicate data in the repo. It only looks -// at all pack files. 
-func redundancy(t *testing.T, repo *Repository) float32 { - done := make(chan struct{}) - defer close(done) - - type redEntry struct { - count int - size int - } - red := make(map[backend.ID]redEntry) - - for id := range repo.List(backend.Data, done) { - entries, err := repo.ListPack(id) - if err != nil { - t.Fatalf("error listing pack %v: %v", id.Str(), err) - } - - for _, e := range entries { - updatedEntry := redEntry{ - count: 1, - size: int(e.Length), - } - - if oldEntry, ok := red[e.ID]; ok { - updatedEntry.count += oldEntry.count - - if updatedEntry.size != oldEntry.size { - t.Fatalf("sizes do not match: %v != %v", updatedEntry.size, oldEntry.size) - } - } - - red[e.ID] = updatedEntry - } - } - - totalBytes := 0 - redundantBytes := 0 - for _, v := range red { - totalBytes += v.count * v.size - - if v.count > 1 { - redundantBytes += (v.count - 1) * v.size - } - } - - return float32(redundantBytes) / float32(totalBytes) -} - // selectBlobs returns a list of random blobs from the repository with probability p. 
func selectBlobs(t *testing.T, repo *Repository, p float32) backend.IDSet { done := make(chan struct{}) @@ -155,6 +106,32 @@ func findPacksForBlobs(t *testing.T, repo *Repository, blobs backend.IDSet) back return packs } +func repack(t *testing.T, repo *Repository, packs, blobs backend.IDSet) { + err := Repack(repo, packs, blobs) + if err != nil { + t.Fatal(err) + } +} + +func saveIndex(t *testing.T, repo *Repository) { + if err := repo.SaveIndex(); err != nil { + t.Fatalf("repo.SaveIndex() %v", err) + } +} + +func rebuildIndex(t *testing.T, repo *Repository) { + if err := RebuildIndex(repo); err != nil { + t.Fatalf("error rebuilding index: %v", err) + } +} + +func reloadIndex(t *testing.T, repo *Repository) { + repo.SetIndex(NewMasterIndex()) + if err := repo.LoadIndex(); err != nil { + t.Fatalf("error loading new index: %v", err) + } +} + func TestRepack(t *testing.T) { repo, cleanup := TestRepository(t) defer cleanup() @@ -164,10 +141,7 @@ func TestRepack(t *testing.T) { packsBefore := listPacks(t, repo) // Running repack on empty ID sets should not do anything at all. 
- err := Repack(repo, nil, nil) - if err != nil { - t.Fatal(err) - } + repack(t, repo, nil, nil) packsAfter := listPacks(t, repo) @@ -176,19 +150,14 @@ func TestRepack(t *testing.T) { packsBefore, packsAfter) } - if err := repo.SaveIndex(); err != nil { - t.Fatalf("repo.SaveIndex() %v", err) - } + saveIndex(t, repo) blobs := selectBlobs(t, repo, 0.2) - t.Logf("selected %d blobs: %v", len(blobs), blobs) - packs := findPacksForBlobs(t, repo, blobs) - err = Repack(repo, packs, blobs) - if err != nil { - t.Fatalf("Repack() error %v", err) - } + repack(t, repo, packs, blobs) + rebuildIndex(t, repo) + reloadIndex(t, repo) packsAfter = listPacks(t, repo) for id := range packs { From fa26ecc8f98d0756d77543ea47ad82501d5fc48f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 11 May 2016 22:30:32 +0200 Subject: [PATCH 03/98] Make `rebuild-index` use the code in package repository --- src/cmds/restic/cmd_rebuild_index.go | 100 +-------------------------- 1 file changed, 2 insertions(+), 98 deletions(-) diff --git a/src/cmds/restic/cmd_rebuild_index.go b/src/cmds/restic/cmd_rebuild_index.go index 8ac54d80..cb50c105 100644 --- a/src/cmds/restic/cmd_rebuild_index.go +++ b/src/cmds/restic/cmd_rebuild_index.go @@ -1,14 +1,6 @@ package main -import ( - "fmt" - "os" - "restic/backend" - "restic/debug" - "restic/pack" - "restic/repository" - "restic/worker" -) +import "restic/repository" type CmdRebuildIndex struct { global *GlobalOptions @@ -26,94 +18,6 @@ func init() { } } -const rebuildIndexWorkers = 10 - -func loadBlobsFromPacks(repo *repository.Repository) (packs map[backend.ID][]pack.Blob) { - done := make(chan struct{}) - defer close(done) - - f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { - return repo.ListPack(job.Data.(backend.ID)) - } - - jobCh := make(chan worker.Job) - resCh := make(chan worker.Job) - wp := worker.New(rebuildIndexWorkers, f, jobCh, resCh) - - go func() { - for id := range repo.List(backend.Data, done) { - jobCh <- 
worker.Job{Data: id} - } - close(jobCh) - }() - - packs = make(map[backend.ID][]pack.Blob) - for job := range resCh { - id := job.Data.(backend.ID) - - if job.Error != nil { - fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error) - continue - } - - entries := job.Result.([]pack.Blob) - packs[id] = entries - } - - wp.Wait() - - return packs -} - -func listIndexIDs(repo *repository.Repository) (list backend.IDs) { - done := make(chan struct{}) - for id := range repo.List(backend.Index, done) { - list = append(list, id) - } - - return list -} - -func (cmd CmdRebuildIndex) rebuildIndex() error { - debug.Log("RebuildIndex.RebuildIndex", "start rebuilding index") - - packs := loadBlobsFromPacks(cmd.repo) - cmd.global.Verbosef("loaded blobs from %d packs\n", len(packs)) - - idx := repository.NewIndex() - for packID, entries := range packs { - for _, entry := range entries { - pb := repository.PackedBlob{ - ID: entry.ID, - Type: entry.Type, - Length: entry.Length, - Offset: entry.Offset, - PackID: packID, - } - idx.Store(pb) - } - } - - oldIndexes := listIndexIDs(cmd.repo) - idx.AddToSupersedes(oldIndexes...) 
- cmd.global.Printf(" saving new index\n") - id, err := repository.SaveIndex(cmd.repo, idx) - if err != nil { - debug.Log("RebuildIndex.RebuildIndex", "error saving index: %v", err) - return err - } - debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str()) - - for _, indexID := range oldIndexes { - err := cmd.repo.Backend().Remove(backend.Index, indexID.String()) - if err != nil { - cmd.global.Warnf("unable to remove index %v: %v\n", indexID.Str(), err) - } - } - - return nil -} - func (cmd CmdRebuildIndex) Execute(args []string) error { repo, err := cmd.global.OpenRepository() if err != nil { @@ -127,5 +31,5 @@ func (cmd CmdRebuildIndex) Execute(args []string) error { return err } - return cmd.rebuildIndex() + return repository.RebuildIndex(repo) } From 122a0944a6b4bc5035d7ff69d5b4ab0a03e17040 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 11 May 2016 22:43:56 +0200 Subject: [PATCH 04/98] Do not repack blobs that shouldn't be kept --- src/restic/repository/prune.go | 4 ++++ src/restic/repository/prune_test.go | 29 ++++++++++++++++++++--------- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/restic/repository/prune.go b/src/restic/repository/prune.go index dd02782b..61f4d1af 100644 --- a/src/restic/repository/prune.go +++ b/src/restic/repository/prune.go @@ -25,6 +25,10 @@ func Repack(repo *Repository, packs, keepBlobs backend.IDSet) error { debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), list) for _, blob := range list { + if !keepBlobs.Has(blob.ID) { + continue + } + buf, err = repo.LoadBlob(blob.Type, blob.ID, buf) if err != nil { return err diff --git a/src/restic/repository/prune_test.go b/src/restic/repository/prune_test.go index cfcc3aa7..fc4099ff 100644 --- a/src/restic/repository/prune_test.go +++ b/src/restic/repository/prune_test.go @@ -55,12 +55,14 @@ func createRandomBlobs(t *testing.T, repo *Repository, blobs int, pData float32) } } -// selectBlobs returns a list of random blobs from 
the repository with probability p. -func selectBlobs(t *testing.T, repo *Repository, p float32) backend.IDSet { +// selectBlobs splits the list of all blobs randomly into two lists. A blob +// will be contained in the first one with probability p. +func selectBlobs(t *testing.T, repo *Repository, p float32) (list1, list2 backend.IDSet) { done := make(chan struct{}) defer close(done) - blobs := backend.NewIDSet() + list1 = backend.NewIDSet() + list2 = backend.NewIDSet() for id := range repo.List(backend.Data, done) { entries, err := repo.ListPack(id) @@ -70,12 +72,14 @@ func selectBlobs(t *testing.T, repo *Repository, p float32) backend.IDSet { for _, entry := range entries { if rand.Float32() <= p { - blobs.Insert(entry.ID) + list1.Insert(entry.ID) + } else { + list2.Insert(entry.ID) } } } - return blobs + return list1, list2 } func listPacks(t *testing.T, repo *Repository) backend.IDSet { @@ -152,10 +156,11 @@ func TestRepack(t *testing.T) { saveIndex(t, repo) - blobs := selectBlobs(t, repo, 0.2) - packs := findPacksForBlobs(t, repo, blobs) + removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) - repack(t, repo, packs, blobs) + packs := findPacksForBlobs(t, repo, keepBlobs) + + repack(t, repo, packs, keepBlobs) rebuildIndex(t, repo) reloadIndex(t, repo) @@ -167,7 +172,7 @@ func TestRepack(t *testing.T) { } idx := repo.Index() - for id := range blobs { + for id := range keepBlobs { pb, err := idx.Lookup(id) if err != nil { t.Errorf("unable to find blob %v in repo", id.Str()) @@ -177,4 +182,10 @@ func TestRepack(t *testing.T) { t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID) } } + + for id := range removeBlobs { + if _, err := idx.Lookup(id); err == nil { + t.Errorf("blob %v still contained in the repo", id.Str()) + } + } } From d5f42201c5e0ebc0782c7caf7508fc15ef488469 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 11 May 2016 23:00:21 +0200 Subject: [PATCH 05/98] Fix test for Repack --- src/restic/repository/prune.go | 2 ++ 
src/restic/repository/prune_test.go | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/restic/repository/prune.go b/src/restic/repository/prune.go index 61f4d1af..db874a0b 100644 --- a/src/restic/repository/prune.go +++ b/src/restic/repository/prune.go @@ -41,6 +41,8 @@ func Repack(repo *Repository, packs, keepBlobs backend.IDSet) error { } debug.Log("Repack", " saved blob %v", blob.ID.Str()) + + keepBlobs.Delete(blob.ID) } } diff --git a/src/restic/repository/prune_test.go b/src/restic/repository/prune_test.go index fc4099ff..accd88f1 100644 --- a/src/restic/repository/prune_test.go +++ b/src/restic/repository/prune_test.go @@ -158,14 +158,14 @@ func TestRepack(t *testing.T) { removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) - packs := findPacksForBlobs(t, repo, keepBlobs) + removePacks := findPacksForBlobs(t, repo, removeBlobs) - repack(t, repo, packs, keepBlobs) + repack(t, repo, removePacks, keepBlobs) rebuildIndex(t, repo) reloadIndex(t, repo) packsAfter = listPacks(t, repo) - for id := range packs { + for id := range removePacks { if packsAfter.Has(id) { t.Errorf("pack %v still present although it should have been repacked and removed", id.Str()) } @@ -178,7 +178,7 @@ func TestRepack(t *testing.T) { t.Errorf("unable to find blob %v in repo", id.Str()) } - if packs.Has(pb.PackID) { + if removePacks.Has(pb.PackID) { t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID) } } From 0e6c72ad1d44e771c8ad49f4ec53b3e5bbaaabdc Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 18:10:39 +0200 Subject: [PATCH 06/98] Implement Repack() --- src/restic/repository/prune.go | 51 +++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 16 deletions(-) diff --git a/src/restic/repository/prune.go b/src/restic/repository/prune.go index db874a0b..d4a481ec 100644 --- a/src/restic/repository/prune.go +++ b/src/restic/repository/prune.go @@ -1,9 +1,12 @@ package repository import ( + "bytes" "fmt" 
+ "io" "os" "restic/backend" + "restic/crypto" "restic/debug" "restic/pack" "restic/worker" @@ -11,38 +14,54 @@ import ( // Repack takes a list of packs together with a list of blobs contained in // these packs. Each pack is loaded and the blobs listed in keepBlobs is saved -// into a new pack. Afterwards, the packs are removed. -func Repack(repo *Repository, packs, keepBlobs backend.IDSet) error { +// into a new pack. Afterwards, the packs are removed. This operation requires +// an exclusive lock on the repo. +func Repack(repo *Repository, packs, keepBlobs backend.IDSet) (err error) { debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) - var buf []byte + buf := make([]byte, 0, maxPackSize) for packID := range packs { - list, err := repo.ListPack(packID) + // load the complete blob + h := backend.Handle{Type: backend.Data, Name: packID.String()} + + l, err := repo.Backend().Load(h, buf[:cap(buf)], 0) + if err == io.ErrUnexpectedEOF { + err = nil + buf = buf[:l] + } + if err != nil { return err } - debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), list) + debug.Log("Repack", "pack %v loaded (%d bytes)", packID.Str(), len(buf)) - for _, blob := range list { - if !keepBlobs.Has(blob.ID) { + unpck, err := pack.NewUnpacker(repo.Key(), bytes.NewReader(buf)) + if err != nil { + return err + } + + debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(unpck.Entries)) + var plaintext []byte + for _, entry := range unpck.Entries { + if !keepBlobs.Has(entry.ID) { continue } - buf, err = repo.LoadBlob(blob.Type, blob.ID, buf) - if err != nil { - return err - } - debug.Log("Repack", " loaded blob %v", blob.ID.Str()) - - _, err = repo.SaveAndEncrypt(blob.Type, buf, &blob.ID) + ciphertext := buf[entry.Offset : entry.Offset+entry.Length] + plaintext, err = crypto.Decrypt(repo.Key(), plaintext, ciphertext) if err != nil { return err } - debug.Log("Repack", " saved blob %v", blob.ID.Str()) + _, err = 
repo.SaveAndEncrypt(entry.Type, plaintext, &entry.ID) + if err != nil { + return err + } - keepBlobs.Delete(blob.ID) + debug.Log("Repack", " saved blob %v", entry.ID.Str()) + + keepBlobs.Delete(entry.ID) } } From d609e4a9862eaa777428a2ef4f14c62979e1e77f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 28 Jul 2016 21:41:18 +0200 Subject: [PATCH 07/98] Extended plaintext buffer if necessary --- src/restic/repository/prune.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/restic/repository/prune.go b/src/restic/repository/prune.go index d4a481ec..91584347 100644 --- a/src/restic/repository/prune.go +++ b/src/restic/repository/prune.go @@ -49,6 +49,11 @@ func Repack(repo *Repository, packs, keepBlobs backend.IDSet) (err error) { } ciphertext := buf[entry.Offset : entry.Offset+entry.Length] + + if cap(plaintext) < len(ciphertext) { + plaintext = make([]byte, len(ciphertext)) + } + plaintext, err = crypto.Decrypt(repo.Key(), plaintext, ciphertext) if err != nil { return err From f59ffcaeaebe0d1e52ce8a0549b907809fec9b01 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 30 Jul 2016 13:58:27 +0200 Subject: [PATCH 08/98] Correct comment --- src/restic/repository/prune.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/restic/repository/prune.go b/src/restic/repository/prune.go index 91584347..75f9f9ac 100644 --- a/src/restic/repository/prune.go +++ b/src/restic/repository/prune.go @@ -21,7 +21,7 @@ func Repack(repo *Repository, packs, keepBlobs backend.IDSet) (err error) { buf := make([]byte, 0, maxPackSize) for packID := range packs { - // load the complete blob + // load the complete pack h := backend.Handle{Type: backend.Data, Name: packID.String()} l, err := repo.Backend().Load(h, buf[:cap(buf)], 0) From 14db71d3fa43194c7d14ad7c199704fe296aacef Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 10:29:53 +0200 Subject: [PATCH 09/98] Move RandReader to repository package --- 
src/restic/repository/rand_reader.go | 79 ++++++++++++++++++++++++++++ src/restic/testing.go | 73 +------------------------ 2 files changed, 80 insertions(+), 72 deletions(-) create mode 100644 src/restic/repository/rand_reader.go diff --git a/src/restic/repository/rand_reader.go b/src/restic/repository/rand_reader.go new file mode 100644 index 00000000..201826a9 --- /dev/null +++ b/src/restic/repository/rand_reader.go @@ -0,0 +1,79 @@ +package repository + +import ( + "io" + "math/rand" +) + +// RandReader allows reading from a rand.Rand. +type RandReader struct { + rnd *rand.Rand + buf []byte +} + +// NewRandReader creates a new Reader from a random source. +func NewRandReader(rnd *rand.Rand) io.Reader { + return &RandReader{rnd: rnd, buf: make([]byte, 0, 7)} +} + +func (rd *RandReader) read(p []byte) (n int, err error) { + if len(p)%7 != 0 { + panic("invalid buffer length, not multiple of 7") + } + + rnd := rd.rnd + for i := 0; i < len(p); i += 7 { + val := rnd.Int63() + + p[i+0] = byte(val >> 0) + p[i+1] = byte(val >> 8) + p[i+2] = byte(val >> 16) + p[i+3] = byte(val >> 24) + p[i+4] = byte(val >> 32) + p[i+5] = byte(val >> 40) + p[i+6] = byte(val >> 48) + } + + return len(p), nil +} + +func (rd *RandReader) Read(p []byte) (int, error) { + // first, copy buffer to p + pos := copy(p, rd.buf) + copy(rd.buf, rd.buf[pos:]) + + // shorten buf and p accordingly + rd.buf = rd.buf[:len(rd.buf)-pos] + p = p[pos:] + + // if this is enough to fill p, return + if len(p) == 0 { + return pos, nil + } + + // load multiple of 7 byte + l := (len(p) / 7) * 7 + n, err := rd.read(p[:l]) + pos += n + if err != nil { + return pos, err + } + p = p[n:] + + // load 7 byte to temp buffer + rd.buf = rd.buf[:7] + n, err = rd.read(rd.buf) + if err != nil { + return pos, err + } + + // copy the remaining bytes from the buffer to p + n = copy(p, rd.buf) + pos += n + + // save the remaining bytes in rd.buf + n = copy(rd.buf, rd.buf[n:]) + rd.buf = rd.buf[:n] + + return pos, nil +} diff --git 
a/src/restic/testing.go b/src/restic/testing.go index fa2ab649..c0f1a0f7 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -13,80 +13,9 @@ import ( "github.com/restic/chunker" ) -type randReader struct { - rnd *rand.Rand - buf []byte -} - -func newRandReader(rnd *rand.Rand) io.Reader { - return &randReader{rnd: rnd, buf: make([]byte, 0, 7)} -} - -func (rd *randReader) read(p []byte) (n int, err error) { - if len(p)%7 != 0 { - panic("invalid buffer length, not multiple of 7") - } - - rnd := rd.rnd - for i := 0; i < len(p); i += 7 { - val := rnd.Int63() - - p[i+0] = byte(val >> 0) - p[i+1] = byte(val >> 8) - p[i+2] = byte(val >> 16) - p[i+3] = byte(val >> 24) - p[i+4] = byte(val >> 32) - p[i+5] = byte(val >> 40) - p[i+6] = byte(val >> 48) - } - - return len(p), nil -} - -func (rd *randReader) Read(p []byte) (int, error) { - // first, copy buffer to p - pos := copy(p, rd.buf) - copy(rd.buf, rd.buf[pos:]) - - // shorten buf and p accordingly - rd.buf = rd.buf[:len(rd.buf)-pos] - p = p[pos:] - - // if this is enough to fill p, return - if len(p) == 0 { - return pos, nil - } - - // load multiple of 7 byte - l := (len(p) / 7) * 7 - n, err := rd.read(p[:l]) - pos += n - if err != nil { - return pos, err - } - p = p[n:] - - // load 7 byte to temp buffer - rd.buf = rd.buf[:7] - n, err = rd.read(rd.buf) - if err != nil { - return pos, err - } - - // copy the remaining bytes from the buffer to p - n = copy(p, rd.buf) - pos += n - - // save the remaining bytes in rd.buf - n = copy(rd.buf, rd.buf[n:]) - rd.buf = rd.buf[:n] - - return pos, nil -} - // fakeFile returns a reader which yields deterministic pseudo-random data. func fakeFile(t testing.TB, seed, size int64) io.Reader { - return io.LimitReader(newRandReader(rand.New(rand.NewSource(seed))), size) + return io.LimitReader(repository.NewRandReader(rand.New(rand.NewSource(seed))), size) } // saveFile reads from rd and saves the blobs in the repository. 
The list of From 952f124238b470d02e8d37890ebee68d4bf60242 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 10:30:57 +0200 Subject: [PATCH 10/98] Use RandReader instead of rand directly This is a fix to be backwards-compatible with Go < 1.6. --- src/restic/repository/prune_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/restic/repository/prune_test.go b/src/restic/repository/prune_test.go index accd88f1..05277e4c 100644 --- a/src/restic/repository/prune_test.go +++ b/src/restic/repository/prune_test.go @@ -13,9 +13,9 @@ func randomSize(min, max int) int { } func random(t *testing.T, length int) []byte { - src := rand.New(rand.NewSource(int64(length))) + rd := NewRandReader(rand.New(rand.NewSource(int64(length)))) buf := make([]byte, length) - _, err := io.ReadFull(src, buf) + _, err := io.ReadFull(rd, buf) if err != nil { t.Fatalf("unable to read %d random bytes: %v", length, err) } From d9012b4a644230c1a758303acadebd3a740200af Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 10:58:09 +0200 Subject: [PATCH 11/98] Add trees recursively to test snapshot --- src/restic/testing.go | 56 ++++++++++++++++++++++++++++---------- src/restic/testing_test.go | 7 +++-- 2 files changed, 46 insertions(+), 17 deletions(-) diff --git a/src/restic/testing.go b/src/restic/testing.go index c0f1a0f7..08c9f6e7 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -43,28 +43,50 @@ func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs ba return blobs } -const maxFileSize = 1500000 -const maxSeed = 100 +const ( + maxFileSize = 1500000 + maxSeed = 20 + maxNodes = 32 +) // saveTree saves a tree of fake files in the repo and returns the ID. 
-func saveTree(t testing.TB, repo *repository.Repository, seed int64) backend.ID { +func saveTree(t testing.TB, repo *repository.Repository, seed int64, depth int) backend.ID { + t.Logf("create fake tree with seed %d, depth %d", seed, depth) + rnd := rand.NewSource(seed) - numNodes := int(rnd.Int63() % 64) + numNodes := int(rnd.Int63() % maxNodes) t.Logf("create %v nodes", numNodes) var tree Tree for i := 0; i < numNodes; i++ { - seed := rnd.Int63() % maxSeed - size := rnd.Int63() % maxFileSize - node := &Node{ - Name: fmt.Sprintf("file-%v", seed), - Type: "file", - Mode: 0644, - Size: uint64(size), + // randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4). + if depth > 1 && rnd.Int63()%4 == 0 { + treeSeed := rnd.Int63() % maxSeed + id := saveTree(t, repo, treeSeed, depth-1) + + node := &Node{ + Name: fmt.Sprintf("dir-%v", treeSeed), + Type: "dir", + Mode: 0755, + Subtree: &id, + } + + tree.Nodes = append(tree.Nodes, node) + continue } - node.Content = saveFile(t, repo, fakeFile(t, seed, size)) + fileSeed := rnd.Int63() % maxSeed + fileSize := rnd.Int63() % maxFileSize + + node := &Node{ + Name: fmt.Sprintf("file-%v", fileSeed), + Type: "file", + Mode: 0644, + Size: uint64(fileSize), + } + + node.Content = saveFile(t, repo, fakeFile(t, fileSeed, fileSize)) tree.Nodes = append(tree.Nodes, node) } @@ -78,8 +100,12 @@ func saveTree(t testing.TB, repo *repository.Repository, seed int64) backend.ID // TestCreateSnapshot creates a snapshot filled with fake data. The // fake data is generated deterministically from the timestamp `at`, which is -// also used as the snapshot's timestamp. -func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time) backend.ID { +// also used as the snapshot's timestamp. The tree's depth can be specified +// with the parameter depth. 
+func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int) backend.ID { + seed := at.Unix() + t.Logf("create fake snapshot at %s with seed %d", at, seed) + fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05")) snapshot, err := NewSnapshot([]string{fakedir}) if err != nil { @@ -87,7 +113,7 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time) } snapshot.Time = at - treeID := saveTree(t, repo, at.UnixNano()) + treeID := saveTree(t, repo, seed, depth) snapshot.Tree = &treeID id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot) diff --git a/src/restic/testing_test.go b/src/restic/testing_test.go index 8243a01a..fa71038a 100644 --- a/src/restic/testing_test.go +++ b/src/restic/testing_test.go @@ -10,14 +10,17 @@ import ( var testSnapshotTime = time.Unix(1460289341, 207401672) -const testCreateSnapshots = 3 +const ( + testCreateSnapshots = 3 + testDepth = 2 +) func TestCreateSnapshot(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() for i := 0; i < testCreateSnapshots; i++ { - restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second)) + restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth) } snapshots, err := restic.LoadAllSnapshots(repo) From b55ac2afd6b8c35bdf2fac4e472a3aef66d28659 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 11:04:04 +0200 Subject: [PATCH 12/98] Make test files in test repo less random --- src/restic/testing.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/restic/testing.go b/src/restic/testing.go index 08c9f6e7..75ac8959 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -21,6 +21,7 @@ func fakeFile(t testing.TB, seed, size int64) io.Reader { // saveFile reads from rd and saves the blobs in the repository. The list of // IDs is returned. 
func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs backend.IDs) { + blobs = backend.IDs{} ch := chunker.New(rd, repo.Config.ChunkerPolynomial) for { @@ -30,7 +31,7 @@ func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs ba } if err != nil { - t.Fatalf("unabel to save chunk in repo: %v", err) + t.Fatalf("unable to save chunk in repo: %v", err) } id, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil) @@ -45,7 +46,7 @@ func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs ba const ( maxFileSize = 1500000 - maxSeed = 20 + maxSeed = 32 maxNodes = 32 ) @@ -77,7 +78,7 @@ func saveTree(t testing.TB, repo *repository.Repository, seed int64, depth int) } fileSeed := rnd.Int63() % maxSeed - fileSize := rnd.Int63() % maxFileSize + fileSize := (maxFileSize / maxSeed) * fileSeed node := &Node{ Name: fmt.Sprintf("file-%v", fileSeed), From 6c2334f50561a5c550c5d1ed7e9508b200519cda Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 12:09:26 +0200 Subject: [PATCH 13/98] Make TestCreateSnapshot less verbose --- src/restic/testing.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/restic/testing.go b/src/restic/testing.go index 75ac8959..6cb9b54a 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -52,11 +52,8 @@ const ( // saveTree saves a tree of fake files in the repo and returns the ID. 
func saveTree(t testing.TB, repo *repository.Repository, seed int64, depth int) backend.ID { - t.Logf("create fake tree with seed %d, depth %d", seed, depth) - rnd := rand.NewSource(seed) numNodes := int(rnd.Int63() % maxNodes) - t.Logf("create %v nodes", numNodes) var tree Tree for i := 0; i < numNodes; i++ { From 5c32ae15c2b32ca37ff097ec5745da4e77039953 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 12:09:44 +0200 Subject: [PATCH 14/98] Move test checking repo code to checker package --- src/restic/checker/testing.go | 36 +++++++++++++++++++++++++++++++++++ src/restic/testing_test.go | 27 +------------------------- 2 files changed, 37 insertions(+), 26 deletions(-) create mode 100644 src/restic/checker/testing.go diff --git a/src/restic/checker/testing.go b/src/restic/checker/testing.go new file mode 100644 index 00000000..a2ac3345 --- /dev/null +++ b/src/restic/checker/testing.go @@ -0,0 +1,36 @@ +package checker + +import ( + "restic/repository" + "testing" +) + +// TestCheckRepo runs the checker on repo. 
+func TestCheckRepo(t testing.TB, repo *repository.Repository) { + chkr := New(repo) + + hints, errs := chkr.LoadIndex() + if len(errs) != 0 { + t.Fatalf("errors loading index: %v", errs) + } + + if len(hints) != 0 { + t.Fatalf("errors loading index: %v", hints) + } + + done := make(chan struct{}) + defer close(done) + errChan := make(chan error) + go chkr.Structure(errChan, done) + + for err := range errChan { + t.Error(err) + } + + errChan = make(chan error) + go chkr.ReadData(nil, errChan, done) + + for err := range errChan { + t.Error(err) + } +} diff --git a/src/restic/testing_test.go b/src/restic/testing_test.go index fa71038a..8ec68b7f 100644 --- a/src/restic/testing_test.go +++ b/src/restic/testing_test.go @@ -45,30 +45,5 @@ func TestCreateSnapshot(t *testing.T) { t.Fatalf("snapshot has zero tree ID") } - chkr := checker.New(repo) - - hints, errs := chkr.LoadIndex() - if len(errs) != 0 { - t.Fatalf("errors loading index: %v", errs) - } - - if len(hints) != 0 { - t.Fatalf("errors loading index: %v", hints) - } - - done := make(chan struct{}) - defer close(done) - errChan := make(chan error) - go chkr.Structure(errChan, done) - - for err := range errChan { - t.Error(err) - } - - errChan = make(chan error) - go chkr.ReadData(nil, errChan, done) - - for err := range errChan { - t.Error(err) - } + checker.TestCheckRepo(t, repo) } From fe79177b4074c6def16d9c17d5154af13979bda1 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 16:12:19 +0200 Subject: [PATCH 15/98] Make TestCreateSnapshot return the snapshot itself --- src/restic/testing.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/restic/testing.go b/src/restic/testing.go index 6cb9b54a..384c45e5 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -100,7 +100,7 @@ func saveTree(t testing.TB, repo *repository.Repository, seed int64, depth int) // fake data is generated deterministically from the timestamp `at`, which is // also used as the 
snapshot's timestamp. The tree's depth can be specified // with the parameter depth. -func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int) backend.ID { +func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int) *Snapshot { seed := at.Unix() t.Logf("create fake snapshot at %s with seed %d", at, seed) @@ -119,6 +119,8 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, t.Fatal(err) } + snapshot.id = &id + t.Logf("saved snapshot %v", id.Str()) err = repo.Flush() @@ -131,5 +133,5 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, t.Fatal(err) } - return id + return snapshot } From d5323223f4acdf551b70635fcba16801357b4361 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 16:27:36 +0200 Subject: [PATCH 16/98] Change repository Init() function to allow better testing --- src/restic/repository/config.go | 11 +++++------ src/restic/repository/config_test.go | 4 +++- src/restic/repository/repository.go | 14 +++++++++++++- 3 files changed, 21 insertions(+), 8 deletions(-) diff --git a/src/restic/repository/config.go b/src/restic/repository/config.go index a41517f3..abe1277e 100644 --- a/src/restic/repository/config.go +++ b/src/restic/repository/config.go @@ -7,9 +7,10 @@ import ( "errors" "io" - "github.com/restic/chunker" "restic/backend" "restic/debug" + + "github.com/restic/chunker" ) // Config contains the configuration for a repository. @@ -37,8 +38,8 @@ type JSONUnpackedLoader interface { } // CreateConfig creates a config file with a randomly selected polynomial and -// ID and saves the config in the repository. -func CreateConfig(r JSONUnpackedSaver) (Config, error) { +// ID. 
+func CreateConfig() (Config, error) { var ( err error cfg Config @@ -59,9 +60,7 @@ func CreateConfig(r JSONUnpackedSaver) (Config, error) { cfg.Version = RepoVersion debug.Log("Repo.CreateConfig", "New config: %#v", cfg) - - _, err = r.SaveJSONUnpacked(backend.Config, cfg) - return cfg, err + return cfg, nil } // LoadConfig returns loads, checks and returns the config for a repository. diff --git a/src/restic/repository/config_test.go b/src/restic/repository/config_test.go index 10fc61d9..71f2fd81 100644 --- a/src/restic/repository/config_test.go +++ b/src/restic/repository/config_test.go @@ -32,9 +32,11 @@ func TestConfig(t *testing.T) { return backend.ID{}, nil } - cfg1, err := repository.CreateConfig(saver(save)) + cfg1, err := repository.CreateConfig() OK(t, err) + _, err = saver(save).SaveJSONUnpacked(backend.Config, cfg1) + load := func(tpe backend.Type, id backend.ID, arg interface{}) error { Assert(t, tpe == backend.Config, "wrong backend type: got %v, wanted %v", diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index aa75d322..bdda7fe4 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -416,6 +416,17 @@ func (r *Repository) Init(password string) error { return errors.New("repository master key and config already initialized") } + cfg, err := CreateConfig() + if err != nil { + return err + } + + return r.init(password, cfg) +} + +// init creates a new master key with the supplied password and uses it to save +// the config into the repo. 
+func (r *Repository) init(password string, cfg Config) error { key, err := createMasterKey(r, password) if err != nil { return err @@ -424,7 +435,8 @@ func (r *Repository) Init(password string) error { r.key = key.master r.packerManager.key = key.master r.keyName = key.Name() - r.Config, err = CreateConfig(r) + r.Config = cfg + _, err = r.SaveJSONUnpacked(backend.Config, cfg) return err } From 4720a7d807fc26059cfa6750a6eb6c6d456126b8 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 17:45:22 +0200 Subject: [PATCH 17/98] Allow specifying chunker polynomial for tests --- src/restic/repository/config.go | 17 +++++++++++++++++ src/restic/repository/testing.go | 14 ++++++++++---- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/src/restic/repository/config.go b/src/restic/repository/config.go index abe1277e..46f739d3 100644 --- a/src/restic/repository/config.go +++ b/src/restic/repository/config.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "errors" "io" + "testing" "restic/backend" "restic/debug" @@ -63,6 +64,22 @@ func CreateConfig() (Config, error) { return cfg, nil } +// TestCreateConfig creates a config for use within tests. +func TestCreateConfig(t testing.TB, pol chunker.Pol) (cfg Config) { + cfg.ChunkerPolynomial = pol + + newID := make([]byte, repositoryIDSize) + _, err := io.ReadFull(rand.Reader, newID) + if err != nil { + t.Fatalf("unable to create random ID: %v", err) + } + + cfg.ID = hex.EncodeToString(newID) + cfg.Version = RepoVersion + + return cfg +} + // LoadConfig returns loads, checks and returns the config for a repository. 
func LoadConfig(r JSONUnpackedLoader) (Config, error) { var ( diff --git a/src/restic/repository/testing.go b/src/restic/repository/testing.go index f45714d8..f0a9913f 100644 --- a/src/restic/repository/testing.go +++ b/src/restic/repository/testing.go @@ -6,6 +6,8 @@ import ( "restic/backend/local" "restic/backend/mem" "testing" + + "github.com/restic/chunker" ) // TestBackend returns a fully configured in-memory backend. @@ -16,8 +18,11 @@ func TestBackend(t testing.TB) (be backend.Backend, cleanup func()) { // TestPassword is used for all repositories created by the Test* functions. const TestPassword = "geheim" +const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) + // TestRepositoryWithBackend returns a repository initialized with a test -// password. If be is nil, an in-memory backend is used. +// password. If be is nil, an in-memory backend is used. A constant polynomial +// is used for the chunker. func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, cleanup func()) { var beCleanup func() if be == nil { @@ -26,9 +31,10 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, r = New(be) - err := r.Init(TestPassword) + cfg := TestCreateConfig(t, testChunkerPol) + err := r.init(TestPassword, cfg) if err != nil { - t.Fatalf("TestRepopository(): initialize repo failed: %v", err) + t.Fatalf("TestRepository(): initialize repo failed: %v", err) } return r, func() { @@ -41,7 +47,7 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, // TestRepository returns a repository initialized with a test password on an // in-memory backend. When the environment variable RESTIC_TEST_REPO is set to // a non-existing directory, a local backend is created there and this is used -// instead. The directory is not removed. +// instead. The directory is not removed, but left there for inspection. 
func TestRepository(t testing.TB) (r *Repository, cleanup func()) { dir := os.Getenv("RESTIC_TEST_REPO") if dir != "" { From 22aa17091b055a3740e9e5240b8baa02ef7923a7 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 31 Jul 2016 21:26:56 +0200 Subject: [PATCH 18/98] Add test for FindUsedBlobs --- src/restic/repository/prune.go | 6 +++ src/restic/repository/prune_test.go | 65 ++++++++++++++++++++++------- 2 files changed, 57 insertions(+), 14 deletions(-) diff --git a/src/restic/repository/prune.go b/src/restic/repository/prune.go index 75f9f9ac..f0b90194 100644 --- a/src/restic/repository/prune.go +++ b/src/restic/repository/prune.go @@ -174,3 +174,9 @@ func RebuildIndex(repo *Repository) error { return nil } + +// FindUsedBlobs traverses the tree ID and returns a set of all blobs +// encountered. +func FindUsedBlobs(repo *Repository, treeID backend.ID) (backend.IDSet, error) { + return nil, nil +} diff --git a/src/restic/repository/prune_test.go b/src/restic/repository/prune_test.go index 05277e4c..ce6c3748 100644 --- a/src/restic/repository/prune_test.go +++ b/src/restic/repository/prune_test.go @@ -1,11 +1,14 @@ -package repository +package repository_test import ( "io" "math/rand" + "restic" "restic/backend" "restic/pack" + "restic/repository" "testing" + "time" ) func randomSize(min, max int) int { @@ -13,7 +16,7 @@ func randomSize(min, max int) int { } func random(t *testing.T, length int) []byte { - rd := NewRandReader(rand.New(rand.NewSource(int64(length)))) + rd := repository.NewRandReader(rand.New(rand.NewSource(int64(length)))) buf := make([]byte, length) _, err := io.ReadFull(rd, buf) if err != nil { @@ -23,7 +26,7 @@ func random(t *testing.T, length int) []byte { return buf } -func createRandomBlobs(t *testing.T, repo *Repository, blobs int, pData float32) { +func createRandomBlobs(t *testing.T, repo *repository.Repository, blobs int, pData float32) { for i := 0; i < blobs; i++ { var ( tpe pack.BlobType @@ -57,7 +60,7 @@ func 
createRandomBlobs(t *testing.T, repo *Repository, blobs int, pData float32) // selectBlobs splits the list of all blobs randomly into two lists. A blob // will be contained in the firstone ith probability p. -func selectBlobs(t *testing.T, repo *Repository, p float32) (list1, list2 backend.IDSet) { +func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 backend.IDSet) { done := make(chan struct{}) defer close(done) @@ -82,7 +85,7 @@ func selectBlobs(t *testing.T, repo *Repository, p float32) (list1, list2 backen return list1, list2 } -func listPacks(t *testing.T, repo *Repository) backend.IDSet { +func listPacks(t *testing.T, repo *repository.Repository) backend.IDSet { done := make(chan struct{}) defer close(done) @@ -94,7 +97,7 @@ func listPacks(t *testing.T, repo *Repository) backend.IDSet { return list } -func findPacksForBlobs(t *testing.T, repo *Repository, blobs backend.IDSet) backend.IDSet { +func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs backend.IDSet) backend.IDSet { packs := backend.NewIDSet() idx := repo.Index() @@ -110,34 +113,34 @@ func findPacksForBlobs(t *testing.T, repo *Repository, blobs backend.IDSet) back return packs } -func repack(t *testing.T, repo *Repository, packs, blobs backend.IDSet) { - err := Repack(repo, packs, blobs) +func repack(t *testing.T, repo *repository.Repository, packs, blobs backend.IDSet) { + err := repository.Repack(repo, packs, blobs) if err != nil { t.Fatal(err) } } -func saveIndex(t *testing.T, repo *Repository) { +func saveIndex(t *testing.T, repo *repository.Repository) { if err := repo.SaveIndex(); err != nil { t.Fatalf("repo.SaveIndex() %v", err) } } -func rebuildIndex(t *testing.T, repo *Repository) { - if err := RebuildIndex(repo); err != nil { +func rebuildIndex(t *testing.T, repo *repository.Repository) { + if err := repository.RebuildIndex(repo); err != nil { t.Fatalf("error rebuilding index: %v", err) } } -func reloadIndex(t *testing.T, repo *Repository) 
{ - repo.SetIndex(NewMasterIndex()) +func reloadIndex(t *testing.T, repo *repository.Repository) { + repo.SetIndex(repository.NewMasterIndex()) if err := repo.LoadIndex(); err != nil { t.Fatalf("error loading new index: %v", err) } } func TestRepack(t *testing.T) { - repo, cleanup := TestRepository(t) + repo, cleanup := repository.TestRepository(t) defer cleanup() createRandomBlobs(t, repo, rand.Intn(400), 0.7) @@ -189,3 +192,37 @@ func TestRepack(t *testing.T) { } } } + +const ( + testSnapshots = 3 + testDepth = 2 +) + +var testTime = time.Unix(1469960361, 23) + +func TestFindUsedBlobs(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + var snapshots []*restic.Snapshot + for i := 0; i < testSnapshots; i++ { + sn := restic.TestCreateSnapshot(t, repo, testTime.Add(time.Duration(i)*time.Second), testDepth) + t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) + snapshots = append(snapshots, sn) + } + + for _, sn := range snapshots { + usedBlobs, err := repository.FindUsedBlobs(repo, *sn.Tree) + if err != nil { + t.Errorf("FindUsedBlobs returned error: %v", err) + continue + } + + if len(usedBlobs) == 0 { + t.Errorf("FindUsedBlobs returned an empty set") + continue + } + + t.Logf("used blobs from snapshot %v (tree %v): %v", sn.ID().Str(), sn.Tree.Str(), usedBlobs) + } +} From 723592d9238d0ade0ea2eef5c553489d47291b82 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Aug 2016 18:31:44 +0200 Subject: [PATCH 19/98] Move FindUsedBlobs to package restic --- src/restic/find.go | 12 +++++++++ src/restic/find_test.go | 42 +++++++++++++++++++++++++++++ src/restic/repository/prune.go | 6 ----- src/restic/repository/prune_test.go | 36 ------------------------- 4 files changed, 54 insertions(+), 42 deletions(-) create mode 100644 src/restic/find.go create mode 100644 src/restic/find_test.go diff --git a/src/restic/find.go b/src/restic/find.go new file mode 100644 index 00000000..baeb47ca --- /dev/null +++ 
b/src/restic/find.go @@ -0,0 +1,12 @@ +package restic + +import ( + "restic/backend" + "restic/repository" +) + +// FindUsedBlobs traverses the tree ID and returns a set of all blobs +// encountered. +func FindUsedBlobs(repo *repository.Repository, treeID backend.ID) (backend.IDSet, error) { + return nil, nil +} diff --git a/src/restic/find_test.go b/src/restic/find_test.go new file mode 100644 index 00000000..f5973b9a --- /dev/null +++ b/src/restic/find_test.go @@ -0,0 +1,42 @@ +package restic + +import ( + "testing" + "time" + + "restic/repository" +) + +const ( + testSnapshots = 3 + testDepth = 2 +) + +var testTime = time.Unix(1469960361, 23) + +func TestFindUsedBlobs(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + var snapshots []*Snapshot + for i := 0; i < testSnapshots; i++ { + sn := TestCreateSnapshot(t, repo, testTime.Add(time.Duration(i)*time.Second), testDepth) + t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) + snapshots = append(snapshots, sn) + } + + for _, sn := range snapshots { + usedBlobs, err := FindUsedBlobs(repo, *sn.Tree) + if err != nil { + t.Errorf("FindUsedBlobs returned error: %v", err) + continue + } + + if len(usedBlobs) == 0 { + t.Errorf("FindUsedBlobs returned an empty set") + continue + } + + t.Logf("used blobs from snapshot %v (tree %v): %v", sn.ID().Str(), sn.Tree.Str(), usedBlobs) + } +} diff --git a/src/restic/repository/prune.go b/src/restic/repository/prune.go index f0b90194..75f9f9ac 100644 --- a/src/restic/repository/prune.go +++ b/src/restic/repository/prune.go @@ -174,9 +174,3 @@ func RebuildIndex(repo *Repository) error { return nil } - -// FindUsedBlobs traverses the tree ID and returns a set of all blobs -// encountered. 
-func FindUsedBlobs(repo *Repository, treeID backend.ID) (backend.IDSet, error) { - return nil, nil -} diff --git a/src/restic/repository/prune_test.go b/src/restic/repository/prune_test.go index ce6c3748..9b40e92b 100644 --- a/src/restic/repository/prune_test.go +++ b/src/restic/repository/prune_test.go @@ -3,12 +3,10 @@ package repository_test import ( "io" "math/rand" - "restic" "restic/backend" "restic/pack" "restic/repository" "testing" - "time" ) func randomSize(min, max int) int { @@ -192,37 +190,3 @@ func TestRepack(t *testing.T) { } } } - -const ( - testSnapshots = 3 - testDepth = 2 -) - -var testTime = time.Unix(1469960361, 23) - -func TestFindUsedBlobs(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - var snapshots []*restic.Snapshot - for i := 0; i < testSnapshots; i++ { - sn := restic.TestCreateSnapshot(t, repo, testTime.Add(time.Duration(i)*time.Second), testDepth) - t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) - snapshots = append(snapshots, sn) - } - - for _, sn := range snapshots { - usedBlobs, err := repository.FindUsedBlobs(repo, *sn.Tree) - if err != nil { - t.Errorf("FindUsedBlobs returned error: %v", err) - continue - } - - if len(usedBlobs) == 0 { - t.Errorf("FindUsedBlobs returned an empty set") - continue - } - - t.Logf("used blobs from snapshot %v (tree %v): %v", sn.ID().Str(), sn.Tree.Str(), usedBlobs) - } -} From 51b16ad57dddc0c111c59e23af9cdcffcd9ecd32 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Aug 2016 18:35:13 +0200 Subject: [PATCH 20/98] Add handy functions to backend.IDSet --- src/restic/backend/idset.go | 41 +++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/src/restic/backend/idset.go b/src/restic/backend/idset.go index 12307219..4bfe52ca 100644 --- a/src/restic/backend/idset.go +++ b/src/restic/backend/idset.go @@ -60,6 +60,47 @@ func (s IDSet) Equals(other IDSet) bool { return true } +// Merge adds the blobs in other to 
the current set. +func (s IDSet) Merge(other IDSet) { + for id := range other { + s.Insert(id) + } +} + +// Intersect returns a new set containing the IDs that are present in both sets. +func (s IDSet) Intersect(other IDSet) (result IDSet) { + result = NewIDSet() + + set1 := s + set2 := other + + // iterate over the smaller set + if len(set2) < len(set1) { + set1, set2 = set2, set1 + } + + for id := range set1 { + if set2.Has(id) { + result.Insert(id) + } + } + + return result +} + +// Sub returns a new set containing all IDs that are present in s but not in +// other. +func (s IDSet) Sub(other IDSet) (result IDSet) { + result = NewIDSet() + for id := range s { + if !other.Has(id) { + result.Insert(id) + } + } + + return result +} + func (s IDSet) String() string { str := s.List().String() if len(str) < 2 { From ffc3503e6f89557d3f3128b872d118301a9f696b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Aug 2016 18:40:08 +0200 Subject: [PATCH 21/98] Add first version of FindUsedBlobs --- src/restic/find.go | 31 ++++++++++- src/restic/find_test.go | 71 +++++++++++++++++++++++- src/restic/testdata/used_blobs_snapshot0 | 37 ++++++++++++ src/restic/testdata/used_blobs_snapshot1 | 34 ++++++++++++ src/restic/testdata/used_blobs_snapshot2 | 9 +++ 5 files changed, 178 insertions(+), 4 deletions(-) create mode 100644 src/restic/testdata/used_blobs_snapshot0 create mode 100644 src/restic/testdata/used_blobs_snapshot1 create mode 100644 src/restic/testdata/used_blobs_snapshot2 diff --git a/src/restic/find.go b/src/restic/find.go index baeb47ca..7d7a8698 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -5,8 +5,35 @@ import ( "restic/repository" ) +// FindUsedBlobs traverse the tree ID and adds all seen blobs to blobs. 
+func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend.IDSet) error { + blobs.Insert(treeID) + + tree, err := LoadTree(repo, treeID) + if err != nil { + return err + } + + for _, node := range tree.Nodes { + switch node.Type { + case "file": + for _, blob := range node.Content { + blobs.Insert(blob) + } + case "dir": + err := findUsedBlobs(repo, *node.Subtree, blobs) + if err != nil { + return err + } + } + } + + return nil +} + // FindUsedBlobs traverses the tree ID and returns a set of all blobs // encountered. -func FindUsedBlobs(repo *repository.Repository, treeID backend.ID) (backend.IDSet, error) { - return nil, nil +func FindUsedBlobs(repo *repository.Repository, treeID backend.ID) (blobs backend.IDSet, err error) { + blobs = backend.NewIDSet() + return blobs, findUsedBlobs(repo, treeID, blobs) } diff --git a/src/restic/find_test.go b/src/restic/find_test.go index f5973b9a..759664f2 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -1,12 +1,69 @@ package restic import ( + "bufio" + "flag" + "fmt" + "os" + "path/filepath" + "sort" "testing" "time" + "restic/backend" "restic/repository" ) +func loadIDSet(t testing.TB, filename string) backend.IDSet { + f, err := os.Open(filename) + if err != nil { + t.Logf("unable to open golden file %v: %v", filename, err) + return backend.IDSet{} + } + + sc := bufio.NewScanner(f) + + ids := backend.NewIDSet() + for sc.Scan() { + id, err := backend.ParseID(sc.Text()) + if err != nil { + t.Errorf("file %v contained invalid id: %v", filename, err) + } + + ids.Insert(id) + } + + if err = f.Close(); err != nil { + t.Errorf("closing file %v failed with error %v", filename, err) + } + + return ids +} + +func saveIDSet(t testing.TB, filename string, s backend.IDSet) { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + t.Fatalf("unable to update golden file %v: %v", filename, err) + return + } + + var ids backend.IDs + for id := range s { + ids = 
append(ids, id) + } + + sort.Sort(ids) + for _, id := range ids { + fmt.Fprintf(f, "%s\n", id) + } + + if err = f.Close(); err != nil { + t.Fatalf("close file %v returned error: %v", filename, err) + } +} + +var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/") + const ( testSnapshots = 3 testDepth = 2 @@ -25,7 +82,7 @@ func TestFindUsedBlobs(t *testing.T) { snapshots = append(snapshots, sn) } - for _, sn := range snapshots { + for i, sn := range snapshots { usedBlobs, err := FindUsedBlobs(repo, *sn.Tree) if err != nil { t.Errorf("FindUsedBlobs returned error: %v", err) @@ -37,6 +94,16 @@ func TestFindUsedBlobs(t *testing.T) { continue } - t.Logf("used blobs from snapshot %v (tree %v): %v", sn.ID().Str(), sn.Tree.Str(), usedBlobs) + goldenFilename := filepath.Join("testdata", fmt.Sprintf("used_blobs_snapshot%d", i)) + want := loadIDSet(t, goldenFilename) + + if !want.Equals(usedBlobs) { + t.Errorf("snapshot %d: wrong list of blobs returned:\n missing blobs: %v\n extra blobs: %v", + i, want.Sub(usedBlobs), usedBlobs.Sub(want)) + } + + if *updateGoldenFiles { + saveIDSet(t, goldenFilename, usedBlobs) + } } } diff --git a/src/restic/testdata/used_blobs_snapshot0 b/src/restic/testdata/used_blobs_snapshot0 new file mode 100644 index 00000000..543f534b --- /dev/null +++ b/src/restic/testdata/used_blobs_snapshot0 @@ -0,0 +1,37 @@ +087e8d5f45f93a78e52a938ac0b7864f92f8910091c0da69201a156242df3b78 +0bf505951741c44714527d252313b6959ce4f19d2e5512fca1c1b2da14424da3 +0c82d00e6ee78b48559cda2f9cc909beeb8769183b115dfda0a5767832accc8d +2941bfd03b8933bb150b085a2252b69675495af64523bf8d38e67429e7cccb45 +378a9b6862c8fa5c6915f158d16e4416243159bb9da44c564896c065bc6c1cf4 +3ffcf5128fc404c2a363e3e8a8d4c8a7ae8c36fcacba7fdfe71ec9dabcadd567 +40f5ca234e5eed1dc967c83fa99076ef636619148082f300cf877676728ebf14 +42aad1ab6cc964043e53e5da13ed0f2b44a3bf6ae7702f60a805f13028377524 +42bc8f509dbd6b9881cab4c1684d5cf74207046336f654db1b884197f15cae7b 
+47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4 +587045d0ec69e47a3cc91b13c959aa80add9118ecfac47232ea992650f25f0b9 +615e8851030f318751f3c8baf8fbfa9958e2dd7f25dc1a87dcf6d6f79d1f1a9f +63ec5e835e11203bbeef69095523344dd975f1ab52bdbf4a1db7a53914d967ca +714f9e16404b9ec83de56715e5387b2c4c2ed0af1889166a4e767822f971bf52 +80ba9a145bf46cae605e911c18165c02213e8d11d68dc5b7824f259d17b7b6d0 +86af714d79d18be1c9c0ae23cca9dbd7cef44530e253e80af5bd5c34eab09714 +8a445cf5b6313cbe3b5872a55adde52aa8d1ae188f41d56f176e40a3137ac058 +8e171f7367d1b68012ed1ceec8f54b7b9b8654ebaf63a760017c34d761b17878 +8e98f35e65fb42c85eb4a2ab4793e294148e3f318252cb850a896274d2aa90bc +9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c +9da502ea8e7a768ee0dbafdc613db3df4a7cd9c98af08328265c4d2e953e8efa +9f2899688d2f23391cfd86e7b6d326a54f352bb294160878178639aab4aa378f +a2f3ccf973b3600c06c42dc3b867b263a788c18aa57f4448fea2525b7cbfd784 +b2deaf9174086129ec3b9f79e05401fdb3baf8b75335addffac1950182d779df +b81870ebe27b98f6b8746349e8ea444c96bf2eaac5dbd6236175150ce579f46b +bd4dacd46031b2b837bc9bd06145b0571156fa496408ce728c003ae50b265aaf +c0775cfc822f59524b4ed714d257607fd5f2c9f0dc9f65763a86ffc33aac325b +c3596f717c495d20c33561e991d4295550b6d7544687f2363e999bdc0266224d +c54c4899c4d7dcda8b9e597aebfbaf7d65c9c7a760527d77e7fc9894283d736e +ca51ecf1633896f852929cb2d56ad1b5bed4ab6055bdcf370ced4011bed164aa +ce8b656cead478c34060510962daf97cea52abde68bbef7934dd5c5513cf6f3b +dafbb65569781083b627de833fb931cf98401299a62d747f03d8fc135ab57279 +e193d395410520580e76a5b89b8d23a1d162c0e28c52cb8194d409a74a120f7d +e752efd93f9850ba0cafbbac01bb283c10095ac923cdb8ff027393001123d406 +f728e5576d4ab63248c310396d67d9afa3267dd2dea3cfba690dbd04efe181fb +f75b6460b68d254f2195b08c606672fb55c05fb7bed7e16699b3231104b673ea +fe19f084021bdac5a9a5d270042ff53ef36357dd0743318d0480dee1a43de266 diff --git a/src/restic/testdata/used_blobs_snapshot1 b/src/restic/testdata/used_blobs_snapshot1 new file mode 100644 index 00000000..502e9170 --- 
/dev/null +++ b/src/restic/testdata/used_blobs_snapshot1 @@ -0,0 +1,34 @@ +011a951a9796979c2b515ef4209662013bd1f16a20a1b35d1d950d7408bdc8b4 +087e8d5f45f93a78e52a938ac0b7864f92f8910091c0da69201a156242df3b78 +0bad18b7f2d82d7c9cf8e405262ad2f3dbe57928aa242c1070b917042a99072d +0bf505951741c44714527d252313b6959ce4f19d2e5512fca1c1b2da14424da3 +0c82d00e6ee78b48559cda2f9cc909beeb8769183b115dfda0a5767832accc8d +2941bfd03b8933bb150b085a2252b69675495af64523bf8d38e67429e7cccb45 +3ffcf5128fc404c2a363e3e8a8d4c8a7ae8c36fcacba7fdfe71ec9dabcadd567 +40f5ca234e5eed1dc967c83fa99076ef636619148082f300cf877676728ebf14 +42bc8f509dbd6b9881cab4c1684d5cf74207046336f654db1b884197f15cae7b +47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4 +4b2e91022c34c756b7bd8ece046a2bab6f0dcad89f46c52d1f84cd48e8da55df +6416bc2321cdeb8758188af2b3925f2c82ffde014bf53b7a69c0f113a5c460fe +714f9e16404b9ec83de56715e5387b2c4c2ed0af1889166a4e767822f971bf52 +80ba9a145bf46cae605e911c18165c02213e8d11d68dc5b7824f259d17b7b6d0 +83bf0196cf45bbca0be7e292688a3622af7888c0e9ec01bb78edaff302cced06 +8a445cf5b6313cbe3b5872a55adde52aa8d1ae188f41d56f176e40a3137ac058 +8e98f35e65fb42c85eb4a2ab4793e294148e3f318252cb850a896274d2aa90bc +907acef01e05c3e0140858423e9284ddd3d64145ba8b0c3293371c5c7ab3d6b7 +9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c +9da502ea8e7a768ee0dbafdc613db3df4a7cd9c98af08328265c4d2e953e8efa +a2f3ccf973b3600c06c42dc3b867b263a788c18aa57f4448fea2525b7cbfd784 +b2deaf9174086129ec3b9f79e05401fdb3baf8b75335addffac1950182d779df +b3915971171e049292e28d7bc61fe362e94f73aa49b578f4ca1322b47d7fc39c +bd4dacd46031b2b837bc9bd06145b0571156fa496408ce728c003ae50b265aaf +c3596f717c495d20c33561e991d4295550b6d7544687f2363e999bdc0266224d +c54c4899c4d7dcda8b9e597aebfbaf7d65c9c7a760527d77e7fc9894283d736e +ca51ecf1633896f852929cb2d56ad1b5bed4ab6055bdcf370ced4011bed164aa +cb8001715217b4f6960aa24c1abb4b60a20c10f23abc1e5f69e0f5436bd788c8 +d39c4c264e01ec47b0386da3775c6b0cc337974627ff55792938cca4895ac6c4 
+dafbb65569781083b627de833fb931cf98401299a62d747f03d8fc135ab57279 +e193d395410520580e76a5b89b8d23a1d162c0e28c52cb8194d409a74a120f7d +e791912a7fad8954c764fae41d2958d2feeae2278e403429add9119ab43a36f5 +f728e5576d4ab63248c310396d67d9afa3267dd2dea3cfba690dbd04efe181fb +fe19f084021bdac5a9a5d270042ff53ef36357dd0743318d0480dee1a43de266 diff --git a/src/restic/testdata/used_blobs_snapshot2 b/src/restic/testdata/used_blobs_snapshot2 new file mode 100644 index 00000000..382140b4 --- /dev/null +++ b/src/restic/testdata/used_blobs_snapshot2 @@ -0,0 +1,9 @@ +35e13e123748cd27d1634c4e07e5ff2fc86901b09b215f3125331d1226c782be +378a9b6862c8fa5c6915f158d16e4416243159bb9da44c564896c065bc6c1cf4 +42aad1ab6cc964043e53e5da13ed0f2b44a3bf6ae7702f60a805f13028377524 +47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4 +615e8851030f318751f3c8baf8fbfa9958e2dd7f25dc1a87dcf6d6f79d1f1a9f +83bf0196cf45bbca0be7e292688a3622af7888c0e9ec01bb78edaff302cced06 +9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c +b3915971171e049292e28d7bc61fe362e94f73aa49b578f4ca1322b47d7fc39c +c0775cfc822f59524b4ed714d257607fd5f2c9f0dc9f65763a86ffc33aac325b From bdd085e9f163449d6c14d3a2f1475f0e5241be56 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Aug 2016 18:45:03 +0200 Subject: [PATCH 22/98] Prevent loops when finding used blobs --- src/restic/find.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/restic/find.go b/src/restic/find.go index 7d7a8698..974ebf67 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -6,7 +6,7 @@ import ( ) // FindUsedBlobs traverse the tree ID and adds all seen blobs to blobs. 
-func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend.IDSet) error { +func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend.IDSet, seen backend.IDSet) error { blobs.Insert(treeID) tree, err := LoadTree(repo, treeID) @@ -21,7 +21,14 @@ func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend blobs.Insert(blob) } case "dir": - err := findUsedBlobs(repo, *node.Subtree, blobs) + subtreeID := *node.Subtree + if seen.Has(subtreeID) { + continue + } + + seen.Insert(subtreeID) + + err := findUsedBlobs(repo, subtreeID, blobs, seen) if err != nil { return err } @@ -35,5 +42,5 @@ func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend // encountered. func FindUsedBlobs(repo *repository.Repository, treeID backend.ID) (blobs backend.IDSet, err error) { blobs = backend.NewIDSet() - return blobs, findUsedBlobs(repo, treeID, blobs) + return blobs, findUsedBlobs(repo, treeID, blobs, backend.NewIDSet()) } From 34b3e3a0959b3f6f7fd3a00f6ade64afdf629c48 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Aug 2016 18:55:07 +0200 Subject: [PATCH 23/98] Split index/repack functions to different files --- .../repository/{prune.go => index_rebuild.go} | 77 ---------------- .../{prune_test.go => index_rebuild_test.go} | 82 +---------------- src/restic/repository/repack.go | 84 ++++++++++++++++++ src/restic/repository/repack_test.go | 88 +++++++++++++++++++ 4 files changed, 173 insertions(+), 158 deletions(-) rename src/restic/repository/{prune.go => index_rebuild.go} (52%) rename src/restic/repository/{prune_test.go => index_rebuild_test.go} (54%) create mode 100644 src/restic/repository/repack.go create mode 100644 src/restic/repository/repack_test.go diff --git a/src/restic/repository/prune.go b/src/restic/repository/index_rebuild.go similarity index 52% rename from src/restic/repository/prune.go rename to src/restic/repository/index_rebuild.go index 75f9f9ac..34ef6680 100644 
--- a/src/restic/repository/prune.go +++ b/src/restic/repository/index_rebuild.go @@ -1,91 +1,14 @@ package repository import ( - "bytes" "fmt" - "io" "os" "restic/backend" - "restic/crypto" "restic/debug" "restic/pack" "restic/worker" ) -// Repack takes a list of packs together with a list of blobs contained in -// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved -// into a new pack. Afterwards, the packs are removed. This operation requires -// an exclusive lock on the repo. -func Repack(repo *Repository, packs, keepBlobs backend.IDSet) (err error) { - debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) - - buf := make([]byte, 0, maxPackSize) - for packID := range packs { - // load the complete pack - h := backend.Handle{Type: backend.Data, Name: packID.String()} - - l, err := repo.Backend().Load(h, buf[:cap(buf)], 0) - if err == io.ErrUnexpectedEOF { - err = nil - buf = buf[:l] - } - - if err != nil { - return err - } - - debug.Log("Repack", "pack %v loaded (%d bytes)", packID.Str(), len(buf)) - - unpck, err := pack.NewUnpacker(repo.Key(), bytes.NewReader(buf)) - if err != nil { - return err - } - - debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(unpck.Entries)) - var plaintext []byte - for _, entry := range unpck.Entries { - if !keepBlobs.Has(entry.ID) { - continue - } - - ciphertext := buf[entry.Offset : entry.Offset+entry.Length] - - if cap(plaintext) < len(ciphertext) { - plaintext = make([]byte, len(ciphertext)) - } - - plaintext, err = crypto.Decrypt(repo.Key(), plaintext, ciphertext) - if err != nil { - return err - } - - _, err = repo.SaveAndEncrypt(entry.Type, plaintext, &entry.ID) - if err != nil { - return err - } - - debug.Log("Repack", " saved blob %v", entry.ID.Str()) - - keepBlobs.Delete(entry.ID) - } - } - - if err := repo.Flush(); err != nil { - return err - } - - for packID := range packs { - err := repo.Backend().Remove(backend.Data, packID.String()) - 
if err != nil { - debug.Log("Repack", "error removing pack %v: %v", packID.Str(), err) - return err - } - debug.Log("Repack", "removed pack %v", packID.Str()) - } - - return nil -} - const rebuildIndexWorkers = 10 type loadBlobsResult struct { diff --git a/src/restic/repository/prune_test.go b/src/restic/repository/index_rebuild_test.go similarity index 54% rename from src/restic/repository/prune_test.go rename to src/restic/repository/index_rebuild_test.go index 9b40e92b..6a6dcc35 100644 --- a/src/restic/repository/prune_test.go +++ b/src/restic/repository/index_rebuild_test.go @@ -1,4 +1,4 @@ -package repository_test +package repository import ( "io" @@ -110,83 +110,3 @@ func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs backend. return packs } - -func repack(t *testing.T, repo *repository.Repository, packs, blobs backend.IDSet) { - err := repository.Repack(repo, packs, blobs) - if err != nil { - t.Fatal(err) - } -} - -func saveIndex(t *testing.T, repo *repository.Repository) { - if err := repo.SaveIndex(); err != nil { - t.Fatalf("repo.SaveIndex() %v", err) - } -} - -func rebuildIndex(t *testing.T, repo *repository.Repository) { - if err := repository.RebuildIndex(repo); err != nil { - t.Fatalf("error rebuilding index: %v", err) - } -} - -func reloadIndex(t *testing.T, repo *repository.Repository) { - repo.SetIndex(repository.NewMasterIndex()) - if err := repo.LoadIndex(); err != nil { - t.Fatalf("error loading new index: %v", err) - } -} - -func TestRepack(t *testing.T) { - repo, cleanup := repository.TestRepository(t) - defer cleanup() - - createRandomBlobs(t, repo, rand.Intn(400), 0.7) - - packsBefore := listPacks(t, repo) - - // Running repack on empty ID sets should not do anything at all. - repack(t, repo, nil, nil) - - packsAfter := listPacks(t, repo) - - if !packsAfter.Equals(packsBefore) { - t.Fatalf("packs are not equal, Repack modified something. 
Before:\n %v\nAfter:\n %v", - packsBefore, packsAfter) - } - - saveIndex(t, repo) - - removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) - - removePacks := findPacksForBlobs(t, repo, removeBlobs) - - repack(t, repo, removePacks, keepBlobs) - rebuildIndex(t, repo) - reloadIndex(t, repo) - - packsAfter = listPacks(t, repo) - for id := range removePacks { - if packsAfter.Has(id) { - t.Errorf("pack %v still present although it should have been repacked and removed", id.Str()) - } - } - - idx := repo.Index() - for id := range keepBlobs { - pb, err := idx.Lookup(id) - if err != nil { - t.Errorf("unable to find blob %v in repo", id.Str()) - } - - if removePacks.Has(pb.PackID) { - t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID) - } - } - - for id := range removeBlobs { - if _, err := idx.Lookup(id); err == nil { - t.Errorf("blob %v still contained in the repo", id.Str()) - } - } -} diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go new file mode 100644 index 00000000..7177d096 --- /dev/null +++ b/src/restic/repository/repack.go @@ -0,0 +1,84 @@ +package repository + +import ( + "bytes" + "io" + "restic/backend" + "restic/crypto" + "restic/debug" + "restic/pack" +) + +// Repack takes a list of packs together with a list of blobs contained in +// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved +// into a new pack. Afterwards, the packs are removed. This operation requires +// an exclusive lock on the repo. 
+func Repack(repo *Repository, packs, keepBlobs backend.IDSet) (err error) { + debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) + + buf := make([]byte, 0, maxPackSize) + for packID := range packs { + // load the complete pack + h := backend.Handle{Type: backend.Data, Name: packID.String()} + + l, err := repo.Backend().Load(h, buf[:cap(buf)], 0) + if err == io.ErrUnexpectedEOF { + err = nil + buf = buf[:l] + } + + if err != nil { + return err + } + + debug.Log("Repack", "pack %v loaded (%d bytes)", packID.Str(), len(buf)) + + unpck, err := pack.NewUnpacker(repo.Key(), bytes.NewReader(buf)) + if err != nil { + return err + } + + debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(unpck.Entries)) + var plaintext []byte + for _, entry := range unpck.Entries { + if !keepBlobs.Has(entry.ID) { + continue + } + + ciphertext := buf[entry.Offset : entry.Offset+entry.Length] + + if cap(plaintext) < len(ciphertext) { + plaintext = make([]byte, len(ciphertext)) + } + + plaintext, err = crypto.Decrypt(repo.Key(), plaintext, ciphertext) + if err != nil { + return err + } + + _, err = repo.SaveAndEncrypt(entry.Type, plaintext, &entry.ID) + if err != nil { + return err + } + + debug.Log("Repack", " saved blob %v", entry.ID.Str()) + + keepBlobs.Delete(entry.ID) + } + } + + if err := repo.Flush(); err != nil { + return err + } + + for packID := range packs { + err := repo.Backend().Remove(backend.Data, packID.String()) + if err != nil { + debug.Log("Repack", "error removing pack %v: %v", packID.Str(), err) + return err + } + debug.Log("Repack", "removed pack %v", packID.Str()) + } + + return nil +} diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go new file mode 100644 index 00000000..be343fe1 --- /dev/null +++ b/src/restic/repository/repack_test.go @@ -0,0 +1,88 @@ +package repository + +import ( + "math/rand" + "restic/backend" + "restic/repository" + "testing" +) + +func repack(t 
*testing.T, repo *repository.Repository, packs, blobs backend.IDSet) { + err := repository.Repack(repo, packs, blobs) + if err != nil { + t.Fatal(err) + } +} + +func saveIndex(t *testing.T, repo *repository.Repository) { + if err := repo.SaveIndex(); err != nil { + t.Fatalf("repo.SaveIndex() %v", err) + } +} + +func rebuildIndex(t *testing.T, repo *repository.Repository) { + if err := repository.RebuildIndex(repo); err != nil { + t.Fatalf("error rebuilding index: %v", err) + } +} + +func reloadIndex(t *testing.T, repo *repository.Repository) { + repo.SetIndex(repository.NewMasterIndex()) + if err := repo.LoadIndex(); err != nil { + t.Fatalf("error loading new index: %v", err) + } +} + +func TestRepack(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + createRandomBlobs(t, repo, rand.Intn(400), 0.7) + + packsBefore := listPacks(t, repo) + + // Running repack on empty ID sets should not do anything at all. + repack(t, repo, nil, nil) + + packsAfter := listPacks(t, repo) + + if !packsAfter.Equals(packsBefore) { + t.Fatalf("packs are not equal, Repack modified something. 
Before:\n %v\nAfter:\n %v", + packsBefore, packsAfter) + } + + saveIndex(t, repo) + + removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) + + removePacks := findPacksForBlobs(t, repo, removeBlobs) + + repack(t, repo, removePacks, keepBlobs) + rebuildIndex(t, repo) + reloadIndex(t, repo) + + packsAfter = listPacks(t, repo) + for id := range removePacks { + if packsAfter.Has(id) { + t.Errorf("pack %v still present although it should have been repacked and removed", id.Str()) + } + } + + idx := repo.Index() + for id := range keepBlobs { + pb, err := idx.Lookup(id) + if err != nil { + t.Errorf("unable to find blob %v in repo", id.Str()) + } + + if removePacks.Has(pb.PackID) { + t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID) + } + } + + for id := range removeBlobs { + if _, err := idx.Lookup(id); err == nil { + t.Errorf("blob %v still contained in the repo", id.Str()) + } + } +} From 810056c2bc41b8d31db42ca8ad6edff1f85401f4 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Aug 2016 20:04:23 +0200 Subject: [PATCH 24/98] Correct packages for tests --- src/restic/repository/index_rebuild_test.go | 2 +- src/restic/repository/repack_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/restic/repository/index_rebuild_test.go b/src/restic/repository/index_rebuild_test.go index 6a6dcc35..7b168320 100644 --- a/src/restic/repository/index_rebuild_test.go +++ b/src/restic/repository/index_rebuild_test.go @@ -1,4 +1,4 @@ -package repository +package repository_test import ( "io" diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index be343fe1..145736fa 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -1,4 +1,4 @@ -package repository +package repository_test import ( "math/rand" From 6227821b4ed27a7ec995edb9a0ad3c49ea5e024f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Aug 2016 20:24:15 +0200 Subject: [PATCH 
25/98] Move functions to correct file --- src/restic/repository/index_rebuild_test.go | 112 -------------------- src/restic/repository/repack_test.go | 104 ++++++++++++++++++ 2 files changed, 104 insertions(+), 112 deletions(-) delete mode 100644 src/restic/repository/index_rebuild_test.go diff --git a/src/restic/repository/index_rebuild_test.go b/src/restic/repository/index_rebuild_test.go deleted file mode 100644 index 7b168320..00000000 --- a/src/restic/repository/index_rebuild_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package repository_test - -import ( - "io" - "math/rand" - "restic/backend" - "restic/pack" - "restic/repository" - "testing" -) - -func randomSize(min, max int) int { - return rand.Intn(max-min) + min -} - -func random(t *testing.T, length int) []byte { - rd := repository.NewRandReader(rand.New(rand.NewSource(int64(length)))) - buf := make([]byte, length) - _, err := io.ReadFull(rd, buf) - if err != nil { - t.Fatalf("unable to read %d random bytes: %v", length, err) - } - - return buf -} - -func createRandomBlobs(t *testing.T, repo *repository.Repository, blobs int, pData float32) { - for i := 0; i < blobs; i++ { - var ( - tpe pack.BlobType - length int - ) - - if rand.Float32() < pData { - tpe = pack.Data - length = randomSize(50*1024, 2*1024*1024) // 50KiB to 2MiB of data - } else { - tpe = pack.Tree - length = randomSize(5*1024, 50*1024) // 5KiB to 50KiB - } - - _, err := repo.SaveAndEncrypt(tpe, random(t, length), nil) - if err != nil { - t.Fatalf("SaveFrom() error %v", err) - } - - if rand.Float32() < 0.2 { - if err = repo.Flush(); err != nil { - t.Fatalf("repo.Flush() returned error %v", err) - } - } - } - - if err := repo.Flush(); err != nil { - t.Fatalf("repo.Flush() returned error %v", err) - } -} - -// selectBlobs splits the list of all blobs randomly into two lists. A blob -// will be contained in the firstone ith probability p. 
-func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 backend.IDSet) { - done := make(chan struct{}) - defer close(done) - - list1 = backend.NewIDSet() - list2 = backend.NewIDSet() - - for id := range repo.List(backend.Data, done) { - entries, err := repo.ListPack(id) - if err != nil { - t.Fatalf("error listing pack %v: %v", id, err) - } - - for _, entry := range entries { - if rand.Float32() <= p { - list1.Insert(entry.ID) - } else { - list2.Insert(entry.ID) - } - } - } - - return list1, list2 -} - -func listPacks(t *testing.T, repo *repository.Repository) backend.IDSet { - done := make(chan struct{}) - defer close(done) - - list := backend.NewIDSet() - for id := range repo.List(backend.Data, done) { - list.Insert(id) - } - - return list -} - -func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs backend.IDSet) backend.IDSet { - packs := backend.NewIDSet() - - idx := repo.Index() - for id := range blobs { - pb, err := idx.Lookup(id) - if err != nil { - t.Fatal(err) - } - - packs.Insert(pb.PackID) - } - - return packs -} diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index 145736fa..9b40e92b 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -1,12 +1,116 @@ package repository_test import ( + "io" "math/rand" "restic/backend" + "restic/pack" "restic/repository" "testing" ) +func randomSize(min, max int) int { + return rand.Intn(max-min) + min +} + +func random(t *testing.T, length int) []byte { + rd := repository.NewRandReader(rand.New(rand.NewSource(int64(length)))) + buf := make([]byte, length) + _, err := io.ReadFull(rd, buf) + if err != nil { + t.Fatalf("unable to read %d random bytes: %v", length, err) + } + + return buf +} + +func createRandomBlobs(t *testing.T, repo *repository.Repository, blobs int, pData float32) { + for i := 0; i < blobs; i++ { + var ( + tpe pack.BlobType + length int + ) + + if rand.Float32() < pData { + 
tpe = pack.Data + length = randomSize(50*1024, 2*1024*1024) // 50KiB to 2MiB of data + } else { + tpe = pack.Tree + length = randomSize(5*1024, 50*1024) // 5KiB to 50KiB + } + + _, err := repo.SaveAndEncrypt(tpe, random(t, length), nil) + if err != nil { + t.Fatalf("SaveFrom() error %v", err) + } + + if rand.Float32() < 0.2 { + if err = repo.Flush(); err != nil { + t.Fatalf("repo.Flush() returned error %v", err) + } + } + } + + if err := repo.Flush(); err != nil { + t.Fatalf("repo.Flush() returned error %v", err) + } +} + +// selectBlobs splits the list of all blobs randomly into two lists. A blob +// will be contained in the firstone ith probability p. +func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 backend.IDSet) { + done := make(chan struct{}) + defer close(done) + + list1 = backend.NewIDSet() + list2 = backend.NewIDSet() + + for id := range repo.List(backend.Data, done) { + entries, err := repo.ListPack(id) + if err != nil { + t.Fatalf("error listing pack %v: %v", id, err) + } + + for _, entry := range entries { + if rand.Float32() <= p { + list1.Insert(entry.ID) + } else { + list2.Insert(entry.ID) + } + } + } + + return list1, list2 +} + +func listPacks(t *testing.T, repo *repository.Repository) backend.IDSet { + done := make(chan struct{}) + defer close(done) + + list := backend.NewIDSet() + for id := range repo.List(backend.Data, done) { + list.Insert(id) + } + + return list +} + +func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs backend.IDSet) backend.IDSet { + packs := backend.NewIDSet() + + idx := repo.Index() + for id := range blobs { + pb, err := idx.Lookup(id) + if err != nil { + t.Fatal(err) + } + + packs.Insert(pb.PackID) + } + + return packs +} + func repack(t *testing.T, repo *repository.Repository, packs, blobs backend.IDSet) { err := repository.Repack(repo, packs, blobs) if err != nil { From 50b724ca23a7cf6d4a00f03143603d48e5a6603a Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: 
Mon, 1 Aug 2016 20:44:02 +0200 Subject: [PATCH 26/98] Fix stylistic issues with FindUsedBlobs --- src/restic/find.go | 10 ++++------ src/restic/find_test.go | 13 +++++++------ 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/src/restic/find.go b/src/restic/find.go index 974ebf67..dd2ab8ff 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -5,7 +5,7 @@ import ( "restic/repository" ) -// FindUsedBlobs traverse the tree ID and adds all seen blobs to blobs. +// findUsedBlobs traverse the tree ID and adds all seen blobs to blobs. func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend.IDSet, seen backend.IDSet) error { blobs.Insert(treeID) @@ -38,9 +38,7 @@ func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend return nil } -// FindUsedBlobs traverses the tree ID and returns a set of all blobs -// encountered. -func FindUsedBlobs(repo *repository.Repository, treeID backend.ID) (blobs backend.IDSet, err error) { - blobs = backend.NewIDSet() - return blobs, findUsedBlobs(repo, treeID, blobs, backend.NewIDSet()) +// FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data blobs) to the set blobs. 
+func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend.IDSet) error { + return findUsedBlobs(repo, treeID, blobs, backend.NewIDSet()) } diff --git a/src/restic/find_test.go b/src/restic/find_test.go index 759664f2..5f1c2e0f 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -65,25 +65,26 @@ func saveIDSet(t testing.TB, filename string, s backend.IDSet) { var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/") const ( - testSnapshots = 3 - testDepth = 2 + findTestSnapshots = 3 + findTestDepth = 2 ) -var testTime = time.Unix(1469960361, 23) +var findTestTime = time.Unix(1469960361, 23) func TestFindUsedBlobs(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() var snapshots []*Snapshot - for i := 0; i < testSnapshots; i++ { - sn := TestCreateSnapshot(t, repo, testTime.Add(time.Duration(i)*time.Second), testDepth) + for i := 0; i < findTestSnapshots; i++ { + sn := TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth) t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) snapshots = append(snapshots, sn) } for i, sn := range snapshots { - usedBlobs, err := FindUsedBlobs(repo, *sn.Tree) + usedBlobs := backend.NewIDSet() + err := FindUsedBlobs(repo, *sn.Tree, usedBlobs) if err != nil { t.Errorf("FindUsedBlobs returned error: %v", err) continue From f1bc181c5b1fa54e22969d3348c9bf6e2fcd23f9 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Aug 2016 21:12:23 +0200 Subject: [PATCH 27/98] Add more checks for tests --- src/restic/checker/testing.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/restic/checker/testing.go b/src/restic/checker/testing.go index a2ac3345..3bf9aa2e 100644 --- a/src/restic/checker/testing.go +++ b/src/restic/checker/testing.go @@ -20,13 +20,30 @@ func TestCheckRepo(t testing.TB, repo *repository.Repository) { done := make(chan struct{}) defer close(done) + + // 
packs errChan := make(chan error) + go chkr.Packs(errChan, done) + + for err := range errChan { + t.Error(err) + } + + // structure + errChan = make(chan error) go chkr.Structure(errChan, done) for err := range errChan { t.Error(err) } + // unused blobs + blobs := chkr.UnusedBlobs() + if len(blobs) > 0 { + t.Errorf("unused blobs found: %v", blobs) + } + + // read data errChan = make(chan error) go chkr.ReadData(nil, errChan, done) From 035d0aeb31766d05359dd2d76a88d4da248be08e Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 1 Aug 2016 21:30:46 +0200 Subject: [PATCH 28/98] Do not create duplicate content for tests --- src/restic/testing.go | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/src/restic/testing.go b/src/restic/testing.go index 384c45e5..88c5615a 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -1,6 +1,7 @@ package restic import ( + "encoding/json" "fmt" "io" "math/rand" @@ -34,10 +35,14 @@ func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs ba t.Fatalf("unable to save chunk in repo: %v", err) } - id, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil) - if err != nil { - t.Fatalf("error saving chunk: %v", err) + id := backend.Hash(chunk.Data) + if !repo.Index().Has(id) { + _, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, &id) + if err != nil { + t.Fatalf("error saving chunk: %v", err) + } } + blobs = append(blobs, id) } @@ -50,6 +55,23 @@ const ( maxNodes = 32 ) +func treeIsKnown(t testing.TB, repo *repository.Repository, tree *Tree) (bool, backend.ID) { + data, err := json.Marshal(tree) + if err != nil { + t.Fatalf("json.Marshal(tree) returned error: %v", err) + return false, backend.ID{} + } + data = append(data, '\n') + + // check if tree has been saved before + id := backend.Hash(data) + if repo.Index().Has(id) { + return true, id + } + + return false, id +} + // saveTree saves a tree of fake files in the repo and returns the ID. 
func saveTree(t testing.TB, repo *repository.Repository, seed int64, depth int) backend.ID { rnd := rand.NewSource(seed) @@ -88,6 +110,10 @@ func saveTree(t testing.TB, repo *repository.Repository, seed int64, depth int) tree.Nodes = append(tree.Nodes, node) } + if known, id := treeIsKnown(t, repo, &tree); known { + return id + } + id, err := repo.SaveJSON(pack.Tree, tree) if err != nil { t.Fatal(err) From 8b4d4ec25faf0dc82c0133f05dc8885f8c1ac4d9 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Tue, 2 Aug 2016 22:07:06 +0200 Subject: [PATCH 29/98] Fix TestCreateSnapshot, do not store duplicate data --- src/restic/testing.go | 61 +++++++++++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/src/restic/testing.go b/src/restic/testing.go index 88c5615a..4275ad26 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -19,11 +19,17 @@ func fakeFile(t testing.TB, seed, size int64) io.Reader { return io.LimitReader(repository.NewRandReader(rand.New(rand.NewSource(seed))), size) } +type fakeFileSystem struct { + t testing.TB + repo *repository.Repository + knownBlobs backend.IDSet +} + // saveFile reads from rd and saves the blobs in the repository. The list of // IDs is returned. 
-func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs backend.IDs) { +func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) { blobs = backend.IDs{} - ch := chunker.New(rd, repo.Config.ChunkerPolynomial) + ch := chunker.New(rd, fs.repo.Config.ChunkerPolynomial) for { chunk, err := ch.Next(getBuf()) @@ -32,15 +38,17 @@ func saveFile(t testing.TB, repo *repository.Repository, rd io.Reader) (blobs ba } if err != nil { - t.Fatalf("unable to save chunk in repo: %v", err) + fs.t.Fatalf("unable to save chunk in repo: %v", err) } id := backend.Hash(chunk.Data) - if !repo.Index().Has(id) { - _, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, &id) + if !fs.blobIsKnown(id) { + _, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id) if err != nil { - t.Fatalf("error saving chunk: %v", err) + fs.t.Fatalf("error saving chunk: %v", err) } + + fs.knownBlobs.Insert(id) } blobs = append(blobs, id) @@ -55,25 +63,34 @@ const ( maxNodes = 32 ) -func treeIsKnown(t testing.TB, repo *repository.Repository, tree *Tree) (bool, backend.ID) { +func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, backend.ID) { data, err := json.Marshal(tree) if err != nil { - t.Fatalf("json.Marshal(tree) returned error: %v", err) + fs.t.Fatalf("json.Marshal(tree) returned error: %v", err) return false, backend.ID{} } data = append(data, '\n') - // check if tree has been saved before id := backend.Hash(data) - if repo.Index().Has(id) { - return true, id + return fs.blobIsKnown(id), id + +} + +func (fs fakeFileSystem) blobIsKnown(id backend.ID) bool { + if fs.knownBlobs.Has(id) { + return true } - return false, id + if fs.repo.Index().Has(id) { + return true + } + + fs.knownBlobs.Insert(id) + return false } // saveTree saves a tree of fake files in the repo and returns the ID. 
-func saveTree(t testing.TB, repo *repository.Repository, seed int64, depth int) backend.ID { +func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID { rnd := rand.NewSource(seed) numNodes := int(rnd.Int63() % maxNodes) @@ -83,7 +100,7 @@ func saveTree(t testing.TB, repo *repository.Repository, seed int64, depth int) // randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4). if depth > 1 && rnd.Int63()%4 == 0 { treeSeed := rnd.Int63() % maxSeed - id := saveTree(t, repo, treeSeed, depth-1) + id := fs.saveTree(treeSeed, depth-1) node := &Node{ Name: fmt.Sprintf("dir-%v", treeSeed), @@ -106,17 +123,17 @@ func saveTree(t testing.TB, repo *repository.Repository, seed int64, depth int) Size: uint64(fileSize), } - node.Content = saveFile(t, repo, fakeFile(t, fileSeed, fileSize)) + node.Content = fs.saveFile(fakeFile(fs.t, fileSeed, fileSize)) tree.Nodes = append(tree.Nodes, node) } - if known, id := treeIsKnown(t, repo, &tree); known { + if known, id := fs.treeIsKnown(&tree); known { return id } - id, err := repo.SaveJSON(pack.Tree, tree) + id, err := fs.repo.SaveJSON(pack.Tree, tree) if err != nil { - t.Fatal(err) + fs.t.Fatal(err) } return id @@ -137,7 +154,13 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, } snapshot.Time = at - treeID := saveTree(t, repo, seed, depth) + fs := fakeFileSystem{ + t: t, + repo: repo, + knownBlobs: backend.NewIDSet(), + } + + treeID := fs.saveTree(seed, depth) snapshot.Tree = &treeID id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot) From 7e732dbd2d1e3b1ef2dd7d9af0b21829275c3caf Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 3 Aug 2016 20:03:52 +0200 Subject: [PATCH 30/98] Allow multiple entries in the index --- src/restic/repository/index.go | 134 ++++++++++++++++++--------------- 1 file changed, 73 insertions(+), 61 deletions(-) diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go index 56534bd9..5aff065c 100644 --- 
a/src/restic/repository/index.go +++ b/src/restic/repository/index.go @@ -18,7 +18,7 @@ import ( // Index holds a lookup table for id -> pack. type Index struct { m sync.Mutex - pack map[backend.ID]indexEntry + pack map[backend.ID][]indexEntry final bool // set to true for all indexes read from the backend ("finalized") id backend.ID // set to the ID of the index when it's finalized @@ -36,18 +36,19 @@ type indexEntry struct { // NewIndex returns a new index. func NewIndex() *Index { return &Index{ - pack: make(map[backend.ID]indexEntry), + pack: make(map[backend.ID][]indexEntry), created: time.Now(), } } func (idx *Index) store(blob PackedBlob) { - idx.pack[blob.ID] = indexEntry{ + list := idx.pack[blob.ID] + idx.pack[blob.ID] = append(list, indexEntry{ tpe: blob.Type, packID: blob.PackID, offset: blob.Offset, length: blob.Length, - } + }) } // Final returns true iff the index is already written to the repository, it is @@ -131,7 +132,8 @@ func (idx *Index) Lookup(id backend.ID) (pb PackedBlob, err error) { idx.m.Lock() defer idx.m.Unlock() - if p, ok := idx.pack[id]; ok { + if packs, ok := idx.pack[id]; ok { + p := packs[0] debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d", id.Str(), p.packID.Str(), p.offset, p.length) @@ -154,15 +156,17 @@ func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) { idx.m.Lock() defer idx.m.Unlock() - for blobID, entry := range idx.pack { - if entry.packID == id { - list = append(list, PackedBlob{ - ID: blobID, - Type: entry.tpe, - Length: entry.length, - Offset: entry.offset, - PackID: entry.packID, - }) + for blobID, packList := range idx.pack { + for _, entry := range packList { + if entry.packID == id { + list = append(list, PackedBlob{ + ID: blobID, + Type: entry.tpe, + Length: entry.length, + Offset: entry.offset, + PackID: entry.packID, + }) + } } } @@ -257,17 +261,19 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob { close(ch) }() - for id, blob := range idx.pack { - select { - case 
<-done: - return - case ch <- PackedBlob{ - ID: id, - Offset: blob.offset, - Type: blob.tpe, - Length: blob.length, - PackID: blob.packID, - }: + for id, packs := range idx.pack { + for _, blob := range packs { + select { + case <-done: + return + case ch <- PackedBlob{ + ID: id, + Offset: blob.offset, + Type: blob.tpe, + Length: blob.length, + PackID: blob.packID, + }: + } } } }() @@ -281,8 +287,10 @@ func (idx *Index) Packs() backend.IDSet { defer idx.m.Unlock() packs := backend.NewIDSet() - for _, entry := range idx.pack { - packs.Insert(entry.packID) + for _, list := range idx.pack { + for _, entry := range list { + packs.Insert(entry.packID) + } } return packs @@ -294,10 +302,12 @@ func (idx *Index) Count(t pack.BlobType) (n uint) { idx.m.Lock() defer idx.m.Unlock() - for id, blob := range idx.pack { - if blob.tpe == t { - n++ - debug.Log("Index.Count", " blob %v counted: %v", id.Str(), blob) + for id, list := range idx.pack { + for _, blob := range list { + if blob.tpe == t { + n++ + debug.Log("Index.Count", " blob %v counted: %v", id.Str(), blob) + } } } @@ -330,37 +340,39 @@ func (idx *Index) generatePackList() ([]*packJSON, error) { list := []*packJSON{} packs := make(map[backend.ID]*packJSON) - for id, blob := range idx.pack { - if blob.packID.IsNull() { - panic("null pack id") + for id, packedBlobs := range idx.pack { + for _, blob := range packedBlobs { + if blob.packID.IsNull() { + panic("null pack id") + } + + debug.Log("Index.generatePackList", "handle blob %v", id.Str()) + + if blob.packID.IsNull() { + debug.Log("Index.generatePackList", "blob %q has no packID! 
(type %v, offset %v, length %v)", + id.Str(), blob.tpe, blob.offset, blob.length) + return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", id) + } + + // see if pack is already in map + p, ok := packs[blob.packID] + if !ok { + // else create new pack + p = &packJSON{ID: blob.packID} + + // and append it to the list and map + list = append(list, p) + packs[p.ID] = p + } + + // add blob + p.Blobs = append(p.Blobs, blobJSON{ + ID: id, + Type: blob.tpe, + Offset: blob.offset, + Length: blob.length, + }) } - - debug.Log("Index.generatePackList", "handle blob %v", id.Str()) - - if blob.packID.IsNull() { - debug.Log("Index.generatePackList", "blob %q has no packID! (type %v, offset %v, length %v)", - id.Str(), blob.tpe, blob.offset, blob.length) - return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", id) - } - - // see if pack is already in map - p, ok := packs[blob.packID] - if !ok { - // else create new pack - p = &packJSON{ID: blob.packID} - - // and append it to the list and map - list = append(list, p) - packs[p.ID] = p - } - - // add blob - p.Blobs = append(p.Blobs, blobJSON{ - ID: id, - Type: blob.tpe, - Offset: blob.offset, - Length: blob.length, - }) } debug.Log("Index.generatePackList", "done") From 35e3762e3744622482a3aa6b6b4534f1033c1b6a Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 3 Aug 2016 21:05:42 +0200 Subject: [PATCH 31/98] Remove dead code --- src/restic/repository/index.go | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go index 5aff065c..995d949a 100644 --- a/src/restic/repository/index.go +++ b/src/restic/repository/index.go @@ -111,22 +111,6 @@ func (idx *Index) Store(blob PackedBlob) { idx.store(blob) } -// StoreBlobs saves information about the blobs to the index in one atomic transaction. 
-func (idx *Index) StoreBlobs(blobs []PackedBlob) { - idx.m.Lock() - defer idx.m.Unlock() - - if idx.final { - panic("store new item in finalized index") - } - - debug.Log("Index.StoreBlobs", "stored %d blobs", len(blobs)) - - for _, blob := range blobs { - idx.store(blob) - } -} - // Lookup queries the index for the blob ID and returns a PackedBlob. func (idx *Index) Lookup(id backend.ID) (pb PackedBlob, err error) { idx.m.Lock() @@ -193,22 +177,6 @@ func (idx *Index) LookupSize(id backend.ID) (cleartextLength uint, err error) { return blob.PlaintextLength(), nil } -// Merge loads all items from other into idx. -func (idx *Index) Merge(other *Index) { - debug.Log("Index.Merge", "Merge index with %p", other) - idx.m.Lock() - defer idx.m.Unlock() - - for k, v := range other.pack { - if _, ok := idx.pack[k]; ok { - debug.Log("Index.Merge", "index already has key %v, updating", k.Str()) - } - - idx.pack[k] = v - } - debug.Log("Index.Merge", "done merging index") -} - // Supersedes returns the list of indexes this index supersedes, if any. func (idx *Index) Supersedes() backend.IDs { return idx.supersedes From 1b4b469440bd3f27a5d1931b2a7898afea0c919d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 3 Aug 2016 21:51:20 +0200 Subject: [PATCH 32/98] Add pack.Handle and pack.Handles --- src/restic/pack/blob_set.go | 109 ++++++++++++++++++++++++++++++++++++ src/restic/pack/handle.go | 59 +++++++++++++++++++ 2 files changed, 168 insertions(+) create mode 100644 src/restic/pack/blob_set.go create mode 100644 src/restic/pack/handle.go diff --git a/src/restic/pack/blob_set.go b/src/restic/pack/blob_set.go new file mode 100644 index 00000000..686ea931 --- /dev/null +++ b/src/restic/pack/blob_set.go @@ -0,0 +1,109 @@ +package pack + +import "sort" + +// BlobSet is a set of blobs. +type BlobSet map[Handle]struct{} + +// NewBlobSet returns a new BlobSet, populated with ids. 
+func NewBlobSet(handles ...Handle) BlobSet { + m := make(BlobSet) + for _, h := range handles { + m[h] = struct{}{} + } + + return m +} + +// Has returns true iff id is contained in the set. +func (s BlobSet) Has(h Handle) bool { + _, ok := s[h] + return ok +} + +// Insert adds id to the set. +func (s BlobSet) Insert(h Handle) { + s[h] = struct{}{} +} + +// Delete removes id from the set. +func (s BlobSet) Delete(h Handle) { + delete(s, h) +} + +// Equals returns true iff s equals other. +func (s BlobSet) Equals(other BlobSet) bool { + if len(s) != len(other) { + return false + } + + for h := range s { + if _, ok := other[h]; !ok { + return false + } + } + + return true +} + +// Merge adds the blobs in other to the current set. +func (s BlobSet) Merge(other BlobSet) { + for h := range other { + s.Insert(h) + } +} + +// Intersect returns a new set containing the handles that are present in both sets. +func (s BlobSet) Intersect(other BlobSet) (result BlobSet) { + result = NewBlobSet() + + set1 := s + set2 := other + + // iterate over the smaller set + if len(set2) < len(set1) { + set1, set2 = set2, set1 + } + + for h := range set1 { + if set2.Has(h) { + result.Insert(h) + } + } + + return result +} + +// Sub returns a new set containing all handles that are present in s but not in +// other. +func (s BlobSet) Sub(other BlobSet) (result BlobSet) { + result = NewBlobSet() + for h := range s { + if !other.Has(h) { + result.Insert(h) + } + } + + return result +} + +// List returns a slice of all Handles in the set. 
+func (s BlobSet) List() Handles { + list := make(Handles, 0, len(s)) + for h := range s { + list = append(list, h) + } + + sort.Sort(list) + + return list +} + +func (s BlobSet) String() string { + str := s.List().String() + if len(str) < 2 { + return "{}" + } + + return "{" + str[1:len(str)-1] + "}" +} diff --git a/src/restic/pack/handle.go b/src/restic/pack/handle.go new file mode 100644 index 00000000..b47aa129 --- /dev/null +++ b/src/restic/pack/handle.go @@ -0,0 +1,59 @@ +package pack + +import ( + "fmt" + "restic/backend" +) + +// Handle identifies a blob of a given type. +type Handle struct { + ID backend.ID + Type BlobType +} + +func (h Handle) String() string { + name := h.ID.String() + if len(name) > 10 { + name = name[:10] + } + return fmt.Sprintf("<%s/%s>", h.Type, name) +} + +// Handles is an ordered list of Handles that implements sort.Interface. +type Handles []Handle + +func (h Handles) Len() int { + return len(h) +} + +func (h Handles) Less(i, j int) bool { + if h[i].Type != h[j].Type { + return h[i].Type < h[j].Type + } + + for k, b := range h[i].ID { + if b == h[j].ID[k] { + continue + } + + if b < h[j].ID[k] { + return true + } + + return false + } + + return false +} + +func (h Handles) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h Handles) String() string { + elements := make([]string, 0, len(h)) + for _, e := range h { + elements = append(elements, e.String()) + } + return fmt.Sprintf("%v", elements) +} From 231da4ff80871a10661be78f4a67620d301eefa7 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 3 Aug 2016 22:06:55 +0200 Subject: [PATCH 33/98] Remove old repacking code --- src/restic/checker/repacker.go | 163 ---------------------------- src/restic/checker/repacker_test.go | 127 ---------------------- 2 files changed, 290 deletions(-) delete mode 100644 src/restic/checker/repacker.go delete mode 100644 src/restic/checker/repacker_test.go diff --git a/src/restic/checker/repacker.go b/src/restic/checker/repacker.go 
deleted file mode 100644 index 8aedfc08..00000000 --- a/src/restic/checker/repacker.go +++ /dev/null @@ -1,163 +0,0 @@ -package checker - -import ( - "errors" - - "restic/backend" - "restic/debug" - "restic/repository" -) - -// Repacker extracts still used blobs from packs with unused blobs and creates -// new packs. -type Repacker struct { - unusedBlobs backend.IDSet - repo *repository.Repository -} - -// NewRepacker returns a new repacker that (when Repack() in run) cleans up the -// repository and creates new packs and indexs so that all blobs in unusedBlobs -// aren't used any more. -func NewRepacker(repo *repository.Repository, unusedBlobs backend.IDSet) *Repacker { - return &Repacker{ - repo: repo, - unusedBlobs: unusedBlobs, - } -} - -// Repack runs the process of finding still used blobs in packs with unused -// blobs, extracts them and creates new packs with just the still-in-use blobs. -func (r *Repacker) Repack() error { - debug.Log("Repacker.Repack", "searching packs for %v", r.unusedBlobs) - - unneededPacks, err := FindPacksForBlobs(r.repo, r.unusedBlobs) - if err != nil { - return err - } - - debug.Log("Repacker.Repack", "found packs: %v", unneededPacks) - - blobs, err := FindBlobsForPacks(r.repo, unneededPacks) - if err != nil { - return err - } - - debug.Log("Repacker.Repack", "found blobs: %v", blobs) - - for id := range r.unusedBlobs { - debug.Log("Repacker.Repack", "remove unused blob %v", id.Str()) - blobs.Delete(id) - } - - debug.Log("Repacker.Repack", "need to repack blobs: %v", blobs) - - err = RepackBlobs(r.repo, r.repo, blobs) - if err != nil { - return err - } - - debug.Log("Repacker.Repack", "remove unneeded packs: %v", unneededPacks) - for packID := range unneededPacks { - err = r.repo.Backend().Remove(backend.Data, packID.String()) - if err != nil { - return err - } - } - - debug.Log("Repacker.Repack", "rebuild index, unneeded packs: %v", unneededPacks) - idx, err := r.repo.Index().RebuildIndex(unneededPacks) - - newIndexID, err := 
repository.SaveIndex(r.repo, idx) - debug.Log("Repacker.Repack", "saved new index at %v, err %v", newIndexID.Str(), err) - if err != nil { - return err - } - - debug.Log("Repacker.Repack", "remove old indexes: %v", idx.Supersedes()) - for _, id := range idx.Supersedes() { - err = r.repo.Backend().Remove(backend.Index, id.String()) - if err != nil { - debug.Log("Repacker.Repack", "error removing index %v: %v", id.Str(), err) - return err - } - - debug.Log("Repacker.Repack", "removed index %v", id.Str()) - } - - return nil -} - -// FindPacksForBlobs returns the set of packs that contain the blobs. -func FindPacksForBlobs(repo *repository.Repository, blobs backend.IDSet) (backend.IDSet, error) { - packs := backend.NewIDSet() - idx := repo.Index() - for id := range blobs { - blob, err := idx.Lookup(id) - if err != nil { - return nil, err - } - - packs.Insert(blob.PackID) - } - - return packs, nil -} - -// FindBlobsForPacks returns the set of blobs contained in a pack of packs. -func FindBlobsForPacks(repo *repository.Repository, packs backend.IDSet) (backend.IDSet, error) { - blobs := backend.NewIDSet() - - for packID := range packs { - for _, packedBlob := range repo.Index().ListPack(packID) { - blobs.Insert(packedBlob.ID) - } - } - - return blobs, nil -} - -// repackBlob loads a single blob from src and saves it in dst. 
-func repackBlob(src, dst *repository.Repository, id backend.ID) error { - blob, err := src.Index().Lookup(id) - if err != nil { - return err - } - - debug.Log("RepackBlobs", "repacking blob %v, len %v", id.Str(), blob.PlaintextLength()) - - buf := make([]byte, 0, blob.PlaintextLength()) - buf, err = src.LoadBlob(blob.Type, id, buf) - if err != nil { - return err - } - - if uint(len(buf)) != blob.PlaintextLength() { - debug.Log("RepackBlobs", "repack blob %v: len(buf) isn't equal to length: %v = %v", id.Str(), len(buf), blob.PlaintextLength()) - return errors.New("LoadBlob returned wrong data, len() doesn't match") - } - - _, err = dst.SaveAndEncrypt(blob.Type, buf, &id) - if err != nil { - return err - } - - return nil -} - -// RepackBlobs reads all blobs in blobIDs from src and saves them into new pack -// files in dst. Source and destination repo may be the same. -func RepackBlobs(src, dst *repository.Repository, blobIDs backend.IDSet) (err error) { - for id := range blobIDs { - err = repackBlob(src, dst, id) - if err != nil { - return err - } - } - - err = dst.Flush() - if err != nil { - return err - } - - return nil -} diff --git a/src/restic/checker/repacker_test.go b/src/restic/checker/repacker_test.go deleted file mode 100644 index 821828c8..00000000 --- a/src/restic/checker/repacker_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package checker_test - -import ( - "testing" - - "restic/backend" - "restic/checker" - - . 
"restic/test" -) - -var findPackTests = []struct { - blobIDs backend.IDSet - packIDs backend.IDSet -}{ - { - backend.IDSet{ - ParseID("534f211b4fc2cf5b362a24e8eba22db5372a75b7e974603ff9263f5a471760f4"): struct{}{}, - ParseID("51aa04744b518c6a85b4e7643cfa99d58789c2a6ca2a3fda831fa3032f28535c"): struct{}{}, - ParseID("454515bca5f4f60349a527bd814cc2681bc3625716460cc6310771c966d8a3bf"): struct{}{}, - ParseID("c01952de4d91da1b1b80bc6e06eaa4ec21523f4853b69dc8231708b9b7ec62d8"): struct{}{}, - }, - backend.IDSet{ - ParseID("19a731a515618ec8b75fc0ff3b887d8feb83aef1001c9899f6702761142ed068"): struct{}{}, - ParseID("657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6"): struct{}{}, - }, - }, -} - -var findBlobTests = []struct { - packIDs backend.IDSet - blobIDs backend.IDSet -}{ - { - backend.IDSet{ - ParseID("60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"): struct{}{}, - }, - backend.IDSet{ - ParseID("356493f0b00a614d36c698591bbb2b1d801932d85328c1f508019550034549fc"): struct{}{}, - ParseID("b8a6bcdddef5c0f542b4648b2ef79bc0ed4377d4109755d2fb78aff11e042663"): struct{}{}, - ParseID("5714f7274a8aa69b1692916739dc3835d09aac5395946b8ec4f58e563947199a"): struct{}{}, - ParseID("b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"): struct{}{}, - ParseID("08d0444e9987fa6e35ce4232b2b71473e1a8f66b2f9664cc44dc57aad3c5a63a"): struct{}{}, - }, - }, - { - backend.IDSet{ - ParseID("60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e"): struct{}{}, - ParseID("ff7e12cd66d896b08490e787d1915c641e678d7e6b4a00e60db5d13054f4def4"): struct{}{}, - }, - backend.IDSet{ - ParseID("356493f0b00a614d36c698591bbb2b1d801932d85328c1f508019550034549fc"): struct{}{}, - ParseID("b8a6bcdddef5c0f542b4648b2ef79bc0ed4377d4109755d2fb78aff11e042663"): struct{}{}, - ParseID("5714f7274a8aa69b1692916739dc3835d09aac5395946b8ec4f58e563947199a"): struct{}{}, - ParseID("b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c"): struct{}{}, - 
ParseID("08d0444e9987fa6e35ce4232b2b71473e1a8f66b2f9664cc44dc57aad3c5a63a"): struct{}{}, - ParseID("aa79d596dbd4c863e5400deaca869830888fe1ce9f51b4a983f532c77f16a596"): struct{}{}, - ParseID("b2396c92781307111accf2ebb1cd62b58134b744d90cb6f153ca456a98dc3e76"): struct{}{}, - ParseID("5249af22d3b2acd6da8048ac37b2a87fa346fabde55ed23bb866f7618843c9fe"): struct{}{}, - ParseID("f41c2089a9d58a4b0bf39369fa37588e6578c928aea8e90a4490a6315b9905c1"): struct{}{}, - }, - }, -} - -func TestRepackerFindPacks(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) - - OK(t, repo.LoadIndex()) - - for _, test := range findPackTests { - packIDs, err := checker.FindPacksForBlobs(repo, test.blobIDs) - OK(t, err) - Equals(t, test.packIDs, packIDs) - } - - for _, test := range findBlobTests { - blobs, err := checker.FindBlobsForPacks(repo, test.packIDs) - OK(t, err) - - Assert(t, test.blobIDs.Equals(blobs), - "list of blobs for packs %v does not match, expected:\n %v\ngot:\n %v", - test.packIDs, test.blobIDs, blobs) - } - }) -} - -func TestRepacker(t *testing.T) { - WithTestEnvironment(t, checkerTestData, func(repodir string) { - repo := OpenLocalRepo(t, repodir) - OK(t, repo.LoadIndex()) - - repo.Backend().Remove(backend.Snapshot, "c2b53c5e6a16db92fbb9aa08bd2794c58b379d8724d661ee30d20898bdfdff22") - - unusedBlobs := backend.IDSet{ - ParseID("5714f7274a8aa69b1692916739dc3835d09aac5395946b8ec4f58e563947199a"): struct{}{}, - ParseID("08d0444e9987fa6e35ce4232b2b71473e1a8f66b2f9664cc44dc57aad3c5a63a"): struct{}{}, - ParseID("356493f0b00a614d36c698591bbb2b1d801932d85328c1f508019550034549fc"): struct{}{}, - ParseID("b8a6bcdddef5c0f542b4648b2ef79bc0ed4377d4109755d2fb78aff11e042663"): struct{}{}, - } - - chkr := checker.New(repo) - _, errs := chkr.LoadIndex() - OKs(t, errs) - - errs = checkStruct(chkr) - OKs(t, errs) - - list := backend.NewIDSet(chkr.UnusedBlobs()...) 
- if !unusedBlobs.Equals(list) { - t.Fatalf("expected unused blobs:\n %v\ngot:\n %v", unusedBlobs, list) - } - - repacker := checker.NewRepacker(repo, unusedBlobs) - OK(t, repacker.Repack()) - - chkr = checker.New(repo) - _, errs = chkr.LoadIndex() - OKs(t, errs) - OKs(t, checkPacks(chkr)) - OKs(t, checkStruct(chkr)) - - blobs := chkr.UnusedBlobs() - Assert(t, len(blobs) == 0, - "expected zero unused blobs, got %v", blobs) - }) -} From 246302375db03b14a6ddd1b12e89dd6a26061502 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 3 Aug 2016 22:38:05 +0200 Subject: [PATCH 34/98] Index: Add multiple packs per blob, pack.Type Change the index so that a blob can be contained in multiple packs. Require passing the blob type to all lookup functions. --- src/restic/archive_reader.go | 4 +- src/restic/archiver.go | 8 +-- src/restic/node.go | 9 +-- src/restic/pack/handle.go | 6 +- src/restic/pack/pack.go | 5 +- src/restic/repository/index.go | 95 ++++++++++++------------ src/restic/repository/index_test.go | 54 +++++++++++--- src/restic/repository/master_index.go | 18 ++--- src/restic/repository/repack.go | 7 +- src/restic/repository/repack_test.go | 43 ++++++----- src/restic/repository/repository.go | 100 +++++++++++++++----------- src/restic/testing.go | 8 +-- 12 files changed, 210 insertions(+), 147 deletions(-) diff --git a/src/restic/archive_reader.go b/src/restic/archive_reader.go index 5a485e0f..ad6bded8 100644 --- a/src/restic/archive_reader.go +++ b/src/restic/archive_reader.go @@ -22,7 +22,7 @@ func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, er // check if tree has been saved before id := backend.Hash(data) - if repo.Index().Has(id) { + if repo.Index().Has(id, pack.Tree) { return id, nil } @@ -58,7 +58,7 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name id := backend.Hash(chunk.Data) - if !repo.Index().Has(id) { + if !repo.Index().Has(id, pack.Data) { _, err := repo.SaveAndEncrypt(pack.Data, 
chunk.Data, nil) if err != nil { return nil, backend.ID{}, err diff --git a/src/restic/archiver.go b/src/restic/archiver.go index 5aa27dcc..e3ac4237 100644 --- a/src/restic/archiver.go +++ b/src/restic/archiver.go @@ -72,7 +72,7 @@ func NewArchiver(repo *repository.Repository) *Archiver { // When the blob is not known, false is returned and the blob is added to the // list. This means that the caller false is returned to is responsible to save // the blob to the backend. -func (arch *Archiver) isKnownBlob(id backend.ID) bool { +func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool { arch.knownBlobs.Lock() defer arch.knownBlobs.Unlock() @@ -82,7 +82,7 @@ func (arch *Archiver) isKnownBlob(id backend.ID) bool { arch.knownBlobs.Insert(id) - _, err := arch.repo.Index().Lookup(id) + _, err := arch.repo.Index().Lookup(id, t) if err == nil { return true } @@ -94,7 +94,7 @@ func (arch *Archiver) isKnownBlob(id backend.ID) bool { func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error { debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str()) - if arch.isKnownBlob(id) { + if arch.isKnownBlob(id, pack.Data) { debug.Log("Archiver.Save", "blob %v is known\n", id.Str()) return nil } @@ -119,7 +119,7 @@ func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) { // check if tree has been saved before id := backend.Hash(data) - if arch.isKnownBlob(id) { + if arch.isKnownBlob(id, pack.Tree) { return id, nil } diff --git a/src/restic/node.go b/src/restic/node.go index 21c447d1..9cce841c 100644 --- a/src/restic/node.go +++ b/src/restic/node.go @@ -12,12 +12,13 @@ import ( "runtime" - "github.com/juju/errors" "restic/backend" "restic/debug" "restic/fs" "restic/pack" "restic/repository" + + "github.com/juju/errors" ) // Node is a file, directory or other item in a backup. 
@@ -215,14 +216,14 @@ func (node Node) createFileAt(path string, repo *repository.Repository) error { var buf []byte for _, id := range node.Content { - blob, err := repo.Index().Lookup(id) + size, err := repo.LookupBlobSize(id, pack.Data) if err != nil { return err } buf = buf[:cap(buf)] - if uint(len(buf)) < blob.Length { - buf = make([]byte, blob.Length) + if uint(len(buf)) < size { + buf = make([]byte, size) } buf, err := repo.LoadBlob(pack.Data, id, buf) diff --git a/src/restic/pack/handle.go b/src/restic/pack/handle.go index b47aa129..04ab8f17 100644 --- a/src/restic/pack/handle.go +++ b/src/restic/pack/handle.go @@ -12,11 +12,7 @@ type Handle struct { } func (h Handle) String() string { - name := h.ID.String() - if len(name) > 10 { - name = name[:10] - } - return fmt.Sprintf("<%s/%s>", h.Type, name) + return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str()) } // Handles is an ordered list of Handles that implements sort.Interface. diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 931bda86..2422e653 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -17,8 +17,9 @@ type BlobType uint8 // These are the blob types that can be stored in a pack. const ( - Data BlobType = 0 - Tree = 1 + Invalid BlobType = iota + Data + Tree ) func (t BlobType) String() string { diff --git a/src/restic/repository/index.go b/src/restic/repository/index.go index 995d949a..86968ba1 100644 --- a/src/restic/repository/index.go +++ b/src/restic/repository/index.go @@ -18,7 +18,7 @@ import ( // Index holds a lookup table for id -> pack. 
type Index struct { m sync.Mutex - pack map[backend.ID][]indexEntry + pack map[pack.Handle][]indexEntry final bool // set to true for all indexes read from the backend ("finalized") id backend.ID // set to the ID of the index when it's finalized @@ -27,7 +27,6 @@ type Index struct { } type indexEntry struct { - tpe pack.BlobType packID backend.ID offset uint length uint @@ -36,19 +35,19 @@ type indexEntry struct { // NewIndex returns a new index. func NewIndex() *Index { return &Index{ - pack: make(map[backend.ID][]indexEntry), + pack: make(map[pack.Handle][]indexEntry), created: time.Now(), } } func (idx *Index) store(blob PackedBlob) { - list := idx.pack[blob.ID] - idx.pack[blob.ID] = append(list, indexEntry{ - tpe: blob.Type, + newEntry := indexEntry{ packID: blob.PackID, offset: blob.Offset, length: blob.Length, - }) + } + h := pack.Handle{ID: blob.ID, Type: blob.Type} + idx.pack[h] = append(idx.pack[h], newEntry) } // Final returns true iff the index is already written to the repository, it is @@ -112,27 +111,35 @@ func (idx *Index) Store(blob PackedBlob) { } // Lookup queries the index for the blob ID and returns a PackedBlob. 
-func (idx *Index) Lookup(id backend.ID) (pb PackedBlob, err error) { +func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { idx.m.Lock() defer idx.m.Unlock() - if packs, ok := idx.pack[id]; ok { - p := packs[0] - debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d", - id.Str(), p.packID.Str(), p.offset, p.length) + h := pack.Handle{ID: id, Type: tpe} - pb := PackedBlob{ - Type: p.tpe, - Length: p.length, - ID: id, - Offset: p.offset, - PackID: p.packID, + if packs, ok := idx.pack[h]; ok { + blobs = make([]PackedBlob, 0, len(packs)) + + for _, p := range packs { + debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d", + id.Str(), p.packID.Str(), p.offset, p.length) + + blob := PackedBlob{ + Type: tpe, + Length: p.length, + ID: id, + Offset: p.offset, + PackID: p.packID, + } + + blobs = append(blobs, blob) } - return pb, nil + + return blobs, nil } debug.Log("Index.Lookup", "id %v not found", id.Str()) - return PackedBlob{}, fmt.Errorf("id %v not found in index", id) + return nil, fmt.Errorf("id %v not found in index", id) } // ListPack returns a list of blobs contained in a pack. @@ -140,12 +147,12 @@ func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) { idx.m.Lock() defer idx.m.Unlock() - for blobID, packList := range idx.pack { + for h, packList := range idx.pack { for _, entry := range packList { if entry.packID == id { list = append(list, PackedBlob{ - ID: blobID, - Type: entry.tpe, + ID: h.ID, + Type: h.Type, Length: entry.length, Offset: entry.offset, PackID: entry.packID, @@ -158,8 +165,8 @@ func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) { } // Has returns true iff the id is listed in the index. 
-func (idx *Index) Has(id backend.ID) bool { - _, err := idx.Lookup(id) +func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool { + _, err := idx.Lookup(id, tpe) if err == nil { return true } @@ -169,12 +176,13 @@ func (idx *Index) Has(id backend.ID) bool { // LookupSize returns the length of the cleartext content behind the // given id -func (idx *Index) LookupSize(id backend.ID) (cleartextLength uint, err error) { - blob, err := idx.Lookup(id) +func (idx *Index) LookupSize(id backend.ID, tpe pack.BlobType) (cleartextLength uint, err error) { + blobs, err := idx.Lookup(id, tpe) if err != nil { return 0, err } - return blob.PlaintextLength(), nil + + return blobs[0].PlaintextLength(), nil } // Supersedes returns the list of indexes this index supersedes, if any. @@ -229,15 +237,15 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob { close(ch) }() - for id, packs := range idx.pack { + for h, packs := range idx.pack { for _, blob := range packs { select { case <-done: return case ch <- PackedBlob{ - ID: id, + ID: h.ID, + Type: h.Type, Offset: blob.offset, - Type: blob.tpe, Length: blob.length, PackID: blob.packID, }: @@ -270,13 +278,12 @@ func (idx *Index) Count(t pack.BlobType) (n uint) { idx.m.Lock() defer idx.m.Unlock() - for id, list := range idx.pack { - for _, blob := range list { - if blob.tpe == t { - n++ - debug.Log("Index.Count", " blob %v counted: %v", id.Str(), blob) - } + for h, list := range idx.pack { + if h.Type != t { + continue } + + n += uint(len(list)) } return @@ -308,18 +315,18 @@ func (idx *Index) generatePackList() ([]*packJSON, error) { list := []*packJSON{} packs := make(map[backend.ID]*packJSON) - for id, packedBlobs := range idx.pack { + for h, packedBlobs := range idx.pack { for _, blob := range packedBlobs { if blob.packID.IsNull() { panic("null pack id") } - debug.Log("Index.generatePackList", "handle blob %v", id.Str()) + debug.Log("Index.generatePackList", "handle blob %v", h) if blob.packID.IsNull() { - 
debug.Log("Index.generatePackList", "blob %q has no packID! (type %v, offset %v, length %v)", - id.Str(), blob.tpe, blob.offset, blob.length) - return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", id) + debug.Log("Index.generatePackList", "blob %v has no packID! (offset %v, length %v)", + h, blob.offset, blob.length) + return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", h) } // see if pack is already in map @@ -335,8 +342,8 @@ func (idx *Index) generatePackList() ([]*packJSON, error) { // add blob p.Blobs = append(p.Blobs, blobJSON{ - ID: id, - Type: blob.tpe, + ID: h.ID, + Type: h.Type, Offset: blob.offset, Length: blob.length, }) diff --git a/src/restic/repository/index_test.go b/src/restic/repository/index_test.go index 0fafc409..5da7791f 100644 --- a/src/restic/repository/index_test.go +++ b/src/restic/repository/index_test.go @@ -74,17 +74,27 @@ func TestIndexSerialize(t *testing.T) { OK(t, err) for _, testBlob := range tests { - result, err := idx.Lookup(testBlob.id) + list, err := idx.Lookup(testBlob.id, testBlob.tpe) OK(t, err) + if len(list) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list) + } + result := list[0] + Equals(t, testBlob.pack, result.PackID) Equals(t, testBlob.tpe, result.Type) Equals(t, testBlob.offset, result.Offset) Equals(t, testBlob.length, result.Length) - result2, err := idx2.Lookup(testBlob.id) + list2, err := idx2.Lookup(testBlob.id, testBlob.tpe) OK(t, err) + if len(list2) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list2), list2) + } + result2 := list2[0] + Equals(t, testBlob.pack, result2.PackID) Equals(t, testBlob.tpe, result2.Type) Equals(t, testBlob.offset, result2.Offset) @@ -143,9 +153,15 @@ func TestIndexSerialize(t *testing.T) { // all new blobs must be in the index for _, testBlob := range newtests { - blob, err := idx3.Lookup(testBlob.id) + list, 
err := idx3.Lookup(testBlob.id, testBlob.tpe) OK(t, err) + if len(list) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", testBlob.id.Str(), len(list), list) + } + + blob := list[0] + Equals(t, testBlob.pack, blob.PackID) Equals(t, testBlob.tpe, blob.Type) Equals(t, testBlob.offset, blob.Offset) @@ -265,13 +281,13 @@ var exampleTests = []struct { var exampleLookupTest = struct { packID backend.ID - blobs backend.IDSet + blobs map[backend.ID]pack.BlobType }{ ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"), - backend.IDSet{ - ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): struct{}{}, - ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): struct{}{}, - ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): struct{}{}, + map[backend.ID]pack.BlobType{ + ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): pack.Data, + ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): pack.Tree, + ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): pack.Data, }, } @@ -282,9 +298,16 @@ func TestIndexUnserialize(t *testing.T) { OK(t, err) for _, test := range exampleTests { - blob, err := idx.Lookup(test.id) + list, err := idx.Lookup(test.id, test.tpe) OK(t, err) + if len(list) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list) + } + blob := list[0] + + t.Logf("looking for blob %v/%v, got %v", test.tpe, test.id.Str(), blob) + Equals(t, test.packID, blob.PackID) Equals(t, test.tpe, blob.Type) Equals(t, test.offset, blob.Offset) @@ -299,9 +322,13 @@ func TestIndexUnserialize(t *testing.T) { } for _, blob := range blobs { - if !exampleLookupTest.blobs.Has(blob.ID) { + b, ok := exampleLookupTest.blobs[blob.ID] + if !ok { t.Errorf("unexpected blob %v found", blob.ID.Str()) } + if blob.Type != b { + t.Errorf("unexpected type for blob %v: want %v, got %v", 
blob.ID.Str(), b, blob.Type) + } } } @@ -310,9 +337,14 @@ func TestIndexUnserializeOld(t *testing.T) { OK(t, err) for _, test := range exampleTests { - blob, err := idx.Lookup(test.id) + list, err := idx.Lookup(test.id, test.tpe) OK(t, err) + if len(list) != 1 { + t.Errorf("expected one result for blob %v, got %v: %v", test.id.Str(), len(list), list) + } + blob := list[0] + Equals(t, test.packID, blob.PackID) Equals(t, test.tpe, blob.Type) Equals(t, test.offset, blob.Offset) diff --git a/src/restic/repository/master_index.go b/src/restic/repository/master_index.go index 5fcf6337..c6114d05 100644 --- a/src/restic/repository/master_index.go +++ b/src/restic/repository/master_index.go @@ -21,32 +21,32 @@ func NewMasterIndex() *MasterIndex { } // Lookup queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) Lookup(id backend.ID) (blob PackedBlob, err error) { +func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() - debug.Log("MasterIndex.Lookup", "looking up id %v", id.Str()) + debug.Log("MasterIndex.Lookup", "looking up id %v, tpe %v", id.Str(), tpe) for _, idx := range mi.idx { - blob, err = idx.Lookup(id) + blobs, err = idx.Lookup(id, tpe) if err == nil { debug.Log("MasterIndex.Lookup", - "found id %v: %v", id.Str(), blob) + "found id %v: %v", id.Str(), blobs) return } } debug.Log("MasterIndex.Lookup", "id %v not found in any index", id.Str()) - return PackedBlob{}, fmt.Errorf("id %v not found in any index", id) + return nil, fmt.Errorf("id %v not found in any index", id) } // LookupSize queries all known Indexes for the ID and returns the first match. 
-func (mi *MasterIndex) LookupSize(id backend.ID) (uint, error) { +func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error) { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() for _, idx := range mi.idx { - length, err := idx.LookupSize(id) + length, err := idx.LookupSize(id, tpe) if err == nil { return length, nil } @@ -72,12 +72,12 @@ func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) { } // Has queries all known Indexes for the ID and returns the first match. -func (mi *MasterIndex) Has(id backend.ID) bool { +func (mi *MasterIndex) Has(id backend.ID, tpe pack.BlobType) bool { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() for _, idx := range mi.idx { - if idx.Has(id) { + if idx.Has(id, tpe) { return true } } diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go index 7177d096..9f99cdb6 100644 --- a/src/restic/repository/repack.go +++ b/src/restic/repository/repack.go @@ -13,7 +13,7 @@ import ( // these packs. Each pack is loaded and the blobs listed in keepBlobs is saved // into a new pack. Afterwards, the packs are removed. This operation requires // an exclusive lock on the repo. 
-func Repack(repo *Repository, packs, keepBlobs backend.IDSet) (err error) { +func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err error) { debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs)) buf := make([]byte, 0, maxPackSize) @@ -41,7 +41,8 @@ func Repack(repo *Repository, packs, keepBlobs backend.IDSet) (err error) { debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(unpck.Entries)) var plaintext []byte for _, entry := range unpck.Entries { - if !keepBlobs.Has(entry.ID) { + h := pack.Handle{ID: entry.ID, Type: entry.Type} + if !keepBlobs.Has(h) { continue } @@ -63,7 +64,7 @@ func Repack(repo *Repository, packs, keepBlobs backend.IDSet) (err error) { debug.Log("Repack", " saved blob %v", entry.ID.Str()) - keepBlobs.Delete(entry.ID) + keepBlobs.Delete(h) } } diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index 9b40e92b..5bc9d922 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -58,12 +58,12 @@ func createRandomBlobs(t *testing.T, repo *repository.Repository, blobs int, pDa // selectBlobs splits the list of all blobs randomly into two lists. A blob // will be contained in the firstone ith probability p. 
-func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 backend.IDSet) { +func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, list2 pack.BlobSet) { done := make(chan struct{}) defer close(done) - list1 = backend.NewIDSet() - list2 = backend.NewIDSet() + list1 = pack.NewBlobSet() + list2 = pack.NewBlobSet() for id := range repo.List(backend.Data, done) { entries, err := repo.ListPack(id) @@ -73,9 +73,9 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l for _, entry := range entries { if rand.Float32() <= p { - list1.Insert(entry.ID) + list1.Insert(pack.Handle{ID: entry.ID, Type: entry.Type}) } else { - list2.Insert(entry.ID) + list2.Insert(pack.Handle{ID: entry.ID, Type: entry.Type}) } } } @@ -95,23 +95,25 @@ func listPacks(t *testing.T, repo *repository.Repository) backend.IDSet { return list } -func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs backend.IDSet) backend.IDSet { +func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) backend.IDSet { packs := backend.NewIDSet() idx := repo.Index() - for id := range blobs { - pb, err := idx.Lookup(id) + for h := range blobs { + list, err := idx.Lookup(h.ID, h.Type) if err != nil { t.Fatal(err) } - packs.Insert(pb.PackID) + for _, pb := range list { + packs.Insert(pb.PackID) + } } return packs } -func repack(t *testing.T, repo *repository.Repository, packs, blobs backend.IDSet) { +func repack(t *testing.T, repo *repository.Repository, packs backend.IDSet, blobs pack.BlobSet) { err := repository.Repack(repo, packs, blobs) if err != nil { t.Fatal(err) @@ -173,20 +175,29 @@ func TestRepack(t *testing.T) { } idx := repo.Index() - for id := range keepBlobs { - pb, err := idx.Lookup(id) + + for h := range keepBlobs { + list, err := idx.Lookup(h.ID, h.Type) if err != nil { - t.Errorf("unable to find blob %v in repo", id.Str()) + t.Errorf("unable to find blob %v in repo", h.ID.Str()) + 
continue } + if len(list) != 1 { + t.Errorf("expected one pack in the list, got: %v", list) + continue + } + + pb := list[0] + if removePacks.Has(pb.PackID) { t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID) } } - for id := range removeBlobs { - if _, err := idx.Lookup(id); err == nil { - t.Errorf("blob %v still contained in the repo", id.Str()) + for h := range removeBlobs { + if _, err := idx.Lookup(h.ID, h.Type); err == nil { + t.Errorf("blob %v still contained in the repo", h) } } } diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index bdda7fe4..72a1c153 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -79,54 +79,68 @@ func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, erro // large enough to hold the complete blob. func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) { debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str()) - // lookup pack - blob, err := r.idx.Lookup(id) + + // lookup plaintext size of blob + size, err := r.idx.LookupSize(id, t) + if err != nil { + return nil, err + } + + // make sure the plaintext buffer is large enough, extend otherwise + plaintextBufSize := uint(cap(plaintextBuf)) + if size > plaintextBufSize { + debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d", + size, plaintextBufSize) + plaintextBuf = make([]byte, size) + } + + // lookup packs + blobs, err := r.idx.Lookup(id, t) if err != nil { debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err) return nil, err } - plaintextBufSize := uint(cap(plaintextBuf)) - if blob.PlaintextLength() > plaintextBufSize { - debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d", - blob.PlaintextLength(), plaintextBufSize) - plaintextBuf = make([]byte, blob.PlaintextLength()) + for _, blob := range blobs { + debug.Log("Repo.LoadBlob", "id %v found: 
%v", id.Str(), blob) + + if blob.Type != t { + debug.Log("Repo.LoadBlob", "blob %v has wrong block type, want %v", blob, t) + } + + // load blob from pack + h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()} + ciphertextBuf := make([]byte, blob.Length) + n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset)) + if err != nil { + debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err) + fmt.Fprintf(os.Stderr, "error loading blob %v: %v", id, err) + continue + } + + if uint(n) != blob.Length { + debug.Log("Repo.LoadBlob", "error loading blob %v: wrong length returned, want %d, got %d", + blob.Length, uint(n)) + continue + } + + // decrypt + plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf) + if err != nil { + fmt.Fprintf(os.Stderr, "decrypting blob %v failed: %v", id, err) + continue + } + + // check hash + if !backend.Hash(plaintextBuf).Equal(id) { + fmt.Fprintf(os.Stderr, "blob %v returned invalid hash", id) + continue + } + + return plaintextBuf, nil } - if blob.Type != t { - debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, blob.Type) - return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", blob.Type, t) - } - - debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob) - - // load blob from pack - h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()} - ciphertextBuf := make([]byte, blob.Length) - n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset)) - if err != nil { - debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err) - return nil, err - } - - if uint(n) != blob.Length { - debug.Log("Repo.LoadBlob", "error loading blob %v: wrong length returned, want %d, got %d", - blob.Length, uint(n)) - return nil, errors.New("wrong length returned") - } - - // decrypt - plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf) - if err != nil { - return nil, err - } - - // check hash - if !backend.Hash(plaintextBuf).Equal(id) { - return nil, 
errors.New("invalid data returned") - } - - return plaintextBuf, nil + return nil, fmt.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs)) } // closeOrErr calls cl.Close() and sets err to the returned error value if @@ -162,8 +176,8 @@ func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface } // LookupBlobSize returns the size of blob id. -func (r *Repository) LookupBlobSize(id backend.ID) (uint, error) { - return r.idx.LookupSize(id) +func (r *Repository) LookupBlobSize(id backend.ID, tpe pack.BlobType) (uint, error) { + return r.idx.LookupSize(id, tpe) } // SaveAndEncrypt encrypts data and stores it to the backend as type t. If data diff --git a/src/restic/testing.go b/src/restic/testing.go index 4275ad26..12bd7cf7 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -42,7 +42,7 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) { } id := backend.Hash(chunk.Data) - if !fs.blobIsKnown(id) { + if !fs.blobIsKnown(id, pack.Data) { _, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id) if err != nil { fs.t.Fatalf("error saving chunk: %v", err) @@ -72,16 +72,16 @@ func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, backend.ID) { data = append(data, '\n') id := backend.Hash(data) - return fs.blobIsKnown(id), id + return fs.blobIsKnown(id, pack.Tree), id } -func (fs fakeFileSystem) blobIsKnown(id backend.ID) bool { +func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool { if fs.knownBlobs.Has(id) { return true } - if fs.repo.Index().Has(id) { + if fs.repo.Index().Has(id, t) { return true } From 17e187254468879e78c8b7c35ad09c9690e721d1 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Wed, 3 Aug 2016 22:44:45 +0200 Subject: [PATCH 35/98] Switch order of parameters to repo.LoadBlob() --- src/restic/archive_reader_test.go | 2 +- src/restic/archiver_duplication_test.go | 2 +- src/restic/fuse/file.go | 8 ++++---- src/restic/fuse/file_test.go | 6 +++--- 
src/restic/node.go | 2 +- src/restic/repository/repository.go | 4 ++-- src/restic/repository/repository_test.go | 4 ++-- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/restic/archive_reader_test.go b/src/restic/archive_reader_test.go index 4397bb90..49fcbeca 100644 --- a/src/restic/archive_reader_test.go +++ b/src/restic/archive_reader_test.go @@ -13,7 +13,7 @@ import ( ) func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte { - buf, err := repo.LoadBlob(pack.Data, id, buf) + buf, err := repo.LoadBlob(id, pack.Data, buf) if err != nil { t.Fatalf("LoadBlob(%v) returned error %v", id, err) } diff --git a/src/restic/archiver_duplication_test.go b/src/restic/archiver_duplication_test.go index 1c0193ea..56341a99 100644 --- a/src/restic/archiver_duplication_test.go +++ b/src/restic/archiver_duplication_test.go @@ -101,7 +101,7 @@ func testArchiverDuplication(t *testing.T) { id := randomID() - if repo.Index().Has(id) { + if repo.Index().Has(id, pack.Data) { continue } diff --git a/src/restic/fuse/file.go b/src/restic/fuse/file.go index 8a409552..ef6766d9 100644 --- a/src/restic/fuse/file.go +++ b/src/restic/fuse/file.go @@ -27,8 +27,8 @@ var _ = fs.HandleReleaser(&file{}) // BlobLoader is an abstracted repository with a reduced set of methods used // for fuse operations. 
type BlobLoader interface { - LookupBlobSize(backend.ID) (uint, error) - LoadBlob(pack.BlobType, backend.ID, []byte) ([]byte, error) + LookupBlobSize(backend.ID, pack.BlobType) (uint, error) + LoadBlob(backend.ID, pack.BlobType, []byte) ([]byte, error) } type file struct { @@ -53,7 +53,7 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error var bytes uint64 sizes := make([]uint, len(node.Content)) for i, id := range node.Content { - size, err := repo.LookupBlobSize(id) + size, err := repo.LookupBlobSize(id, pack.Data) if err != nil { return nil, err } @@ -110,7 +110,7 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) { buf = make([]byte, f.sizes[i]) } - blob, err = f.repo.LoadBlob(pack.Data, f.node.Content[i], buf) + blob, err = f.repo.LoadBlob(f.node.Content[i], pack.Data, buf) if err != nil { debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err) return nil, err diff --git a/src/restic/fuse/file_test.go b/src/restic/fuse/file_test.go index b334f0bc..12bcb859 100644 --- a/src/restic/fuse/file_test.go +++ b/src/restic/fuse/file_test.go @@ -26,7 +26,7 @@ func NewMockRepo(content map[backend.ID][]byte) *MockRepo { return &MockRepo{blobs: content} } -func (m *MockRepo) LookupBlobSize(id backend.ID) (uint, error) { +func (m *MockRepo) LookupBlobSize(id backend.ID, t pack.BlobType) (uint, error) { buf, ok := m.blobs[id] if !ok { return 0, errors.New("blob not found") @@ -35,8 +35,8 @@ func (m *MockRepo) LookupBlobSize(id backend.ID) (uint, error) { return uint(len(buf)), nil } -func (m *MockRepo) LoadBlob(t pack.BlobType, id backend.ID, buf []byte) ([]byte, error) { - size, err := m.LookupBlobSize(id) +func (m *MockRepo) LoadBlob(id backend.ID, t pack.BlobType, buf []byte) ([]byte, error) { + size, err := m.LookupBlobSize(id, t) if err != nil { return nil, err } diff --git a/src/restic/node.go b/src/restic/node.go index 9cce841c..a2f64dac 100644 --- a/src/restic/node.go +++ 
b/src/restic/node.go @@ -226,7 +226,7 @@ func (node Node) createFileAt(path string, repo *repository.Repository) error { buf = make([]byte, size) } - buf, err := repo.LoadBlob(pack.Data, id, buf) + buf, err := repo.LoadBlob(id, pack.Data, buf) if err != nil { return errors.Annotate(err, "Load") } diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 72a1c153..1fe2d26d 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -77,7 +77,7 @@ func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, erro // LoadBlob tries to load and decrypt content identified by t and id from a // pack from the backend, the result is stored in plaintextBuf, which must be // large enough to hold the complete blob. -func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) { +func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) { debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str()) // lookup plaintext size of blob @@ -167,7 +167,7 @@ func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf // LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the // data and afterwards call json.Unmarshal on the item. 
func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) { - buf, err := r.LoadBlob(t, id, nil) + buf, err := r.LoadBlob(id, t, nil) if err != nil { return err } diff --git a/src/restic/repository/repository_test.go b/src/restic/repository/repository_test.go index 3df82477..db70765a 100644 --- a/src/restic/repository/repository_test.go +++ b/src/restic/repository/repository_test.go @@ -92,7 +92,7 @@ func TestSave(t *testing.T) { // OK(t, repo.SaveIndex()) // read back - buf, err := repo.LoadBlob(pack.Data, id, make([]byte, size)) + buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size)) OK(t, err) Assert(t, len(buf) == len(data), @@ -124,7 +124,7 @@ func TestSaveFrom(t *testing.T) { OK(t, repo.Flush()) // read back - buf, err := repo.LoadBlob(pack.Data, id, make([]byte, size)) + buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size)) OK(t, err) Assert(t, len(buf) == len(data), From cff6fea32a0daccab9d3a9b9c3bf83b2f3a5b594 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 4 Aug 2016 18:02:32 +0200 Subject: [PATCH 36/98] Fix 'cat' command --- src/cmds/restic/cmd_cat.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/cmds/restic/cmd_cat.go b/src/cmds/restic/cmd_cat.go index e3fdebe4..58e5dc45 100644 --- a/src/cmds/restic/cmd_cat.go +++ b/src/cmds/restic/cmd_cat.go @@ -161,13 +161,14 @@ func (cmd CmdCat) Execute(args []string) error { return err case "blob": - blob, err := repo.Index().Lookup(id) + list, err := repo.Index().Lookup(id, pack.Data) if err != nil { return err } + blob := list[0] buf := make([]byte, blob.Length) - data, err := repo.LoadBlob(blob.Type, id, buf) + data, err := repo.LoadBlob(id, pack.Data, buf) if err != nil { return err } From 3cca831b2e91136004a5c182417bc37bac9b5fe8 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 4 Aug 2016 18:40:31 +0200 Subject: [PATCH 37/98] Fix invalid type in newly created packs --- src/restic/pack/pack.go | 28 
+++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 2422e653..815f9ddb 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -119,7 +119,7 @@ var entrySize = uint(binary.Size(BlobType(0)) + binary.Size(uint32(0)) + backend // headerEntry is used with encoding/binary to read and write header entries type headerEntry struct { - Type BlobType + Type uint8 Length uint32 ID [backend.IDSize]byte } @@ -177,11 +177,19 @@ func (p *Packer) Finalize() (uint, error) { func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) { for _, b := range p.blobs { entry := headerEntry{ - Type: b.Type, Length: uint32(b.Length), ID: b.ID, } + switch b.Type { + case Data: + entry.Type = 0 + case Tree: + entry.Type = 1 + default: + return 0, fmt.Errorf("invalid blob type %v", b.Type) + } + err := binary.Write(wr, binary.LittleEndian, entry) if err != nil { return bytesWritten, err @@ -277,12 +285,22 @@ func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) { return nil, err } - entries = append(entries, Blob{ - Type: e.Type, + entry := Blob{ Length: uint(e.Length), ID: e.ID, Offset: pos, - }) + } + + switch e.Type { + case 0: + entry.Type = Data + case 1: + entry.Type = Tree + default: + return nil, fmt.Errorf("invalid type %d", e.Type) + } + + entries = append(entries, entry) pos += uint(e.Length) } From 6285f31604fefeae9a79d6d98650f81d140408fe Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 4 Aug 2016 18:59:26 +0200 Subject: [PATCH 38/98] Use pack.BlobSet instead of backend.IDSet --- src/restic/find.go | 16 ++--- src/restic/find_test.go | 40 ++++++++----- src/restic/pack/handle.go | 6 +- src/restic/testdata/used_blobs_snapshot0 | 74 ++++++++++++------------ src/restic/testdata/used_blobs_snapshot1 | 68 +++++++++++----------- src/restic/testdata/used_blobs_snapshot2 | 18 +++--- 6 files changed, 114 insertions(+), 108 deletions(-) diff 
--git a/src/restic/find.go b/src/restic/find.go index dd2ab8ff..9e9af63f 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -2,12 +2,13 @@ package restic import ( "restic/backend" + "restic/pack" "restic/repository" ) // findUsedBlobs traverse the tree ID and adds all seen blobs to blobs. -func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend.IDSet, seen backend.IDSet) error { - blobs.Insert(treeID) +func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet, seen pack.BlobSet) error { + blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree}) tree, err := LoadTree(repo, treeID) if err != nil { @@ -18,15 +19,16 @@ func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend switch node.Type { case "file": for _, blob := range node.Content { - blobs.Insert(blob) + blobs.Insert(pack.Handle{ID: blob, Type: pack.Data}) } case "dir": subtreeID := *node.Subtree - if seen.Has(subtreeID) { + h := pack.Handle{ID: subtreeID, Type: pack.Tree} + if seen.Has(h) { continue } - seen.Insert(subtreeID) + seen.Insert(h) err := findUsedBlobs(repo, subtreeID, blobs, seen) if err != nil { @@ -39,6 +41,6 @@ func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend } // FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data blobs) to the set blobs. 
-func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs backend.IDSet) error { - return findUsedBlobs(repo, treeID, blobs, backend.NewIDSet()) +func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet) error { + return findUsedBlobs(repo, treeID, blobs, pack.NewBlobSet()) } diff --git a/src/restic/find_test.go b/src/restic/find_test.go index 5f1c2e0f..96fa505f 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -2,6 +2,7 @@ package restic import ( "bufio" + "encoding/json" "flag" "fmt" "os" @@ -10,51 +11,58 @@ import ( "testing" "time" - "restic/backend" + "restic/pack" "restic/repository" ) -func loadIDSet(t testing.TB, filename string) backend.IDSet { +func loadIDSet(t testing.TB, filename string) pack.BlobSet { f, err := os.Open(filename) if err != nil { t.Logf("unable to open golden file %v: %v", filename, err) - return backend.IDSet{} + return pack.NewBlobSet() } sc := bufio.NewScanner(f) - ids := backend.NewIDSet() + blobs := pack.NewBlobSet() for sc.Scan() { - id, err := backend.ParseID(sc.Text()) + var h pack.Handle + err := json.Unmarshal([]byte(sc.Text()), &h) if err != nil { - t.Errorf("file %v contained invalid id: %v", filename, err) + t.Errorf("file %v contained invalid blob: %#v", filename, err) + continue } - ids.Insert(id) + blobs.Insert(h) } if err = f.Close(); err != nil { t.Errorf("closing file %v failed with error %v", filename, err) } - return ids + return blobs } -func saveIDSet(t testing.TB, filename string, s backend.IDSet) { +func saveIDSet(t testing.TB, filename string, s pack.BlobSet) { f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644) if err != nil { t.Fatalf("unable to update golden file %v: %v", filename, err) return } - var ids backend.IDs - for id := range s { - ids = append(ids, id) + var hs pack.Handles + for h := range s { + hs = append(hs, h) } - sort.Sort(ids) - for _, id := range ids { - fmt.Fprintf(f, "%s\n", id) + sort.Sort(hs) + + enc := 
json.NewEncoder(f) + for _, h := range hs { + err = enc.Encode(h) + if err != nil { + t.Fatalf("Encode() returned error: %v", err) + } } if err = f.Close(); err != nil { @@ -83,7 +91,7 @@ func TestFindUsedBlobs(t *testing.T) { } for i, sn := range snapshots { - usedBlobs := backend.NewIDSet() + usedBlobs := pack.NewBlobSet() err := FindUsedBlobs(repo, *sn.Tree, usedBlobs) if err != nil { t.Errorf("FindUsedBlobs returned error: %v", err) diff --git a/src/restic/pack/handle.go b/src/restic/pack/handle.go index 04ab8f17..9a0ce58f 100644 --- a/src/restic/pack/handle.go +++ b/src/restic/pack/handle.go @@ -23,10 +23,6 @@ func (h Handles) Len() int { } func (h Handles) Less(i, j int) bool { - if h[i].Type != h[j].Type { - return h[i].Type < h[j].Type - } - for k, b := range h[i].ID { if b == h[j].ID[k] { continue @@ -39,7 +35,7 @@ func (h Handles) Less(i, j int) bool { return false } - return false + return h[i].Type < h[j].Type } func (h Handles) Swap(i, j int) { diff --git a/src/restic/testdata/used_blobs_snapshot0 b/src/restic/testdata/used_blobs_snapshot0 index 543f534b..9443e1e1 100644 --- a/src/restic/testdata/used_blobs_snapshot0 +++ b/src/restic/testdata/used_blobs_snapshot0 @@ -1,37 +1,37 @@ -087e8d5f45f93a78e52a938ac0b7864f92f8910091c0da69201a156242df3b78 -0bf505951741c44714527d252313b6959ce4f19d2e5512fca1c1b2da14424da3 -0c82d00e6ee78b48559cda2f9cc909beeb8769183b115dfda0a5767832accc8d -2941bfd03b8933bb150b085a2252b69675495af64523bf8d38e67429e7cccb45 -378a9b6862c8fa5c6915f158d16e4416243159bb9da44c564896c065bc6c1cf4 -3ffcf5128fc404c2a363e3e8a8d4c8a7ae8c36fcacba7fdfe71ec9dabcadd567 -40f5ca234e5eed1dc967c83fa99076ef636619148082f300cf877676728ebf14 -42aad1ab6cc964043e53e5da13ed0f2b44a3bf6ae7702f60a805f13028377524 -42bc8f509dbd6b9881cab4c1684d5cf74207046336f654db1b884197f15cae7b -47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4 -587045d0ec69e47a3cc91b13c959aa80add9118ecfac47232ea992650f25f0b9 
-615e8851030f318751f3c8baf8fbfa9958e2dd7f25dc1a87dcf6d6f79d1f1a9f -63ec5e835e11203bbeef69095523344dd975f1ab52bdbf4a1db7a53914d967ca -714f9e16404b9ec83de56715e5387b2c4c2ed0af1889166a4e767822f971bf52 -80ba9a145bf46cae605e911c18165c02213e8d11d68dc5b7824f259d17b7b6d0 -86af714d79d18be1c9c0ae23cca9dbd7cef44530e253e80af5bd5c34eab09714 -8a445cf5b6313cbe3b5872a55adde52aa8d1ae188f41d56f176e40a3137ac058 -8e171f7367d1b68012ed1ceec8f54b7b9b8654ebaf63a760017c34d761b17878 -8e98f35e65fb42c85eb4a2ab4793e294148e3f318252cb850a896274d2aa90bc -9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c -9da502ea8e7a768ee0dbafdc613db3df4a7cd9c98af08328265c4d2e953e8efa -9f2899688d2f23391cfd86e7b6d326a54f352bb294160878178639aab4aa378f -a2f3ccf973b3600c06c42dc3b867b263a788c18aa57f4448fea2525b7cbfd784 -b2deaf9174086129ec3b9f79e05401fdb3baf8b75335addffac1950182d779df -b81870ebe27b98f6b8746349e8ea444c96bf2eaac5dbd6236175150ce579f46b -bd4dacd46031b2b837bc9bd06145b0571156fa496408ce728c003ae50b265aaf -c0775cfc822f59524b4ed714d257607fd5f2c9f0dc9f65763a86ffc33aac325b -c3596f717c495d20c33561e991d4295550b6d7544687f2363e999bdc0266224d -c54c4899c4d7dcda8b9e597aebfbaf7d65c9c7a760527d77e7fc9894283d736e -ca51ecf1633896f852929cb2d56ad1b5bed4ab6055bdcf370ced4011bed164aa -ce8b656cead478c34060510962daf97cea52abde68bbef7934dd5c5513cf6f3b -dafbb65569781083b627de833fb931cf98401299a62d747f03d8fc135ab57279 -e193d395410520580e76a5b89b8d23a1d162c0e28c52cb8194d409a74a120f7d -e752efd93f9850ba0cafbbac01bb283c10095ac923cdb8ff027393001123d406 -f728e5576d4ab63248c310396d67d9afa3267dd2dea3cfba690dbd04efe181fb -f75b6460b68d254f2195b08c606672fb55c05fb7bed7e16699b3231104b673ea -fe19f084021bdac5a9a5d270042ff53ef36357dd0743318d0480dee1a43de266 +{"ID":"087e8d5f45f93a78e52a938ac0b7864f92f8910091c0da69201a156242df3b78","Type":"data"} +{"ID":"0bf505951741c44714527d252313b6959ce4f19d2e5512fca1c1b2da14424da3","Type":"data"} +{"ID":"0c82d00e6ee78b48559cda2f9cc909beeb8769183b115dfda0a5767832accc8d","Type":"data"} 
+{"ID":"2941bfd03b8933bb150b085a2252b69675495af64523bf8d38e67429e7cccb45","Type":"data"} +{"ID":"378a9b6862c8fa5c6915f158d16e4416243159bb9da44c564896c065bc6c1cf4","Type":"data"} +{"ID":"3ffcf5128fc404c2a363e3e8a8d4c8a7ae8c36fcacba7fdfe71ec9dabcadd567","Type":"data"} +{"ID":"40f5ca234e5eed1dc967c83fa99076ef636619148082f300cf877676728ebf14","Type":"data"} +{"ID":"42aad1ab6cc964043e53e5da13ed0f2b44a3bf6ae7702f60a805f13028377524","Type":"data"} +{"ID":"42bc8f509dbd6b9881cab4c1684d5cf74207046336f654db1b884197f15cae7b","Type":"data"} +{"ID":"47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4","Type":"data"} +{"ID":"587045d0ec69e47a3cc91b13c959aa80add9118ecfac47232ea992650f25f0b9","Type":"data"} +{"ID":"615e8851030f318751f3c8baf8fbfa9958e2dd7f25dc1a87dcf6d6f79d1f1a9f","Type":"data"} +{"ID":"63ec5e835e11203bbeef69095523344dd975f1ab52bdbf4a1db7a53914d967ca","Type":"tree"} +{"ID":"714f9e16404b9ec83de56715e5387b2c4c2ed0af1889166a4e767822f971bf52","Type":"data"} +{"ID":"80ba9a145bf46cae605e911c18165c02213e8d11d68dc5b7824f259d17b7b6d0","Type":"data"} +{"ID":"86af714d79d18be1c9c0ae23cca9dbd7cef44530e253e80af5bd5c34eab09714","Type":"data"} +{"ID":"8a445cf5b6313cbe3b5872a55adde52aa8d1ae188f41d56f176e40a3137ac058","Type":"data"} +{"ID":"8e171f7367d1b68012ed1ceec8f54b7b9b8654ebaf63a760017c34d761b17878","Type":"tree"} +{"ID":"8e98f35e65fb42c85eb4a2ab4793e294148e3f318252cb850a896274d2aa90bc","Type":"data"} +{"ID":"9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c","Type":"data"} +{"ID":"9da502ea8e7a768ee0dbafdc613db3df4a7cd9c98af08328265c4d2e953e8efa","Type":"data"} +{"ID":"9f2899688d2f23391cfd86e7b6d326a54f352bb294160878178639aab4aa378f","Type":"tree"} +{"ID":"a2f3ccf973b3600c06c42dc3b867b263a788c18aa57f4448fea2525b7cbfd784","Type":"data"} +{"ID":"b2deaf9174086129ec3b9f79e05401fdb3baf8b75335addffac1950182d779df","Type":"data"} +{"ID":"b81870ebe27b98f6b8746349e8ea444c96bf2eaac5dbd6236175150ce579f46b","Type":"tree"} 
+{"ID":"bd4dacd46031b2b837bc9bd06145b0571156fa496408ce728c003ae50b265aaf","Type":"data"} +{"ID":"c0775cfc822f59524b4ed714d257607fd5f2c9f0dc9f65763a86ffc33aac325b","Type":"data"} +{"ID":"c3596f717c495d20c33561e991d4295550b6d7544687f2363e999bdc0266224d","Type":"data"} +{"ID":"c54c4899c4d7dcda8b9e597aebfbaf7d65c9c7a760527d77e7fc9894283d736e","Type":"data"} +{"ID":"ca51ecf1633896f852929cb2d56ad1b5bed4ab6055bdcf370ced4011bed164aa","Type":"data"} +{"ID":"ce8b656cead478c34060510962daf97cea52abde68bbef7934dd5c5513cf6f3b","Type":"data"} +{"ID":"dafbb65569781083b627de833fb931cf98401299a62d747f03d8fc135ab57279","Type":"data"} +{"ID":"e193d395410520580e76a5b89b8d23a1d162c0e28c52cb8194d409a74a120f7d","Type":"data"} +{"ID":"e752efd93f9850ba0cafbbac01bb283c10095ac923cdb8ff027393001123d406","Type":"tree"} +{"ID":"f728e5576d4ab63248c310396d67d9afa3267dd2dea3cfba690dbd04efe181fb","Type":"data"} +{"ID":"f75b6460b68d254f2195b08c606672fb55c05fb7bed7e16699b3231104b673ea","Type":"tree"} +{"ID":"fe19f084021bdac5a9a5d270042ff53ef36357dd0743318d0480dee1a43de266","Type":"data"} diff --git a/src/restic/testdata/used_blobs_snapshot1 b/src/restic/testdata/used_blobs_snapshot1 index 502e9170..3e6b6f39 100644 --- a/src/restic/testdata/used_blobs_snapshot1 +++ b/src/restic/testdata/used_blobs_snapshot1 @@ -1,34 +1,34 @@ -011a951a9796979c2b515ef4209662013bd1f16a20a1b35d1d950d7408bdc8b4 -087e8d5f45f93a78e52a938ac0b7864f92f8910091c0da69201a156242df3b78 -0bad18b7f2d82d7c9cf8e405262ad2f3dbe57928aa242c1070b917042a99072d -0bf505951741c44714527d252313b6959ce4f19d2e5512fca1c1b2da14424da3 -0c82d00e6ee78b48559cda2f9cc909beeb8769183b115dfda0a5767832accc8d -2941bfd03b8933bb150b085a2252b69675495af64523bf8d38e67429e7cccb45 -3ffcf5128fc404c2a363e3e8a8d4c8a7ae8c36fcacba7fdfe71ec9dabcadd567 -40f5ca234e5eed1dc967c83fa99076ef636619148082f300cf877676728ebf14 -42bc8f509dbd6b9881cab4c1684d5cf74207046336f654db1b884197f15cae7b -47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4 
-4b2e91022c34c756b7bd8ece046a2bab6f0dcad89f46c52d1f84cd48e8da55df -6416bc2321cdeb8758188af2b3925f2c82ffde014bf53b7a69c0f113a5c460fe -714f9e16404b9ec83de56715e5387b2c4c2ed0af1889166a4e767822f971bf52 -80ba9a145bf46cae605e911c18165c02213e8d11d68dc5b7824f259d17b7b6d0 -83bf0196cf45bbca0be7e292688a3622af7888c0e9ec01bb78edaff302cced06 -8a445cf5b6313cbe3b5872a55adde52aa8d1ae188f41d56f176e40a3137ac058 -8e98f35e65fb42c85eb4a2ab4793e294148e3f318252cb850a896274d2aa90bc -907acef01e05c3e0140858423e9284ddd3d64145ba8b0c3293371c5c7ab3d6b7 -9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c -9da502ea8e7a768ee0dbafdc613db3df4a7cd9c98af08328265c4d2e953e8efa -a2f3ccf973b3600c06c42dc3b867b263a788c18aa57f4448fea2525b7cbfd784 -b2deaf9174086129ec3b9f79e05401fdb3baf8b75335addffac1950182d779df -b3915971171e049292e28d7bc61fe362e94f73aa49b578f4ca1322b47d7fc39c -bd4dacd46031b2b837bc9bd06145b0571156fa496408ce728c003ae50b265aaf -c3596f717c495d20c33561e991d4295550b6d7544687f2363e999bdc0266224d -c54c4899c4d7dcda8b9e597aebfbaf7d65c9c7a760527d77e7fc9894283d736e -ca51ecf1633896f852929cb2d56ad1b5bed4ab6055bdcf370ced4011bed164aa -cb8001715217b4f6960aa24c1abb4b60a20c10f23abc1e5f69e0f5436bd788c8 -d39c4c264e01ec47b0386da3775c6b0cc337974627ff55792938cca4895ac6c4 -dafbb65569781083b627de833fb931cf98401299a62d747f03d8fc135ab57279 -e193d395410520580e76a5b89b8d23a1d162c0e28c52cb8194d409a74a120f7d -e791912a7fad8954c764fae41d2958d2feeae2278e403429add9119ab43a36f5 -f728e5576d4ab63248c310396d67d9afa3267dd2dea3cfba690dbd04efe181fb -fe19f084021bdac5a9a5d270042ff53ef36357dd0743318d0480dee1a43de266 +{"ID":"011a951a9796979c2b515ef4209662013bd1f16a20a1b35d1d950d7408bdc8b4","Type":"tree"} +{"ID":"087e8d5f45f93a78e52a938ac0b7864f92f8910091c0da69201a156242df3b78","Type":"data"} +{"ID":"0bad18b7f2d82d7c9cf8e405262ad2f3dbe57928aa242c1070b917042a99072d","Type":"data"} +{"ID":"0bf505951741c44714527d252313b6959ce4f19d2e5512fca1c1b2da14424da3","Type":"data"} 
+{"ID":"0c82d00e6ee78b48559cda2f9cc909beeb8769183b115dfda0a5767832accc8d","Type":"data"} +{"ID":"2941bfd03b8933bb150b085a2252b69675495af64523bf8d38e67429e7cccb45","Type":"data"} +{"ID":"3ffcf5128fc404c2a363e3e8a8d4c8a7ae8c36fcacba7fdfe71ec9dabcadd567","Type":"data"} +{"ID":"40f5ca234e5eed1dc967c83fa99076ef636619148082f300cf877676728ebf14","Type":"data"} +{"ID":"42bc8f509dbd6b9881cab4c1684d5cf74207046336f654db1b884197f15cae7b","Type":"data"} +{"ID":"47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4","Type":"data"} +{"ID":"4b2e91022c34c756b7bd8ece046a2bab6f0dcad89f46c52d1f84cd48e8da55df","Type":"tree"} +{"ID":"6416bc2321cdeb8758188af2b3925f2c82ffde014bf53b7a69c0f113a5c460fe","Type":"data"} +{"ID":"714f9e16404b9ec83de56715e5387b2c4c2ed0af1889166a4e767822f971bf52","Type":"data"} +{"ID":"80ba9a145bf46cae605e911c18165c02213e8d11d68dc5b7824f259d17b7b6d0","Type":"data"} +{"ID":"83bf0196cf45bbca0be7e292688a3622af7888c0e9ec01bb78edaff302cced06","Type":"data"} +{"ID":"8a445cf5b6313cbe3b5872a55adde52aa8d1ae188f41d56f176e40a3137ac058","Type":"data"} +{"ID":"8e98f35e65fb42c85eb4a2ab4793e294148e3f318252cb850a896274d2aa90bc","Type":"data"} +{"ID":"907acef01e05c3e0140858423e9284ddd3d64145ba8b0c3293371c5c7ab3d6b7","Type":"data"} +{"ID":"9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c","Type":"data"} +{"ID":"9da502ea8e7a768ee0dbafdc613db3df4a7cd9c98af08328265c4d2e953e8efa","Type":"data"} +{"ID":"a2f3ccf973b3600c06c42dc3b867b263a788c18aa57f4448fea2525b7cbfd784","Type":"data"} +{"ID":"b2deaf9174086129ec3b9f79e05401fdb3baf8b75335addffac1950182d779df","Type":"data"} +{"ID":"b3915971171e049292e28d7bc61fe362e94f73aa49b578f4ca1322b47d7fc39c","Type":"data"} +{"ID":"bd4dacd46031b2b837bc9bd06145b0571156fa496408ce728c003ae50b265aaf","Type":"data"} +{"ID":"c3596f717c495d20c33561e991d4295550b6d7544687f2363e999bdc0266224d","Type":"data"} +{"ID":"c54c4899c4d7dcda8b9e597aebfbaf7d65c9c7a760527d77e7fc9894283d736e","Type":"data"} 
+{"ID":"ca51ecf1633896f852929cb2d56ad1b5bed4ab6055bdcf370ced4011bed164aa","Type":"data"} +{"ID":"cb8001715217b4f6960aa24c1abb4b60a20c10f23abc1e5f69e0f5436bd788c8","Type":"data"} +{"ID":"d39c4c264e01ec47b0386da3775c6b0cc337974627ff55792938cca4895ac6c4","Type":"data"} +{"ID":"dafbb65569781083b627de833fb931cf98401299a62d747f03d8fc135ab57279","Type":"data"} +{"ID":"e193d395410520580e76a5b89b8d23a1d162c0e28c52cb8194d409a74a120f7d","Type":"data"} +{"ID":"e791912a7fad8954c764fae41d2958d2feeae2278e403429add9119ab43a36f5","Type":"tree"} +{"ID":"f728e5576d4ab63248c310396d67d9afa3267dd2dea3cfba690dbd04efe181fb","Type":"data"} +{"ID":"fe19f084021bdac5a9a5d270042ff53ef36357dd0743318d0480dee1a43de266","Type":"data"} diff --git a/src/restic/testdata/used_blobs_snapshot2 b/src/restic/testdata/used_blobs_snapshot2 index 382140b4..b57f4a41 100644 --- a/src/restic/testdata/used_blobs_snapshot2 +++ b/src/restic/testdata/used_blobs_snapshot2 @@ -1,9 +1,9 @@ -35e13e123748cd27d1634c4e07e5ff2fc86901b09b215f3125331d1226c782be -378a9b6862c8fa5c6915f158d16e4416243159bb9da44c564896c065bc6c1cf4 -42aad1ab6cc964043e53e5da13ed0f2b44a3bf6ae7702f60a805f13028377524 -47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4 -615e8851030f318751f3c8baf8fbfa9958e2dd7f25dc1a87dcf6d6f79d1f1a9f -83bf0196cf45bbca0be7e292688a3622af7888c0e9ec01bb78edaff302cced06 -9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c -b3915971171e049292e28d7bc61fe362e94f73aa49b578f4ca1322b47d7fc39c -c0775cfc822f59524b4ed714d257607fd5f2c9f0dc9f65763a86ffc33aac325b +{"ID":"35e13e123748cd27d1634c4e07e5ff2fc86901b09b215f3125331d1226c782be","Type":"tree"} +{"ID":"378a9b6862c8fa5c6915f158d16e4416243159bb9da44c564896c065bc6c1cf4","Type":"data"} +{"ID":"42aad1ab6cc964043e53e5da13ed0f2b44a3bf6ae7702f60a805f13028377524","Type":"data"} +{"ID":"47cf470c1c6de9af00b3b1ee963de8b94f51a2870b3338b3f33cfc565c0f8be4","Type":"data"} +{"ID":"615e8851030f318751f3c8baf8fbfa9958e2dd7f25dc1a87dcf6d6f79d1f1a9f","Type":"data"} 
+{"ID":"83bf0196cf45bbca0be7e292688a3622af7888c0e9ec01bb78edaff302cced06","Type":"data"} +{"ID":"9d65ba6443863394a8c6582fef4a8aaab2fb46417eef41f1792cdbdb38ee0b4c","Type":"data"} +{"ID":"b3915971171e049292e28d7bc61fe362e94f73aa49b578f4ca1322b47d7fc39c","Type":"data"} +{"ID":"c0775cfc822f59524b4ed714d257607fd5f2c9f0dc9f65763a86ffc33aac325b","Type":"data"} From acc2fa58168581facbeb34a1f328e571296dfd22 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 4 Aug 2016 20:42:11 +0200 Subject: [PATCH 39/98] Fix TestRepack * Decrease number of blobs for use in test * Fail the test when there's a duplicate blob --- src/restic/repository/repack_test.go | 30 ++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index 5bc9d922..37b213bd 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -13,7 +13,7 @@ func randomSize(min, max int) int { return rand.Intn(max-min) + min } -func random(t *testing.T, length int) []byte { +func random(t testing.TB, length int) []byte { rd := repository.NewRandReader(rand.New(rand.NewSource(int64(length)))) buf := make([]byte, length) _, err := io.ReadFull(rd, buf) @@ -24,7 +24,7 @@ func random(t *testing.T, length int) []byte { return buf } -func createRandomBlobs(t *testing.T, repo *repository.Repository, blobs int, pData float32) { +func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pData float32) { for i := 0; i < blobs; i++ { var ( tpe pack.BlobType @@ -33,13 +33,21 @@ func createRandomBlobs(t *testing.T, repo *repository.Repository, blobs int, pDa if rand.Float32() < pData { tpe = pack.Data - length = randomSize(50*1024, 2*1024*1024) // 50KiB to 2MiB of data + length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data } else { tpe = pack.Tree - length = randomSize(5*1024, 50*1024) // 5KiB to 50KiB + length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB 
} - _, err := repo.SaveAndEncrypt(tpe, random(t, length), nil) + buf := random(t, length) + id := backend.Hash(buf) + + if repo.Index().Has(id, pack.Data) { + t.Errorf("duplicate blob %v/%v ignored", id, pack.Data) + continue + } + + _, err := repo.SaveAndEncrypt(tpe, buf, &id) if err != nil { t.Fatalf("SaveFrom() error %v", err) } @@ -65,6 +73,8 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l list1 = pack.NewBlobSet() list2 = pack.NewBlobSet() + blobs := pack.NewBlobSet() + for id := range repo.List(backend.Data, done) { entries, err := repo.ListPack(id) if err != nil { @@ -72,11 +82,19 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l } for _, entry := range entries { + h := pack.Handle{ID: entry.ID, Type: entry.Type} + if blobs.Has(h) { + t.Errorf("ignoring duplicate blob %v", h) + continue + } + blobs.Insert(h) + if rand.Float32() <= p { list1.Insert(pack.Handle{ID: entry.ID, Type: entry.Type}) } else { list2.Insert(pack.Handle{ID: entry.ID, Type: entry.Type}) } + } } @@ -143,7 +161,7 @@ func TestRepack(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() - createRandomBlobs(t, repo, rand.Intn(400), 0.7) + createRandomBlobs(t, repo, 100, 0.7) packsBefore := listPacks(t, repo) From 2b1b6d8c2ab4d7a4cfaec9bcf24db46428f0b994 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 4 Aug 2016 21:21:32 +0200 Subject: [PATCH 40/98] Export ListAllPacks --- src/restic/repository/index_rebuild.go | 33 +++++++++++++++----------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/src/restic/repository/index_rebuild.go b/src/restic/repository/index_rebuild.go index 34ef6680..e18d08f1 100644 --- a/src/restic/repository/index_rebuild.go +++ b/src/restic/repository/index_rebuild.go @@ -11,19 +11,20 @@ import ( const rebuildIndexWorkers = 10 -type loadBlobsResult struct { - packID backend.ID - entries []pack.Blob +// LoadBlobsResult is returned in the channel from 
LoadBlobsFromAllPacks. +type LoadBlobsResult struct { + PackID backend.ID + Entries []pack.Blob } -// loadBlobsFromAllPacks sends the contents of all packs to ch. -func loadBlobsFromAllPacks(repo *Repository, ch chan<- worker.Job, done <-chan struct{}) { +// ListAllPacks sends the contents of all packs to ch. +func ListAllPacks(repo *Repository, ch chan<- worker.Job, done <-chan struct{}) { f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { packID := job.Data.(backend.ID) entries, err := repo.ListPack(packID) - return loadBlobsResult{ - packID: packID, - entries: entries, + return LoadBlobsResult{ + PackID: packID, + Entries: entries, }, err } @@ -31,10 +32,14 @@ func loadBlobsFromAllPacks(repo *Repository, ch chan<- worker.Job, done <-chan s wp := worker.New(rebuildIndexWorkers, f, jobCh, ch) go func() { + defer close(jobCh) for id := range repo.List(backend.Data, done) { - jobCh <- worker.Job{Data: id} + select { + case jobCh <- worker.Job{Data: id}: + case <-done: + return + } } - close(jobCh) }() wp.Wait() @@ -50,7 +55,7 @@ func RebuildIndex(repo *Repository) error { defer close(done) ch := make(chan worker.Job) - go loadBlobsFromAllPacks(repo, ch, done) + go ListAllPacks(repo, ch, done) idx := NewIndex() for job := range ch { @@ -61,15 +66,15 @@ func RebuildIndex(repo *Repository) error { continue } - res := job.Result.(loadBlobsResult) + res := job.Result.(LoadBlobsResult) - for _, entry := range res.entries { + for _, entry := range res.Entries { pb := PackedBlob{ ID: entry.ID, Type: entry.Type, Length: entry.Length, Offset: entry.Offset, - PackID: res.packID, + PackID: res.PackID, } idx.Store(pb) } From d7e5f11b7865e62906e17d5328b84860281bbedc Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 17:58:32 +0200 Subject: [PATCH 41/98] Export FindUsedBlobs --- src/restic/find.go | 13 +++++-------- src/restic/repository/index_rebuild.go | 8 ++++---- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git 
a/src/restic/find.go b/src/restic/find.go index 9e9af63f..63c8bd81 100644 --- a/src/restic/find.go +++ b/src/restic/find.go @@ -6,8 +6,10 @@ import ( "restic/repository" ) -// findUsedBlobs traverse the tree ID and adds all seen blobs to blobs. -func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet, seen pack.BlobSet) error { +// FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data +// blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited +// again. +func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet, seen pack.BlobSet) error { blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree}) tree, err := LoadTree(repo, treeID) @@ -30,7 +32,7 @@ func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.Bl seen.Insert(h) - err := findUsedBlobs(repo, subtreeID, blobs, seen) + err := FindUsedBlobs(repo, subtreeID, blobs, seen) if err != nil { return err } @@ -39,8 +41,3 @@ func findUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.Bl return nil } - -// FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data blobs) to the set blobs. -func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet) error { - return findUsedBlobs(repo, treeID, blobs, pack.NewBlobSet()) -} diff --git a/src/restic/repository/index_rebuild.go b/src/restic/repository/index_rebuild.go index e18d08f1..2fd3e4ea 100644 --- a/src/restic/repository/index_rebuild.go +++ b/src/restic/repository/index_rebuild.go @@ -11,8 +11,8 @@ import ( const rebuildIndexWorkers = 10 -// LoadBlobsResult is returned in the channel from LoadBlobsFromAllPacks. -type LoadBlobsResult struct { +// ListAllPacksResult is returned in the channel from LoadBlobsFromAllPacks. 
+type ListAllPacksResult struct { PackID backend.ID Entries []pack.Blob } @@ -22,7 +22,7 @@ func ListAllPacks(repo *Repository, ch chan<- worker.Job, done <-chan struct{}) f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { packID := job.Data.(backend.ID) entries, err := repo.ListPack(packID) - return LoadBlobsResult{ + return ListAllPacksResult{ PackID: packID, Entries: entries, }, err @@ -66,7 +66,7 @@ func RebuildIndex(repo *Repository) error { continue } - res := job.Result.(LoadBlobsResult) + res := job.Result.(ListAllPacksResult) for _, entry := range res.Entries { pb := PackedBlob{ From e9cddc0be5fed4c3182c1ca6dedbf6efbb6cd1a3 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 4 Aug 2016 22:30:23 +0200 Subject: [PATCH 42/98] Fix TestFindUsedBlobs --- src/restic/find_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/restic/find_test.go b/src/restic/find_test.go index 96fa505f..b3050e26 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -92,7 +92,7 @@ func TestFindUsedBlobs(t *testing.T) { for i, sn := range snapshots { usedBlobs := pack.NewBlobSet() - err := FindUsedBlobs(repo, *sn.Tree, usedBlobs) + err := FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet()) if err != nil { t.Errorf("FindUsedBlobs returned error: %v", err) continue From fd6c854a219d6d694b08e9aeb92f85c6bb983b40 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 6 Aug 2016 17:29:08 +0200 Subject: [PATCH 43/98] Add TestResetRepository and BenchmarkCreateSnapshot --- src/restic/testing.go | 17 +++++++++++++++++ src/restic/testing_test.go | 12 ++++++++++++ 2 files changed, 29 insertions(+) diff --git a/src/restic/testing.go b/src/restic/testing.go index 12bd7cf7..ce4e98cf 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -184,3 +184,20 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, return snapshot } + +// TestResetRepository removes all packs and indexes 
from the repository. +func TestResetRepository(t testing.TB, repo *repository.Repository) { + done := make(chan struct{}) + defer close(done) + + for _, tpe := range []backend.Type{backend.Snapshot, backend.Index, backend.Data} { + for id := range repo.Backend().List(tpe, done) { + err := repo.Backend().Remove(tpe, id) + if err != nil { + t.Errorf("removing %v (%v) failed: %v", id[0:12], tpe, err) + } + } + } + + repo.SetIndex(repository.NewMasterIndex()) +} diff --git a/src/restic/testing_test.go b/src/restic/testing_test.go index 8ec68b7f..1427d4a6 100644 --- a/src/restic/testing_test.go +++ b/src/restic/testing_test.go @@ -47,3 +47,15 @@ func TestCreateSnapshot(t *testing.T) { checker.TestCheckRepo(t, repo) } + +func BenchmarkCreateSnapshot(b *testing.B) { + repo, cleanup := repository.TestRepository(b) + defer cleanup() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + restic.TestCreateSnapshot(b, repo, testSnapshotTime, testDepth) + restic.TestResetRepository(b, repo) + } +} From 3c3a180417bd0fa5106884e9eb78f28171bd93a1 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 12:36:24 +0200 Subject: [PATCH 44/98] Move RandomID() to backend package --- src/restic/backend/testing.go | 17 +++++++++++++++++ src/restic/repository/index_test.go | 29 +++++++++-------------------- 2 files changed, 26 insertions(+), 20 deletions(-) create mode 100644 src/restic/backend/testing.go diff --git a/src/restic/backend/testing.go b/src/restic/backend/testing.go new file mode 100644 index 00000000..e0c3dd56 --- /dev/null +++ b/src/restic/backend/testing.go @@ -0,0 +1,17 @@ +package backend + +import ( + "crypto/rand" + "io" +) + +// RandomID retuns a randomly generated ID. This is mainly used for testing. +// When reading from rand fails, the function panics. 
+func RandomID() ID { + id := ID{} + _, err := io.ReadFull(rand.Reader, id[:]) + if err != nil { + panic(err) + } + return id +} diff --git a/src/restic/repository/index_test.go b/src/restic/repository/index_test.go index 5da7791f..a16c6f2e 100644 --- a/src/restic/repository/index_test.go +++ b/src/restic/repository/index_test.go @@ -2,8 +2,6 @@ package repository_test import ( "bytes" - "crypto/rand" - "io" "testing" "restic/backend" @@ -12,15 +10,6 @@ import ( . "restic/test" ) -func randomID() backend.ID { - id := backend.ID{} - _, err := io.ReadFull(rand.Reader, id[:]) - if err != nil { - panic(err) - } - return id -} - func TestIndexSerialize(t *testing.T) { type testEntry struct { id backend.ID @@ -34,11 +23,11 @@ func TestIndexSerialize(t *testing.T) { // create 50 packs with 20 blobs each for i := 0; i < 50; i++ { - packID := randomID() + packID := backend.RandomID() pos := uint(0) for j := 0; j < 20; j++ { - id := randomID() + id := backend.RandomID() length := uint(i*100 + j) idx.Store(repository.PackedBlob{ Type: pack.Data, @@ -104,11 +93,11 @@ func TestIndexSerialize(t *testing.T) { // add more blobs to idx newtests := []testEntry{} for i := 0; i < 10; i++ { - packID := randomID() + packID := backend.RandomID() pos := uint(0) for j := 0; j < 10; j++ { - id := randomID() + id := backend.RandomID() length := uint(i*100 + j) idx.Store(repository.PackedBlob{ Type: pack.Data, @@ -138,7 +127,7 @@ func TestIndexSerialize(t *testing.T) { Assert(t, idx.Final(), "index not final after encoding") - id := randomID() + id := backend.RandomID() OK(t, idx.SetID(id)) id2, err := idx.ID() Assert(t, id2.Equal(id), @@ -175,11 +164,11 @@ func TestIndexSize(t *testing.T) { packs := 200 blobs := 100 for i := 0; i < packs; i++ { - packID := randomID() + packID := backend.RandomID() pos := uint(0) for j := 0; j < blobs; j++ { - id := randomID() + id := backend.RandomID() length := uint(i*100 + j) idx.Store(repository.PackedBlob{ Type: pack.Data, @@ -359,10 +348,10 @@ func 
TestIndexPacks(t *testing.T) { packs := backend.NewIDSet() for i := 0; i < 20; i++ { - packID := randomID() + packID := backend.RandomID() idx.Store(repository.PackedBlob{ Type: pack.Data, - ID: randomID(), + ID: backend.RandomID(), PackID: packID, Offset: 0, Length: 23, From f72f3dbc6ac67376193ec0ff4a3a6abc45caaf0a Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 13:12:52 +0200 Subject: [PATCH 45/98] Buffer last 2048 bytes of a file for unpack --- src/restic/pack/pack.go | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 815f9ddb..91f12a63 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -241,32 +241,55 @@ type Unpacker struct { k *crypto.Key } +const preloadHeaderSize = 2048 + // NewUnpacker returns a pointer to Unpacker which can be used to read // individual Blobs from a pack. func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) { var err error + + // read the last 2048 byte, this will mostly be enough for the header, so + // we do not need another round trip. 
+ buf := make([]byte, preloadHeaderSize) + _, err = rd.Seek(-int64(len(buf)), 2) + if err != nil { + return nil, fmt.Errorf("seek to -%d failed: %v", len(buf), err) + } + + _, err = io.ReadFull(rd, buf) + if err != nil { + return nil, fmt.Errorf("error reading last %d bytes: %v", len(buf), err) + } + + hdrRd := io.ReadSeeker(bytes.NewReader(buf)) ls := binary.Size(uint32(0)) // reset to the end to read header length - _, err = rd.Seek(-int64(ls), 2) + _, err = hdrRd.Seek(-int64(ls), 2) if err != nil { return nil, fmt.Errorf("seeking to read header length failed: %v", err) } var length uint32 - err = binary.Read(rd, binary.LittleEndian, &length) + err = binary.Read(hdrRd, binary.LittleEndian, &length) if err != nil { return nil, fmt.Errorf("reading header length failed: %v", err) } + // if the header is longer than the preloaded buffer, use the original + // reader (and do another round trip) + if int(length) > preloadHeaderSize-ls { + hdrRd = rd + } + // reset to the beginning of the header - _, err = rd.Seek(-int64(ls)-int64(length), 2) + _, err = hdrRd.Seek(-int64(ls)-int64(length), 2) if err != nil { return nil, fmt.Errorf("seeking to read header length failed: %v", err) } // read header - hrd, err := crypto.DecryptFrom(k, io.LimitReader(rd, int64(length))) + hrd, err := crypto.DecryptFrom(k, io.LimitReader(hdrRd, int64(length))) if err != nil { return nil, err } From 94d157d97acd7c0a4ead68364e8f698782a9955b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 14:50:24 +0200 Subject: [PATCH 46/98] Introduce interface pack.Loader --- src/cmds/restic/cmd_dump.go | 4 +- src/restic/backend/interface.go | 4 +- src/restic/backend/local/local.go | 26 ++++++++--- src/restic/backend/mem/mem_backend.go | 7 ++- src/restic/backend/rest/rest.go | 14 ++++++ src/restic/backend/s3/s3.go | 12 +++-- src/restic/backend/sftp/sftp.go | 17 ++++--- src/restic/checker/checker.go | 3 +- src/restic/pack/loader.go | 43 +++++++++++++++++ src/restic/pack/pack.go | 66 
++++++++++++--------------- src/restic/pack/pack_test.go | 23 ++++++---- src/restic/repository/repack.go | 3 +- src/restic/repository/repository.go | 4 +- 13 files changed, 152 insertions(+), 74 deletions(-) create mode 100644 src/restic/pack/loader.go diff --git a/src/cmds/restic/cmd_dump.go b/src/cmds/restic/cmd_dump.go index 72a9d85b..68e4ac0d 100644 --- a/src/cmds/restic/cmd_dump.go +++ b/src/cmds/restic/cmd_dump.go @@ -126,9 +126,9 @@ func printPacks(repo *repository.Repository, wr io.Writer) error { name := job.Data.(string) h := backend.Handle{Type: backend.Data, Name: name} - rd := backend.NewReadSeeker(repo.Backend(), h) + ldr := pack.BackendLoader{Backend: repo.Backend(), Handle: h} - unpacker, err := pack.NewUnpacker(repo.Key(), rd) + unpacker, err := pack.NewUnpacker(repo.Key(), ldr) if err != nil { return nil, err } diff --git a/src/restic/backend/interface.go b/src/restic/backend/interface.go index fb0927c6..24838ddf 100644 --- a/src/restic/backend/interface.go +++ b/src/restic/backend/interface.go @@ -31,7 +31,9 @@ type Backend interface { Lister // Load returns the data stored in the backend for h at the given offset - // and saves it in p. Load has the same semantics as io.ReaderAt. + // and saves it in p. Load has the same semantics as io.ReaderAt, except + // that a negative offset is also allowed. In this case it references a + // position relative to the end of the file (similar to Seek()). Load(h Handle, p []byte, off int64) (int, error) // Save stores the data in the backend under the given handle. diff --git a/src/restic/backend/local/local.go b/src/restic/backend/local/local.go index 0821720c..6fa1ac9f 100644 --- a/src/restic/backend/local/local.go +++ b/src/restic/backend/local/local.go @@ -98,9 +98,12 @@ func dirname(base string, t backend.Type, name string) string { return filepath.Join(base, n) } -// Load returns the data stored in the backend for h at the given offset -// and saves it in p. Load has the same semantics as io.ReaderAt. 
+// Load returns the data stored in the backend for h at the given offset and +// saves it in p. Load has the same semantics as io.ReaderAt, with one +// exception: when off is lower than zero, it is treated as an offset relative +// to the end of the file. func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) { + debug.Log("backend.local.Load", "Load %v, length %v at %v", h, len(p), off) if err := h.Valid(); err != nil { return 0, err } @@ -117,11 +120,15 @@ func (b *Local) Load(h backend.Handle, p []byte, off int64) (n int, err error) { } }() - if off > 0 { + switch { + case off > 0: _, err = f.Seek(off, 0) - if err != nil { - return 0, err - } + case off < 0: + _, err = f.Seek(off, 2) + } + + if err != nil { + return 0, err } return io.ReadFull(f, p) @@ -162,6 +169,7 @@ func writeToTempfile(tempdir string, p []byte) (filename string, err error) { // Save stores data in the backend at the handle. func (b *Local) Save(h backend.Handle, p []byte) (err error) { + debug.Log("backend.local.Save", "Save %v, length %v", h, len(p)) if err := h.Valid(); err != nil { return err } @@ -203,6 +211,7 @@ func (b *Local) Save(h backend.Handle, p []byte) (err error) { // Stat returns information about a blob. func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) { + debug.Log("backend.local.Stat", "Stat %v", h) if err := h.Valid(); err != nil { return backend.BlobInfo{}, err } @@ -217,6 +226,7 @@ func (b *Local) Stat(h backend.Handle) (backend.BlobInfo, error) { // Test returns true if a blob of the given type and name exists in the backend. func (b *Local) Test(t backend.Type, name string) (bool, error) { + debug.Log("backend.local.Test", "Test %v %v", t, name) _, err := fs.Stat(filename(b.p, t, name)) if err != nil { if os.IsNotExist(err) { @@ -230,6 +240,7 @@ func (b *Local) Test(t backend.Type, name string) (bool, error) { // Remove removes the blob with the given name and type. 
func (b *Local) Remove(t backend.Type, name string) error { + debug.Log("backend.local.Remove", "Remove %v %v", t, name) fn := filename(b.p, t, name) // reset read-only flag @@ -304,6 +315,7 @@ func listDirs(dir string) (filenames []string, err error) { // goroutine is started for this. If the channel done is closed, sending // stops. func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { + debug.Log("backend.local.List", "List %v", t) lister := listDir if t == backend.Data { lister = listDirs @@ -336,11 +348,13 @@ func (b *Local) List(t backend.Type, done <-chan struct{}) <-chan string { // Delete removes the repository and all files. func (b *Local) Delete() error { + debug.Log("backend.local.Delete", "Delete()") return fs.RemoveAll(b.p) } // Close closes all open files. func (b *Local) Close() error { + debug.Log("backend.local.Close", "Close()") // this does not need to do anything, all open files are closed within the // same function. return nil diff --git a/src/restic/backend/mem/mem_backend.go b/src/restic/backend/mem/mem_backend.go index 2dde7e32..961997ae 100644 --- a/src/restic/backend/mem/mem_backend.go +++ b/src/restic/backend/mem/mem_backend.go @@ -116,8 +116,13 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err } buf := be.data[entry{h.Type, h.Name}] - if off > int64(len(buf)) { + switch { + case off > int64(len(buf)): return 0, errors.New("offset beyond end of file") + case off < -int64(len(buf)): + return 0, errors.New("offset beyond beginning of file") + case off < 0: + off = int64(len(buf)) + off } buf = buf[off:] diff --git a/src/restic/backend/rest/rest.go b/src/restic/backend/rest/rest.go index e9303358..00fe0192 100644 --- a/src/restic/backend/rest/rest.go +++ b/src/restic/backend/rest/rest.go @@ -75,6 +75,20 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err er return 0, err } + // invert offset + if off < 0 { + info, err := b.Stat(h) + if err != nil { + 
return 0, err + } + + if off > -info.Size { + return 0, errors.New("offset before beginning of file") + } + + off = info.Size + off + } + req, err := http.NewRequest("GET", restPath(b.url, h), nil) if err != nil { return 0, err diff --git a/src/restic/backend/s3/s3.go b/src/restic/backend/s3/s3.go index c3571932..0dbf4802 100644 --- a/src/restic/backend/s3/s3.go +++ b/src/restic/backend/s3/s3.go @@ -86,11 +86,15 @@ func (be s3) Load(h backend.Handle, p []byte, off int64) (int, error) { return 0, err } - if off > 0 { + switch { + case off > 0: _, err = obj.Seek(off, 0) - if err != nil { - return 0, err - } + case off < 0: + _, err = obj.Seek(off, 2) + } + + if err != nil { + return 0, err } <-be.connChan diff --git a/src/restic/backend/sftp/sftp.go b/src/restic/backend/sftp/sftp.go index 4279b8d5..37f27491 100644 --- a/src/restic/backend/sftp/sftp.go +++ b/src/restic/backend/sftp/sftp.go @@ -11,10 +11,11 @@ import ( "path/filepath" "strings" - "github.com/juju/errors" - "github.com/pkg/sftp" "restic/backend" "restic/debug" + + "github.com/juju/errors" + "github.com/pkg/sftp" ) const ( @@ -304,11 +305,15 @@ func (r *SFTP) Load(h backend.Handle, p []byte, off int64) (n int, err error) { } }() - if off > 0 { + switch { + case off > 0: _, err = f.Seek(off, 0) - if err != nil { - return 0, err - } + case off < 0: + _, err = f.Seek(off, 2) + } + + if err != nil { + return 0, err } return io.ReadFull(f, p) diff --git a/src/restic/checker/checker.go b/src/restic/checker/checker.go index 2f796de6..bcfa56a0 100644 --- a/src/restic/checker/checker.go +++ b/src/restic/checker/checker.go @@ -1,7 +1,6 @@ package checker import ( - "bytes" "errors" "fmt" "sync" @@ -677,7 +676,7 @@ func checkPack(r *repository.Repository, id backend.ID) error { return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) } - unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf)) + unpacker, err := pack.NewUnpacker(r.Key(), pack.BufferLoader(buf)) if err != nil { 
return err } diff --git a/src/restic/pack/loader.go b/src/restic/pack/loader.go new file mode 100644 index 00000000..d9610a96 --- /dev/null +++ b/src/restic/pack/loader.go @@ -0,0 +1,43 @@ +package pack + +import ( + "errors" + "restic/backend" +) + +// Loader loads data from somewhere at a given offset. In contrast to +// io.ReaderAt, off may be negative, in which case it references a position +// relative to the end of the file (similar to Seek()). +type Loader interface { + Load(p []byte, off int64) (int, error) +} + +// BackendLoader creates a Loader from a Backend and a Handle. +type BackendLoader struct { + Backend backend.Backend + Handle backend.Handle +} + +// Load returns data at the given offset. +func (l BackendLoader) Load(p []byte, off int64) (int, error) { + return l.Backend.Load(l.Handle, p, off) +} + +// BufferLoader allows using a buffer as a Loader. +type BufferLoader []byte + +// Load returns data at the given offset. +func (b BufferLoader) Load(p []byte, off int64) (int, error) { + switch { + case off > int64(len(b)): + return 0, errors.New("offset is larger than data") + case off < -int64(len(b)): + return 0, errors.New("offset starts before the beginning of the data") + case off < 0: + off = int64(len(b)) + off + } + + b = b[off:] + + return copy(p, b), nil +} diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 91f12a63..930a15a7 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -245,61 +245,51 @@ const preloadHeaderSize = 2048 // NewUnpacker returns a pointer to Unpacker which can be used to read // individual Blobs from a pack. -func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) { +func NewUnpacker(k *crypto.Key, ldr Loader) (*Unpacker, error) { var err error // read the last 2048 byte, this will mostly be enough for the header, so // we do not need another round trip. 
buf := make([]byte, preloadHeaderSize) - _, err = rd.Seek(-int64(len(buf)), 2) + n, err := ldr.Load(buf, -int64(len(buf))) if err != nil { - return nil, fmt.Errorf("seek to -%d failed: %v", len(buf), err) + return nil, fmt.Errorf("Load at -%d failed: %v", len(buf), err) + } + buf = buf[:n] + + bs := binary.Size(uint32(0)) + p := len(buf) - bs + + // read the length from the end of the buffer + length := int(binary.LittleEndian.Uint32(buf[p : p+bs])) + buf = buf[:p] + + // if the header is longer than the preloaded buffer, call the loader again. + if length > len(buf) { + buf = make([]byte, length) + n, err := ldr.Load(buf, -int64(len(buf)+bs)) + if err != nil { + return nil, fmt.Errorf("Load at -%d failed: %v", len(buf), err) + } + buf = buf[:n] } - _, err = io.ReadFull(rd, buf) - if err != nil { - return nil, fmt.Errorf("error reading last %d bytes: %v", len(buf), err) - } - - hdrRd := io.ReadSeeker(bytes.NewReader(buf)) - ls := binary.Size(uint32(0)) - - // reset to the end to read header length - _, err = hdrRd.Seek(-int64(ls), 2) - if err != nil { - return nil, fmt.Errorf("seeking to read header length failed: %v", err) - } - - var length uint32 - err = binary.Read(hdrRd, binary.LittleEndian, &length) - if err != nil { - return nil, fmt.Errorf("reading header length failed: %v", err) - } - - // if the header is longer than the preloaded buffer, use the original - // reader (and do another round trip) - if int(length) > preloadHeaderSize-ls { - hdrRd = rd - } - - // reset to the beginning of the header - _, err = hdrRd.Seek(-int64(ls)-int64(length), 2) - if err != nil { - return nil, fmt.Errorf("seeking to read header length failed: %v", err) - } + buf = buf[len(buf)-length:] // read header - hrd, err := crypto.DecryptFrom(k, io.LimitReader(hdrRd, int64(length))) + hdr, err := crypto.Decrypt(k, buf, buf) if err != nil { return nil, err } + rd := bytes.NewReader(hdr) + var entries []Blob pos := uint(0) for { e := headerEntry{} - err = binary.Read(hrd, 
binary.LittleEndian, &e) + err = binary.Read(rd, binary.LittleEndian, &e) if err == io.EOF { break } @@ -328,11 +318,11 @@ func NewUnpacker(k *crypto.Key, rd io.ReadSeeker) (*Unpacker, error) { pos += uint(e.Length) } - p := &Unpacker{ + up := &Unpacker{ rd: rd, k: k, Entries: entries, } - return p, nil + return up, nil } diff --git a/src/restic/pack/pack_test.go b/src/restic/pack/pack_test.go index e987ced7..16e2af1b 100644 --- a/src/restic/pack/pack_test.go +++ b/src/restic/pack/pack_test.go @@ -7,7 +7,6 @@ import ( "encoding/binary" "encoding/json" "io" - "io/ioutil" "testing" "restic/backend" @@ -48,7 +47,7 @@ func newPack(t testing.TB, k *crypto.Key) ([]Buf, []byte, uint) { return bufs, packData, p.Size() } -func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReadSeeker, packSize uint) { +func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, ldr pack.Loader, packSize uint) { written := 0 for _, l := range lengths { written += l @@ -64,20 +63,24 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReadSeeker, pack Equals(t, uint(written), packSize) // read and parse it again - np, err := pack.NewUnpacker(k, rd) + np, err := pack.NewUnpacker(k, ldr) OK(t, err) Equals(t, len(np.Entries), len(bufs)) + var buf []byte for i, b := range bufs { e := np.Entries[i] Equals(t, b.id, e.ID) - brd, err := e.GetReader(rd) - OK(t, err) - data, err := ioutil.ReadAll(brd) + if len(buf) < int(e.Length) { + buf = make([]byte, int(e.Length)) + } + buf = buf[:int(e.Length)] + n, err := ldr.Load(buf, int64(e.Offset)) OK(t, err) + buf = buf[:n] - Assert(t, bytes.Equal(b.data, data), + Assert(t, bytes.Equal(b.data, buf), "data for blob %v doesn't match", i) } } @@ -88,7 +91,7 @@ func TestCreatePack(t *testing.T) { bufs, packData, packSize := newPack(t, k) Equals(t, uint(len(packData)), packSize) - verifyBlobs(t, bufs, k, bytes.NewReader(packData), packSize) + verifyBlobs(t, bufs, k, pack.BufferLoader(packData), packSize) } var blobTypeJSON = []struct { @@ 
-125,6 +128,6 @@ func TestUnpackReadSeeker(t *testing.T) { handle := backend.Handle{Type: backend.Data, Name: id.String()} OK(t, b.Save(handle, packData)) - rd := backend.NewReadSeeker(b, handle) - verifyBlobs(t, bufs, k, rd, packSize) + ldr := pack.BackendLoader{Backend: b, Handle: handle} + verifyBlobs(t, bufs, k, ldr, packSize) } diff --git a/src/restic/repository/repack.go b/src/restic/repository/repack.go index 9f99cdb6..0498164a 100644 --- a/src/restic/repository/repack.go +++ b/src/restic/repository/repack.go @@ -1,7 +1,6 @@ package repository import ( - "bytes" "io" "restic/backend" "restic/crypto" @@ -33,7 +32,7 @@ func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err debug.Log("Repack", "pack %v loaded (%d bytes)", packID.Str(), len(buf)) - unpck, err := pack.NewUnpacker(repo.Key(), bytes.NewReader(buf)) + unpck, err := pack.NewUnpacker(repo.Key(), pack.BufferLoader(buf)) if err != nil { return err } diff --git a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 1fe2d26d..654994af 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -547,9 +547,9 @@ func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.I // ListPack returns the list of blobs saved in the pack id. 
func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, error) { h := backend.Handle{Type: backend.Data, Name: id.String()} - rd := backend.NewReadSeeker(r.Backend(), h) + ldr := pack.BackendLoader{Backend: r.Backend(), Handle: h} - unpacker, err := pack.NewUnpacker(r.Key(), rd) + unpacker, err := pack.NewUnpacker(r.Key(), ldr) if err != nil { return nil, err } From fa283c6ecd5a7011380d888f0009030a6cc83bec Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 14:50:41 +0200 Subject: [PATCH 47/98] Remove unused GetReader() --- src/restic/pack/pack.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 930a15a7..b2cc4dcc 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -67,17 +67,6 @@ type Blob struct { Offset uint } -// GetReader returns an io.Reader for the blob entry e. -func (e Blob) GetReader(rd io.ReadSeeker) (io.Reader, error) { - // seek to the correct location - _, err := rd.Seek(int64(e.Offset), 0) - if err != nil { - return nil, err - } - - return io.LimitReader(rd, int64(e.Length)), nil -} - // Packer is used to create a new Pack. 
type Packer struct { blobs []Blob From b0565015ccb85c363ca8f8a682558a9a7c12948f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 14:51:19 +0200 Subject: [PATCH 48/98] Remove ReadSeeker --- src/restic/backend/readseeker.go | 63 -------------- src/restic/backend/readseeker_test.go | 114 -------------------------- 2 files changed, 177 deletions(-) delete mode 100644 src/restic/backend/readseeker.go delete mode 100644 src/restic/backend/readseeker_test.go diff --git a/src/restic/backend/readseeker.go b/src/restic/backend/readseeker.go deleted file mode 100644 index ea063e3f..00000000 --- a/src/restic/backend/readseeker.go +++ /dev/null @@ -1,63 +0,0 @@ -package backend - -import ( - "errors" - "io" -) - -type readSeeker struct { - be Backend - h Handle - t Type - name string - offset int64 - size int64 -} - -// NewReadSeeker returns an io.ReadSeeker for the given object in the backend. -func NewReadSeeker(be Backend, h Handle) io.ReadSeeker { - return &readSeeker{be: be, h: h} -} - -func (rd *readSeeker) Read(p []byte) (int, error) { - n, err := rd.be.Load(rd.h, p, rd.offset) - rd.offset += int64(n) - return n, err -} - -func (rd *readSeeker) Seek(offset int64, whence int) (n int64, err error) { - switch whence { - case 0: - rd.offset = offset - case 1: - rd.offset += offset - case 2: - if rd.size == 0 { - rd.size, err = rd.getSize() - if err != nil { - return 0, err - } - } - - pos := rd.size + offset - if pos < 0 { - return 0, errors.New("invalid offset, before start of blob") - } - - rd.offset = pos - return rd.offset, nil - default: - return 0, errors.New("invalid value for parameter whence") - } - - return rd.offset, nil -} - -func (rd *readSeeker) getSize() (int64, error) { - stat, err := rd.be.Stat(rd.h) - if err != nil { - return 0, err - } - - return stat.Size, nil -} diff --git a/src/restic/backend/readseeker_test.go b/src/restic/backend/readseeker_test.go deleted file mode 100644 index 013f2528..00000000 --- 
a/src/restic/backend/readseeker_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package backend_test - -import ( - "bytes" - "io" - "math/rand" - "restic/backend" - "restic/backend/mem" - "testing" - - . "restic/test" -) - -func abs(a int) int { - if a < 0 { - return -a - } - - return a -} - -func loadAndCompare(t testing.TB, rd io.ReadSeeker, size int, offset int64, expected []byte) { - var ( - pos int64 - err error - ) - - if offset >= 0 { - pos, err = rd.Seek(offset, 0) - } else { - pos, err = rd.Seek(offset, 2) - } - if err != nil { - t.Errorf("Seek(%d, 0) returned error: %v", offset, err) - return - } - - if offset >= 0 && pos != offset { - t.Errorf("pos after seek is wrong, want %d, got %d", offset, pos) - } else if offset < 0 && pos != int64(size)+offset { - t.Errorf("pos after relative seek is wrong, want %d, got %d", int64(size)+offset, pos) - } - - buf := make([]byte, len(expected)) - n, err := rd.Read(buf) - - // if we requested data beyond the end of the file, ignore - // ErrUnexpectedEOF error - if offset > 0 && len(buf) > size && err == io.ErrUnexpectedEOF { - err = nil - buf = buf[:size] - } - - if offset < 0 && len(buf) > abs(int(offset)) && err == io.ErrUnexpectedEOF { - err = nil - buf = buf[:abs(int(offset))] - } - - if n != len(buf) { - t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d", - len(buf), offset, len(buf), n) - return - } - - if err != nil { - t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), offset, err) - return - } - - buf = buf[:n] - if !bytes.Equal(buf, expected) { - t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), offset) - return - } -} - -func TestReadSeeker(t *testing.T) { - b := mem.New() - - length := rand.Intn(1<<24) + 2000 - - data := Random(23, length) - id := backend.Hash(data) - - handle := backend.Handle{Type: backend.Data, Name: id.String()} - err := b.Save(handle, data) - if err != nil { - t.Fatalf("Save() error: %v", err) - } - - for i := 0; i < 50; i++ { - l := rand.Intn(length + 2000) - o := 
rand.Intn(length + 2000) - - if rand.Float32() > 0.5 { - o = -o - } - - d := data - if o > 0 && o < len(d) { - d = d[o:] - } else { - o = len(d) - d = d[:0] - } - - if l > 0 && l < len(d) { - d = d[:l] - } - - rd := backend.NewReadSeeker(b, handle) - loadAndCompare(t, rd, len(data), int64(o), d) - } -} From 71924fb7c08542a5f3c91e50641bace38b43b720 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 15:52:31 +0200 Subject: [PATCH 49/98] Add tests for Load() with negative offset --- src/restic/backend/local/backend_test.go | 7 ++ src/restic/backend/mem/backend_test.go | 7 ++ src/restic/backend/mem/mem_backend.go | 2 +- src/restic/backend/rest/backend_test.go | 7 ++ src/restic/backend/rest/rest.go | 8 +- src/restic/backend/s3/backend_test.go | 7 ++ src/restic/backend/sftp/backend_test.go | 7 ++ src/restic/backend/test/backend_test.go | 7 ++ src/restic/backend/test/tests.go | 109 ++++++++++++++++++++++- 9 files changed, 154 insertions(+), 7 deletions(-) diff --git a/src/restic/backend/local/backend_test.go b/src/restic/backend/local/backend_test.go index 8607f01b..8954dc83 100644 --- a/src/restic/backend/local/backend_test.go +++ b/src/restic/backend/local/backend_test.go @@ -51,6 +51,13 @@ func TestLocalBackendLoad(t *testing.T) { test.TestLoad(t) } +func TestLocalBackendLoadNegativeOffset(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoadNegativeOffset(t) +} + func TestLocalBackendSave(t *testing.T) { if SkipMessage != "" { t.Skip(SkipMessage) diff --git a/src/restic/backend/mem/backend_test.go b/src/restic/backend/mem/backend_test.go index 13e95f11..6bf19580 100644 --- a/src/restic/backend/mem/backend_test.go +++ b/src/restic/backend/mem/backend_test.go @@ -51,6 +51,13 @@ func TestMemBackendLoad(t *testing.T) { test.TestLoad(t) } +func TestMemBackendLoadNegativeOffset(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoadNegativeOffset(t) +} + func TestMemBackendSave(t *testing.T) { if 
SkipMessage != "" { t.Skip(SkipMessage) diff --git a/src/restic/backend/mem/mem_backend.go b/src/restic/backend/mem/mem_backend.go index 961997ae..5682d491 100644 --- a/src/restic/backend/mem/mem_backend.go +++ b/src/restic/backend/mem/mem_backend.go @@ -120,7 +120,7 @@ func memLoad(be *MemoryBackend, h backend.Handle, p []byte, off int64) (int, err case off > int64(len(buf)): return 0, errors.New("offset beyond end of file") case off < -int64(len(buf)): - return 0, errors.New("offset beyond beginning of file") + off = 0 case off < 0: off = int64(len(buf)) + off } diff --git a/src/restic/backend/rest/backend_test.go b/src/restic/backend/rest/backend_test.go index 4274bfcb..9605396d 100644 --- a/src/restic/backend/rest/backend_test.go +++ b/src/restic/backend/rest/backend_test.go @@ -51,6 +51,13 @@ func TestRestBackendLoad(t *testing.T) { test.TestLoad(t) } +func TestRestBackendLoadNegativeOffset(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoadNegativeOffset(t) +} + func TestRestBackendSave(t *testing.T) { if SkipMessage != "" { t.Skip(SkipMessage) diff --git a/src/restic/backend/rest/rest.go b/src/restic/backend/rest/rest.go index 00fe0192..125331d7 100644 --- a/src/restic/backend/rest/rest.go +++ b/src/restic/backend/rest/rest.go @@ -82,11 +82,11 @@ func (b *restBackend) Load(h backend.Handle, p []byte, off int64) (n int, err er return 0, err } - if off > -info.Size { - return 0, errors.New("offset before beginning of file") + if -off > info.Size { + off = 0 + } else { + off = info.Size + off } - - off = info.Size + off } req, err := http.NewRequest("GET", restPath(b.url, h), nil) diff --git a/src/restic/backend/s3/backend_test.go b/src/restic/backend/s3/backend_test.go index 82eca263..9fb4dd3f 100644 --- a/src/restic/backend/s3/backend_test.go +++ b/src/restic/backend/s3/backend_test.go @@ -51,6 +51,13 @@ func TestS3BackendLoad(t *testing.T) { test.TestLoad(t) } +func TestS3BackendLoadNegativeOffset(t *testing.T) { + if 
SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoadNegativeOffset(t) +} + func TestS3BackendSave(t *testing.T) { if SkipMessage != "" { t.Skip(SkipMessage) diff --git a/src/restic/backend/sftp/backend_test.go b/src/restic/backend/sftp/backend_test.go index a812f8cd..c28dd8c9 100644 --- a/src/restic/backend/sftp/backend_test.go +++ b/src/restic/backend/sftp/backend_test.go @@ -51,6 +51,13 @@ func TestSftpBackendLoad(t *testing.T) { test.TestLoad(t) } +func TestSftpBackendLoadNegativeOffset(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoadNegativeOffset(t) +} + func TestSftpBackendSave(t *testing.T) { if SkipMessage != "" { t.Skip(SkipMessage) diff --git a/src/restic/backend/test/backend_test.go b/src/restic/backend/test/backend_test.go index b495ce66..c577092f 100644 --- a/src/restic/backend/test/backend_test.go +++ b/src/restic/backend/test/backend_test.go @@ -51,6 +51,13 @@ func TestTestBackendLoad(t *testing.T) { test.TestLoad(t) } +func TestTestBackendLoadNegativeOffset(t *testing.T) { + if SkipMessage != "" { + t.Skip(SkipMessage) + } + test.TestLoadNegativeOffset(t) +} + func TestTestBackendSave(t *testing.T) { if SkipMessage != "" { t.Skip(SkipMessage) diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go index ef0efd77..e030e6bb 100644 --- a/src/restic/backend/test/tests.go +++ b/src/restic/backend/test/tests.go @@ -220,9 +220,59 @@ func TestLoad(t testing.TB) { buf := make([]byte, l) n, err := b.Load(handle, buf, int64(o)) - // if we requested data beyond the end of the file, ignore + // if we requested data beyond the end of the file, require // ErrUnexpectedEOF error - if l > len(d) && err == io.ErrUnexpectedEOF { + if l > len(d) { + if err != io.ErrUnexpectedEOF { + t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o)) + } + err = nil + buf = buf[:len(d)] + } + + if err != nil { + t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err) + 
continue + } + + if n != len(buf) { + t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d", + len(buf), int64(o), len(buf), n) + continue + } + + buf = buf[:n] + if !bytes.Equal(buf, d) { + t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o)) + continue + } + } + + // test with negative offset + for i := 0; i < 50; i++ { + l := rand.Intn(length + 2000) + o := rand.Intn(length + 2000) + + d := data + if o < len(d) { + d = d[len(d)-o:] + } else { + o = 0 + } + + if l > 0 && l < len(d) { + d = d[:l] + } + + buf := make([]byte, l) + n, err := b.Load(handle, buf, -int64(o)) + + // if we requested data beyond the end of the file, require + // ErrUnexpectedEOF error + if l > len(d) { + if err != io.ErrUnexpectedEOF { + t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o)) + } err = nil buf = buf[:len(d)] } @@ -259,6 +309,61 @@ func TestLoad(t testing.TB) { OK(t, b.Remove(backend.Data, id.String())) } +// TestLoadNegativeOffset tests the backend's Load function with negative offsets. 
+func TestLoadNegativeOffset(t testing.TB) { + b := open(t) + defer close(t) + + length := rand.Intn(1<<24) + 2000 + + data := Random(23, length) + id := backend.Hash(data) + + handle := backend.Handle{Type: backend.Data, Name: id.String()} + err := b.Save(handle, data) + if err != nil { + t.Fatalf("Save() error: %v", err) + } + + // test normal reads + for i := 0; i < 50; i++ { + l := rand.Intn(length + 2000) + o := -rand.Intn(length + 2000) + + buf := make([]byte, l) + n, err := b.Load(handle, buf, int64(o)) + t.Logf("data %v, load(%v, %v) -> %v %v", + len(data), len(buf), o, n, err) + + // if we requested data beyond the end of the file, require + // ErrUnexpectedEOF error + if len(buf) > -o { + if err != io.ErrUnexpectedEOF { + t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), o) + } + err = nil + buf = buf[:-o] + } + + if err != nil { + t.Errorf("Load(%d, %d) returned error: %v", len(buf), o, err) + } + + if n != len(buf) { + t.Errorf("Load(%d, %d) returned short read, only got %d bytes", len(buf), o, n) + } + + p := len(data) + o + if !bytes.Equal(buf, data[p:p+len(buf)]) { + t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), o) + continue + } + + } + + OK(t, b.Remove(backend.Data, id.String())) +} + // TestSave tests saving data in the backend. 
func TestSave(t testing.TB) { b := open(t) From a5cbbb8b5ac8b133ca943888c58edfe4502067ae Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 15:53:00 +0200 Subject: [PATCH 50/98] Fix BufferLoader for negative offset --- src/restic/pack/loader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/restic/pack/loader.go b/src/restic/pack/loader.go index d9610a96..2b4ada8a 100644 --- a/src/restic/pack/loader.go +++ b/src/restic/pack/loader.go @@ -32,7 +32,7 @@ func (b BufferLoader) Load(p []byte, off int64) (int, error) { case off > int64(len(b)): return 0, errors.New("offset is larger than data") case off < -int64(len(b)): - return 0, errors.New("offset starts before the beginning of the data") + off = 0 case off < 0: off = int64(len(b)) + off } From ef33cf12cafc7fe67370b704df3587c5c2a9182c Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 16:24:02 +0200 Subject: [PATCH 51/98] Fix Unpacker for packs < 2048 byte --- src/restic/pack/pack.go | 6 ++++++ src/restic/pack/pack_test.go | 28 +++++++++++++++++++++------- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index b2cc4dcc..5e0d671d 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -241,6 +241,12 @@ func NewUnpacker(k *crypto.Key, ldr Loader) (*Unpacker, error) { // we do not need another round trip. buf := make([]byte, preloadHeaderSize) n, err := ldr.Load(buf, -int64(len(buf))) + + if err == io.ErrUnexpectedEOF { + err = nil + buf = buf[:n] + } + if err != nil { return nil, fmt.Errorf("Load at -%d failed: %v", len(buf), err) } diff --git a/src/restic/pack/pack_test.go b/src/restic/pack/pack_test.go index 16e2af1b..e13c9884 100644 --- a/src/restic/pack/pack_test.go +++ b/src/restic/pack/pack_test.go @@ -16,14 +16,14 @@ import ( . 
"restic/test" ) -var lengths = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30231} +var testLens = []int{23, 31650, 25860, 10928, 13769, 19862, 5211, 127, 13690, 30231} type Buf struct { data []byte id backend.ID } -func newPack(t testing.TB, k *crypto.Key) ([]Buf, []byte, uint) { +func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) { bufs := []Buf{} for _, l := range lengths { @@ -49,13 +49,13 @@ func newPack(t testing.TB, k *crypto.Key) ([]Buf, []byte, uint) { func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, ldr pack.Loader, packSize uint) { written := 0 - for _, l := range lengths { - written += l + for _, buf := range bufs { + written += len(buf.data) } // header length written += binary.Size(uint32(0)) // header - written += len(lengths) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize) + written += len(bufs) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize) // header crypto written += crypto.Extension @@ -89,7 +89,7 @@ func TestCreatePack(t *testing.T) { // create random keys k := crypto.NewRandomKey() - bufs, packData, packSize := newPack(t, k) + bufs, packData, packSize := newPack(t, k, testLens) Equals(t, uint(len(packData)), packSize) verifyBlobs(t, bufs, k, pack.BufferLoader(packData), packSize) } @@ -121,7 +121,21 @@ func TestUnpackReadSeeker(t *testing.T) { // create random keys k := crypto.NewRandomKey() - bufs, packData, packSize := newPack(t, k) + bufs, packData, packSize := newPack(t, k, testLens) + + b := mem.New() + id := backend.Hash(packData) + + handle := backend.Handle{Type: backend.Data, Name: id.String()} + OK(t, b.Save(handle, packData)) + ldr := pack.BackendLoader{Backend: b, Handle: handle} + verifyBlobs(t, bufs, k, ldr, packSize) +} + +func TestShortPack(t *testing.T) { + k := crypto.NewRandomKey() + + bufs, packData, packSize := newPack(t, k, []int{23}) b := mem.New() id := backend.Hash(packData) From 
6b384287f3c7333733921740ab2fa20484245baa Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 17:18:45 +0200 Subject: [PATCH 52/98] Return error when it occurs --- src/restic/repository/parallel.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/restic/repository/parallel.go b/src/restic/repository/parallel.go index d1e66c58..7094ae29 100644 --- a/src/restic/repository/parallel.go +++ b/src/restic/repository/parallel.go @@ -83,7 +83,7 @@ func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc { id, err := backend.ParseID(s) if err != nil { debug.Log("repository.ParallelWorkFuncParseID", "invalid ID %q: %v", id, err) - return nil + return err } return f(id, done) From bad6184ab5433d894cd7dcd7496b56497555692f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 17:19:00 +0200 Subject: [PATCH 53/98] Add new Index data structure --- src/restic/index/index.go | 147 +++++++++++++++++++++++++++++++++ src/restic/index/index_test.go | 87 +++++++++++++++++++ 2 files changed, 234 insertions(+) create mode 100644 src/restic/index/index.go create mode 100644 src/restic/index/index_test.go diff --git a/src/restic/index/index.go b/src/restic/index/index.go new file mode 100644 index 00000000..9b7eed1d --- /dev/null +++ b/src/restic/index/index.go @@ -0,0 +1,147 @@ +// Package index contains various data structures for indexing content in a repository or backend. +package index + +import ( + "fmt" + "os" + "restic/backend" + "restic/debug" + "restic/pack" + "restic/repository" + "restic/worker" +) + +// Pack contains information about the contents of a pack. +type Pack struct { + Entries []pack.Blob +} + +// Index contains information about blobs and packs stored in a repo. +type Index struct { + Packs map[backend.ID]*Pack +} + +func newIndex() *Index { + return &Index{ + Packs: make(map[backend.ID]*Pack), + } +} + +// New creates a new index for repo from scratch. 
+func New(repo *repository.Repository) (*Index, error) { + done := make(chan struct{}) + defer close(done) + + ch := make(chan worker.Job) + go repository.ListAllPacks(repo, ch, done) + + idx := newIndex() + + for job := range ch { + packID := job.Data.(backend.ID) + if job.Error != nil { + fmt.Fprintf(os.Stderr, "unable to list pack %v: %v\n", packID.Str(), job.Error) + continue + } + + j := job.Result.(repository.ListAllPacksResult) + + debug.Log("Index.New", "pack %v contains %d blobs", packID.Str(), len(j.Entries)) + + if _, ok := idx.Packs[packID]; ok { + return nil, fmt.Errorf("pack %v processed twice", packID.Str()) + } + p := &Pack{Entries: j.Entries} + idx.Packs[packID] = p + } + + return idx, nil +} + +const loadIndexParallelism = 20 + +type packJSON struct { + ID backend.ID `json:"id"` + Blobs []blobJSON `json:"blobs"` +} + +type blobJSON struct { + ID backend.ID `json:"id"` + Type pack.BlobType `json:"type"` + Offset uint `json:"offset"` + Length uint `json:"length"` +} + +type indexJSON struct { + Supersedes backend.IDs `json:"supersedes,omitempty"` + Packs []*packJSON `json:"packs"` +} + +func loadIndexJSON(repo *repository.Repository, id backend.ID) (*indexJSON, error) { + fmt.Printf("process index %v\n", id.Str()) + + var idx indexJSON + err := repo.LoadJSONUnpacked(backend.Index, id, &idx) + if err != nil { + return nil, err + } + + return &idx, nil +} + +// Load creates an index by loading all index files from the repo. 
+func Load(repo *repository.Repository) (*Index, error) { + debug.Log("index.Load", "loading indexes") + + done := make(chan struct{}) + defer close(done) + + supersedes := make(map[backend.ID]backend.IDSet) + results := make(map[backend.ID]map[backend.ID]Pack) + + for id := range repo.List(backend.Index, done) { + debug.Log("index.Load", "Load index %v", id.Str()) + idx, err := loadIndexJSON(repo, id) + if err != nil { + return nil, err + } + + res := make(map[backend.ID]Pack) + supersedes[id] = backend.NewIDSet() + for _, sid := range idx.Supersedes { + debug.Log("index.Load", " index %v supersedes %v", id.Str(), sid) + supersedes[id].Insert(sid) + } + + for _, jpack := range idx.Packs { + P := Pack{} + for _, blob := range jpack.Blobs { + P.Entries = append(P.Entries, pack.Blob{ + ID: blob.ID, + Type: blob.Type, + Offset: blob.Offset, + Length: blob.Length, + }) + } + res[jpack.ID] = P + } + + results[id] = res + } + + for superID, list := range supersedes { + for indexID := range list { + debug.Log("index.Load", " removing index %v, superseded by %v", indexID.Str(), superID.Str()) + delete(results, indexID) + } + } + + idx := newIndex() + for _, packs := range results { + for id, pack := range packs { + idx.Packs[id] = &pack + } + } + + return idx, nil +} diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go new file mode 100644 index 00000000..ad15a6a2 --- /dev/null +++ b/src/restic/index/index_test.go @@ -0,0 +1,87 @@ +package index + +import ( + "restic" + "restic/backend/local" + "restic/repository" + "testing" + "time" +) + +var ( + snapshotTime = time.Unix(1470492820, 207401672) + snapshots = 3 + depth = 3 +) + +func createFilledRepo(t testing.TB, snapshots int) (*repository.Repository, func()) { + repo, cleanup := repository.TestRepository(t) + + for i := 0; i < 3; i++ { + restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth) + } + + return repo, cleanup +} + +func TestIndexNew(t *testing.T) { + 
repo, cleanup := createFilledRepo(t, 3) + defer cleanup() + + idx, err := New(repo) + if err != nil { + t.Fatalf("New() returned error %v", err) + } + + if idx == nil { + t.Fatalf("New() returned nil index") + } +} + +func TestIndexLoad(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3) + defer cleanup() + + idx, err := Load(repo) + if err != nil { + t.Fatalf("Load() returned error %v", err) + } + + if idx == nil { + t.Fatalf("Load() returned nil index") + } +} + +func openRepo(t testing.TB, dir, password string) *repository.Repository { + b, err := local.Open(dir) + if err != nil { + t.Fatalf("open backend %v failed: %v", dir, err) + } + + r := repository.New(b) + err = r.SearchKey(password) + if err != nil { + t.Fatalf("unable to open repo with password: %v", err) + } + + return r +} + +func BenchmarkIndexNew(b *testing.B) { + repo, cleanup := createFilledRepo(b, 3) + defer cleanup() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + idx, err := New(repo) + + if err != nil { + b.Fatalf("New() returned error %v", err) + } + + if idx == nil { + b.Fatalf("New() returned nil index") + } + } +} From 6808523d34da11273049e9adca2e2f2baf742a52 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 18:42:58 +0200 Subject: [PATCH 54/98] Add String() for Blob --- src/restic/pack/pack.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/restic/pack/pack.go b/src/restic/pack/pack.go index 5e0d671d..78159dfa 100644 --- a/src/restic/pack/pack.go +++ b/src/restic/pack/pack.go @@ -67,6 +67,11 @@ type Blob struct { Offset uint } +func (b Blob) String() string { + return fmt.Sprintf("", + b.ID.Str(), b.Type, b.Length, b.Offset) +} + // Packer is used to create a new Pack. 
type Packer struct { blobs []Blob From 240b8f273a611378bd1376f3dc074b2561636b24 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 18:45:25 +0200 Subject: [PATCH 55/98] Add more index tests --- src/restic/index/index.go | 15 +++---- src/restic/index/index_test.go | 73 +++++++++++++++++++++++++++++++++- 2 files changed, 79 insertions(+), 9 deletions(-) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 9b7eed1d..9a493701 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -18,12 +18,12 @@ type Pack struct { // Index contains information about blobs and packs stored in a repo. type Index struct { - Packs map[backend.ID]*Pack + Packs map[backend.ID]Pack } func newIndex() *Index { return &Index{ - Packs: make(map[backend.ID]*Pack), + Packs: make(map[backend.ID]Pack), } } @@ -51,7 +51,7 @@ func New(repo *repository.Repository) (*Index, error) { if _, ok := idx.Packs[packID]; ok { return nil, fmt.Errorf("pack %v processed twice", packID.Str()) } - p := &Pack{Entries: j.Entries} + p := Pack{Entries: j.Entries} idx.Packs[packID] = p } @@ -78,7 +78,7 @@ type indexJSON struct { } func loadIndexJSON(repo *repository.Repository, id backend.ID) (*indexJSON, error) { - fmt.Printf("process index %v\n", id.Str()) + debug.Log("index.loadIndexJSON", "process index %v\n", id.Str()) var idx indexJSON err := repo.LoadJSONUnpacked(backend.Index, id, &idx) @@ -116,12 +116,13 @@ func Load(repo *repository.Repository) (*Index, error) { for _, jpack := range idx.Packs { P := Pack{} for _, blob := range jpack.Blobs { - P.Entries = append(P.Entries, pack.Blob{ + entry := pack.Blob{ ID: blob.ID, Type: blob.Type, Offset: blob.Offset, Length: blob.Length, - }) + } + P.Entries = append(P.Entries, entry) } res[jpack.ID] = P } @@ -139,7 +140,7 @@ func Load(repo *repository.Repository) (*Index, error) { idx := newIndex() for _, packs := range results { for id, pack := range packs { - idx.Packs[id] = &pack + idx.Packs[id] = pack } } diff 
--git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index ad15a6a2..335b9305 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -2,6 +2,7 @@ package index import ( "restic" + "restic/backend" "restic/backend/local" "restic/repository" "testing" @@ -24,6 +25,14 @@ func createFilledRepo(t testing.TB, snapshots int) (*repository.Repository, func return repo, cleanup } +func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) { + for id := range repo.List(backend.Data, nil) { + if _, ok := idx.Packs[id]; !ok { + t.Errorf("pack %v missing from index", id.Str()) + } + } +} + func TestIndexNew(t *testing.T) { repo, cleanup := createFilledRepo(t, 3) defer cleanup() @@ -36,20 +45,80 @@ func TestIndexNew(t *testing.T) { if idx == nil { t.Fatalf("New() returned nil index") } + + validateIndex(t, repo, idx) } func TestIndexLoad(t *testing.T) { repo, cleanup := createFilledRepo(t, 3) defer cleanup() - idx, err := Load(repo) + loadIdx, err := Load(repo) if err != nil { t.Fatalf("Load() returned error %v", err) } - if idx == nil { + if loadIdx == nil { t.Fatalf("Load() returned nil index") } + + validateIndex(t, repo, loadIdx) + + newIdx, err := New(repo) + if err != nil { + t.Fatalf("New() returned error %v", err) + } + + if len(loadIdx.Packs) != len(newIdx.Packs) { + t.Errorf("number of packs does not match: want %v, got %v", + len(loadIdx.Packs), len(newIdx.Packs)) + } + + validateIndex(t, repo, newIdx) + + for packID, packNew := range newIdx.Packs { + packLoad, ok := loadIdx.Packs[packID] + + if !ok { + t.Errorf("loaded index does not list pack %v", packID.Str()) + continue + } + + if len(packNew.Entries) != len(packLoad.Entries) { + t.Errorf(" number of entries in pack %v does not match: %d != %d\n %v\n %v", + packID.Str(), len(packNew.Entries), len(packLoad.Entries), + packNew.Entries, packLoad.Entries) + continue + } + + for _, entryNew := range packNew.Entries { + found := false + for _, entryLoad := 
range packLoad.Entries { + if !entryLoad.ID.Equal(entryNew.ID) { + continue + } + + if entryLoad.Type != entryNew.Type { + continue + } + + if entryLoad.Offset != entryNew.Offset { + continue + } + + if entryLoad.Length != entryNew.Length { + continue + } + + found = true + break + } + + if !found { + t.Errorf("blob not found in loaded index: %v", entryNew) + } + } + } } func openRepo(t testing.TB, dir, password string) *repository.Repository { From 1058a91b39fdbf901fd0187b209d63c2c0c63754 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 21:56:06 +0200 Subject: [PATCH 56/98] Add option to create duplicate blobs in TestCreateSnapshot --- src/restic/find_test.go | 2 +- src/restic/index/index_test.go | 10 +++++----- src/restic/testing.go | 23 +++++++++++++++-------- src/restic/testing_test.go | 4 ++-- 4 files changed, 23 insertions(+), 16 deletions(-) diff --git a/src/restic/find_test.go b/src/restic/find_test.go index b3050e26..17fbc83b 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -85,7 +85,7 @@ func TestFindUsedBlobs(t *testing.T) { var snapshots []*Snapshot for i := 0; i < findTestSnapshots; i++ { - sn := TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth) + sn := TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0) t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str()) snapshots = append(snapshots, sn) } diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index 335b9305..307a1c32 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -15,11 +15,11 @@ var ( depth = 3 ) -func createFilledRepo(t testing.TB, snapshots int) (*repository.Repository, func()) { +func createFilledRepo(t testing.TB, snapshots int, dup float32) (*repository.Repository, func()) { repo, cleanup := repository.TestRepository(t) for i := 0; i < 3; i++ { - restic.TestCreateSnapshot(t, repo, 
snapshotTime.Add(time.Duration(i)*time.Second), depth) + restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth, dup) } return repo, cleanup @@ -34,7 +34,7 @@ func validateIndex(t testing.TB, repo *repository.Repository, idx *Index) { } func TestIndexNew(t *testing.T) { - repo, cleanup := createFilledRepo(t, 3) + repo, cleanup := createFilledRepo(t, 3, 0) defer cleanup() idx, err := New(repo) @@ -50,7 +50,7 @@ func TestIndexNew(t *testing.T) { } func TestIndexLoad(t *testing.T) { - repo, cleanup := createFilledRepo(t, 3) + repo, cleanup := createFilledRepo(t, 3, 0) defer cleanup() loadIdx, err := Load(repo) @@ -137,7 +137,7 @@ func openRepo(t testing.TB, dir, password string) *repository.Repository { } func BenchmarkIndexNew(b *testing.B) { - repo, cleanup := createFilledRepo(b, 3) + repo, cleanup := createFilledRepo(b, 3, 0) defer cleanup() b.ResetTimer() diff --git a/src/restic/testing.go b/src/restic/testing.go index ce4e98cf..873a9ed0 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -20,9 +20,10 @@ func fakeFile(t testing.TB, seed, size int64) io.Reader { } type fakeFileSystem struct { - t testing.TB - repo *repository.Repository - knownBlobs backend.IDSet + t testing.TB + repo *repository.Repository + knownBlobs backend.IDSet + duplication float32 } // saveFile reads from rd and saves the blobs in the repository. The list of @@ -77,6 +78,10 @@ func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, backend.ID) { } func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool { + if rand.Float32() < fs.duplication { + return false + } + if fs.knownBlobs.Has(id) { return true } @@ -142,8 +147,9 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID { // TestCreateSnapshot creates a snapshot filled with fake data. The // fake data is generated deterministically from the timestamp `at`, which is // also used as the snapshot's timestamp. 
The tree's depth can be specified -// with the parameter depth. -func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int) *Snapshot { +// with the parameter depth. The parameter duplication is a probability that +// the same blob will saved again. +func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int, duplication float32) *Snapshot { seed := at.Unix() t.Logf("create fake snapshot at %s with seed %d", at, seed) @@ -155,9 +161,10 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, snapshot.Time = at fs := fakeFileSystem{ - t: t, - repo: repo, - knownBlobs: backend.NewIDSet(), + t: t, + repo: repo, + knownBlobs: backend.NewIDSet(), + duplication: duplication, } treeID := fs.saveTree(seed, depth) diff --git a/src/restic/testing_test.go b/src/restic/testing_test.go index 1427d4a6..3c5ea5a6 100644 --- a/src/restic/testing_test.go +++ b/src/restic/testing_test.go @@ -20,7 +20,7 @@ func TestCreateSnapshot(t *testing.T) { defer cleanup() for i := 0; i < testCreateSnapshots; i++ { - restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth) + restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0) } snapshots, err := restic.LoadAllSnapshots(repo) @@ -55,7 +55,7 @@ func BenchmarkCreateSnapshot(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - restic.TestCreateSnapshot(b, repo, testSnapshotTime, testDepth) + restic.TestCreateSnapshot(b, repo, testSnapshotTime, testDepth, 0) restic.TestResetRepository(b, repo) } } From f5daf333221e3a3787d581d38828b413f7a10661 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 21:56:42 +0200 Subject: [PATCH 57/98] Add pack size to ListAllPacks --- src/restic/index/index.go | 3 ++- src/restic/repository/index_rebuild.go | 5 ++++- src/restic/repository/repack_test.go | 2 +- src/restic/repository/repository.go | 15 +++++++++++---- 4 
files changed, 18 insertions(+), 7 deletions(-) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 9a493701..ffb5272d 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -13,6 +13,7 @@ import ( // Pack contains information about the contents of a pack. type Pack struct { + Size int64 Entries []pack.Blob } @@ -51,7 +52,7 @@ func New(repo *repository.Repository) (*Index, error) { if _, ok := idx.Packs[packID]; ok { return nil, fmt.Errorf("pack %v processed twice", packID.Str()) } - p := Pack{Entries: j.Entries} + p := Pack{Entries: j.Entries, Size: j.Size} idx.Packs[packID] = p } diff --git a/src/restic/repository/index_rebuild.go b/src/restic/repository/index_rebuild.go index 2fd3e4ea..dea5be0d 100644 --- a/src/restic/repository/index_rebuild.go +++ b/src/restic/repository/index_rebuild.go @@ -14,6 +14,7 @@ const rebuildIndexWorkers = 10 // ListAllPacksResult is returned in the channel from LoadBlobsFromAllPacks. type ListAllPacksResult struct { PackID backend.ID + Size int64 Entries []pack.Blob } @@ -21,9 +22,11 @@ type ListAllPacksResult struct { func ListAllPacks(repo *Repository, ch chan<- worker.Job, done <-chan struct{}) { f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { packID := job.Data.(backend.ID) - entries, err := repo.ListPack(packID) + entries, size, err := repo.ListPack(packID) + return ListAllPacksResult{ PackID: packID, + Size: size, Entries: entries, }, err } diff --git a/src/restic/repository/repack_test.go b/src/restic/repository/repack_test.go index 37b213bd..b29c7e62 100644 --- a/src/restic/repository/repack_test.go +++ b/src/restic/repository/repack_test.go @@ -76,7 +76,7 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l blobs := pack.NewBlobSet() for id := range repo.List(backend.Data, done) { - entries, err := repo.ListPack(id) + entries, _, err := repo.ListPack(id) if err != nil { t.Fatalf("error listing pack %v: %v", id, err) } diff --git 
a/src/restic/repository/repository.go b/src/restic/repository/repository.go index 654994af..ca53fb39 100644 --- a/src/restic/repository/repository.go +++ b/src/restic/repository/repository.go @@ -544,17 +544,24 @@ func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.I return outCh } -// ListPack returns the list of blobs saved in the pack id. -func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, error) { +// ListPack returns the list of blobs saved in the pack id and the length of +// the file as stored in the backend. +func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, int64, error) { h := backend.Handle{Type: backend.Data, Name: id.String()} + + blobInfo, err := r.Backend().Stat(h) + if err != nil { + return nil, 0, err + } + ldr := pack.BackendLoader{Backend: r.Backend(), Handle: h} unpacker, err := pack.NewUnpacker(r.Key(), ldr) if err != nil { - return nil, err + return nil, 0, err } - return unpacker.Entries, nil + return unpacker.Entries, blobInfo.Size, nil } // Delete calls backend.Delete() if implemented, and returns an error From 4bdd59b4ad3e4b0c703973892fcf0041ca95cf0c Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 21:57:31 +0200 Subject: [PATCH 58/98] Index: Add DuplicateBlobs() --- src/restic/index/index.go | 19 +++++++++++++++++++ src/restic/index/index_test.go | 15 +++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index ffb5272d..f53981ab 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -147,3 +147,22 @@ func Load(repo *repository.Repository) (*Index, error) { return idx, nil } + +// DuplicateBlobs returns a list of blobs that are stored more than once in the +// repo. 
+func (idx *Index) DuplicateBlobs() (dups map[pack.Handle]int) { + dups = make(map[pack.Handle]int) + seen := pack.NewBlobSet() + + for _, p := range idx.Packs { + for _, entry := range p.Entries { + h := pack.Handle{ID: entry.ID, Type: entry.Type} + if seen.Has(h) { + dups[h]++ + } + seen.Insert(h) + } + } + + return dups +} diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index 307a1c32..b0d39abe 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -154,3 +154,18 @@ func BenchmarkIndexNew(b *testing.B) { } } } + +func TestIndexDuplicateBlobs(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0.05) + defer cleanup() + + idx, err := New(repo) + if err != nil { + t.Fatal(err) + } + + dups := idx.DuplicateBlobs() + if len(dups) == 0 { + t.Errorf("no duplicate blobs found") + } +} From 2c517e4a330950be23f489fbfe73dc4fa8ec630b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 7 Aug 2016 22:18:20 +0200 Subject: [PATCH 59/98] Add Index structures for Blobs --- src/restic/index/index.go | 78 ++++++++++++++++++++++++++++------ src/restic/index/index_test.go | 9 +++- 2 files changed, 72 insertions(+), 15 deletions(-) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index f53981ab..23232061 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -17,14 +17,22 @@ type Pack struct { Entries []pack.Blob } +// Blob contains informaiton about a blob. +type Blob struct { + Size int64 + Packs backend.IDSet +} + // Index contains information about blobs and packs stored in a repo. 
type Index struct { Packs map[backend.ID]Pack + Blobs map[pack.Handle]Blob } func newIndex() *Index { return &Index{ Packs: make(map[backend.ID]Pack), + Blobs: make(map[pack.Handle]Blob), } } @@ -49,9 +57,11 @@ func New(repo *repository.Repository) (*Index, error) { debug.Log("Index.New", "pack %v contains %d blobs", packID.Str(), len(j.Entries)) - if _, ok := idx.Packs[packID]; ok { - return nil, fmt.Errorf("pack %v processed twice", packID.Str()) + err := idx.AddPack(packID, j.Size, j.Entries) + if err != nil { + return nil, err } + p := Pack{Entries: j.Entries, Size: j.Size} idx.Packs[packID] = p } @@ -100,6 +110,8 @@ func Load(repo *repository.Repository) (*Index, error) { supersedes := make(map[backend.ID]backend.IDSet) results := make(map[backend.ID]map[backend.ID]Pack) + index := newIndex() + for id := range repo.List(backend.Index, done) { debug.Log("index.Load", "Load index %v", id.Str()) idx, err := loadIndexJSON(repo, id) @@ -115,7 +127,7 @@ func Load(repo *repository.Repository) (*Index, error) { } for _, jpack := range idx.Packs { - P := Pack{} + entries := make([]pack.Blob, 0, len(jpack.Blobs)) for _, blob := range jpack.Blobs { entry := pack.Blob{ ID: blob.ID, @@ -123,9 +135,12 @@ func Load(repo *repository.Repository) (*Index, error) { Offset: blob.Offset, Length: blob.Length, } - P.Entries = append(P.Entries, entry) + entries = append(entries, entry) + } + + if err = index.AddPack(jpack.ID, 0, entries); err != nil { + return nil, err } - res[jpack.ID] = P } results[id] = res @@ -138,27 +153,44 @@ func Load(repo *repository.Repository) (*Index, error) { } } - idx := newIndex() - for _, packs := range results { - for id, pack := range packs { - idx.Packs[id] = pack - } + return index, nil +} + +// AddPack adds a pack to the index. If this pack is already in the index, an +// error is returned. 
+func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error { + if _, ok := idx.Packs[id]; ok { + return fmt.Errorf("pack %v already present in the index", id.Str()) } - return idx, nil + idx.Packs[id] = Pack{Size: size, Entries: entries} + + for _, entry := range entries { + h := pack.Handle{ID: entry.ID, Type: entry.Type} + if _, ok := idx.Blobs[h]; !ok { + idx.Blobs[h] = Blob{ + Size: int64(entry.Length), + Packs: backend.NewIDSet(), + } + } + + idx.Blobs[h].Packs.Insert(id) + } + + return nil } // DuplicateBlobs returns a list of blobs that are stored more than once in the // repo. -func (idx *Index) DuplicateBlobs() (dups map[pack.Handle]int) { - dups = make(map[pack.Handle]int) +func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) { + dups = pack.NewBlobSet() seen := pack.NewBlobSet() for _, p := range idx.Packs { for _, entry := range p.Entries { h := pack.Handle{ID: entry.ID, Type: entry.Type} if seen.Has(h) { - dups[h]++ + dups.Insert(h) } seen.Insert(h) } @@ -166,3 +198,21 @@ func (idx *Index) DuplicateBlobs() (dups map[pack.Handle]int) { return dups } + +// PacksForBlobs returns the set of packs in which the blobs are contained. 
+func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) { + packs = backend.NewIDSet() + + for h := range blobs { + blob, ok := idx.Blobs[h] + if !ok { + continue + } + + for id := range blob.Packs { + packs.Insert(id) + } + } + + return packs +} diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index b0d39abe..a5e56797 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -156,7 +156,7 @@ func BenchmarkIndexNew(b *testing.B) { } func TestIndexDuplicateBlobs(t *testing.T) { - repo, cleanup := createFilledRepo(t, 3, 0.05) + repo, cleanup := createFilledRepo(t, 3, 0.01) defer cleanup() idx, err := New(repo) @@ -168,4 +168,11 @@ func TestIndexDuplicateBlobs(t *testing.T) { if len(dups) == 0 { t.Errorf("no duplicate blobs found") } + t.Logf("%d packs, %d unique blobs", len(idx.Packs), len(idx.Blobs)) + + packs := idx.PacksForBlobs(dups) + if len(packs) == 0 { + t.Errorf("no packs with duplicate blobs found") + } + t.Logf("%d packs with duplicate blobs", len(packs)) } From b350b443d084536064dd64c4ccec6f290b5a1084 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 8 Aug 2016 21:57:50 +0200 Subject: [PATCH 60/98] Stop backend tests early on failure --- src/restic/backend/test/tests.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go index e030e6bb..34e30cc4 100644 --- a/src/restic/backend/test/tests.go +++ b/src/restic/backend/test/tests.go @@ -272,6 +272,7 @@ func TestLoad(t testing.TB) { if l > len(d) { if err != io.ErrUnexpectedEOF { t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o)) + continue } err = nil buf = buf[:len(d)] @@ -340,6 +341,7 @@ func TestLoadNegativeOffset(t testing.TB) { if len(buf) > -o { if err != io.ErrUnexpectedEOF { t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), o) + continue } err = nil buf = buf[:-o] @@ -347,10 +349,12 @@ func 
TestLoadNegativeOffset(t testing.TB) { if err != nil { t.Errorf("Load(%d, %d) returned error: %v", len(buf), o, err) + continue } if n != len(buf) { t.Errorf("Load(%d, %d) returned short read, only got %d bytes", len(buf), o, n) + continue } p := len(data) + o From a60e3b50301db61ff5eb24845f928199da77c644 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 8 Aug 2016 21:58:09 +0200 Subject: [PATCH 61/98] Make backend tests less verbose --- src/restic/backend/test/tests.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/restic/backend/test/tests.go b/src/restic/backend/test/tests.go index 34e30cc4..77896499 100644 --- a/src/restic/backend/test/tests.go +++ b/src/restic/backend/test/tests.go @@ -333,8 +333,6 @@ func TestLoadNegativeOffset(t testing.TB) { buf := make([]byte, l) n, err := b.Load(handle, buf, int64(o)) - t.Logf("data %v, load(%v, %v) -> %v %v", - len(data), len(buf), o, n, err) // if we requested data beyond the end of the file, require // ErrUnexpectedEOF error From 23107737987071572bd04d80009809e63aabdf17 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 8 Aug 2016 21:58:26 +0200 Subject: [PATCH 62/98] Compute negative offsets ourselves in the s3 backend --- src/restic/backend/s3/s3.go | 84 ++++++++++++++++++++++++++----------- 1 file changed, 59 insertions(+), 25 deletions(-) diff --git a/src/restic/backend/s3/s3.go b/src/restic/backend/s3/s3.go index 0dbf4802..2fc6f9f3 100644 --- a/src/restic/backend/s3/s3.go +++ b/src/restic/backend/s3/s3.go @@ -77,41 +77,75 @@ func (be *s3) Location() string { // Load returns the data stored in the backend for h at the given offset // and saves it in p. Load has the same semantics as io.ReaderAt. 
-func (be s3) Load(h backend.Handle, p []byte, off int64) (int, error) { +func (be s3) Load(h backend.Handle, p []byte, off int64) (n int, err error) { + var obj *minio.Object + debug.Log("s3.Load", "%v, offset %v, len %v", h, off, len(p)) path := be.s3path(h.Type, h.Name) - obj, err := be.client.GetObject(be.bucketname, path) - if err != nil { - debug.Log("s3.GetReader", " err %v", err) - return 0, err - } - - switch { - case off > 0: - _, err = obj.Seek(off, 0) - case off < 0: - _, err = obj.Seek(off, 2) - } - - if err != nil { - return 0, err - } <-be.connChan defer func() { be.connChan <- struct{}{} }() - // This may not read the whole object, so ensure object - // is closed to avoid duplicate connections. - n, err := io.ReadFull(obj, p) + obj, err = be.client.GetObject(be.bucketname, path) if err != nil { - obj.Close() - } else { - err = obj.Close() + debug.Log("s3.Load", " err %v", err) + return 0, err } - return n, err + // make sure that the object is closed properly. + defer func() { + e := obj.Close() + if err == nil { + err = e + } + }() + + info, err := obj.Stat() + if err != nil { + return 0, err + } + + // handle negative offsets + if off < 0 { + // if the negative offset is larger than the object itself, read from + // the beginning. + if -off > info.Size { + off = 0 + } else { + // otherwise compute the offset from the end of the file. + off = info.Size + off + } + } + + // return an error if the offset is beyond the end of the file + if off > info.Size { + return 0, io.EOF + } + + var nextError error + + // manually create an io.ErrUnexpectedEOF + if off+int64(len(p)) > info.Size { + newlen := info.Size - off + p = p[:newlen] + + nextError = io.ErrUnexpectedEOF + + debug.Log("s3.Load", " capped buffer to %v byte", len(p)) + } + + n, err = obj.ReadAt(p, off) + if int64(n) == info.Size-off && err == io.EOF { + err = nil + } + + if err == nil { + err = nextError + } + + return n, err } // Save stores data in the backend at the handle. 
@@ -120,7 +154,7 @@ func (be s3) Save(h backend.Handle, p []byte) (err error) { return err } - debug.Log("s3.Save", "%v bytes at %d", len(p), h) + debug.Log("s3.Save", "%v with %d bytes", h, len(p)) path := be.s3path(h.Type, h.Name) From 9ecf7070afe2d0d4942982615a9b7616de2cff0d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 14 Aug 2016 13:38:59 +0200 Subject: [PATCH 63/98] Implement Lookup() and Save() for new Index --- src/restic/index/index.go | 88 ++++++++++++++++++++++++++++++++-- src/restic/index/index_test.go | 61 +++++++++++++++++++++++ 2 files changed, 144 insertions(+), 5 deletions(-) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 23232061..bbd5c981 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -2,6 +2,7 @@ package index import ( + "errors" "fmt" "os" "restic/backend" @@ -17,7 +18,7 @@ type Pack struct { Entries []pack.Blob } -// Blob contains informaiton about a blob. +// Blob contains information about a blob. type Blob struct { Size int64 Packs backend.IDSet @@ -25,14 +26,16 @@ type Blob struct { // Index contains information about blobs and packs stored in a repo. 
type Index struct { - Packs map[backend.ID]Pack - Blobs map[pack.Handle]Blob + Packs map[backend.ID]Pack + Blobs map[pack.Handle]Blob + IndexIDs backend.IDSet } func newIndex() *Index { return &Index{ - Packs: make(map[backend.ID]Pack), - Blobs: make(map[pack.Handle]Blob), + Packs: make(map[backend.ID]Pack), + Blobs: make(map[pack.Handle]Blob), + IndexIDs: backend.NewIDSet(), } } @@ -144,11 +147,16 @@ func Load(repo *repository.Repository) (*Index, error) { } results[id] = res + index.IndexIDs.Insert(id) } for superID, list := range supersedes { for indexID := range list { + if _, ok := results[indexID]; !ok { + continue + } debug.Log("index.Load", " removing index %v, superseded by %v", indexID.Str(), superID.Str()) + fmt.Fprintf(os.Stderr, "index %v can be removed, superseded by index %v\n", indexID.Str(), superID.Str()) delete(results, indexID) } } @@ -216,3 +224,73 @@ func (idx *Index) PacksForBlobs(blobs pack.BlobSet) (packs backend.IDSet) { return packs } + +// Location describes the location of a blob in a pack. +type Location struct { + PackID backend.ID + pack.Blob +} + +// ErrBlobNotFound is return by FindBlob when the blob could not be found in +// the index. +var ErrBlobNotFound = errors.New("blob not found in index") + +// FindBlob returns a list of packs and positions the blob can be found in. +func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { + blob, ok := idx.Blobs[h] + if !ok { + return nil, ErrBlobNotFound + } + + result := make([]Location, 0, len(blob.Packs)) + for packID := range blob.Packs { + pack, ok := idx.Packs[packID] + if !ok { + return nil, fmt.Errorf("pack %v not found in index", packID.Str()) + } + + for _, entry := range pack.Entries { + if entry.Type != h.Type { + continue + } + + if !entry.ID.Equal(h.ID) { + continue + } + + loc := Location{PackID: packID, Blob: entry} + result = append(result, loc) + } + } + + return result, nil +} + +// Save writes a new index containing the given packs. 
+func Save(repo *repository.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { + idx := &indexJSON{ + Supersedes: supersedes, + Packs: make([]*packJSON, 0, len(packs)), + } + + for packID, blobs := range packs { + b := make([]blobJSON, 0, len(blobs)) + for _, blob := range blobs { + b = append(b, blobJSON{ + ID: blob.ID, + Type: blob.Type, + Offset: blob.Offset, + Length: blob.Length, + }) + } + + p := &packJSON{ + ID: packID, + Blobs: b, + } + + idx.Packs = append(idx.Packs, p) + } + + return repo.SaveJSONUnpacked(backend.Index, idx) +} diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index a5e56797..2ae33a70 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -1,9 +1,11 @@ package index import ( + "math/rand" "restic" "restic/backend" "restic/backend/local" + "restic/pack" "restic/repository" "testing" "time" @@ -176,3 +178,62 @@ func TestIndexDuplicateBlobs(t *testing.T) { } t.Logf("%d packs with duplicate blobs", len(packs)) } + +func loadIndex(t testing.TB, repo *repository.Repository) *Index { + idx, err := Load(repo) + if err != nil { + t.Fatalf("Load() returned error %v", err) + } + + return idx +} + +func TestIndexSave(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0) + defer cleanup() + + idx := loadIndex(t, repo) + + packs := make(map[backend.ID][]pack.Blob) + for id := range idx.Packs { + if rand.Float32() < 0.5 { + packs[id] = idx.Packs[id].Entries + } + } + + t.Logf("save %d/%d packs in a new index\n", len(packs), len(idx.Packs)) + + id, err := Save(repo, packs, idx.IndexIDs.List()) + if err != nil { + t.Fatalf("unable to save new index: %v", err) + } + + t.Logf("new index saved as %v", id.Str()) + + for id := range idx.IndexIDs { + t.Logf("remove index %v", id.Str()) + err = repo.Backend().Remove(backend.Index, id.String()) + if err != nil { + t.Errorf("error removing index %v: %v", id, err) + } + } + + idx2 := loadIndex(t, repo) + 
t.Logf("load new index with %d packs", len(idx2.Packs)) + + if len(idx2.Packs) != len(packs) { + t.Errorf("wrong number of packs in new index, want %d, got %d", len(packs), len(idx2.Packs)) + } + + for id := range packs { + if _, ok := idx2.Packs[id]; !ok { + t.Errorf("pack %v is not contained in new index", id.Str()) + } + } + + for id := range idx2.Packs { + if _, ok := packs[id]; !ok { + t.Errorf("pack %v is not contained in new index", id.Str()) + } + } +} From 47950b82a00da62b0e6831467f29d5d9104aacde Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 14 Aug 2016 14:47:02 +0200 Subject: [PATCH 64/98] Add test for loading index from documentation --- src/restic/index/index_test.go | 75 ++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index 2ae33a70..825a2440 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -7,6 +7,7 @@ import ( "restic/backend/local" "restic/pack" "restic/repository" + . 
"restic/test" "testing" "time" ) @@ -237,3 +238,77 @@ func TestIndexSave(t *testing.T) { } } } + +// example index serialization from doc/Design.md +var docExample = []byte(` +{ + "supersedes": [ + "ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452" + ], + "packs": [ + { + "id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c", + "blobs": [ + { + "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce", + "type": "data", + "offset": 0, + "length": 25 + },{ + "id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae", + "type": "tree", + "offset": 38, + "length": 100 + }, + { + "id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66", + "type": "data", + "offset": 150, + "length": 123 + } + ] + } + ] +} +`) + +func TestIndexLoadDocReference(t *testing.T) { + repo, cleanup := repository.TestRepository(t) + defer cleanup() + + id, err := repo.SaveUnpacked(backend.Index, docExample) + if err != nil { + t.Fatalf("SaveUnpacked() returned error %v", err) + } + + t.Logf("index saved as %v", id.Str()) + + idx := loadIndex(t, repo) + + blobID := ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66") + locs, err := idx.FindBlob(pack.Handle{ID: blobID, Type: pack.Data}) + if err != nil { + t.Errorf("FindBlob() returned error %v", err) + } + + if len(locs) != 1 { + t.Errorf("blob found %d times, expected just one", len(locs)) + } + + l := locs[0] + if !l.ID.Equal(blobID) { + t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID) + } + + if l.Type != pack.Data { + t.Errorf("want type %v, got %v", pack.Data, l.Type) + } + + if l.Offset != 150 { + t.Errorf("wrong offset, want %d, got %v", 150, l.Offset) + } + + if l.Length != 123 { + t.Errorf("wrong length, want %d, got %v", 123, l.Length) + } +} From 3fa7304e94019ae344ac1a6b45816ed48c4f28dc Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 14 Aug 2016 16:01:42 +0200 Subject: [PATCH 65/98] Add interfaces to 
ListAllPacks --- src/restic/repository/index_rebuild.go | 40 -------------------- src/restic/repository/list.go | 52 ++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 40 deletions(-) create mode 100644 src/restic/repository/list.go diff --git a/src/restic/repository/index_rebuild.go b/src/restic/repository/index_rebuild.go index dea5be0d..36e42ddf 100644 --- a/src/restic/repository/index_rebuild.go +++ b/src/restic/repository/index_rebuild.go @@ -5,49 +5,9 @@ import ( "os" "restic/backend" "restic/debug" - "restic/pack" "restic/worker" ) -const rebuildIndexWorkers = 10 - -// ListAllPacksResult is returned in the channel from LoadBlobsFromAllPacks. -type ListAllPacksResult struct { - PackID backend.ID - Size int64 - Entries []pack.Blob -} - -// ListAllPacks sends the contents of all packs to ch. -func ListAllPacks(repo *Repository, ch chan<- worker.Job, done <-chan struct{}) { - f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { - packID := job.Data.(backend.ID) - entries, size, err := repo.ListPack(packID) - - return ListAllPacksResult{ - PackID: packID, - Size: size, - Entries: entries, - }, err - } - - jobCh := make(chan worker.Job) - wp := worker.New(rebuildIndexWorkers, f, jobCh, ch) - - go func() { - defer close(jobCh) - for id := range repo.List(backend.Data, done) { - select { - case jobCh <- worker.Job{Data: id}: - case <-done: - return - } - } - }() - - wp.Wait() -} - // RebuildIndex lists all packs in the repo, writes a new index and removes all // old indexes. This operation should only be done with an exclusive lock in // place. diff --git a/src/restic/repository/list.go b/src/restic/repository/list.go new file mode 100644 index 00000000..9d6f44e4 --- /dev/null +++ b/src/restic/repository/list.go @@ -0,0 +1,52 @@ +package repository + +import ( + "restic/backend" + "restic/pack" + "restic/worker" +) + +const listPackWorkers = 10 + +// Lister combines lists packs in a repo and blobs in a pack. 
+type Lister interface { + List(backend.Type, <-chan struct{}) <-chan backend.ID + ListPack(backend.ID) ([]pack.Blob, int64, error) +} + +// ListAllPacksResult is returned in the channel from LoadBlobsFromAllPacks. +type ListAllPacksResult struct { + PackID backend.ID + Size int64 + Entries []pack.Blob +} + +// ListAllPacks sends the contents of all packs to ch. +func ListAllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) { + f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { + packID := job.Data.(backend.ID) + entries, size, err := repo.ListPack(packID) + + return ListAllPacksResult{ + PackID: packID, + Size: size, + Entries: entries, + }, err + } + + jobCh := make(chan worker.Job) + wp := worker.New(listPackWorkers, f, jobCh, ch) + + go func() { + defer close(jobCh) + for id := range repo.List(backend.Data, done) { + select { + case jobCh <- worker.Job{Data: id}: + case <-done: + return + } + } + }() + + wp.Wait() +} From 3b57075109f739706cf9eb12de573f0d13e2ab43 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 14 Aug 2016 16:04:34 +0200 Subject: [PATCH 66/98] Add global interface Repository --- src/restic/repository.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 src/restic/repository.go diff --git a/src/restic/repository.go b/src/restic/repository.go new file mode 100644 index 00000000..1fba0ded --- /dev/null +++ b/src/restic/repository.go @@ -0,0 +1,19 @@ +package restic + +import ( + "restic/backend" + "restic/pack" +) + +// Repository manages encrypted and packed data stored in a backend. +type Repository interface { + LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error + + Lister +} + +// Lister combines lists packs in a repo and blobs in a pack. 
+type Lister interface { + List(backend.Type, <-chan struct{}) <-chan backend.ID + ListPack(backend.ID) ([]pack.Blob, int64, error) +} From 1f263a7683cb56e690b52ab8483c6ae183327e5d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 14 Aug 2016 16:11:59 +0200 Subject: [PATCH 67/98] Decouple index/ and repository/ --- src/restic/index/index.go | 21 +++++++++++++------- src/restic/repository/index_rebuild.go | 4 ++-- src/restic/repository/list.go | 27 ++++++++++++++++++++------ 3 files changed, 37 insertions(+), 15 deletions(-) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index bbd5c981..c0b404a3 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "restic" "restic/backend" "restic/debug" "restic/pack" @@ -39,8 +40,14 @@ func newIndex() *Index { } } +type listAllPacksResult interface { + PackID() backend.ID + Entries() []pack.Blob + Size() int64 +} + // New creates a new index for repo from scratch. 
-func New(repo *repository.Repository) (*Index, error) { +func New(repo restic.Repository) (*Index, error) { done := make(chan struct{}) defer close(done) @@ -56,16 +63,16 @@ func New(repo *repository.Repository) (*Index, error) { continue } - j := job.Result.(repository.ListAllPacksResult) + j := job.Result.(listAllPacksResult) - debug.Log("Index.New", "pack %v contains %d blobs", packID.Str(), len(j.Entries)) + debug.Log("Index.New", "pack %v contains %d blobs", packID.Str(), len(j.Entries())) - err := idx.AddPack(packID, j.Size, j.Entries) + err := idx.AddPack(packID, j.Size(), j.Entries()) if err != nil { return nil, err } - p := Pack{Entries: j.Entries, Size: j.Size} + p := Pack{Entries: j.Entries(), Size: j.Size()} idx.Packs[packID] = p } @@ -91,7 +98,7 @@ type indexJSON struct { Packs []*packJSON `json:"packs"` } -func loadIndexJSON(repo *repository.Repository, id backend.ID) (*indexJSON, error) { +func loadIndexJSON(repo restic.Repository, id backend.ID) (*indexJSON, error) { debug.Log("index.loadIndexJSON", "process index %v\n", id.Str()) var idx indexJSON @@ -104,7 +111,7 @@ func loadIndexJSON(repo *repository.Repository, id backend.ID) (*indexJSON, erro } // Load creates an index by loading all index files from the repo. 
-func Load(repo *repository.Repository) (*Index, error) { +func Load(repo restic.Repository) (*Index, error) { debug.Log("index.Load", "loading indexes") done := make(chan struct{}) diff --git a/src/restic/repository/index_rebuild.go b/src/restic/repository/index_rebuild.go index 36e42ddf..fa33c5c8 100644 --- a/src/restic/repository/index_rebuild.go +++ b/src/restic/repository/index_rebuild.go @@ -31,13 +31,13 @@ func RebuildIndex(repo *Repository) error { res := job.Result.(ListAllPacksResult) - for _, entry := range res.Entries { + for _, entry := range res.Entries() { pb := PackedBlob{ ID: entry.ID, Type: entry.Type, Length: entry.Length, Offset: entry.Offset, - PackID: res.PackID, + PackID: res.PackID(), } idx.Store(pb) } diff --git a/src/restic/repository/list.go b/src/restic/repository/list.go index 9d6f44e4..a3c0c5d9 100644 --- a/src/restic/repository/list.go +++ b/src/restic/repository/list.go @@ -16,9 +16,24 @@ type Lister interface { // ListAllPacksResult is returned in the channel from LoadBlobsFromAllPacks. type ListAllPacksResult struct { - PackID backend.ID - Size int64 - Entries []pack.Blob + packID backend.ID + size int64 + entries []pack.Blob +} + +// PackID returns the pack ID of this result. +func (l ListAllPacksResult) PackID() backend.ID { + return l.packID +} + +// Size returns the size of the pack. +func (l ListAllPacksResult) Size() int64 { + return l.size +} + +// Entries returns a list of all blobs saved in the pack. +func (l ListAllPacksResult) Entries() []pack.Blob { + return l.entries } // ListAllPacks sends the contents of all packs to ch. 
@@ -28,9 +43,9 @@ func ListAllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) { entries, size, err := repo.ListPack(packID) return ListAllPacksResult{ - PackID: packID, - Size: size, - Entries: entries, + packID: packID, + size: size, + entries: entries, }, err } From 80bcae44e2b844ca0e34e52761c81d3a45419db9 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 14 Aug 2016 17:59:20 +0200 Subject: [PATCH 68/98] Decouple ListAllPacks from repository --- src/restic/index/index.go | 14 ++++---------- src/restic/{repository => list}/list.go | 18 +++++++++--------- src/restic/repository.go | 1 + src/restic/repository/index_rebuild.go | 5 +++-- 4 files changed, 17 insertions(+), 21 deletions(-) rename src/restic/{repository => list}/list.go (70%) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index c0b404a3..9426fced 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -8,8 +8,8 @@ import ( "restic" "restic/backend" "restic/debug" + "restic/list" "restic/pack" - "restic/repository" "restic/worker" ) @@ -40,19 +40,13 @@ func newIndex() *Index { } } -type listAllPacksResult interface { - PackID() backend.ID - Entries() []pack.Blob - Size() int64 -} - // New creates a new index for repo from scratch. func New(repo restic.Repository) (*Index, error) { done := make(chan struct{}) defer close(done) ch := make(chan worker.Job) - go repository.ListAllPacks(repo, ch, done) + go list.AllPacks(repo, ch, done) idx := newIndex() @@ -63,7 +57,7 @@ func New(repo restic.Repository) (*Index, error) { continue } - j := job.Result.(listAllPacksResult) + j := job.Result.(list.Result) debug.Log("Index.New", "pack %v contains %d blobs", packID.Str(), len(j.Entries())) @@ -274,7 +268,7 @@ func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { } // Save writes a new index containing the given packs. 
-func Save(repo *repository.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { +func Save(repo restic.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { idx := &indexJSON{ Supersedes: supersedes, Packs: make([]*packJSON, 0, len(packs)), diff --git a/src/restic/repository/list.go b/src/restic/list/list.go similarity index 70% rename from src/restic/repository/list.go rename to src/restic/list/list.go index a3c0c5d9..e3a14798 100644 --- a/src/restic/repository/list.go +++ b/src/restic/list/list.go @@ -1,4 +1,4 @@ -package repository +package list import ( "restic/backend" @@ -14,35 +14,35 @@ type Lister interface { ListPack(backend.ID) ([]pack.Blob, int64, error) } -// ListAllPacksResult is returned in the channel from LoadBlobsFromAllPacks. -type ListAllPacksResult struct { +// Result is returned in the channel from LoadBlobsFromAllPacks. +type Result struct { packID backend.ID size int64 entries []pack.Blob } // PackID returns the pack ID of this result. -func (l ListAllPacksResult) PackID() backend.ID { +func (l Result) PackID() backend.ID { return l.packID } // Size returns the size of the pack. -func (l ListAllPacksResult) Size() int64 { +func (l Result) Size() int64 { return l.size } // Entries returns a list of all blobs saved in the pack. -func (l ListAllPacksResult) Entries() []pack.Blob { +func (l Result) Entries() []pack.Blob { return l.entries } -// ListAllPacks sends the contents of all packs to ch. -func ListAllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) { +// AllPacks sends the contents of all packs to ch. 
+func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) { f := func(job worker.Job, done <-chan struct{}) (interface{}, error) { packID := job.Data.(backend.ID) entries, size, err := repo.ListPack(packID) - return ListAllPacksResult{ + return Result{ packID: packID, size: size, entries: entries, diff --git a/src/restic/repository.go b/src/restic/repository.go index 1fba0ded..e35de0e9 100644 --- a/src/restic/repository.go +++ b/src/restic/repository.go @@ -8,6 +8,7 @@ import ( // Repository manages encrypted and packed data stored in a backend. type Repository interface { LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error + SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error) Lister } diff --git a/src/restic/repository/index_rebuild.go b/src/restic/repository/index_rebuild.go index fa33c5c8..99c28148 100644 --- a/src/restic/repository/index_rebuild.go +++ b/src/restic/repository/index_rebuild.go @@ -5,6 +5,7 @@ import ( "os" "restic/backend" "restic/debug" + "restic/list" "restic/worker" ) @@ -18,7 +19,7 @@ func RebuildIndex(repo *Repository) error { defer close(done) ch := make(chan worker.Job) - go ListAllPacks(repo, ch, done) + go list.AllPacks(repo, ch, done) idx := NewIndex() for job := range ch { @@ -29,7 +30,7 @@ func RebuildIndex(repo *Repository) error { continue } - res := job.Result.(ListAllPacksResult) + res := job.Result.(list.Result) for _, entry := range res.Entries() { pb := PackedBlob{ From 302619a11ab2a093b5647364bfce89bac176edb2 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 14 Aug 2016 18:12:07 +0200 Subject: [PATCH 69/98] Move interfaces to package restic/types --- src/restic/index/index.go | 10 +++++----- src/restic/{ => types}/repository.go | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) rename src/restic/{ => types}/repository.go (96%) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 9426fced..d2f0d985 100644 --- a/src/restic/index/index.go +++ 
b/src/restic/index/index.go @@ -5,11 +5,11 @@ import ( "errors" "fmt" "os" - "restic" "restic/backend" "restic/debug" "restic/list" "restic/pack" + "restic/types" "restic/worker" ) @@ -41,7 +41,7 @@ func newIndex() *Index { } // New creates a new index for repo from scratch. -func New(repo restic.Repository) (*Index, error) { +func New(repo types.Repository) (*Index, error) { done := make(chan struct{}) defer close(done) @@ -92,7 +92,7 @@ type indexJSON struct { Packs []*packJSON `json:"packs"` } -func loadIndexJSON(repo restic.Repository, id backend.ID) (*indexJSON, error) { +func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) { debug.Log("index.loadIndexJSON", "process index %v\n", id.Str()) var idx indexJSON @@ -105,7 +105,7 @@ func loadIndexJSON(repo restic.Repository, id backend.ID) (*indexJSON, error) { } // Load creates an index by loading all index files from the repo. -func Load(repo restic.Repository) (*Index, error) { +func Load(repo types.Repository) (*Index, error) { debug.Log("index.Load", "loading indexes") done := make(chan struct{}) @@ -268,7 +268,7 @@ func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { } // Save writes a new index containing the given packs. 
-func Save(repo restic.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { +func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { idx := &indexJSON{ Supersedes: supersedes, Packs: make([]*packJSON, 0, len(packs)), diff --git a/src/restic/repository.go b/src/restic/types/repository.go similarity index 96% rename from src/restic/repository.go rename to src/restic/types/repository.go index e35de0e9..d13d9333 100644 --- a/src/restic/repository.go +++ b/src/restic/types/repository.go @@ -1,4 +1,4 @@ -package restic +package types import ( "restic/backend" From f102406cd70cf55a4e2f5874b7201ac8aeb401c7 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 14 Aug 2016 18:56:13 +0200 Subject: [PATCH 70/98] ID: move Str() to non-pointer receiver --- src/restic/backend/id.go | 6 +----- src/restic/backend/id_int_test.go | 5 ----- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/src/restic/backend/id.go b/src/restic/backend/id.go index 11579270..6c7bc532 100644 --- a/src/restic/backend/id.go +++ b/src/restic/backend/id.go @@ -44,11 +44,7 @@ func (id ID) String() string { const shortStr = 4 // Str returns the shortened string version of id. 
-func (id *ID) Str() string { - if id == nil { - return "[nil]" - } - +func (id ID) Str() string { if id.IsNull() { return "[null]" } diff --git a/src/restic/backend/id_int_test.go b/src/restic/backend/id_int_test.go index d46a1554..ed84a5a3 100644 --- a/src/restic/backend/id_int_test.go +++ b/src/restic/backend/id_int_test.go @@ -8,9 +8,4 @@ func TestIDMethods(t *testing.T) { if id.Str() != "[null]" { t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[null]", id.Str()) } - - var pid *ID - if pid.Str() != "[nil]" { - t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[nil]", pid.Str()) - } } From 69c2e8ce7e2a83639471242f898b84c820da0d3a Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 4 Aug 2016 19:42:40 +0200 Subject: [PATCH 71/98] Add first version of the `prune` command --- src/cmds/restic/cmd_prune.go | 199 +++++++++++++++++++++++++++++++++++ 1 file changed, 199 insertions(+) create mode 100644 src/cmds/restic/cmd_prune.go diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go new file mode 100644 index 00000000..f0c87fb8 --- /dev/null +++ b/src/cmds/restic/cmd_prune.go @@ -0,0 +1,199 @@ +package main + +import ( + "fmt" + "os" + "restic" + "restic/backend" + "restic/debug" + "restic/list" + "restic/pack" + "restic/repository" + "restic/worker" + "time" + + "golang.org/x/crypto/ssh/terminal" +) + +// CmdPrune implements the 'prune' command. +type CmdPrune struct { + global *GlobalOptions +} + +func init() { + _, err := parser.AddCommand("prune", + "removes content from a repository", + "The prune command removes rendundant and unneeded data from the repository", + &CmdPrune{global: &globalOpts}) + if err != nil { + panic(err) + } +} + +// newProgressMax returns a progress that counts blobs. 
+func newProgressMax(show bool, max uint64, description string) *restic.Progress { + if !show { + return nil + } + + p := restic.NewProgress(time.Second) + + p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) { + status := fmt.Sprintf("[%s] %s %d / %d %s", + formatDuration(d), + formatPercent(s.Blobs, max), + s.Blobs, max, description) + + w, _, err := terminal.GetSize(int(os.Stdout.Fd())) + if err == nil { + if len(status) > w { + max := w - len(status) - 4 + status = status[:max] + "... " + } + } + + fmt.Printf("\x1b[2K%s\r", status) + } + + p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) { + p.OnUpdate(s, d, false) + fmt.Printf("\n") + } + + return p +} + +// Execute runs the 'prune' command. +func (cmd CmdPrune) Execute(args []string) error { + repo, err := cmd.global.OpenRepository() + if err != nil { + return err + } + + lock, err := lockRepoExclusive(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + err = repo.LoadIndex() + if err != nil { + return err + } + + done := make(chan struct{}) + defer close(done) + + cmd.global.Verbosef("loading list of files from the repo\n") + + var stats struct { + blobs int + packs int + snapshots int + } + + packs := make(map[backend.ID]pack.BlobSet) + for packID := range repo.List(backend.Data, done) { + debug.Log("CmdPrune.Execute", "found %v", packID.Str()) + packs[packID] = pack.NewBlobSet() + stats.packs++ + } + + cmd.global.Verbosef("listing %v files\n", stats.packs) + + blobCount := make(map[backend.ID]int) + duplicateBlobs := 0 + duplicateBytes := 0 + rewritePacks := backend.NewIDSet() + + ch := make(chan worker.Job) + go list.AllPacks(repo, ch, done) + + bar := newProgressMax(cmd.global.ShowProgress(), uint64(len(packs)), "files") + bar.Start() + for job := range ch { + packID := job.Data.(backend.ID) + if job.Error != nil { + cmd.global.Warnf("unable to list pack %v: %v\n", packID.Str(), job.Error) + continue + } + + j := job.Result.(list.Result) + + 
debug.Log("CmdPrune.Execute", "pack %v contains %d blobs", packID.Str(), len(j.Entries())) + for _, pb := range j.Entries() { + packs[packID].Insert(pack.Handle{ID: pb.ID, Type: pb.Type}) + stats.blobs++ + blobCount[pb.ID]++ + + if blobCount[pb.ID] > 1 { + duplicateBlobs++ + duplicateBytes += int(pb.Length) + } + } + bar.Report(restic.Stat{Blobs: 1}) + } + bar.Done() + + cmd.global.Verbosef("processed %d blobs: %d duplicate blobs, %d duplicate bytes\n", + stats.blobs, duplicateBlobs, duplicateBytes) + cmd.global.Verbosef("load all snapshots\n") + + snapshots, err := restic.LoadAllSnapshots(repo) + if err != nil { + return err + } + + stats.snapshots = len(snapshots) + + cmd.global.Verbosef("find data that is still in use for %d snapshots\n", stats.snapshots) + + usedBlobs := pack.NewBlobSet() + seenBlobs := pack.NewBlobSet() + + bar = newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots") + bar.Start() + for _, sn := range snapshots { + debug.Log("CmdPrune.Execute", "process snapshot %v", sn.ID().Str()) + + err = restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, seenBlobs) + if err != nil { + return err + } + + debug.Log("CmdPrune.Execute", "found %v blobs for snapshot %v", sn.ID().Str()) + bar.Report(restic.Stat{Blobs: 1}) + } + bar.Done() + + cmd.global.Verbosef("found %d of %d data blobs still in use\n", len(usedBlobs), stats.blobs) + + for packID, blobSet := range packs { + for h := range blobSet { + if !usedBlobs.Has(h) { + rewritePacks.Insert(packID) + } + + if blobCount[h.ID] > 1 { + rewritePacks.Insert(packID) + } + } + } + + cmd.global.Verbosef("will rewrite %d packs\n", len(rewritePacks)) + + err = repository.Repack(repo, rewritePacks, usedBlobs) + if err != nil { + return err + } + + cmd.global.Verbosef("creating new index\n") + + err = repository.RebuildIndex(repo) + if err != nil { + return err + } + + cmd.global.Verbosef("done\n") + return nil +} From c0ef1ec6fd837ca0abc37d3f9fbb13f42628ac5c Mon Sep 17 00:00:00 2001 From: 
Alexander Neumann Date: Mon, 15 Aug 2016 18:55:52 +0200 Subject: [PATCH 72/98] Add RemovePack for index --- src/restic/index/index.go | 20 ++++++++++++++++++ src/restic/index/index_test.go | 38 ++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index d2f0d985..0bd87be3 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -189,6 +189,26 @@ func (idx *Index) AddPack(id backend.ID, size int64, entries []pack.Blob) error return nil } +// RemovePack deletes a pack from the index. +func (idx *Index) RemovePack(id backend.ID) error { + if _, ok := idx.Packs[id]; !ok { + return fmt.Errorf("pack %v not found in the index", id.Str()) + } + + for _, blob := range idx.Packs[id].Entries { + h := pack.Handle{ID: blob.ID, Type: blob.Type} + idx.Blobs[h].Packs.Delete(id) + + if len(idx.Blobs[h].Packs) == 0 { + delete(idx.Blobs, h) + } + } + + delete(idx.Packs, id) + + return nil +} + // DuplicateBlobs returns a list of blobs that are stored more than once in the // repo. 
func (idx *Index) DuplicateBlobs() (dups pack.BlobSet) { diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index 825a2440..580fffcc 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -239,6 +239,44 @@ func TestIndexSave(t *testing.T) { } } +func TestIndexAddRemovePack(t *testing.T) { + repo, cleanup := createFilledRepo(t, 3, 0) + defer cleanup() + + idx, err := Load(repo) + if err != nil { + t.Fatalf("Load() returned error %v", err) + } + + done := make(chan struct{}) + defer close(done) + + packID := <-repo.List(backend.Data, done) + + t.Logf("selected pack %v", packID.Str()) + + blobs := idx.Packs[packID].Entries + + idx.RemovePack(packID) + + if _, ok := idx.Packs[packID]; ok { + t.Errorf("removed pack %v found in index.Packs", packID.Str()) + } + + for _, blob := range blobs { + h := pack.Handle{ID: blob.ID, Type: blob.Type} + _, err := idx.FindBlob(h) + if err == nil { + t.Errorf("removed blob %v found in index", h) + } + + if _, ok := idx.Blobs[h]; ok { + t.Errorf("removed blob %v found in index.Blobs", h) + } + } + +} + // example index serialization from doc/Design.md var docExample = []byte(` { From 009c803c8a90717598835202feba215b60395dbc Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 20:13:56 +0200 Subject: [PATCH 73/98] prune: Use new Index --- src/cmds/restic/cmd_prune.go | 73 +++++++++++++++--------------------- 1 file changed, 30 insertions(+), 43 deletions(-) diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index f0c87fb8..8e52bd8d 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -6,10 +6,9 @@ import ( "restic" "restic/backend" "restic/debug" - "restic/list" + "restic/index" "restic/pack" "restic/repository" - "restic/worker" "time" "golang.org/x/crypto/ssh/terminal" @@ -84,61 +83,49 @@ func (cmd CmdPrune) Execute(args []string) error { done := make(chan struct{}) defer close(done) - cmd.global.Verbosef("loading 
list of files from the repo\n") + cmd.global.Verbosef("building new index for repo\n") + + idx, err := index.New(repo) + if err != nil { + return err + } var stats struct { blobs int packs int snapshots int + bytes int64 } - packs := make(map[backend.ID]pack.BlobSet) - for packID := range repo.List(backend.Data, done) { - debug.Log("CmdPrune.Execute", "found %v", packID.Str()) - packs[packID] = pack.NewBlobSet() - stats.packs++ + for _, pack := range idx.Packs { + stats.bytes += pack.Size } + cmd.global.Verbosef("repository contains %v packs (%v blobs) with %v bytes\n", + len(idx.Packs), len(idx.Blobs), formatBytes(uint64(stats.bytes))) - cmd.global.Verbosef("listing %v files\n", stats.packs) - - blobCount := make(map[backend.ID]int) + blobCount := make(map[pack.Handle]int) duplicateBlobs := 0 duplicateBytes := 0 - rewritePacks := backend.NewIDSet() - ch := make(chan worker.Job) - go list.AllPacks(repo, ch, done) - - bar := newProgressMax(cmd.global.ShowProgress(), uint64(len(packs)), "files") - bar.Start() - for job := range ch { - packID := job.Data.(backend.ID) - if job.Error != nil { - cmd.global.Warnf("unable to list pack %v: %v\n", packID.Str(), job.Error) - continue - } - - j := job.Result.(list.Result) - - debug.Log("CmdPrune.Execute", "pack %v contains %d blobs", packID.Str(), len(j.Entries())) - for _, pb := range j.Entries() { - packs[packID].Insert(pack.Handle{ID: pb.ID, Type: pb.Type}) + // find duplicate blobs + for _, p := range idx.Packs { + for _, entry := range p.Entries { stats.blobs++ - blobCount[pb.ID]++ + h := pack.Handle{ID: entry.ID, Type: entry.Type} + blobCount[h]++ - if blobCount[pb.ID] > 1 { + if blobCount[h] > 1 { duplicateBlobs++ - duplicateBytes += int(pb.Length) + duplicateBytes += int(entry.Length) } } - bar.Report(restic.Stat{Blobs: 1}) } - bar.Done() cmd.global.Verbosef("processed %d blobs: %d duplicate blobs, %d duplicate bytes\n", stats.blobs, duplicateBlobs, duplicateBytes) cmd.global.Verbosef("load all snapshots\n") + // find 
referenced blobs snapshots, err := restic.LoadAllSnapshots(repo) if err != nil { return err @@ -151,7 +138,7 @@ func (cmd CmdPrune) Execute(args []string) error { usedBlobs := pack.NewBlobSet() seenBlobs := pack.NewBlobSet() - bar = newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots") + bar := newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots") bar.Start() for _, sn := range snapshots { debug.Log("CmdPrune.Execute", "process snapshot %v", sn.ID().Str()) @@ -168,15 +155,15 @@ func (cmd CmdPrune) Execute(args []string) error { cmd.global.Verbosef("found %d of %d data blobs still in use\n", len(usedBlobs), stats.blobs) - for packID, blobSet := range packs { - for h := range blobSet { - if !usedBlobs.Has(h) { - rewritePacks.Insert(packID) - } + // find packs that need a rewrite + rewritePacks := backend.NewIDSet() + for h, blob := range idx.Blobs { + if !usedBlobs.Has(h) { + rewritePacks.Merge(blob.Packs) + } - if blobCount[h.ID] > 1 { - rewritePacks.Insert(packID) - } + if blobCount[h] > 1 { + rewritePacks.Merge(blob.Packs) } } From 3ceb2ad3cf8a79fac325fc8cd7b9df88e589c061 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 20:15:17 +0200 Subject: [PATCH 74/98] Progress: Call OnUpdate before OnDone --- src/cmds/restic/cmd_prune.go | 1 - src/restic/progress.go | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index 8e52bd8d..228e7ac0 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -55,7 +55,6 @@ func newProgressMax(show bool, max uint64, description string) *restic.Progress } p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) { - p.OnUpdate(s, d, false) fmt.Printf("\n") } diff --git a/src/restic/progress.go b/src/restic/progress.go index cca1a4e4..4721fac3 100644 --- a/src/restic/progress.go +++ b/src/restic/progress.go @@ -152,6 +152,7 @@ func (p *Progress) Done() { if p.OnDone != 
nil { p.fnM.Lock() + p.OnUpdate(cur, time.Since(p.start), false) p.OnDone(cur, time.Since(p.start), false) p.fnM.Unlock() } From 1bb2d59e38814e24e8b29abf6ec6717a6af9975a Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 20:46:14 +0200 Subject: [PATCH 75/98] Add Save() method to Index --- src/restic/index/index.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 0bd87be3..6bd934dc 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -287,6 +287,17 @@ func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { return result, nil } +// Save writes the complete index to the repo and includes all previously read +// indexes to the Supersedes field. +func (idx *Index) Save(repo types.Repository) (backend.ID, error) { + packs := make(map[backend.ID][]pack.Blob, len(idx.Packs)) + for id, p := range idx.Packs { + packs[id] = p.Entries + } + + return Save(repo, packs, idx.IndexIDs.List()) +} + // Save writes a new index containing the given packs. 
func Save(repo types.Repository, packs map[backend.ID][]pack.Blob, supersedes backend.IDs) (backend.ID, error) { idx := &indexJSON{ From 29bb845f0efee42c59062b5f4620ad5b5a668116 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 20:46:24 +0200 Subject: [PATCH 76/98] Rebuild index at the end of prune --- src/cmds/restic/cmd_prune.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index 228e7ac0..a7f81da3 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -175,11 +175,27 @@ func (cmd CmdPrune) Execute(args []string) error { cmd.global.Verbosef("creating new index\n") - err = repository.RebuildIndex(repo) + idx, err = index.New(repo) if err != nil { return err } + id, err := idx.Save(repo) + if err != nil { + return err + } + cmd.global.Verbosef("saved new index as %v\n", id.Str()) + + for oldIndex := range repo.List(backend.Index, done) { + if id.Equal(oldIndex) { + continue + } + err := repo.Backend().Remove(backend.Index, oldIndex.String()) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", oldIndex.Str(), err) + } + } + cmd.global.Verbosef("done\n") return nil } From 8d735cf6a98cadca36abac2b48a50f23536c5f56 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 20:49:01 +0200 Subject: [PATCH 77/98] Explicitly specify supersedes for new index --- src/cmds/restic/cmd_prune.go | 22 +++++++++++----------- src/restic/index/index.go | 7 +++---- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index a7f81da3..d9306c34 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -180,22 +180,22 @@ func (cmd CmdPrune) Execute(args []string) error { - id, err := idx.Save(repo) + var supersedes backend.IDs + for idxID := range repo.List(backend.Index, done) { + err := 
repo.Backend().Remove(backend.Index, idxID.String()) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", idxID.Str(), err) + } + + supersedes = append(supersedes, idxID) + } + + id, err := idx.Save(repo, supersedes) if err != nil { return err } cmd.global.Verbosef("saved new index as %v\n", id.Str()) - for oldIndex := range repo.List(backend.Index, done) { - if id.Equal(oldIndex) { - continue - } - err := repo.Backend().Remove(backend.Index, oldIndex.String()) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", oldIndex.Str(), err) - } - } - cmd.global.Verbosef("done\n") return nil } diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 6bd934dc..887e1234 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -287,15 +287,14 @@ func (idx *Index) FindBlob(h pack.Handle) ([]Location, error) { return result, nil } -// Save writes the complete index to the repo and includes all previously read -// indexes to the Supersedes field. -func (idx *Index) Save(repo types.Repository) (backend.ID, error) { +// Save writes the complete index to the repo. +func (idx *Index) Save(repo types.Repository, supersedes backend.IDs) (backend.ID, error) { packs := make(map[backend.ID][]pack.Blob, len(idx.Packs)) for id, p := range idx.Packs { packs[id] = p.Entries } - return Save(repo, packs, idx.IndexIDs.List()) + return Save(repo, packs, supersedes) } // Save writes a new index containing the given packs. 
From 8de6e5a627fb5e491bf1bb329e1ab7e8add26e88 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 21:10:12 +0200 Subject: [PATCH 78/98] Add progress option to index --- src/restic/index/index.go | 15 +++++++++++++-- src/restic/index/index_test.go | 14 +++++++------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/restic/index/index.go b/src/restic/index/index.go index 887e1234..0d2d7039 100644 --- a/src/restic/index/index.go +++ b/src/restic/index/index.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "restic" "restic/backend" "restic/debug" "restic/list" @@ -41,16 +42,21 @@ func newIndex() *Index { } // New creates a new index for repo from scratch. -func New(repo types.Repository) (*Index, error) { +func New(repo types.Repository, p *restic.Progress) (*Index, error) { done := make(chan struct{}) defer close(done) + p.Start() + defer p.Done() + ch := make(chan worker.Job) go list.AllPacks(repo, ch, done) idx := newIndex() for job := range ch { + p.Report(restic.Stat{Blobs: 1}) + packID := job.Data.(backend.ID) if job.Error != nil { fmt.Fprintf(os.Stderr, "unable to list pack %v: %v\n", packID.Str(), job.Error) @@ -105,9 +111,12 @@ func loadIndexJSON(repo types.Repository, id backend.ID) (*indexJSON, error) { } // Load creates an index by loading all index files from the repo. 
-func Load(repo types.Repository) (*Index, error) { +func Load(repo types.Repository, p *restic.Progress) (*Index, error) { debug.Log("index.Load", "loading indexes") + p.Start() + defer p.Done() + done := make(chan struct{}) defer close(done) @@ -117,6 +126,8 @@ func Load(repo types.Repository) (*Index, error) { index := newIndex() for id := range repo.List(backend.Index, done) { + p.Report(restic.Stat{Blobs: 1}) + debug.Log("index.Load", "Load index %v", id.Str()) idx, err := loadIndexJSON(repo, id) if err != nil { diff --git a/src/restic/index/index_test.go b/src/restic/index/index_test.go index 580fffcc..c950608a 100644 --- a/src/restic/index/index_test.go +++ b/src/restic/index/index_test.go @@ -40,7 +40,7 @@ func TestIndexNew(t *testing.T) { repo, cleanup := createFilledRepo(t, 3, 0) defer cleanup() - idx, err := New(repo) + idx, err := New(repo, nil) if err != nil { t.Fatalf("New() returned error %v", err) } @@ -56,7 +56,7 @@ func TestIndexLoad(t *testing.T) { repo, cleanup := createFilledRepo(t, 3, 0) defer cleanup() - loadIdx, err := Load(repo) + loadIdx, err := Load(repo, nil) if err != nil { t.Fatalf("Load() returned error %v", err) } @@ -67,7 +67,7 @@ func TestIndexLoad(t *testing.T) { validateIndex(t, repo, loadIdx) - newIdx, err := New(repo) + newIdx, err := New(repo, nil) if err != nil { t.Fatalf("New() returned error %v", err) } @@ -146,7 +146,7 @@ func BenchmarkIndexNew(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - idx, err := New(repo) + idx, err := New(repo, nil) if err != nil { b.Fatalf("New() returned error %v", err) @@ -162,7 +162,7 @@ func TestIndexDuplicateBlobs(t *testing.T) { repo, cleanup := createFilledRepo(t, 3, 0.01) defer cleanup() - idx, err := New(repo) + idx, err := New(repo, nil) if err != nil { t.Fatal(err) } @@ -181,7 +181,7 @@ func TestIndexDuplicateBlobs(t *testing.T) { } func loadIndex(t testing.TB, repo *repository.Repository) *Index { - idx, err := Load(repo) + idx, err := Load(repo, nil) if err != nil { 
t.Fatalf("Load() returned error %v", err) } @@ -243,7 +243,7 @@ func TestIndexAddRemovePack(t *testing.T) { repo, cleanup := createFilledRepo(t, 3, 0) defer cleanup() - idx, err := Load(repo) + idx, err := Load(repo, nil) if err != nil { t.Fatalf("Load() returned error %v", err) } From 7f9d2277255b5b7b08a22f91cf596dfca4f01a19 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 21:10:20 +0200 Subject: [PATCH 79/98] Use progress in prune command --- src/cmds/restic/cmd_prune.go | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index d9306c34..0761b3c3 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -82,13 +82,6 @@ func (cmd CmdPrune) Execute(args []string) error { done := make(chan struct{}) defer close(done) - cmd.global.Verbosef("building new index for repo\n") - - idx, err := index.New(repo) - if err != nil { - return err - } - var stats struct { blobs int packs int @@ -96,6 +89,19 @@ func (cmd CmdPrune) Execute(args []string) error { bytes int64 } + cmd.global.Verbosef("counting files in repo\n") + for _ = range repo.List(backend.Data, done) { + stats.packs++ + } + + cmd.global.Verbosef("building new index for repo\n") + + bar := newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "files") + idx, err := index.New(repo, bar) + if err != nil { + return err + } + for _, pack := range idx.Packs { stats.bytes += pack.Size } @@ -137,7 +143,7 @@ func (cmd CmdPrune) Execute(args []string) error { usedBlobs := pack.NewBlobSet() seenBlobs := pack.NewBlobSet() - bar := newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots") + bar = newProgressMax(cmd.global.ShowProgress(), uint64(len(snapshots)), "snapshots") bar.Start() for _, sn := range snapshots { debug.Log("CmdPrune.Execute", "process snapshot %v", sn.ID().Str()) @@ -175,7 +181,8 @@ func (cmd CmdPrune) Execute(args []string) error { 
cmd.global.Verbosef("creating new index\n") - idx, err = index.New(repo) + bar = newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "files") + idx, err = index.New(repo, bar) if err != nil { return err } From 238d3807e9692534616d5ebbdf20e3896375589b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 21:13:38 +0200 Subject: [PATCH 80/98] prune: Format duplicate bytes properly --- src/cmds/restic/cmd_prune.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index 0761b3c3..7ee1117c 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -126,8 +126,8 @@ func (cmd CmdPrune) Execute(args []string) error { } } - cmd.global.Verbosef("processed %d blobs: %d duplicate blobs, %d duplicate bytes\n", - stats.blobs, duplicateBlobs, duplicateBytes) + cmd.global.Verbosef("processed %d blobs: %d duplicate blobs, %v duplicate\n", + stats.blobs, duplicateBlobs, formatBytes(uint64(duplicateBytes))) cmd.global.Verbosef("load all snapshots\n") // find referenced blobs From 2c04ad3c2935882879808e01217e76d8c44978fa Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 21:37:11 +0200 Subject: [PATCH 81/98] TestCreateSnapshot: free buffer --- src/restic/testing.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/restic/testing.go b/src/restic/testing.go index 873a9ed0..e1d6bf61 100644 --- a/src/restic/testing.go +++ b/src/restic/testing.go @@ -51,6 +51,7 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) { fs.knownBlobs.Insert(id) } + freeBuf(chunk.Data) blobs = append(blobs, id) } From 162629571d0d83e5d70b7a5988374f95575f4960 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Mon, 15 Aug 2016 21:37:19 +0200 Subject: [PATCH 82/98] Add BenchmarkFindUsedBlobs --- src/restic/find_test.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/src/restic/find_test.go b/src/restic/find_test.go 
index 17fbc83b..f7e47bde 100644 --- a/src/restic/find_test.go +++ b/src/restic/find_test.go @@ -116,3 +116,23 @@ func TestFindUsedBlobs(t *testing.T) { } } } + +func BenchmarkFindUsedBlobs(b *testing.B) { + repo, cleanup := repository.TestRepository(b) + defer cleanup() + + sn := TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + seen := pack.NewBlobSet() + blobs := pack.NewBlobSet() + err := FindUsedBlobs(repo, *sn.Tree, blobs, seen) + if err != nil { + b.Error(err) + } + + b.Logf("found %v blobs", len(blobs)) + } +} From bd819a5e818af66a3dff8528794c5063ab012426 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Tue, 16 Aug 2016 21:59:43 +0200 Subject: [PATCH 83/98] Fix panic --- src/restic/lock.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/restic/lock.go b/src/restic/lock.go index ed159344..5628e757 100644 --- a/src/restic/lock.go +++ b/src/restic/lock.go @@ -188,7 +188,7 @@ var staleTimeout = 30 * time.Minute // older than 30 minutes or if it was created on the current machine and the // process isn't alive any more. 
func (l *Lock) Stale() bool { - debug.Log("Lock.Stale", "testing if lock %v for process %d is stale", l.lockID.Str(), l.PID) + debug.Log("Lock.Stale", "testing if lock %v for process %d is stale", l, l.PID) if time.Now().Sub(l.Time) > staleTimeout { debug.Log("Lock.Stale", "lock is stale, timestamp is too old: %v\n", l.Time) return true From a107e3cc84aa4c0d72f252490214861999e30d75 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 19 Aug 2016 20:36:24 +0200 Subject: [PATCH 84/98] Correct comment --- src/restic/snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/restic/snapshot.go b/src/restic/snapshot.go index 9c95e84c..4a8339d4 100644 --- a/src/restic/snapshot.go +++ b/src/restic/snapshot.go @@ -125,7 +125,7 @@ func SamePaths(expected, actual []string) bool { return true } -// Error when no snapshot is found for the given criteria +// ErrNoSnapshotFound is returned when no snapshot for the given criteria could be found. var ErrNoSnapshotFound = errors.New("no snapshot found") // FindLatestSnapshot finds latest snapshot with optional target/directory and source filters From bb84d351f17c76044138ed4b496c60881027522f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 19 Aug 2016 20:45:19 +0200 Subject: [PATCH 85/98] Revert "ID: move Str() to non-pointer receiver" This reverts commit f102406cd70cf55a4e2f5874b7201ac8aeb401c7. --- src/restic/backend/id.go | 6 +++++- src/restic/backend/id_int_test.go | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/restic/backend/id.go b/src/restic/backend/id.go index 6c7bc532..11579270 100644 --- a/src/restic/backend/id.go +++ b/src/restic/backend/id.go @@ -44,7 +44,11 @@ func (id ID) String() string { const shortStr = 4 // Str returns the shortened string version of id. 
-func (id ID) Str() string { +func (id *ID) Str() string { + if id == nil { + return "[nil]" + } + if id.IsNull() { return "[null]" } diff --git a/src/restic/backend/id_int_test.go b/src/restic/backend/id_int_test.go index ed84a5a3..d46a1554 100644 --- a/src/restic/backend/id_int_test.go +++ b/src/restic/backend/id_int_test.go @@ -8,4 +8,9 @@ func TestIDMethods(t *testing.T) { if id.Str() != "[null]" { t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[null]", id.Str()) } + + var pid *ID + if pid.Str() != "[nil]" { + t.Errorf("ID.Str() returned wrong value, want %v, got %v", "[nil]", pid.Str()) + } } From 6cf4b815583cc5b19e12eda46bab910b4ad1d529 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Fri, 19 Aug 2016 20:50:52 +0200 Subject: [PATCH 86/98] Add functions to filter snapshots --- src/restic/snapshot.go | 3 +- src/restic/snapshot_filter.go | 195 +++++++++ src/restic/snapshot_filter_test.go | 262 ++++++++++++ src/restic/testdata/expired_snapshots_0 | 497 +++++++++++++++++++++++ src/restic/testdata/expired_snapshots_1 | 52 +++ src/restic/testdata/expired_snapshots_10 | 12 + src/restic/testdata/expired_snapshots_11 | 22 + src/restic/testdata/expired_snapshots_12 | 37 ++ src/restic/testdata/expired_snapshots_13 | 32 ++ src/restic/testdata/expired_snapshots_14 | 52 +++ src/restic/testdata/expired_snapshots_15 | 17 + src/restic/testdata/expired_snapshots_16 | 72 ++++ src/restic/testdata/expired_snapshots_2 | 77 ++++ src/restic/testdata/expired_snapshots_3 | 497 +++++++++++++++++++++++ src/restic/testdata/expired_snapshots_4 | 497 +++++++++++++++++++++++ src/restic/testdata/expired_snapshots_5 | 17 + src/restic/testdata/expired_snapshots_6 | 52 +++ src/restic/testdata/expired_snapshots_7 | 152 +++++++ src/restic/testdata/expired_snapshots_8 | 52 +++ src/restic/testdata/expired_snapshots_9 | 62 +++ src/restic/testdata/filter_snapshots_0 | 162 ++++++++ src/restic/testdata/filter_snapshots_1 | 32 ++ src/restic/testdata/filter_snapshots_2 | 22 + 
src/restic/testdata/filter_snapshots_3 | 162 ++++++++ src/restic/testdata/filter_snapshots_4 | 22 + src/restic/testdata/filter_snapshots_5 | 22 + 26 files changed, 3080 insertions(+), 1 deletion(-) create mode 100644 src/restic/snapshot_filter.go create mode 100644 src/restic/snapshot_filter_test.go create mode 100644 src/restic/testdata/expired_snapshots_0 create mode 100644 src/restic/testdata/expired_snapshots_1 create mode 100644 src/restic/testdata/expired_snapshots_10 create mode 100644 src/restic/testdata/expired_snapshots_11 create mode 100644 src/restic/testdata/expired_snapshots_12 create mode 100644 src/restic/testdata/expired_snapshots_13 create mode 100644 src/restic/testdata/expired_snapshots_14 create mode 100644 src/restic/testdata/expired_snapshots_15 create mode 100644 src/restic/testdata/expired_snapshots_16 create mode 100644 src/restic/testdata/expired_snapshots_2 create mode 100644 src/restic/testdata/expired_snapshots_3 create mode 100644 src/restic/testdata/expired_snapshots_4 create mode 100644 src/restic/testdata/expired_snapshots_5 create mode 100644 src/restic/testdata/expired_snapshots_6 create mode 100644 src/restic/testdata/expired_snapshots_7 create mode 100644 src/restic/testdata/expired_snapshots_8 create mode 100644 src/restic/testdata/expired_snapshots_9 create mode 100644 src/restic/testdata/filter_snapshots_0 create mode 100644 src/restic/testdata/filter_snapshots_1 create mode 100644 src/restic/testdata/filter_snapshots_2 create mode 100644 src/restic/testdata/filter_snapshots_3 create mode 100644 src/restic/testdata/filter_snapshots_4 create mode 100644 src/restic/testdata/filter_snapshots_5 diff --git a/src/restic/snapshot.go b/src/restic/snapshot.go index 4a8339d4..3eaa0b61 100644 --- a/src/restic/snapshot.go +++ b/src/restic/snapshot.go @@ -83,7 +83,8 @@ func LoadAllSnapshots(repo *repository.Repository) (snapshots []*Snapshot, err e } func (sn Snapshot) String() string { - return fmt.Sprintf("", sn.id.Str(), sn.Paths, 
sn.Time) + return fmt.Sprintf("", + sn.id.Str(), sn.Paths, sn.Time, sn.Username, sn.Hostname) } // ID retuns the snapshot's ID. diff --git a/src/restic/snapshot_filter.go b/src/restic/snapshot_filter.go new file mode 100644 index 00000000..589b4bd4 --- /dev/null +++ b/src/restic/snapshot_filter.go @@ -0,0 +1,195 @@ +package restic + +import ( + "fmt" + "reflect" + "sort" + "time" +) + +// Snapshots is a list of snapshots. +type Snapshots []*Snapshot + +// Len returns the number of snapshots in sn. +func (sn Snapshots) Len() int { + return len(sn) +} + +// Less returns true iff the ith snapshot has been made after the jth. +func (sn Snapshots) Less(i, j int) bool { + return sn[i].Time.After(sn[j].Time) +} + +// Swap exchanges the two snapshots. +func (sn Snapshots) Swap(i, j int) { + sn[i], sn[j] = sn[j], sn[i] +} + +// SnapshotFilter configures criteria for filtering snapshots before an +// ExpirePolicy can be applied. +type SnapshotFilter struct { + Hostname string + Username string + Paths []string +} + +// FilterSnapshots returns the snapshots from s which match the filter f. +func FilterSnapshots(s Snapshots, f SnapshotFilter) (result Snapshots) { + for _, snap := range s { + if f.Hostname != "" && f.Hostname != snap.Hostname { + continue + } + + if f.Username != "" && f.Username != snap.Username { + continue + } + + if f.Paths != nil && !reflect.DeepEqual(f.Paths, snap.Paths) { + continue + } + + result = append(result, snap) + } + + return result +} + +// ExpirePolicy configures which snapshots should be automatically removed. +type ExpirePolicy struct { + Last int // keep the last n snapshots + Daily int // keep the last n daily snapshots + Weekly int // keep the last n weekly snapshots + Monthly int // keep the last n monthly snapshots + Yearly int // keep the last n yearly snapshots +} + +// Sum returns the maximum number of snapshots to be kept according to this +// policy. 
+func (e ExpirePolicy) Sum() int { + return e.Last + e.Daily + e.Weekly + e.Monthly + e.Yearly +} + +// filter is used to split a list of snapshots into those to keep and those to +// remove according to a policy. +type filter struct { + Unprocessed Snapshots + Remove Snapshots + Keep Snapshots +} + +func (f filter) String() string { + return fmt.Sprintf("", len(f.Unprocessed), len(f.Keep), len(f.Remove)) +} + +// ymd returns an integer in the form YYYYMMDD. +func ymd(d time.Time) int { + return d.Year()*10000 + int(d.Month())*100 + d.Day() +} + +// yw returns an integer in the form YYYYWW, where WW is the week number. +func yw(d time.Time) int { + year, week := d.ISOWeek() + return year*100 + week +} + +// ym returns an integer in the form YYYYMM. +func ym(d time.Time) int { + return d.Year()*100 + int(d.Month()) +} + +// y returns the year of d. +func y(d time.Time) int { + return d.Year() +} + +// apply moves snapshots from Unprocess to either Keep or Remove. It sorts the +// snapshots into buckets according to the return of fn, and then moves the +// newest snapshot in each bucket to Keep and all others to Remove. When max +// snapshots were found, processing stops. +func (f *filter) apply(fn func(time.Time) int, max int) { + if max == 0 || len(f.Unprocessed) == 0 { + return + } + + sameDay := Snapshots{} + lastDay := fn(f.Unprocessed[0].Time) + + for len(f.Unprocessed) > 0 { + cur := f.Unprocessed[0] + + day := fn(cur.Time) + + // if the snapshots are from a new day, forget all but the first (=last + // in time) snapshot from the previous day. 
+ if day != lastDay { + f.Keep = append(f.Keep, sameDay[0]) + for _, snapshot := range sameDay[1:] { + f.Remove = append(f.Remove, snapshot) + } + + sameDay = Snapshots{} + lastDay = day + max-- + + if max == 0 { + break + } + } + + // collect all snapshots for the current day + sameDay = append(sameDay, cur) + f.Unprocessed = f.Unprocessed[1:] + } + + if len(sameDay) > 0 { + f.Keep = append(f.Keep, sameDay[0]) + for _, snapshot := range sameDay[1:] { + f.Remove = append(f.Remove, snapshot) + } + } +} + +// keepLast marks the last n snapshots as to be kept. +func (f *filter) keepLast(n int) { + if n > len(f.Unprocessed) { + n = len(f.Unprocessed) + } + + f.Keep = append(f.Keep, f.Unprocessed[:n]...) + f.Unprocessed = f.Unprocessed[n:] +} + +// finish moves all remaining snapshots to remove. +func (f *filter) finish() { + f.Remove = append(f.Remove, f.Unprocessed...) +} + +// ApplyPolicy runs returns the snapshots from s that are to be deleted according +// to the policy p. s is sorted in the process. 
+func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots) { + sort.Sort(list) + + empty := ExpirePolicy{} + if p == empty { + return list, remove + } + + if len(list) == 0 { + return list, remove + } + + f := filter{ + Unprocessed: list, + Remove: Snapshots{}, + Keep: Snapshots{}, + } + + f.keepLast(p.Last) + f.apply(ymd, p.Daily) + f.apply(yw, p.Weekly) + f.apply(ym, p.Monthly) + f.apply(y, p.Yearly) + f.finish() + + return f.Keep, f.Remove +} diff --git a/src/restic/snapshot_filter_test.go b/src/restic/snapshot_filter_test.go new file mode 100644 index 00000000..6bf65b9b --- /dev/null +++ b/src/restic/snapshot_filter_test.go @@ -0,0 +1,262 @@ +package restic + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "reflect" + "sort" + "testing" + "time" +) + +func parseTime(s string) time.Time { + t, err := time.Parse("2006-01-02 15:04:05 -0700", s) + if err != nil { + panic(err) + } + + return t.Local() +} + +var testFilterSnapshots = Snapshots{ + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-01 01:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 01:03:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-03 07:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 07:08:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 10:23:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 11:23:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:23:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:24:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", 
Username: "testuser", Time: parseTime("2016-01-04 12:28:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:30:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 16:23:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-05 09:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-06 08:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-07 10:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "root", Time: parseTime("2016-01-08 20:02:03 +0100"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "foo", Username: "root", Time: parseTime("2016-01-09 21:02:03 +0100"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "bar", Username: "root", Time: parseTime("2016-01-12 21:02:03 +0100"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-12 21:08:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-18 12:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, +} + +var filterTests = []SnapshotFilter{ + {Hostname: "foo"}, + {Username: "root"}, + {Hostname: "foo", Username: "root"}, + {Paths: []string{"/usr", "/bin"}}, + {Hostname: "bar", Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "root", Paths: []string{"/usr", "/sbin"}}, +} + +func TestFilterSnapshots(t *testing.T) { + sort.Sort(testFilterSnapshots) + + for i, f := range filterTests { + res := FilterSnapshots(testFilterSnapshots, f) + + goldenFilename := filepath.Join("testdata", fmt.Sprintf("filter_snapshots_%d", i)) + + if *updateGoldenFiles { + buf, err := json.MarshalIndent(res, "", " ") + if err != nil { + t.Fatalf("error marshaling result: %v", err) + } + 
+ if err = ioutil.WriteFile(goldenFilename, buf, 0644); err != nil { + t.Fatalf("unable to update golden file: %v", err) + } + } + + buf, err := ioutil.ReadFile(goldenFilename) + if err != nil { + t.Errorf("error loading golden file %v: %v", goldenFilename, err) + continue + } + + var want Snapshots + err = json.Unmarshal(buf, &want) + + if !reflect.DeepEqual(res, want) { + t.Errorf("test %v: wrong result, want:\n %#v\ngot:\n %#v", i, want, res) + continue + } + } +} + +var testExpireSnapshots = Snapshots{ + {Time: parseTime("2014-09-01 10:20:30 +0100")}, + {Time: parseTime("2014-09-02 10:20:30 +0100")}, + {Time: parseTime("2014-09-05 10:20:30 +0100")}, + {Time: parseTime("2014-09-06 10:20:30 +0100")}, + {Time: parseTime("2014-09-08 10:20:30 +0100")}, + {Time: parseTime("2014-09-09 10:20:30 +0100")}, + {Time: parseTime("2014-09-10 10:20:30 +0100")}, + {Time: parseTime("2014-09-11 10:20:30 +0100")}, + {Time: parseTime("2014-09-20 10:20:30 +0100")}, + {Time: parseTime("2014-09-22 10:20:30 +0100")}, + {Time: parseTime("2014-08-08 10:20:30 +0100")}, + {Time: parseTime("2014-08-10 10:20:30 +0100")}, + {Time: parseTime("2014-08-12 10:20:30 +0100")}, + {Time: parseTime("2014-08-13 10:20:30 +0100")}, + {Time: parseTime("2014-08-13 10:20:30 +0100")}, + {Time: parseTime("2014-08-15 10:20:30 +0100")}, + {Time: parseTime("2014-08-18 10:20:30 +0100")}, + {Time: parseTime("2014-08-20 10:20:30 +0100")}, + {Time: parseTime("2014-08-21 10:20:30 +0100")}, + {Time: parseTime("2014-08-22 10:20:30 +0100")}, + {Time: parseTime("2014-10-01 10:20:30 +0100")}, + {Time: parseTime("2014-10-02 10:20:30 +0100")}, + {Time: parseTime("2014-10-05 10:20:30 +0100")}, + {Time: parseTime("2014-10-06 10:20:30 +0100")}, + {Time: parseTime("2014-10-08 10:20:30 +0100")}, + {Time: parseTime("2014-10-09 10:20:30 +0100")}, + {Time: parseTime("2014-10-10 10:20:30 +0100")}, + {Time: parseTime("2014-10-11 10:20:30 +0100")}, + {Time: parseTime("2014-10-20 10:20:30 +0100")}, + {Time: parseTime("2014-10-22 
10:20:30 +0100")}, + {Time: parseTime("2014-11-08 10:20:30 +0100")}, + {Time: parseTime("2014-11-10 10:20:30 +0100")}, + {Time: parseTime("2014-11-12 10:20:30 +0100")}, + {Time: parseTime("2014-11-13 10:20:30 +0100")}, + {Time: parseTime("2014-11-13 10:20:30 +0100")}, + {Time: parseTime("2014-11-15 10:20:30 +0100")}, + {Time: parseTime("2014-11-18 10:20:30 +0100")}, + {Time: parseTime("2014-11-20 10:20:30 +0100")}, + {Time: parseTime("2014-11-21 10:20:30 +0100")}, + {Time: parseTime("2014-11-22 10:20:30 +0100")}, + {Time: parseTime("2015-09-01 10:20:30 +0100")}, + {Time: parseTime("2015-09-02 10:20:30 +0100")}, + {Time: parseTime("2015-09-05 10:20:30 +0100")}, + {Time: parseTime("2015-09-06 10:20:30 +0100")}, + {Time: parseTime("2015-09-08 10:20:30 +0100")}, + {Time: parseTime("2015-09-09 10:20:30 +0100")}, + {Time: parseTime("2015-09-10 10:20:30 +0100")}, + {Time: parseTime("2015-09-11 10:20:30 +0100")}, + {Time: parseTime("2015-09-20 10:20:30 +0100")}, + {Time: parseTime("2015-09-22 10:20:30 +0100")}, + {Time: parseTime("2015-08-08 10:20:30 +0100")}, + {Time: parseTime("2015-08-10 10:20:30 +0100")}, + {Time: parseTime("2015-08-12 10:20:30 +0100")}, + {Time: parseTime("2015-08-13 10:20:30 +0100")}, + {Time: parseTime("2015-08-13 10:20:30 +0100")}, + {Time: parseTime("2015-08-15 10:20:30 +0100")}, + {Time: parseTime("2015-08-18 10:20:30 +0100")}, + {Time: parseTime("2015-08-20 10:20:30 +0100")}, + {Time: parseTime("2015-08-21 10:20:30 +0100")}, + {Time: parseTime("2015-08-22 10:20:30 +0100")}, + {Time: parseTime("2015-10-01 10:20:30 +0100")}, + {Time: parseTime("2015-10-02 10:20:30 +0100")}, + {Time: parseTime("2015-10-05 10:20:30 +0100")}, + {Time: parseTime("2015-10-06 10:20:30 +0100")}, + {Time: parseTime("2015-10-08 10:20:30 +0100")}, + {Time: parseTime("2015-10-09 10:20:30 +0100")}, + {Time: parseTime("2015-10-10 10:20:30 +0100")}, + {Time: parseTime("2015-10-11 10:20:30 +0100")}, + {Time: parseTime("2015-10-20 10:20:30 +0100")}, + {Time: parseTime("2015-10-22 
10:20:30 +0100")}, + {Time: parseTime("2015-11-08 10:20:30 +0100")}, + {Time: parseTime("2015-11-10 10:20:30 +0100")}, + {Time: parseTime("2015-11-12 10:20:30 +0100")}, + {Time: parseTime("2015-11-13 10:20:30 +0100")}, + {Time: parseTime("2015-11-13 10:20:30 +0100")}, + {Time: parseTime("2015-11-15 10:20:30 +0100")}, + {Time: parseTime("2015-11-18 10:20:30 +0100")}, + {Time: parseTime("2015-11-20 10:20:30 +0100")}, + {Time: parseTime("2015-11-21 10:20:30 +0100")}, + {Time: parseTime("2015-11-22 10:20:30 +0100")}, + {Time: parseTime("2016-01-01 01:02:03 +0100")}, + {Time: parseTime("2016-01-01 01:03:03 +0100")}, + {Time: parseTime("2016-01-01 07:08:03 +0100")}, + {Time: parseTime("2016-01-03 07:02:03 +0100")}, + {Time: parseTime("2016-01-04 10:23:03 +0100")}, + {Time: parseTime("2016-01-04 11:23:03 +0100")}, + {Time: parseTime("2016-01-04 12:23:03 +0100")}, + {Time: parseTime("2016-01-04 12:24:03 +0100")}, + {Time: parseTime("2016-01-04 12:28:03 +0100")}, + {Time: parseTime("2016-01-04 12:30:03 +0100")}, + {Time: parseTime("2016-01-04 16:23:03 +0100")}, + {Time: parseTime("2016-01-05 09:02:03 +0100")}, + {Time: parseTime("2016-01-06 08:02:03 +0100")}, + {Time: parseTime("2016-01-07 10:02:03 +0100")}, + {Time: parseTime("2016-01-08 20:02:03 +0100")}, + {Time: parseTime("2016-01-09 21:02:03 +0100")}, + {Time: parseTime("2016-01-12 21:02:03 +0100")}, + {Time: parseTime("2016-01-12 21:08:03 +0100")}, + {Time: parseTime("2016-01-18 12:02:03 +0100")}, +} + +var expireTests = []ExpirePolicy{ + {}, + {Last: 10}, + {Last: 15}, + {Last: 99}, + {Last: 200}, + {Daily: 3}, + {Daily: 10}, + {Daily: 30}, + {Last: 5, Daily: 5}, + {Last: 2, Daily: 10}, + {Weekly: 2}, + {Weekly: 4}, + {Daily: 3, Weekly: 4}, + {Monthly: 6}, + {Daily: 2, Weekly: 2, Monthly: 6}, + {Yearly: 10}, + {Daily: 7, Weekly: 2, Monthly: 3, Yearly: 10}, +} + +func TestApplyPolicy(t *testing.T) { + for i, p := range expireTests { + keep, remove := ApplyPolicy(testExpireSnapshots, p) + + t.Logf("test %d: returned 
keep %v, remove %v (of %v) expired snapshots for policy %v", + i, len(keep), len(remove), len(testExpireSnapshots), p) + + if len(keep)+len(remove) != len(testExpireSnapshots) { + t.Errorf("test %d: len(keep)+len(remove) = %d != len(testExpireSnapshots) = %d", + i, len(keep)+len(remove), len(testExpireSnapshots)) + } + + if p.Sum() > 0 && len(keep) > p.Sum() { + t.Errorf("not enough snapshots removed: policy allows %v snapshots to remain, but ended up with %v", + p.Sum(), len(keep)) + } + + for _, sn := range keep { + t.Logf("test %d: keep snapshot at %v\n", i, sn.Time) + } + for _, sn := range remove { + t.Logf("test %d: forget snapshot at %v\n", i, sn.Time) + } + + goldenFilename := filepath.Join("testdata", fmt.Sprintf("expired_snapshots_%d", i)) + + if *updateGoldenFiles { + buf, err := json.MarshalIndent(keep, "", " ") + if err != nil { + t.Fatalf("error marshaling result: %v", err) + } + + if err = ioutil.WriteFile(goldenFilename, buf, 0644); err != nil { + t.Fatalf("unable to update golden file: %v", err) + } + } + + buf, err := ioutil.ReadFile(goldenFilename) + if err != nil { + t.Errorf("error loading golden file %v: %v", goldenFilename, err) + continue + } + + var want Snapshots + err = json.Unmarshal(buf, &want) + + if !reflect.DeepEqual(keep, want) { + t.Errorf("test %v: wrong result, want:\n %v\ngot:\n %v", i, want, keep) + continue + } + } +} diff --git a/src/restic/testdata/expired_snapshots_0 b/src/restic/testdata/expired_snapshots_0 new file mode 100644 index 00000000..d83ec624 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_0 @@ -0,0 +1,497 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + 
}, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-08T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T11:20:30+02:00", + "tree": null, + "paths": null + 
}, + { + "time": "2015-10-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-21T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-18T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-15T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T11:20:30+02:00", + "tree": null, + "paths": null + 
}, + { + "time": "2015-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-12T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-21T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-20T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-18T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-15T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-12T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-10T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-08T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-22T11:20:30+02:00", + "tree": null, + "paths": null + 
}, + { + "time": "2014-09-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-21T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-18T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-15T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-12T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-08T11:20:30+02:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_1 b/src/restic/testdata/expired_snapshots_1 new file mode 100644 index 00000000..5a6f3e8a --- /dev/null +++ b/src/restic/testdata/expired_snapshots_1 @@ -0,0 +1,52 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03+01:00", + "tree": null, + 
"paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_10 b/src/restic/testdata/expired_snapshots_10 new file mode 100644 index 00000000..853fdce7 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_10 @@ -0,0 +1,12 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_11 b/src/restic/testdata/expired_snapshots_11 new file mode 100644 index 00000000..c89b120a --- /dev/null +++ b/src/restic/testdata/expired_snapshots_11 @@ -0,0 +1,22 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_12 b/src/restic/testdata/expired_snapshots_12 new file mode 100644 index 00000000..240906b1 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_12 @@ -0,0 +1,37 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": 
"2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_13 b/src/restic/testdata/expired_snapshots_13 new file mode 100644 index 00000000..938a8527 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_13 @@ -0,0 +1,32 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_14 b/src/restic/testdata/expired_snapshots_14 new file mode 100644 index 00000000..52f6b1aa --- /dev/null +++ b/src/restic/testdata/expired_snapshots_14 @@ -0,0 +1,52 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T11:20:30+02:00", + "tree": null, + 
"paths": null + }, + { + "time": "2014-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-22T11:20:30+02:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_15 b/src/restic/testdata/expired_snapshots_15 new file mode 100644 index 00000000..aaf743f0 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_15 @@ -0,0 +1,17 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_16 b/src/restic/testdata/expired_snapshots_16 new file mode 100644 index 00000000..eddb2c83 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_16 @@ -0,0 +1,72 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": 
"2014-11-22T10:20:30+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_2 b/src/restic/testdata/expired_snapshots_2 new file mode 100644 index 00000000..fcc8bb97 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_2 @@ -0,0 +1,77 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_3 b/src/restic/testdata/expired_snapshots_3 new file mode 100644 index 00000000..d83ec624 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_3 @@ -0,0 +1,497 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03+01:00", + "tree": null, + "paths": 
null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30+01:00", + "tree": null, + "paths": 
null + }, + { + "time": "2015-11-08T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-21T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-18T11:20:30+02:00", + "tree": null, + "paths": 
null + }, + { + "time": "2015-08-15T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-12T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-21T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-20T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-18T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-15T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-12T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-10T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-08T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-02T11:20:30+02:00", + "tree": null, + "paths": 
null + }, + { + "time": "2014-10-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-21T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-18T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-15T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-12T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-08T11:20:30+02:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_4 b/src/restic/testdata/expired_snapshots_4 new file mode 100644 index 00000000..d83ec624 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_4 @@ -0,0 +1,497 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": 
null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:28:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:24:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30+01:00", + "tree": 
null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-08T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-21T11:20:30+02:00", + "tree": 
null, + "paths": null + }, + { + "time": "2015-08-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-18T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-15T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-12T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-21T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-20T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-18T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-15T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-12T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-10T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-08T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-06T11:20:30+02:00", + "tree": 
null, + "paths": null + }, + { + "time": "2014-10-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-10-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-09-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-21T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-18T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-15T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-13T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-12T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-08-08T11:20:30+02:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_5 b/src/restic/testdata/expired_snapshots_5 new 
file mode 100644 index 00000000..e862779c --- /dev/null +++ b/src/restic/testdata/expired_snapshots_5 @@ -0,0 +1,17 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_6 b/src/restic/testdata/expired_snapshots_6 new file mode 100644 index 00000000..6f53af13 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_6 @@ -0,0 +1,52 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_7 b/src/restic/testdata/expired_snapshots_7 new file mode 100644 index 00000000..750da193 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_7 @@ -0,0 +1,152 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": 
"2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-10T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-08T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": 
"2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_8 b/src/restic/testdata/expired_snapshots_8 new file mode 100644 index 00000000..fd72ec6a --- /dev/null +++ b/src/restic/testdata/expired_snapshots_8 @@ -0,0 +1,52 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_9 b/src/restic/testdata/expired_snapshots_9 new file mode 100644 index 00000000..63fbd8b0 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_9 @@ -0,0 +1,62 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": 
null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/filter_snapshots_0 b/src/restic/testdata/filter_snapshots_0 new file mode 100644 index 00000000..022aa7ae --- /dev/null +++ b/src/restic/testdata/filter_snapshots_0 @@ -0,0 +1,162 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root" + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root" + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T12:30:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T12:28:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + 
"hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T12:24:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T12:23:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T11:23:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T10:23:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-01T01:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + } +] \ No newline at end of file diff --git a/src/restic/testdata/filter_snapshots_1 b/src/restic/testdata/filter_snapshots_1 new file mode 100644 index 00000000..1239b481 --- /dev/null +++ b/src/restic/testdata/filter_snapshots_1 @@ -0,0 +1,32 @@ +[ + { + "time": "2016-01-12T21:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "bar", + "username": "root" + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root" + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root" + } +] \ No newline at end of file diff --git a/src/restic/testdata/filter_snapshots_2 b/src/restic/testdata/filter_snapshots_2 new file mode 100644 index 00000000..1bde9e51 --- /dev/null +++ b/src/restic/testdata/filter_snapshots_2 @@ -0,0 +1,22 @@ +[ + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root" + 
}, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root" + } +] \ No newline at end of file diff --git a/src/restic/testdata/filter_snapshots_3 b/src/restic/testdata/filter_snapshots_3 new file mode 100644 index 00000000..bf120b32 --- /dev/null +++ b/src/restic/testdata/filter_snapshots_3 @@ -0,0 +1,162 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T12:30:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T12:28:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T12:24:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T12:23:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-04T11:23:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": 
"testuser" + }, + { + "time": "2016-01-04T10:23:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": "testuser" + }, + { + "time": "2016-01-01T01:03:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": "testuser" + }, + { + "time": "2016-01-01T01:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "foo", + "username": "testuser" + } +] \ No newline at end of file diff --git a/src/restic/testdata/filter_snapshots_4 b/src/restic/testdata/filter_snapshots_4 new file mode 100644 index 00000000..2c566811 --- /dev/null +++ b/src/restic/testdata/filter_snapshots_4 @@ -0,0 +1,22 @@ +[ + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": "testuser" + }, + { + "time": "2016-01-01T01:03:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/bin" + ], + "hostname": "bar", + "username": "testuser" + } +] \ No newline at end of file diff --git a/src/restic/testdata/filter_snapshots_5 b/src/restic/testdata/filter_snapshots_5 new file mode 100644 index 00000000..1bde9e51 --- /dev/null +++ b/src/restic/testdata/filter_snapshots_5 @@ -0,0 +1,22 @@ +[ + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root" + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": [ + "/usr", + "/sbin" + ], + "hostname": "foo", + "username": "root" + } +] \ No newline at end of file From cbd457e557ff68dd6ffe1236199270bfb1f21e83 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 15:55:02 +0200 
Subject: [PATCH 87/98] Add Hourly expire functions --- src/restic/snapshot_filter.go | 7 ++ src/restic/snapshot_filter_test.go | 1 + src/restic/testdata/expired_snapshots_10 | 50 +++++++++++ src/restic/testdata/expired_snapshots_11 | 10 --- src/restic/testdata/expired_snapshots_12 | 15 ---- src/restic/testdata/expired_snapshots_13 | 37 ++++---- src/restic/testdata/expired_snapshots_14 | 20 ----- src/restic/testdata/expired_snapshots_15 | 35 ++++++++ src/restic/testdata/expired_snapshots_16 | 55 ------------ src/restic/testdata/expired_snapshots_17 | 72 +++++++++++++++ src/restic/testdata/expired_snapshots_5 | 85 ++++++++++++++++++ src/restic/testdata/expired_snapshots_6 | 35 -------- src/restic/testdata/expired_snapshots_7 | 100 --------------------- src/restic/testdata/expired_snapshots_8 | 110 +++++++++++++++++++++-- src/restic/testdata/expired_snapshots_9 | 10 --- 15 files changed, 376 insertions(+), 266 deletions(-) create mode 100644 src/restic/testdata/expired_snapshots_17 diff --git a/src/restic/snapshot_filter.go b/src/restic/snapshot_filter.go index 589b4bd4..3753e264 100644 --- a/src/restic/snapshot_filter.go +++ b/src/restic/snapshot_filter.go @@ -57,6 +57,7 @@ func FilterSnapshots(s Snapshots, f SnapshotFilter) (result Snapshots) { // ExpirePolicy configures which snapshots should be automatically removed. type ExpirePolicy struct { Last int // keep the last n snapshots + Hourly int // keep the last n hourly snapshots Daily int // keep the last n daily snapshots Weekly int // keep the last n weekly snapshots Monthly int // keep the last n monthly snapshots @@ -81,6 +82,11 @@ func (f filter) String() string { return fmt.Sprintf("", len(f.Unprocessed), len(f.Keep), len(f.Remove)) } +// ymdh returns an integer in the form YYYYMMDDHH. +func ymdh(d time.Time) int { + return d.Year()*1000000 + int(d.Month())*10000 + d.Day()*100 + d.Hour() +} + // ymd returns an integer in the form YYYYMMDD. 
func ymd(d time.Time) int { return d.Year()*10000 + int(d.Month())*100 + d.Day() @@ -185,6 +191,7 @@ func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots) { } f.keepLast(p.Last) + f.apply(ymdh, p.Hourly) f.apply(ymd, p.Daily) f.apply(yw, p.Weekly) f.apply(ym, p.Monthly) diff --git a/src/restic/snapshot_filter_test.go b/src/restic/snapshot_filter_test.go index 6bf65b9b..7a902a68 100644 --- a/src/restic/snapshot_filter_test.go +++ b/src/restic/snapshot_filter_test.go @@ -194,6 +194,7 @@ var expireTests = []ExpirePolicy{ {Last: 15}, {Last: 99}, {Last: 200}, + {Hourly: 20}, {Daily: 3}, {Daily: 10}, {Daily: 30}, diff --git a/src/restic/testdata/expired_snapshots_10 b/src/restic/testdata/expired_snapshots_10 index 853fdce7..63fbd8b0 100644 --- a/src/restic/testdata/expired_snapshots_10 +++ b/src/restic/testdata/expired_snapshots_10 @@ -8,5 +8,55 @@ "time": "2016-01-12T21:08:03+01:00", "tree": null, "paths": null + }, + { + "time": "2016-01-12T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null } ] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_11 b/src/restic/testdata/expired_snapshots_11 index c89b120a..853fdce7 100644 --- a/src/restic/testdata/expired_snapshots_11 +++ b/src/restic/testdata/expired_snapshots_11 @@ 
-8,15 +8,5 @@ "time": "2016-01-12T21:08:03+01:00", "tree": null, "paths": null - }, - { - "time": "2016-01-09T21:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-03T07:02:03+01:00", - "tree": null, - "paths": null } ] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_12 b/src/restic/testdata/expired_snapshots_12 index 240906b1..c89b120a 100644 --- a/src/restic/testdata/expired_snapshots_12 +++ b/src/restic/testdata/expired_snapshots_12 @@ -14,24 +14,9 @@ "tree": null, "paths": null }, - { - "time": "2016-01-08T20:02:03+01:00", - "tree": null, - "paths": null - }, { "time": "2016-01-03T07:02:03+01:00", "tree": null, "paths": null - }, - { - "time": "2015-11-22T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-15T10:20:30+01:00", - "tree": null, - "paths": null } ] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_13 b/src/restic/testdata/expired_snapshots_13 index 938a8527..240906b1 100644 --- a/src/restic/testdata/expired_snapshots_13 +++ b/src/restic/testdata/expired_snapshots_13 @@ -4,28 +4,33 @@ "tree": null, "paths": null }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, { "time": "2015-11-22T10:20:30+01:00", "tree": null, "paths": null }, { - "time": "2015-10-22T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-09-22T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-08-22T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2014-11-22T10:20:30+01:00", + "time": "2015-11-15T10:20:30+01:00", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_14 b/src/restic/testdata/expired_snapshots_14 index 
52f6b1aa..938a8527 100644 --- a/src/restic/testdata/expired_snapshots_14 +++ b/src/restic/testdata/expired_snapshots_14 @@ -4,21 +4,6 @@ "tree": null, "paths": null }, - { - "time": "2016-01-12T21:08:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-09T21:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-03T07:02:03+01:00", - "tree": null, - "paths": null - }, { "time": "2015-11-22T10:20:30+01:00", "tree": null, @@ -43,10 +28,5 @@ "time": "2014-11-22T10:20:30+01:00", "tree": null, "paths": null - }, - { - "time": "2014-10-22T11:20:30+02:00", - "tree": null, - "paths": null } ] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_15 b/src/restic/testdata/expired_snapshots_15 index aaf743f0..52f6b1aa 100644 --- a/src/restic/testdata/expired_snapshots_15 +++ b/src/restic/testdata/expired_snapshots_15 @@ -4,14 +4,49 @@ "tree": null, "paths": null }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, { "time": "2015-11-22T10:20:30+01:00", "tree": null, "paths": null }, + { + "time": "2015-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, { "time": "2014-11-22T10:20:30+01:00", "tree": null, "paths": null + }, + { + "time": "2014-10-22T11:20:30+02:00", + "tree": null, + "paths": null } ] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_16 b/src/restic/testdata/expired_snapshots_16 index eddb2c83..aaf743f0 100644 --- a/src/restic/testdata/expired_snapshots_16 +++ b/src/restic/testdata/expired_snapshots_16 @@ -4,66 +4,11 @@ "tree": null, "paths": null }, - { - "time": "2016-01-12T21:08:03+01:00", - "tree": null, - "paths": 
null - }, - { - "time": "2016-01-09T21:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-08T20:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-07T10:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-06T08:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-05T09:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T16:23:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-03T07:02:03+01:00", - "tree": null, - "paths": null - }, { "time": "2015-11-22T10:20:30+01:00", "tree": null, "paths": null }, - { - "time": "2015-10-22T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-09-22T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-08-22T11:20:30+02:00", - "tree": null, - "paths": null - }, { "time": "2014-11-22T10:20:30+01:00", "tree": null, diff --git a/src/restic/testdata/expired_snapshots_17 b/src/restic/testdata/expired_snapshots_17 new file mode 100644 index 00000000..eddb2c83 --- /dev/null +++ b/src/restic/testdata/expired_snapshots_17 @@ -0,0 +1,72 @@ +[ + { + "time": "2016-01-18T12:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-12T21:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-09T21:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T11:20:30+02:00", 
+ "tree": null, + "paths": null + }, + { + "time": "2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-08-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2014-11-22T10:20:30+01:00", + "tree": null, + "paths": null + } +] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_5 b/src/restic/testdata/expired_snapshots_5 index e862779c..f84efb1f 100644 --- a/src/restic/testdata/expired_snapshots_5 +++ b/src/restic/testdata/expired_snapshots_5 @@ -13,5 +13,90 @@ "time": "2016-01-09T21:02:03+01:00", "tree": null, "paths": null + }, + { + "time": "2016-01-08T20:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-07T10:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-06T08:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-05T09:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T16:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T12:30:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T11:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-04T10:23:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-03T07:02:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2016-01-01T01:03:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30+01:00", + "tree": null, + "paths": null } ] \ No newline at end of 
file diff --git a/src/restic/testdata/expired_snapshots_6 b/src/restic/testdata/expired_snapshots_6 index 6f53af13..e862779c 100644 --- a/src/restic/testdata/expired_snapshots_6 +++ b/src/restic/testdata/expired_snapshots_6 @@ -13,40 +13,5 @@ "time": "2016-01-09T21:02:03+01:00", "tree": null, "paths": null - }, - { - "time": "2016-01-08T20:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-07T10:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-06T08:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-05T09:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-04T16:23:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-03T07:02:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2016-01-01T07:08:03+01:00", - "tree": null, - "paths": null } ] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_7 b/src/restic/testdata/expired_snapshots_7 index 750da193..6f53af13 100644 --- a/src/restic/testdata/expired_snapshots_7 +++ b/src/restic/testdata/expired_snapshots_7 @@ -48,105 +48,5 @@ "time": "2016-01-01T07:08:03+01:00", "tree": null, "paths": null - }, - { - "time": "2015-11-22T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-21T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-20T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-18T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-15T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-13T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-12T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-10T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-08T10:20:30+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-10-22T11:20:30+02:00", - "tree": null, - "paths": 
null - }, - { - "time": "2015-10-20T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-10-11T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-10-10T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-10-09T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-10-08T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-10-06T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-10-05T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-10-02T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-10-01T11:20:30+02:00", - "tree": null, - "paths": null - }, - { - "time": "2015-09-22T11:20:30+02:00", - "tree": null, - "paths": null } ] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_8 b/src/restic/testdata/expired_snapshots_8 index fd72ec6a..750da193 100644 --- a/src/restic/testdata/expired_snapshots_8 +++ b/src/restic/testdata/expired_snapshots_8 @@ -9,11 +9,6 @@ "tree": null, "paths": null }, - { - "time": "2016-01-12T21:02:03+01:00", - "tree": null, - "paths": null - }, { "time": "2016-01-09T21:02:03+01:00", "tree": null, @@ -48,5 +43,110 @@ "time": "2016-01-03T07:02:03+01:00", "tree": null, "paths": null + }, + { + "time": "2016-01-01T07:08:03+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-22T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-21T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-20T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-18T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-15T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-13T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-12T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": 
"2015-11-10T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-11-08T10:20:30+01:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-22T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-20T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-11T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-10T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-09T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-08T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-06T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-05T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-02T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-10-01T11:20:30+02:00", + "tree": null, + "paths": null + }, + { + "time": "2015-09-22T11:20:30+02:00", + "tree": null, + "paths": null } ] \ No newline at end of file diff --git a/src/restic/testdata/expired_snapshots_9 b/src/restic/testdata/expired_snapshots_9 index 63fbd8b0..fd72ec6a 100644 --- a/src/restic/testdata/expired_snapshots_9 +++ b/src/restic/testdata/expired_snapshots_9 @@ -48,15 +48,5 @@ "time": "2016-01-03T07:02:03+01:00", "tree": null, "paths": null - }, - { - "time": "2016-01-01T07:08:03+01:00", - "tree": null, - "paths": null - }, - { - "time": "2015-11-22T10:20:30+01:00", - "tree": null, - "paths": null } ] \ No newline at end of file From bf47dba1c4539ab829f58f3e19642dbb9194425f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 17:43:25 +0200 Subject: [PATCH 88/98] Add 'forget' command --- src/cmds/restic/cmd_forget.go | 153 ++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 src/cmds/restic/cmd_forget.go diff --git a/src/cmds/restic/cmd_forget.go b/src/cmds/restic/cmd_forget.go new file mode 100644 index 
00000000..df6d9706 --- /dev/null +++ b/src/cmds/restic/cmd_forget.go @@ -0,0 +1,153 @@ +package main + +import ( + "fmt" + "io" + "path" + "restic" + "restic/backend" +) + +// CmdForget implements the 'forget' command. +type CmdForget struct { + Last int `short:"l" long:"keep-last" description:"keep the last n snapshots"` + Hourly int `short:"H" long:"keep-hourly" description:"keep the last n hourly snapshots"` + Daily int `short:"d" long:"keep-daily" description:"keep the last n daily snapshots"` + Weekly int `short:"w" long:"keep-weekly" description:"keep the last n weekly snapshots"` + Monthly int `short:"m" long:"keep-monthly" description:"keep the last n monthly snapshots"` + Yearly int `short:"y" long:"keep-yearly" description:"keep the last n yearly snapshots"` + + DryRun bool `short:"n" long:"dry-run" description:"do not delete anything, just print what would be done"` + + global *GlobalOptions +} + +func init() { + _, err := parser.AddCommand("forget", + "removes snapshots from a repository", + "The forget command removes snapshots according to a policy.", + &CmdForget{global: &globalOpts}) + if err != nil { + panic(err) + } +} + +// Usage returns usage information for 'forget'. +func (cmd CmdForget) Usage() string { + return "[snapshot ID] ..." +} + +func printSnapshots(w io.Writer, snapshots restic.Snapshots) { + tab := NewTable() + tab.Header = fmt.Sprintf("%-8s %-19s %-10s %s", "ID", "Date", "Host", "Directory") + tab.RowFormat = "%-8s %-19s %-10s %s" + + for _, sn := range snapshots { + if len(sn.Paths) == 0 { + continue + } + id := sn.ID() + tab.Rows = append(tab.Rows, []interface{}{id.Str(), sn.Time.Format(TimeFormat), sn.Hostname, sn.Paths[0]}) + + if len(sn.Paths) > 1 { + for _, path := range sn.Paths[1:] { + tab.Rows = append(tab.Rows, []interface{}{"", "", "", path}) + } + } + } + + tab.Write(w) +} + +// Execute runs the 'forget' command. 
+func (cmd CmdForget) Execute(args []string) error { + repo, err := cmd.global.OpenRepository() + if err != nil { + return err + } + + lock, err := lockRepoExclusive(repo) + defer unlockRepo(lock) + if err != nil { + return err + } + + err = repo.LoadIndex() + if err != nil { + return err + } + + // first, process all snapshot IDs given as arguments + for _, s := range args { + id, err := restic.FindSnapshot(repo, s) + if err != nil { + return err + } + + if !cmd.DryRun { + err = repo.Backend().Remove(backend.Snapshot, id.String()) + if err != nil { + return err + } + + cmd.global.Verbosef("removed snapshot %v\n", id.Str()) + } else { + cmd.global.Verbosef("would removed snapshot %v\n", id.Str()) + } + + } + + // then, load all remaining snapshots + snapshots, err := restic.LoadAllSnapshots(repo) + if err != nil { + return err + } + + // group by hostname and dirs + type key struct { + Hostname string + Dirs string + } + + snapshotGroups := make(map[key]restic.Snapshots) + + for _, sn := range snapshots { + k := key{Hostname: sn.Hostname, Dirs: path.Join(sn.Paths...)} + list := snapshotGroups[k] + list = append(list, sn) + snapshotGroups[k] = list + } + + policy := restic.ExpirePolicy{ + Last: cmd.Last, + Hourly: cmd.Hourly, + Daily: cmd.Daily, + Weekly: cmd.Weekly, + Monthly: cmd.Monthly, + Yearly: cmd.Yearly, + } + + for key, snapshotGroup := range snapshotGroups { + cmd.global.Printf("snapshots for host %v, directories %v:\n\n", key.Hostname, key.Dirs) + keep, remove := restic.ApplyPolicy(snapshotGroup, policy) + + cmd.global.Printf("keep %d snapshots:\n", len(keep)) + printSnapshots(cmd.global.stdout, keep) + cmd.global.Printf("\n") + + cmd.global.Printf("remove %d snapshots:\n", len(remove)) + printSnapshots(cmd.global.stdout, remove) + cmd.global.Printf("\n") + + if !cmd.DryRun { + for _, sn := range remove { + err = repo.Backend().Remove(backend.Snapshot, sn.ID().String()) + if err != nil { + return err + } + } + } + } + + return nil +} From 
71f7f4f543d3b75df36ab6335365b1ae90383a8f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 17:51:48 +0200 Subject: [PATCH 89/98] Add ExpirePolicy.Empty() --- src/restic/snapshot_filter.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/restic/snapshot_filter.go b/src/restic/snapshot_filter.go index 3753e264..595987d9 100644 --- a/src/restic/snapshot_filter.go +++ b/src/restic/snapshot_filter.go @@ -67,7 +67,13 @@ type ExpirePolicy struct { // Sum returns the maximum number of snapshots to be kept according to this // policy. func (e ExpirePolicy) Sum() int { - return e.Last + e.Daily + e.Weekly + e.Monthly + e.Yearly + return e.Last + e.Hourly + e.Daily + e.Weekly + e.Monthly + e.Yearly +} + +// Empty returns true iff no policy has been configured (all values zero). +func (e ExpirePolicy) Empty() bool { + empty := ExpirePolicy{} + return e == empty } // filter is used to split a list of snapshots into those to keep and those to @@ -175,8 +181,7 @@ func (f *filter) finish() { func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots) { sort.Sort(list) - empty := ExpirePolicy{} - if p == empty { + if p.Empty() { return list, remove } From 5cf7c827b86081f9e1ba97d0b40dbbdc40b88a4d Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 17:53:03 +0200 Subject: [PATCH 90/98] forget: Do nothing if no policy is configured --- src/cmds/restic/cmd_forget.go | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/cmds/restic/cmd_forget.go b/src/cmds/restic/cmd_forget.go index df6d9706..da1f4d4c 100644 --- a/src/cmds/restic/cmd_forget.go +++ b/src/cmds/restic/cmd_forget.go @@ -94,7 +94,20 @@ func (cmd CmdForget) Execute(args []string) error { } else { cmd.global.Verbosef("would removed snapshot %v\n", id.Str()) } + } + policy := restic.ExpirePolicy{ + Last: cmd.Last, + Hourly: cmd.Hourly, + Daily: cmd.Daily, + Weekly: cmd.Weekly, + Monthly: cmd.Monthly, 
+ Yearly: cmd.Yearly, + } + + if policy.Empty() { + cmd.global.Verbosef("no expire policy configured, exiting\n") + return nil } // then, load all remaining snapshots @@ -118,15 +131,6 @@ func (cmd CmdForget) Execute(args []string) error { snapshotGroups[k] = list } - policy := restic.ExpirePolicy{ - Last: cmd.Last, - Hourly: cmd.Hourly, - Daily: cmd.Daily, - Weekly: cmd.Weekly, - Monthly: cmd.Monthly, - Yearly: cmd.Yearly, - } - for key, snapshotGroup := range snapshotGroups { cmd.global.Printf("snapshots for host %v, directories %v:\n\n", key.Hostname, key.Dirs) keep, remove := restic.ApplyPolicy(snapshotGroup, policy) From 8e7202bd6add62c716fa1d78a594a3d5bb75dca8 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 17:54:27 +0200 Subject: [PATCH 91/98] Rename function in debug 'dump' command --- src/cmds/restic/cmd_dump.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/cmds/restic/cmd_dump.go b/src/cmds/restic/cmd_dump.go index 68e4ac0d..95efa257 100644 --- a/src/cmds/restic/cmd_dump.go +++ b/src/cmds/restic/cmd_dump.go @@ -48,7 +48,7 @@ func prettyPrintJSON(wr io.Writer, item interface{}) error { return err } -func printSnapshots(repo *repository.Repository, wr io.Writer) error { +func debugPrintSnapshots(repo *repository.Repository, wr io.Writer) error { done := make(chan struct{}) defer close(done) @@ -226,14 +226,14 @@ func (cmd CmdDump) Execute(args []string) error { case "indexes": return cmd.DumpIndexes() case "snapshots": - return printSnapshots(repo, os.Stdout) + return debugPrintSnapshots(repo, os.Stdout) case "trees": return printTrees(repo, os.Stdout) case "packs": return printPacks(repo, os.Stdout) case "all": fmt.Printf("snapshots:\n") - err := printSnapshots(repo, os.Stdout) + err := debugPrintSnapshots(repo, os.Stdout) if err != nil { return err } From 00f647dc92507d3a5db6db6ad3366e40d62280fb Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 17:59:10 +0200 Subject: [PATCH 
92/98] forget: Join paths by ":" --- src/cmds/restic/cmd_forget.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmds/restic/cmd_forget.go b/src/cmds/restic/cmd_forget.go index da1f4d4c..07943316 100644 --- a/src/cmds/restic/cmd_forget.go +++ b/src/cmds/restic/cmd_forget.go @@ -3,9 +3,9 @@ package main import ( "fmt" "io" - "path" "restic" "restic/backend" + "strings" ) // CmdForget implements the 'forget' command. @@ -125,7 +125,7 @@ func (cmd CmdForget) Execute(args []string) error { snapshotGroups := make(map[key]restic.Snapshots) for _, sn := range snapshots { - k := key{Hostname: sn.Hostname, Dirs: path.Join(sn.Paths...)} + k := key{Hostname: sn.Hostname, Dirs: strings.Join(sn.Paths, ":")} list := snapshotGroups[k] list = append(list, sn) snapshotGroups[k] = list From 5f0ebb71b2fc9524b58b08efca488ae5f8ffbb66 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 17:59:47 +0200 Subject: [PATCH 93/98] forget: Allow filtering for a hostname --- src/cmds/restic/cmd_forget.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/cmds/restic/cmd_forget.go b/src/cmds/restic/cmd_forget.go index 07943316..dd43c1fc 100644 --- a/src/cmds/restic/cmd_forget.go +++ b/src/cmds/restic/cmd_forget.go @@ -17,6 +17,8 @@ type CmdForget struct { Monthly int `short:"m" long:"keep-monthly" description:"keep the last n monthly snapshots"` Yearly int `short:"y" long:"keep-yearly" description:"keep the last n yearly snapshots"` + Hostname string `long:"hostname" description:"only forget snapshots for the given hostname"` + DryRun bool `short:"n" long:"dry-run" description:"do not delete anything, just print what would be done"` global *GlobalOptions @@ -125,6 +127,10 @@ func (cmd CmdForget) Execute(args []string) error { snapshotGroups := make(map[key]restic.Snapshots) for _, sn := range snapshots { + if cmd.Hostname != "" && sn.Hostname != cmd.Hostname { + continue + } + k := key{Hostname: sn.Hostname, Dirs: strings.Join(sn.Paths, 
":")} list := snapshotGroups[k] list = append(list, sn) From 27d09093025af6d1ab21beb79094722d00bfc2c7 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 18:15:36 +0200 Subject: [PATCH 94/98] forget: Remove message when no policy is specified --- src/cmds/restic/cmd_forget.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/cmds/restic/cmd_forget.go b/src/cmds/restic/cmd_forget.go index dd43c1fc..17f371a5 100644 --- a/src/cmds/restic/cmd_forget.go +++ b/src/cmds/restic/cmd_forget.go @@ -108,7 +108,6 @@ func (cmd CmdForget) Execute(args []string) error { } if policy.Empty() { - cmd.global.Verbosef("no expire policy configured, exiting\n") return nil } From 458448357c445242ae6dc791c03e9fc3bde9db87 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 18:33:24 +0200 Subject: [PATCH 95/98] Add help texts which cross-line prune/forget --- src/cmds/restic/cmd_forget.go | 8 +++++++- src/cmds/restic/cmd_prune.go | 6 +++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/cmds/restic/cmd_forget.go b/src/cmds/restic/cmd_forget.go index 17f371a5..16da4b55 100644 --- a/src/cmds/restic/cmd_forget.go +++ b/src/cmds/restic/cmd_forget.go @@ -27,7 +27,13 @@ type CmdForget struct { func init() { _, err := parser.AddCommand("forget", "removes snapshots from a repository", - "The forget command removes snapshots according to a policy.", + ` +The forget command removes snapshots according to a policy. Please note +that this command really only deletes the snapshot object in the repo, which +is a reference to data stored there. In order to remove this (now +unreferenced) data after 'forget' was run successfully, see the 'prune' +command. 
+`, &CmdForget{global: &globalOpts}) if err != nil { panic(err) diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index 7ee1117c..b7a2653a 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -22,7 +22,11 @@ type CmdPrune struct { func init() { _, err := parser.AddCommand("prune", "removes content from a repository", - "The prune command removes rendundant and unneeded data from the repository", + ` +The prune command removes rendundant and unneeded data from the repository. +For removing snapshots, please see the 'forget' command, then afterwards run +'prune'. +`, &CmdPrune{global: &globalOpts}) if err != nil { panic(err) From 3337b5d3c4371bd8ca6b633a43e556babb187399 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 18:38:16 +0200 Subject: [PATCH 96/98] Add prune/forget to the manual --- doc/Manual.md | 92 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/doc/Manual.md b/doc/Manual.md index 27274a9f..555439cb 100644 --- a/doc/Manual.md +++ b/doc/Manual.md @@ -381,6 +381,98 @@ Now you can easily initialize restic to use Minio server as backend with this co Please note that knowledge of your password is required to access the repository. Losing your password means that your data is irrecoverably lost. +# Removing old snapshots + +All backup space is finite, so restic allows removing old snapshots. This can +be done either manually (by specifying a snapshot ID to remove) or by using a +policy that describes which snapshots to forget. For all remove operations, two +commands need to be called in sequence: `forget` to remove a snapshot and +`prune` to actually remove the data that was referenced by the snapshot from +the repository. 
+ +## Remove a single snapshot + +The command `snapshots` can be used to list all snapshots in a repository like this: + + $ restic -r /tmp/backup snapshots + enter password for repository: + ID Date Host Directory + ---------------------------------------------------------------------- + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work + 79766175 2015-05-08 21:40:19 kasimir /home/user/work + bdbd3439 2015-05-08 21:45:17 luigi /home/art + 590c8fc8 2015-05-08 21:47:38 kazik /srv + 9f0bc19e 2015-05-08 21:46:11 luigi /srv + +In order to remove the snapshot of `/home/art`, use the `forget` command and +specify the snapshot ID on the command line: + + $ restic -r /tmp/backup forget bdbd3439 + enter password for repository: + removed snapshot bdbd3439 + +Afterwards this snapshot is removed: + + $ restic -r /tmp/backup snapshots + enter password for repository: + ID Date Host Directory + ---------------------------------------------------------------------- + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work + 79766175 2015-05-08 21:40:19 kasimir /home/user/work + 590c8fc8 2015-05-08 21:47:38 kazik /srv + 9f0bc19e 2015-05-08 21:46:11 luigi /srv + +But the data that was referenced by files in this snapshot is still stored in +the repository. To clean up unreferenced data, the `prune` command must be run: + + $ restic -r /tmp/backup prune + enter password for repository: + + counting files in repo + building new index for repo + [0:00] 100.00% 22 / 22 files + repository contains 22 packs (8512 blobs) with 100.092 MiB bytes + processed 8512 blobs: 0 duplicate blobs, 0B duplicate + load all snapshots + find data that is still in use for 1 snapshots + [0:00] 100.00% 1 / 1 snapshots + found 8433 of 8512 data blobs still in use + will rewrite 3 packs + creating new index + [0:00] 86.36% 19 / 22 files + saved new index as 544a5084 + done + +Afterwards the repository is smaller. 
+ +## Removing snapshots according to a policy + +Removing snapshots manually is tedious and error-prone, therefore restic allows +specifying which snapshots should be removed automatically according to a +policy. You can specify how many hourly, daily, weekly, monthly and yearly +snapshots to keep, any other snapshots are removed. The most important +command-line parameter here is `--dry-run` which instructs restic to not remove +anything but print which snapshots would be removed. + +When `forget` is run with a policy, restic loads the list of all snapshots, +then groups these by host name and list of directories. The policy is then +applied to each group of snapshots separately. This is a safety feature. + +The `forget` command accepts the following parameters: + + * `--keep-last n` never delete the `n` last (most recent) snapshots + * `--keep-hourly n` for the last `n` hours in which a snapshot was made, keep + only the last snapshot for each hour. + * `--keep-daily n` for the last `n` days which have one or more snapshots, only + keep the last one for that day. + * `--keep-monthly n` for the last `n` months which have one or more snapshots, only + keep the last one for that month. + * `--keep-yearly n` for the last `n` years which have one or more snapshots, only + keep the last one for that year. + +Additionally, you can restrict removing snapshots to those which have a +particular hostname with the `--hostname` parameter. 
+ # Debugging restic The program can be built with debug support like this: From d3da30e8fbea485f29a867763f7d7f6b98dd395b Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 18:49:02 +0200 Subject: [PATCH 97/98] Use UTC for snapshot time based tests --- src/restic/snapshot_filter_test.go | 240 +++++++++++------------ src/restic/testdata/expired_snapshots_0 | 198 +++++++++---------- src/restic/testdata/expired_snapshots_1 | 20 +- src/restic/testdata/expired_snapshots_10 | 24 +-- src/restic/testdata/expired_snapshots_11 | 4 +- src/restic/testdata/expired_snapshots_12 | 8 +- src/restic/testdata/expired_snapshots_13 | 14 +- src/restic/testdata/expired_snapshots_14 | 12 +- src/restic/testdata/expired_snapshots_15 | 20 +- src/restic/testdata/expired_snapshots_16 | 6 +- src/restic/testdata/expired_snapshots_17 | 28 +-- src/restic/testdata/expired_snapshots_2 | 30 +-- src/restic/testdata/expired_snapshots_3 | 198 +++++++++---------- src/restic/testdata/expired_snapshots_4 | 198 +++++++++---------- src/restic/testdata/expired_snapshots_5 | 40 ++-- src/restic/testdata/expired_snapshots_6 | 6 +- src/restic/testdata/expired_snapshots_7 | 20 +- src/restic/testdata/expired_snapshots_8 | 60 +++--- src/restic/testdata/expired_snapshots_9 | 20 +- src/restic/testdata/filter_snapshots_0 | 32 +-- src/restic/testdata/filter_snapshots_1 | 6 +- src/restic/testdata/filter_snapshots_2 | 4 +- src/restic/testdata/filter_snapshots_3 | 32 +-- src/restic/testdata/filter_snapshots_4 | 4 +- src/restic/testdata/filter_snapshots_5 | 4 +- 25 files changed, 614 insertions(+), 614 deletions(-) diff --git a/src/restic/snapshot_filter_test.go b/src/restic/snapshot_filter_test.go index 7a902a68..07d2e106 100644 --- a/src/restic/snapshot_filter_test.go +++ b/src/restic/snapshot_filter_test.go @@ -12,34 +12,34 @@ import ( ) func parseTime(s string) time.Time { - t, err := time.Parse("2006-01-02 15:04:05 -0700", s) + t, err := time.Parse("2006-01-02 15:04:05", s) if err != nil { 
panic(err) } - return t.Local() + return t.UTC() } var testFilterSnapshots = Snapshots{ - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-01 01:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 01:03:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-03 07:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 07:08:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 10:23:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 11:23:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:23:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:24:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:28:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:30:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 16:23:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-05 09:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-06 08:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-07 10:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "root", Time: parseTime("2016-01-08 20:02:03 +0100"), Paths: []string{"/usr", "/sbin"}}, - {Hostname: "foo", Username: "root", Time: parseTime("2016-01-09 21:02:03 +0100"), 
Paths: []string{"/usr", "/sbin"}}, - {Hostname: "bar", Username: "root", Time: parseTime("2016-01-12 21:02:03 +0100"), Paths: []string{"/usr", "/sbin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-12 21:08:03 +0100"), Paths: []string{"/usr", "/bin"}}, - {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-18 12:02:03 +0100"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-01 01:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 01:03:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-03 07:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "bar", Username: "testuser", Time: parseTime("2016-01-01 07:08:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 10:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 11:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:24:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:28:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 12:30:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-04 16:23:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-05 09:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-06 08:02:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-07 10:02:03"), Paths: 
[]string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "root", Time: parseTime("2016-01-08 20:02:03"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "foo", Username: "root", Time: parseTime("2016-01-09 21:02:03"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "bar", Username: "root", Time: parseTime("2016-01-12 21:02:03"), Paths: []string{"/usr", "/sbin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-12 21:08:03"), Paths: []string{"/usr", "/bin"}}, + {Hostname: "foo", Username: "testuser", Time: parseTime("2016-01-18 12:02:03"), Paths: []string{"/usr", "/bin"}}, } var filterTests = []SnapshotFilter{ @@ -87,105 +87,105 @@ func TestFilterSnapshots(t *testing.T) { } var testExpireSnapshots = Snapshots{ - {Time: parseTime("2014-09-01 10:20:30 +0100")}, - {Time: parseTime("2014-09-02 10:20:30 +0100")}, - {Time: parseTime("2014-09-05 10:20:30 +0100")}, - {Time: parseTime("2014-09-06 10:20:30 +0100")}, - {Time: parseTime("2014-09-08 10:20:30 +0100")}, - {Time: parseTime("2014-09-09 10:20:30 +0100")}, - {Time: parseTime("2014-09-10 10:20:30 +0100")}, - {Time: parseTime("2014-09-11 10:20:30 +0100")}, - {Time: parseTime("2014-09-20 10:20:30 +0100")}, - {Time: parseTime("2014-09-22 10:20:30 +0100")}, - {Time: parseTime("2014-08-08 10:20:30 +0100")}, - {Time: parseTime("2014-08-10 10:20:30 +0100")}, - {Time: parseTime("2014-08-12 10:20:30 +0100")}, - {Time: parseTime("2014-08-13 10:20:30 +0100")}, - {Time: parseTime("2014-08-13 10:20:30 +0100")}, - {Time: parseTime("2014-08-15 10:20:30 +0100")}, - {Time: parseTime("2014-08-18 10:20:30 +0100")}, - {Time: parseTime("2014-08-20 10:20:30 +0100")}, - {Time: parseTime("2014-08-21 10:20:30 +0100")}, - {Time: parseTime("2014-08-22 10:20:30 +0100")}, - {Time: parseTime("2014-10-01 10:20:30 +0100")}, - {Time: parseTime("2014-10-02 10:20:30 +0100")}, - {Time: parseTime("2014-10-05 10:20:30 +0100")}, - {Time: parseTime("2014-10-06 10:20:30 +0100")}, - {Time: parseTime("2014-10-08 10:20:30 +0100")}, - {Time: 
parseTime("2014-10-09 10:20:30 +0100")}, - {Time: parseTime("2014-10-10 10:20:30 +0100")}, - {Time: parseTime("2014-10-11 10:20:30 +0100")}, - {Time: parseTime("2014-10-20 10:20:30 +0100")}, - {Time: parseTime("2014-10-22 10:20:30 +0100")}, - {Time: parseTime("2014-11-08 10:20:30 +0100")}, - {Time: parseTime("2014-11-10 10:20:30 +0100")}, - {Time: parseTime("2014-11-12 10:20:30 +0100")}, - {Time: parseTime("2014-11-13 10:20:30 +0100")}, - {Time: parseTime("2014-11-13 10:20:30 +0100")}, - {Time: parseTime("2014-11-15 10:20:30 +0100")}, - {Time: parseTime("2014-11-18 10:20:30 +0100")}, - {Time: parseTime("2014-11-20 10:20:30 +0100")}, - {Time: parseTime("2014-11-21 10:20:30 +0100")}, - {Time: parseTime("2014-11-22 10:20:30 +0100")}, - {Time: parseTime("2015-09-01 10:20:30 +0100")}, - {Time: parseTime("2015-09-02 10:20:30 +0100")}, - {Time: parseTime("2015-09-05 10:20:30 +0100")}, - {Time: parseTime("2015-09-06 10:20:30 +0100")}, - {Time: parseTime("2015-09-08 10:20:30 +0100")}, - {Time: parseTime("2015-09-09 10:20:30 +0100")}, - {Time: parseTime("2015-09-10 10:20:30 +0100")}, - {Time: parseTime("2015-09-11 10:20:30 +0100")}, - {Time: parseTime("2015-09-20 10:20:30 +0100")}, - {Time: parseTime("2015-09-22 10:20:30 +0100")}, - {Time: parseTime("2015-08-08 10:20:30 +0100")}, - {Time: parseTime("2015-08-10 10:20:30 +0100")}, - {Time: parseTime("2015-08-12 10:20:30 +0100")}, - {Time: parseTime("2015-08-13 10:20:30 +0100")}, - {Time: parseTime("2015-08-13 10:20:30 +0100")}, - {Time: parseTime("2015-08-15 10:20:30 +0100")}, - {Time: parseTime("2015-08-18 10:20:30 +0100")}, - {Time: parseTime("2015-08-20 10:20:30 +0100")}, - {Time: parseTime("2015-08-21 10:20:30 +0100")}, - {Time: parseTime("2015-08-22 10:20:30 +0100")}, - {Time: parseTime("2015-10-01 10:20:30 +0100")}, - {Time: parseTime("2015-10-02 10:20:30 +0100")}, - {Time: parseTime("2015-10-05 10:20:30 +0100")}, - {Time: parseTime("2015-10-06 10:20:30 +0100")}, - {Time: parseTime("2015-10-08 10:20:30 +0100")}, - {Time: 
parseTime("2015-10-09 10:20:30 +0100")}, - {Time: parseTime("2015-10-10 10:20:30 +0100")}, - {Time: parseTime("2015-10-11 10:20:30 +0100")}, - {Time: parseTime("2015-10-20 10:20:30 +0100")}, - {Time: parseTime("2015-10-22 10:20:30 +0100")}, - {Time: parseTime("2015-11-08 10:20:30 +0100")}, - {Time: parseTime("2015-11-10 10:20:30 +0100")}, - {Time: parseTime("2015-11-12 10:20:30 +0100")}, - {Time: parseTime("2015-11-13 10:20:30 +0100")}, - {Time: parseTime("2015-11-13 10:20:30 +0100")}, - {Time: parseTime("2015-11-15 10:20:30 +0100")}, - {Time: parseTime("2015-11-18 10:20:30 +0100")}, - {Time: parseTime("2015-11-20 10:20:30 +0100")}, - {Time: parseTime("2015-11-21 10:20:30 +0100")}, - {Time: parseTime("2015-11-22 10:20:30 +0100")}, - {Time: parseTime("2016-01-01 01:02:03 +0100")}, - {Time: parseTime("2016-01-01 01:03:03 +0100")}, - {Time: parseTime("2016-01-01 07:08:03 +0100")}, - {Time: parseTime("2016-01-03 07:02:03 +0100")}, - {Time: parseTime("2016-01-04 10:23:03 +0100")}, - {Time: parseTime("2016-01-04 11:23:03 +0100")}, - {Time: parseTime("2016-01-04 12:23:03 +0100")}, - {Time: parseTime("2016-01-04 12:24:03 +0100")}, - {Time: parseTime("2016-01-04 12:28:03 +0100")}, - {Time: parseTime("2016-01-04 12:30:03 +0100")}, - {Time: parseTime("2016-01-04 16:23:03 +0100")}, - {Time: parseTime("2016-01-05 09:02:03 +0100")}, - {Time: parseTime("2016-01-06 08:02:03 +0100")}, - {Time: parseTime("2016-01-07 10:02:03 +0100")}, - {Time: parseTime("2016-01-08 20:02:03 +0100")}, - {Time: parseTime("2016-01-09 21:02:03 +0100")}, - {Time: parseTime("2016-01-12 21:02:03 +0100")}, - {Time: parseTime("2016-01-12 21:08:03 +0100")}, - {Time: parseTime("2016-01-18 12:02:03 +0100")}, + {Time: parseTime("2014-09-01 10:20:30")}, + {Time: parseTime("2014-09-02 10:20:30")}, + {Time: parseTime("2014-09-05 10:20:30")}, + {Time: parseTime("2014-09-06 10:20:30")}, + {Time: parseTime("2014-09-08 10:20:30")}, + {Time: parseTime("2014-09-09 10:20:30")}, + {Time: parseTime("2014-09-10 10:20:30")}, 
+ {Time: parseTime("2014-09-11 10:20:30")}, + {Time: parseTime("2014-09-20 10:20:30")}, + {Time: parseTime("2014-09-22 10:20:30")}, + {Time: parseTime("2014-08-08 10:20:30")}, + {Time: parseTime("2014-08-10 10:20:30")}, + {Time: parseTime("2014-08-12 10:20:30")}, + {Time: parseTime("2014-08-13 10:20:30")}, + {Time: parseTime("2014-08-13 10:20:30")}, + {Time: parseTime("2014-08-15 10:20:30")}, + {Time: parseTime("2014-08-18 10:20:30")}, + {Time: parseTime("2014-08-20 10:20:30")}, + {Time: parseTime("2014-08-21 10:20:30")}, + {Time: parseTime("2014-08-22 10:20:30")}, + {Time: parseTime("2014-10-01 10:20:30")}, + {Time: parseTime("2014-10-02 10:20:30")}, + {Time: parseTime("2014-10-05 10:20:30")}, + {Time: parseTime("2014-10-06 10:20:30")}, + {Time: parseTime("2014-10-08 10:20:30")}, + {Time: parseTime("2014-10-09 10:20:30")}, + {Time: parseTime("2014-10-10 10:20:30")}, + {Time: parseTime("2014-10-11 10:20:30")}, + {Time: parseTime("2014-10-20 10:20:30")}, + {Time: parseTime("2014-10-22 10:20:30")}, + {Time: parseTime("2014-11-08 10:20:30")}, + {Time: parseTime("2014-11-10 10:20:30")}, + {Time: parseTime("2014-11-12 10:20:30")}, + {Time: parseTime("2014-11-13 10:20:30")}, + {Time: parseTime("2014-11-13 10:20:30")}, + {Time: parseTime("2014-11-15 10:20:30")}, + {Time: parseTime("2014-11-18 10:20:30")}, + {Time: parseTime("2014-11-20 10:20:30")}, + {Time: parseTime("2014-11-21 10:20:30")}, + {Time: parseTime("2014-11-22 10:20:30")}, + {Time: parseTime("2015-09-01 10:20:30")}, + {Time: parseTime("2015-09-02 10:20:30")}, + {Time: parseTime("2015-09-05 10:20:30")}, + {Time: parseTime("2015-09-06 10:20:30")}, + {Time: parseTime("2015-09-08 10:20:30")}, + {Time: parseTime("2015-09-09 10:20:30")}, + {Time: parseTime("2015-09-10 10:20:30")}, + {Time: parseTime("2015-09-11 10:20:30")}, + {Time: parseTime("2015-09-20 10:20:30")}, + {Time: parseTime("2015-09-22 10:20:30")}, + {Time: parseTime("2015-08-08 10:20:30")}, + {Time: parseTime("2015-08-10 10:20:30")}, + {Time: 
parseTime("2015-08-12 10:20:30")}, + {Time: parseTime("2015-08-13 10:20:30")}, + {Time: parseTime("2015-08-13 10:20:30")}, + {Time: parseTime("2015-08-15 10:20:30")}, + {Time: parseTime("2015-08-18 10:20:30")}, + {Time: parseTime("2015-08-20 10:20:30")}, + {Time: parseTime("2015-08-21 10:20:30")}, + {Time: parseTime("2015-08-22 10:20:30")}, + {Time: parseTime("2015-10-01 10:20:30")}, + {Time: parseTime("2015-10-02 10:20:30")}, + {Time: parseTime("2015-10-05 10:20:30")}, + {Time: parseTime("2015-10-06 10:20:30")}, + {Time: parseTime("2015-10-08 10:20:30")}, + {Time: parseTime("2015-10-09 10:20:30")}, + {Time: parseTime("2015-10-10 10:20:30")}, + {Time: parseTime("2015-10-11 10:20:30")}, + {Time: parseTime("2015-10-20 10:20:30")}, + {Time: parseTime("2015-10-22 10:20:30")}, + {Time: parseTime("2015-11-08 10:20:30")}, + {Time: parseTime("2015-11-10 10:20:30")}, + {Time: parseTime("2015-11-12 10:20:30")}, + {Time: parseTime("2015-11-13 10:20:30")}, + {Time: parseTime("2015-11-13 10:20:30")}, + {Time: parseTime("2015-11-15 10:20:30")}, + {Time: parseTime("2015-11-18 10:20:30")}, + {Time: parseTime("2015-11-20 10:20:30")}, + {Time: parseTime("2015-11-21 10:20:30")}, + {Time: parseTime("2015-11-22 10:20:30")}, + {Time: parseTime("2016-01-01 01:02:03")}, + {Time: parseTime("2016-01-01 01:03:03")}, + {Time: parseTime("2016-01-01 07:08:03")}, + {Time: parseTime("2016-01-03 07:02:03")}, + {Time: parseTime("2016-01-04 10:23:03")}, + {Time: parseTime("2016-01-04 11:23:03")}, + {Time: parseTime("2016-01-04 12:23:03")}, + {Time: parseTime("2016-01-04 12:24:03")}, + {Time: parseTime("2016-01-04 12:28:03")}, + {Time: parseTime("2016-01-04 12:30:03")}, + {Time: parseTime("2016-01-04 16:23:03")}, + {Time: parseTime("2016-01-05 09:02:03")}, + {Time: parseTime("2016-01-06 08:02:03")}, + {Time: parseTime("2016-01-07 10:02:03")}, + {Time: parseTime("2016-01-08 20:02:03")}, + {Time: parseTime("2016-01-09 21:02:03")}, + {Time: parseTime("2016-01-12 21:02:03")}, + {Time: 
parseTime("2016-01-12 21:08:03")}, + {Time: parseTime("2016-01-18 12:02:03")}, } var expireTests = []ExpirePolicy{ diff --git a/src/restic/testdata/expired_snapshots_0 b/src/restic/testdata/expired_snapshots_0 index d83ec624..d70bdbaa 100644 --- a/src/restic/testdata/expired_snapshots_0 +++ b/src/restic/testdata/expired_snapshots_0 @@ -1,496 +1,496 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:02:03+01:00", + "time": "2016-01-12T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:30:03+01:00", + "time": "2016-01-04T12:30:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:28:03+01:00", + "time": "2016-01-04T12:28:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:24:03+01:00", + "time": "2016-01-04T12:24:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:23:03+01:00", + "time": "2016-01-04T12:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T11:23:03+01:00", + "time": "2016-01-04T11:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T10:23:03+01:00", + "time": "2016-01-04T10:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": 
"2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T07:08:03+01:00", + "time": "2016-01-01T07:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T01:03:03+01:00", + "time": "2016-01-01T01:03:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T01:02:03+01:00", + "time": "2016-01-01T01:02:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-21T10:20:30+01:00", + "time": "2015-11-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-20T10:20:30+01:00", + "time": "2015-11-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-18T10:20:30+01:00", + "time": "2015-11-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-15T10:20:30+01:00", + "time": "2015-11-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-13T10:20:30+01:00", + "time": "2015-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-13T10:20:30+01:00", + "time": "2015-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-12T10:20:30+01:00", + "time": "2015-11-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-10T10:20:30+01:00", + "time": "2015-11-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-08T10:20:30+01:00", + "time": "2015-11-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-22T11:20:30+02:00", + "time": "2015-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-20T11:20:30+02:00", + "time": "2015-10-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-11T11:20:30+02:00", + "time": "2015-10-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-10T11:20:30+02:00", + "time": "2015-10-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-09T11:20:30+02:00", + "time": "2015-10-09T10:20:30Z", "tree": null, "paths": null }, { - "time": 
"2015-10-08T11:20:30+02:00", + "time": "2015-10-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-06T11:20:30+02:00", + "time": "2015-10-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-05T11:20:30+02:00", + "time": "2015-10-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-02T11:20:30+02:00", + "time": "2015-10-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-01T11:20:30+02:00", + "time": "2015-10-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-22T11:20:30+02:00", + "time": "2015-09-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-20T11:20:30+02:00", + "time": "2015-09-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-11T11:20:30+02:00", + "time": "2015-09-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-10T11:20:30+02:00", + "time": "2015-09-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-09T11:20:30+02:00", + "time": "2015-09-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-08T11:20:30+02:00", + "time": "2015-09-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-06T11:20:30+02:00", + "time": "2015-09-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-05T11:20:30+02:00", + "time": "2015-09-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-02T11:20:30+02:00", + "time": "2015-09-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-01T11:20:30+02:00", + "time": "2015-09-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-22T11:20:30+02:00", + "time": "2015-08-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-21T11:20:30+02:00", + "time": "2015-08-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-20T11:20:30+02:00", + "time": "2015-08-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-18T11:20:30+02:00", + "time": "2015-08-18T10:20:30Z", "tree": null, "paths": null 
}, { - "time": "2015-08-15T11:20:30+02:00", + "time": "2015-08-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-13T11:20:30+02:00", + "time": "2015-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-13T11:20:30+02:00", + "time": "2015-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-12T11:20:30+02:00", + "time": "2015-08-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-10T11:20:30+02:00", + "time": "2015-08-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-08T11:20:30+02:00", + "time": "2015-08-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-22T10:20:30+01:00", + "time": "2014-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-21T10:20:30+01:00", + "time": "2014-11-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-20T10:20:30+01:00", + "time": "2014-11-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-18T10:20:30+01:00", + "time": "2014-11-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-15T10:20:30+01:00", + "time": "2014-11-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-13T10:20:30+01:00", + "time": "2014-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-13T10:20:30+01:00", + "time": "2014-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-12T10:20:30+01:00", + "time": "2014-11-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-10T10:20:30+01:00", + "time": "2014-11-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-08T10:20:30+01:00", + "time": "2014-11-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-22T11:20:30+02:00", + "time": "2014-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-20T11:20:30+02:00", + "time": "2014-10-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-11T11:20:30+02:00", + "time": "2014-10-11T10:20:30Z", "tree": null, 
"paths": null }, { - "time": "2014-10-10T11:20:30+02:00", + "time": "2014-10-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-09T11:20:30+02:00", + "time": "2014-10-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-08T11:20:30+02:00", + "time": "2014-10-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-06T11:20:30+02:00", + "time": "2014-10-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-05T11:20:30+02:00", + "time": "2014-10-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-02T11:20:30+02:00", + "time": "2014-10-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-01T11:20:30+02:00", + "time": "2014-10-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-22T11:20:30+02:00", + "time": "2014-09-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-20T11:20:30+02:00", + "time": "2014-09-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-11T11:20:30+02:00", + "time": "2014-09-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-10T11:20:30+02:00", + "time": "2014-09-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-09T11:20:30+02:00", + "time": "2014-09-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-08T11:20:30+02:00", + "time": "2014-09-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-06T11:20:30+02:00", + "time": "2014-09-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-05T11:20:30+02:00", + "time": "2014-09-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-02T11:20:30+02:00", + "time": "2014-09-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-01T11:20:30+02:00", + "time": "2014-09-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-22T11:20:30+02:00", + "time": "2014-08-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-21T11:20:30+02:00", + "time": "2014-08-21T10:20:30Z", 
"tree": null, "paths": null }, { - "time": "2014-08-20T11:20:30+02:00", + "time": "2014-08-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-18T11:20:30+02:00", + "time": "2014-08-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-15T11:20:30+02:00", + "time": "2014-08-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-13T11:20:30+02:00", + "time": "2014-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-13T11:20:30+02:00", + "time": "2014-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-12T11:20:30+02:00", + "time": "2014-08-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-10T11:20:30+02:00", + "time": "2014-08-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-08T11:20:30+02:00", + "time": "2014-08-08T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_1 b/src/restic/testdata/expired_snapshots_1 index 5a6f3e8a..22e6c214 100644 --- a/src/restic/testdata/expired_snapshots_1 +++ b/src/restic/testdata/expired_snapshots_1 @@ -1,51 +1,51 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:02:03+01:00", + "time": "2016-01-12T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": 
"2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:30:03+01:00", + "time": "2016-01-04T12:30:03Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_10 b/src/restic/testdata/expired_snapshots_10 index 63fbd8b0..72ae755c 100644 --- a/src/restic/testdata/expired_snapshots_10 +++ b/src/restic/testdata/expired_snapshots_10 @@ -1,61 +1,61 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:02:03+01:00", + "time": "2016-01-12T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T07:08:03+01:00", + "time": "2016-01-01T07:08:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_11 b/src/restic/testdata/expired_snapshots_11 index 853fdce7..d36f97b3 100644 --- a/src/restic/testdata/expired_snapshots_11 +++ b/src/restic/testdata/expired_snapshots_11 @@ -1,11 +1,11 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + 
"time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_12 b/src/restic/testdata/expired_snapshots_12 index c89b120a..57b4ab84 100644 --- a/src/restic/testdata/expired_snapshots_12 +++ b/src/restic/testdata/expired_snapshots_12 @@ -1,21 +1,21 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_13 b/src/restic/testdata/expired_snapshots_13 index 240906b1..93a52ad8 100644 --- a/src/restic/testdata/expired_snapshots_13 +++ b/src/restic/testdata/expired_snapshots_13 @@ -1,36 +1,36 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-15T10:20:30+01:00", + "time": "2015-11-15T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_14 b/src/restic/testdata/expired_snapshots_14 index 938a8527..5126b3e0 100644 --- a/src/restic/testdata/expired_snapshots_14 +++ 
b/src/restic/testdata/expired_snapshots_14 @@ -1,31 +1,31 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-22T11:20:30+02:00", + "time": "2015-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-22T11:20:30+02:00", + "time": "2015-09-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-22T11:20:30+02:00", + "time": "2015-08-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-22T10:20:30+01:00", + "time": "2014-11-22T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_15 b/src/restic/testdata/expired_snapshots_15 index 52f6b1aa..58d52ae4 100644 --- a/src/restic/testdata/expired_snapshots_15 +++ b/src/restic/testdata/expired_snapshots_15 @@ -1,51 +1,51 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-22T11:20:30+02:00", + "time": "2015-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-22T11:20:30+02:00", + "time": "2015-09-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-22T11:20:30+02:00", + "time": "2015-08-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-22T10:20:30+01:00", + "time": "2014-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-22T11:20:30+02:00", + "time": "2014-10-22T10:20:30Z", "tree": null, "paths": null } 
diff --git a/src/restic/testdata/expired_snapshots_16 b/src/restic/testdata/expired_snapshots_16 index aaf743f0..37a905da 100644 --- a/src/restic/testdata/expired_snapshots_16 +++ b/src/restic/testdata/expired_snapshots_16 @@ -1,16 +1,16 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-22T10:20:30+01:00", + "time": "2014-11-22T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_17 b/src/restic/testdata/expired_snapshots_17 index eddb2c83..553c18d8 100644 --- a/src/restic/testdata/expired_snapshots_17 +++ b/src/restic/testdata/expired_snapshots_17 @@ -1,71 +1,71 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-22T11:20:30+02:00", + "time": "2015-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": 
"2015-09-22T11:20:30+02:00", + "time": "2015-09-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-22T11:20:30+02:00", + "time": "2015-08-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-22T10:20:30+01:00", + "time": "2014-11-22T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_2 b/src/restic/testdata/expired_snapshots_2 index fcc8bb97..867e90ff 100644 --- a/src/restic/testdata/expired_snapshots_2 +++ b/src/restic/testdata/expired_snapshots_2 @@ -1,76 +1,76 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:02:03+01:00", + "time": "2016-01-12T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:30:03+01:00", + "time": "2016-01-04T12:30:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:28:03+01:00", + "time": "2016-01-04T12:28:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:24:03+01:00", + "time": "2016-01-04T12:24:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:23:03+01:00", + "time": "2016-01-04T12:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T11:23:03+01:00", + "time": "2016-01-04T11:23:03Z", 
"tree": null, "paths": null }, { - "time": "2016-01-04T10:23:03+01:00", + "time": "2016-01-04T10:23:03Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_3 b/src/restic/testdata/expired_snapshots_3 index d83ec624..d70bdbaa 100644 --- a/src/restic/testdata/expired_snapshots_3 +++ b/src/restic/testdata/expired_snapshots_3 @@ -1,496 +1,496 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:02:03+01:00", + "time": "2016-01-12T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:30:03+01:00", + "time": "2016-01-04T12:30:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:28:03+01:00", + "time": "2016-01-04T12:28:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:24:03+01:00", + "time": "2016-01-04T12:24:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:23:03+01:00", + "time": "2016-01-04T12:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T11:23:03+01:00", + "time": "2016-01-04T11:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T10:23:03+01:00", + "time": "2016-01-04T10:23:03Z", "tree": null, "paths": null }, { - "time": 
"2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T07:08:03+01:00", + "time": "2016-01-01T07:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T01:03:03+01:00", + "time": "2016-01-01T01:03:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T01:02:03+01:00", + "time": "2016-01-01T01:02:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-21T10:20:30+01:00", + "time": "2015-11-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-20T10:20:30+01:00", + "time": "2015-11-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-18T10:20:30+01:00", + "time": "2015-11-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-15T10:20:30+01:00", + "time": "2015-11-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-13T10:20:30+01:00", + "time": "2015-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-13T10:20:30+01:00", + "time": "2015-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-12T10:20:30+01:00", + "time": "2015-11-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-10T10:20:30+01:00", + "time": "2015-11-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-08T10:20:30+01:00", + "time": "2015-11-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-22T11:20:30+02:00", + "time": "2015-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-20T11:20:30+02:00", + "time": "2015-10-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-11T11:20:30+02:00", + "time": "2015-10-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-10T11:20:30+02:00", + "time": "2015-10-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-09T11:20:30+02:00", + "time": "2015-10-09T10:20:30Z", "tree": null, "paths": null 
}, { - "time": "2015-10-08T11:20:30+02:00", + "time": "2015-10-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-06T11:20:30+02:00", + "time": "2015-10-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-05T11:20:30+02:00", + "time": "2015-10-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-02T11:20:30+02:00", + "time": "2015-10-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-01T11:20:30+02:00", + "time": "2015-10-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-22T11:20:30+02:00", + "time": "2015-09-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-20T11:20:30+02:00", + "time": "2015-09-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-11T11:20:30+02:00", + "time": "2015-09-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-10T11:20:30+02:00", + "time": "2015-09-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-09T11:20:30+02:00", + "time": "2015-09-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-08T11:20:30+02:00", + "time": "2015-09-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-06T11:20:30+02:00", + "time": "2015-09-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-05T11:20:30+02:00", + "time": "2015-09-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-02T11:20:30+02:00", + "time": "2015-09-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-01T11:20:30+02:00", + "time": "2015-09-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-22T11:20:30+02:00", + "time": "2015-08-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-21T11:20:30+02:00", + "time": "2015-08-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-20T11:20:30+02:00", + "time": "2015-08-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-18T11:20:30+02:00", + "time": "2015-08-18T10:20:30Z", "tree": null, 
"paths": null }, { - "time": "2015-08-15T11:20:30+02:00", + "time": "2015-08-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-13T11:20:30+02:00", + "time": "2015-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-13T11:20:30+02:00", + "time": "2015-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-12T11:20:30+02:00", + "time": "2015-08-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-10T11:20:30+02:00", + "time": "2015-08-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-08T11:20:30+02:00", + "time": "2015-08-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-22T10:20:30+01:00", + "time": "2014-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-21T10:20:30+01:00", + "time": "2014-11-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-20T10:20:30+01:00", + "time": "2014-11-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-18T10:20:30+01:00", + "time": "2014-11-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-15T10:20:30+01:00", + "time": "2014-11-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-13T10:20:30+01:00", + "time": "2014-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-13T10:20:30+01:00", + "time": "2014-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-12T10:20:30+01:00", + "time": "2014-11-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-10T10:20:30+01:00", + "time": "2014-11-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-08T10:20:30+01:00", + "time": "2014-11-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-22T11:20:30+02:00", + "time": "2014-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-20T11:20:30+02:00", + "time": "2014-10-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-11T11:20:30+02:00", + "time": "2014-10-11T10:20:30Z", 
"tree": null, "paths": null }, { - "time": "2014-10-10T11:20:30+02:00", + "time": "2014-10-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-09T11:20:30+02:00", + "time": "2014-10-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-08T11:20:30+02:00", + "time": "2014-10-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-06T11:20:30+02:00", + "time": "2014-10-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-05T11:20:30+02:00", + "time": "2014-10-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-02T11:20:30+02:00", + "time": "2014-10-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-01T11:20:30+02:00", + "time": "2014-10-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-22T11:20:30+02:00", + "time": "2014-09-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-20T11:20:30+02:00", + "time": "2014-09-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-11T11:20:30+02:00", + "time": "2014-09-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-10T11:20:30+02:00", + "time": "2014-09-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-09T11:20:30+02:00", + "time": "2014-09-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-08T11:20:30+02:00", + "time": "2014-09-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-06T11:20:30+02:00", + "time": "2014-09-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-05T11:20:30+02:00", + "time": "2014-09-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-02T11:20:30+02:00", + "time": "2014-09-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-01T11:20:30+02:00", + "time": "2014-09-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-22T11:20:30+02:00", + "time": "2014-08-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-21T11:20:30+02:00", + "time": 
"2014-08-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-20T11:20:30+02:00", + "time": "2014-08-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-18T11:20:30+02:00", + "time": "2014-08-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-15T11:20:30+02:00", + "time": "2014-08-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-13T11:20:30+02:00", + "time": "2014-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-13T11:20:30+02:00", + "time": "2014-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-12T11:20:30+02:00", + "time": "2014-08-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-10T11:20:30+02:00", + "time": "2014-08-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-08T11:20:30+02:00", + "time": "2014-08-08T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_4 b/src/restic/testdata/expired_snapshots_4 index d83ec624..d70bdbaa 100644 --- a/src/restic/testdata/expired_snapshots_4 +++ b/src/restic/testdata/expired_snapshots_4 @@ -1,496 +1,496 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:02:03+01:00", + "time": "2016-01-12T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - 
"time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:30:03+01:00", + "time": "2016-01-04T12:30:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:28:03+01:00", + "time": "2016-01-04T12:28:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:24:03+01:00", + "time": "2016-01-04T12:24:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:23:03+01:00", + "time": "2016-01-04T12:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T11:23:03+01:00", + "time": "2016-01-04T11:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T10:23:03+01:00", + "time": "2016-01-04T10:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T07:08:03+01:00", + "time": "2016-01-01T07:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T01:03:03+01:00", + "time": "2016-01-01T01:03:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T01:02:03+01:00", + "time": "2016-01-01T01:02:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-21T10:20:30+01:00", + "time": "2015-11-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-20T10:20:30+01:00", + "time": "2015-11-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-18T10:20:30+01:00", + "time": "2015-11-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-15T10:20:30+01:00", + "time": "2015-11-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-13T10:20:30+01:00", + "time": "2015-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-13T10:20:30+01:00", + "time": "2015-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-12T10:20:30+01:00", + "time": "2015-11-12T10:20:30Z", "tree": null, 
"paths": null }, { - "time": "2015-11-10T10:20:30+01:00", + "time": "2015-11-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-08T10:20:30+01:00", + "time": "2015-11-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-22T11:20:30+02:00", + "time": "2015-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-20T11:20:30+02:00", + "time": "2015-10-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-11T11:20:30+02:00", + "time": "2015-10-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-10T11:20:30+02:00", + "time": "2015-10-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-09T11:20:30+02:00", + "time": "2015-10-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-08T11:20:30+02:00", + "time": "2015-10-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-06T11:20:30+02:00", + "time": "2015-10-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-05T11:20:30+02:00", + "time": "2015-10-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-02T11:20:30+02:00", + "time": "2015-10-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-01T11:20:30+02:00", + "time": "2015-10-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-22T11:20:30+02:00", + "time": "2015-09-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-20T11:20:30+02:00", + "time": "2015-09-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-11T11:20:30+02:00", + "time": "2015-09-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-10T11:20:30+02:00", + "time": "2015-09-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-09T11:20:30+02:00", + "time": "2015-09-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-08T11:20:30+02:00", + "time": "2015-09-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-06T11:20:30+02:00", + "time": "2015-09-06T10:20:30Z", 
"tree": null, "paths": null }, { - "time": "2015-09-05T11:20:30+02:00", + "time": "2015-09-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-02T11:20:30+02:00", + "time": "2015-09-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-01T11:20:30+02:00", + "time": "2015-09-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-22T11:20:30+02:00", + "time": "2015-08-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-21T11:20:30+02:00", + "time": "2015-08-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-20T11:20:30+02:00", + "time": "2015-08-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-18T11:20:30+02:00", + "time": "2015-08-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-15T11:20:30+02:00", + "time": "2015-08-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-13T11:20:30+02:00", + "time": "2015-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-13T11:20:30+02:00", + "time": "2015-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-12T11:20:30+02:00", + "time": "2015-08-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-10T11:20:30+02:00", + "time": "2015-08-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-08-08T11:20:30+02:00", + "time": "2015-08-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-22T10:20:30+01:00", + "time": "2014-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-21T10:20:30+01:00", + "time": "2014-11-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-20T10:20:30+01:00", + "time": "2014-11-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-18T10:20:30+01:00", + "time": "2014-11-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-15T10:20:30+01:00", + "time": "2014-11-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-13T10:20:30+01:00", + "time": 
"2014-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-13T10:20:30+01:00", + "time": "2014-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-12T10:20:30+01:00", + "time": "2014-11-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-10T10:20:30+01:00", + "time": "2014-11-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-11-08T10:20:30+01:00", + "time": "2014-11-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-22T11:20:30+02:00", + "time": "2014-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-20T11:20:30+02:00", + "time": "2014-10-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-11T11:20:30+02:00", + "time": "2014-10-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-10T11:20:30+02:00", + "time": "2014-10-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-09T11:20:30+02:00", + "time": "2014-10-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-08T11:20:30+02:00", + "time": "2014-10-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-06T11:20:30+02:00", + "time": "2014-10-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-05T11:20:30+02:00", + "time": "2014-10-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-02T11:20:30+02:00", + "time": "2014-10-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-10-01T11:20:30+02:00", + "time": "2014-10-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-22T11:20:30+02:00", + "time": "2014-09-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-20T11:20:30+02:00", + "time": "2014-09-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-11T11:20:30+02:00", + "time": "2014-09-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-10T11:20:30+02:00", + "time": "2014-09-10T10:20:30Z", "tree": null, "paths": null }, { - "time": 
"2014-09-09T11:20:30+02:00", + "time": "2014-09-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-08T11:20:30+02:00", + "time": "2014-09-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-06T11:20:30+02:00", + "time": "2014-09-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-05T11:20:30+02:00", + "time": "2014-09-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-02T11:20:30+02:00", + "time": "2014-09-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-09-01T11:20:30+02:00", + "time": "2014-09-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-22T11:20:30+02:00", + "time": "2014-08-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-21T11:20:30+02:00", + "time": "2014-08-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-20T11:20:30+02:00", + "time": "2014-08-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-18T11:20:30+02:00", + "time": "2014-08-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-15T11:20:30+02:00", + "time": "2014-08-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-13T11:20:30+02:00", + "time": "2014-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-13T11:20:30+02:00", + "time": "2014-08-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-12T11:20:30+02:00", + "time": "2014-08-12T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-10T11:20:30+02:00", + "time": "2014-08-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2014-08-08T11:20:30+02:00", + "time": "2014-08-08T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_5 b/src/restic/testdata/expired_snapshots_5 index f84efb1f..42ed6051 100644 --- a/src/restic/testdata/expired_snapshots_5 +++ b/src/restic/testdata/expired_snapshots_5 @@ -1,101 +1,101 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", 
"tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T12:30:03+01:00", + "time": "2016-01-04T12:30:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T11:23:03+01:00", + "time": "2016-01-04T11:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T10:23:03+01:00", + "time": "2016-01-04T10:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T07:08:03+01:00", + "time": "2016-01-01T07:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T01:03:03+01:00", + "time": "2016-01-01T01:03:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-21T10:20:30+01:00", + "time": "2015-11-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-20T10:20:30+01:00", + "time": "2015-11-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-18T10:20:30+01:00", + "time": "2015-11-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-15T10:20:30+01:00", + "time": "2015-11-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-13T10:20:30+01:00", + "time": 
"2015-11-13T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_6 b/src/restic/testdata/expired_snapshots_6 index e862779c..69b347d7 100644 --- a/src/restic/testdata/expired_snapshots_6 +++ b/src/restic/testdata/expired_snapshots_6 @@ -1,16 +1,16 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_7 b/src/restic/testdata/expired_snapshots_7 index 6f53af13..ca08ff08 100644 --- a/src/restic/testdata/expired_snapshots_7 +++ b/src/restic/testdata/expired_snapshots_7 @@ -1,51 +1,51 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T07:08:03+01:00", + "time": "2016-01-01T07:08:03Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_8 
b/src/restic/testdata/expired_snapshots_8 index 750da193..b0a64d69 100644 --- a/src/restic/testdata/expired_snapshots_8 +++ b/src/restic/testdata/expired_snapshots_8 @@ -1,151 +1,151 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-01T07:08:03+01:00", + "time": "2016-01-01T07:08:03Z", "tree": null, "paths": null }, { - "time": "2015-11-22T10:20:30+01:00", + "time": "2015-11-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-21T10:20:30+01:00", + "time": "2015-11-21T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-20T10:20:30+01:00", + "time": "2015-11-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-18T10:20:30+01:00", + "time": "2015-11-18T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-15T10:20:30+01:00", + "time": "2015-11-15T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-13T10:20:30+01:00", + "time": "2015-11-13T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-12T10:20:30+01:00", + "time": "2015-11-12T10:20:30Z", "tree": null, "paths": null }, { - 
"time": "2015-11-10T10:20:30+01:00", + "time": "2015-11-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-11-08T10:20:30+01:00", + "time": "2015-11-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-22T11:20:30+02:00", + "time": "2015-10-22T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-20T11:20:30+02:00", + "time": "2015-10-20T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-11T11:20:30+02:00", + "time": "2015-10-11T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-10T11:20:30+02:00", + "time": "2015-10-10T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-09T11:20:30+02:00", + "time": "2015-10-09T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-08T11:20:30+02:00", + "time": "2015-10-08T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-06T11:20:30+02:00", + "time": "2015-10-06T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-05T11:20:30+02:00", + "time": "2015-10-05T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-02T11:20:30+02:00", + "time": "2015-10-02T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-10-01T11:20:30+02:00", + "time": "2015-10-01T10:20:30Z", "tree": null, "paths": null }, { - "time": "2015-09-22T11:20:30+02:00", + "time": "2015-09-22T10:20:30Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/expired_snapshots_9 b/src/restic/testdata/expired_snapshots_9 index fd72ec6a..0b577ae7 100644 --- a/src/restic/testdata/expired_snapshots_9 +++ b/src/restic/testdata/expired_snapshots_9 @@ -1,51 +1,51 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": null }, { - "time": "2016-01-12T21:02:03+01:00", + "time": "2016-01-12T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-09T21:02:03+01:00", + "time": 
"2016-01-09T21:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": null }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": null }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": null } diff --git a/src/restic/testdata/filter_snapshots_0 b/src/restic/testdata/filter_snapshots_0 index 022aa7ae..4afbe3b5 100644 --- a/src/restic/testdata/filter_snapshots_0 +++ b/src/restic/testdata/filter_snapshots_0 @@ -1,6 +1,6 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": [ "/usr", @@ -10,7 +10,7 @@ "username": "testuser" }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": [ "/usr", @@ -20,7 +20,7 @@ "username": "testuser" }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": [ "/usr", @@ -30,7 +30,7 @@ "username": "root" }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": [ "/usr", @@ -40,7 +40,7 @@ "username": "root" }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": [ "/usr", @@ -50,7 +50,7 @@ "username": "testuser" }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": [ "/usr", @@ -60,7 +60,7 @@ "username": "testuser" }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": [ "/usr", @@ -70,7 +70,7 @@ "username": "testuser" }, { - "time": 
"2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": [ "/usr", @@ -80,7 +80,7 @@ "username": "testuser" }, { - "time": "2016-01-04T12:30:03+01:00", + "time": "2016-01-04T12:30:03Z", "tree": null, "paths": [ "/usr", @@ -90,7 +90,7 @@ "username": "testuser" }, { - "time": "2016-01-04T12:28:03+01:00", + "time": "2016-01-04T12:28:03Z", "tree": null, "paths": [ "/usr", @@ -100,7 +100,7 @@ "username": "testuser" }, { - "time": "2016-01-04T12:24:03+01:00", + "time": "2016-01-04T12:24:03Z", "tree": null, "paths": [ "/usr", @@ -110,7 +110,7 @@ "username": "testuser" }, { - "time": "2016-01-04T12:23:03+01:00", + "time": "2016-01-04T12:23:03Z", "tree": null, "paths": [ "/usr", @@ -120,7 +120,7 @@ "username": "testuser" }, { - "time": "2016-01-04T11:23:03+01:00", + "time": "2016-01-04T11:23:03Z", "tree": null, "paths": [ "/usr", @@ -130,7 +130,7 @@ "username": "testuser" }, { - "time": "2016-01-04T10:23:03+01:00", + "time": "2016-01-04T10:23:03Z", "tree": null, "paths": [ "/usr", @@ -140,7 +140,7 @@ "username": "testuser" }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": [ "/usr", @@ -150,7 +150,7 @@ "username": "testuser" }, { - "time": "2016-01-01T01:02:03+01:00", + "time": "2016-01-01T01:02:03Z", "tree": null, "paths": [ "/usr", diff --git a/src/restic/testdata/filter_snapshots_1 b/src/restic/testdata/filter_snapshots_1 index 1239b481..c9bd44dd 100644 --- a/src/restic/testdata/filter_snapshots_1 +++ b/src/restic/testdata/filter_snapshots_1 @@ -1,6 +1,6 @@ [ { - "time": "2016-01-12T21:02:03+01:00", + "time": "2016-01-12T21:02:03Z", "tree": null, "paths": [ "/usr", @@ -10,7 +10,7 @@ "username": "root" }, { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": [ "/usr", @@ -20,7 +20,7 @@ "username": "root" }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": [ "/usr", diff --git 
a/src/restic/testdata/filter_snapshots_2 b/src/restic/testdata/filter_snapshots_2 index 1bde9e51..cae5b39e 100644 --- a/src/restic/testdata/filter_snapshots_2 +++ b/src/restic/testdata/filter_snapshots_2 @@ -1,6 +1,6 @@ [ { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": [ "/usr", @@ -10,7 +10,7 @@ "username": "root" }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": [ "/usr", diff --git a/src/restic/testdata/filter_snapshots_3 b/src/restic/testdata/filter_snapshots_3 index bf120b32..d30f4bf3 100644 --- a/src/restic/testdata/filter_snapshots_3 +++ b/src/restic/testdata/filter_snapshots_3 @@ -1,6 +1,6 @@ [ { - "time": "2016-01-18T12:02:03+01:00", + "time": "2016-01-18T12:02:03Z", "tree": null, "paths": [ "/usr", @@ -10,7 +10,7 @@ "username": "testuser" }, { - "time": "2016-01-12T21:08:03+01:00", + "time": "2016-01-12T21:08:03Z", "tree": null, "paths": [ "/usr", @@ -20,7 +20,7 @@ "username": "testuser" }, { - "time": "2016-01-07T10:02:03+01:00", + "time": "2016-01-07T10:02:03Z", "tree": null, "paths": [ "/usr", @@ -30,7 +30,7 @@ "username": "testuser" }, { - "time": "2016-01-06T08:02:03+01:00", + "time": "2016-01-06T08:02:03Z", "tree": null, "paths": [ "/usr", @@ -40,7 +40,7 @@ "username": "testuser" }, { - "time": "2016-01-05T09:02:03+01:00", + "time": "2016-01-05T09:02:03Z", "tree": null, "paths": [ "/usr", @@ -50,7 +50,7 @@ "username": "testuser" }, { - "time": "2016-01-04T16:23:03+01:00", + "time": "2016-01-04T16:23:03Z", "tree": null, "paths": [ "/usr", @@ -60,7 +60,7 @@ "username": "testuser" }, { - "time": "2016-01-04T12:30:03+01:00", + "time": "2016-01-04T12:30:03Z", "tree": null, "paths": [ "/usr", @@ -70,7 +70,7 @@ "username": "testuser" }, { - "time": "2016-01-04T12:28:03+01:00", + "time": "2016-01-04T12:28:03Z", "tree": null, "paths": [ "/usr", @@ -80,7 +80,7 @@ "username": "testuser" }, { - "time": "2016-01-04T12:24:03+01:00", + "time": 
"2016-01-04T12:24:03Z", "tree": null, "paths": [ "/usr", @@ -90,7 +90,7 @@ "username": "testuser" }, { - "time": "2016-01-04T12:23:03+01:00", + "time": "2016-01-04T12:23:03Z", "tree": null, "paths": [ "/usr", @@ -100,7 +100,7 @@ "username": "testuser" }, { - "time": "2016-01-04T11:23:03+01:00", + "time": "2016-01-04T11:23:03Z", "tree": null, "paths": [ "/usr", @@ -110,7 +110,7 @@ "username": "testuser" }, { - "time": "2016-01-04T10:23:03+01:00", + "time": "2016-01-04T10:23:03Z", "tree": null, "paths": [ "/usr", @@ -120,7 +120,7 @@ "username": "testuser" }, { - "time": "2016-01-03T07:02:03+01:00", + "time": "2016-01-03T07:02:03Z", "tree": null, "paths": [ "/usr", @@ -130,7 +130,7 @@ "username": "testuser" }, { - "time": "2016-01-01T07:08:03+01:00", + "time": "2016-01-01T07:08:03Z", "tree": null, "paths": [ "/usr", @@ -140,7 +140,7 @@ "username": "testuser" }, { - "time": "2016-01-01T01:03:03+01:00", + "time": "2016-01-01T01:03:03Z", "tree": null, "paths": [ "/usr", @@ -150,7 +150,7 @@ "username": "testuser" }, { - "time": "2016-01-01T01:02:03+01:00", + "time": "2016-01-01T01:02:03Z", "tree": null, "paths": [ "/usr", diff --git a/src/restic/testdata/filter_snapshots_4 b/src/restic/testdata/filter_snapshots_4 index 2c566811..a334bbcb 100644 --- a/src/restic/testdata/filter_snapshots_4 +++ b/src/restic/testdata/filter_snapshots_4 @@ -1,6 +1,6 @@ [ { - "time": "2016-01-01T07:08:03+01:00", + "time": "2016-01-01T07:08:03Z", "tree": null, "paths": [ "/usr", @@ -10,7 +10,7 @@ "username": "testuser" }, { - "time": "2016-01-01T01:03:03+01:00", + "time": "2016-01-01T01:03:03Z", "tree": null, "paths": [ "/usr", diff --git a/src/restic/testdata/filter_snapshots_5 b/src/restic/testdata/filter_snapshots_5 index 1bde9e51..cae5b39e 100644 --- a/src/restic/testdata/filter_snapshots_5 +++ b/src/restic/testdata/filter_snapshots_5 @@ -1,6 +1,6 @@ [ { - "time": "2016-01-09T21:02:03+01:00", + "time": "2016-01-09T21:02:03Z", "tree": null, "paths": [ "/usr", @@ -10,7 +10,7 @@ "username": 
"root" }, { - "time": "2016-01-08T20:02:03+01:00", + "time": "2016-01-08T20:02:03Z", "tree": null, "paths": [ "/usr", From aa29c68189ef72dc71af0d049d38bd8ae23d5fe3 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sat, 20 Aug 2016 20:44:57 +0200 Subject: [PATCH 98/98] Fix progress for new index --- src/cmds/restic/cmd_prune.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/cmds/restic/cmd_prune.go b/src/cmds/restic/cmd_prune.go index b7a2653a..a2aa6451 100644 --- a/src/cmds/restic/cmd_prune.go +++ b/src/cmds/restic/cmd_prune.go @@ -185,6 +185,9 @@ func (cmd CmdPrune) Execute(args []string) error { cmd.global.Verbosef("creating new index\n") + for _ = range repo.List(backend.Data, done) { + stats.packs++ + } bar = newProgressMax(cmd.global.ShowProgress(), uint64(stats.packs), "files") idx, err = index.New(repo, bar) if err != nil {