diff --git a/internal/repository/pack/pack.go b/internal/repository/pack/pack.go
index e9c0ab77b..8f4d0d52a 100644
--- a/internal/repository/pack/pack.go
+++ b/internal/repository/pack/pack.go
@@ -163,6 +163,8 @@ func makeHeader(blobs []restic.Blob) ([]byte, error) {
 	return buf, nil
 }
 
+// Merge merges another packer into the current packer. Both packers must not be
+// finalized yet.
 func (p *Packer) Merge(other *Packer, otherData io.Reader) error {
 	other.m.Lock()
 	defer other.m.Unlock()
diff --git a/internal/repository/pack/pack_test.go b/internal/repository/pack/pack_test.go
index 5ac146348..e5e61330b 100644
--- a/internal/repository/pack/pack_test.go
+++ b/internal/repository/pack/pack_test.go
@@ -25,15 +25,7 @@ type Buf struct {
 }
 
 func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
-	bufs := []Buf{}
-
-	for _, l := range lengths {
-		b := make([]byte, l)
-		_, err := io.ReadFull(rand.Reader, b)
-		rtest.OK(t, err)
-		h := sha256.Sum256(b)
-		bufs = append(bufs, Buf{data: b, id: h})
-	}
+	bufs := createBuffers(t, lengths)
 
 	// pack blobs
 	var buf bytes.Buffer
@@ -49,6 +41,18 @@ func newPack(t testing.TB, k *crypto.Key, lengths []int) ([]Buf, []byte, uint) {
 	return bufs, buf.Bytes(), p.Size()
 }
 
+func createBuffers(t testing.TB, lengths []int) []Buf {
+	bufs := []Buf{}
+	for _, l := range lengths {
+		b := make([]byte, l)
+		_, err := io.ReadFull(rand.Reader, b)
+		rtest.OK(t, err)
+		h := sha256.Sum256(b)
+		bufs = append(bufs, Buf{data: b, id: h})
+	}
+	return bufs
+}
+
 func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReaderAt, packSize uint) {
 	written := 0
 	for _, buf := range bufs {
@@ -144,3 +148,34 @@ func TestShortPack(t *testing.T) {
 	rtest.OK(t, b.Save(context.TODO(), handle, backend.NewByteReader(packData, b.Hasher())))
 	verifyBlobs(t, bufs, k, backend.ReaderAt(context.TODO(), b, handle), packSize)
 }
+
+func TestPackMerge(t *testing.T) {
+	k := crypto.NewRandomKey()
+
+	bufs := createBuffers(t, []int{1000, 5000, 2000, 3000, 4000, 1500})
+	splitAt := 3
+
+	// Fill packers
+	var buf1 bytes.Buffer
+	packer1 := pack.NewPacker(k, &buf1)
+	for _, b := range bufs[:splitAt] {
+		_, err := packer1.Add(restic.TreeBlob, b.id, b.data, 2*len(b.data))
+		rtest.OK(t, err)
+	}
+
+	var buf2 bytes.Buffer
+	packer2 := pack.NewPacker(k, &buf2)
+	for _, b := range bufs[splitAt:] {
+		_, err := packer2.Add(restic.DataBlob, b.id, b.data, 2*len(b.data))
+		rtest.OK(t, err)
+	}
+
+	err := packer1.Merge(packer2, &buf2)
+	rtest.OK(t, err)
+	err = packer1.Finalize()
+	rtest.OK(t, err)
+
+	// Verify all blobs are present in the merged pack
+	verifyBlobs(t, bufs, k, bytes.NewReader(buf1.Bytes()), packer1.Size())
+	rtest.Equals(t, len(bufs), packer1.Count())
+}