Mirror of https://github.com/restic/restic.git (synced 2025-03-16 00:00:05 +01:00)
Only restore blobs which don't exist locally
* If we can, calculate the hash of the part of the local file that the blob corresponds to
* If it matches, then we already have the data and can skip fetching it
* This should significantly improve the speed of remote restores when a local dataset already exists
This commit is contained in:
parent
190673b24a
commit
86a4634ee9
1 changed file with 19 additions and 1 deletion
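The hunks below implement this check inside the restore path. What follows is a rough standalone sketch of the idea, not the restic code itself: the helper name blobAlreadyPresent is invented here, and it assumes a blob ID is the SHA-256 of the blob's plaintext, which matches how restic derives blob IDs.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// blobAlreadyPresent is a hypothetical helper, not part of restic. It reports
// whether the next size bytes at the current offset of f already hash to
// wantID. On a mismatch or short read it rewinds the offset so the caller can
// write the fetched blob over the same range.
func blobAlreadyPresent(f *os.File, size int, wantID [sha256.Size]byte) (bool, error) {
	existing := make([]byte, size)
	n, err := io.ReadFull(f, existing)
	if err == nil && sha256.Sum256(existing) == wantID {
		// Data already on disk; leave the offset just past this blob.
		return true, nil
	}
	// Mismatch or short read: move the offset back by the bytes actually read.
	if n > 0 {
		if _, serr := f.Seek(int64(-n), io.SeekCurrent); serr != nil {
			return false, serr
		}
	}
	return false, nil
}

func main() {
	// Write some known bytes, then check that they would not be fetched again.
	data := []byte("hello world")
	if err := os.WriteFile("example.bin", data, 0600); err != nil {
		panic(err)
	}

	f, err := os.OpenFile("example.bin", os.O_CREATE|os.O_RDWR, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	ok, err := blobAlreadyPresent(f, len(data), sha256.Sum256(data))
	fmt.Println(ok, err) // true <nil>
}

On a match the offset already sits past the blob, so the caller just moves on; otherwise the rewind leaves the offset exactly where the fetched blob must be written, mirroring the continue/Seek logic in the diff.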
@@ -192,7 +192,7 @@ func (node Node) createDirAt(path string) error {
 }
 
 func (node Node) createFileAt(path string, repo Repository) error {
-	f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
+	f, err := fs.OpenFile(path, os.O_CREATE|os.O_RDWR, 0600)
 	defer f.Close()
 
 	if err != nil {
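The only functional change in this hunk is the open mode: the restore code now has to read back what is already on disk before deciding whether to fetch a blob, and a write-only handle cannot be read. A tiny standalone demonstration of that, not restic code (the file name demo.bin is made up):

package main

import (
	"fmt"
	"os"
)

func main() {
	if err := os.WriteFile("demo.bin", []byte("abc"), 0600); err != nil {
		panic(err)
	}

	// A write-only handle cannot be read back.
	wo, _ := os.OpenFile("demo.bin", os.O_WRONLY, 0600)
	_, err := wo.Read(make([]byte, 3))
	fmt.Println("read with O_WRONLY:", err) // an error such as "bad file descriptor"
	wo.Close()

	// A read-write handle supports both the hash check and the later write.
	rw, _ := os.OpenFile("demo.bin", os.O_RDWR, 0600)
	n, err := rw.Read(make([]byte, 3))
	fmt.Println("read with O_RDWR:", n, err) // 3 <nil>
	rw.Close()
}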
@@ -206,6 +206,24 @@ func (node Node) createFileAt(path string, repo Repository) error {
 			return err
 		}
 
+		// Skip loading if the data already exists locally
+		existingData := make([]byte, size)
+
+		readSize, _ := f.Read(existingData)
+		if readSize == int(size) {
+			if Hash(existingData) == id {
+				continue
+			}
+		}
+
+		debug.Log("Fetching blob %v (size: %v)", id, size)
+
+		// Move the write offset back to where it was
+		_, err = f.Seek(int64(-readSize), os.SEEK_CUR)
+		if err != nil {
+			return err
+		}
+
 		buf = buf[:cap(buf)]
 		if uint(len(buf)) < size {
 			buf = make([]byte, size)
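One subtlety in this hunk: f.Read advances the file offset by the number of bytes it returned, so when the hash check fails the offset is moved back by readSize with a relative seek; otherwise the freshly fetched blob would be written after the probed bytes instead of over them. A small standalone sketch of that read-then-rewind pattern, not restic code (io.SeekCurrent is the current name for the os.SEEK_CUR constant used above):

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	if err := os.WriteFile("rewind.bin", []byte("stale data"), 0600); err != nil {
		panic(err)
	}

	f, err := os.OpenFile("rewind.bin", os.O_RDWR, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Probe the first five bytes; this moves the offset forward by n.
	probe := make([]byte, 5)
	n, _ := f.Read(probe)

	// Rewind by the amount actually read before overwriting the same range.
	if _, err := f.Seek(int64(-n), io.SeekCurrent); err != nil {
		panic(err)
	}

	if _, err := f.Write([]byte("fresh")); err != nil {
		panic(err)
	}

	out, _ := os.ReadFile("rewind.bin")
	fmt.Println(string(out)) // "fresh data": the probed range was overwritten in place
}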