1
0
Fork 0
mirror of https://github.com/restic/restic.git synced 2025-03-16 00:00:05 +01:00

Added support for Google Cloud Storage repositories using minio

Implemented Google Cloud Storage support using the existing minio libraries.
GCS repositories get specified using gs://bucketname/prefix syntax.

Cloned the existing S3 support and tests to use them for GCS as well.

Added tests for the various cases to config_test and location_test.

Remove trailing slashes in repository specification.
This commit is contained in:
Christian Kemper 2016-02-10 23:39:15 -08:00
parent 621b50d218
commit 562c8b4264
9 changed files with 561 additions and 0 deletions

View file

@ -0,0 +1,87 @@
// DO NOT EDIT, AUTOMATICALLY GENERATED
package gcs_test

import (
	"testing"

	"github.com/restic/restic/backend/test"
)

// SkipMessage is set by init() in gcs_test.go when no test server is
// configured; a non-empty value causes every test in this file to be skipped.
var SkipMessage string

// Each TestGcsBackend* function below delegates to the shared backend test
// suite in backend/test after honoring SkipMessage.

func TestGcsBackendCreate(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCreate(t)
}

func TestGcsBackendOpen(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestOpen(t)
}

func TestGcsBackendCreateWithConfig(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCreateWithConfig(t)
}

func TestGcsBackendLocation(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestLocation(t)
}

func TestGcsBackendConfig(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestConfig(t)
}

func TestGcsBackendLoad(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestLoad(t)
}

func TestGcsBackendSave(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestSave(t)
}

func TestGcsBackendSaveFilenames(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestSaveFilenames(t)
}

func TestGcsBackendBackend(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestBackend(t)
}

func TestGcsBackendDelete(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestDelete(t)
}

func TestGcsBackendCleanup(t *testing.T) {
	if SkipMessage != "" {
		t.Skip(SkipMessage)
	}
	test.TestCleanup(t)
}

50
backend/gcs/config.go Normal file
View file

@ -0,0 +1,50 @@
package gcs
import (
"errors"
"strings"
)
// Config contains all configuration necessary to connect to a Google Cloud
// Storage bucket through its S3-interoperable endpoint.
type Config struct {
	Endpoint      string // host to connect to, defaults to gcsEndpoint
	UseHTTP       bool   // use plain HTTP instead of HTTPS
	KeyID, Secret string // interoperability access key and secret
	Bucket        string // bucket name
	Prefix        string // key prefix inside the bucket, defaults to defaultPrefix
}

// Scheme is the protocol scheme used in repository location strings.
const Scheme = "gs"

// defaultPrefix is used when the location string names no prefix.
const defaultPrefix = "restic"

// gcsEndpoint is the Google Cloud Storage interoperability endpoint.
const gcsEndpoint = "storage.googleapis.com"

// ParseConfig parses the string s and extracts the gcs config. The two
// supported configuration formats are gs://bucketname/prefix and
// gs:bucketname/prefix. A missing prefix defaults to defaultPrefix, and
// trailing slashes are removed.
func ParseConfig(s string) (interface{}, error) {
	switch {
	case strings.HasPrefix(s, "gs://"):
		s = s[5:]
	case strings.HasPrefix(s, "gs:"):
		s = s[3:]
	default:
		// the scheme is "gs", not "gcs" -- report the prefix we actually check
		return nil, errors.New(`gcs: config does not start with "gs"`)
	}

	// be defensive against wrong user input and trim trailing slashes
	data := strings.SplitN(strings.TrimRight(s, "/"), "/", 2)

	// SplitN never returns an empty slice, so an explicit check of the
	// bucket name is needed to reject inputs like "gs://".
	if data[0] == "" {
		return nil, errors.New("gcs: invalid format, bucket name not found")
	}

	prefix := defaultPrefix
	if len(data) > 1 {
		prefix = data[1]
	}

	cfg := Config{
		Endpoint: gcsEndpoint,
		Bucket:   data[0],
		Prefix:   prefix,
	}

	return cfg, nil
}

View file

@ -0,0 +1,65 @@
package gcs
import "testing"
// configTests maps repository location strings to the Config value they must
// parse into. Both the gs:// and gs: forms are covered, each with and without
// a prefix and with trailing slashes (which must be stripped).
var configTests = []struct {
	s   string
	cfg Config
}{
	{"gs://bucketname", Config{
		Endpoint: "storage.googleapis.com",
		Bucket:   "bucketname",
		Prefix:   "restic",
	}},
	{"gs://bucketname/", Config{
		Endpoint: "storage.googleapis.com",
		Bucket:   "bucketname",
		Prefix:   "restic",
	}},
	{"gs://bucketname/prefix/dir", Config{
		Endpoint: "storage.googleapis.com",
		Bucket:   "bucketname",
		Prefix:   "prefix/dir",
	}},
	{"gs://bucketname/prefix/dir/", Config{
		Endpoint: "storage.googleapis.com",
		Bucket:   "bucketname",
		Prefix:   "prefix/dir",
	}},
	{"gs:bucketname", Config{
		Endpoint: "storage.googleapis.com",
		Bucket:   "bucketname",
		Prefix:   "restic",
	}},
	{"gs:bucketname/", Config{
		Endpoint: "storage.googleapis.com",
		Bucket:   "bucketname",
		Prefix:   "restic",
	}},
	{"gs:bucketname/prefix/dir", Config{
		Endpoint: "storage.googleapis.com",
		Bucket:   "bucketname",
		Prefix:   "prefix/dir",
	}},
	{"gs:bucketname/prefix/dir/", Config{
		Endpoint: "storage.googleapis.com",
		Bucket:   "bucketname",
		Prefix:   "prefix/dir",
	}},
}
// TestParseConfig runs ParseConfig over every entry in configTests and
// compares the result against the expected Config value.
func TestParseConfig(t *testing.T) {
	for i, tt := range configTests {
		cfg, err := ParseConfig(tt.s)
		switch {
		case err != nil:
			t.Errorf("test %d failed: %v", i, err)
		case cfg != tt.cfg:
			t.Errorf("test %d: wrong config, want:\n %v\ngot:\n %v",
				i, tt.cfg, cfg)
		}
	}
}

230
backend/gcs/gcs.go Normal file
View file

@ -0,0 +1,230 @@
package gcs
import (
"bytes"
"errors"
"io"
"strings"
"github.com/minio/minio-go"
"github.com/restic/restic/backend"
"github.com/restic/restic/debug"
)
// connLimit bounds the number of concurrent connections to the backend.
const connLimit = 20

// s3path builds the object key for a blob of type t with the given name,
// rooted at prefix. The config blob carries no name component.
func s3path(prefix string, t backend.Type, name string) string {
	parts := []string{prefix, string(t)}
	if t != backend.Config {
		parts = append(parts, name)
	}
	return strings.Join(parts, "/")
}
// s3 is a backend which stores the data on an S3 compatible endpoint.
// NOTE(review): the type name stems from the s3 backend this package was
// cloned from; here it talks to GCS via minio's S3-compatible client.
type s3 struct {
	client     minio.CloudStorageClient
	connChan   chan struct{} // semaphore holding connLimit tokens, see createConnections
	bucketname string
	prefix     string
}
// Open opens the GCS backend for the bucket given in cfg. The bucket is
// created if it does not exist yet.
func Open(cfg Config) (backend.Backend, error) {
	debug.Log("gcs.Open", "open, config %#v", cfg)

	client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, cfg.UseHTTP)
	if err != nil {
		return nil, err
	}

	be := &s3{client: client, bucketname: cfg.Bucket, prefix: cfg.Prefix}
	be.createConnections()

	// any BucketExists error is treated as "bucket missing" and answered
	// with a creation attempt; a genuine failure surfaces via MakeBucket
	if err := client.BucketExists(cfg.Bucket); err != nil {
		debug.Log("gcs.Open", "BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)

		// create new bucket with default ACL in default region
		err = client.MakeBucket(cfg.Bucket, "", "")
		if err != nil {
			return nil, err
		}
	}

	return be, nil
}
// createConnections fills the connection-limiting semaphore channel with
// connLimit tokens; Load and Save take a token for each in-flight transfer.
func (be *s3) createConnections() {
	be.connChan = make(chan struct{}, connLimit)
	for len(be.connChan) < cap(be.connChan) {
		be.connChan <- struct{}{}
	}
}
// Location returns this backend's location (the bucket name). The key
// prefix is not included.
func (be *s3) Location() string {
	return be.bucketname
}
// Load returns the data stored in the backend for h at the given offset
// and saves it in p. Load has the same semantics as io.ReaderAt.
//
// The receiver is a pointer for consistency with the other methods on *s3.
func (be *s3) Load(h backend.Handle, p []byte, off int64) (int, error) {
	debug.Log("gcs.Load", "%v, offset %v, len %v", h, off, len(p))
	path := s3path(be.prefix, h.Type, h.Name)
	obj, err := be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log("gcs.GetReader", " err %v", err)
		return 0, err
	}

	if off > 0 {
		// whence 0 seeks relative to the start of the object
		_, err = obj.Seek(off, 0)
		if err != nil {
			return 0, err
		}
	}

	// take a connection token while reading, give it back when done
	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	return io.ReadFull(obj, p)
}
// Save stores data in the backend at the handle. It refuses to overwrite an
// already existing key.
func (be *s3) Save(h backend.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	// format verbs fixed: %d for the length, %v for the handle struct
	debug.Log("gcs.Save", "%d bytes at %v", len(p), h)

	path := s3path(be.prefix, h.Type, h.Name)

	// Check key does not already exist
	_, err = be.client.StatObject(be.bucketname, path)
	if err == nil {
		debug.Log("gcs.Save", "%v already exists", h)
		return errors.New("key already exists")
	}

	// take a connection token while uploading, give it back when done
	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	debug.Log("gcs.Save", "PutObject(%v, %v, %v, %v)",
		be.bucketname, path, int64(len(p)), "binary/octet-stream")
	n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
	debug.Log("gcs.Save", "%v -> %v bytes, err %#v", path, n, err)

	return err
}
// Stat returns information about a blob.
func (be *s3) Stat(h backend.Handle) (backend.BlobInfo, error) {
	// the original format string "%v" had no argument; log the handle
	debug.Log("gcs.Stat", "%v", h)
	path := s3path(be.prefix, h.Type, h.Name)
	obj, err := be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log("gcs.Stat", "GetObject() err %v", err)
		return backend.BlobInfo{}, err
	}

	fi, err := obj.Stat()
	if err != nil {
		debug.Log("gcs.Stat", "Stat() err %v", err)
		return backend.BlobInfo{}, err
	}

	return backend.BlobInfo{Size: fi.Size}, nil
}
// Test returns true if a blob of the given type and name exists in the
// backend. A StatObject failure is interpreted as "not found" and is
// deliberately not reported as an error.
func (be *s3) Test(t backend.Type, name string) (bool, error) {
	path := s3path(be.prefix, t, name)
	if _, err := be.client.StatObject(be.bucketname, path); err != nil {
		return false, nil
	}
	return true, nil
}
// Remove removes the blob with the given name and type.
func (be *s3) Remove(t backend.Type, name string) error {
	err := be.client.RemoveObject(be.bucketname, s3path(be.prefix, t, name))
	debug.Log("gcs.Remove", "%v %v -> err %v", t, name, err)
	return err
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *s3) List(t backend.Type, done <-chan struct{}) <-chan string {
	debug.Log("gcs.List", "listing %v", t)
	ch := make(chan string)

	// s3path with an empty name yields "prefix/type/", so trimming it below
	// leaves the bare blob name
	prefix := s3path(be.prefix, t, "")

	listresp := be.client.ListObjects(be.bucketname, prefix, true, done)

	go func() {
		defer close(ch)
		for obj := range listresp {
			m := strings.TrimPrefix(obj.Key, prefix)
			// skip the entry for the directory-like prefix itself
			if m == "" {
				continue
			}

			select {
			case ch <- m:
			case <-done:
				return
			}
		}
	}()

	return ch
}
// removeKeys removes all blobs of the specified backend type. The previous
// version listed and removed backend.Data regardless of t.
func (be *s3) removeKeys(t backend.Type) error {
	done := make(chan struct{})
	defer close(done)
	for key := range be.List(t, done) {
		err := be.Remove(t, key)
		if err != nil {
			return err
		}
	}

	return nil
}
// Delete removes all restic keys in the bucket. It will not remove the
// bucket itself.
func (be *s3) Delete() error {
	alltypes := []backend.Type{
		backend.Data,
		backend.Key,
		backend.Lock,
		backend.Snapshot,
		backend.Index}

	for _, t := range alltypes {
		err := be.removeKeys(t)
		if err != nil {
			// propagate the failure; the previous version returned nil here,
			// silently aborting the deletion
			return err
		}
	}

	return be.Remove(backend.Config, "")
}
// Close does nothing; the backend keeps no state that needs releasing.
func (be *s3) Close() error { return nil }

72
backend/gcs/gcs_test.go Normal file
View file

@ -0,0 +1,72 @@
package gcs_test
import (
"errors"
"fmt"
"net/url"
"os"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/gcs"
"github.com/restic/restic/backend/test"
. "github.com/restic/restic/test"
)
//go:generate go run ../test/generate_backend_tests.go
// init wires the generic backend test suite up to a GCS repository backed by
// the configured S3 test server, or records a skip message when none is set.
func init() {
	if TestS3Server == "" {
		SkipMessage = "s3 test server not available"
		return
	}

	// u avoids shadowing the net/url package name
	u, err := url.Parse(TestS3Server)
	if err != nil {
		fmt.Fprintf(os.Stderr, "invalid url: %v\n", err)
		return
	}

	cfg := gcs.Config{
		Endpoint: u.Host,
		Bucket:   "restictestbucket",
		KeyID:    os.Getenv("S3_ACCESS_KEY_ID"),
		Secret:   os.Getenv("S3_SECRET_ACCESS_KEY"),
		UseHTTP:  u.Scheme == "http",
	}

	test.CreateFn = func() (backend.Backend, error) {
		be, err := gcs.Open(cfg)
		if err != nil {
			return nil, err
		}

		exists, err := be.Test(backend.Config, "")
		if err != nil {
			return nil, err
		}
		if exists {
			return nil, errors.New("config already exists")
		}

		return be, nil
	}

	test.OpenFn = func() (backend.Backend, error) {
		return gcs.Open(cfg)
	}

	// test.CleanupFn = func() error {
	// 	if tempBackendDir == "" {
	// 		return nil
	// 	}
	//
	// 	fmt.Printf("removing test backend at %v\n", tempBackendDir)
	// 	err := os.RemoveAll(tempBackendDir)
	// 	tempBackendDir = ""
	// 	return err
	// }
}

View file

@ -6,6 +6,8 @@ import (
"strings"
)
const Scheme = "sftp"
// Config collects all information required to connect to an sftp server.
type Config struct {
User, Host, Dir string

View file

@ -9,6 +9,7 @@ import (
"github.com/jessevdk/go-flags"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/gcs"
"github.com/restic/restic/backend/local"
"github.com/restic/restic/backend/s3"
"github.com/restic/restic/backend/sftp"
@ -204,6 +205,17 @@ func open(s string) (backend.Backend, error) {
debug.Log("open", "opening s3 repository at %#v", cfg)
return s3.Open(cfg)
case gcs.Scheme:
cfg := loc.Config.(gcs.Config)
if cfg.KeyID == "" {
cfg.KeyID = os.Getenv("GS_ACCESS_KEY_ID")
}
if cfg.Secret == "" {
cfg.Secret = os.Getenv("GS_SECRET_ACCESS_KEY")
}
debug.Log("open", "opening gcs repository at %#v", cfg)
return gcs.Open(cfg)
}
debug.Log("open", "invalid repository location: %v", s)
@ -237,6 +249,18 @@ func create(s string) (backend.Backend, error) {
debug.Log("open", "create s3 repository at %#v", loc.Config)
return s3.Open(cfg)
case gcs.Scheme:
cfg := loc.Config.(gcs.Config)
if cfg.KeyID == "" {
cfg.KeyID = os.Getenv("GS_ACCESS_KEY_ID")
}
if cfg.Secret == "" {
cfg.Secret = os.Getenv("GS_SECRET_ACCESS_KEY")
}
debug.Log("open", "create gcs repository at %#v", loc.Config)
return gcs.Open(cfg)
}
debug.Log("open", "invalid repository scheme: %v", s)

View file

@ -4,6 +4,7 @@ package location
import (
"strings"
"github.com/restic/restic/backend/gcs"
"github.com/restic/restic/backend/local"
"github.com/restic/restic/backend/s3"
"github.com/restic/restic/backend/sftp"
@ -27,6 +28,7 @@ var parsers = []parser{
{local.Scheme, local.ParseConfig},
{sftp.Scheme, sftp.ParseConfig},
{s3.Scheme, s3.ParseConfig},
{gcs.Scheme, gcs.ParseConfig},
}
// Parse extracts repository location information from the string s. If s

View file

@ -4,6 +4,7 @@ import (
"reflect"
"testing"
"github.com/restic/restic/backend/gcs"
"github.com/restic/restic/backend/s3"
"github.com/restic/restic/backend/sftp"
)
@ -44,6 +45,34 @@ var parseTests = []struct {
Dir: "/srv/repo",
}}},
{"gs://bucketname", Location{Scheme: "gs",
Config: gcs.Config{
Endpoint: "storage.googleapis.com",
Bucket: "bucketname",
Prefix: "restic",
}},
},
{"gs://bucketname/prefix/directory", Location{Scheme: "gs",
Config: gcs.Config{
Endpoint: "storage.googleapis.com",
Bucket: "bucketname",
Prefix: "prefix/directory",
}},
},
{"gs:bucketname", Location{Scheme: "gs",
Config: gcs.Config{
Endpoint: "storage.googleapis.com",
Bucket: "bucketname",
Prefix: "restic",
}},
},
{"gs:bucketname/prefix/directory", Location{Scheme: "gs",
Config: gcs.Config{
Endpoint: "storage.googleapis.com",
Bucket: "bucketname",
Prefix: "prefix/directory",
}},
},
{"s3://eu-central-1/bucketname", Location{Scheme: "s3",
Config: s3.Config{
Endpoint: "eu-central-1",