New Upstream Release - golang-github-kurin-blazer
Ready changes
Summary
Merged new upstream version: 0.5.3 (was: 0.4.4).
Resulting package
Built on 2022-03-08T05:49 (took 14m42s)
The resulting binary package can be installed (if you have the apt repository enabled) by running:
apt install -t fresh-releases golang-github-kurin-blazer-dev
Lintian Result
Diff
diff --git a/.gitignore b/.gitignore
index 38a74b4..743c06a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-bonfire
+bin/bonfire/bonfire
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..9fd5e55
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,8 @@
+# This is the list of Blazer authors for copyright purposes.
+#
+# This does not necessarily list everyone who has contributed code, since in
+# some cases, their employer may be the copyright holder. To see the full list
+# of contributors, see the revision history in source control.
+#
+# Tag yourself.
+Google LLC
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2827b7d..f1eb0d7 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,27 +1,5 @@
-Want to contribute? Great! First, read this page (including the small print at the end).
-
-### Before you contribute
-Before we can use your code, you must sign the
-[Google Individual Contributor License Agreement]
-(https://cla.developers.google.com/about/google-individual)
-(CLA), which you can do online. The CLA is necessary mainly because you own the
-copyright to your changes, even after your contribution becomes part of our
-codebase, so we need your permission to use and distribute your code. We also
-need to be sure of various other things—for instance that you'll tell us if you
-know that your code infringes on other people's patents. You don't have to sign
-the CLA until after you've submitted your code for review and a member has
-approved it, but you must do it before we can put your code into our codebase.
-Before you start working on a larger contribution, you should get in touch with
-us first through the issue tracker with your idea so that we can help out and
-possibly guide you. Coordinating up front makes it much easier to avoid
-frustration later on.
+Want to contribute? Great! First, read this page.
### Code reviews
All submissions, including submissions by project members, require review. We
use Github pull requests for this purpose.
-
-### The small print
-Contributions made by corporations are covered by a different agreement than
-the one above, the
-[Software Grant and Corporate Contributor License Agreement]
-(https://cla.developers.google.com/about/google-corporate).
diff --git a/LICENSE b/LICENSE
index 88755c6..95f7e96 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright 2016, Google
+Copyright 2016, the Blazer authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/README.md b/README.md
index cd1a027..ed466a1 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ import "github.com/kurin/blazer/b2"
```go
func copyFile(ctx context.Context, bucket *b2.Bucket, src, dst string) error {
- f, err := file.Open(src)
+ f, err := os.Open(src)
if err != nil {
return err
}
@@ -52,7 +52,7 @@ Uploading a large file with multiple HTTP connections is simple:
```go
func copyFile(ctx context.Context, bucket *b2.Bucket, writers int, src, dst string) error {
- f, err := file.Open(src)
+ f, err := os.Open(src)
if err != nil {
return err
}
@@ -80,7 +80,7 @@ func downloadFile(ctx context.Context, bucket *b2.Bucket, downloads int, src, ds
r := bucket.Object(src).NewReader(ctx)
defer r.Close()
- f, err := file.Create(dst)
+ f, err := os.Create(dst)
if err != nil {
return err
}
@@ -99,7 +99,7 @@ func downloadFile(ctx context.Context, bucket *b2.Bucket, downloads int, src, ds
func printObjects(ctx context.Context, bucket *b2.Bucket) error {
iterator := bucket.List(ctx)
for iterator.Next() {
- fmt.Println(itrator.Object())
+ fmt.Println(iterator.Object())
}
return iterator.Err()
}
@@ -124,7 +124,3 @@ be found via the BaseURL method:
```go
base := bucket.BaseURL()
```
-
----
-
-This is not an official Google product.
diff --git a/b2/b2.go b/b2/b2.go
index cd83b85..728fcdc 100644
--- a/b2/b2.go
+++ b/b2/b2.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -32,6 +32,7 @@ import (
"fmt"
"io"
"net/http"
+ "net/url"
"regexp"
"strconv"
"sync"
@@ -46,6 +47,7 @@ type Client struct {
sWriters map[string]*Writer
sReaders map[string]*Reader
sMethods []methodCounter
+ opts clientOptions
}
// NewClient creates and returns a new Client with valid B2 service account
@@ -63,7 +65,10 @@ func NewClient(ctx context.Context, account, key string, opts ...ClientOption) (
},
}
opts = append(opts, client(c))
- if err := c.backend.authorizeAccount(ctx, account, key, opts...); err != nil {
+ for _, f := range opts {
+ f(&c.opts)
+ }
+ if err := c.backend.authorizeAccount(ctx, account, key, c.opts); err != nil {
return nil, err
}
return c, nil
@@ -75,27 +80,9 @@ type clientOptions struct {
failSomeUploads bool
expireTokens bool
capExceeded bool
+ apiBase string
userAgents []string
-}
-
-// for testing
-func (c clientOptions) eq(o clientOptions) bool {
- if c.client != o.client ||
- c.transport != o.transport ||
- c.failSomeUploads != o.failSomeUploads ||
- c.expireTokens != o.expireTokens ||
- c.capExceeded != o.capExceeded {
- return false
- }
- if len(c.userAgents) != len(o.userAgents) {
- return false
- }
- for i := range c.userAgents {
- if c.userAgents[i] != o.userAgents[i] {
- return false
- }
- }
- return true
+ writerOpts []WriterOption
}
// A ClientOption allows callers to adjust various per-client settings.
@@ -112,6 +99,13 @@ func UserAgent(agent string) ClientOption {
}
}
+// APIBase returns a ClientOption specifying the URL root of API requests.
+func APIBase(url string) ClientOption {
+ return func(o *clientOptions) {
+ o.apiBase = url
+ }
+}
+
// Transport sets the underlying HTTP transport mechanism. If unset,
// http.DefaultTransport is used.
func Transport(rt http.RoundTripper) ClientOption {
@@ -434,7 +428,7 @@ type Attrs struct {
ContentType string // Used on upload, default is "application/octet-stream".
Status ObjectState // Not used on upload.
UploadTimestamp time.Time // Not used on upload.
- SHA1 string // Not used on upload. Can be "none" for large files.
+ SHA1 string // Can be "none" for large files. If set on upload, will be used for large files.
LastModified time.Time // If present, and there are fewer than 10 keys in the Info field, this is saved on upload.
Info map[string]string // Save arbitrary metadata on upload, but limited to 10 keys.
}
@@ -474,6 +468,9 @@ func (o *Object) Attrs(ctx context.Context) (*Attrs, error) {
mtime = time.Unix(ms/1e3, (ms%1e3)*1e6)
delete(info, "src_last_modified_millis")
}
+ if v, ok := info["large_file_sha1"]; ok {
+ sha = v
+ }
return &Attrs{
Name: name,
Size: size,
@@ -524,14 +521,21 @@ func (o *Object) URL() string {
// overwritten are not deleted, but are "hidden".
//
// Callers must close the writer when finished and check the error status.
-func (o *Object) NewWriter(ctx context.Context) *Writer {
+func (o *Object) NewWriter(ctx context.Context, opts ...WriterOption) *Writer {
ctx, cancel := context.WithCancel(ctx)
- return &Writer{
+ w := &Writer{
o: o,
name: o.name,
ctx: ctx,
cancel: cancel,
}
+ for _, f := range o.b.c.opts.writerOpts {
+ f(w)
+ }
+ for _, f := range opts {
+ f(w)
+ }
+ return w
}
// NewRangeReader returns a reader for the given object, reading up to length
@@ -573,139 +577,6 @@ func (o *Object) Delete(ctx context.Context) error {
return o.f.deleteFileVersion(ctx)
}
-// Cursor is passed to ListObjects to return subsequent pages.
-//
-// DEPRECATED. Will be removed in a future release.
-type Cursor struct {
- // Prefix limits the listed objects to those that begin with this string.
- Prefix string
-
- // Delimiter denotes the path separator. If set, object listings will be
- // truncated at this character.
- //
- // For example, if the bucket contains objects foo/bar, foo/baz, and foo,
- // then a delimiter of "/" will cause the listing to return "foo" and "foo/".
- // Otherwise, the listing would have returned all object names.
- //
- // Note that objects returned that end in the delimiter may not be actual
- // objects, e.g. you cannot read from (or write to, or delete) an object "foo/",
- // both because no actual object exists and because B2 disallows object names
- // that end with "/". If you want to ensure that all objects returned by
- // ListObjects and ListCurrentObjects are actual objects, leave this unset.
- Delimiter string
-
- name string
- id string
-}
-
-// ListObjects returns all objects in the bucket, including multiple versions
-// of the same object. Cursor may be nil; when passed to a subsequent query,
-// it will continue the listing.
-//
-// ListObjects will return io.EOF when there are no objects left in the bucket,
-// however it may do so concurrently with the last objects.
-//
-// DEPRECATED. Will be removed in a future release.
-func (b *Bucket) ListObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
- if c == nil {
- c = &Cursor{}
- }
- fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.Prefix, c.Delimiter)
- if err != nil {
- return nil, nil, err
- }
- var next *Cursor
- if name != "" && id != "" {
- next = &Cursor{
- Prefix: c.Prefix,
- Delimiter: c.Delimiter,
- name: name,
- id: id,
- }
- }
- var objects []*Object
- for _, f := range fs {
- objects = append(objects, &Object{
- name: f.name(),
- f: f,
- b: b,
- })
- }
- var rtnErr error
- if len(objects) == 0 || next == nil {
- rtnErr = io.EOF
- }
- return objects, next, rtnErr
-}
-
-// ListCurrentObjects is similar to ListObjects, except that it returns only
-// current, unhidden objects in the bucket.
-//
-// DEPRECATED. Will be removed in a future release.
-func (b *Bucket) ListCurrentObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
- if c == nil {
- c = &Cursor{}
- }
- fs, name, err := b.b.listFileNames(ctx, count, c.name, c.Prefix, c.Delimiter)
- if err != nil {
- return nil, nil, err
- }
- var next *Cursor
- if name != "" {
- next = &Cursor{
- Prefix: c.Prefix,
- Delimiter: c.Delimiter,
- name: name,
- }
- }
- var objects []*Object
- for _, f := range fs {
- objects = append(objects, &Object{
- name: f.name(),
- f: f,
- b: b,
- })
- }
- var rtnErr error
- if len(objects) == 0 || next == nil {
- rtnErr = io.EOF
- }
- return objects, next, rtnErr
-}
-
-// ListUnfinishedLargeFiles lists any objects that correspond to large file uploads that haven't been completed.
-// This can happen for example when an upload is interrupted.
-//
-// DEPRECATED. Will be removed in a future release.
-func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) {
- if c == nil {
- c = &Cursor{}
- }
- fs, name, err := b.b.listUnfinishedLargeFiles(ctx, count, c.name)
- if err != nil {
- return nil, nil, err
- }
- var next *Cursor
- if name != "" {
- next = &Cursor{
- name: name,
- }
- }
- var objects []*Object
- for _, f := range fs {
- objects = append(objects, &Object{
- name: f.name(),
- f: f,
- b: b,
- })
- }
- var rtnErr error
- if len(objects) == 0 || next == nil {
- rtnErr = io.EOF
- }
- return objects, next, rtnErr
-}
-
// Hide hides the object from name-based listing.
func (o *Object) Hide(ctx context.Context) error {
if err := o.ensure(ctx); err != nil {
@@ -718,21 +589,20 @@ func (o *Object) Hide(ctx context.Context) error {
// Reveal unhides (if hidden) the named object. If there are multiple objects
// of a given name, it will reveal the most recent.
func (b *Bucket) Reveal(ctx context.Context, name string) error {
- cur := &Cursor{
- name: name,
- }
- objs, _, err := b.ListObjects(ctx, 1, cur)
- if err != nil && err != io.EOF {
- return err
- }
- if len(objs) < 1 || objs[0].name != name {
- return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true}
- }
- obj := objs[0]
- if obj.f.status() != "hide" {
- return nil
+ iter := b.List(ctx, ListPrefix(name), ListHidden())
+ for iter.Next() {
+ obj := iter.Object()
+ if obj.Name() == name {
+ if obj.f.status() == "hide" {
+ return obj.Delete(ctx)
+ }
+ return nil
+ }
+ if obj.Name() > name {
+ break
+ }
}
- return obj.Delete(ctx)
+ return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true}
}
// I don't want to import all of ioutil for this.
@@ -760,5 +630,24 @@ func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) {
// in a private bucket. Only objects that begin with prefix can be accessed.
// The token expires after the given duration.
func (b *Bucket) AuthToken(ctx context.Context, prefix string, valid time.Duration) (string, error) {
- return b.b.getDownloadAuthorization(ctx, prefix, valid)
+ return b.b.getDownloadAuthorization(ctx, prefix, valid, "")
+}
+
+// AuthURL returns a URL for the given object with embedded token and,
+// possibly, b2ContentDisposition arguments. Leave b2cd blank for no content
+// disposition.
+func (o *Object) AuthURL(ctx context.Context, valid time.Duration, b2cd string) (*url.URL, error) {
+ token, err := o.b.b.getDownloadAuthorization(ctx, o.name, valid, b2cd)
+ if err != nil {
+ return nil, err
+ }
+ urlString := fmt.Sprintf("%s?Authorization=%s", o.URL(), url.QueryEscape(token))
+ if b2cd != "" {
+ urlString = fmt.Sprintf("%s&b2ContentDisposition=%s", urlString, url.QueryEscape(b2cd))
+ }
+ u, err := url.Parse(urlString)
+ if err != nil {
+ return nil, err
+ }
+ return u, nil
}
diff --git a/b2/b2_test.go b/b2/b2_test.go
index add7d75..6eea36d 100644
--- a/b2/b2_test.go
+++ b/b2/b2_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -71,7 +71,7 @@ type testRoot struct {
bucketMap map[string]map[string]string
}
-func (t *testRoot) authorizeAccount(context.Context, string, string, ...ClientOption) error {
+func (t *testRoot) authorizeAccount(context.Context, string, string, clientOptions) error {
t.auths++
return nil
}
@@ -108,6 +108,13 @@ func (t *testRoot) transient(err error) bool {
return e.retry || e.reupload || e.backoff > 0
}
+func (t *testRoot) createKey(context.Context, string, []string, time.Duration, string, string) (b2KeyInterface, error) {
+ return nil, nil
+}
+func (t *testRoot) listKeys(context.Context, int, string) ([]b2KeyInterface, string, error) {
+ return nil, "", nil
+}
+
func (t *testRoot) createBucket(_ context.Context, name, _ string, _ map[string]string, _ []LifecycleRule) (b2BucketInterface, error) {
if err := t.errs.getError("createBucket"); err != nil {
return nil, err
@@ -147,6 +154,7 @@ func (t *testBucket) btype() string { return
func (t *testBucket) attrs() *BucketAttrs { return nil }
func (t *testBucket) deleteBucket(context.Context) error { return nil }
func (t *testBucket) updateBucket(context.Context, *BucketAttrs) error { return nil }
+func (t *testBucket) id() string { return "" }
func (t *testBucket) getUploadURL(context.Context) (b2URLInterface, error) {
if err := t.errs.getError("getUploadURL"); err != nil {
@@ -221,7 +229,7 @@ func (t *testBucket) downloadFileByName(_ context.Context, name string, offset,
}
func (t *testBucket) hideFile(context.Context, string) (b2FileInterface, error) { return nil, nil }
-func (t *testBucket) getDownloadAuthorization(context.Context, string, time.Duration) (string, error) {
+func (t *testBucket) getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error) {
return "", nil
}
func (t *testBucket) baseURL() string { return "" }
@@ -279,6 +287,8 @@ func (t *testLargeFile) getUploadPartURL(context.Context) (b2FileChunkInterface,
}, nil
}
+func (t *testLargeFile) cancel(ctx context.Context) error { return ctx.Err() }
+
type testFileChunk struct {
parts map[int][]byte
errs *errCont
@@ -672,6 +682,50 @@ func TestReadWrite(t *testing.T) {
}
}
+func TestLargeFileCancellation(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ client := &Client{
+ backend: &beRoot{
+ b2i: &testRoot{
+ bucketMap: make(map[string]map[string]string),
+ errs: &errCont{},
+ },
+ },
+ }
+
+ b, err := client.NewBucket(ctx, bucketName, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var called bool
+ w := b.Object("foo").NewWriter(ctx, WithCancelOnError(func() context.Context { return context.Background() }, func(err error) {
+ called = true
+ if err != nil {
+ t.Errorf("expected no error, got %v", err)
+ }
+ }))
+ w.ChunkSize = 10
+
+ for i := 0; i < 10; i++ {
+ r := io.LimitReader(zReader{}, 20)
+ if _, err := io.Copy(w, r); err != nil && err != context.Canceled {
+ t.Errorf("Copy: %v", err)
+ }
+ cancel()
+ }
+
+ if err := w.Close(); err != context.Canceled {
+ t.Errorf("expected cancelled context; got %v", err)
+ }
+
+ if !called {
+ t.Errorf("error callback not called")
+ }
+}
+
func TestReadRangeReturnsRight(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
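The new TestLargeFileCancellation above exercises a WithCancelOnError writer option whose semantics are only visible from the test, so the following is a hedged sketch of how a caller might use it (helper name, object name, and the `log` call are illustrative; assumes the usual context/io imports):

```go
func writeWithCleanup(ctx context.Context, bucket *b2.Bucket, name string, r io.Reader) error {
	w := bucket.Object(name).NewWriter(ctx, b2.WithCancelOnError(
		// A fresh context lets the cancellation call run even after ctx is done.
		func() context.Context { return context.Background() },
		// Receives any error from cancelling the unfinished large file.
		func(err error) {
			if err != nil {
				log.Printf("cancel unfinished large file: %v", err)
			}
		},
	))
	if _, err := io.Copy(w, r); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
```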
diff --git a/b2/backend.go b/b2/backend.go
index 06de880..5ca3bd4 100644
--- a/b2/backend.go
+++ b/b2/backend.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -28,22 +28,25 @@ type beRootInterface interface {
reauth(error) bool
transient(error) bool
reupload(error) bool
- authorizeAccount(context.Context, string, string, ...ClientOption) error
+ authorizeAccount(context.Context, string, string, clientOptions) error
reauthorizeAccount(context.Context) error
createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error)
listBuckets(context.Context) ([]beBucketInterface, error)
+ createKey(context.Context, string, []string, time.Duration, string, string) (beKeyInterface, error)
+ listKeys(context.Context, int, string) ([]beKeyInterface, string, error)
}
type beRoot struct {
account, key string
b2i b2RootInterface
- options []ClientOption
+ options clientOptions
}
type beBucketInterface interface {
name() string
btype() BucketType
attrs() *BucketAttrs
+ id() string
updateBucket(context.Context, *BucketAttrs) error
deleteBucket(context.Context) error
getUploadURL(context.Context) (beURLInterface, error)
@@ -53,7 +56,7 @@ type beBucketInterface interface {
listUnfinishedLargeFiles(context.Context, int, string) ([]beFileInterface, string, error)
downloadFileByName(context.Context, string, int64, int64) (beFileReaderInterface, error)
hideFile(context.Context, string) (beFileInterface, error)
- getDownloadAuthorization(context.Context, string, time.Duration) (string, error)
+ getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error)
baseURL() string
file(string, string) beFileInterface
}
@@ -92,6 +95,7 @@ type beFile struct {
type beLargeFileInterface interface {
finishLargeFile(context.Context) (beFileInterface, error)
getUploadPartURL(context.Context) (beFileChunkInterface, error)
+ cancel(context.Context) error
}
type beLargeFile struct {
@@ -145,26 +149,40 @@ type beFileInfo struct {
stamp time.Time
}
+type beKeyInterface interface {
+ del(context.Context) error
+ caps() []string
+ name() string
+ expires() time.Time
+ secret() string
+ id() string
+}
+
+type beKey struct {
+ b2i beRootInterface
+ k b2KeyInterface
+}
+
func (r *beRoot) backoff(err error) time.Duration { return r.b2i.backoff(err) }
func (r *beRoot) reauth(err error) bool { return r.b2i.reauth(err) }
func (r *beRoot) reupload(err error) bool { return r.b2i.reupload(err) }
func (r *beRoot) transient(err error) bool { return r.b2i.transient(err) }
-func (r *beRoot) authorizeAccount(ctx context.Context, account, key string, opts ...ClientOption) error {
+func (r *beRoot) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error {
f := func() error {
- if err := r.b2i.authorizeAccount(ctx, account, key, opts...); err != nil {
+ if err := r.b2i.authorizeAccount(ctx, account, key, c); err != nil {
return err
}
r.account = account
r.key = key
- r.options = opts
+ r.options = c
return nil
}
return withBackoff(ctx, r, f)
}
func (r *beRoot) reauthorizeAccount(ctx context.Context) error {
- return r.authorizeAccount(ctx, r.account, r.key, r.options...)
+ return r.authorizeAccount(ctx, r.account, r.key, r.options)
}
func (r *beRoot) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) {
@@ -213,17 +231,58 @@ func (r *beRoot) listBuckets(ctx context.Context) ([]beBucketInterface, error) {
return buckets, nil
}
-func (b *beBucket) name() string {
- return b.b2bucket.name()
+func (r *beRoot) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (beKeyInterface, error) {
+ var k *beKey
+ f := func() error {
+ g := func() error {
+ got, err := r.b2i.createKey(ctx, name, caps, valid, bucketID, prefix)
+ if err != nil {
+ return err
+ }
+ k = &beKey{
+ b2i: r,
+ k: got,
+ }
+ return nil
+ }
+ return withReauth(ctx, r, g)
+ }
+ if err := withBackoff(ctx, r, f); err != nil {
+ return nil, err
+ }
+ return k, nil
}
-func (b *beBucket) btype() BucketType {
- return BucketType(b.b2bucket.btype())
+func (r *beRoot) listKeys(ctx context.Context, max int, next string) ([]beKeyInterface, string, error) {
+ var keys []beKeyInterface
+ var cur string
+ f := func() error {
+ g := func() error {
+ got, n, err := r.b2i.listKeys(ctx, max, next)
+ if err != nil {
+ return err
+ }
+ cur = n
+ for _, g := range got {
+ keys = append(keys, &beKey{
+ b2i: r,
+ k: g,
+ })
+ }
+ return nil
+ }
+ return withReauth(ctx, r, g)
+ }
+ if err := withBackoff(ctx, r, f); err != nil {
+ return nil, "", err
+ }
+ return keys, cur, nil
}
-func (b *beBucket) attrs() *BucketAttrs {
- return b.b2bucket.attrs()
-}
+func (b *beBucket) name() string { return b.b2bucket.name() }
+func (b *beBucket) btype() BucketType { return BucketType(b.b2bucket.btype()) }
+func (b *beBucket) attrs() *BucketAttrs { return b.b2bucket.attrs() }
+func (b *beBucket) id() string { return b.b2bucket.id() }
func (b *beBucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error {
f := func() error {
@@ -412,11 +471,11 @@ func (b *beBucket) hideFile(ctx context.Context, name string) (beFileInterface,
return file, nil
}
-func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration) (string, error) {
+func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) {
var tok string
f := func() error {
g := func() error {
- t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v)
+ t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v, s)
if err != nil {
return err
}
@@ -596,6 +655,16 @@ func (b *beLargeFile) finishLargeFile(ctx context.Context) (beFileInterface, err
return file, nil
}
+func (b *beLargeFile) cancel(ctx context.Context) error {
+ f := func() error {
+ g := func() error {
+ return b.b2largeFile.cancel(ctx)
+ }
+ return withReauth(ctx, b.ri, g)
+ }
+ return withBackoff(ctx, b.ri, f)
+}
+
func (b *beFileChunk) reload(ctx context.Context) error {
f := func() error {
g := func() error {
@@ -649,6 +718,13 @@ func (b *beFilePart) number() int { return b.b2filePart.number() }
func (b *beFilePart) sha1() string { return b.b2filePart.sha1() }
func (b *beFilePart) size() int64 { return b.b2filePart.size() }
+func (b *beKey) del(ctx context.Context) error { return b.k.del(ctx) }
+func (b *beKey) caps() []string { return b.k.caps() }
+func (b *beKey) name() string { return b.k.name() }
+func (b *beKey) expires() time.Time { return b.k.expires() }
+func (b *beKey) secret() string { return b.k.secret() }
+func (b *beKey) id() string { return b.k.id() }
+
func jitter(d time.Duration) time.Duration {
f := float64(d)
f /= 50
@@ -657,8 +733,8 @@ func jitter(d time.Duration) time.Duration {
}
func getBackoff(d time.Duration) time.Duration {
- if d > 15*time.Second {
- return d + jitter(d)
+ if d > 30*time.Second {
+ return 30*time.Second + jitter(d)
}
return d*2 + jitter(d*2)
}
diff --git a/b2/baseline.go b/b2/baseline.go
index ffbe8d5..bfa4c80 100644
--- a/b2/baseline.go
+++ b/b2/baseline.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -27,19 +27,22 @@ import (
// the only file in b2 that imports base.
type b2RootInterface interface {
- authorizeAccount(context.Context, string, string, ...ClientOption) error
+ authorizeAccount(context.Context, string, string, clientOptions) error
transient(error) bool
backoff(error) time.Duration
reauth(error) bool
reupload(error) bool
createBucket(context.Context, string, string, map[string]string, []LifecycleRule) (b2BucketInterface, error)
listBuckets(context.Context) ([]b2BucketInterface, error)
+ createKey(context.Context, string, []string, time.Duration, string, string) (b2KeyInterface, error)
+ listKeys(context.Context, int, string) ([]b2KeyInterface, string, error)
}
type b2BucketInterface interface {
name() string
btype() string
attrs() *BucketAttrs
+ id() string
updateBucket(context.Context, *BucketAttrs) error
deleteBucket(context.Context) error
getUploadURL(context.Context) (b2URLInterface, error)
@@ -49,7 +52,7 @@ type b2BucketInterface interface {
listUnfinishedLargeFiles(context.Context, int, string) ([]b2FileInterface, string, error)
downloadFileByName(context.Context, string, int64, int64) (b2FileReaderInterface, error)
hideFile(context.Context, string) (b2FileInterface, error)
- getDownloadAuthorization(context.Context, string, time.Duration) (string, error)
+ getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error)
baseURL() string
file(string, string) b2FileInterface
}
@@ -73,6 +76,7 @@ type b2FileInterface interface {
type b2LargeFileInterface interface {
finishLargeFile(context.Context) (b2FileInterface, error)
getUploadPartURL(context.Context) (b2FileChunkInterface, error)
+ cancel(context.Context) error
}
type b2FileChunkInterface interface {
@@ -96,6 +100,15 @@ type b2FilePartInterface interface {
size() int64
}
+type b2KeyInterface interface {
+ del(context.Context) error
+ caps() []string
+ name() string
+ expires() time.Time
+ secret() string
+ id() string
+}
+
type b2Root struct {
b *base.B2
}
@@ -132,11 +145,11 @@ type b2FilePart struct {
b *base.FilePart
}
-func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, opts ...ClientOption) error {
- c := &clientOptions{}
- for _, f := range opts {
- f(c)
- }
+type b2Key struct {
+ b *base.Key
+}
+
+func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error {
var aopts []base.AuthOption
ct := &clientTransport{client: c.client}
if c.transport != nil {
@@ -152,6 +165,9 @@ func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, opts
if c.capExceeded {
aopts = append(aopts, base.ForceCapExceeded())
}
+ if c.apiBase != "" {
+ aopts = append(aopts, base.SetAPIBase(c.apiBase))
+ }
for _, agent := range c.userAgents {
aopts = append(aopts, base.UserAgent(agent))
}
@@ -249,6 +265,26 @@ func (b *b2Bucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error {
return err
}
+func (b *b2Root) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (b2KeyInterface, error) {
+ k, err := b.b.CreateKey(ctx, name, caps, valid, bucketID, prefix)
+ if err != nil {
+ return nil, err
+ }
+ return &b2Key{k}, nil
+}
+
+func (b *b2Root) listKeys(ctx context.Context, max int, next string) ([]b2KeyInterface, string, error) {
+ keys, next, err := b.b.ListKeys(ctx, max, next)
+ if err != nil {
+ return nil, "", err
+ }
+ var k []b2KeyInterface
+ for _, key := range keys {
+ k = append(k, &b2Key{key})
+ }
+ return k, next, nil
+}
+
func (b *b2Bucket) deleteBucket(ctx context.Context) error {
return b.b.DeleteBucket(ctx)
}
@@ -277,6 +313,8 @@ func (b *b2Bucket) attrs() *BucketAttrs {
}
}
+func (b *b2Bucket) id() string { return b.b.ID }
+
func (b *b2Bucket) getUploadURL(ctx context.Context) (b2URLInterface, error) {
url, err := b.b.GetUploadURL(ctx)
if err != nil {
@@ -352,8 +390,8 @@ func (b *b2Bucket) hideFile(ctx context.Context, name string) (b2FileInterface,
return &b2File{f}, nil
}
-func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration) (string, error) {
- return b.b.GetDownloadAuthorization(ctx, p, v)
+func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) {
+ return b.b.GetDownloadAuthorization(ctx, p, v, s)
}
func (b *b2Bucket) baseURL() string {
@@ -437,6 +475,10 @@ func (b *b2LargeFile) getUploadPartURL(ctx context.Context) (b2FileChunkInterfac
return &b2FileChunk{c}, nil
}
+func (b *b2LargeFile) cancel(ctx context.Context) error {
+ return b.b.CancelLargeFile(ctx)
+}
+
func (b *b2FileChunk) reload(ctx context.Context) error {
return b.b.Reload(ctx)
}
@@ -466,3 +508,10 @@ func (b *b2FileInfo) stats() (string, string, int64, string, map[string]string,
func (b *b2FilePart) number() int { return b.b.Number }
func (b *b2FilePart) sha1() string { return b.b.SHA1 }
func (b *b2FilePart) size() int64 { return b.b.Size }
+
+func (b *b2Key) del(ctx context.Context) error { return b.b.Delete(ctx) }
+func (b *b2Key) caps() []string { return b.b.Capabilities }
+func (b *b2Key) name() string { return b.b.Name }
+func (b *b2Key) expires() time.Time { return b.b.Expires }
+func (b *b2Key) secret() string { return b.b.Secret }
+func (b *b2Key) id() string { return b.b.ID }
diff --git a/b2/buffer.go b/b2/buffer.go
index ab8fef0..4c62afb 100644
--- a/b2/buffer.go
+++ b/b2/buffer.go
@@ -1,4 +1,4 @@
-// Copyright 2017, Google
+// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/b2/integration_test.go b/b2/integration_test.go
index 1f7acef..45777ec 100644
--- a/b2/integration_test.go
+++ b/b2/integration_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -22,13 +22,16 @@ import (
"encoding/hex"
"fmt"
"io"
+ "io/ioutil"
"net/http"
"os"
"reflect"
+ "strings"
"sync/atomic"
"testing"
"time"
+ "github.com/kurin/blazer/internal/blog"
"github.com/kurin/blazer/x/transport"
)
@@ -137,6 +140,9 @@ func TestReaderFromLive(t *testing.T) {
if rn != n {
t.Errorf("Read from B2: got %d bytes, want %d bytes", rn, n)
}
+ if err, ok := r.Verify(); ok && err != nil {
+ t.Errorf("Read from B2: %v", err)
+ }
if err := r.Close(); err != nil {
t.Errorf("r.Close(): %v", err)
}
@@ -275,6 +281,20 @@ func TestResumeWriter(t *testing.T) {
}
}
+func TestResumeWriterWithoutExtantFile(t *testing.T) {
+ ctx := context.Background()
+ bucket, done := startLiveTest(ctx, t)
+ defer done()
+
+ r := io.LimitReader(zReader{}, 15e6)
+ w := bucket.Object("foo").NewWriter(ctx)
+ w.ChunkSize = 5e6
+ w.Resume = true
+ if _, err := io.Copy(w, r); err != nil {
+ t.Fatalf("io.Copy: %v", err)
+ }
+}
+
func TestAttrs(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
@@ -323,6 +343,7 @@ func TestAttrs(t *testing.T) {
for _, attrs := range attrlist {
o := bucket.Object(e.name)
w := o.NewWriter(ctx).WithAttrs(attrs)
+ w.ChunkSize = 5e6
if _, err := io.Copy(w, io.LimitReader(zReader{}, e.size)); err != nil {
t.Error(err)
continue
@@ -364,7 +385,9 @@ func TestFileBufferLive(t *testing.T) {
w.UseFileBuffer = true
- w.Write(nil)
+ if _, err := io.CopyN(w, r, 1); err != nil {
+ t.Fatalf("CopyN: %v", err)
+ }
wb, ok := w.w.(*fileBuffer)
if !ok {
t.Fatalf("writer isn't using file buffer: %T", w.w)
@@ -429,6 +452,57 @@ func TestAuthTokLive(t *testing.T) {
}
}
+func TestObjAuthTokLive(t *testing.T) {
+ ctx := context.Background()
+ ctx, cancel := context.WithTimeout(ctx, time.Minute)
+ defer cancel()
+ bucket, done := startLiveTest(ctx, t)
+ defer done()
+
+ table := []struct {
+ obj string
+ d time.Duration
+ b2cd string
+ }{
+ {
+ obj: "foo/bar",
+ d: time.Minute,
+ },
+ {
+ obj: "foo2/thing.pdf",
+ d: time.Minute,
+ b2cd: "attachment",
+ },
+ {
+ obj: "foo2/thing.pdf",
+ d: time.Minute,
+ b2cd: `attachment; filename="what.png"`,
+ },
+ }
+
+ for _, e := range table {
+ fw := bucket.Object(e.obj).NewWriter(ctx)
+ io.Copy(fw, io.LimitReader(zReader{}, 1e5))
+ if err := fw.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ url, err := bucket.Object(e.obj).AuthURL(ctx, e.d, e.b2cd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ blog.V(2).Infof("downloading %s", url.String())
+ frsp, err := http.Get(url.String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if frsp.StatusCode != 200 {
+ t.Fatalf("%s: got %s, want 200", url.String(), frsp.Status)
+ }
+ }
+}
+
func TestRangeReaderLive(t *testing.T) {
ctx := context.Background()
ctx, cancel := context.WithTimeout(ctx, time.Minute)
@@ -510,6 +584,9 @@ func TestRangeReaderLive(t *testing.T) {
if got != want {
t.Errorf("NewRangeReader(_, %d, %d): got %q, want %q", e.offset, e.length, got, want)
}
+ if err, ok := r.Verify(); ok && err != nil {
+ t.Errorf("NewRangeReader(_, %d, %d): %v", e.offset, e.length, err)
+ }
}
}
@@ -863,29 +940,281 @@ func TestReauthPreservesOptions(t *testing.T) {
bucket, done := startLiveTest(ctx, t)
defer done()
- var first []ClientOption
- opts := bucket.r.(*beRoot).options
- for _, o := range opts {
- first = append(first, o)
- }
-
+ first := bucket.r.(*beRoot).options
if err := bucket.r.reauthorizeAccount(ctx); err != nil {
t.Fatalf("reauthorizeAccount: %v", err)
}
-
second := bucket.r.(*beRoot).options
- if len(second) != len(first) {
- t.Fatalf("options mismatch: got %d options, wanted %d", len(second), len(first))
+ if !reflect.DeepEqual(first, second) {
+ // Test that they are literally the same set of options, which is an
+ // implementation detail but is fine for now.
+ t.Errorf("options mismatch: got %v, want %v", second, first)
+ }
+}
+
+func TestVerifyReader(t *testing.T) {
+ ctx := context.Background()
+ bucket, done := startLiveTest(ctx, t)
+ defer done()
+
+ table := []struct {
+ name string
+ fakeSHA string
+ size int64
+ off, len int64
+ valid bool
+ }{
+ {
+ name: "first",
+ size: 100,
+ off: 0,
+ len: -1,
+ valid: true,
+ },
+ {
+ name: "second",
+ size: 100,
+ off: 0,
+ len: 100,
+ valid: true,
+ },
+ {
+ name: "third",
+ size: 100,
+ off: 0,
+ len: 99,
+ valid: false,
+ },
+ {
+ name: "fourth",
+ size: 5e6 + 100,
+ off: 0,
+ len: -1,
+ valid: false,
+ },
+ {
+ name: "fifth",
+ size: 5e6 + 100,
+ off: 0,
+ len: -1,
+ fakeSHA: "fbc815f2d6518858dec83ccb46263875fc894d88",
+ valid: true,
+ },
+ }
+
+ for _, e := range table {
+ o := bucket.Object(e.name)
+ w := o.NewWriter(ctx)
+ if e.fakeSHA != "" {
+ w = w.WithAttrs(&Attrs{SHA1: e.fakeSHA})
+ }
+ w.ChunkSize = 5e6
+ if _, err := io.Copy(w, io.LimitReader(zReader{}, e.size)); err != nil {
+ t.Error(err)
+ continue
+ }
+ if err := w.Close(); err != nil {
+ t.Error(err)
+ continue
+ }
+ r := o.NewRangeReader(ctx, e.off, e.len)
+ if _, err := io.Copy(ioutil.Discard, r); err != nil {
+ t.Error(err)
+ }
+ err, ok := r.Verify()
+ if ok != e.valid {
+ t.Errorf("%s: bad validity: got %v, want %v", e.name, ok, e.valid)
+ }
+ if e.valid && err != nil {
+ t.Errorf("%s does not verify: %v", e.name, err)
+ }
+ }
+}
+
+func TestListBucketsWithKey(t *testing.T) {
+ ctx := context.Background()
+ bucket, done := startLiveTest(ctx, t)
+ defer done()
+
+ key, err := bucket.CreateKey(ctx, "testKey", Capabilities("listBuckets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client, err := NewClient(ctx, key.ID(), key.Secret())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := client.Bucket(ctx, bucket.Name()); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestListBucketContentsWithKey(t *testing.T) {
+ ctx := context.Background()
+ bucket, done := startLiveTest(ctx, t)
+ defer done()
+
+ for _, path := range []string{"foo/bar", "foo/baz", "foo", "bar", "baz"} {
+ if _, _, err := writeFile(ctx, bucket, path, 1, 1e8); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ key, err := bucket.CreateKey(ctx, "testKey", Capabilities("listBuckets", "listFiles"), Prefix("foo/"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ client, err := NewClient(ctx, key.ID(), key.Secret())
+ if err != nil {
+ t.Fatal(err)
+ }
+ obucket, err := client.Bucket(ctx, bucket.Name())
+ if err != nil {
+ t.Fatal(err)
+ }
+ iter := obucket.List(ctx)
+ var got []string
+ for iter.Next() {
+ got = append(got, iter.Object().Name())
+ }
+ if iter.Err() != nil {
+ t.Fatal(iter.Err())
+ }
+ want := []string{"foo/bar", "foo/baz"}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("error listing objects with restricted key: got %v, want %v", got, want)
+ }
+ iter2 := obucket.List(ctx, ListHidden())
+ for iter2.Next() {
+ }
+ if iter2.Err() != nil {
+ t.Error(iter2.Err())
+ }
+}
+
+func TestCreateDeleteKey(t *testing.T) {
+ ctx := context.Background()
+ bucket, done := startLiveTest(ctx, t)
+ defer done()
+
+ table := []struct {
+ d time.Duration
+ e time.Time
+ bucket bool
+ cap []string
+ pfx string
+ }{
+ {
+ cap: []string{"deleteKeys"},
+ },
+ {
+ d: time.Minute,
+ cap: []string{"deleteKeys"},
+ pfx: "prefox",
+ },
+ {
+ e: time.Now().Add(time.Minute), // <shrug emojis>
+ cap: []string{"writeFiles", "listFiles"},
+ bucket: true,
+ },
+ {
+ d: time.Minute,
+ cap: []string{"writeFiles", "listFiles"},
+ pfx: "prefox",
+ bucket: true,
+ },
}
- var f, s clientOptions
- for i := range first {
- first[i](&f)
- second[i](&s)
+ for _, e := range table {
+ var opts []KeyOption
+ opts = append(opts, Capabilities(e.cap...))
+ if e.d != 0 {
+ opts = append(opts, Lifetime(e.d))
+ }
+ if !e.e.IsZero() {
+ opts = append(opts, Deadline(e.e))
+ }
+ var key *Key
+ if e.bucket {
+ opts = append(opts, Prefix(e.pfx))
+ bkey, err := bucket.CreateKey(ctx, "whee", opts...)
+ if err != nil {
+ t.Errorf("Bucket.CreateKey(%v, %v): %v", bucket.Name(), e, err)
+ continue
+ }
+ key = bkey
+ } else {
+ gkey, err := bucket.c.CreateKey(ctx, "whee", opts...)
+ if err != nil {
+ t.Errorf("Client.CreateKey(%v): %v", e, err)
+ continue
+ }
+ key = gkey
+ }
+ if err := key.Delete(ctx); err != nil {
+ t.Errorf("key.Delete(): %v", err)
+ }
}
+}
- if !f.eq(s) {
- t.Errorf("options mismatch: got %v, want %v", s, f)
+func TestListKeys(t *testing.T) {
+ ctx := context.Background()
+ bucket, done := startLiveTest(ctx, t)
+ defer done()
+
+ n := 20
+
+ for i := 0; i < n; i++ {
+ key, err := bucket.CreateKey(ctx, fmt.Sprintf("%d-list-key-test", i), Capabilities("listBuckets"))
+ if err != nil {
+ t.Fatalf("CreateKey(%d): %v", i, err)
+ }
+ defer key.Delete(ctx)
+ }
+
+ var got []string
+ var cur string
+ for {
+ ks, c, err := bucket.c.ListKeys(ctx, 10, cur)
+ if err != nil && err != io.EOF {
+ t.Fatalf("ListKeys(): %v", err)
+ }
+ for _, k := range ks {
+ if strings.HasSuffix(k.Name(), "list-key-test") {
+ got = append(got, k.Name())
+ }
+ }
+ cur = c
+ if err == io.EOF {
+ break
+ }
+ }
+ if len(got) != n {
+ t.Errorf("ListKeys(): got %d, want %d: %v", len(got), n, got)
+ }
+}
+
+func TestEmptyObject(t *testing.T) {
+ ctx := context.Background()
+ bucket, done := startLiveTest(ctx, t)
+ defer done()
+
+ obj := bucket.Object("empty")
+ w := obj.NewWriter(ctx)
+ if _, err := w.Write([]byte{}); err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("Close: %v", err)
+ }
+
+ attrs, err := obj.Attrs(ctx)
+ if err != nil {
+ t.Fatalf("Attrs: %v", err)
+ }
+ if attrs.Size != 0 {
+ t.Fatalf("Unexpected object size: got %d, want 0", attrs.Size)
}
}
diff --git a/b2/iterator.go b/b2/iterator.go
index 1679c81..c85ab01 100644
--- a/b2/iterator.go
+++ b/b2/iterator.go
@@ -1,4 +1,4 @@
-// Copyright 2018, Google
+// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -50,7 +50,7 @@ type ObjectIterator struct {
final bool
err error
idx int
- c *Cursor
+ c *cursor
opts objectIteratorOptions
objs []*Object
init sync.Once
@@ -58,7 +58,7 @@ type ObjectIterator struct {
count int
}
-type lister func(context.Context, int, *Cursor) ([]*Object, *Cursor, error)
+type lister func(context.Context, int, *cursor) ([]*Object, *cursor, error)
func (o *ObjectIterator) page(ctx context.Context) error {
if o.opts.locker != nil {
@@ -96,18 +96,18 @@ func (o *ObjectIterator) Next() bool {
}
switch {
case o.opts.unfinished:
- o.l = o.bucket.ListUnfinishedLargeFiles
+ o.l = o.bucket.listUnfinishedLargeFiles
if o.count > 100 {
o.count = 100
}
case o.opts.hidden:
- o.l = o.bucket.ListObjects
+ o.l = o.bucket.listObjects
default:
- o.l = o.bucket.ListCurrentObjects
+ o.l = o.bucket.listCurrentObjects
}
- o.c = &Cursor{
- Prefix: o.opts.prefix,
- Delimiter: o.opts.delimiter,
+ o.c = &cursor{
+ prefix: o.opts.prefix,
+ delimiter: o.opts.delimiter,
}
})
if o.err != nil {
@@ -215,3 +215,117 @@ func ListLocker(l sync.Locker) ListOption {
o.locker = l
}
}
+
+type cursor struct {
+ // Prefix limits the listed objects to those that begin with this string.
+ prefix string
+
+ // Delimiter denotes the path separator. If set, object listings will be
+ // truncated at this character.
+ //
+ // For example, if the bucket contains objects foo/bar, foo/baz, and foo,
+ // then a delimiter of "/" will cause the listing to return "foo" and "foo/".
+ // Otherwise, the listing would have returned all object names.
+ //
+ // Note that objects returned that end in the delimiter may not be actual
+ // objects, e.g. you cannot read from (or write to, or delete) an object "foo/",
+ // both because no actual object exists and because B2 disallows object names
+ // that end with "/". If you want to ensure that all objects returned by
+ // ListObjects and ListCurrentObjects are actual objects, leave this unset.
+ delimiter string
+
+ name string
+ id string
+}
+
+func (b *Bucket) listObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) {
+ if c == nil {
+ c = &cursor{}
+ }
+ fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.prefix, c.delimiter)
+ if err != nil {
+ return nil, nil, err
+ }
+ var next *cursor
+ if name != "" && id != "" {
+ next = &cursor{
+ prefix: c.prefix,
+ delimiter: c.delimiter,
+ name: name,
+ id: id,
+ }
+ }
+ var objects []*Object
+ for _, f := range fs {
+ objects = append(objects, &Object{
+ name: f.name(),
+ f: f,
+ b: b,
+ })
+ }
+ var rtnErr error
+ if len(objects) == 0 || next == nil {
+ rtnErr = io.EOF
+ }
+ return objects, next, rtnErr
+}
+
+func (b *Bucket) listCurrentObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) {
+ if c == nil {
+ c = &cursor{}
+ }
+ fs, name, err := b.b.listFileNames(ctx, count, c.name, c.prefix, c.delimiter)
+ if err != nil {
+ return nil, nil, err
+ }
+ var next *cursor
+ if name != "" {
+ next = &cursor{
+ prefix: c.prefix,
+ delimiter: c.delimiter,
+ name: name,
+ }
+ }
+ var objects []*Object
+ for _, f := range fs {
+ objects = append(objects, &Object{
+ name: f.name(),
+ f: f,
+ b: b,
+ })
+ }
+ var rtnErr error
+ if len(objects) == 0 || next == nil {
+ rtnErr = io.EOF
+ }
+ return objects, next, rtnErr
+}
+
+func (b *Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) {
+ if c == nil {
+ c = &cursor{}
+ }
+ fs, name, err := b.b.listUnfinishedLargeFiles(ctx, count, c.name)
+ if err != nil {
+ return nil, nil, err
+ }
+ var next *cursor
+ if name != "" {
+ next = &cursor{
+ name: name,
+ }
+ }
+ var objects []*Object
+ for _, f := range fs {
+ objects = append(objects, &Object{
+ name: f.name(),
+ f: f,
+ b: b,
+ })
+ }
+ var rtnErr error
+ if len(objects) == 0 || next == nil {
+ rtnErr = io.EOF
+ }
+ return objects, next, rtnErr
+}
diff --git a/b2/key.go b/b2/key.go
new file mode 100644
index 0000000..68649a6
--- /dev/null
+++ b/b2/key.go
@@ -0,0 +1,156 @@
+// Copyright 2018, the Blazer authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package b2
+
+import (
+ "context"
+ "errors"
+ "io"
+ "time"
+)
+
+// Key is a B2 application key. A Key grants limited access on a global or
+// per-bucket basis.
+type Key struct {
+ c *Client
+ k beKeyInterface
+}
+
+// Capabilities returns the list of capabilites granted by this application
+// key.
+func (k *Key) Capabilities() []string { return k.k.caps() }
+
+// Name returns the user-supplied name of this application key. Key names are
+// useless.
+func (k *Key) Name() string { return k.k.name() }
+
+// Expires returns the expiration date of this application key.
+func (k *Key) Expires() time.Time { return k.k.expires() }
+
+// Delete removes the key from B2.
+func (k *Key) Delete(ctx context.Context) error { return k.k.del(ctx) }
+
+// Secret returns the value that should be passed into NewClient(). It is only
+// available on newly created keys; it is not available from ListKey
+// operations.
+func (k *Key) Secret() string { return k.k.secret() }
+
+// ID returns the application key ID. This, plus the secret, is necessary to
+// authenticate to B2.
+func (k *Key) ID() string { return k.k.id() }
+
+type keyOptions struct {
+ caps []string
+ prefix string
+ lifetime time.Duration
+}
+
+// KeyOption specifies desired properties for application keys.
+type KeyOption func(*keyOptions)
+
+// Lifetime requests a key with the given lifetime.
+func Lifetime(d time.Duration) KeyOption {
+ return func(k *keyOptions) {
+ k.lifetime = d
+ }
+}
+
+// Deadline requests a key that expires after the given date.
+func Deadline(t time.Time) KeyOption {
+ d := t.Sub(time.Now())
+ return Lifetime(d)
+}
+
+// Capabilities requests a key with the given capability.
+func Capabilities(caps ...string) KeyOption {
+ return func(k *keyOptions) {
+ k.caps = append(k.caps, caps...)
+ }
+}
+
+// Prefix limits the requested application key to be valid only for objects
+// that begin with prefix. This can only be used when requesting an
+// application key within a specific bucket.
+func Prefix(prefix string) KeyOption {
+ return func(k *keyOptions) {
+ k.prefix = prefix
+ }
+}
+
+// CreateKey creates a global application key that is valid for all buckets in
+// this project. The key's secret will only be accessible on the object
+// returned from this call.
+func (c *Client) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) {
+ var ko keyOptions
+ for _, o := range opts {
+ o(&ko)
+ }
+ if ko.prefix != "" {
+ return nil, errors.New("Prefix is not a valid option for global application keys")
+ }
+ ki, err := c.backend.createKey(ctx, name, ko.caps, ko.lifetime, "", "")
+ if err != nil {
+ return nil, err
+ }
+ return &Key{
+ c: c,
+ k: ki,
+ }, nil
+}
+
+// ListKeys lists all the keys associated with this project. It takes the
+// maximum number of keys it should return in a call, as well as a cursor
+// (which should be empty for the initial call). It will return up to count
+// keys, as well as the cursor for the next invocation.
+//
+// ListKeys returns io.EOF when there are no more keys, although it may do so
+// concurrently with the final set of keys.
+func (c *Client) ListKeys(ctx context.Context, count int, cursor string) ([]*Key, string, error) {
+ ks, next, err := c.backend.listKeys(ctx, count, cursor)
+ if err != nil {
+ return nil, "", err
+ }
+ if len(ks) == 0 {
+ return nil, "", io.EOF
+ }
+ var keys []*Key
+ for _, k := range ks {
+ keys = append(keys, &Key{
+ c: c,
+ k: k,
+ })
+ }
+ var rerr error
+ if next == "" {
+ rerr = io.EOF
+ }
+ return keys, next, rerr
+}
+
+// CreateKey creates a scoped application key that is valid only for this bucket.
+func (b *Bucket) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) {
+ var ko keyOptions
+ for _, o := range opts {
+ o(&ko)
+ }
+ ki, err := b.r.createKey(ctx, name, ko.caps, ko.lifetime, b.b.id(), ko.prefix)
+ if err != nil {
+ return nil, err
+ }
+ return &Key{
+ c: b.c,
+ k: ki,
+ }, nil
+}
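For orientation, a minimal sketch of the application-key API defined in the new b2/key.go (README style, assuming the usual context/io/time imports; the key name, prefix, and capability strings are placeholders taken from the tests):

```go
func makeScopedKey(ctx context.Context, bucket *b2.Bucket) (*b2.Key, error) {
	// Bucket-scoped key limited to objects under "reports/", expiring in 24h.
	// The secret is only available on the Key returned here, not from ListKeys.
	return bucket.CreateKey(ctx, "reports-key",
		b2.Capabilities("listFiles", "writeFiles"),
		b2.Prefix("reports/"),
		b2.Lifetime(24*time.Hour))
}

func listAllKeys(ctx context.Context, client *b2.Client) ([]*b2.Key, error) {
	var all []*b2.Key
	var cursor string
	for {
		// Up to 100 keys per call; io.EOF signals the final page.
		keys, next, err := client.ListKeys(ctx, 100, cursor)
		if err != nil && err != io.EOF {
			return nil, err
		}
		all = append(all, keys...)
		if err == io.EOF {
			return all, nil
		}
		cursor = next
	}
}
```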
diff --git a/b2/monitor.go b/b2/monitor.go
index 16d0473..ccf5d13 100644
--- a/b2/monitor.go
+++ b/b2/monitor.go
@@ -1,4 +1,4 @@
-// Copyright 2017, Google
+// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/b2/reader.go b/b2/reader.go
index 643e17d..e8e85ac 100644
--- a/b2/reader.go
+++ b/b2/reader.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,9 +17,13 @@ package b2
import (
"bytes"
"context"
+ "crypto/sha1"
"errors"
+ "fmt"
+ "hash"
"io"
"sync"
+ "time"
"github.com/kurin/blazer/internal/blog"
)
@@ -38,21 +42,25 @@ type Reader struct {
// 10MB.
ChunkSize int
- ctx context.Context
- cancel context.CancelFunc // cancels ctx
- o *Object
- name string
- offset int64 // the start of the file
- length int64 // the length to read, or -1
- csize int // chunk size
- read int // amount read
- chwid int // chunks written
- chrid int // chunks read
- chbuf chan *rchunk
- init sync.Once
- rmux sync.Mutex // guards rcond
- rcond *sync.Cond
- chunks map[int]*rchunk
+ ctx context.Context
+ cancel context.CancelFunc // cancels ctx
+ o *Object
+ name string
+ offset int64 // the start of the file
+ length int64 // the length to read, or -1
+ csize int // chunk size
+ read int // amount read
+ chwid int // chunks written
+ chrid int // chunks read
+ chbuf chan *rchunk
+ init sync.Once
+ chunks map[int]*rchunk
+ vrfy hash.Hash
+ readOffEnd bool
+ sha1 string
+
+ rmux sync.Mutex // guards rcond
+ rcond *sync.Cond
emux sync.RWMutex // guards err, believe it or not
err error
@@ -122,10 +130,12 @@ func (r *Reader) thread() {
}
r.length -= size
}
+ var b backoff
redo:
fr, err := r.o.b.b.downloadFileByName(r.ctx, r.name, offset, size)
if err == errNoMoreContent {
// this read generated a 416 so we are entirely past the end of the object
+ r.readOffEnd = true
buf.final = true
r.rmux.Lock()
r.chunks[chunkID] = buf
@@ -138,7 +148,10 @@ func (r *Reader) thread() {
r.rcond.Broadcast()
return
}
- rsize, _, _, _ := fr.stats()
+ rsize, _, sha1, _ := fr.stats()
+ if len(sha1) == 40 && r.sha1 != sha1 {
+ r.sha1 = sha1
+ }
mr := &meteredReader{r: noopResetter{fr}, size: int(rsize)}
r.smux.Lock()
r.smap[chunkID] = mr
@@ -150,7 +163,12 @@ func (r *Reader) thread() {
r.smux.Unlock()
if i < int64(rsize) || err == io.ErrUnexpectedEOF {
// Probably the network connection was closed early. Retry.
- blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying", chunkID, i, rsize)
+ blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying after %v", chunkID, i, rsize, b)
+ if err := b.wait(r.ctx); err != nil {
+ r.setErr(err)
+ r.rcond.Broadcast()
+ return
+ }
buf.Reset()
goto redo
}
@@ -211,13 +229,13 @@ func (r *Reader) initFunc() {
r.thread()
r.chbuf <- &rchunk{}
}
+ r.vrfy = sha1.New()
}
func (r *Reader) Read(p []byte) (int, error) {
if err := r.getErr(); err != nil {
return 0, err
}
- // TODO: check the SHA1 hash here and verify it on Close.
r.init.Do(r.initFunc)
chunk, err := r.curChunk()
if err != nil {
@@ -225,6 +243,7 @@ func (r *Reader) Read(p []byte) (int, error) {
return 0, err
}
n, err := chunk.Read(p)
+ r.vrfy.Write(p[:n]) // Hash.Write never returns an error.
r.read += n
if err == io.EOF {
if chunk.final {
@@ -256,38 +275,49 @@ func (r *Reader) status() *ReaderStatus {
return rs
}
-// copied from io.Copy, basically.
-func copyContext(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
- buf := make([]byte, 32*1024)
- for {
- if ctx.Err() != nil {
- err = ctx.Err()
- return
- }
- nr, er := src.Read(buf)
- if nr > 0 {
- nw, ew := dst.Write(buf[0:nr])
- if nw > 0 {
- written += int64(nw)
- }
- if ew != nil {
- err = ew
- break
- }
- if nr != nw {
- err = io.ErrShortWrite
- break
- }
- }
- if er == io.EOF {
- break
- }
- if er != nil {
- err = er
- break
+// Verify checks the SHA1 hash on download and compares it to the SHA1 hash
+// submitted on upload. If the two differ, this returns an error. If the
+// correct hash could not be calculated (if, for example, the entire object was
+// not read, or if the object was uploaded as a "large file" and thus the SHA1
+// hash was not sent), this returns (nil, false).
+func (r *Reader) Verify() (error, bool) {
+ got := fmt.Sprintf("%x", r.vrfy.Sum(nil))
+ if r.sha1 == got {
+ return nil, true
+ }
+ // TODO: if the exact length of the file is requested AND the checksum is
+ // bad, this will return (nil, false) instead of (an error, true). This is
+ // because there's no good way that I can tell to determine that we've hit
+ // the end of the file without reading off the end. Consider reading N+1
+ // bytes at the very end to close this hole.
+ if r.offset > 0 || !r.readOffEnd || len(r.sha1) != 40 {
+ return nil, false
+ }
+ return fmt.Errorf("bad hash: got %v, want %v", got, r.sha1), true
+}
+
+// strip a writer of any non-Write methods
+type onlyWriter struct{ w io.Writer }
+
+func (ow onlyWriter) Write(p []byte) (int, error) { return ow.w.Write(p) }
+
+func copyContext(ctx context.Context, w io.Writer, r io.Reader) (int64, error) {
+ var n int64
+ var err error
+ done := make(chan struct{})
+ go func() {
+ if _, ok := w.(*Writer); ok {
+ w = onlyWriter{w}
}
+ n, err = io.Copy(w, r)
+ close(done)
+ }()
+ select {
+ case <-done:
+ return n, err
+ case <-ctx.Done():
+ return 0, ctx.Err()
}
- return written, err
}
type noopResetter struct {
@@ -295,3 +325,24 @@ type noopResetter struct {
}
func (noopResetter) Reset() error { return nil }
+
+type backoff time.Duration
+
+func (b *backoff) wait(ctx context.Context) error {
+ if *b == 0 {
+ *b = backoff(time.Millisecond)
+ }
+ select {
+ case <-time.After(time.Duration(*b)):
+ if time.Duration(*b) < time.Second*10 {
+ *b <<= 1
+ }
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (b backoff) String() string {
+ return time.Duration(b).String()
+}
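A short sketch of the new Reader.Verify check added above (README style; the helper name and destination writer are illustrative):

```go
func downloadAndVerify(ctx context.Context, bucket *b2.Bucket, name string, dst io.Writer) error {
	r := bucket.Object(name).NewReader(ctx)
	defer r.Close()
	if _, err := io.Copy(dst, r); err != nil {
		return err
	}
	// Verify reports (nil, false) when no hash could be checked, e.g. for a
	// partial read or a large file uploaded without a stored SHA1.
	if err, ok := r.Verify(); ok && err != nil {
		return err
	}
	return nil
}
```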
diff --git a/b2/readerat.go b/b2/readerat.go
index 7ef1352..84ee982 100644
--- a/b2/readerat.go
+++ b/b2/readerat.go
@@ -1,4 +1,4 @@
-// Copyright 2017, Google
+// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/b2/writer.go b/b2/writer.go
index becfc58..cce60e7 100644
--- a/b2/writer.go
+++ b/b2/writer.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -65,8 +65,11 @@ type Writer struct {
csize int
ctx context.Context
- cancel context.CancelFunc
+ cancel context.CancelFunc // cancels ctx
+ ctxf func() context.Context
+ errf func(error)
ready chan chunk
+ cdone chan struct{}
wg sync.WaitGroup
start sync.Once
once sync.Once
@@ -100,11 +103,19 @@ func (w *Writer) setErr(err error) {
}
w.emux.Lock()
defer w.emux.Unlock()
- if w.err == nil {
- blog.V(1).Infof("error writing %s: %v", w.name, err)
- w.err = err
- w.cancel()
+ if w.err != nil {
+ return
+ }
+ blog.V(1).Infof("error writing %s: %v", w.name, err)
+ w.err = err
+ w.cancel()
+ if w.ctxf == nil {
+ return
}
+ if w.errf == nil {
+ w.errf = func(error) {}
+ }
+ w.errf(w.file.cancel(w.ctxf()))
}
func (w *Writer) getErr() error {
@@ -127,6 +138,15 @@ func (w *Writer) completeChunk(id int) {
var gid int32
+func sleepCtx(ctx context.Context, d time.Duration) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-time.After(d):
+ return nil
+ }
+}
+
func (w *Writer) thread() {
w.wg.Add(1)
go func() {
@@ -138,57 +158,63 @@ func (w *Writer) thread() {
return
}
for {
- chunk, ok := <-w.ready
- if !ok {
+ var cnk chunk
+ select {
+ case cnk = <-w.ready:
+ case <-w.cdone:
return
}
- if sha, ok := w.seen[chunk.id]; ok {
- if sha != chunk.buf.Hash() {
+ if sha, ok := w.seen[cnk.id]; ok {
+ if sha != cnk.buf.Hash() {
w.setErr(errors.New("resumable upload was requested, but chunks don't match"))
return
}
- chunk.buf.Close()
- w.completeChunk(chunk.id)
- blog.V(2).Infof("skipping chunk %d", chunk.id)
+ cnk.buf.Close()
+ w.completeChunk(cnk.id)
+ blog.V(2).Infof("skipping chunk %d", cnk.id)
continue
}
- blog.V(2).Infof("thread %d handling chunk %d", id, chunk.id)
- r, err := chunk.buf.Reader()
+ blog.V(2).Infof("thread %d handling chunk %d", id, cnk.id)
+ r, err := cnk.buf.Reader()
if err != nil {
w.setErr(err)
return
}
- mr := &meteredReader{r: r, size: chunk.buf.Len()}
- w.registerChunk(chunk.id, mr)
+ mr := &meteredReader{r: r, size: cnk.buf.Len()}
+ w.registerChunk(cnk.id, mr)
sleep := time.Millisecond * 15
redo:
- n, err := fc.uploadPart(w.ctx, mr, chunk.buf.Hash(), chunk.buf.Len(), chunk.id)
- if n != chunk.buf.Len() || err != nil {
+ n, err := fc.uploadPart(w.ctx, mr, cnk.buf.Hash(), cnk.buf.Len(), cnk.id)
+ if n != cnk.buf.Len() || err != nil {
if w.o.b.r.reupload(err) {
- time.Sleep(sleep)
+ if err := sleepCtx(w.ctx, sleep); err != nil {
+ w.setErr(err)
+ w.completeChunk(cnk.id)
+ cnk.buf.Close() // TODO: log error
+ }
sleep *= 2
if sleep > time.Second*15 {
sleep = time.Second * 15
}
- blog.V(1).Infof("b2 writer: wrote %d of %d: error: %v; retrying", n, chunk.buf.Len(), err)
+ blog.V(1).Infof("b2 writer: wrote %d of %d: error: %v; retrying", n, cnk.buf.Len(), err)
f, err := w.file.getUploadPartURL(w.ctx)
if err != nil {
w.setErr(err)
- w.completeChunk(chunk.id)
- chunk.buf.Close() // TODO: log error
+ w.completeChunk(cnk.id)
+ cnk.buf.Close() // TODO: log error
return
}
fc = f
goto redo
}
w.setErr(err)
- w.completeChunk(chunk.id)
- chunk.buf.Close() // TODO: log error
+ w.completeChunk(cnk.id)
+ cnk.buf.Close() // TODO: log error
return
}
- w.completeChunk(chunk.id)
- chunk.buf.Close() // TODO: log error
- blog.V(2).Infof("chunk %d handled", chunk.id)
+ w.completeChunk(cnk.id)
+ cnk.buf.Close() // TODO: log error
+ blog.V(2).Infof("chunk %d handled", cnk.id)
}
}()
}
@@ -221,6 +247,9 @@ func (w *Writer) init() {
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
w.init()
if err := w.getErr(); err != nil {
return 0, err
@@ -300,21 +329,28 @@ func (w *Writer) getLargeFile() (beLargeFileInterface, error) {
}
return w.o.b.b.startLargeFile(w.ctx, w.name, ctype, w.info)
}
+ var got bool
+ iter := w.o.b.List(w.ctx, ListPrefix(w.name), ListUnfinished())
+ var fi beFileInterface
+ for iter.Next() {
+ obj := iter.Object()
+ if obj.Name() == w.name {
+ got = true
+ fi = obj.f
+ }
+ }
+ if iter.Err() != nil {
+ return nil, iter.Err()
+ }
+ if !got {
+ w.Resume = false
+ return w.getLargeFile()
+ }
+
next := 1
seen := make(map[int]string)
var size int64
- var fi beFileInterface
for {
- cur := &Cursor{name: w.name}
- objs, _, err := w.o.b.ListObjects(w.ctx, 1, cur)
- if err != nil {
- return nil, err
- }
- if len(objs) < 1 || objs[0].name != w.name {
- w.Resume = false
- return w.getLargeFile()
- }
- fi = objs[0].f
parts, n, err := fi.listParts(w.ctx, next, 100)
if err != nil {
return nil, err
@@ -348,6 +384,7 @@ func (w *Writer) sendChunk() error {
}
w.file = lf
w.ready = make(chan chunk)
+ w.cdone = make(chan struct{})
if w.ConcurrentUploads < 1 {
w.ConcurrentUploads = 1
}
@@ -359,6 +396,8 @@ func (w *Writer) sendChunk() error {
return err
}
select {
+ case <-w.cdone:
+ return nil
case w.ready <- chunk{
id: w.cidx + 1,
buf: w.w,
@@ -443,6 +482,8 @@ func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
func (w *Writer) Close() error {
w.done.Do(func() {
if !w.everStarted {
+ w.init()
+ w.setErr(w.simpleWriteFile())
return
}
defer w.o.b.c.removeWriter(w)
@@ -462,7 +503,9 @@ func (w *Writer) Close() error {
return
}
}
- close(w.ready)
+ // See https://github.com/kurin/blazer/issues/60 for why we use a special
+ // channel for this.
+ close(w.cdone)
w.wg.Wait()
f, err := w.file.finishLargeFile(w.ctx)
if err != nil {
@@ -476,18 +519,54 @@ func (w *Writer) Close() error {
// WithAttrs sets the writable attributes of the resulting file to given
// values. WithAttrs must be called before the first call to Write.
+//
+// DEPRECATED: Use WithAttrsOption instead.
func (w *Writer) WithAttrs(attrs *Attrs) *Writer {
w.contentType = attrs.ContentType
w.info = make(map[string]string)
for k, v := range attrs.Info {
w.info[k] = v
}
+ if len(w.info) < 10 && attrs.SHA1 != "" {
+ w.info["large_file_sha1"] = attrs.SHA1
+ }
if len(w.info) < 10 && !attrs.LastModified.IsZero() {
w.info["src_last_modified_millis"] = fmt.Sprintf("%d", attrs.LastModified.UnixNano()/1e6)
}
return w
}
+// A WriterOption sets Writer-specific behavior.
+type WriterOption func(*Writer)
+
+// WithAttrsOption attaches the given Attrs to the writer.
+func WithAttrsOption(attrs *Attrs) WriterOption {
+ return func(w *Writer) {
+ w.WithAttrs(attrs)
+ }
+}
+
+// WithCancelOnError requests the writer, if it has started a large file
+// upload, to call b2_cancel_large_file on any permanent error. It calls ctxf
+// to obtain a context with which to cancel the file; this is to allow callers
+// to set specific timeouts. If errf is non-nil, then it is called with the
+// (possibly nil) output of b2_cancel_large_file.
+func WithCancelOnError(ctxf func() context.Context, errf func(error)) WriterOption {
+ return func(w *Writer) {
+ w.ctxf = ctxf
+ w.errf = errf
+ }
+}
+
+// DefaultWriterOptions returns a ClientOption that will apply the given
+// WriterOptions to every Writer. These options can be overridden by passing
+// new options to NewWriter.
+func DefaultWriterOptions(opts ...WriterOption) ClientOption {
+ return func(c *clientOptions) {
+ c.writerOpts = opts
+ }
+}
+
func (w *Writer) status() *WriterStatus {
w.smux.RLock()
defer w.smux.RUnlock()
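
The WriterOption additions above compose with the client options the b2 package already exposes. A minimal sketch of how they might fit together; it assumes NewWriter accepts WriterOptions, as the DefaultWriterOptions comment implies, and the credentials, bucket name, and object name are placeholders:

```go
package example

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/kurin/blazer/b2"
)

// upload sketches the new options working together; id, key, bucketName,
// and objName are placeholders supplied by the caller.
func upload(ctx context.Context, id, key, bucketName, objName string, src io.Reader) error {
	// DefaultWriterOptions makes these WriterOptions apply to every Writer
	// created from this client.
	client, err := b2.NewClient(ctx, id, key,
		b2.DefaultWriterOptions(
			// On a permanent error, ask the writer to cancel the unfinished
			// large file and report the result of that cleanup call.
			b2.WithCancelOnError(
				func() context.Context { return context.Background() },
				func(err error) {
					if err != nil {
						fmt.Fprintln(os.Stderr, "b2_cancel_large_file failed:", err)
					}
				},
			),
		))
	if err != nil {
		return err
	}
	bucket, err := client.Bucket(ctx, bucketName)
	if err != nil {
		return err
	}
	// Assumption: NewWriter accepts WriterOptions, as the DefaultWriterOptions
	// doc comment implies; WithAttrsOption replaces the deprecated WithAttrs.
	w := bucket.Object(objName).NewWriter(ctx,
		b2.WithAttrsOption(&b2.Attrs{ContentType: "application/octet-stream"}))
	if _, err := w.ReadFrom(src); err != nil {
		w.Close()
		return err
	}
	return w.Close()
}
```

WithAttrsOption covers the same ground as the deprecated WithAttrs, but because it is an option it can also be applied client-wide through DefaultWriterOptions.
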
diff --git a/base/base.go b/base/base.go
index 11cd0da..8ca0950 100644
--- a/base/base.go
+++ b/base/base.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -42,7 +42,7 @@ import (
const (
APIBase = "https://api.backblazeb2.com"
- DefaultUserAgent = "blazer/0.4.4"
+ DefaultUserAgent = "blazer/0.5.3"
)
type b2err struct {
@@ -268,6 +268,8 @@ type B2 struct {
downloadURI string
minPartSize int
opts *b2Options
+ bucket string // restricted to this bucket if present
+ pfx string // restricted to objects with this prefix if present
}
// Update replaces the B2 object with a new one, in-place.
@@ -319,6 +321,13 @@ func (rb *requestBody) getBody() io.Reader {
if rb == nil {
return nil
}
+ if rb.getSize() == 0 {
+ // https://github.com/kurin/blazer/issues/57
+ // When body is non-nil, but the request's ContentLength is 0, it is
+ // replaced with -1, which causes the client to send a chunked encoding,
+ // which confuses B2.
+ return http.NoBody
+ }
return rb.body
}
@@ -427,7 +436,9 @@ func AuthorizeAccount(ctx context.Context, account, key string, opts ...AuthOpti
authToken: b2resp.AuthToken,
apiURI: b2resp.URI,
downloadURI: b2resp.DownloadURI,
- minPartSize: b2resp.MinPartSize,
+ minPartSize: b2resp.PartSize,
+ bucket: b2resp.Allowed.Bucket,
+ pfx: b2resp.Allowed.Prefix,
opts: b2opts,
}, nil
}
@@ -479,6 +490,14 @@ func ForceCapExceeded() AuthOption {
}
}
+// SetAPIBase returns an AuthOption that uses the given URL as the base for API
+// requests.
+func SetAPIBase(url string) AuthOption {
+ return func(o *b2Options) {
+ o.apiBase = url
+ }
+}
+
type LifecycleRule struct {
Prefix string
DaysNewUntilHidden int
@@ -524,7 +543,7 @@ func (b *B2) CreateBucket(ctx context.Context, name, btype string, info map[stri
Name: name,
Info: b2resp.Info,
LifecycleRules: respRules,
- id: b2resp.BucketID,
+ ID: b2resp.BucketID,
rev: b2resp.Revision,
b2: b,
}, nil
@@ -534,7 +553,7 @@ func (b *B2) CreateBucket(ctx context.Context, name, btype string, info map[stri
func (b *Bucket) DeleteBucket(ctx context.Context) error {
b2req := &b2types.DeleteBucketRequest{
AccountID: b.b2.accountID,
- BucketID: b.id,
+ BucketID: b.ID,
}
headers := map[string]string{
"Authorization": b.b2.authToken,
@@ -548,7 +567,7 @@ type Bucket struct {
Type string
Info map[string]string
LifecycleRules []LifecycleRule
- id string
+ ID string
rev int
b2 *B2
}
@@ -565,7 +584,7 @@ func (b *Bucket) Update(ctx context.Context) (*Bucket, error) {
}
b2req := &b2types.UpdateBucketRequest{
AccountID: b.b2.accountID,
- BucketID: b.id,
+ BucketID: b.ID,
// Name: b.Name,
Type: b.Type,
Info: b.Info,
@@ -592,7 +611,7 @@ func (b *Bucket) Update(ctx context.Context) (*Bucket, error) {
Type: b2resp.Type,
Info: b2resp.Info,
LifecycleRules: respRules,
- id: b2resp.BucketID,
+ ID: b2resp.BucketID,
b2: b.b2,
}, nil
}
@@ -606,6 +625,7 @@ func (b *Bucket) BaseURL() string {
func (b *B2) ListBuckets(ctx context.Context) ([]*Bucket, error) {
b2req := &b2types.ListBucketsRequest{
AccountID: b.accountID,
+ Bucket: b.bucket,
}
b2resp := &b2types.ListBucketsResponse{}
headers := map[string]string{
@@ -629,7 +649,7 @@ func (b *B2) ListBuckets(ctx context.Context) ([]*Bucket, error) {
Type: bucket.Type,
Info: bucket.Info,
LifecycleRules: rules,
- id: bucket.BucketID,
+ ID: bucket.BucketID,
rev: bucket.Revision,
b2: b,
})
@@ -660,7 +680,7 @@ func (url *URL) Reload(ctx context.Context) error {
// GetUploadURL wraps b2_get_upload_url.
func (b *Bucket) GetUploadURL(ctx context.Context) (*URL, error) {
b2req := &b2types.GetUploadURLRequest{
- BucketID: b.id,
+ BucketID: b.ID,
}
b2resp := &b2types.GetUploadURLResponse{}
headers := map[string]string{
@@ -745,7 +765,7 @@ type LargeFile struct {
// StartLargeFile wraps b2_start_large_file.
func (b *Bucket) StartLargeFile(ctx context.Context, name, contentType string, info map[string]string) (*LargeFile, error) {
b2req := &b2types.StartLargeFileRequest{
- BucketID: b.id,
+ BucketID: b.ID,
Name: name,
ContentType: contentType,
Info: info,
@@ -927,7 +947,7 @@ func (l *LargeFile) FinishLargeFile(ctx context.Context) (*File, error) {
// ListUnfinishedLargeFiles wraps b2_list_unfinished_large_files.
func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]*File, string, error) {
b2req := &b2types.ListUnfinishedLargeFilesRequest{
- BucketID: b.id,
+ BucketID: b.ID,
Continuation: continuation,
Count: count,
}
@@ -959,10 +979,13 @@ func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, contin
// ListFileNames wraps b2_list_file_names.
func (b *Bucket) ListFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]*File, string, error) {
+ if prefix == "" {
+ prefix = b.b2.pfx
+ }
b2req := &b2types.ListFileNamesRequest{
Count: count,
Continuation: continuation,
- BucketID: b.id,
+ BucketID: b.ID,
Prefix: prefix,
Delimiter: delimiter,
}
@@ -999,8 +1022,11 @@ func (b *Bucket) ListFileNames(ctx context.Context, count int, continuation, pre
// ListFileVersions wraps b2_list_file_versions.
func (b *Bucket) ListFileVersions(ctx context.Context, count int, startName, startID, prefix, delimiter string) ([]*File, string, string, error) {
+ if prefix == "" {
+ prefix = b.b2.pfx
+ }
b2req := &b2types.ListFileVersionsRequest{
- BucketID: b.id,
+ BucketID: b.ID,
Count: count,
StartName: startName,
StartID: startID,
@@ -1038,11 +1064,12 @@ func (b *Bucket) ListFileVersions(ctx context.Context, count int, startName, sta
}
// GetDownloadAuthorization wraps b2_get_download_authorization.
-func (b *Bucket) GetDownloadAuthorization(ctx context.Context, prefix string, valid time.Duration) (string, error) {
+func (b *Bucket) GetDownloadAuthorization(ctx context.Context, prefix string, valid time.Duration, contentDisposition string) (string, error) {
b2req := &b2types.GetDownloadAuthorizationRequest{
- BucketID: b.id,
- Prefix: prefix,
- Valid: int(valid.Seconds()),
+ BucketID: b.ID,
+ Prefix: prefix,
+ Valid: int(valid.Seconds()),
+ ContentDisposition: contentDisposition,
}
b2resp := &b2types.GetDownloadAuthorizationResponse{}
headers := map[string]string{
@@ -1121,9 +1148,13 @@ func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, si
}
info[name] = val
}
+ sha1 := resp.Header.Get("X-Bz-Content-Sha1")
+ if sha1 == "none" && info["Large_file_sha1"] != "" {
+ sha1 = info["Large_file_sha1"]
+ }
return &FileReader{
ReadCloser: resp.Body,
- SHA1: resp.Header.Get("X-Bz-Content-Sha1"),
+ SHA1: sha1,
ID: resp.Header.Get("X-Bz-File-Id"),
ContentType: resp.Header.Get("Content-Type"),
ContentLength: int(clen),
@@ -1134,7 +1165,7 @@ func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, si
// HideFile wraps b2_hide_file.
func (b *Bucket) HideFile(ctx context.Context, name string) (*File, error) {
b2req := &b2types.HideFileRequest{
- BucketID: b.id,
+ BucketID: b.ID,
File: name,
}
b2resp := &b2types.HideFileResponse{}
@@ -1190,3 +1221,78 @@ func (f *File) GetFileInfo(ctx context.Context) (*FileInfo, error) {
}
return f.Info, nil
}
+
+// Key is a B2 application key.
+type Key struct {
+ ID string
+ Secret string
+ Name string
+ Capabilities []string
+ Expires time.Time
+ b2 *B2
+}
+
+// CreateKey wraps b2_create_key.
+func (b *B2) CreateKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (*Key, error) {
+ b2req := &b2types.CreateKeyRequest{
+ AccountID: b.accountID,
+ Capabilities: caps,
+ Name: name,
+ Valid: int(valid.Seconds()),
+ BucketID: bucketID,
+ Prefix: prefix,
+ }
+ b2resp := &b2types.CreateKeyResponse{}
+ headers := map[string]string{
+ "Authorization": b.authToken,
+ }
+ if err := b.opts.makeRequest(ctx, "b2_create_key", "POST", b.apiURI+b2types.V1api+"b2_create_key", b2req, b2resp, headers, nil); err != nil {
+ return nil, err
+ }
+ return &Key{
+ Name: b2resp.Name,
+ ID: b2resp.ID,
+ Secret: b2resp.Secret,
+ Capabilities: b2resp.Capabilities,
+ Expires: millitime(b2resp.Expires),
+ b2: b,
+ }, nil
+}
+
+// Delete wraps b2_delete_key.
+func (k *Key) Delete(ctx context.Context) error {
+ b2req := &b2types.DeleteKeyRequest{
+ KeyID: k.ID,
+ }
+ headers := map[string]string{
+ "Authorization": k.b2.authToken,
+ }
+ return k.b2.opts.makeRequest(ctx, "b2_delete_key", "POST", k.b2.apiURI+b2types.V1api+"b2_delete_key", b2req, nil, headers, nil)
+}
+
+// ListKeys wraps b2_list_keys.
+func (b *B2) ListKeys(ctx context.Context, max int, next string) ([]*Key, string, error) {
+ b2req := &b2types.ListKeysRequest{
+ AccountID: b.accountID,
+ Max: max,
+ Next: next,
+ }
+ headers := map[string]string{
+ "Authorization": b.authToken,
+ }
+ b2resp := &b2types.ListKeysResponse{}
+ if err := b.opts.makeRequest(ctx, "b2_list_keys", "POST", b.apiURI+b2types.V1api+"b2_list_keys", b2req, b2resp, headers, nil); err != nil {
+ return nil, "", err
+ }
+ var keys []*Key
+ for _, key := range b2resp.Keys {
+ keys = append(keys, &Key{
+ Name: key.Name,
+ ID: key.ID,
+ Capabilities: key.Capabilities,
+ Expires: millitime(key.Expires),
+ b2: b,
+ })
+ }
+ return keys, b2resp.Next, nil
+}
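
The key wrappers added to base are paginated through the continuation token that ListKeys returns. A small sketch of walking the full list, assuming an empty next token marks the end of the listing and using 100 keys per page as an arbitrary choice:

```go
package example

import (
	"context"

	"github.com/kurin/blazer/base"
)

// listAllKeys pages through every application key on the account with the
// ListKeys wrapper above. It treats an empty continuation token as the end
// of the listing.
func listAllKeys(ctx context.Context, b *base.B2) ([]*base.Key, error) {
	var all []*base.Key
	var cont string
	for {
		keys, next, err := b.ListKeys(ctx, 100, cont)
		if err != nil {
			return nil, err
		}
		all = append(all, keys...)
		if next == "" {
			return all, nil
		}
		cont = next
	}
}
```

The *base.B2 value comes from AuthorizeAccount, shown earlier in this file.
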
diff --git a/base/integration_test.go b/base/integration_test.go
index 7e615bc..2680b7f 100644
--- a/base/integration_test.go
+++ b/base/integration_test.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -265,7 +265,7 @@ func TestStorage(t *testing.T) {
}
// b2_get_download_authorization
- if _, err := bucket.GetDownloadAuthorization(ctx, "foo/", 24*time.Hour); err != nil {
+ if _, err := bucket.GetDownloadAuthorization(ctx, "foo/", 24*time.Hour, "attachment"); err != nil {
t.Errorf("failed to get download auth token: %v", err)
}
}
@@ -280,7 +280,7 @@ func TestUploadAuthAfterConnectionHang(t *testing.T) {
hung := make(chan struct{})
- // An http.RoundTripper that dies after sending ~10k bytes.
+ // An http.RoundTripper that dies and hangs after sending ~10k bytes.
hang := func() {
close(hung)
select {}
@@ -317,7 +317,6 @@ func TestUploadAuthAfterConnectionHang(t *testing.T) {
go func() {
ue.UploadFile(ctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil)
- t.Fatal("this ought not to be reachable")
}()
<-hung
diff --git a/base/strings.go b/base/strings.go
index 9ad08dc..f313666 100644
--- a/base/strings.go
+++ b/base/strings.go
@@ -1,4 +1,4 @@
-// Copyright 2017, Google
+// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/bin/b2keys/b2keys.go b/bin/b2keys/b2keys.go
new file mode 100644
index 0000000..a082036
--- /dev/null
+++ b/bin/b2keys/b2keys.go
@@ -0,0 +1,98 @@
+// b2keys is a small utility for managing Backblaze B2 keys.
+package main
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/google/subcommands"
+ "github.com/kurin/blazer/b2"
+)
+
+const (
+ apiID = "B2_ACCOUNT_ID"
+ apiKey = "B2_SECRET_KEY"
+)
+
+func main() {
+ subcommands.Register(&create{}, "")
+ flag.Parse()
+ ctx := context.Background()
+ os.Exit(int(subcommands.Execute(ctx)))
+}
+
+type create struct {
+ d *time.Duration
+ bucket *string
+ pfx *string
+}
+
+func (c *create) Name() string { return "create" }
+func (c *create) Synopsis() string { return "create a new application key" }
+func (c *create) Usage() string {
+ return "b2keys create [-bucket bucket] [-duration duration] [-prefix pfx] name capability [capability ...]"
+}
+
+func (c *create) SetFlags(fs *flag.FlagSet) {
+ c.d = fs.Duration("duration", 0, "the lifetime of the new key")
+ c.bucket = fs.String("bucket", "", "limit the key to the given bucket")
+ c.pfx = fs.String("prefix", "", "limit the key to the objects starting with prefix")
+}
+
+func (c *create) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
+ id := os.Getenv(apiID)
+ key := os.Getenv(apiKey)
+ if id == "" || key == "" {
+ fmt.Fprintf(os.Stderr, "both %s and %s must be set in the environment", apiID, apiKey)
+ return subcommands.ExitUsageError
+ }
+
+ args := f.Args()
+ if len(args) < 2 {
+ fmt.Fprintf(os.Stderr, "%s\n", c.Usage())
+ return subcommands.ExitUsageError
+ }
+ name := args[0]
+ caps := args[1:]
+
+ var opts []b2.KeyOption
+ if *c.d > 0 {
+ opts = append(opts, b2.Lifetime(*c.d))
+ }
+ if *c.pfx != "" {
+ opts = append(opts, b2.Prefix(*c.pfx))
+ }
+ opts = append(opts, b2.Capabilities(caps...))
+
+ client, err := b2.NewClient(ctx, id, key, b2.UserAgent("b2keys"))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ return subcommands.ExitFailure
+ }
+
+ var cr creater = client
+
+ if *c.bucket != "" {
+ bucket, err := client.Bucket(ctx, *c.bucket)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ return subcommands.ExitFailure
+ }
+ cr = bucket
+ }
+
+ b2key, err := cr.CreateKey(ctx, name, opts...)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ return subcommands.ExitFailure
+ }
+ fmt.Printf("key=%s, secret=%s\n", b2key.ID(), b2key.Secret())
+ return subcommands.ExitSuccess
+}
+
+type creater interface {
+ CreateKey(context.Context, string, ...b2.KeyOption) (*b2.Key, error)
+}
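
b2keys drives the b2 package's key API through its creater interface; the same calls can be made directly. A sketch of creating a restricted key, where the capability strings come from B2's b2_create_key documentation rather than this diff and "reports/" is a placeholder prefix:

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/kurin/blazer/b2"
)

// makeReadOnlyKey creates a key limited to one bucket, one prefix, and a
// 24h lifetime, using the same options b2keys passes through.
func makeReadOnlyKey(ctx context.Context, client *b2.Client, bucketName string) error {
	bucket, err := client.Bucket(ctx, bucketName)
	if err != nil {
		return err
	}
	key, err := bucket.CreateKey(ctx, "read-only",
		b2.Capabilities("listFiles", "readFiles"), // capability names are B2's, not defined here
		b2.Prefix("reports/"),
		b2.Lifetime(24*time.Hour))
	if err != nil {
		return err
	}
	fmt.Println("key id:", key.ID(), "secret:", key.Secret())
	return nil
}
```
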
diff --git a/bin/bonfire/bonfire.go b/bin/bonfire/bonfire.go
new file mode 100644
index 0000000..9b62b0b
--- /dev/null
+++ b/bin/bonfire/bonfire.go
@@ -0,0 +1,43 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/kurin/blazer/bonfire"
+ "github.com/kurin/blazer/internal/pyre"
+)
+
+type superManager struct {
+ *bonfire.LocalBucket
+ bonfire.FS
+}
+
+func main() {
+ ctx := context.Background()
+ mux := http.NewServeMux()
+
+ fs := bonfire.FS("/tmp/b2")
+ bm := &bonfire.LocalBucket{Port: 8822}
+
+ if err := pyre.RegisterServerOnMux(ctx, &pyre.Server{
+ Account: bonfire.Localhost(8822),
+ LargeFile: fs,
+ Bucket: bm,
+ }, mux); err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ sm := superManager{
+ LocalBucket: bm,
+ FS: fs,
+ }
+
+ pyre.RegisterLargeFileManagerOnMux(fs, mux)
+ pyre.RegisterSimpleFileManagerOnMux(fs, mux)
+ pyre.RegisterDownloadManagerOnMux(sm, mux)
+ fmt.Println("ok")
+ fmt.Println(http.ListenAndServe("localhost:8822", mux))
+}
diff --git a/bonfire/bonfire.go b/bonfire/bonfire.go
new file mode 100644
index 0000000..1c0106c
--- /dev/null
+++ b/bonfire/bonfire.go
@@ -0,0 +1,257 @@
+// Copyright 2018, the Blazer authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package bonfire implements the B2 service.
+package bonfire
+
+import (
+ "crypto/sha1"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "sync"
+
+ "github.com/kurin/blazer/internal/pyre"
+)
+
+type FS string
+
+func (f FS) open(fp string) (io.WriteCloser, error) {
+ if err := os.MkdirAll(filepath.Dir(fp), 0755); err != nil {
+ return nil, err
+ }
+ return os.Create(fp)
+}
+
+func (f FS) PartWriter(id string, part int) (io.WriteCloser, error) {
+ fp := filepath.Join(string(f), id, fmt.Sprintf("%d", part))
+ return f.open(fp)
+}
+
+func (f FS) Writer(bucket, name, id string) (io.WriteCloser, error) {
+ fp := filepath.Join(string(f), bucket, name, id)
+ return f.open(fp)
+}
+
+func (f FS) Parts(id string) ([]string, error) {
+ dir := filepath.Join(string(f), id)
+ file, err := os.Open(dir)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ fs, err := file.Readdir(0)
+ if err != nil {
+ return nil, err
+ }
+ shas := make([]string, len(fs)-1)
+ for _, fi := range fs {
+ if fi.Name() == "info" {
+ continue
+ }
+ i, err := strconv.ParseInt(fi.Name(), 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ p, err := os.Open(filepath.Join(dir, fi.Name()))
+ if err != nil {
+ return nil, err
+ }
+ sha := sha1.New()
+ if _, err := io.Copy(sha, p); err != nil {
+ p.Close()
+ return nil, err
+ }
+ p.Close()
+ shas[int(i)-1] = fmt.Sprintf("%x", sha.Sum(nil))
+ }
+ return shas, nil
+}
+
+type fi struct {
+ Name string
+ Bucket string
+}
+
+func (f FS) Start(bucketId, fileName, fileId string, bs []byte) error {
+ w, err := f.open(filepath.Join(string(f), fileId, "info"))
+ if err != nil {
+ return err
+ }
+ if err := json.NewEncoder(w).Encode(fi{Name: fileName, Bucket: bucketId}); err != nil {
+ w.Close()
+ return err
+ }
+ return w.Close()
+}
+
+func (f FS) Finish(fileId string) error {
+ r, err := os.Open(filepath.Join(string(f), fileId, "info"))
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+ var info fi
+ if err := json.NewDecoder(r).Decode(&info); err != nil {
+ return err
+ }
+ shas, err := f.Parts(fileId) // oh my god this is terrible
+ if err != nil {
+ return err
+ }
+ w, err := f.open(filepath.Join(string(f), info.Bucket, info.Name, fileId))
+ if err != nil {
+ return err
+ }
+ for i := 1; i <= len(shas); i++ {
+ r, err := os.Open(filepath.Join(string(f), fileId, fmt.Sprintf("%d", i)))
+ if err != nil {
+ w.Close()
+ return err
+ }
+ if _, err := io.Copy(w, r); err != nil {
+ w.Close()
+ r.Close()
+ return err
+ }
+ r.Close()
+ }
+ if err := w.Close(); err != nil {
+ return err
+ }
+ return os.RemoveAll(filepath.Join(string(f), fileId))
+}
+
+func (f FS) ObjectByName(bucket, name string) (pyre.DownloadableObject, error) {
+ dir := filepath.Join(string(f), bucket, name)
+ d, err := os.Open(dir)
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+ fis, err := d.Readdir(0)
+ if err != nil {
+ return nil, err
+ }
+ sort.Slice(fis, func(i, j int) bool { return fis[i].ModTime().Before(fis[j].ModTime()) })
+ o, err := os.Open(filepath.Join(dir, fis[0].Name()))
+ if err != nil {
+ return nil, err
+ }
+ return do{
+ o: o,
+ size: fis[0].Size(),
+ }, nil
+}
+
+type do struct {
+ size int64
+ o *os.File
+}
+
+func (d do) Size() int64 { return d.size }
+func (d do) Reader() io.ReaderAt { return d.o }
+func (d do) Close() error { return d.o.Close() }
+
+func (f FS) Get(fileId string) ([]byte, error) { return nil, nil }
+
+type Localhost int
+
+func (l Localhost) String() string { return fmt.Sprintf("http://localhost:%d", l) }
+func (l Localhost) UploadHost(id string) (string, error) { return l.String(), nil }
+func (Localhost) Authorize(string, string) (string, error) { return "ok", nil }
+func (Localhost) CheckCreds(string, string) error { return nil }
+func (l Localhost) APIRoot(string) string { return l.String() }
+func (l Localhost) DownloadRoot(string) string { return l.String() }
+func (Localhost) Sizes(string) (int32, int32) { return 1e5, 1 }
+func (l Localhost) UploadPartHost(fileId string) (string, error) { return l.String(), nil }
+
+type LocalBucket struct {
+ Port int
+
+ mux sync.Mutex
+ b map[string][]byte
+ nti map[string]string
+}
+
+func (lb *LocalBucket) AddBucket(id, name string, bs []byte) error {
+ lb.mux.Lock()
+ defer lb.mux.Unlock()
+
+ if lb.b == nil {
+ lb.b = make(map[string][]byte)
+ }
+
+ if lb.nti == nil {
+ lb.nti = make(map[string]string)
+ }
+
+ lb.b[id] = bs
+ lb.nti[name] = id
+ return nil
+}
+
+func (lb *LocalBucket) RemoveBucket(id string) error {
+ lb.mux.Lock()
+ defer lb.mux.Unlock()
+
+ if lb.b == nil {
+ lb.b = make(map[string][]byte)
+ }
+
+ delete(lb.b, id)
+ return nil
+}
+
+func (lb *LocalBucket) UpdateBucket(id string, rev int, bs []byte) error {
+ return errors.New("no")
+}
+
+func (lb *LocalBucket) ListBuckets(acct string) ([][]byte, error) {
+ lb.mux.Lock()
+ defer lb.mux.Unlock()
+
+ var bss [][]byte
+ for _, bs := range lb.b {
+ bss = append(bss, bs)
+ }
+ return bss, nil
+}
+
+func (lb *LocalBucket) GetBucket(id string) ([]byte, error) {
+ lb.mux.Lock()
+ defer lb.mux.Unlock()
+
+ bs, ok := lb.b[id]
+ if !ok {
+ return nil, errors.New("not found")
+ }
+ return bs, nil
+}
+
+func (lb *LocalBucket) GetBucketID(name string) (string, error) {
+ lb.mux.Lock()
+ defer lb.mux.Unlock()
+
+ id, ok := lb.nti[name]
+ if !ok {
+ return "", errors.New("not found")
+ }
+ return id, nil
+}
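
Together with the SetAPIBase AuthOption added to base above, the bonfire package can stand in for the real service during development. A sketch that points a base client at the server started by bin/bonfire/bonfire.go; bonfire's Localhost type accepts any credentials, so the account and key below are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/kurin/blazer/base"
)

func main() {
	ctx := context.Background()
	// Localhost.Authorize accepts any account/key pair, so these values
	// only need to be non-empty.
	b, err := base.AuthorizeAccount(ctx, "any-account", "any-key",
		base.SetAPIBase("http://localhost:8822"))
	if err != nil {
		fmt.Println(err)
		return
	}
	buckets, err := b.ListBuckets(ctx)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, bkt := range buckets {
		fmt.Println(bkt.Name)
	}
}
```
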
diff --git a/debian/changelog b/debian/changelog
index a9cd1ef..d9d754b 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+golang-github-kurin-blazer (0.5.3-1) UNRELEASED; urgency=low
+
+ * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk> Tue, 08 Mar 2022 05:35:21 -0000
+
golang-github-kurin-blazer (0.4.4-1) unstable; urgency=medium
* New upstream version 0.4.4
diff --git a/examples/simple/simple.go b/examples/simple/simple.go
index f1f73a5..24fce3b 100644
--- a/examples/simple/simple.go
+++ b/examples/simple/simple.go
@@ -1,4 +1,4 @@
-// Copyright 2017, Google
+// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/internal/b2assets/gen.go b/internal/b2assets/gen.go
index 2b9c731..8c4ac4f 100644
--- a/internal/b2assets/gen.go
+++ b/internal/b2assets/gen.go
@@ -1,4 +1,4 @@
-// Copyright 2018, Google
+// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/internal/b2types/b2types.go b/internal/b2types/b2types.go
index 4730110..bbc3476 100644
--- a/internal/b2types/b2types.go
+++ b/internal/b2types/b2types.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -29,11 +29,20 @@ type ErrorMessage struct {
}
type AuthorizeAccountResponse struct {
- AccountID string `json:"accountId"`
- AuthToken string `json:"authorizationToken"`
- URI string `json:"apiUrl"`
- DownloadURI string `json:"downloadUrl"`
- MinPartSize int `json:"minimumPartSize"`
+ AccountID string `json:"accountId"`
+ AuthToken string `json:"authorizationToken"`
+ URI string `json:"apiUrl"`
+ DownloadURI string `json:"downloadUrl"`
+ MinPartSize int `json:"minimumPartSize"`
+ PartSize int `json:"recommendedPartSize"`
+ AbsMinPartSize int `json:"absoluteMinimumPartSize"`
+ Allowed Allowance `json:"allowed"`
+}
+
+type Allowance struct {
+ Capabilities []string `json:"capabilities"`
+ Bucket string `json:"bucketId"`
+ Prefix string `json:"namePrefix"`
}
type LifecycleRule struct {
@@ -66,6 +75,7 @@ type DeleteBucketRequest struct {
type ListBucketsRequest struct {
AccountID string `json:"accountId"`
+ Bucket string `json:"bucketId,omitempty"`
}
type ListBucketsResponse struct {
@@ -73,15 +83,8 @@ type ListBucketsResponse struct {
}
type UpdateBucketRequest struct {
- AccountID string `json:"accountId"`
- BucketID string `json:"bucketId"`
- // bucketName is a required field according to
- // https://www.backblaze.com/b2/docs/b2_update_bucket.html.
- //
- // However, actually setting it returns 400: unknown field in
- // com.backblaze.modules.b2.data.UpdateBucketRequest: bucketName
- //
- //Name string `json:"bucketName"`
+ AccountID string `json:"accountId"`
+ BucketID string `json:"bucketId"`
Type string `json:"bucketType,omitempty"`
Info map[string]string `json:"bucketInfo,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
@@ -99,11 +102,7 @@ type GetUploadURLResponse struct {
Token string `json:"authorizationToken"`
}
-type UploadFileResponse struct {
- FileID string `json:"fileId"`
- Timestamp int64 `json:"uploadTimestamp"`
- Action string `json:"action"`
-}
+type UploadFileResponse GetFileInfoResponse
type DeleteFileVersionRequest struct {
Name string `json:"fileName"`
@@ -206,20 +205,23 @@ type GetFileInfoRequest struct {
}
type GetFileInfoResponse struct {
- FileID string `json:"fileId"`
- Name string `json:"fileName"`
- SHA1 string `json:"contentSha1"`
- Size int64 `json:"contentLength"`
- ContentType string `json:"contentType"`
- Info map[string]string `json:"fileInfo"`
- Action string `json:"action"`
- Timestamp int64 `json:"uploadTimestamp"`
+ FileID string `json:"fileId,omitempty"`
+ Name string `json:"fileName,omitempty"`
+ AccountID string `json:"accountId,omitempty"`
+ BucketID string `json:"bucketId,omitempty"`
+ Size int64 `json:"contentLength,omitempty"`
+ SHA1 string `json:"contentSha1,omitempty"`
+ ContentType string `json:"contentType,omitempty"`
+ Info map[string]string `json:"fileInfo,omitempty"`
+ Action string `json:"action,omitempty"`
+ Timestamp int64 `json:"uploadTimestamp,omitempty"`
}
type GetDownloadAuthorizationRequest struct {
- BucketID string `json:"bucketId"`
- Prefix string `json:"fileNamePrefix"`
- Valid int `json:"validDurationInSeconds"`
+ BucketID string `json:"bucketId"`
+ Prefix string `json:"fileNamePrefix"`
+ Valid int `json:"validDurationInSeconds"`
+ ContentDisposition string `json:"b2ContentDisposition,omitempty"`
}
type GetDownloadAuthorizationResponse struct {
@@ -238,3 +240,42 @@ type ListUnfinishedLargeFilesResponse struct {
Files []GetFileInfoResponse `json:"files"`
Continuation string `json:"nextFileId"`
}
+
+type CreateKeyRequest struct {
+ AccountID string `json:"accountId"`
+ Capabilities []string `json:"capabilities"`
+ Name string `json:"keyName"`
+ Valid int `json:"validDurationInSeconds,omitempty"`
+ BucketID string `json:"bucketId,omitempty"`
+ Prefix string `json:"namePrefix,omitempty"`
+}
+
+type Key struct {
+ ID string `json:"applicationKeyId"`
+ Secret string `json:"applicationKey"`
+ AccountID string `json:"accountId"`
+ Capabilities []string `json:"capabilities"`
+ Name string `json:"keyName"`
+ Expires int64 `json:"expirationTimestamp"`
+ BucketID string `json:"bucketId"`
+ Prefix string `json:"namePrefix"`
+}
+
+type CreateKeyResponse Key
+
+type DeleteKeyRequest struct {
+ KeyID string `json:"applicationKeyId"`
+}
+
+type DeleteKeyResponse Key
+
+type ListKeysRequest struct {
+ AccountID string `json:"accountId"`
+ Max int `json:"maxKeyCount,omitempty"`
+ Next string `json:"startApplicationKeyId,omitempty"`
+}
+
+type ListKeysResponse struct {
+ Keys []Key `json:"keys"`
+ Next string `json:"nextApplicationKeyId"`
+}
diff --git a/internal/blog/blog.go b/internal/blog/blog.go
index 6ffe5cb..793301a 100644
--- a/internal/blog/blog.go
+++ b/internal/blog/blog.go
@@ -1,4 +1,4 @@
-// Copyright 2017, Google
+// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/internal/pyre/api.go b/internal/pyre/api.go
new file mode 100644
index 0000000..79a0b2f
--- /dev/null
+++ b/internal/pyre/api.go
@@ -0,0 +1,354 @@
+// Copyright 2018, the Blazer authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pyre
+
+import (
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/google/uuid"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+
+ pb "github.com/kurin/blazer/internal/pyre/proto"
+)
+
+type apiErr struct {
+ Status int `json:"status"`
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func serveMuxOptions() []runtime.ServeMuxOption {
+ return []runtime.ServeMuxOption{
+ runtime.WithMarshalerOption("*", &runtime.JSONPb{}),
+ runtime.WithProtoErrorHandler(func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, rw http.ResponseWriter, req *http.Request, err error) {
+ aErr := apiErr{
+ Status: 400,
+ Code: "uh oh",
+ Message: err.Error(),
+ }
+ rw.WriteHeader(aErr.Status)
+ if err := m.NewEncoder(rw).Encode(aErr); err != nil {
+ fmt.Fprintln(os.Stdout, err)
+ }
+ }),
+ }
+}
+
+func getAuth(ctx context.Context) (string, error) {
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ return "", errors.New("no metadata")
+ }
+ data := md.Get("authorization")
+ if len(data) == 0 {
+ return "", nil
+ }
+ return data[0], nil
+}
+
+func RegisterServerOnMux(ctx context.Context, srv *Server, mux *http.ServeMux) error {
+ rmux := runtime.NewServeMux(serveMuxOptions()...)
+ l, err := net.Listen("tcp", "localhost:0")
+ if err != nil {
+ return err
+ }
+ gsrv := grpc.NewServer()
+ if err := pb.RegisterPyreServiceHandlerFromEndpoint(ctx, rmux, l.Addr().String(), []grpc.DialOption{grpc.WithInsecure()}); err != nil {
+ return err
+ }
+ pb.RegisterPyreServiceServer(gsrv, srv)
+ mux.Handle("/b2api/v1/", rmux)
+ go gsrv.Serve(l)
+ go func() {
+ <-ctx.Done()
+ gsrv.GracefulStop()
+ }()
+ return nil
+}
+
+type AccountManager interface {
+ Authorize(acct, key string) (string, error)
+ CheckCreds(token, api string) error
+ APIRoot(acct string) string
+ DownloadRoot(acct string) string
+ UploadPartHost(fileID string) (string, error)
+ UploadHost(id string) (string, error)
+ Sizes(acct string) (recommended, minimum int32)
+}
+
+type BucketManager interface {
+ AddBucket(id, name string, bs []byte) error
+ RemoveBucket(id string) error
+ UpdateBucket(id string, rev int, bs []byte) error
+ ListBuckets(acct string) ([][]byte, error)
+ GetBucket(id string) ([]byte, error)
+}
+
+type LargeFileOrganizer interface {
+ Start(bucketID, fileName, fileID string, bs []byte) error
+ Get(fileID string) ([]byte, error)
+ Parts(fileID string) ([]string, error)
+ Finish(fileID string) error
+}
+
+type Server struct {
+ Account AccountManager
+ Bucket BucketManager
+ LargeFile LargeFileOrganizer
+ List ListManager
+}
+
+func (s *Server) AuthorizeAccount(ctx context.Context, req *pb.AuthorizeAccountRequest) (*pb.AuthorizeAccountResponse, error) {
+ auth, err := getAuth(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if !strings.HasPrefix(auth, "Basic ") {
+ return nil, errors.New("basic auth required")
+ }
+ auth = strings.TrimPrefix(auth, "Basic ")
+ bs, err := base64.StdEncoding.DecodeString(auth)
+ if err != nil {
+ return nil, err
+ }
+ split := strings.Split(string(bs), ":")
+ if len(split) != 2 {
+ return nil, errors.New("bad auth")
+ }
+ acct, key := split[0], split[1]
+ token, err := s.Account.Authorize(acct, key)
+ if err != nil {
+ return nil, err
+ }
+ rec, min := s.Account.Sizes(acct)
+ return &pb.AuthorizeAccountResponse{
+ AuthorizationToken: token,
+ ApiUrl: s.Account.APIRoot(acct),
+ DownloadUrl: s.Account.DownloadRoot(acct),
+ RecommendedPartSize: rec,
+ MinimumPartSize: rec,
+ AbsoluteMinimumPartSize: min,
+ }, nil
+}
+
+func (s *Server) ListBuckets(ctx context.Context, req *pb.ListBucketsRequest) (*pb.ListBucketsResponse, error) {
+ resp := &pb.ListBucketsResponse{}
+ buckets, err := s.Bucket.ListBuckets(req.AccountId)
+ if err != nil {
+ return nil, err
+ }
+ for _, bs := range buckets {
+ var bucket pb.Bucket
+ if err := proto.Unmarshal(bs, &bucket); err != nil {
+ return nil, err
+ }
+ resp.Buckets = append(resp.Buckets, &bucket)
+ }
+ return resp, nil
+}
+
+func (s *Server) CreateBucket(ctx context.Context, req *pb.Bucket) (*pb.Bucket, error) {
+ req.BucketId = uuid.New().String()
+ bs, err := proto.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.Bucket.AddBucket(req.BucketId, req.BucketName, bs); err != nil {
+ return nil, err
+ }
+ return req, nil
+}
+
+func (s *Server) DeleteBucket(ctx context.Context, req *pb.Bucket) (*pb.Bucket, error) {
+ bs, err := s.Bucket.GetBucket(req.BucketId)
+ if err != nil {
+ return nil, err
+ }
+ var bucket pb.Bucket
+ if err := proto.Unmarshal(bs, &bucket); err != nil {
+ return nil, err
+ }
+ if err := s.Bucket.RemoveBucket(req.BucketId); err != nil {
+ return nil, err
+ }
+ return &bucket, nil
+}
+
+func (s *Server) GetUploadUrl(ctx context.Context, req *pb.GetUploadUrlRequest) (*pb.GetUploadUrlResponse, error) {
+ host, err := s.Account.UploadHost(req.BucketId)
+ if err != nil {
+ return nil, err
+ }
+ return &pb.GetUploadUrlResponse{
+ UploadUrl: fmt.Sprintf("%s/b2api/v1/b2_upload_file/%s", host, req.BucketId),
+ BucketId: req.BucketId,
+ }, nil
+}
+
+func (s *Server) StartLargeFile(ctx context.Context, req *pb.StartLargeFileRequest) (*pb.StartLargeFileResponse, error) {
+ fileID := uuid.New().String()
+ resp := &pb.StartLargeFileResponse{
+ FileId: fileID,
+ FileName: req.FileName,
+ BucketId: req.BucketId,
+ ContentType: req.ContentType,
+ FileInfo: req.FileInfo,
+ }
+ bs, err := proto.Marshal(resp)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.LargeFile.Start(req.BucketId, req.FileName, fileID, bs); err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (s *Server) GetUploadPartUrl(ctx context.Context, req *pb.GetUploadPartUrlRequest) (*pb.GetUploadPartUrlResponse, error) {
+ host, err := s.Account.UploadPartHost(req.FileId)
+ if err != nil {
+ return nil, err
+ }
+ return &pb.GetUploadPartUrlResponse{
+ UploadUrl: fmt.Sprintf("%s/b2api/v1/b2_upload_part/%s", host, req.FileId),
+ }, nil
+}
+
+func (s *Server) FinishLargeFile(ctx context.Context, req *pb.FinishLargeFileRequest) (*pb.FinishLargeFileResponse, error) {
+ parts, err := s.LargeFile.Parts(req.FileId)
+ if err != nil {
+ return nil, err
+ }
+ if !reflect.DeepEqual(parts, req.PartSha1Array) {
+ return nil, errors.New("sha1 array mismatch")
+ }
+ if err := s.LargeFile.Finish(req.FileId); err != nil {
+ return nil, err
+ }
+ return &pb.FinishLargeFileResponse{}, nil
+}
+
+func (s *Server) ListFileVersions(ctx context.Context, req *pb.ListFileVersionsRequest) (*pb.ListFileVersionsResponse, error) {
+ return nil, nil
+}
+
+type objTuple struct {
+ name, version string
+}
+
+type ListManager interface {
+ // NextN returns the next n objects, sorted by lexicographical order by name,
+ // beginning at and including, if it exists, fileName. If withPrefix is not
+ // empty, it only returns names that begin with that prefix. If skipPrefix
+ // is not empty, then no files with that prefix are returned. If the two
+ // conflict, skipPrefix wins (i.e., do not return the entry).
+ //
+ // If fewer than n entries are returned, this signifies that no more names
+ // exist that meet these criteria.
+ NextN(bucketID, fileName, withPrefix, skipPrefix string, n int) ([]VersionedObject, error)
+}
+
+type VersionedObject interface {
+ Name() string
+ NextNVersions(begin string, n int) ([]string, error)
+}
+
+func getDirNames(lm ListManager, bucket, name, prefix, delim string, n int) ([]string, error) {
+ var sfx string
+ var out []string
+ for n > 0 {
+ vo, err := lm.NextN(bucket, name, prefix, sfx, 1)
+ if err != nil {
+ return nil, err
+ }
+ if len(vo) == 0 {
+ return out, nil
+ }
+ v := vo[0]
+ name = v.Name()
+ suffix := name[len(prefix):]
+ i := strings.Index(suffix, delim)
+ if i < 0 {
+ sfx = ""
+ out = append(out, name)
+ name += "\000"
+ n--
+ continue
+ }
+ sfx = v.Name()[:len(prefix)+i+1]
+ out = append(out, sfx)
+ n--
+ }
+ return out, nil
+}
+
+//func getNextObjects(lm ListManager, bucket, name, prefix, delimiter string, n int) ([]VersionedObject, error) {
+// if delimiter == "" {
+// return lm.NextN(bucket, name, prefix, "", n)
+// }
+// afterPfx := strings.TrimPrefix(name, prefix)
+// i := strings.Index(afterPfx, delimiter)
+// if i == 0 {
+//
+// }
+// if i < 0 {
+// return lm.NextN(bucket, name, prefix, "", n)
+// }
+// skipPfx := name[:len(prefix)+i]
+// // TO
+//}
+//
+//func listFileVersions(lm ListManager, bucket, name, version, prefix, delimiter string, n int) ([]objTuple, error) {
+// var tups []objTuple
+// var got int
+// for {
+// objs, err := getNextObjects(bucket, name, prefix, delimiter, n-got)
+// if err != nil {
+// return nil, err
+// }
+// if len(objs) == 0 {
+// break
+// }
+// for _, o := range objs {
+// var begin string
+// if len(tups) == 0 {
+// begin = version
+// }
+// vers, err := lm.NextNVersions(begin, n-got)
+// if err != nil {
+// return nil, err
+// }
+// got += len(vers)
+// for _, ver := range vers {
+// tups = append(tups, objTuple{name: o.Name(), version: ver})
+// }
+// if got >= n {
+// return tups[:n], nil
+// }
+// }
+// }
+// return tups, nil
+//}
diff --git a/internal/pyre/api_test.go b/internal/pyre/api_test.go
new file mode 100644
index 0000000..3f71d33
--- /dev/null
+++ b/internal/pyre/api_test.go
@@ -0,0 +1,137 @@
+// Copyright 2018, the Blazer authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pyre
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+)
+
+type testVersionedObject struct {
+ name string
+ versions []string
+}
+
+func (t testVersionedObject) Name() string { return t.name }
+
+func (t testVersionedObject) NextNVersions(b string, n int) ([]string, error) {
+ var out []string
+ var seen bool
+ if b == "" {
+ seen = true
+ }
+ for _, v := range t.versions {
+ if b == v {
+ seen = true
+ }
+ if !seen {
+ continue
+ }
+ if len(out) >= n {
+ return out, nil
+ }
+ out = append(out, v)
+ }
+ return out, nil
+}
+
+type testListManager struct {
+ objs map[string][]string
+ m sync.Mutex
+}
+
+func (t *testListManager) NextN(b, fn, pfx, spfx string, n int) ([]VersionedObject, error) {
+ t.m.Lock()
+ defer t.m.Unlock()
+
+ var out []VersionedObject
+ var keys []string
+ for k := range t.objs {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ if k < fn {
+ continue
+ }
+ if !strings.HasPrefix(k, pfx) {
+ continue
+ }
+ if spfx != "" && strings.HasPrefix(k, spfx) {
+ continue
+ }
+ out = append(out, testVersionedObject{name: k, versions: t.objs[k]})
+ n--
+ if n <= 0 {
+ return out, nil
+ }
+ }
+ return out, nil
+}
+
+func TestGetDirNames(t *testing.T) {
+ table := []struct {
+ lm ListManager
+ name string
+ pfx string
+ delim string
+ num int
+ want []string
+ }{
+ {
+ lm: &testListManager{
+ objs: map[string][]string{
+ "/usr/local/etc/foo/bar": {"a"},
+ "/usr/local/etc/foo/baz": {"a"},
+ "/usr/local/etc/foo": {"a"},
+ "/usr/local/etc/fool": {"a"},
+ },
+ },
+ num: 2,
+ pfx: "/usr/local/etc/",
+ delim: "/",
+ want: []string{"/usr/local/etc/foo", "/usr/local/etc/foo/"},
+ },
+ {
+ lm: &testListManager{
+ objs: map[string][]string{
+ "/usr/local/etc/foo/bar": {"a"},
+ "/usr/local/etc/foo/baz": {"a"},
+ "/usr/local/etc/foo": {"a"},
+ "/usr/local/etc/fool": {"a"},
+ "/usr/local/etc/bar": {"a"},
+ },
+ },
+ num: 4,
+ pfx: "/usr/local/etc/",
+ delim: "/",
+ want: []string{"/usr/local/etc/bar", "/usr/local/etc/foo", "/usr/local/etc/foo/", "/usr/local/etc/fool"},
+ },
+ }
+
+ for _, e := range table {
+ got, err := getDirNames(e.lm, "", e.name, e.pfx, e.delim, e.num)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if !reflect.DeepEqual(got, e.want) {
+ t.Errorf("getDirNames(%v, %q, %q, %q, %d): got %v, want %v", e.lm, e.name, e.pfx, e.delim, e.num, got, e.want)
+ }
+ }
+}
diff --git a/internal/pyre/download.go b/internal/pyre/download.go
new file mode 100644
index 0000000..160d93d
--- /dev/null
+++ b/internal/pyre/download.go
@@ -0,0 +1,136 @@
+// Copyright 2018, the Blazer authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pyre
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+type DownloadableObject interface {
+ Size() int64
+ Reader() io.ReaderAt
+ io.Closer
+}
+
+type DownloadManager interface {
+ ObjectByName(bucketID, name string) (DownloadableObject, error)
+ GetBucketID(bucket string) (string, error)
+ GetBucket(id string) ([]byte, error)
+}
+
+type downloadServer struct {
+ dm DownloadManager
+}
+
+type downloadRequest struct {
+ off, n int64
+}
+
+func parseDownloadHeaders(r *http.Request) (*downloadRequest, error) {
+ rang := r.Header.Get("Range")
+ if rang == "" {
+ return &downloadRequest{}, nil
+ }
+ if !strings.HasPrefix(rang, "bytes=") {
+ return nil, fmt.Errorf("unknown range format: %q", rang)
+ }
+ rang = strings.TrimPrefix(rang, "bytes=")
+ if !strings.Contains(rang, "-") {
+ return nil, fmt.Errorf("unknown range format: %q", rang)
+ }
+ parts := strings.Split(rang, "-")
+ off, err := strconv.ParseInt(parts[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ end, err := strconv.ParseInt(parts[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ return &downloadRequest{
+ off: off,
+ n: (end + 1) - off,
+ }, nil
+}
+
+func (fs *downloadServer) serveWholeObject(rw http.ResponseWriter, obj DownloadableObject) {
+ rw.Header().Set("Content-Length", fmt.Sprintf("%d", obj.Size()))
+ sr := io.NewSectionReader(obj.Reader(), 0, obj.Size())
+ if _, err := io.Copy(rw, sr); err != nil {
+ http.Error(rw, err.Error(), 503)
+ fmt.Println("no reader", err)
+ }
+}
+
+func (fs *downloadServer) servePartialObject(rw http.ResponseWriter, obj DownloadableObject, off, len int64) {
+ if off >= obj.Size() {
+ http.Error(rw, "hell naw", 416)
+ fmt.Printf("range not good (%d-%d for %d)\n", off, len, obj.Size())
+ return
+ }
+ if off+len > obj.Size() {
+ len = obj.Size() - off
+ }
+ sr := io.NewSectionReader(obj.Reader(), off, len)
+ rw.Header().Set("Content-Length", fmt.Sprintf("%d", len))
+ rw.WriteHeader(206) // this goes after headers are set
+ if _, err := io.Copy(rw, sr); err != nil {
+ fmt.Println("bad read:", err)
+ }
+}
+
+func (fs *downloadServer) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
+ req, err := parseDownloadHeaders(r)
+ if err != nil {
+ http.Error(rw, err.Error(), 503)
+ fmt.Println("weird header")
+ return
+ }
+ path := strings.TrimPrefix(r.URL.Path, "/")
+ parts := strings.Split(path, "/")
+ if len(parts) < 3 {
+ http.Error(rw, err.Error(), 404)
+ fmt.Println("weird file")
+ return
+ }
+ bucket := parts[1]
+ bid, err := fs.dm.GetBucketID(bucket)
+ if err != nil {
+ http.Error(rw, err.Error(), 503)
+ fmt.Println("no bucket:", err)
+ return
+ }
+ file := strings.Join(parts[2:], "/")
+ obj, err := fs.dm.ObjectByName(bid, file)
+ if err != nil {
+ http.Error(rw, err.Error(), 503)
+ fmt.Println("no reader", err)
+ return
+ }
+ defer obj.Close()
+ if req.off == 0 && req.n == 0 {
+ fs.serveWholeObject(rw, obj)
+ return
+ }
+ fs.servePartialObject(rw, obj, req.off, req.n)
+}
+
+func RegisterDownloadManagerOnMux(d DownloadManager, mux *http.ServeMux) {
+ mux.Handle("/file/", &downloadServer{dm: d})
+}
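
parseDownloadHeaders treats the Range header as an inclusive byte range, so the length handed to servePartialObject is end+1-off. A tiny standalone illustration of that arithmetic (it mirrors the unexported helper rather than calling it):

```go
package main

import "fmt"

// rangeToOffLen mirrors the arithmetic in parseDownloadHeaders: an
// inclusive "bytes=off-end" range becomes an offset and a length.
func rangeToOffLen(off, end int64) (int64, int64) {
	return off, end + 1 - off
}

func main() {
	off, n := rangeToOffLen(100, 199)
	fmt.Println(off, n) // 100 100
}
```
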
diff --git a/internal/pyre/large.go b/internal/pyre/large.go
new file mode 100644
index 0000000..d4ef82b
--- /dev/null
+++ b/internal/pyre/large.go
@@ -0,0 +1,91 @@
+// Copyright 2018, the Blazer authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pyre
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+const uploadFilePartPrefix = "/b2api/v1/b2_upload_part/"
+
+type LargeFileManager interface {
+ PartWriter(id string, part int) (io.WriteCloser, error)
+}
+
+type largeFileServer struct {
+ fm LargeFileManager
+}
+
+type uploadPartRequest struct {
+ ID string `json:"fileId"`
+ Part int `json:"partNumber"`
+ Size int64 `json:"contentLength"`
+ Hash string `json:"contentSha1"`
+}
+
+func parseUploadPartHeaders(r *http.Request) (uploadPartRequest, error) {
+ var ur uploadPartRequest
+ ur.Hash = r.Header.Get("X-Bz-Content-Sha1")
+ part, err := strconv.ParseInt(r.Header.Get("X-Bz-Part-Number"), 10, 64)
+ if err != nil {
+ return ur, err
+ }
+ ur.Part = int(part)
+ size, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return ur, err
+ }
+ ur.Size = size
+ ur.ID = strings.TrimPrefix(r.URL.Path, uploadFilePartPrefix)
+ return ur, nil
+}
+
+func (fs *largeFileServer) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
+ req, err := parseUploadPartHeaders(r)
+ if err != nil {
+ http.Error(rw, err.Error(), 500)
+ fmt.Println("oh no")
+ return
+ }
+ w, err := fs.fm.PartWriter(req.ID, req.Part)
+ if err != nil {
+ http.Error(rw, err.Error(), 500)
+ fmt.Println("oh no")
+ return
+ }
+ if _, err := io.Copy(w, io.LimitReader(r.Body, req.Size)); err != nil {
+ w.Close()
+ http.Error(rw, err.Error(), 500)
+ fmt.Println("oh no")
+ return
+ }
+ if err := w.Close(); err != nil {
+ http.Error(rw, err.Error(), 500)
+ fmt.Println("oh no")
+ return
+ }
+ if err := json.NewEncoder(rw).Encode(req); err != nil {
+ fmt.Println("oh no")
+ }
+}
+
+func RegisterLargeFileManagerOnMux(f LargeFileManager, mux *http.ServeMux) {
+ mux.Handle(uploadFilePartPrefix, &largeFileServer{fm: f})
+}
diff --git a/internal/pyre/proto/pyre.pb.go b/internal/pyre/proto/pyre.pb.go
new file mode 100644
index 0000000..29a906e
--- /dev/null
+++ b/internal/pyre/proto/pyre.pb.go
@@ -0,0 +1,1896 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: proto/pyre.proto
+
+package pyre_proto
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "google.golang.org/genproto/googleapis/api/annotations"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type AuthorizeAccountRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthorizeAccountRequest) Reset() { *m = AuthorizeAccountRequest{} }
+func (m *AuthorizeAccountRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthorizeAccountRequest) ProtoMessage() {}
+func (*AuthorizeAccountRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{0}
+}
+func (m *AuthorizeAccountRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AuthorizeAccountRequest.Unmarshal(m, b)
+}
+func (m *AuthorizeAccountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AuthorizeAccountRequest.Marshal(b, m, deterministic)
+}
+func (dst *AuthorizeAccountRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthorizeAccountRequest.Merge(dst, src)
+}
+func (m *AuthorizeAccountRequest) XXX_Size() int {
+ return xxx_messageInfo_AuthorizeAccountRequest.Size(m)
+}
+func (m *AuthorizeAccountRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthorizeAccountRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthorizeAccountRequest proto.InternalMessageInfo
+
+type AuthorizeAccountResponse struct {
+ // The identifier for the account.
+ AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"`
+ // An authorization token to use with all calls, other than
+ // b2_authorize_account, that need an Authorization header. This
+ // authorization token is valid for at most 24 hours.
+ AuthorizationToken string `protobuf:"bytes,2,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"`
+ // The base URL to use for all API calls except for uploading and downloading
+ // files.
+ ApiUrl string `protobuf:"bytes,3,opt,name=api_url,json=apiUrl,proto3" json:"api_url,omitempty"`
+ // The base URL to use for downloading files.
+ DownloadUrl string `protobuf:"bytes,4,opt,name=download_url,json=downloadUrl,proto3" json:"download_url,omitempty"`
+ // The recommended size for each part of a large file. We recommend using
+ // this part size for optimal upload performance.
+ RecommendedPartSize int32 `protobuf:"varint,5,opt,name=recommended_part_size,json=recommendedPartSize,proto3" json:"recommended_part_size,omitempty"`
+ // The smallest possible size of a part of a large file (except the last
+ // one). This is smaller than the recommended part size. If you use it, you
+ // may find that it takes longer overall to upload a large file.
+ AbsoluteMinimumPartSize int32 `protobuf:"varint,6,opt,name=absolute_minimum_part_size,json=absoluteMinimumPartSize,proto3" json:"absolute_minimum_part_size,omitempty"`
+ MinimumPartSize int32 `protobuf:"varint,7,opt,name=minimum_part_size,json=minimumPartSize,proto3" json:"minimum_part_size,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthorizeAccountResponse) Reset() { *m = AuthorizeAccountResponse{} }
+func (m *AuthorizeAccountResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthorizeAccountResponse) ProtoMessage() {}
+func (*AuthorizeAccountResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{1}
+}
+func (m *AuthorizeAccountResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AuthorizeAccountResponse.Unmarshal(m, b)
+}
+func (m *AuthorizeAccountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AuthorizeAccountResponse.Marshal(b, m, deterministic)
+}
+func (dst *AuthorizeAccountResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthorizeAccountResponse.Merge(dst, src)
+}
+func (m *AuthorizeAccountResponse) XXX_Size() int {
+ return xxx_messageInfo_AuthorizeAccountResponse.Size(m)
+}
+func (m *AuthorizeAccountResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthorizeAccountResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthorizeAccountResponse proto.InternalMessageInfo
+
+func (m *AuthorizeAccountResponse) GetAccountId() string {
+ if m != nil {
+ return m.AccountId
+ }
+ return ""
+}
+
+func (m *AuthorizeAccountResponse) GetAuthorizationToken() string {
+ if m != nil {
+ return m.AuthorizationToken
+ }
+ return ""
+}
+
+func (m *AuthorizeAccountResponse) GetApiUrl() string {
+ if m != nil {
+ return m.ApiUrl
+ }
+ return ""
+}
+
+func (m *AuthorizeAccountResponse) GetDownloadUrl() string {
+ if m != nil {
+ return m.DownloadUrl
+ }
+ return ""
+}
+
+func (m *AuthorizeAccountResponse) GetRecommendedPartSize() int32 {
+ if m != nil {
+ return m.RecommendedPartSize
+ }
+ return 0
+}
+
+func (m *AuthorizeAccountResponse) GetAbsoluteMinimumPartSize() int32 {
+ if m != nil {
+ return m.AbsoluteMinimumPartSize
+ }
+ return 0
+}
+
+func (m *AuthorizeAccountResponse) GetMinimumPartSize() int32 {
+ if m != nil {
+ return m.MinimumPartSize
+ }
+ return 0
+}
+
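The part-size fields above govern how large-file uploads are chunked: clients should prefer the recommended size and never go below the absolute minimum. A minimal sketch of that choice, assuming this generated package is importable (the `pb` import path below is hypothetical):

```go
package main

import (
	"fmt"

	pb "github.com/kurin/blazer/internal/pyre/proto" // hypothetical import path for this generated package
)

// partSize picks the part size a client might use for a large-file upload:
// the recommended size, clamped up to the absolute minimum if necessary.
func partSize(resp *pb.AuthorizeAccountResponse) int32 {
	size := resp.GetRecommendedPartSize()
	if min := resp.GetAbsoluteMinimumPartSize(); size < min {
		size = min
	}
	return size
}

func main() {
	// A canned response standing in for a real b2_authorize_account reply.
	resp := &pb.AuthorizeAccountResponse{
		ApiUrl:                  "https://api.example.com",
		RecommendedPartSize:     100 * 1000 * 1000,
		AbsoluteMinimumPartSize: 5 * 1000 * 1000,
	}
	fmt.Println("using part size:", partSize(resp))
}
```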
+type ListBucketsRequest struct {
+ // The ID of your account.
+ AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"`
+ // When specified, the result will be a list containing just this bucket, if
+ // it's present in the account, or no buckets if the account does not have a
+ // bucket with this ID.
+ BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
+ // When specified, the result will be a list containing just this bucket, if
+ // it's present in the account, or no buckets if the account does not have a
+ // bucket with this name.
+ BucketName string `protobuf:"bytes,3,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"`
+ // If present, B2 will use it as a filter for bucket types returned in the
+ // list buckets response. If not present, only buckets with bucket types
+ // "allPublic", "allPrivate" and "snapshot" will be returned. A special
+ // filter value of ["all"] will return all bucket types.
+ //
+ // If present, it must be in the form of a json array of strings containing
+ // valid bucket types in quotes and separated by a comma. Valid bucket types
+ // include "allPrivate", "allPublic", "snapshot", and other values added in
+ // the future.
+ //
+ // A bad request error will be returned if "all" is used with other bucket
+ // types, this field is empty, or invalid bucket types are requested.
+ BucketTypes []string `protobuf:"bytes,4,rep,name=bucket_types,json=bucketTypes,proto3" json:"bucket_types,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListBucketsRequest) Reset() { *m = ListBucketsRequest{} }
+func (m *ListBucketsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListBucketsRequest) ProtoMessage() {}
+func (*ListBucketsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{2}
+}
+func (m *ListBucketsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListBucketsRequest.Unmarshal(m, b)
+}
+func (m *ListBucketsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListBucketsRequest.Marshal(b, m, deterministic)
+}
+func (dst *ListBucketsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListBucketsRequest.Merge(dst, src)
+}
+func (m *ListBucketsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListBucketsRequest.Size(m)
+}
+func (m *ListBucketsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListBucketsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListBucketsRequest proto.InternalMessageInfo
+
+func (m *ListBucketsRequest) GetAccountId() string {
+ if m != nil {
+ return m.AccountId
+ }
+ return ""
+}
+
+func (m *ListBucketsRequest) GetBucketId() string {
+ if m != nil {
+ return m.BucketId
+ }
+ return ""
+}
+
+func (m *ListBucketsRequest) GetBucketName() string {
+ if m != nil {
+ return m.BucketName
+ }
+ return ""
+}
+
+func (m *ListBucketsRequest) GetBucketTypes() []string {
+ if m != nil {
+ return m.BucketTypes
+ }
+ return nil
+}
+
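The bucket_types filter documented above is a plain list of type strings, with ["all"] as the catch-all. A sketch of issuing such a request against an already-connected client (import path hypothetical):

```go
package pyreexample

import (
	"context"
	"fmt"

	pb "github.com/kurin/blazer/internal/pyre/proto" // hypothetical import path for this generated package
)

// listPrivateBuckets filters on bucket type; per the field comment, a single
// entry of "all" would return every bucket type instead.
func listPrivateBuckets(ctx context.Context, client pb.PyreServiceClient, accountID string) error {
	resp, err := client.ListBuckets(ctx, &pb.ListBucketsRequest{
		AccountId:   accountID,
		BucketTypes: []string{"allPrivate", "snapshot"},
	})
	if err != nil {
		return err
	}
	for _, b := range resp.GetBuckets() {
		fmt.Println(b.GetBucketName(), b.GetBucketType())
	}
	return nil
}
```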
+type LifecycleRule struct {
+ // After a file is uploaded, the number of days before it can be hidden.
+ DaysFromUploadingToHiding int32 `protobuf:"varint,1,opt,name=days_from_uploading_to_hiding,json=daysFromUploadingToHiding,proto3" json:"days_from_uploading_to_hiding,omitempty"`
+ // After a file is hidden, the number of days before it can be deleted.
+ DaysFromHidingToDeleting int32 `protobuf:"varint,2,opt,name=days_from_hiding_to_deleting,json=daysFromHidingToDeleting,proto3" json:"days_from_hiding_to_deleting,omitempty"`
+ // The rule applies to files whose names start with this prefix.
+ FileNamePrefix string `protobuf:"bytes,3,opt,name=file_name_prefix,json=fileNamePrefix,proto3" json:"file_name_prefix,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LifecycleRule) Reset() { *m = LifecycleRule{} }
+func (m *LifecycleRule) String() string { return proto.CompactTextString(m) }
+func (*LifecycleRule) ProtoMessage() {}
+func (*LifecycleRule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{3}
+}
+func (m *LifecycleRule) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LifecycleRule.Unmarshal(m, b)
+}
+func (m *LifecycleRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LifecycleRule.Marshal(b, m, deterministic)
+}
+func (dst *LifecycleRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LifecycleRule.Merge(dst, src)
+}
+func (m *LifecycleRule) XXX_Size() int {
+ return xxx_messageInfo_LifecycleRule.Size(m)
+}
+func (m *LifecycleRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_LifecycleRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LifecycleRule proto.InternalMessageInfo
+
+func (m *LifecycleRule) GetDaysFromUploadingToHiding() int32 {
+ if m != nil {
+ return m.DaysFromUploadingToHiding
+ }
+ return 0
+}
+
+func (m *LifecycleRule) GetDaysFromHidingToDeleting() int32 {
+ if m != nil {
+ return m.DaysFromHidingToDeleting
+ }
+ return 0
+}
+
+func (m *LifecycleRule) GetFileNamePrefix() string {
+ if m != nil {
+ return m.FileNamePrefix
+ }
+ return ""
+}
+
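Combining the three fields above, a rule that hides versions under a prefix a week after upload and deletes hidden versions a month later might be sketched as (import path hypothetical):

```go
package main

import (
	"fmt"

	pb "github.com/kurin/blazer/internal/pyre/proto" // hypothetical import path for this generated package
)

func main() {
	rule := &pb.LifecycleRule{
		FileNamePrefix:            "backups/",
		DaysFromUploadingToHiding: 7,  // hide each version a week after upload
		DaysFromHidingToDeleting:  30, // delete it a month after it was hidden
	}
	fmt.Println(rule)
}
```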
+type CorsRule struct {
+ // A name for humans to recognize the rule in a user interface. Names must be
+ // unique within a bucket. Names can consist of upper-case and lower-case
+ // English letters, numbers, and "-". No other characters are allowed. A name
+ // must be at least 6 characters long, and can be at most 50 characters long.
+ // These are all allowed names: myPhotosSite, allowAnyHttps,
+ // backblaze-images. Names that start with "b2-" are reserved for Backblaze
+ // use.
+ CorsRuleName string `protobuf:"bytes,1,opt,name=cors_rule_name,json=corsRuleName,proto3" json:"cors_rule_name,omitempty"`
+ // A non-empty list specifying which origins the rule covers. Each value may
+ // have one of many formats:
+ //
+ // * The origin can be fully specified, such as http://www.example.com:8180
+ // or https://www.example.com:4433.
+ //
+ // * The origin can omit a default port, such as https://www.example.com.
+ //
+ // * The origin may have a single '*' as part of the domain name, such as
+ // https://*.example.com, https://*:8443 or https://*.
+ //
+ // * The origin may be 'https' to match any origin that uses HTTPS. (This is
+ // broader than 'https://*' because it matches any port.)
+ //
+ // * Finally, the origin can be a single '*' to match any origin.
+ //
+ // If any entry is "*", it must be the only entry. There can be at most one
+ // "https" entry and no entry after it may start with "https:".
+ AllowedOrigins []string `protobuf:"bytes,2,rep,name=allowed_origins,json=allowedOrigins,proto3" json:"allowed_origins,omitempty"`
+ // A list specifying which operations the rule allows. At least one value
+ // must be specified. All values must be from the following list. More values
+ // may be added to this list at any time.
+ //
+ // b2_download_file_by_name
+ // b2_download_file_by_id
+ // b2_upload_file
+ // b2_upload_part
+ AllowedOperations []string `protobuf:"bytes,3,rep,name=allowed_operations,json=allowedOperations,proto3" json:"allowed_operations,omitempty"`
+ // If present, this is a list of headers that are allowed in a pre-flight
+ // OPTIONS's request's Access-Control-Request-Headers header value. Each
+ // value may have one of many formats:
+ //
+ // * It may be a complete header name, such as x-bz-content-sha1.
+ //
+ // * It may end with an asterisk, such as x-bz-info-*.
+ //
+ // * Finally, it may be a single '*' to match any header.
+ //
+ // If any entry is "*", it must be the only entry in the list. If this list
+ // is missing, it is treated as if it were a list with no entries.
+ AllowedHeaders []string `protobuf:"bytes,4,rep,name=allowed_headers,json=allowedHeaders,proto3" json:"allowed_headers,omitempty"`
+ // If present, this is a list of headers that may be exposed to an
+ // application inside the client (e.g. exposed to JavaScript in a browser).
+ // Each entry in the list must be a complete header name (e.g.
+ // "x-bz-content-sha1"). If this list is missing or empty, no headers will be
+ // exposed.
+ ExposeHeaders []string `protobuf:"bytes,5,rep,name=expose_headers,json=exposeHeaders,proto3" json:"expose_headers,omitempty"`
+ // This specifies the maximum number of seconds that a browser may cache the
+ // response to a preflight request. The value must not be negative and it
+ // must not be more than 86,400 seconds (one day).
+ MaxAgeSeconds int32 `protobuf:"varint,6,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CorsRule) Reset() { *m = CorsRule{} }
+func (m *CorsRule) String() string { return proto.CompactTextString(m) }
+func (*CorsRule) ProtoMessage() {}
+func (*CorsRule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{4}
+}
+func (m *CorsRule) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CorsRule.Unmarshal(m, b)
+}
+func (m *CorsRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CorsRule.Marshal(b, m, deterministic)
+}
+func (dst *CorsRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CorsRule.Merge(dst, src)
+}
+func (m *CorsRule) XXX_Size() int {
+ return xxx_messageInfo_CorsRule.Size(m)
+}
+func (m *CorsRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_CorsRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CorsRule proto.InternalMessageInfo
+
+func (m *CorsRule) GetCorsRuleName() string {
+ if m != nil {
+ return m.CorsRuleName
+ }
+ return ""
+}
+
+func (m *CorsRule) GetAllowedOrigins() []string {
+ if m != nil {
+ return m.AllowedOrigins
+ }
+ return nil
+}
+
+func (m *CorsRule) GetAllowedOperations() []string {
+ if m != nil {
+ return m.AllowedOperations
+ }
+ return nil
+}
+
+func (m *CorsRule) GetAllowedHeaders() []string {
+ if m != nil {
+ return m.AllowedHeaders
+ }
+ return nil
+}
+
+func (m *CorsRule) GetExposeHeaders() []string {
+ if m != nil {
+ return m.ExposeHeaders
+ }
+ return nil
+}
+
+func (m *CorsRule) GetMaxAgeSeconds() int32 {
+ if m != nil {
+ return m.MaxAgeSeconds
+ }
+ return 0
+}
+
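A rule satisfying the constraints spelled out above (a 6-50 character name without the reserved "b2-" prefix, at least one allowed operation, a max age no larger than 86,400 seconds) could be sketched like this (import path hypothetical):

```go
package main

import (
	"fmt"

	pb "github.com/kurin/blazer/internal/pyre/proto" // hypothetical import path for this generated package
)

func main() {
	rule := &pb.CorsRule{
		CorsRuleName:      "allowAnyHttps",   // 6-50 characters, not starting with "b2-"
		AllowedOrigins:    []string{"https"}, // any origin that uses HTTPS
		AllowedOperations: []string{"b2_download_file_by_name", "b2_download_file_by_id"},
		AllowedHeaders:    []string{"range"},
		ExposeHeaders:     []string{"x-bz-content-sha1"},
		MaxAgeSeconds:     3600, // well under the 86,400-second cap
	}
	fmt.Println(rule)
}
```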
+type Bucket struct {
+ AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"`
+ BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
+ BucketName string `protobuf:"bytes,3,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"`
+ BucketType string `protobuf:"bytes,4,opt,name=bucket_type,json=bucketType,proto3" json:"bucket_type,omitempty"`
+ BucketInfo map[string]string `protobuf:"bytes,5,rep,name=bucket_info,json=bucketInfo,proto3" json:"bucket_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ CoresRules []*CorsRule `protobuf:"bytes,6,rep,name=cores_rules,json=coresRules,proto3" json:"cores_rules,omitempty"`
+ LifecycleRules []*LifecycleRule `protobuf:"bytes,7,rep,name=lifecycle_rules,json=lifecycleRules,proto3" json:"lifecycle_rules,omitempty"`
+ Revision int32 `protobuf:"varint,8,opt,name=revision,proto3" json:"revision,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{5}
+}
+func (m *Bucket) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Bucket.Unmarshal(m, b)
+}
+func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+}
+func (dst *Bucket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Bucket.Merge(dst, src)
+}
+func (m *Bucket) XXX_Size() int {
+ return xxx_messageInfo_Bucket.Size(m)
+}
+func (m *Bucket) XXX_DiscardUnknown() {
+ xxx_messageInfo_Bucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Bucket proto.InternalMessageInfo
+
+func (m *Bucket) GetAccountId() string {
+ if m != nil {
+ return m.AccountId
+ }
+ return ""
+}
+
+func (m *Bucket) GetBucketId() string {
+ if m != nil {
+ return m.BucketId
+ }
+ return ""
+}
+
+func (m *Bucket) GetBucketName() string {
+ if m != nil {
+ return m.BucketName
+ }
+ return ""
+}
+
+func (m *Bucket) GetBucketType() string {
+ if m != nil {
+ return m.BucketType
+ }
+ return ""
+}
+
+func (m *Bucket) GetBucketInfo() map[string]string {
+ if m != nil {
+ return m.BucketInfo
+ }
+ return nil
+}
+
+func (m *Bucket) GetCoresRules() []*CorsRule {
+ if m != nil {
+ return m.CoresRules
+ }
+ return nil
+}
+
+func (m *Bucket) GetLifecycleRules() []*LifecycleRule {
+ if m != nil {
+ return m.LifecycleRules
+ }
+ return nil
+}
+
+func (m *Bucket) GetRevision() int32 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
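The Bucket message ties the preceding pieces together and is also the request type for CreateBucket. Below is a sketch of building one; note that the generated CORS field is spelled CoresRules. The account ID, names, and import path are placeholders:

```go
package pyreexample

import (
	"context"

	pb "github.com/kurin/blazer/internal/pyre/proto" // hypothetical import path for this generated package
)

// createPhotosBucket sketches a CreateBucket call with bucket info metadata,
// one CORS rule and one lifecycle rule attached.
func createPhotosBucket(ctx context.Context, client pb.PyreServiceClient, accountID string) (*pb.Bucket, error) {
	return client.CreateBucket(ctx, &pb.Bucket{
		AccountId:  accountID,
		BucketName: "my-photos", // bucket names must be globally unique
		BucketType: "allPrivate",
		BucketInfo: map[string]string{"team": "photos"},
		CoresRules: []*pb.CorsRule{{ // field name as generated
			CorsRuleName:      "allowAnyHttps",
			AllowedOrigins:    []string{"https"},
			AllowedOperations: []string{"b2_download_file_by_name"},
			MaxAgeSeconds:     3600,
		}},
		LifecycleRules: []*pb.LifecycleRule{{
			FileNamePrefix:            "tmp/",
			DaysFromUploadingToHiding: 1,
			DaysFromHidingToDeleting:  7,
		}},
	})
}
```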
+type ListBucketsResponse struct {
+ Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListBucketsResponse) Reset() { *m = ListBucketsResponse{} }
+func (m *ListBucketsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListBucketsResponse) ProtoMessage() {}
+func (*ListBucketsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{6}
+}
+func (m *ListBucketsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListBucketsResponse.Unmarshal(m, b)
+}
+func (m *ListBucketsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListBucketsResponse.Marshal(b, m, deterministic)
+}
+func (dst *ListBucketsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListBucketsResponse.Merge(dst, src)
+}
+func (m *ListBucketsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListBucketsResponse.Size(m)
+}
+func (m *ListBucketsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListBucketsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListBucketsResponse proto.InternalMessageInfo
+
+func (m *ListBucketsResponse) GetBuckets() []*Bucket {
+ if m != nil {
+ return m.Buckets
+ }
+ return nil
+}
+
+type GetUploadUrlRequest struct {
+ BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetUploadUrlRequest) Reset() { *m = GetUploadUrlRequest{} }
+func (m *GetUploadUrlRequest) String() string { return proto.CompactTextString(m) }
+func (*GetUploadUrlRequest) ProtoMessage() {}
+func (*GetUploadUrlRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{7}
+}
+func (m *GetUploadUrlRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetUploadUrlRequest.Unmarshal(m, b)
+}
+func (m *GetUploadUrlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetUploadUrlRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetUploadUrlRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetUploadUrlRequest.Merge(dst, src)
+}
+func (m *GetUploadUrlRequest) XXX_Size() int {
+ return xxx_messageInfo_GetUploadUrlRequest.Size(m)
+}
+func (m *GetUploadUrlRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetUploadUrlRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetUploadUrlRequest proto.InternalMessageInfo
+
+func (m *GetUploadUrlRequest) GetBucketId() string {
+ if m != nil {
+ return m.BucketId
+ }
+ return ""
+}
+
+type GetUploadUrlResponse struct {
+ BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
+ UploadUrl string `protobuf:"bytes,2,opt,name=upload_url,json=uploadUrl,proto3" json:"upload_url,omitempty"`
+ AuthorizationToken string `protobuf:"bytes,3,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetUploadUrlResponse) Reset() { *m = GetUploadUrlResponse{} }
+func (m *GetUploadUrlResponse) String() string { return proto.CompactTextString(m) }
+func (*GetUploadUrlResponse) ProtoMessage() {}
+func (*GetUploadUrlResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{8}
+}
+func (m *GetUploadUrlResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetUploadUrlResponse.Unmarshal(m, b)
+}
+func (m *GetUploadUrlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetUploadUrlResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetUploadUrlResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetUploadUrlResponse.Merge(dst, src)
+}
+func (m *GetUploadUrlResponse) XXX_Size() int {
+ return xxx_messageInfo_GetUploadUrlResponse.Size(m)
+}
+func (m *GetUploadUrlResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetUploadUrlResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetUploadUrlResponse proto.InternalMessageInfo
+
+func (m *GetUploadUrlResponse) GetBucketId() string {
+ if m != nil {
+ return m.BucketId
+ }
+ return ""
+}
+
+func (m *GetUploadUrlResponse) GetUploadUrl() string {
+ if m != nil {
+ return m.UploadUrl
+ }
+ return ""
+}
+
+func (m *GetUploadUrlResponse) GetAuthorizationToken() string {
+ if m != nil {
+ return m.AuthorizationToken
+ }
+ return ""
+}
+
+type UploadFileResponse struct {
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
+ AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"`
+ BucketId string `protobuf:"bytes,4,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
+ ContentLength int32 `protobuf:"varint,5,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"`
+ ContentSha1 string `protobuf:"bytes,6,opt,name=content_sha1,json=contentSha1,proto3" json:"content_sha1,omitempty"`
+ ContentType string `protobuf:"bytes,7,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+ FileInfo map[string]string `protobuf:"bytes,8,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Action string `protobuf:"bytes,9,opt,name=action,proto3" json:"action,omitempty"`
+ UploadTimestamp int64 `protobuf:"varint,10,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UploadFileResponse) Reset() { *m = UploadFileResponse{} }
+func (m *UploadFileResponse) String() string { return proto.CompactTextString(m) }
+func (*UploadFileResponse) ProtoMessage() {}
+func (*UploadFileResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{9}
+}
+func (m *UploadFileResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UploadFileResponse.Unmarshal(m, b)
+}
+func (m *UploadFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UploadFileResponse.Marshal(b, m, deterministic)
+}
+func (dst *UploadFileResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UploadFileResponse.Merge(dst, src)
+}
+func (m *UploadFileResponse) XXX_Size() int {
+ return xxx_messageInfo_UploadFileResponse.Size(m)
+}
+func (m *UploadFileResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_UploadFileResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UploadFileResponse proto.InternalMessageInfo
+
+func (m *UploadFileResponse) GetFileId() string {
+ if m != nil {
+ return m.FileId
+ }
+ return ""
+}
+
+func (m *UploadFileResponse) GetFileName() string {
+ if m != nil {
+ return m.FileName
+ }
+ return ""
+}
+
+func (m *UploadFileResponse) GetAccountId() string {
+ if m != nil {
+ return m.AccountId
+ }
+ return ""
+}
+
+func (m *UploadFileResponse) GetBucketId() string {
+ if m != nil {
+ return m.BucketId
+ }
+ return ""
+}
+
+func (m *UploadFileResponse) GetContentLength() int32 {
+ if m != nil {
+ return m.ContentLength
+ }
+ return 0
+}
+
+func (m *UploadFileResponse) GetContentSha1() string {
+ if m != nil {
+ return m.ContentSha1
+ }
+ return ""
+}
+
+func (m *UploadFileResponse) GetContentType() string {
+ if m != nil {
+ return m.ContentType
+ }
+ return ""
+}
+
+func (m *UploadFileResponse) GetFileInfo() map[string]string {
+ if m != nil {
+ return m.FileInfo
+ }
+ return nil
+}
+
+func (m *UploadFileResponse) GetAction() string {
+ if m != nil {
+ return m.Action
+ }
+ return ""
+}
+
+func (m *UploadFileResponse) GetUploadTimestamp() int64 {
+ if m != nil {
+ return m.UploadTimestamp
+ }
+ return 0
+}
+
+type StartLargeFileRequest struct {
+ BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
+ FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
+ ContentType string `protobuf:"bytes,3,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+ FileInfo map[string]string `protobuf:"bytes,4,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StartLargeFileRequest) Reset() { *m = StartLargeFileRequest{} }
+func (m *StartLargeFileRequest) String() string { return proto.CompactTextString(m) }
+func (*StartLargeFileRequest) ProtoMessage() {}
+func (*StartLargeFileRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{10}
+}
+func (m *StartLargeFileRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StartLargeFileRequest.Unmarshal(m, b)
+}
+func (m *StartLargeFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StartLargeFileRequest.Marshal(b, m, deterministic)
+}
+func (dst *StartLargeFileRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StartLargeFileRequest.Merge(dst, src)
+}
+func (m *StartLargeFileRequest) XXX_Size() int {
+ return xxx_messageInfo_StartLargeFileRequest.Size(m)
+}
+func (m *StartLargeFileRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_StartLargeFileRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartLargeFileRequest proto.InternalMessageInfo
+
+func (m *StartLargeFileRequest) GetBucketId() string {
+ if m != nil {
+ return m.BucketId
+ }
+ return ""
+}
+
+func (m *StartLargeFileRequest) GetFileName() string {
+ if m != nil {
+ return m.FileName
+ }
+ return ""
+}
+
+func (m *StartLargeFileRequest) GetContentType() string {
+ if m != nil {
+ return m.ContentType
+ }
+ return ""
+}
+
+func (m *StartLargeFileRequest) GetFileInfo() map[string]string {
+ if m != nil {
+ return m.FileInfo
+ }
+ return nil
+}
+
+type StartLargeFileResponse struct {
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
+ AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"`
+ BucketId string `protobuf:"bytes,4,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
+ ContentType string `protobuf:"bytes,5,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+ FileInfo map[string]string `protobuf:"bytes,6,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ UploadTimestamp int64 `protobuf:"varint,7,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StartLargeFileResponse) Reset() { *m = StartLargeFileResponse{} }
+func (m *StartLargeFileResponse) String() string { return proto.CompactTextString(m) }
+func (*StartLargeFileResponse) ProtoMessage() {}
+func (*StartLargeFileResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{11}
+}
+func (m *StartLargeFileResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StartLargeFileResponse.Unmarshal(m, b)
+}
+func (m *StartLargeFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StartLargeFileResponse.Marshal(b, m, deterministic)
+}
+func (dst *StartLargeFileResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StartLargeFileResponse.Merge(dst, src)
+}
+func (m *StartLargeFileResponse) XXX_Size() int {
+ return xxx_messageInfo_StartLargeFileResponse.Size(m)
+}
+func (m *StartLargeFileResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_StartLargeFileResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StartLargeFileResponse proto.InternalMessageInfo
+
+func (m *StartLargeFileResponse) GetFileId() string {
+ if m != nil {
+ return m.FileId
+ }
+ return ""
+}
+
+func (m *StartLargeFileResponse) GetFileName() string {
+ if m != nil {
+ return m.FileName
+ }
+ return ""
+}
+
+func (m *StartLargeFileResponse) GetAccountId() string {
+ if m != nil {
+ return m.AccountId
+ }
+ return ""
+}
+
+func (m *StartLargeFileResponse) GetBucketId() string {
+ if m != nil {
+ return m.BucketId
+ }
+ return ""
+}
+
+func (m *StartLargeFileResponse) GetContentType() string {
+ if m != nil {
+ return m.ContentType
+ }
+ return ""
+}
+
+func (m *StartLargeFileResponse) GetFileInfo() map[string]string {
+ if m != nil {
+ return m.FileInfo
+ }
+ return nil
+}
+
+func (m *StartLargeFileResponse) GetUploadTimestamp() int64 {
+ if m != nil {
+ return m.UploadTimestamp
+ }
+ return 0
+}
+
+type GetUploadPartUrlRequest struct {
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetUploadPartUrlRequest) Reset() { *m = GetUploadPartUrlRequest{} }
+func (m *GetUploadPartUrlRequest) String() string { return proto.CompactTextString(m) }
+func (*GetUploadPartUrlRequest) ProtoMessage() {}
+func (*GetUploadPartUrlRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{12}
+}
+func (m *GetUploadPartUrlRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetUploadPartUrlRequest.Unmarshal(m, b)
+}
+func (m *GetUploadPartUrlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetUploadPartUrlRequest.Marshal(b, m, deterministic)
+}
+func (dst *GetUploadPartUrlRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetUploadPartUrlRequest.Merge(dst, src)
+}
+func (m *GetUploadPartUrlRequest) XXX_Size() int {
+ return xxx_messageInfo_GetUploadPartUrlRequest.Size(m)
+}
+func (m *GetUploadPartUrlRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetUploadPartUrlRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetUploadPartUrlRequest proto.InternalMessageInfo
+
+func (m *GetUploadPartUrlRequest) GetFileId() string {
+ if m != nil {
+ return m.FileId
+ }
+ return ""
+}
+
+type GetUploadPartUrlResponse struct {
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ UploadUrl string `protobuf:"bytes,2,opt,name=upload_url,json=uploadUrl,proto3" json:"upload_url,omitempty"`
+ AuthorizationToken string `protobuf:"bytes,3,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetUploadPartUrlResponse) Reset() { *m = GetUploadPartUrlResponse{} }
+func (m *GetUploadPartUrlResponse) String() string { return proto.CompactTextString(m) }
+func (*GetUploadPartUrlResponse) ProtoMessage() {}
+func (*GetUploadPartUrlResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{13}
+}
+func (m *GetUploadPartUrlResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetUploadPartUrlResponse.Unmarshal(m, b)
+}
+func (m *GetUploadPartUrlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetUploadPartUrlResponse.Marshal(b, m, deterministic)
+}
+func (dst *GetUploadPartUrlResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetUploadPartUrlResponse.Merge(dst, src)
+}
+func (m *GetUploadPartUrlResponse) XXX_Size() int {
+ return xxx_messageInfo_GetUploadPartUrlResponse.Size(m)
+}
+func (m *GetUploadPartUrlResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetUploadPartUrlResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetUploadPartUrlResponse proto.InternalMessageInfo
+
+func (m *GetUploadPartUrlResponse) GetFileId() string {
+ if m != nil {
+ return m.FileId
+ }
+ return ""
+}
+
+func (m *GetUploadPartUrlResponse) GetUploadUrl() string {
+ if m != nil {
+ return m.UploadUrl
+ }
+ return ""
+}
+
+func (m *GetUploadPartUrlResponse) GetAuthorizationToken() string {
+ if m != nil {
+ return m.AuthorizationToken
+ }
+ return ""
+}
+
+type FinishLargeFileRequest struct {
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ PartSha1Array []string `protobuf:"bytes,2,rep,name=part_sha1_array,json=partSha1Array,proto3" json:"part_sha1_array,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FinishLargeFileRequest) Reset() { *m = FinishLargeFileRequest{} }
+func (m *FinishLargeFileRequest) String() string { return proto.CompactTextString(m) }
+func (*FinishLargeFileRequest) ProtoMessage() {}
+func (*FinishLargeFileRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{14}
+}
+func (m *FinishLargeFileRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FinishLargeFileRequest.Unmarshal(m, b)
+}
+func (m *FinishLargeFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FinishLargeFileRequest.Marshal(b, m, deterministic)
+}
+func (dst *FinishLargeFileRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FinishLargeFileRequest.Merge(dst, src)
+}
+func (m *FinishLargeFileRequest) XXX_Size() int {
+ return xxx_messageInfo_FinishLargeFileRequest.Size(m)
+}
+func (m *FinishLargeFileRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_FinishLargeFileRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FinishLargeFileRequest proto.InternalMessageInfo
+
+func (m *FinishLargeFileRequest) GetFileId() string {
+ if m != nil {
+ return m.FileId
+ }
+ return ""
+}
+
+func (m *FinishLargeFileRequest) GetPartSha1Array() []string {
+ if m != nil {
+ return m.PartSha1Array
+ }
+ return nil
+}
+
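part_sha1_array carries one hex SHA1 per uploaded part, in upload order. The sketch below shows only the gRPC side of the large-file flow; the part bytes themselves go to the UploadUrl returned by GetUploadPartUrl over HTTP and are omitted here. The client and import path are assumptions:

```go
package pyreexample

import (
	"context"
	"crypto/sha1"
	"encoding/hex"

	pb "github.com/kurin/blazer/internal/pyre/proto" // hypothetical import path for this generated package
)

// finishLargeFile starts a large file, records one SHA1 per part (the bytes
// themselves would be uploaded to the URL from GetUploadPartUrl), and then
// finishes the file with the accumulated part_sha1_array.
func finishLargeFile(ctx context.Context, client pb.PyreServiceClient, bucketID, name string, parts [][]byte) (*pb.FinishLargeFileResponse, error) {
	start, err := client.StartLargeFile(ctx, &pb.StartLargeFileRequest{
		BucketId:    bucketID,
		FileName:    name,
		ContentType: "application/octet-stream",
	})
	if err != nil {
		return nil, err
	}
	var shas []string
	for _, part := range parts {
		if _, err := client.GetUploadPartUrl(ctx, &pb.GetUploadPartUrlRequest{FileId: start.GetFileId()}); err != nil {
			return nil, err
		}
		// ... upload the part bytes to the returned UploadUrl here ...
		sum := sha1.Sum(part)
		shas = append(shas, hex.EncodeToString(sum[:]))
	}
	return client.FinishLargeFile(ctx, &pb.FinishLargeFileRequest{
		FileId:        start.GetFileId(),
		PartSha1Array: shas,
	})
}
```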
+type FinishLargeFileResponse struct {
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
+ AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"`
+ BucketId string `protobuf:"bytes,4,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
+ ContentLength int64 `protobuf:"varint,5,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"`
+ ContentSha1 string `protobuf:"bytes,6,opt,name=content_sha1,json=contentSha1,proto3" json:"content_sha1,omitempty"`
+ ContentType string `protobuf:"bytes,7,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+ FileInfo map[string]string `protobuf:"bytes,8,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Action string `protobuf:"bytes,9,opt,name=action,proto3" json:"action,omitempty"`
+ UploadTimestamp int64 `protobuf:"varint,10,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FinishLargeFileResponse) Reset() { *m = FinishLargeFileResponse{} }
+func (m *FinishLargeFileResponse) String() string { return proto.CompactTextString(m) }
+func (*FinishLargeFileResponse) ProtoMessage() {}
+func (*FinishLargeFileResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{15}
+}
+func (m *FinishLargeFileResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FinishLargeFileResponse.Unmarshal(m, b)
+}
+func (m *FinishLargeFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FinishLargeFileResponse.Marshal(b, m, deterministic)
+}
+func (dst *FinishLargeFileResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FinishLargeFileResponse.Merge(dst, src)
+}
+func (m *FinishLargeFileResponse) XXX_Size() int {
+ return xxx_messageInfo_FinishLargeFileResponse.Size(m)
+}
+func (m *FinishLargeFileResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_FinishLargeFileResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FinishLargeFileResponse proto.InternalMessageInfo
+
+func (m *FinishLargeFileResponse) GetFileId() string {
+ if m != nil {
+ return m.FileId
+ }
+ return ""
+}
+
+func (m *FinishLargeFileResponse) GetFileName() string {
+ if m != nil {
+ return m.FileName
+ }
+ return ""
+}
+
+func (m *FinishLargeFileResponse) GetAccountId() string {
+ if m != nil {
+ return m.AccountId
+ }
+ return ""
+}
+
+func (m *FinishLargeFileResponse) GetBucketId() string {
+ if m != nil {
+ return m.BucketId
+ }
+ return ""
+}
+
+func (m *FinishLargeFileResponse) GetContentLength() int64 {
+ if m != nil {
+ return m.ContentLength
+ }
+ return 0
+}
+
+func (m *FinishLargeFileResponse) GetContentSha1() string {
+ if m != nil {
+ return m.ContentSha1
+ }
+ return ""
+}
+
+func (m *FinishLargeFileResponse) GetContentType() string {
+ if m != nil {
+ return m.ContentType
+ }
+ return ""
+}
+
+func (m *FinishLargeFileResponse) GetFileInfo() map[string]string {
+ if m != nil {
+ return m.FileInfo
+ }
+ return nil
+}
+
+func (m *FinishLargeFileResponse) GetAction() string {
+ if m != nil {
+ return m.Action
+ }
+ return ""
+}
+
+func (m *FinishLargeFileResponse) GetUploadTimestamp() int64 {
+ if m != nil {
+ return m.UploadTimestamp
+ }
+ return 0
+}
+
+type ListFileVersionsRequest struct {
+ BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
+ StartFileName string `protobuf:"bytes,2,opt,name=start_file_name,json=startFileName,proto3" json:"start_file_name,omitempty"`
+ StartFileId string `protobuf:"bytes,3,opt,name=start_file_id,json=startFileId,proto3" json:"start_file_id,omitempty"`
+ MaxFileCount int32 `protobuf:"varint,4,opt,name=max_file_count,json=maxFileCount,proto3" json:"max_file_count,omitempty"`
+ Prefix string `protobuf:"bytes,5,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ Delimiter string `protobuf:"bytes,6,opt,name=delimiter,proto3" json:"delimiter,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListFileVersionsRequest) Reset() { *m = ListFileVersionsRequest{} }
+func (m *ListFileVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListFileVersionsRequest) ProtoMessage() {}
+func (*ListFileVersionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{16}
+}
+func (m *ListFileVersionsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListFileVersionsRequest.Unmarshal(m, b)
+}
+func (m *ListFileVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListFileVersionsRequest.Marshal(b, m, deterministic)
+}
+func (dst *ListFileVersionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListFileVersionsRequest.Merge(dst, src)
+}
+func (m *ListFileVersionsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListFileVersionsRequest.Size(m)
+}
+func (m *ListFileVersionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListFileVersionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListFileVersionsRequest proto.InternalMessageInfo
+
+func (m *ListFileVersionsRequest) GetBucketId() string {
+ if m != nil {
+ return m.BucketId
+ }
+ return ""
+}
+
+func (m *ListFileVersionsRequest) GetStartFileName() string {
+ if m != nil {
+ return m.StartFileName
+ }
+ return ""
+}
+
+func (m *ListFileVersionsRequest) GetStartFileId() string {
+ if m != nil {
+ return m.StartFileId
+ }
+ return ""
+}
+
+func (m *ListFileVersionsRequest) GetMaxFileCount() int32 {
+ if m != nil {
+ return m.MaxFileCount
+ }
+ return 0
+}
+
+func (m *ListFileVersionsRequest) GetPrefix() string {
+ if m != nil {
+ return m.Prefix
+ }
+ return ""
+}
+
+func (m *ListFileVersionsRequest) GetDelimiter() string {
+ if m != nil {
+ return m.Delimiter
+ }
+ return ""
+}
+
+type ListFileVersionsResponse struct {
+ Files []*File `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"`
+ NextFileName string `protobuf:"bytes,2,opt,name=next_file_name,json=nextFileName,proto3" json:"next_file_name,omitempty"`
+ NextFileId string `protobuf:"bytes,3,opt,name=next_file_id,json=nextFileId,proto3" json:"next_file_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListFileVersionsResponse) Reset() { *m = ListFileVersionsResponse{} }
+func (m *ListFileVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListFileVersionsResponse) ProtoMessage() {}
+func (*ListFileVersionsResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{17}
+}
+func (m *ListFileVersionsResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListFileVersionsResponse.Unmarshal(m, b)
+}
+func (m *ListFileVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListFileVersionsResponse.Marshal(b, m, deterministic)
+}
+func (dst *ListFileVersionsResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListFileVersionsResponse.Merge(dst, src)
+}
+func (m *ListFileVersionsResponse) XXX_Size() int {
+ return xxx_messageInfo_ListFileVersionsResponse.Size(m)
+}
+func (m *ListFileVersionsResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListFileVersionsResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListFileVersionsResponse proto.InternalMessageInfo
+
+func (m *ListFileVersionsResponse) GetFiles() []*File {
+ if m != nil {
+ return m.Files
+ }
+ return nil
+}
+
+func (m *ListFileVersionsResponse) GetNextFileName() string {
+ if m != nil {
+ return m.NextFileName
+ }
+ return ""
+}
+
+func (m *ListFileVersionsResponse) GetNextFileId() string {
+ if m != nil {
+ return m.NextFileId
+ }
+ return ""
+}
+
+type File struct {
+ FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
+ FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
+ ContentLength int64 `protobuf:"varint,3,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"`
+ ContentType string `protobuf:"bytes,4,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+ ContentSha1 string `protobuf:"bytes,5,opt,name=content_sha1,json=contentSha1,proto3" json:"content_sha1,omitempty"`
+ FileInfo map[string]string `protobuf:"bytes,6,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Action string `protobuf:"bytes,7,opt,name=action,proto3" json:"action,omitempty"`
+ Size int64 `protobuf:"varint,8,opt,name=size,proto3" json:"size,omitempty"`
+ UploadTimestamp int64 `protobuf:"varint,9,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *File) Reset() { *m = File{} }
+func (m *File) String() string { return proto.CompactTextString(m) }
+func (*File) ProtoMessage() {}
+func (*File) Descriptor() ([]byte, []int) {
+ return fileDescriptor_pyre_492df08819220afa, []int{18}
+}
+func (m *File) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_File.Unmarshal(m, b)
+}
+func (m *File) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_File.Marshal(b, m, deterministic)
+}
+func (dst *File) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_File.Merge(dst, src)
+}
+func (m *File) XXX_Size() int {
+ return xxx_messageInfo_File.Size(m)
+}
+func (m *File) XXX_DiscardUnknown() {
+ xxx_messageInfo_File.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_File proto.InternalMessageInfo
+
+func (m *File) GetFileId() string {
+ if m != nil {
+ return m.FileId
+ }
+ return ""
+}
+
+func (m *File) GetFileName() string {
+ if m != nil {
+ return m.FileName
+ }
+ return ""
+}
+
+func (m *File) GetContentLength() int64 {
+ if m != nil {
+ return m.ContentLength
+ }
+ return 0
+}
+
+func (m *File) GetContentType() string {
+ if m != nil {
+ return m.ContentType
+ }
+ return ""
+}
+
+func (m *File) GetContentSha1() string {
+ if m != nil {
+ return m.ContentSha1
+ }
+ return ""
+}
+
+func (m *File) GetFileInfo() map[string]string {
+ if m != nil {
+ return m.FileInfo
+ }
+ return nil
+}
+
+func (m *File) GetAction() string {
+ if m != nil {
+ return m.Action
+ }
+ return ""
+}
+
+func (m *File) GetSize() int64 {
+ if m != nil {
+ return m.Size
+ }
+ return 0
+}
+
+func (m *File) GetUploadTimestamp() int64 {
+ if m != nil {
+ return m.UploadTimestamp
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*AuthorizeAccountRequest)(nil), "pyre.proto.AuthorizeAccountRequest")
+ proto.RegisterType((*AuthorizeAccountResponse)(nil), "pyre.proto.AuthorizeAccountResponse")
+ proto.RegisterType((*ListBucketsRequest)(nil), "pyre.proto.ListBucketsRequest")
+ proto.RegisterType((*LifecycleRule)(nil), "pyre.proto.LifecycleRule")
+ proto.RegisterType((*CorsRule)(nil), "pyre.proto.CorsRule")
+ proto.RegisterType((*Bucket)(nil), "pyre.proto.Bucket")
+ proto.RegisterMapType((map[string]string)(nil), "pyre.proto.Bucket.BucketInfoEntry")
+ proto.RegisterType((*ListBucketsResponse)(nil), "pyre.proto.ListBucketsResponse")
+ proto.RegisterType((*GetUploadUrlRequest)(nil), "pyre.proto.GetUploadUrlRequest")
+ proto.RegisterType((*GetUploadUrlResponse)(nil), "pyre.proto.GetUploadUrlResponse")
+ proto.RegisterType((*UploadFileResponse)(nil), "pyre.proto.UploadFileResponse")
+ proto.RegisterMapType((map[string]string)(nil), "pyre.proto.UploadFileResponse.FileInfoEntry")
+ proto.RegisterType((*StartLargeFileRequest)(nil), "pyre.proto.StartLargeFileRequest")
+ proto.RegisterMapType((map[string]string)(nil), "pyre.proto.StartLargeFileRequest.FileInfoEntry")
+ proto.RegisterType((*StartLargeFileResponse)(nil), "pyre.proto.StartLargeFileResponse")
+ proto.RegisterMapType((map[string]string)(nil), "pyre.proto.StartLargeFileResponse.FileInfoEntry")
+ proto.RegisterType((*GetUploadPartUrlRequest)(nil), "pyre.proto.GetUploadPartUrlRequest")
+ proto.RegisterType((*GetUploadPartUrlResponse)(nil), "pyre.proto.GetUploadPartUrlResponse")
+ proto.RegisterType((*FinishLargeFileRequest)(nil), "pyre.proto.FinishLargeFileRequest")
+ proto.RegisterType((*FinishLargeFileResponse)(nil), "pyre.proto.FinishLargeFileResponse")
+ proto.RegisterMapType((map[string]string)(nil), "pyre.proto.FinishLargeFileResponse.FileInfoEntry")
+ proto.RegisterType((*ListFileVersionsRequest)(nil), "pyre.proto.ListFileVersionsRequest")
+ proto.RegisterType((*ListFileVersionsResponse)(nil), "pyre.proto.ListFileVersionsResponse")
+ proto.RegisterType((*File)(nil), "pyre.proto.File")
+ proto.RegisterMapType((map[string]string)(nil), "pyre.proto.File.FileInfoEntry")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// PyreServiceClient is the client API for PyreService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type PyreServiceClient interface {
+ // Used to log in to the B2 API. Returns an authorization token that can be
+ // used for account-level operations, and a URL that should be used as the
+ // base URL for subsequent API calls.
+ AuthorizeAccount(ctx context.Context, in *AuthorizeAccountRequest, opts ...grpc.CallOption) (*AuthorizeAccountResponse, error)
+ // Lists buckets associated with an account, in alphabetical order by bucket
+ // name.
+ ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error)
+ // Creates a new bucket. A bucket belongs to the account used to create it.
+ //
+ // Buckets can be named. The name must be globally unique; no other account can use
+ // a bucket with the same name. Buckets are assigned a unique bucketId which
+ // is used when uploading, downloading, or deleting files.
+ //
+ // There is a limit of 100 buckets per account.
+ CreateBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error)
+ // Deletes the bucket specified. Only buckets that contain no version of any
+ // files can be deleted.
+ DeleteBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error)
+ GetUploadUrl(ctx context.Context, in *GetUploadUrlRequest, opts ...grpc.CallOption) (*GetUploadUrlResponse, error)
+ // Prepares for uploading the parts of a large file.
+ StartLargeFile(ctx context.Context, in *StartLargeFileRequest, opts ...grpc.CallOption) (*StartLargeFileResponse, error)
+ // Gets a URL to use for uploading parts of a large file.
+ GetUploadPartUrl(ctx context.Context, in *GetUploadPartUrlRequest, opts ...grpc.CallOption) (*GetUploadPartUrlResponse, error)
+ // Converts the parts that have been uploaded into a single B2 file.
+ FinishLargeFile(ctx context.Context, in *FinishLargeFileRequest, opts ...grpc.CallOption) (*FinishLargeFileResponse, error)
+ // Lists all of the versions of all of the files contained in one bucket, in
+ // alphabetical order by file name, and by reverse of date/time uploaded for
+ // versions of files with the same name.
+ ListFileVersions(ctx context.Context, in *ListFileVersionsRequest, opts ...grpc.CallOption) (*ListFileVersionsResponse, error)
+}
+
+type pyreServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewPyreServiceClient(cc *grpc.ClientConn) PyreServiceClient {
+ return &pyreServiceClient{cc}
+}
+
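The generated client is used like any other gRPC stub: dial a connection, wrap it with NewPyreServiceClient, and begin with AuthorizeAccount. A minimal sketch; the address, bucket ID, and import path are placeholders:

```go
package main

import (
	"context"
	"log"

	pb "github.com/kurin/blazer/internal/pyre/proto" // hypothetical import path for this generated package
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:8822", grpc.WithInsecure()) // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := pb.NewPyreServiceClient(conn)

	ctx := context.Background()
	auth, err := client.AuthorizeAccount(ctx, &pb.AuthorizeAccountRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("account %s, API base %s", auth.GetAccountId(), auth.GetApiUrl())

	// The simple-upload flow starts by asking for an upload URL for a bucket.
	up, err := client.GetUploadUrl(ctx, &pb.GetUploadUrlRequest{BucketId: "bucket-id"})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("upload to", up.GetUploadUrl())
}
```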
+func (c *pyreServiceClient) AuthorizeAccount(ctx context.Context, in *AuthorizeAccountRequest, opts ...grpc.CallOption) (*AuthorizeAccountResponse, error) {
+ out := new(AuthorizeAccountResponse)
+ err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/AuthorizeAccount", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *pyreServiceClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) {
+ out := new(ListBucketsResponse)
+ err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/ListBuckets", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *pyreServiceClient) CreateBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error) {
+ out := new(Bucket)
+ err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/CreateBucket", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *pyreServiceClient) DeleteBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error) {
+ out := new(Bucket)
+ err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/DeleteBucket", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *pyreServiceClient) GetUploadUrl(ctx context.Context, in *GetUploadUrlRequest, opts ...grpc.CallOption) (*GetUploadUrlResponse, error) {
+ out := new(GetUploadUrlResponse)
+ err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/GetUploadUrl", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *pyreServiceClient) StartLargeFile(ctx context.Context, in *StartLargeFileRequest, opts ...grpc.CallOption) (*StartLargeFileResponse, error) {
+ out := new(StartLargeFileResponse)
+ err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/StartLargeFile", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *pyreServiceClient) GetUploadPartUrl(ctx context.Context, in *GetUploadPartUrlRequest, opts ...grpc.CallOption) (*GetUploadPartUrlResponse, error) {
+ out := new(GetUploadPartUrlResponse)
+ err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/GetUploadPartUrl", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *pyreServiceClient) FinishLargeFile(ctx context.Context, in *FinishLargeFileRequest, opts ...grpc.CallOption) (*FinishLargeFileResponse, error) {
+ out := new(FinishLargeFileResponse)
+ err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/FinishLargeFile", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *pyreServiceClient) ListFileVersions(ctx context.Context, in *ListFileVersionsRequest, opts ...grpc.CallOption) (*ListFileVersionsResponse, error) {
+ out := new(ListFileVersionsResponse)
+ err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/ListFileVersions", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
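Listing is cursor-based: each response's next_file_name and next_file_id seed the start_file_name and start_file_id of the following request. A sketch of draining a bucket page by page (client and import path assumed):

```go
package pyreexample

import (
	"context"
	"fmt"

	pb "github.com/kurin/blazer/internal/pyre/proto" // hypothetical import path for this generated package
)

// listAllVersions walks every file version in a bucket, following the
// next_file_name / next_file_id cursors until the server stops returning them.
func listAllVersions(ctx context.Context, client pb.PyreServiceClient, bucketID string) error {
	req := &pb.ListFileVersionsRequest{
		BucketId:     bucketID,
		MaxFileCount: 1000,
	}
	for {
		resp, err := client.ListFileVersions(ctx, req)
		if err != nil {
			return err
		}
		for _, f := range resp.GetFiles() {
			fmt.Println(f.GetFileName(), f.GetFileId(), f.GetSize())
		}
		if resp.GetNextFileName() == "" && resp.GetNextFileId() == "" {
			return nil // no more pages
		}
		req.StartFileName = resp.GetNextFileName()
		req.StartFileId = resp.GetNextFileId()
	}
}
```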
+// PyreServiceServer is the server API for PyreService service.
+type PyreServiceServer interface {
+ // Used to log in to the B2 API. Returns an authorization token that can be
+ // used for account-level operations, and a URL that should be used as the
+ // base URL for subsequent API calls.
+ AuthorizeAccount(context.Context, *AuthorizeAccountRequest) (*AuthorizeAccountResponse, error)
+ // Lists buckets associated with an account, in alphabetical order by bucket
+ // name.
+ ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error)
+ // Creates a new bucket. A bucket belongs to the account used to create it.
+ //
+ // Buckets can be named. The name must be globally unique; no other account can use
+ // a bucket with the same name. Buckets are assigned a unique bucketId which
+ // is used when uploading, downloading, or deleting files.
+ //
+ // There is a limit of 100 buckets per account.
+ CreateBucket(context.Context, *Bucket) (*Bucket, error)
+ // Deletes the bucket specified. Only buckets that contain no version of any
+ // files can be deleted.
+ DeleteBucket(context.Context, *Bucket) (*Bucket, error)
+ GetUploadUrl(context.Context, *GetUploadUrlRequest) (*GetUploadUrlResponse, error)
+ // Prepares for uploading the parts of a large file.
+ StartLargeFile(context.Context, *StartLargeFileRequest) (*StartLargeFileResponse, error)
+ // Gets a URL to use for uploading parts of a large file.
+ GetUploadPartUrl(context.Context, *GetUploadPartUrlRequest) (*GetUploadPartUrlResponse, error)
+ // Converts the parts that have been uploaded into a single B2 file.
+ FinishLargeFile(context.Context, *FinishLargeFileRequest) (*FinishLargeFileResponse, error)
+ // Lists all of the versions of all of the files contained in one bucket, in
+ // alphabetical order by file name, and by reverse of date/time uploaded for
+ // versions of files with the same name.
+ ListFileVersions(context.Context, *ListFileVersionsRequest) (*ListFileVersionsResponse, error)
+}
+
+func RegisterPyreServiceServer(s *grpc.Server, srv PyreServiceServer) {
+ s.RegisterService(&_PyreService_serviceDesc, srv)
+}
+
+func _PyreService_AuthorizeAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthorizeAccountRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PyreServiceServer).AuthorizeAccount(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/pyre.proto.PyreService/AuthorizeAccount",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PyreServiceServer).AuthorizeAccount(ctx, req.(*AuthorizeAccountRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PyreService_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListBucketsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PyreServiceServer).ListBuckets(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/pyre.proto.PyreService/ListBuckets",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PyreServiceServer).ListBuckets(ctx, req.(*ListBucketsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PyreService_CreateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Bucket)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PyreServiceServer).CreateBucket(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/pyre.proto.PyreService/CreateBucket",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PyreServiceServer).CreateBucket(ctx, req.(*Bucket))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PyreService_DeleteBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(Bucket)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PyreServiceServer).DeleteBucket(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/pyre.proto.PyreService/DeleteBucket",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PyreServiceServer).DeleteBucket(ctx, req.(*Bucket))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PyreService_GetUploadUrl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetUploadUrlRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PyreServiceServer).GetUploadUrl(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/pyre.proto.PyreService/GetUploadUrl",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PyreServiceServer).GetUploadUrl(ctx, req.(*GetUploadUrlRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PyreService_StartLargeFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StartLargeFileRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PyreServiceServer).StartLargeFile(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/pyre.proto.PyreService/StartLargeFile",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PyreServiceServer).StartLargeFile(ctx, req.(*StartLargeFileRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PyreService_GetUploadPartUrl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetUploadPartUrlRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PyreServiceServer).GetUploadPartUrl(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/pyre.proto.PyreService/GetUploadPartUrl",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PyreServiceServer).GetUploadPartUrl(ctx, req.(*GetUploadPartUrlRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PyreService_FinishLargeFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FinishLargeFileRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PyreServiceServer).FinishLargeFile(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/pyre.proto.PyreService/FinishLargeFile",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PyreServiceServer).FinishLargeFile(ctx, req.(*FinishLargeFileRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _PyreService_ListFileVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListFileVersionsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PyreServiceServer).ListFileVersions(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/pyre.proto.PyreService/ListFileVersions",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PyreServiceServer).ListFileVersions(ctx, req.(*ListFileVersionsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _PyreService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "pyre.proto.PyreService",
+ HandlerType: (*PyreServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "AuthorizeAccount",
+ Handler: _PyreService_AuthorizeAccount_Handler,
+ },
+ {
+ MethodName: "ListBuckets",
+ Handler: _PyreService_ListBuckets_Handler,
+ },
+ {
+ MethodName: "CreateBucket",
+ Handler: _PyreService_CreateBucket_Handler,
+ },
+ {
+ MethodName: "DeleteBucket",
+ Handler: _PyreService_DeleteBucket_Handler,
+ },
+ {
+ MethodName: "GetUploadUrl",
+ Handler: _PyreService_GetUploadUrl_Handler,
+ },
+ {
+ MethodName: "StartLargeFile",
+ Handler: _PyreService_StartLargeFile_Handler,
+ },
+ {
+ MethodName: "GetUploadPartUrl",
+ Handler: _PyreService_GetUploadPartUrl_Handler,
+ },
+ {
+ MethodName: "FinishLargeFile",
+ Handler: _PyreService_FinishLargeFile_Handler,
+ },
+ {
+ MethodName: "ListFileVersions",
+ Handler: _PyreService_ListFileVersions_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "proto/pyre.proto",
+}
+
+func init() { proto.RegisterFile("proto/pyre.proto", fileDescriptor_pyre_492df08819220afa) }
+
+var fileDescriptor_pyre_492df08819220afa = []byte{
+ // 1591 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcb, 0x6f, 0x1b, 0x55,
+ 0x17, 0xd7, 0xd8, 0x71, 0x1c, 0x1f, 0xc7, 0xb1, 0x7b, 0xd3, 0x36, 0x13, 0xb7, 0x4d, 0xdc, 0xdb,
+ 0x24, 0x5f, 0x9a, 0xaf, 0x5f, 0xfc, 0x25, 0x08, 0x09, 0xb5, 0x02, 0x91, 0x06, 0x42, 0x23, 0xa5,
+ 0xa5, 0x72, 0x52, 0x24, 0x16, 0x68, 0x74, 0xe3, 0xb9, 0xb1, 0xaf, 0x3a, 0x0f, 0x73, 0x67, 0x9c,
+ 0xc6, 0x5d, 0xf1, 0x50, 0x17, 0x74, 0x0b, 0xff, 0x00, 0x7f, 0x07, 0x12, 0x1b, 0x36, 0xec, 0xd9,
+ 0xb3, 0xea, 0x86, 0x7f, 0x81, 0x0d, 0xe8, 0x3e, 0xc6, 0x9e, 0x47, 0x9c, 0x14, 0x68, 0x45, 0x57,
+ 0x9e, 0x39, 0xf7, 0x77, 0xce, 0x3d, 0xf7, 0x77, 0x1e, 0x73, 0xae, 0xa1, 0xd6, 0xe3, 0x7e, 0xe8,
+ 0x37, 0x7b, 0x03, 0x4e, 0xd7, 0xe5, 0x23, 0x82, 0xd1, 0x73, 0xfd, 0x6a, 0xc7, 0xf7, 0x3b, 0x0e,
+ 0x6d, 0x92, 0x1e, 0x6b, 0x12, 0xcf, 0xf3, 0x43, 0x12, 0x32, 0xdf, 0x0b, 0xd4, 0x2a, 0x9e, 0x87,
+ 0xb9, 0xad, 0x7e, 0xd8, 0xf5, 0x39, 0x7b, 0x4a, 0xb7, 0xda, 0x6d, 0xbf, 0xef, 0x85, 0x2d, 0xfa,
+ 0x79, 0x9f, 0x06, 0x21, 0xfe, 0x29, 0x07, 0x66, 0x76, 0x2d, 0xe8, 0xf9, 0x5e, 0x40, 0xd1, 0x35,
+ 0x00, 0xa2, 0x44, 0x16, 0xb3, 0x4d, 0xa3, 0x61, 0xac, 0x96, 0x5a, 0x25, 0x2d, 0xd9, 0xb5, 0x51,
+ 0x13, 0x66, 0x89, 0x56, 0x95, 0xdb, 0x59, 0xa1, 0xff, 0x98, 0x7a, 0x66, 0x4e, 0xe2, 0x50, 0x62,
+ 0xe9, 0x40, 0xac, 0xa0, 0x39, 0x28, 0x92, 0x1e, 0xb3, 0xfa, 0xdc, 0x31, 0xf3, 0x12, 0x34, 0x49,
+ 0x7a, 0xec, 0x11, 0x77, 0xd0, 0x75, 0x98, 0xb6, 0xfd, 0x27, 0x9e, 0xe3, 0x13, 0x5b, 0xae, 0x4e,
+ 0xc8, 0xd5, 0x72, 0x24, 0x13, 0x90, 0x4d, 0xb8, 0xc4, 0x69, 0xdb, 0x77, 0x5d, 0xea, 0xd9, 0xd4,
+ 0xb6, 0x7a, 0x84, 0x87, 0x56, 0xc0, 0x9e, 0x52, 0xb3, 0xd0, 0x30, 0x56, 0x0b, 0xad, 0xd9, 0xd8,
+ 0xe2, 0x43, 0xc2, 0xc3, 0x7d, 0xf6, 0x94, 0xa2, 0x3b, 0x50, 0x27, 0x87, 0x81, 0xef, 0xf4, 0x43,
+ 0x6a, 0xb9, 0xcc, 0x63, 0x6e, 0xdf, 0x8d, 0x29, 0x4e, 0x4a, 0xc5, 0xb9, 0x08, 0x71, 0x5f, 0x01,
+ 0x86, 0xca, 0x6b, 0x70, 0x21, 0xab, 0x53, 0x94, 0x3a, 0x55, 0x37, 0x89, 0xc5, 0xdf, 0x19, 0x80,
+ 0xf6, 0x58, 0x10, 0xde, 0xed, 0xb7, 0x1f, 0xd3, 0x30, 0xd0, 0xe4, 0x9e, 0xc7, 0xdf, 0x15, 0x28,
+ 0x1d, 0x4a, 0x05, 0xb1, 0xaa, 0x58, 0x9b, 0x52, 0x82, 0x5d, 0x1b, 0x2d, 0x42, 0x59, 0x2f, 0x7a,
+ 0xc4, 0xa5, 0x9a, 0x2f, 0x50, 0xa2, 0x07, 0xc4, 0xa5, 0x82, 0x33, 0x0d, 0x08, 0x07, 0x3d, 0x1a,
+ 0x98, 0x13, 0x8d, 0xbc, 0xe0, 0x4c, 0xc9, 0x0e, 0x84, 0x08, 0xff, 0x60, 0x40, 0x65, 0x8f, 0x1d,
+ 0xd1, 0xf6, 0xa0, 0xed, 0xd0, 0x56, 0xdf, 0xa1, 0xe8, 0x7d, 0xb8, 0x66, 0x93, 0x41, 0x60, 0x1d,
+ 0x71, 0xdf, 0xb5, 0xfa, 0x3d, 0x41, 0x2e, 0xf3, 0x3a, 0x56, 0xe8, 0x5b, 0x5d, 0x26, 0x9e, 0xa4,
+ 0x93, 0x85, 0xd6, 0xbc, 0x00, 0xed, 0x70, 0xdf, 0x7d, 0x14, 0x41, 0x0e, 0xfc, 0x7b, 0x12, 0x80,
+ 0xde, 0x83, 0xab, 0x23, 0x0b, 0x4a, 0x49, 0xa8, 0xdb, 0xd4, 0xa1, 0xa1, 0x30, 0x90, 0x93, 0x06,
+ 0xcc, 0xc8, 0x80, 0xd2, 0x3a, 0xf0, 0x3f, 0xd0, 0xeb, 0x68, 0x15, 0x6a, 0x47, 0xcc, 0xa1, 0xf2,
+ 0x54, 0x56, 0x8f, 0xd3, 0x23, 0x76, 0xa2, 0x0f, 0x37, 0x23, 0xe4, 0xe2, 0x68, 0x0f, 0xa5, 0x14,
+ 0x7f, 0x91, 0x83, 0xa9, 0x6d, 0x9f, 0x07, 0xd2, 0xf1, 0x25, 0x98, 0x69, 0xfb, 0x3c, 0xb0, 0x78,
+ 0x5f, 0xeb, 0x6a, 0x3a, 0xa7, 0xdb, 0x1a, 0x21, 0x39, 0xf9, 0x0f, 0x54, 0x89, 0xe3, 0xf8, 0x4f,
+ 0xa8, 0x6d, 0xf9, 0x9c, 0x75, 0x98, 0x17, 0x98, 0x39, 0x49, 0xcb, 0x8c, 0x16, 0x7f, 0xac, 0xa4,
+ 0xe8, 0x7f, 0x80, 0x86, 0xc0, 0x1e, 0xe5, 0xaa, 0x5a, 0xcc, 0xbc, 0xc4, 0x5e, 0x88, 0xb0, 0xc3,
+ 0x85, 0xb8, 0xdd, 0x2e, 0x25, 0x36, 0xe5, 0x11, 0xdd, 0x91, 0xdd, 0x7b, 0x4a, 0x8a, 0x96, 0x61,
+ 0x86, 0x9e, 0xf4, 0xfc, 0x80, 0x0e, 0x71, 0x05, 0x89, 0xab, 0x28, 0x69, 0x04, 0x5b, 0x81, 0xaa,
+ 0x4b, 0x4e, 0x2c, 0xd2, 0xa1, 0x56, 0x40, 0xdb, 0xbe, 0x67, 0x07, 0x3a, 0x1b, 0x2b, 0x2e, 0x39,
+ 0xd9, 0xea, 0xd0, 0x7d, 0x25, 0xc4, 0xdf, 0xe7, 0x61, 0x52, 0xe5, 0xd4, 0xeb, 0xcd, 0xa5, 0x11,
+ 0x40, 0xe4, 0x92, 0x2e, 0x3f, 0x18, 0xa5, 0x12, 0xda, 0x1e, 0x02, 0x98, 0x77, 0xe4, 0xcb, 0x43,
+ 0x95, 0x37, 0xf1, 0xfa, 0xa8, 0x03, 0xad, 0x2b, 0x37, 0xf5, 0xcf, 0xae, 0x77, 0xe4, 0x7f, 0xe8,
+ 0x85, 0x7c, 0x10, 0x19, 0x11, 0x02, 0xf4, 0x36, 0x94, 0xdb, 0x3e, 0xa7, 0x2a, 0x88, 0xe2, 0xc4,
+ 0xc2, 0xc8, 0xc5, 0xb8, 0x91, 0x28, 0xdc, 0x2d, 0x90, 0x40, 0xf1, 0x18, 0xa0, 0xbb, 0x50, 0x75,
+ 0xa2, 0x24, 0xd6, 0xaa, 0x45, 0xa9, 0x3a, 0x1f, 0x57, 0x4d, 0xe4, 0x79, 0x6b, 0xc6, 0x89, 0xbf,
+ 0x06, 0xa8, 0x0e, 0x53, 0x9c, 0x1e, 0xb3, 0x80, 0xf9, 0x9e, 0x39, 0x25, 0x99, 0x1e, 0xbe, 0xd7,
+ 0xdf, 0x85, 0x6a, 0xca, 0x6b, 0x54, 0x83, 0xfc, 0x63, 0x3a, 0xd0, 0x2c, 0x8b, 0x47, 0x74, 0x11,
+ 0x0a, 0xc7, 0xc4, 0xe9, 0x53, 0xcd, 0xad, 0x7a, 0xb9, 0x9d, 0x7b, 0xc7, 0xc0, 0xdb, 0x30, 0x9b,
+ 0x28, 0x7d, 0xdd, 0x3b, 0x6f, 0x41, 0x51, 0x1d, 0x3d, 0x30, 0x0d, 0xe9, 0x2d, 0xca, 0xb2, 0xd5,
+ 0x8a, 0x20, 0x78, 0x13, 0x66, 0x3f, 0xa2, 0xa1, 0xaa, 0xb6, 0x47, 0xdc, 0x89, 0x1a, 0x48, 0x22,
+ 0xaa, 0x46, 0x32, 0xaa, 0xf8, 0x6b, 0x03, 0x2e, 0x26, 0x95, 0xf4, 0xd6, 0x67, 0x69, 0x89, 0x3c,
+ 0x52, 0x75, 0x2f, 0x1b, 0xad, 0x3a, 0x4d, 0xa9, 0x1f, 0xd9, 0x18, 0xd7, 0xd3, 0xf3, 0xe3, 0x7a,
+ 0x3a, 0xfe, 0x31, 0x0f, 0x48, 0xb9, 0xb0, 0xc3, 0x1c, 0x3a, 0xf4, 0x61, 0x0e, 0x8a, 0xb2, 0xcc,
+ 0x87, 0x1e, 0x4c, 0x8a, 0x57, 0x95, 0xa8, 0xc3, 0xfa, 0x8f, 0x12, 0x35, 0x2a, 0xfc, 0x54, 0x92,
+ 0xe7, 0xcf, 0x4c, 0xf2, 0x89, 0xd4, 0xc1, 0x96, 0x45, 0x87, 0xf0, 0x42, 0xea, 0x85, 0x96, 0x43,
+ 0xbd, 0x4e, 0xd8, 0xd5, 0x5f, 0x86, 0x8a, 0x96, 0xee, 0x49, 0xa1, 0x68, 0x9b, 0x11, 0x2c, 0xe8,
+ 0x92, 0x0d, 0x59, 0x77, 0xa5, 0x56, 0x59, 0xcb, 0xf6, 0xbb, 0x64, 0x23, 0x0e, 0x91, 0xe5, 0x50,
+ 0x4c, 0x40, 0x64, 0x3d, 0xec, 0xea, 0x53, 0xc8, 0x6a, 0x98, 0x92, 0xf1, 0xbd, 0x15, 0x8f, 0x6f,
+ 0x96, 0x91, 0x75, 0xf1, 0x32, 0xaa, 0x0b, 0x79, 0x66, 0x59, 0x15, 0x97, 0x61, 0x92, 0xb4, 0x05,
+ 0x9f, 0x66, 0x49, 0x7f, 0x13, 0xe5, 0x1b, 0xba, 0x09, 0x35, 0x1d, 0xa8, 0x90, 0xb9, 0x34, 0x08,
+ 0x89, 0xdb, 0x33, 0xa1, 0x61, 0xac, 0xe6, 0x5b, 0x55, 0x25, 0x3f, 0x88, 0xc4, 0xf5, 0x3b, 0x50,
+ 0x49, 0x58, 0xff, 0x4b, 0xf9, 0xfb, 0x87, 0x01, 0x97, 0xf6, 0x43, 0xc2, 0xc3, 0x3d, 0xc2, 0x3b,
+ 0x54, 0xb9, 0x7c, 0x7e, 0xf6, 0x9d, 0x1d, 0xc7, 0x34, 0x83, 0xf9, 0x2c, 0x83, 0x7b, 0x71, 0x06,
+ 0x27, 0x24, 0x83, 0xcd, 0x38, 0x83, 0xa7, 0xba, 0x34, 0x8e, 0xc4, 0x7f, 0xc6, 0xc0, 0x8b, 0x1c,
+ 0x5c, 0x4e, 0x6f, 0xf7, 0xaf, 0xa5, 0x71, 0x9a, 0xba, 0x42, 0x96, 0xba, 0xfb, 0x71, 0xea, 0x54,
+ 0x17, 0xfd, 0xff, 0x59, 0xd4, 0x9d, 0x93, 0x80, 0xa7, 0x25, 0x5a, 0xf1, 0x35, 0x24, 0xda, 0x26,
+ 0xcc, 0x0d, 0xdb, 0x95, 0x98, 0x9c, 0x62, 0x7d, 0x6e, 0x1c, 0xcd, 0xa2, 0xc7, 0x99, 0x59, 0xa5,
+ 0xf3, 0x82, 0xf3, 0xaa, 0x7b, 0xdc, 0xa7, 0x70, 0x79, 0x87, 0x79, 0x2c, 0xe8, 0x66, 0x4a, 0x64,
+ 0xac, 0x0b, 0x2b, 0x50, 0x55, 0x53, 0x63, 0x97, 0x6c, 0x58, 0x84, 0x73, 0x32, 0xd0, 0x93, 0x48,
+ 0x45, 0x88, 0x45, 0x9b, 0xd9, 0x12, 0x42, 0xfc, 0x73, 0x1e, 0xe6, 0x32, 0xb6, 0xdf, 0xb0, 0x1e,
+ 0x9a, 0x7f, 0x3d, 0x3d, 0xf4, 0x41, 0xb6, 0x87, 0x6e, 0xc4, 0xd3, 0x78, 0x0c, 0x2d, 0x6f, 0x6c,
+ 0x23, 0xfd, 0xd5, 0x80, 0x39, 0x31, 0x09, 0x08, 0x0b, 0x9f, 0x50, 0x2e, 0x66, 0x8b, 0xe0, 0xa5,
+ 0x5a, 0xe9, 0x0a, 0x54, 0x03, 0x51, 0xb2, 0x56, 0x3a, 0xa8, 0x15, 0x29, 0xde, 0x89, 0x22, 0x8b,
+ 0xa1, 0x12, 0xc3, 0x0d, 0x83, 0x5b, 0x1e, 0xa2, 0x76, 0x6d, 0x31, 0x27, 0x8b, 0xc9, 0x52, 0x22,
+ 0x64, 0xc8, 0x65, 0x8c, 0x0b, 0xad, 0x69, 0x97, 0x9c, 0x08, 0xc8, 0xb6, 0x90, 0x09, 0xaa, 0xf4,
+ 0xe8, 0xad, 0xda, 0x8b, 0x7e, 0x43, 0x57, 0xa1, 0x64, 0x53, 0x87, 0xb9, 0x2c, 0xa4, 0x5c, 0x47,
+ 0x75, 0x24, 0xc0, 0xcf, 0x0d, 0x30, 0xb3, 0x07, 0xd4, 0xc9, 0xba, 0x02, 0x05, 0xb1, 0x69, 0x34,
+ 0xed, 0xd4, 0x92, 0x91, 0x74, 0x68, 0x4b, 0x2d, 0x0b, 0x07, 0x3d, 0x7a, 0x92, 0x3d, 0xeb, 0xb4,
+ 0x90, 0x0e, 0x8f, 0xda, 0x80, 0xe9, 0x11, 0x6a, 0x78, 0x52, 0x88, 0x30, 0xbb, 0x36, 0xfe, 0x3d,
+ 0x07, 0x13, 0xe2, 0xf1, 0x6f, 0x56, 0x49, 0x36, 0xd3, 0xf3, 0xe7, 0x64, 0x7a, 0x6c, 0x32, 0x4e,
+ 0xa4, 0x71, 0xba, 0x18, 0x0a, 0xd9, 0x62, 0xb8, 0x93, 0x6d, 0xd8, 0x0b, 0x69, 0x7e, 0x5e, 0x22,
+ 0xad, 0x8b, 0x89, 0xb4, 0x46, 0x30, 0x21, 0xaf, 0xa4, 0x53, 0xd2, 0x6f, 0xf9, 0x7c, 0x6a, 0xaa,
+ 0x97, 0x5e, 0x7d, 0xaa, 0x6f, 0xfe, 0x36, 0x05, 0xe5, 0x87, 0x03, 0x4e, 0xf7, 0x29, 0x3f, 0x66,
+ 0x6d, 0x8a, 0x9e, 0x19, 0x50, 0x4b, 0xff, 0x8b, 0x80, 0x6e, 0xc4, 0x8f, 0x38, 0xe6, 0xff, 0x87,
+ 0xfa, 0xd2, 0xd9, 0x20, 0x95, 0x5c, 0x78, 0xe5, 0xab, 0x5f, 0x5e, 0x7c, 0x9b, 0x6b, 0xa0, 0x85,
+ 0xe6, 0xe1, 0x26, 0xe9, 0xb1, 0xe6, 0xf1, 0x46, 0xf3, 0x70, 0xd3, 0x8a, 0x3a, 0x35, 0xb5, 0x74,
+ 0x7b, 0x43, 0x21, 0x94, 0x63, 0xb3, 0x38, 0x5a, 0x48, 0x5e, 0x10, 0xd2, 0xf7, 0xf3, 0xfa, 0xe2,
+ 0xd8, 0x75, 0xbd, 0xef, 0x92, 0xdc, 0x77, 0x01, 0xcf, 0x27, 0xf6, 0x75, 0x58, 0x10, 0x5a, 0x7a,
+ 0x72, 0xbf, 0x6d, 0xac, 0xa1, 0xcf, 0x60, 0x7a, 0x9b, 0x53, 0x12, 0x52, 0x7d, 0x55, 0x3b, 0x65,
+ 0xd2, 0xaf, 0x9f, 0x22, 0xc3, 0xcb, 0xd2, 0xfa, 0x22, 0xae, 0x27, 0xac, 0xb7, 0xa5, 0x29, 0x6d,
+ 0x5f, 0x9b, 0x97, 0xb7, 0xe7, 0x57, 0x63, 0x5e, 0x5e, 0xd4, 0xe3, 0xe6, 0x07, 0x30, 0x1d, 0xbf,
+ 0x45, 0xa0, 0x04, 0x29, 0xa7, 0x5c, 0x4a, 0xea, 0x8d, 0xf1, 0x80, 0x64, 0xb8, 0xf0, 0x95, 0xc4,
+ 0xce, 0x1d, 0x1a, 0x5a, 0xa3, 0xcf, 0xb2, 0xd8, 0xfa, 0x4b, 0x03, 0x66, 0x92, 0xc3, 0x0a, 0xba,
+ 0x7e, 0xee, 0x0c, 0x58, 0xc7, 0xe7, 0xcf, 0x3a, 0x78, 0x55, 0x7a, 0x80, 0xf1, 0xb5, 0x84, 0x07,
+ 0xaa, 0x7b, 0x3a, 0x02, 0x2d, 0x3b, 0x8b, 0xf0, 0xe1, 0xb9, 0x01, 0xb5, 0xf4, 0x84, 0x91, 0x4c,
+ 0xdd, 0x31, 0x43, 0x4b, 0x32, 0x75, 0xc7, 0x0d, 0x29, 0xf8, 0xbf, 0xd2, 0x93, 0x65, 0xdc, 0x18,
+ 0xc7, 0x85, 0x1c, 0x13, 0x34, 0x21, 0xcf, 0x0c, 0xa8, 0xa6, 0x3e, 0x7b, 0x08, 0x9f, 0xf9, 0x4d,
+ 0x54, 0xae, 0xdc, 0x78, 0x89, 0xef, 0x26, 0xbe, 0x29, 0x3d, 0xb9, 0x81, 0x93, 0x45, 0x74, 0x24,
+ 0xd1, 0x29, 0x52, 0xbe, 0x31, 0xa0, 0x96, 0xee, 0xf4, 0x49, 0x52, 0xc6, 0x7c, 0xe8, 0x92, 0xa4,
+ 0x8c, 0xfb, 0x58, 0xe0, 0x35, 0xe9, 0xca, 0x12, 0x5e, 0xcc, 0xd6, 0x95, 0x6c, 0x94, 0xc7, 0x5a,
+ 0xe1, 0xb6, 0xb1, 0x76, 0x38, 0x29, 0x6d, 0xbd, 0xf5, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x09,
+ 0x84, 0xd9, 0x63, 0x01, 0x15, 0x00, 0x00,
+}
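
The generated stubs above pair a `PyreServiceClient` with the `PyreServiceServer` interface registered via `RegisterPyreServiceServer`. As a rough sketch of how code inside the blazer module might exercise them (the address and account ID are placeholders, and the import only works from within the module because `pyre` is an internal package):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pb "github.com/kurin/blazer/internal/pyre/proto"
)

func main() {
	// Assumed address of a locally running pyre gRPC server.
	conn, err := grpc.Dial("localhost:8823", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewPyreServiceClient(conn)
	resp, err := client.ListBuckets(context.Background(), &pb.ListBucketsRequest{AccountId: "test-account"})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range resp.Buckets {
		fmt.Println(b.BucketName, b.BucketId)
	}
}
```
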
diff --git a/internal/pyre/proto/pyre.pb.gw.go b/internal/pyre/proto/pyre.pb.gw.go
new file mode 100644
index 0000000..f3765a2
--- /dev/null
+++ b/internal/pyre/proto/pyre.pb.gw.go
@@ -0,0 +1,484 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: proto/pyre.proto
+
+/*
+Package pyre_proto is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package pyre_proto
+
+import (
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+
+func request_PyreService_AuthorizeAccount_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq AuthorizeAccountRequest
+ var metadata runtime.ServerMetadata
+
+ msg, err := client.AuthorizeAccount(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_PyreService_ListBuckets_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ListBucketsRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.ListBuckets(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_PyreService_CreateBucket_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq Bucket
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.CreateBucket(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_PyreService_DeleteBucket_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq Bucket
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.DeleteBucket(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_PyreService_GetUploadUrl_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq GetUploadUrlRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.GetUploadUrl(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_PyreService_StartLargeFile_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq StartLargeFileRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.StartLargeFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_PyreService_GetUploadPartUrl_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq GetUploadPartUrlRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.GetUploadPartUrl(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_PyreService_FinishLargeFile_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq FinishLargeFileRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.FinishLargeFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_PyreService_ListFileVersions_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ListFileVersionsRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.ListFileVersions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+// RegisterPyreServiceHandlerFromEndpoint is same as RegisterPyreServiceHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterPyreServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterPyreServiceHandler(ctx, mux, conn)
+}
+
+// RegisterPyreServiceHandler registers the http handlers for service PyreService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterPyreServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterPyreServiceHandlerClient(ctx, mux, NewPyreServiceClient(conn))
+}
+
+// RegisterPyreServiceHandlerClient registers the http handlers for service PyreService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "PyreServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "PyreServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "PyreServiceClient" to call the correct interceptors.
+func RegisterPyreServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client PyreServiceClient) error {
+
+ mux.Handle("GET", pattern_PyreService_AuthorizeAccount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_PyreService_AuthorizeAccount_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_PyreService_AuthorizeAccount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_PyreService_ListBuckets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_PyreService_ListBuckets_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_PyreService_ListBuckets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_PyreService_CreateBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_PyreService_CreateBucket_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_PyreService_CreateBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_PyreService_DeleteBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_PyreService_DeleteBucket_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_PyreService_DeleteBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_PyreService_GetUploadUrl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_PyreService_GetUploadUrl_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_PyreService_GetUploadUrl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_PyreService_StartLargeFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_PyreService_StartLargeFile_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_PyreService_StartLargeFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_PyreService_GetUploadPartUrl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_PyreService_GetUploadPartUrl_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_PyreService_GetUploadPartUrl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_PyreService_FinishLargeFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_PyreService_FinishLargeFile_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_PyreService_FinishLargeFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_PyreService_ListFileVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_PyreService_ListFileVersions_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_PyreService_ListFileVersions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_PyreService_AuthorizeAccount_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_authorize_account"}, ""))
+
+ pattern_PyreService_ListBuckets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_list_buckets"}, ""))
+
+ pattern_PyreService_CreateBucket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_create_bucket"}, ""))
+
+ pattern_PyreService_DeleteBucket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_delete_bucket"}, ""))
+
+ pattern_PyreService_GetUploadUrl_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_get_upload_url"}, ""))
+
+ pattern_PyreService_StartLargeFile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_start_large_file"}, ""))
+
+ pattern_PyreService_GetUploadPartUrl_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_get_upload_part_url"}, ""))
+
+ pattern_PyreService_FinishLargeFile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_finish_large_file"}, ""))
+
+ pattern_PyreService_ListFileVersions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_list_file_versions"}, ""))
+)
+
+var (
+ forward_PyreService_AuthorizeAccount_0 = runtime.ForwardResponseMessage
+
+ forward_PyreService_ListBuckets_0 = runtime.ForwardResponseMessage
+
+ forward_PyreService_CreateBucket_0 = runtime.ForwardResponseMessage
+
+ forward_PyreService_DeleteBucket_0 = runtime.ForwardResponseMessage
+
+ forward_PyreService_GetUploadUrl_0 = runtime.ForwardResponseMessage
+
+ forward_PyreService_StartLargeFile_0 = runtime.ForwardResponseMessage
+
+ forward_PyreService_GetUploadPartUrl_0 = runtime.ForwardResponseMessage
+
+ forward_PyreService_FinishLargeFile_0 = runtime.ForwardResponseMessage
+
+ forward_PyreService_ListFileVersions_0 = runtime.ForwardResponseMessage
+)
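
The gateway file above wires each JSON endpoint (for example `POST /b2api/v1/b2_list_buckets`) to the corresponding gRPC method. A minimal sketch of standing it up in front of a running pyre gRPC server might look like this; both addresses are assumptions, and the import again presumes code living inside the blazer module:

```go
package main

import (
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"

	pb "github.com/kurin/blazer/internal/pyre/proto"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	// "localhost:8823" is the assumed address of the gRPC server.
	if err := pb.RegisterPyreServiceHandlerFromEndpoint(ctx, mux, "localhost:8823", opts); err != nil {
		log.Fatal(err)
	}
	// B2-style JSON requests hitting this mux are now translated into gRPC calls.
	log.Fatal(http.ListenAndServe("localhost:8822", mux))
}
```
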
diff --git a/internal/pyre/proto/pyre.proto b/internal/pyre/proto/pyre.proto
new file mode 100644
index 0000000..102b97d
--- /dev/null
+++ b/internal/pyre/proto/pyre.proto
@@ -0,0 +1,335 @@
+// Copyright 2018, the Blazer authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+import "google/api/annotations.proto";
+
+package pyre.proto;
+
+message AuthorizeAccountRequest {}
+
+message AuthorizeAccountResponse {
+ // The identifier for the account.
+ string account_id = 1;
+ // An authorization token to use with all calls, other than
+ // b2_authorize_account, that need an Authorization header. This
+ // authorization token is valid for at most 24 hours.
+ string authorization_token = 2;
+ // The base URL to use for all API calls except for uploading and downloading
+ // files.
+ string api_url = 3;
+ // The base URL to use for downloading files.
+ string download_url = 4;
+ // The recommended size for each part of a large file. We recommend using
+ // this part size for optimal upload performance.
+ int32 recommended_part_size = 5;
+ // The smallest possible size of a part of a large file (except the last
+ // one). This is smaller than the recommended part size. If you use it, you
+ // may find that it takes longer overall to upload a large file.
+ int32 absolute_minimum_part_size = 6;
+ int32 minimum_part_size = 7; // alias for recommended_part_size
+}
+
+message ListBucketsRequest {
+ // The ID of your account.
+ string account_id = 1;
+ // When specified, the result will be a list containing just this bucket, if
+ // it's present in the account, or no buckets if the account does not have a
+ // bucket with this ID.
+ string bucket_id = 2;
+ // When specified, the result will be a list containing just this bucket, if
+ // it's present in the account, or no buckets if the account does not have a
+ // bucket with this name.
+ string bucket_name = 3;
+ // If present, B2 will use it as a filter for bucket types returned in the
+ // list buckets response. If not present, only buckets with bucket types
+ // "allPublic", "allPrivate" and "snapshot" will be returned. A special
+ // filter value of ["all"] will return all bucket types.
+ //
+ // If present, it must be in the form of a json array of strings containing
+ // valid bucket types in quotes and separated by a comma. Valid bucket types
+ // include "allPrivate", "allPublic", "snapshot", and other values added in
+ // the future.
+ //
+ // A bad request error will be returned if "all" is used with other bucket
+ // types, this field is empty, or invalid bucket types are requested.
+ repeated string bucket_types = 4;
+}
+
+message LifecycleRule {
+ // After a file is uploaded, the number of days before it can be hidden.
+ int32 days_from_uploading_to_hiding = 1;
+ // After a file is hidden, the number of days before it can be deleted.
+ int32 days_from_hiding_to_deleting = 2;
+ // The rule applies to files whose names start with this prefix.
+ string file_name_prefix = 3;
+}
+
+message CorsRule {
+ // A name for humans to recognize the rule in a user interface. Names must be
+ // unique within a bucket. Names can consist of upper-case and lower-case
+ // English letters, numbers, and "-". No other characters are allowed. A name
+ // must be at least 6 characters long, and can be at most 50 characters long.
+ // These are all allowed names: myPhotosSite, allowAnyHttps,
+ // backblaze-images. Names that start with "b2-" are reserved for Backblaze
+ // use.
+ string cors_rule_name = 1;
+ // A non-empty list specifying which origins the rule covers. Each value may
+ // have one of many formats:
+ //
+ // * The origin can be fully specified, such as http://www.example.com:8180
+ // or https://www.example.com:4433.
+ //
+ // * The origin can omit a default port, such as https://www.example.com.
+ //
+ // * The origin may have a single '*' as part of the domain name, such as
+ // https://*.example.com, https://*:8443 or https://*.
+ //
+ // * The origin may be 'https' to match any origin that uses HTTPS. (This is
+ // broader than 'https://*' because it matches any port.)
+ //
+ // * Finally, the origin can be a single '*' to match any origin.
+ //
+ // If any entry is "*", it must be the only entry. There can be at most one
+ // "https" entry and no entry after it may start with "https:".
+ repeated string allowed_origins = 2;
+ // A list specifying which operations the rule allows. At least one value
+ // must be specified. All values must be from the following list. More values
+ // may be added to this list at any time.
+ //
+ // b2_download_file_by_name
+ // b2_download_file_by_id
+ // b2_upload_file
+ // b2_upload_part
+ repeated string allowed_operations = 3;
+ // If present, this is a list of headers that are allowed in a pre-flight
+ // OPTIONS request's Access-Control-Request-Headers header value. Each
+ // value may have one of many formats:
+ //
+ // * It may be a complete header name, such as x-bz-content-sha1.
+ //
+ // * It may end with an asterisk, such as x-bz-info-*.
+ //
+ // * Finally, it may be a single '*' to match any header.
+ //
+ // If any entry is "*", it must be the only entry in the list. If this list
+ // is missing, it is treated as if it were a list with no entries.
+ repeated string allowed_headers = 4;
+ // If present, this is a list of headers that may be exposed to an
+ // application inside the client (e.g. exposed to JavaScript in a browser).
+ // Each entry in the list must be a complete header name (e.g.
+ // "x-bz-content-sha1"). If this list is missing or empty, no headers will be
+ // exposed.
+ repeated string expose_headers = 5;
+ // This specifies the maximum number of seconds that a browser may cache the
+ // response to a preflight request. The value must not be negative and it
+ // must not be more than 86,400 seconds (one day).
+ int32 max_age_seconds = 6;
+}
+
+message Bucket {
+ string account_id = 1;
+ string bucket_id = 2;
+ string bucket_name = 3;
+ string bucket_type = 4;
+ map<string, string> bucket_info = 5;
+ repeated CorsRule cores_rules = 6;
+ repeated LifecycleRule lifecycle_rules = 7;
+ int32 revision = 8;
+}
+
+message ListBucketsResponse {
+ repeated Bucket buckets = 1;
+}
+
+message GetUploadUrlRequest {
+ string bucket_id = 1;
+}
+
+message GetUploadUrlResponse {
+ string bucket_id = 1;
+ string upload_url = 2;
+ string authorization_token = 3;
+}
+
+message UploadFileResponse {
+ string file_id = 1;
+ string file_name = 2;
+ string account_id = 3;
+ string bucket_id = 4;
+ int32 content_length = 5;
+ string content_sha1 = 6;
+ string content_type = 7;
+ map<string, string> file_info = 8;
+ string action = 9;
+ int64 upload_timestamp = 10;
+}
+
+message StartLargeFileRequest {
+ string bucket_id = 1;
+ string file_name = 2;
+ string content_type = 3;
+ map<string, string> file_info = 4;
+}
+
+message StartLargeFileResponse {
+ string file_id = 1;
+ string file_name = 2;
+ string account_id = 3;
+ string bucket_id = 4;
+ string content_type = 5;
+ map<string, string> file_info = 6;
+ int64 upload_timestamp = 7;
+}
+
+message GetUploadPartUrlRequest {
+ string file_id = 1;
+}
+
+message GetUploadPartUrlResponse {
+ string file_id = 1;
+ string upload_url = 2;
+ string authorization_token = 3;
+}
+
+message FinishLargeFileRequest {
+ string file_id = 1;
+ repeated string part_sha1_array = 2;
+ // string sha1 = 3;
+}
+
+message FinishLargeFileResponse {
+ string file_id = 1;
+ string file_name = 2;
+ string account_id = 3;
+ string bucket_id = 4;
+ int64 content_length = 5;
+ string content_sha1 = 6; // always "none"
+ string content_type = 7;
+ map<string, string> file_info = 8;
+ string action = 9;
+ int64 upload_timestamp = 10;
+}
+
+message ListFileVersionsRequest {
+ string bucket_id = 1;
+ string start_file_name = 2;
+ string start_file_id = 3;
+ int32 max_file_count = 4;
+ string prefix = 5;
+ string delimiter = 6;
+}
+
+message ListFileVersionsResponse {
+ repeated File files = 1;
+ string next_file_name = 2;
+ string next_file_id = 3;
+}
+
+message File {
+ string file_id = 1;
+ string file_name = 2;
+ int64 content_length = 3;
+ string content_type = 4;
+ string content_sha1 = 5;
+ map<string, string> file_info = 6;
+ string action = 7;
+ int64 size = 8;
+ int64 upload_timestamp = 9;
+}
+
+service PyreService {
+ // Used to log in to the B2 API. Returns an authorization token that can be
+ // used for account-level operations, and a URL that should be used as the
+ // base URL for subsequent API calls.
+ rpc AuthorizeAccount(AuthorizeAccountRequest) returns (AuthorizeAccountResponse) {
+ option (google.api.http) = {
+ get: "/b2api/v1/b2_authorize_account"
+ };
+ }
+
+ // Lists buckets associated with an account, in alphabetical order by bucket
+ // name.
+ rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) {
+ option (google.api.http) = {
+ post: "/b2api/v1/b2_list_buckets"
+ body: "*"
+ };
+ }
+
+ // Creates a new bucket. A bucket belongs to the account used to create it.
+ //
+ // Buckets can be named. The name must be globally unique. No account can use
+ // a bucket with the same name. Buckets are assigned a unique bucketId which
+ // is used when uploading, downloading, or deleting files.
+ //
+ // There is a limit of 100 buckets per account.
+ rpc CreateBucket(Bucket) returns (Bucket) {
+ option (google.api.http) = {
+ post: "/b2api/v1/b2_create_bucket"
+ body: "*"
+ };
+ }
+
+ // Deletes the bucket specified. Only buckets that contain no version of any
+ // files can be deleted.
+ rpc DeleteBucket(Bucket) returns (Bucket) {
+ option (google.api.http) = {
+ post: "/b2api/v1/b2_delete_bucket"
+ body: "*"
+ };
+ }
+
+ rpc GetUploadUrl(GetUploadUrlRequest) returns (GetUploadUrlResponse) {
+ option (google.api.http) = {
+ post: "/b2api/v1/b2_get_upload_url"
+ body: "*"
+ };
+ }
+
+ // Prepares for uploading the parts of a large file.
+ rpc StartLargeFile(StartLargeFileRequest) returns (StartLargeFileResponse) {
+ option (google.api.http) = {
+ post: "/b2api/v1/b2_start_large_file"
+ body: "*"
+ };
+ }
+
+ // Gets a URL to use for uploading parts of a large file.
+ rpc GetUploadPartUrl(GetUploadPartUrlRequest) returns (GetUploadPartUrlResponse) {
+ option (google.api.http) = {
+ post: "/b2api/v1/b2_get_upload_part_url"
+ body: "*"
+ };
+ }
+
+ // Converts the parts that have been uploaded into a single B2 file.
+ rpc FinishLargeFile(FinishLargeFileRequest) returns (FinishLargeFileResponse) {
+ option (google.api.http) = {
+ post: "/b2api/v1/b2_finish_large_file"
+ body: "*"
+ };
+ }
+
+ // Lists all of the versions of all of the files contained in one bucket, in
+ // alphabetical order by file name, and by reverse of date/time uploaded for
+ // versions of files with the same name.
+ rpc ListFileVersions(ListFileVersionsRequest) returns (ListFileVersionsResponse) {
+ option (google.api.http) = {
+ post: "/b2api/v1/b2_list_file_versions"
+ body: "*"
+ };
+ }
+}
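
The `google.api.http` options above define the B2-compatible REST surface served by the gateway. For illustration, a client could hit the `b2_list_buckets` mapping over plain HTTP like this (host, port, and account ID are assumptions; the field name follows the proto's snake_case, which the gateway's JSON decoder accepts):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(`{"account_id": "test-account"}`)
	resp, err := http.Post("http://localhost:8822/b2api/v1/b2_list_buckets", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // JSON-encoded ListBucketsResponse
}
```
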
diff --git a/internal/pyre/pyre.go b/internal/pyre/pyre.go
new file mode 100644
index 0000000..adb1b2a
--- /dev/null
+++ b/internal/pyre/pyre.go
@@ -0,0 +1,20 @@
+// Copyright 2018, the Blazer authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pyre provides a gRPC-based implementation of B2, as well as a
+// RESTful gateway on top of it.
+package pyre
+
+//go:generate protoc -I/usr/local/include -I. -I$GOPATH/src -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis --grpc-gateway_out=logtostderr=true:. proto/pyre.proto
+//go:generate protoc -I/usr/local/include -I. -I$GOPATH/src -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis --go_out=plugins=grpc:. proto/pyre.proto
diff --git a/internal/pyre/simple.go b/internal/pyre/simple.go
new file mode 100644
index 0000000..7a0d50f
--- /dev/null
+++ b/internal/pyre/simple.go
@@ -0,0 +1,109 @@
+// Copyright 2018, the Blazer authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pyre
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/kurin/blazer/internal/b2types"
+)
+
+const uploadFilePrefix = "/b2api/v1/b2_upload_file/"
+
+type SimpleFileManager interface {
+ Writer(bucket, name, id string) (io.WriteCloser, error)
+}
+
+type simpleFileServer struct {
+ fm SimpleFileManager
+}
+
+type uploadRequest struct {
+ name string
+ contentType string
+ size int64
+ sha1 string
+ bucket string
+ info map[string]string
+}
+
+func parseUploadHeaders(r *http.Request) (*uploadRequest, error) {
+ ur := &uploadRequest{info: make(map[string]string)}
+ ur.name = r.Header.Get("X-Bz-File-Name")
+ ur.contentType = r.Header.Get("Content-Type")
+ ur.sha1 = r.Header.Get("X-Bz-Content-Sha1")
+ size, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ ur.size = size
+ for k := range r.Header {
+ if !strings.HasPrefix(k, "X-Bz-Info-") {
+ continue
+ }
+ name := strings.TrimPrefix(k, "X-Bz-Info-")
+ ur.info[name] = r.Header.Get(k)
+ }
+ ur.bucket = strings.TrimPrefix(r.URL.Path, uploadFilePrefix)
+ return ur, nil
+}
+
+func (fs *simpleFileServer) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
+ req, err := parseUploadHeaders(r)
+ if err != nil {
+ http.Error(rw, err.Error(), 500)
+ fmt.Println("oh no")
+ return
+ }
+ id := uuid.New().String()
+ w, err := fs.fm.Writer(req.bucket, req.name, id)
+ if err != nil {
+ http.Error(rw, err.Error(), 500)
+ fmt.Println("oh no")
+ return
+ }
+ if _, err := io.Copy(w, io.LimitReader(r.Body, req.size)); err != nil {
+ w.Close()
+ http.Error(rw, err.Error(), 500)
+ fmt.Println("oh no")
+ return
+ }
+ if err := w.Close(); err != nil {
+ http.Error(rw, err.Error(), 500)
+ fmt.Println("oh no")
+ return
+ }
+ resp := &b2types.UploadFileResponse{
+ FileID: id,
+ Name: req.name,
+ SHA1: req.sha1,
+ BucketID: req.bucket,
+ }
+ if err := json.NewEncoder(rw).Encode(resp); err != nil {
+ http.Error(rw, err.Error(), 500)
+ fmt.Println("oh no")
+ return
+ }
+}
+
+func RegisterSimpleFileManagerOnMux(f SimpleFileManager, mux *http.ServeMux) {
+ mux.Handle(uploadFilePrefix, &simpleFileServer{fm: f})
+}
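
`RegisterSimpleFileManagerOnMux` only needs something that can hand back an `io.WriteCloser` per (bucket, name, id). A hypothetical implementation backed by a local directory, registered on a mux, could look like the following; the directory layout and port are assumptions, and as with the other sketches this would have to live inside the blazer module because `pyre` is internal:

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"

	"github.com/kurin/blazer/internal/pyre"
)

// diskFM is a hypothetical SimpleFileManager that writes each upload to
// <root>/<bucket>/<id>, keying on the generated file ID so versions never collide.
type diskFM struct{ root string }

func (d diskFM) Writer(bucket, name, id string) (io.WriteCloser, error) {
	dir := filepath.Join(d.root, bucket)
	if err := os.MkdirAll(dir, 0755); err != nil {
		return nil, err
	}
	return os.Create(filepath.Join(dir, id))
}

func main() {
	mux := http.NewServeMux()
	pyre.RegisterSimpleFileManagerOnMux(diskFM{root: "/tmp/pyre"}, mux)
	// Uploads POSTed to /b2api/v1/b2_upload_file/<bucket> now land on disk.
	log.Fatal(http.ListenAndServe("localhost:8824", mux))
}
```
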
diff --git a/x/consistent/consistent.go b/x/consistent/consistent.go
index d9f47c1..2139d29 100644
--- a/x/consistent/consistent.go
+++ b/x/consistent/consistent.go
@@ -1,4 +1,4 @@
-// Copyright 2016, Google
+// Copyright 2016, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -27,6 +27,7 @@ import (
"io"
"io/ioutil"
"reflect"
+ "time"
"github.com/kurin/blazer/b2"
)
@@ -56,6 +57,16 @@ type Group struct {
ba *b2.BucketAttrs
}
+// Mutex returns a new mutex on the given group. Only one caller can hold the
+// lock on a mutex with a given name, for a given group.
+func (g *Group) Mutex(ctx context.Context, name string) *Mutex {
+ return &Mutex{
+ g: g,
+ name: name,
+ ctx: ctx,
+ }
+}
+
// Operate calls f with the contents of the group object given by name, and
// updates that object with the output of f if f returns no error. Operate
// guarantees that no other callers have modified the contents of name in the
@@ -335,6 +346,44 @@ func (g *Group) List(ctx context.Context) ([]string, error) {
return l, nil
}
+// A Mutex is a sync.Locker that is backed by data in B2.
+type Mutex struct {
+ g *Group
+ name string
+ ctx context.Context
+}
+
+// Lock locks the mutex. If the mutex is already locked, Lock will wait,
+// polling at 1 second intervals, until it can acquire the lock.
+func (m *Mutex) Lock() {
+ cont := errors.New("continue")
+ for {
+ err := m.g.Operate(m.ctx, m.name, func(b []byte) ([]byte, error) {
+ if len(b) != 0 {
+ return nil, cont
+ }
+ return []byte{1}, nil
+ })
+ if err == nil {
+ return
+ }
+ if err != cont {
+ panic(err)
+ }
+ time.Sleep(time.Second)
+ }
+}
+
+// Unlock unconditionally unlocks the mutex. This allows programs to clear
+// stale locks.
+func (m *Mutex) Unlock() {
+ if err := m.g.Operate(m.ctx, m.name, func([]byte) ([]byte, error) {
+ return nil, nil
+ }); err != nil {
+ panic(err)
+ }
+}
+
type consistentInfo struct {
Version int
@@ -349,7 +398,7 @@ type consistentInfo struct {
//
// However, it is still necessary for higher level constructs to confirm that
// the serial number they expect is good. The writer does this, for example,
- // but comparing the "key" of the file it is replacing.
+ // by comparing the "key" of the file it is replacing.
Serial int
Locations map[string]string
}
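
The new Group.Mutex gives a coarse cross-process lock backed by a B2 object. A minimal sketch of using it, assuming credentials in the environment and an existing bucket (the bucket, group, and lock names are placeholders):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/kurin/blazer/b2"
	"github.com/kurin/blazer/x/consistent"
)

func main() {
	ctx := context.Background()
	client, err := b2.NewClient(ctx, os.Getenv("B2_ACCOUNT_ID"), os.Getenv("B2_SECRET_KEY"))
	if err != nil {
		log.Fatal(err)
	}
	bucket, err := client.Bucket(ctx, "my-bucket") // assumed existing bucket
	if err != nil {
		log.Fatal(err)
	}

	g := consistent.NewGroup(bucket, "locks")
	m := g.Mutex(ctx, "nightly-job")

	m.Lock() // polls B2 once per second until the lock is free
	defer m.Unlock()
	// ... work that must not run concurrently in any other process ...
}
```

Because Unlock is unconditional, a stale lock left behind by a crashed process can be cleared by simply calling Unlock from another process.
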
diff --git a/x/consistent/consistent_test.go b/x/consistent/consistent_test.go
index 0a94df7..c9185b8 100644
--- a/x/consistent/consistent_test.go
+++ b/x/consistent/consistent_test.go
@@ -6,7 +6,9 @@ import (
"os"
"strconv"
"sync"
+ "sync/atomic"
"testing"
+ "time"
"github.com/kurin/blazer/b2"
)
@@ -30,9 +32,9 @@ func TestOperationLive(t *testing.T) {
wg.Add(1)
i := i
go func() {
- var n int
defer wg.Done()
for j := 0; j < 10; j++ {
+ var n int
if err := g.Operate(ctx, name, func(b []byte) ([]byte, error) {
if len(b) > 0 {
i, err := strconv.Atoi(string(b))
@@ -123,6 +125,38 @@ func TestOperationJSONLive(t *testing.T) {
}
}
+func TestMutex(t *testing.T) {
+ ctx := context.Background()
+ bucket, done := startLiveTest(ctx, t)
+ defer done()
+
+ g := NewGroup(bucket, "tester")
+ m := g.Mutex(ctx, "mootex")
+ var a int32
+ var wg sync.WaitGroup
+ for i := 0; i < 5; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ for j := 0; j < 5; j++ {
+ m.Lock()
+ new := atomic.AddInt32(&a, 1)
+ if new != 1 {
+ t.Fatalf("two threads locked at once")
+ }
+ time.Sleep(20 * time.Millisecond)
+ new = atomic.AddInt32(&a, -1)
+ if new != 0 {
+ t.Fatalf("two threads locked at once")
+ }
+ t.Logf("thread %d: lock %d", i, j)
+ m.Unlock()
+ }
+ }(i)
+ }
+ wg.Wait()
+}
+
func startLiveTest(ctx context.Context, t *testing.T) (*b2.Bucket, func()) {
id := os.Getenv(apiID)
key := os.Getenv(apiKey)
diff --git a/x/transport/transport.go b/x/transport/transport.go
index 03a878f..b4a83f4 100644
--- a/x/transport/transport.go
+++ b/x/transport/transport.go
@@ -1,4 +1,4 @@
-// Copyright 2017, Google
+// Copyright 2017, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/x/window/accum_test.go b/x/window/accum_test.go
index c832e51..00ea650 100644
--- a/x/window/accum_test.go
+++ b/x/window/accum_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018, Google
+// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/x/window/counter_test.go b/x/window/counter_test.go
index 61a9865..e982e53 100644
--- a/x/window/counter_test.go
+++ b/x/window/counter_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018, Google
+// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/x/window/window.go b/x/window/window.go
index d18a887..1f0fd26 100644
--- a/x/window/window.go
+++ b/x/window/window.go
@@ -1,4 +1,4 @@
-// Copyright 2018, Google
+// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/x/window/window_test.go b/x/window/window_test.go
index d0fc91a..9a812ca 100644
--- a/x/window/window_test.go
+++ b/x/window/window_test.go
@@ -1,4 +1,4 @@
-// Copyright 2018, Google
+// Copyright 2018, the Blazer authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.