Import upstream version 0.5.3+git20200818.1.9d61c73
Debian Janitor
2 years ago
0 | bonfire | |
0 | bin/bonfire/bonfire | |
1 | 1 | |
2 | 2 | # Compiled Object files, Static and Dynamic libs (Shared Objects) |
3 | 3 | *.o |
0 | # This is the list of Blazer authors for copyright purposes. | |
1 | # | |
2 | # This does not necessarily list everyone who has contributed code, since in | |
3 | # some cases, their employer may be the copyright holder. To see the full list | |
4 | # of contributors, see the revision history in source control. | |
5 | # | |
6 | # Tag yourself. | |
7 | Google LLC |
0 | Want to contribute? Great! First, read this page (including the small print at the end). | |
1 | ||
2 | ### Before you contribute | |
3 | Before we can use your code, you must sign the | |
4 | [Google Individual Contributor License Agreement] | |
5 | (https://cla.developers.google.com/about/google-individual) | |
6 | (CLA), which you can do online. The CLA is necessary mainly because you own the | |
7 | copyright to your changes, even after your contribution becomes part of our | |
8 | codebase, so we need your permission to use and distribute your code. We also | |
9 | need to be sure of various other things—for instance that you'll tell us if you | |
10 | know that your code infringes on other people's patents. You don't have to sign | |
11 | the CLA until after you've submitted your code for review and a member has | |
12 | approved it, but you must do it before we can put your code into our codebase. | |
13 | Before you start working on a larger contribution, you should get in touch with | |
14 | us first through the issue tracker with your idea so that we can help out and | |
15 | possibly guide you. Coordinating up front makes it much easier to avoid | |
16 | frustration later on. | |
0 | Want to contribute? Great! First, read this page. | |
17 | 1 | |
18 | 2 | ### Code reviews |
19 | 3 | All submissions, including submissions by project members, require review. We |
20 | 4 | use Github pull requests for this purpose. |
21 | ||
22 | ### The small print | |
23 | Contributions made by corporations are covered by a different agreement than | |
24 | the one above, the | |
25 | [Software Grant and Corporate Contributor License Agreement] | |
26 | (https://cla.developers.google.com/about/google-corporate). |
0 | Copyright 2016, Google | |
0 | Copyright 2016, the Blazer authors | |
1 | 1 | |
2 | 2 | Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | you may not use this file except in compliance with the License. |
18 | 18 | import "github.com/kurin/blazer/b2" |
19 | 19 | ``` |
20 | 20 | |
21 | Except see https://www.backblaze.com/blog/backblaze-b2-s3-compatible-api/; you probably don't need this library anymore. | |
22 | ||
21 | 23 | ## Examples |
24 | ||
25 | ### Getting started | |
26 | ```go | |
27 | id := "B2_ACCOUNT_ID" | |
28 | key := "B2_ACCOUNT_KEY" | |
29 | ||
30 | ctx := context.Background() | |
31 | ||
32 | // b2_authorize_account | |
33 | b2, err := b2.NewClient(ctx, id, key) | |
34 | if err != nil { | |
35 | log.Fatalln(e) | |
36 | } | |
37 | ||
38 | buckets, err := b2.ListBuckets(ctx) | |
39 | if err != nil { | |
40 | log.Fatalln(e) | |
41 | } | |
42 | ``` | |
22 | 43 | |
23 | 44 | ### Copy a file into B2 |
24 | 45 | |
25 | 46 | ```go |
26 | 47 | func copyFile(ctx context.Context, bucket *b2.Bucket, src, dst string) error { |
27 | f, err := file.Open(src) | |
48 | f, err := os.Open(src) | |
28 | 49 | if err != nil { |
29 | 50 | return err |
30 | 51 | } |
51 | 72 | |
52 | 73 | ```go |
53 | 74 | func copyFile(ctx context.Context, bucket *b2.Bucket, writers int, src, dst string) error { |
54 | f, err := file.Open(src) | |
75 | f, err := os.Open(src) | |
55 | 76 | if err != nil { |
56 | 77 | return err |
57 | 78 | } |
79 | 100 | r := bucket.Object(src).NewReader(ctx) |
80 | 101 | defer r.Close() |
81 | 102 | |
82 | f, err := file.Create(dst) | |
103 | f, err := os.Create(dst) | |
83 | 104 | if err != nil { |
84 | 105 | return err |
85 | 106 | } |
98 | 119 | func printObjects(ctx context.Context, bucket *b2.Bucket) error { |
99 | 120 | iterator := bucket.List(ctx) |
100 | 121 | for iterator.Next() { |
101 | fmt.Println(itrator.Object()) | |
122 | fmt.Println(iterator.Object()) | |
102 | 123 | } |
103 | 124 | return iterator.Err() |
104 | 125 | } |
123 | 144 | ```go |
124 | 145 | base := bucket.BaseURL() |
125 | 146 | ``` |
126 | ||
127 | --- | |
128 | ||
129 | This is not an official Google product. |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
31 | 31 | "fmt" |
32 | 32 | "io" |
33 | 33 | "net/http" |
34 | "net/url" | |
34 | 35 | "regexp" |
35 | 36 | "strconv" |
36 | 37 | "sync" |
45 | 46 | sWriters map[string]*Writer |
46 | 47 | sReaders map[string]*Reader |
47 | 48 | sMethods []methodCounter |
49 | opts clientOptions | |
48 | 50 | } |
49 | 51 | |
50 | 52 | // NewClient creates and returns a new Client with valid B2 service account |
62 | 64 | }, |
63 | 65 | } |
64 | 66 | opts = append(opts, client(c)) |
65 | if err := c.backend.authorizeAccount(ctx, account, key, opts...); err != nil { | |
67 | for _, f := range opts { | |
68 | f(&c.opts) | |
69 | } | |
70 | if err := c.backend.authorizeAccount(ctx, account, key, c.opts); err != nil { | |
66 | 71 | return nil, err |
67 | 72 | } |
68 | 73 | return c, nil |
74 | 79 | failSomeUploads bool |
75 | 80 | expireTokens bool |
76 | 81 | capExceeded bool |
82 | apiBase string | |
77 | 83 | userAgents []string |
78 | } | |
79 | ||
80 | // for testing | |
81 | func (c clientOptions) eq(o clientOptions) bool { | |
82 | if c.client != o.client || | |
83 | c.transport != o.transport || | |
84 | c.failSomeUploads != o.failSomeUploads || | |
85 | c.expireTokens != o.expireTokens || | |
86 | c.capExceeded != o.capExceeded { | |
87 | return false | |
88 | } | |
89 | if len(c.userAgents) != len(o.userAgents) { | |
90 | return false | |
91 | } | |
92 | for i := range c.userAgents { | |
93 | if c.userAgents[i] != o.userAgents[i] { | |
94 | return false | |
95 | } | |
96 | } | |
97 | return true | |
84 | writerOpts []WriterOption | |
98 | 85 | } |
99 | 86 | |
100 | 87 | // A ClientOption allows callers to adjust various per-client settings. |
108 | 95 | func UserAgent(agent string) ClientOption { |
109 | 96 | return func(o *clientOptions) { |
110 | 97 | o.userAgents = append(o.userAgents, agent) |
98 | } | |
99 | } | |
100 | ||
101 | // APIBase returns a ClientOption specifying the URL root of API requests. | |
102 | func APIBase(url string) ClientOption { | |
103 | return func(o *clientOptions) { | |
104 | o.apiBase = url | |
111 | 105 | } |
112 | 106 | } |
113 | 107 | |
287 | 281 | |
288 | 282 | // Bucket returns a bucket if it exists. |
289 | 283 | func (c *Client) Bucket(ctx context.Context, name string) (*Bucket, error) { |
290 | buckets, err := c.backend.listBuckets(ctx) | |
284 | buckets, err := c.backend.listBuckets(ctx, name) | |
291 | 285 | if err != nil { |
292 | 286 | return nil, err |
293 | 287 | } |
311 | 305 | // if it does not already exist. If attrs is nil, it is created as a private |
312 | 306 | // bucket with no info metadata and no lifecycle rules. |
313 | 307 | func (c *Client) NewBucket(ctx context.Context, name string, attrs *BucketAttrs) (*Bucket, error) { |
314 | buckets, err := c.backend.listBuckets(ctx) | |
308 | buckets, err := c.backend.listBuckets(ctx, name) | |
315 | 309 | if err != nil { |
316 | 310 | return nil, err |
317 | 311 | } |
342 | 336 | |
343 | 337 | // ListBuckets returns all the available buckets. |
344 | 338 | func (c *Client) ListBuckets(ctx context.Context) ([]*Bucket, error) { |
345 | bs, err := c.backend.listBuckets(ctx) | |
339 | bs, err := c.backend.listBuckets(ctx, "") | |
346 | 340 | if err != nil { |
347 | 341 | return nil, err |
348 | 342 | } |
433 | 427 | ContentType string // Used on upload, default is "application/octet-stream". |
434 | 428 | Status ObjectState // Not used on upload. |
435 | 429 | UploadTimestamp time.Time // Not used on upload. |
436 | SHA1 string // Not used on upload. Can be "none" for large files. | |
430 | SHA1 string // Can be "none" for large files. If set on upload, will be used for large files. | |
437 | 431 | LastModified time.Time // If present, and there are fewer than 10 keys in the Info field, this is saved on upload. |
438 | 432 | Info map[string]string // Save arbitrary metadata on upload, but limited to 10 keys. |
439 | 433 | } |
473 | 467 | mtime = time.Unix(ms/1e3, (ms%1e3)*1e6) |
474 | 468 | delete(info, "src_last_modified_millis") |
475 | 469 | } |
470 | if v, ok := info["large_file_sha1"]; ok { | |
471 | sha = v | |
472 | } | |
476 | 473 | return &Attrs{ |
477 | 474 | Name: name, |
478 | 475 | Size: size, |
523 | 520 | // overwritten are not deleted, but are "hidden". |
524 | 521 | // |
525 | 522 | // Callers must close the writer when finished and check the error status. |
526 | func (o *Object) NewWriter(ctx context.Context) *Writer { | |
523 | func (o *Object) NewWriter(ctx context.Context, opts ...WriterOption) *Writer { | |
527 | 524 | ctx, cancel := context.WithCancel(ctx) |
528 | return &Writer{ | |
525 | w := &Writer{ | |
529 | 526 | o: o, |
530 | 527 | name: o.name, |
531 | 528 | ctx: ctx, |
532 | 529 | cancel: cancel, |
533 | 530 | } |
531 | for _, f := range o.b.c.opts.writerOpts { | |
532 | f(w) | |
533 | } | |
534 | for _, f := range opts { | |
535 | f(w) | |
536 | } | |
537 | return w | |
534 | 538 | } |
535 | 539 | |
536 | 540 | // NewRangeReader returns a reader for the given object, reading up to length |
572 | 576 | return o.f.deleteFileVersion(ctx) |
573 | 577 | } |
574 | 578 | |
575 | // Cursor is passed to ListObjects to return subsequent pages. | |
576 | // | |
577 | // DEPRECATED. Will be removed in a future release. | |
578 | type Cursor struct { | |
579 | // Prefix limits the listed objects to those that begin with this string. | |
580 | Prefix string | |
581 | ||
582 | // Delimiter denotes the path separator. If set, object listings will be | |
583 | // truncated at this character. | |
584 | // | |
585 | // For example, if the bucket contains objects foo/bar, foo/baz, and foo, | |
586 | // then a delimiter of "/" will cause the listing to return "foo" and "foo/". | |
587 | // Otherwise, the listing would have returned all object names. | |
588 | // | |
589 | // Note that objects returned that end in the delimiter may not be actual | |
590 | // objects, e.g. you cannot read from (or write to, or delete) an object "foo/", | |
591 | // both because no actual object exists and because B2 disallows object names | |
592 | // that end with "/". If you want to ensure that all objects returned by | |
593 | // ListObjects and ListCurrentObjects are actual objects, leave this unset. | |
594 | Delimiter string | |
595 | ||
596 | name string | |
597 | id string | |
598 | } | |
599 | ||
600 | // ListObjects returns all objects in the bucket, including multiple versions | |
601 | // of the same object. Cursor may be nil; when passed to a subsequent query, | |
602 | // it will continue the listing. | |
603 | // | |
604 | // ListObjects will return io.EOF when there are no objects left in the bucket, | |
605 | // however it may do so concurrently with the last objects. | |
606 | // | |
607 | // DEPRECATED. Will be removed in a future release. | |
608 | func (b *Bucket) ListObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) { | |
609 | if c == nil { | |
610 | c = &Cursor{} | |
611 | } | |
612 | fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.Prefix, c.Delimiter) | |
613 | if err != nil { | |
614 | return nil, nil, err | |
615 | } | |
616 | var next *Cursor | |
617 | if name != "" && id != "" { | |
618 | next = &Cursor{ | |
619 | Prefix: c.Prefix, | |
620 | Delimiter: c.Delimiter, | |
621 | name: name, | |
622 | id: id, | |
623 | } | |
624 | } | |
625 | var objects []*Object | |
626 | for _, f := range fs { | |
627 | objects = append(objects, &Object{ | |
628 | name: f.name(), | |
629 | f: f, | |
630 | b: b, | |
631 | }) | |
632 | } | |
633 | var rtnErr error | |
634 | if len(objects) == 0 || next == nil { | |
635 | rtnErr = io.EOF | |
636 | } | |
637 | return objects, next, rtnErr | |
638 | } | |
639 | ||
640 | // ListCurrentObjects is similar to ListObjects, except that it returns only | |
641 | // current, unhidden objects in the bucket. | |
642 | // | |
643 | // DEPRECATED. Will be removed in a future release. | |
644 | func (b *Bucket) ListCurrentObjects(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) { | |
645 | if c == nil { | |
646 | c = &Cursor{} | |
647 | } | |
648 | fs, name, err := b.b.listFileNames(ctx, count, c.name, c.Prefix, c.Delimiter) | |
649 | if err != nil { | |
650 | return nil, nil, err | |
651 | } | |
652 | var next *Cursor | |
653 | if name != "" { | |
654 | next = &Cursor{ | |
655 | Prefix: c.Prefix, | |
656 | Delimiter: c.Delimiter, | |
657 | name: name, | |
658 | } | |
659 | } | |
660 | var objects []*Object | |
661 | for _, f := range fs { | |
662 | objects = append(objects, &Object{ | |
663 | name: f.name(), | |
664 | f: f, | |
665 | b: b, | |
666 | }) | |
667 | } | |
668 | var rtnErr error | |
669 | if len(objects) == 0 || next == nil { | |
670 | rtnErr = io.EOF | |
671 | } | |
672 | return objects, next, rtnErr | |
673 | } | |
674 | ||
675 | // ListUnfinishedLargeFiles lists any objects that correspond to large file uploads that haven't been completed. | |
676 | // This can happen for example when an upload is interrupted. | |
677 | // | |
678 | // DEPRECATED. Will be removed in a future release. | |
679 | func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, c *Cursor) ([]*Object, *Cursor, error) { | |
680 | if c == nil { | |
681 | c = &Cursor{} | |
682 | } | |
683 | fs, name, err := b.b.listUnfinishedLargeFiles(ctx, count, c.name) | |
684 | if err != nil { | |
685 | return nil, nil, err | |
686 | } | |
687 | var next *Cursor | |
688 | if name != "" { | |
689 | next = &Cursor{ | |
690 | name: name, | |
691 | } | |
692 | } | |
693 | var objects []*Object | |
694 | for _, f := range fs { | |
695 | objects = append(objects, &Object{ | |
696 | name: f.name(), | |
697 | f: f, | |
698 | b: b, | |
699 | }) | |
700 | } | |
701 | var rtnErr error | |
702 | if len(objects) == 0 || next == nil { | |
703 | rtnErr = io.EOF | |
704 | } | |
705 | return objects, next, rtnErr | |
706 | } | |
707 | ||
708 | 579 | // Hide hides the object from name-based listing. |
709 | 580 | func (o *Object) Hide(ctx context.Context) error { |
710 | 581 | if err := o.ensure(ctx); err != nil { |
717 | 588 | // Reveal unhides (if hidden) the named object. If there are multiple objects |
718 | 589 | // of a given name, it will reveal the most recent. |
719 | 590 | func (b *Bucket) Reveal(ctx context.Context, name string) error { |
720 | cur := &Cursor{ | |
721 | name: name, | |
722 | } | |
723 | objs, _, err := b.ListObjects(ctx, 1, cur) | |
724 | if err != nil && err != io.EOF { | |
725 | return err | |
726 | } | |
727 | if len(objs) < 1 || objs[0].name != name { | |
728 | return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true} | |
729 | } | |
730 | obj := objs[0] | |
731 | if obj.f.status() != "hide" { | |
732 | return nil | |
733 | } | |
734 | return obj.Delete(ctx) | |
591 | iter := b.List(ctx, ListPrefix(name), ListHidden()) | |
592 | for iter.Next() { | |
593 | obj := iter.Object() | |
594 | if obj.Name() == name { | |
595 | if obj.f.status() == "hide" { | |
596 | return obj.Delete(ctx) | |
597 | } | |
598 | return nil | |
599 | } | |
600 | if obj.Name() > name { | |
601 | break | |
602 | } | |
603 | } | |
604 | return b2err{err: fmt.Errorf("%s: not found", name), notFoundErr: true} | |
735 | 605 | } |
736 | 606 | |
737 | 607 | // I don't want to import all of ioutil for this. |
742 | 612 | } |
743 | 613 | |
744 | 614 | func (b *Bucket) getObject(ctx context.Context, name string) (*Object, error) { |
745 | fr, err := b.b.downloadFileByName(ctx, name, 0, 1) | |
746 | if err != nil { | |
615 | fr, err := b.b.downloadFileByName(ctx, name, 0, 0, true) | |
616 | if err != nil { | |
617 | fmt.Printf("%v: %T\n", err, err) | |
747 | 618 | return nil, err |
748 | 619 | } |
749 | 620 | io.Copy(discard{}, fr) |
759 | 630 | // in a private bucket. Only objects that begin with prefix can be accessed. |
760 | 631 | // The token expires after the given duration. |
761 | 632 | func (b *Bucket) AuthToken(ctx context.Context, prefix string, valid time.Duration) (string, error) { |
762 | return b.b.getDownloadAuthorization(ctx, prefix, valid) | |
763 | } | |
633 | return b.b.getDownloadAuthorization(ctx, prefix, valid, "") | |
634 | } | |
635 | ||
636 | // AuthURL returns a URL for the given object with embedded token and, | |
637 | // possibly, b2ContentDisposition arguments. Leave b2cd blank for no content | |
638 | // disposition. | |
639 | func (o *Object) AuthURL(ctx context.Context, valid time.Duration, b2cd string) (*url.URL, error) { | |
640 | token, err := o.b.b.getDownloadAuthorization(ctx, o.name, valid, b2cd) | |
641 | if err != nil { | |
642 | return nil, err | |
643 | } | |
644 | urlString := fmt.Sprintf("%s?Authorization=%s", o.URL(), url.QueryEscape(token)) | |
645 | if b2cd != "" { | |
646 | urlString = fmt.Sprintf("%s&b2ContentDisposition=%s", urlString, url.QueryEscape(b2cd)) | |
647 | } | |
648 | u, err := url.Parse(urlString) | |
649 | if err != nil { | |
650 | return nil, err | |
651 | } | |
652 | return u, nil | |
653 | } |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
70 | 70 | bucketMap map[string]map[string]string |
71 | 71 | } |
72 | 72 | |
73 | func (t *testRoot) authorizeAccount(context.Context, string, string, ...ClientOption) error { | |
73 | func (t *testRoot) authorizeAccount(context.Context, string, string, clientOptions) error { | |
74 | 74 | t.auths++ |
75 | 75 | return nil |
76 | 76 | } |
105 | 105 | return false |
106 | 106 | } |
107 | 107 | return e.retry || e.reupload || e.backoff > 0 |
108 | } | |
109 | ||
110 | func (t *testRoot) createKey(context.Context, string, []string, time.Duration, string, string) (b2KeyInterface, error) { | |
111 | return nil, nil | |
112 | } | |
113 | func (t *testRoot) listKeys(context.Context, int, string) ([]b2KeyInterface, string, error) { | |
114 | return nil, "", nil | |
108 | 115 | } |
109 | 116 | |
110 | 117 | func (t *testRoot) createBucket(_ context.Context, name, _ string, _ map[string]string, _ []LifecycleRule) (b2BucketInterface, error) { |
123 | 130 | }, nil |
124 | 131 | } |
125 | 132 | |
126 | func (t *testRoot) listBuckets(context.Context) ([]b2BucketInterface, error) { | |
133 | func (t *testRoot) listBuckets(context.Context, string) ([]b2BucketInterface, error) { | |
127 | 134 | var b []b2BucketInterface |
128 | 135 | for k, v := range t.bucketMap { |
129 | 136 | b = append(b, &testBucket{ |
146 | 153 | func (t *testBucket) attrs() *BucketAttrs { return nil } |
147 | 154 | func (t *testBucket) deleteBucket(context.Context) error { return nil } |
148 | 155 | func (t *testBucket) updateBucket(context.Context, *BucketAttrs) error { return nil } |
156 | func (t *testBucket) id() string { return "" } | |
149 | 157 | |
150 | 158 | func (t *testBucket) getUploadURL(context.Context) (b2URLInterface, error) { |
151 | 159 | if err := t.errs.getError("getUploadURL"); err != nil { |
201 | 209 | return nil, "", fmt.Errorf("testBucket.listUnfinishedLargeFiles(ctx, %d, %q): not implemented", count, cont) |
202 | 210 | } |
203 | 211 | |
204 | func (t *testBucket) downloadFileByName(_ context.Context, name string, offset, size int64) (b2FileReaderInterface, error) { | |
212 | func (t *testBucket) downloadFileByName(_ context.Context, name string, offset, size int64, _ bool) (b2FileReaderInterface, error) { | |
205 | 213 | gmux.Lock() |
206 | 214 | defer gmux.Unlock() |
207 | 215 | f := t.files[name] |
220 | 228 | } |
221 | 229 | |
222 | 230 | func (t *testBucket) hideFile(context.Context, string) (b2FileInterface, error) { return nil, nil } |
223 | func (t *testBucket) getDownloadAuthorization(context.Context, string, time.Duration) (string, error) { | |
231 | func (t *testBucket) getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error) { | |
224 | 232 | return "", nil |
225 | 233 | } |
226 | 234 | func (t *testBucket) baseURL() string { return "" } |
277 | 285 | errs: t.errs, |
278 | 286 | }, nil |
279 | 287 | } |
288 | ||
289 | func (t *testLargeFile) cancel(ctx context.Context) error { return ctx.Err() } | |
280 | 290 | |
281 | 291 | type testFileChunk struct { |
282 | 292 | parts map[int][]byte |
668 | 678 | |
669 | 679 | if err := readFile(ctx, lobj, wshaL, 1e7, 10); err != nil { |
670 | 680 | t.Error(err) |
681 | } | |
682 | } | |
683 | ||
684 | func TestLargeFileCancellation(t *testing.T) { | |
685 | ctx := context.Background() | |
686 | ctx, cancel := context.WithCancel(ctx) | |
687 | defer cancel() | |
688 | ||
689 | client := &Client{ | |
690 | backend: &beRoot{ | |
691 | b2i: &testRoot{ | |
692 | bucketMap: make(map[string]map[string]string), | |
693 | errs: &errCont{}, | |
694 | }, | |
695 | }, | |
696 | } | |
697 | ||
698 | b, err := client.NewBucket(ctx, bucketName, nil) | |
699 | if err != nil { | |
700 | t.Fatal(err) | |
701 | } | |
702 | var called bool | |
703 | w := b.Object("foo").NewWriter(ctx, WithCancelOnError(func() context.Context { return context.Background() }, func(err error) { | |
704 | called = true | |
705 | if err != nil { | |
706 | t.Errorf("expected no error, got %v", err) | |
707 | } | |
708 | })) | |
709 | w.ChunkSize = 10 | |
710 | ||
711 | for i := 0; i < 10; i++ { | |
712 | r := io.LimitReader(zReader{}, 20) | |
713 | if _, err := io.Copy(w, r); err != nil && err != context.Canceled { | |
714 | t.Errorf("Copy: %v", err) | |
715 | } | |
716 | cancel() | |
717 | } | |
718 | ||
719 | if err := w.Close(); err != context.Canceled { | |
720 | t.Errorf("expected cancelled context; got %v", err) | |
721 | } | |
722 | ||
723 | if !called { | |
724 | t.Errorf("error callback not called") | |
671 | 725 | } |
672 | 726 | } |
673 | 727 |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
27 | 27 | reauth(error) bool |
28 | 28 | transient(error) bool |
29 | 29 | reupload(error) bool |
30 | authorizeAccount(context.Context, string, string, ...ClientOption) error | |
30 | authorizeAccount(context.Context, string, string, clientOptions) error | |
31 | 31 | reauthorizeAccount(context.Context) error |
32 | 32 | createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) |
33 | listBuckets(context.Context) ([]beBucketInterface, error) | |
33 | listBuckets(context.Context, string) ([]beBucketInterface, error) | |
34 | createKey(context.Context, string, []string, time.Duration, string, string) (beKeyInterface, error) | |
35 | listKeys(context.Context, int, string) ([]beKeyInterface, string, error) | |
34 | 36 | } |
35 | 37 | |
36 | 38 | type beRoot struct { |
37 | 39 | account, key string |
38 | 40 | b2i b2RootInterface |
39 | options []ClientOption | |
41 | options clientOptions | |
40 | 42 | } |
41 | 43 | |
42 | 44 | type beBucketInterface interface { |
43 | 45 | name() string |
44 | 46 | btype() BucketType |
45 | 47 | attrs() *BucketAttrs |
48 | id() string | |
46 | 49 | updateBucket(context.Context, *BucketAttrs) error |
47 | 50 | deleteBucket(context.Context) error |
48 | 51 | getUploadURL(context.Context) (beURLInterface, error) |
50 | 53 | listFileNames(context.Context, int, string, string, string) ([]beFileInterface, string, error) |
51 | 54 | listFileVersions(context.Context, int, string, string, string, string) ([]beFileInterface, string, string, error) |
52 | 55 | listUnfinishedLargeFiles(context.Context, int, string) ([]beFileInterface, string, error) |
53 | downloadFileByName(context.Context, string, int64, int64) (beFileReaderInterface, error) | |
56 | downloadFileByName(context.Context, string, int64, int64, bool) (beFileReaderInterface, error) | |
54 | 57 | hideFile(context.Context, string) (beFileInterface, error) |
55 | getDownloadAuthorization(context.Context, string, time.Duration) (string, error) | |
58 | getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error) | |
56 | 59 | baseURL() string |
57 | 60 | file(string, string) beFileInterface |
58 | 61 | } |
91 | 94 | type beLargeFileInterface interface { |
92 | 95 | finishLargeFile(context.Context) (beFileInterface, error) |
93 | 96 | getUploadPartURL(context.Context) (beFileChunkInterface, error) |
97 | cancel(context.Context) error | |
94 | 98 | } |
95 | 99 | |
96 | 100 | type beLargeFile struct { |
144 | 148 | stamp time.Time |
145 | 149 | } |
146 | 150 | |
151 | type beKeyInterface interface { | |
152 | del(context.Context) error | |
153 | caps() []string | |
154 | name() string | |
155 | expires() time.Time | |
156 | secret() string | |
157 | id() string | |
158 | } | |
159 | ||
160 | type beKey struct { | |
161 | b2i beRootInterface | |
162 | k b2KeyInterface | |
163 | } | |
164 | ||
147 | 165 | func (r *beRoot) backoff(err error) time.Duration { return r.b2i.backoff(err) } |
148 | 166 | func (r *beRoot) reauth(err error) bool { return r.b2i.reauth(err) } |
149 | 167 | func (r *beRoot) reupload(err error) bool { return r.b2i.reupload(err) } |
150 | 168 | func (r *beRoot) transient(err error) bool { return r.b2i.transient(err) } |
151 | 169 | |
152 | func (r *beRoot) authorizeAccount(ctx context.Context, account, key string, opts ...ClientOption) error { | |
153 | f := func() error { | |
154 | if err := r.b2i.authorizeAccount(ctx, account, key, opts...); err != nil { | |
170 | func (r *beRoot) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error { | |
171 | f := func() error { | |
172 | if err := r.b2i.authorizeAccount(ctx, account, key, c); err != nil { | |
155 | 173 | return err |
156 | 174 | } |
157 | 175 | r.account = account |
158 | 176 | r.key = key |
159 | r.options = opts | |
177 | r.options = c | |
160 | 178 | return nil |
161 | 179 | } |
162 | 180 | return withBackoff(ctx, r, f) |
163 | 181 | } |
164 | 182 | |
165 | 183 | func (r *beRoot) reauthorizeAccount(ctx context.Context) error { |
166 | return r.authorizeAccount(ctx, r.account, r.key, r.options...) | |
184 | return r.authorizeAccount(ctx, r.account, r.key, r.options) | |
167 | 185 | } |
168 | 186 | |
169 | 187 | func (r *beRoot) createBucket(ctx context.Context, name, btype string, info map[string]string, rules []LifecycleRule) (beBucketInterface, error) { |
188 | 206 | return bi, nil |
189 | 207 | } |
190 | 208 | |
191 | func (r *beRoot) listBuckets(ctx context.Context) ([]beBucketInterface, error) { | |
209 | func (r *beRoot) listBuckets(ctx context.Context, name string) ([]beBucketInterface, error) { | |
192 | 210 | var buckets []beBucketInterface |
193 | 211 | f := func() error { |
194 | 212 | g := func() error { |
195 | bs, err := r.b2i.listBuckets(ctx) | |
213 | bs, err := r.b2i.listBuckets(ctx, name) | |
196 | 214 | if err != nil { |
197 | 215 | return err |
198 | 216 | } |
212 | 230 | return buckets, nil |
213 | 231 | } |
214 | 232 | |
215 | func (b *beBucket) name() string { | |
216 | return b.b2bucket.name() | |
217 | } | |
218 | ||
219 | func (b *beBucket) btype() BucketType { | |
220 | return BucketType(b.b2bucket.btype()) | |
221 | } | |
222 | ||
223 | func (b *beBucket) attrs() *BucketAttrs { | |
224 | return b.b2bucket.attrs() | |
225 | } | |
233 | func (r *beRoot) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (beKeyInterface, error) { | |
234 | var k *beKey | |
235 | f := func() error { | |
236 | g := func() error { | |
237 | got, err := r.b2i.createKey(ctx, name, caps, valid, bucketID, prefix) | |
238 | if err != nil { | |
239 | return err | |
240 | } | |
241 | k = &beKey{ | |
242 | b2i: r, | |
243 | k: got, | |
244 | } | |
245 | return nil | |
246 | } | |
247 | return withReauth(ctx, r, g) | |
248 | } | |
249 | if err := withBackoff(ctx, r, f); err != nil { | |
250 | return nil, err | |
251 | } | |
252 | return k, nil | |
253 | } | |
254 | ||
255 | func (r *beRoot) listKeys(ctx context.Context, max int, next string) ([]beKeyInterface, string, error) { | |
256 | var keys []beKeyInterface | |
257 | var cur string | |
258 | f := func() error { | |
259 | g := func() error { | |
260 | got, n, err := r.b2i.listKeys(ctx, max, next) | |
261 | if err != nil { | |
262 | return err | |
263 | } | |
264 | cur = n | |
265 | for _, g := range got { | |
266 | keys = append(keys, &beKey{ | |
267 | b2i: r, | |
268 | k: g, | |
269 | }) | |
270 | } | |
271 | return nil | |
272 | } | |
273 | return withReauth(ctx, r, g) | |
274 | } | |
275 | if err := withBackoff(ctx, r, f); err != nil { | |
276 | return nil, "", err | |
277 | } | |
278 | return keys, cur, nil | |
279 | } | |
280 | ||
281 | func (b *beBucket) name() string { return b.b2bucket.name() } | |
282 | func (b *beBucket) btype() BucketType { return BucketType(b.b2bucket.btype()) } | |
283 | func (b *beBucket) attrs() *BucketAttrs { return b.b2bucket.attrs() } | |
284 | func (b *beBucket) id() string { return b.b2bucket.id() } | |
226 | 285 | |
227 | 286 | func (b *beBucket) updateBucket(ctx context.Context, attrs *BucketAttrs) error { |
228 | 287 | f := func() error { |
367 | 426 | return files, cont, nil |
368 | 427 | } |
369 | 428 | |
370 | func (b *beBucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (beFileReaderInterface, error) { | |
429 | func (b *beBucket) downloadFileByName(ctx context.Context, name string, offset, size int64, header bool) (beFileReaderInterface, error) { | |
371 | 430 | var reader beFileReaderInterface |
372 | 431 | f := func() error { |
373 | 432 | g := func() error { |
374 | fr, err := b.b2bucket.downloadFileByName(ctx, name, offset, size) | |
433 | fr, err := b.b2bucket.downloadFileByName(ctx, name, offset, size, header) | |
375 | 434 | if err != nil { |
376 | 435 | return err |
377 | 436 | } |
411 | 470 | return file, nil |
412 | 471 | } |
413 | 472 | |
414 | func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration) (string, error) { | |
473 | func (b *beBucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) { | |
415 | 474 | var tok string |
416 | 475 | f := func() error { |
417 | 476 | g := func() error { |
418 | t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v) | |
477 | t, err := b.b2bucket.getDownloadAuthorization(ctx, p, v, s) | |
419 | 478 | if err != nil { |
420 | 479 | return err |
421 | 480 | } |
595 | 654 | return file, nil |
596 | 655 | } |
597 | 656 | |
657 | func (b *beLargeFile) cancel(ctx context.Context) error { | |
658 | f := func() error { | |
659 | g := func() error { | |
660 | return b.b2largeFile.cancel(ctx) | |
661 | } | |
662 | return withReauth(ctx, b.ri, g) | |
663 | } | |
664 | return withBackoff(ctx, b.ri, f) | |
665 | } | |
666 | ||
598 | 667 | func (b *beFileChunk) reload(ctx context.Context) error { |
599 | 668 | f := func() error { |
600 | 669 | g := func() error { |
647 | 716 | func (b *beFilePart) number() int { return b.b2filePart.number() } |
648 | 717 | func (b *beFilePart) sha1() string { return b.b2filePart.sha1() } |
649 | 718 | func (b *beFilePart) size() int64 { return b.b2filePart.size() } |
719 | ||
// beKey accessors delegate directly to the wrapped b2KeyInterface; note
// that del performs no backoff/reauth wrapping here.
func (b *beKey) del(ctx context.Context) error { return b.k.del(ctx) }
func (b *beKey) caps() []string                { return b.k.caps() }
func (b *beKey) name() string                  { return b.k.name() }
func (b *beKey) expires() time.Time            { return b.k.expires() }
func (b *beKey) secret() string                { return b.k.secret() }
func (b *beKey) id() string                    { return b.k.id() }
650 | 726 | |
651 | 727 | func jitter(d time.Duration) time.Duration { |
652 | 728 | f := float64(d) |
656 | 732 | } |
657 | 733 | |
658 | 734 | func getBackoff(d time.Duration) time.Duration { |
659 | if d > 15*time.Second { | |
660 | return d + jitter(d) | |
735 | if d > 30*time.Second { | |
736 | return 30*time.Second + jitter(d) | |
661 | 737 | } |
662 | 738 | return d*2 + jitter(d*2) |
663 | 739 | } |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
26 | 26 | // the only file in b2 that imports base. |
27 | 27 | |
// b2RootInterface abstracts the account-level B2 operations so that the
// backend layer can be exercised against a stub implementation in tests.
type b2RootInterface interface {
	// authorizeAccount authenticates with the given account/key pair and
	// the resolved client options.
	authorizeAccount(context.Context, string, string, clientOptions) error
	// transient/backoff/reauth/reupload classify an error for the retry
	// machinery.
	transient(error) bool
	backoff(error) time.Duration
	reauth(error) bool
	reupload(error) bool
	createBucket(context.Context, string, string, map[string]string, []LifecycleRule) (b2BucketInterface, error)
	// listBuckets takes an optional bucket name to filter by.
	listBuckets(context.Context, string) ([]b2BucketInterface, error)
	// createKey args: name, capabilities, lifetime, bucket ID, prefix.
	createKey(context.Context, string, []string, time.Duration, string, string) (b2KeyInterface, error)
	// listKeys args: max count, cursor; returns keys plus the next cursor.
	listKeys(context.Context, int, string) ([]b2KeyInterface, string, error)
}
37 | 39 | |
38 | 40 | type b2BucketInterface interface { |
39 | 41 | name() string |
40 | 42 | btype() string |
41 | 43 | attrs() *BucketAttrs |
44 | id() string | |
42 | 45 | updateBucket(context.Context, *BucketAttrs) error |
43 | 46 | deleteBucket(context.Context) error |
44 | 47 | getUploadURL(context.Context) (b2URLInterface, error) |
46 | 49 | listFileNames(context.Context, int, string, string, string) ([]b2FileInterface, string, error) |
47 | 50 | listFileVersions(context.Context, int, string, string, string, string) ([]b2FileInterface, string, string, error) |
48 | 51 | listUnfinishedLargeFiles(context.Context, int, string) ([]b2FileInterface, string, error) |
49 | downloadFileByName(context.Context, string, int64, int64) (b2FileReaderInterface, error) | |
52 | downloadFileByName(context.Context, string, int64, int64, bool) (b2FileReaderInterface, error) | |
50 | 53 | hideFile(context.Context, string) (b2FileInterface, error) |
51 | getDownloadAuthorization(context.Context, string, time.Duration) (string, error) | |
54 | getDownloadAuthorization(context.Context, string, time.Duration, string) (string, error) | |
52 | 55 | baseURL() string |
53 | 56 | file(string, string) b2FileInterface |
54 | 57 | } |
// b2LargeFileInterface abstracts an in-progress large-file upload.
type b2LargeFileInterface interface {
	finishLargeFile(context.Context) (b2FileInterface, error)
	getUploadPartURL(context.Context) (b2FileChunkInterface, error)
	// cancel aborts the upload, discarding any uploaded parts.
	cancel(context.Context) error
}
76 | 80 | |
77 | 81 | type b2FileChunkInterface interface { |
95 | 99 | size() int64 |
96 | 100 | } |
97 | 101 | |
// b2KeyInterface abstracts a B2 application key.
type b2KeyInterface interface {
	del(context.Context) error
	caps() []string
	name() string
	expires() time.Time
	secret() string
	id() string
}
110 | ||
98 | 111 | type b2Root struct { |
99 | 112 | b *base.B2 |
100 | 113 | } |
131 | 144 | b *base.FilePart |
132 | 145 | } |
133 | 146 | |
134 | func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, opts ...ClientOption) error { | |
135 | c := &clientOptions{} | |
136 | for _, f := range opts { | |
137 | f(c) | |
138 | } | |
147 | type b2Key struct { | |
148 | b *base.Key | |
149 | } | |
150 | ||
151 | func (b *b2Root) authorizeAccount(ctx context.Context, account, key string, c clientOptions) error { | |
139 | 152 | var aopts []base.AuthOption |
140 | 153 | ct := &clientTransport{client: c.client} |
141 | 154 | if c.transport != nil { |
150 | 163 | } |
151 | 164 | if c.capExceeded { |
152 | 165 | aopts = append(aopts, base.ForceCapExceeded()) |
166 | } | |
167 | if c.apiBase != "" { | |
168 | aopts = append(aopts, base.SetAPIBase(c.apiBase)) | |
153 | 169 | } |
154 | 170 | for _, agent := range c.userAgents { |
155 | 171 | aopts = append(aopts, base.UserAgent(agent)) |
201 | 217 | return &b2Bucket{bucket}, nil |
202 | 218 | } |
203 | 219 | |
204 | func (b *b2Root) listBuckets(ctx context.Context) ([]b2BucketInterface, error) { | |
205 | buckets, err := b.b.ListBuckets(ctx) | |
220 | func (b *b2Root) listBuckets(ctx context.Context, name string) ([]b2BucketInterface, error) { | |
221 | buckets, err := b.b.ListBuckets(ctx, name) | |
206 | 222 | if err != nil { |
207 | 223 | return nil, err |
208 | 224 | } |
248 | 264 | return err |
249 | 265 | } |
250 | 266 | |
267 | func (b *b2Root) createKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (b2KeyInterface, error) { | |
268 | k, err := b.b.CreateKey(ctx, name, caps, valid, bucketID, prefix) | |
269 | if err != nil { | |
270 | return nil, err | |
271 | } | |
272 | return &b2Key{k}, nil | |
273 | } | |
274 | ||
275 | func (b *b2Root) listKeys(ctx context.Context, max int, next string) ([]b2KeyInterface, string, error) { | |
276 | keys, next, err := b.b.ListKeys(ctx, max, next) | |
277 | if err != nil { | |
278 | return nil, "", err | |
279 | } | |
280 | var k []b2KeyInterface | |
281 | for _, key := range keys { | |
282 | k = append(k, &b2Key{key}) | |
283 | } | |
284 | return k, next, nil | |
285 | } | |
286 | ||
251 | 287 | func (b *b2Bucket) deleteBucket(ctx context.Context) error { |
252 | 288 | return b.b.DeleteBucket(ctx) |
253 | 289 | } |
276 | 312 | } |
277 | 313 | } |
278 | 314 | |
315 | func (b *b2Bucket) id() string { return b.b.ID } | |
316 | ||
279 | 317 | func (b *b2Bucket) getUploadURL(ctx context.Context) (b2URLInterface, error) { |
280 | 318 | url, err := b.b.GetUploadURL(ctx) |
281 | 319 | if err != nil { |
328 | 366 | return files, cont, nil |
329 | 367 | } |
330 | 368 | |
331 | func (b *b2Bucket) downloadFileByName(ctx context.Context, name string, offset, size int64) (b2FileReaderInterface, error) { | |
332 | fr, err := b.b.DownloadFileByName(ctx, name, offset, size) | |
369 | func (b *b2Bucket) downloadFileByName(ctx context.Context, name string, offset, size int64, header bool) (b2FileReaderInterface, error) { | |
370 | fr, err := b.b.DownloadFileByName(ctx, name, offset, size, header) | |
333 | 371 | if err != nil { |
334 | 372 | code, _ := base.Code(err) |
335 | 373 | switch code { |
351 | 389 | return &b2File{f}, nil |
352 | 390 | } |
353 | 391 | |
354 | func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration) (string, error) { | |
355 | return b.b.GetDownloadAuthorization(ctx, p, v) | |
392 | func (b *b2Bucket) getDownloadAuthorization(ctx context.Context, p string, v time.Duration, s string) (string, error) { | |
393 | return b.b.GetDownloadAuthorization(ctx, p, v, s) | |
356 | 394 | } |
357 | 395 | |
358 | 396 | func (b *b2Bucket) baseURL() string { |
436 | 474 | return &b2FileChunk{c}, nil |
437 | 475 | } |
438 | 476 | |
// cancel aborts this large file upload via the base layer.
func (b *b2LargeFile) cancel(ctx context.Context) error {
	return b.b.CancelLargeFile(ctx)
}
480 | ||
439 | 481 | func (b *b2FileChunk) reload(ctx context.Context) error { |
440 | 482 | return b.b.Reload(ctx) |
441 | 483 | } |
465 | 507 | func (b *b2FilePart) number() int { return b.b.Number } |
466 | 508 | func (b *b2FilePart) sha1() string { return b.b.SHA1 } |
467 | 509 | func (b *b2FilePart) size() int64 { return b.b.Size } |
510 | ||
// b2Key accessors delegate directly to the underlying base key.
func (b *b2Key) del(ctx context.Context) error { return b.b.Delete(ctx) }
func (b *b2Key) caps() []string                { return b.b.Capabilities }
func (b *b2Key) name() string                  { return b.b.Name }
func (b *b2Key) expires() time.Time            { return b.b.Expires }
func (b *b2Key) secret() string                { return b.b.Secret }
func (b *b2Key) id() string                    { return b.b.ID }
0 | // Copyright 2017, Google | |
0 | // Copyright 2017, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
21 | 21 | "encoding/hex" |
22 | 22 | "fmt" |
23 | 23 | "io" |
24 | "io/ioutil" | |
24 | 25 | "net/http" |
25 | 26 | "os" |
26 | 27 | "reflect" |
28 | "strings" | |
27 | 29 | "sync/atomic" |
28 | 30 | "testing" |
29 | 31 | "time" |
30 | 32 | |
33 | "github.com/kurin/blazer/internal/blog" | |
31 | 34 | "github.com/kurin/blazer/x/transport" |
32 | 35 | ) |
33 | 36 | |
136 | 139 | if rn != n { |
137 | 140 | t.Errorf("Read from B2: got %d bytes, want %d bytes", rn, n) |
138 | 141 | } |
142 | if err, ok := r.Verify(); ok && err != nil { | |
143 | t.Errorf("Read from B2: %v", err) | |
144 | } | |
139 | 145 | if err := r.Close(); err != nil { |
140 | 146 | t.Errorf("r.Close(): %v", err) |
141 | 147 | } |
274 | 280 | } |
275 | 281 | } |
276 | 282 | |
283 | func TestResumeWriterWithoutExtantFile(t *testing.T) { | |
284 | ctx := context.Background() | |
285 | bucket, done := startLiveTest(ctx, t) | |
286 | defer done() | |
287 | ||
288 | r := io.LimitReader(zReader{}, 15e6) | |
289 | w := bucket.Object("foo").NewWriter(ctx) | |
290 | w.ChunkSize = 5e6 | |
291 | w.Resume = true | |
292 | if _, err := io.Copy(w, r); err != nil { | |
293 | t.Fatalf("io.Copy: %v", err) | |
294 | } | |
295 | } | |
296 | ||
277 | 297 | func TestAttrs(t *testing.T) { |
278 | 298 | ctx := context.Background() |
279 | 299 | ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) |
321 | 341 | for _, e := range table { |
322 | 342 | for _, attrs := range attrlist { |
323 | 343 | o := bucket.Object(e.name) |
324 | w := o.NewWriter(ctx).WithAttrs(attrs) | |
344 | w := o.NewWriter(ctx, WithAttrsOption(attrs)) | |
345 | w.ChunkSize = 5e6 | |
325 | 346 | if _, err := io.Copy(w, io.LimitReader(zReader{}, e.size)); err != nil { |
326 | 347 | t.Error(err) |
327 | 348 | continue |
363 | 384 | |
364 | 385 | w.UseFileBuffer = true |
365 | 386 | |
366 | w.Write(nil) | |
387 | if _, err := io.CopyN(w, r, 1); err != nil { | |
388 | t.Fatalf("CopyN: %v", err) | |
389 | } | |
367 | 390 | wb, ok := w.w.(*fileBuffer) |
368 | 391 | if !ok { |
369 | 392 | t.Fatalf("writer isn't using file buffer: %T", w.w) |
425 | 448 | } |
426 | 449 | if brsp.StatusCode != 401 { |
427 | 450 | t.Fatalf("%s: got %s, want 401", burl, brsp.Status) |
451 | } | |
452 | } | |
453 | ||
454 | func TestObjAuthTokLive(t *testing.T) { | |
455 | ctx := context.Background() | |
456 | ctx, cancel := context.WithTimeout(ctx, time.Minute) | |
457 | defer cancel() | |
458 | bucket, done := startLiveTest(ctx, t) | |
459 | defer done() | |
460 | ||
461 | table := []struct { | |
462 | obj string | |
463 | d time.Duration | |
464 | b2cd string | |
465 | }{ | |
466 | { | |
467 | obj: "foo/bar", | |
468 | d: time.Minute, | |
469 | }, | |
470 | { | |
471 | obj: "foo2/thing.pdf", | |
472 | d: time.Minute, | |
473 | b2cd: "attachment", | |
474 | }, | |
475 | { | |
476 | obj: "foo2/thing.pdf", | |
477 | d: time.Minute, | |
478 | b2cd: `attachment; filename="what.png"`, | |
479 | }, | |
480 | } | |
481 | ||
482 | for _, e := range table { | |
483 | fw := bucket.Object(e.obj).NewWriter(ctx) | |
484 | io.Copy(fw, io.LimitReader(zReader{}, 1e5)) | |
485 | if err := fw.Close(); err != nil { | |
486 | t.Fatal(err) | |
487 | } | |
488 | ||
489 | url, err := bucket.Object(e.obj).AuthURL(ctx, e.d, e.b2cd) | |
490 | if err != nil { | |
491 | t.Fatal(err) | |
492 | } | |
493 | ||
494 | blog.V(2).Infof("downloading %s", url.String()) | |
495 | frsp, err := http.Get(url.String()) | |
496 | if err != nil { | |
497 | t.Fatal(err) | |
498 | } | |
499 | if frsp.StatusCode != 200 { | |
500 | t.Fatalf("%s: got %s, want 200", url.String(), frsp.Status) | |
501 | } | |
428 | 502 | } |
429 | 503 | } |
430 | 504 | |
508 | 582 | want := fmt.Sprintf("%x", hw.Sum(nil)) |
509 | 583 | if got != want { |
510 | 584 | t.Errorf("NewRangeReader(_, %d, %d): got %q, want %q", e.offset, e.length, got, want) |
585 | } | |
586 | if err, ok := r.Verify(); ok && err != nil { | |
587 | t.Errorf("NewRangeReader(_, %d, %d): %v", e.offset, e.length, err) | |
511 | 588 | } |
512 | 589 | } |
513 | 590 | } |
841 | 918 | } |
842 | 919 | } |
843 | 920 | |
921 | func TestZeroByteObject(t *testing.T) { | |
922 | ctx := context.Background() | |
923 | ctx, cancel := context.WithTimeout(ctx, 10*time.Minute) | |
924 | defer cancel() | |
925 | ||
926 | bucket, done := startLiveTest(ctx, t) | |
927 | defer done() | |
928 | ||
929 | _, _, err := writeFile(ctx, bucket, smallFileName, 0, 0) | |
930 | if err != nil { | |
931 | t.Fatal(err) | |
932 | } | |
933 | ||
934 | if err := bucket.Object(smallFileName).Delete(ctx); err != nil { | |
935 | t.Fatal(err) | |
936 | } | |
937 | } | |
938 | ||
844 | 939 | func TestListUnfinishedLargeFiles(t *testing.T) { |
845 | 940 | ctx := context.Background() |
846 | 941 | bucket, done := startLiveTest(ctx, t) |
862 | 957 | bucket, done := startLiveTest(ctx, t) |
863 | 958 | defer done() |
864 | 959 | |
865 | var first []ClientOption | |
866 | opts := bucket.r.(*beRoot).options | |
867 | for _, o := range opts { | |
868 | first = append(first, o) | |
869 | } | |
870 | ||
960 | first := bucket.r.(*beRoot).options | |
871 | 961 | if err := bucket.r.reauthorizeAccount(ctx); err != nil { |
872 | 962 | t.Fatalf("reauthorizeAccount: %v", err) |
873 | 963 | } |
874 | ||
875 | 964 | second := bucket.r.(*beRoot).options |
876 | if len(second) != len(first) { | |
877 | t.Fatalf("options mismatch: got %d options, wanted %d", len(second), len(first)) | |
878 | } | |
879 | ||
880 | var f, s clientOptions | |
881 | for i := range first { | |
882 | first[i](&f) | |
883 | second[i](&s) | |
884 | } | |
885 | ||
886 | if !f.eq(s) { | |
887 | t.Errorf("options mismatch: got %v, want %v", s, f) | |
965 | if !reflect.DeepEqual(first, second) { | |
966 | // Test that they are literally the same set of options, which is an | |
967 | // implementation detail but is fine for now. | |
968 | t.Errorf("options mismatch: got %v, want %v", second, first) | |
969 | } | |
970 | } | |
971 | ||
972 | func TestVerifyReader(t *testing.T) { | |
973 | ctx := context.Background() | |
974 | bucket, done := startLiveTest(ctx, t) | |
975 | defer done() | |
976 | ||
977 | table := []struct { | |
978 | name string | |
979 | fakeSHA string | |
980 | size int64 | |
981 | off, len int64 | |
982 | valid bool | |
983 | }{ | |
984 | { | |
985 | name: "first", | |
986 | size: 100, | |
987 | off: 0, | |
988 | len: -1, | |
989 | valid: true, | |
990 | }, | |
991 | { | |
992 | name: "second", | |
993 | size: 100, | |
994 | off: 0, | |
995 | len: 100, | |
996 | valid: true, | |
997 | }, | |
998 | { | |
999 | name: "third", | |
1000 | size: 100, | |
1001 | off: 0, | |
1002 | len: 99, | |
1003 | valid: false, | |
1004 | }, | |
1005 | { | |
1006 | name: "fourth", | |
1007 | size: 5e6 + 100, | |
1008 | off: 0, | |
1009 | len: -1, | |
1010 | valid: false, | |
1011 | }, | |
1012 | { | |
1013 | name: "fifth", | |
1014 | size: 5e6 + 100, | |
1015 | off: 0, | |
1016 | len: -1, | |
1017 | fakeSHA: "fbc815f2d6518858dec83ccb46263875fc894d88", | |
1018 | valid: true, | |
1019 | }, | |
1020 | } | |
1021 | ||
1022 | for _, e := range table { | |
1023 | o := bucket.Object(e.name) | |
1024 | var opts []WriterOption | |
1025 | if e.fakeSHA != "" { | |
1026 | opts = append(opts, WithAttrsOption(&Attrs{SHA1: e.fakeSHA})) | |
1027 | } | |
1028 | w := o.NewWriter(ctx, opts...) | |
1029 | w.ChunkSize = 5e6 | |
1030 | if _, err := io.Copy(w, io.LimitReader(zReader{}, e.size)); err != nil { | |
1031 | t.Error(err) | |
1032 | continue | |
1033 | } | |
1034 | if err := w.Close(); err != nil { | |
1035 | t.Error(err) | |
1036 | continue | |
1037 | } | |
1038 | r := o.NewRangeReader(ctx, e.off, e.len) | |
1039 | if _, err := io.Copy(ioutil.Discard, r); err != nil { | |
1040 | t.Error(err) | |
1041 | } | |
1042 | err, ok := r.Verify() | |
1043 | if ok != e.valid { | |
1044 | t.Errorf("%s: bad validity: got %v, want %v", e.name, ok, e.valid) | |
1045 | } | |
1046 | if e.valid && err != nil { | |
1047 | t.Errorf("%s does not verify: %v", e.name, err) | |
1048 | } | |
1049 | } | |
1050 | } | |
1051 | ||
1052 | func TestListBucketsWithKey(t *testing.T) { | |
1053 | ctx := context.Background() | |
1054 | bucket, done := startLiveTest(ctx, t) | |
1055 | defer done() | |
1056 | ||
1057 | key, err := bucket.CreateKey(ctx, "testKey", Capabilities("listBuckets")) | |
1058 | if err != nil { | |
1059 | t.Fatal(err) | |
1060 | } | |
1061 | ||
1062 | client, err := NewClient(ctx, key.ID(), key.Secret()) | |
1063 | if err != nil { | |
1064 | t.Fatal(err) | |
1065 | } | |
1066 | if _, err := client.Bucket(ctx, bucket.Name()); err != nil { | |
1067 | t.Fatal(err) | |
1068 | } | |
1069 | } | |
1070 | ||
1071 | func TestListBucketContentsWithKey(t *testing.T) { | |
1072 | ctx := context.Background() | |
1073 | bucket, done := startLiveTest(ctx, t) | |
1074 | defer done() | |
1075 | ||
1076 | for _, path := range []string{"foo/bar", "foo/baz", "foo", "bar", "baz"} { | |
1077 | if _, _, err := writeFile(ctx, bucket, path, 1, 1e8); err != nil { | |
1078 | t.Fatal(err) | |
1079 | } | |
1080 | } | |
1081 | ||
1082 | key, err := bucket.CreateKey(ctx, "testKey", Capabilities("listBuckets", "listFiles"), Prefix("foo/")) | |
1083 | if err != nil { | |
1084 | t.Fatal(err) | |
1085 | } | |
1086 | client, err := NewClient(ctx, key.ID(), key.Secret()) | |
1087 | if err != nil { | |
1088 | t.Fatal(err) | |
1089 | } | |
1090 | obucket, err := client.Bucket(ctx, bucket.Name()) | |
1091 | if err != nil { | |
1092 | t.Fatal(err) | |
1093 | } | |
1094 | iter := obucket.List(ctx) | |
1095 | var got []string | |
1096 | for iter.Next() { | |
1097 | got = append(got, iter.Object().Name()) | |
1098 | } | |
1099 | if iter.Err() != nil { | |
1100 | t.Fatal(iter.Err()) | |
1101 | } | |
1102 | want := []string{"foo/bar", "foo/baz"} | |
1103 | if !reflect.DeepEqual(got, want) { | |
1104 | t.Errorf("error listing objects with restricted key: got %v, want %v", got, want) | |
1105 | } | |
1106 | iter2 := obucket.List(ctx, ListHidden()) | |
1107 | for iter2.Next() { | |
1108 | } | |
1109 | if iter2.Err() != nil { | |
1110 | t.Error(iter2.Err()) | |
1111 | } | |
1112 | } | |
1113 | ||
1114 | func TestCreateDeleteKey(t *testing.T) { | |
1115 | ctx := context.Background() | |
1116 | bucket, done := startLiveTest(ctx, t) | |
1117 | defer done() | |
1118 | ||
1119 | table := []struct { | |
1120 | d time.Duration | |
1121 | e time.Time | |
1122 | bucket bool | |
1123 | cap []string | |
1124 | pfx string | |
1125 | }{ | |
1126 | { | |
1127 | cap: []string{"deleteKeys"}, | |
1128 | }, | |
1129 | { | |
1130 | d: time.Minute, | |
1131 | cap: []string{"deleteKeys"}, | |
1132 | pfx: "prefox", | |
1133 | }, | |
1134 | { | |
1135 | e: time.Now().Add(time.Minute), // <shrug emojis> | |
1136 | cap: []string{"writeFiles", "listFiles"}, | |
1137 | bucket: true, | |
1138 | }, | |
1139 | { | |
1140 | d: time.Minute, | |
1141 | cap: []string{"writeFiles", "listFiles"}, | |
1142 | pfx: "prefox", | |
1143 | bucket: true, | |
1144 | }, | |
1145 | } | |
1146 | ||
1147 | for _, e := range table { | |
1148 | var opts []KeyOption | |
1149 | opts = append(opts, Capabilities(e.cap...)) | |
1150 | if e.d != 0 { | |
1151 | opts = append(opts, Lifetime(e.d)) | |
1152 | } | |
1153 | if !e.e.IsZero() { | |
1154 | opts = append(opts, Deadline(e.e)) | |
1155 | } | |
1156 | var key *Key | |
1157 | if e.bucket { | |
1158 | opts = append(opts, Prefix(e.pfx)) | |
1159 | bkey, err := bucket.CreateKey(ctx, "whee", opts...) | |
1160 | if err != nil { | |
1161 | t.Errorf("Bucket.CreateKey(%v, %v): %v", bucket.Name(), e, err) | |
1162 | continue | |
1163 | } | |
1164 | key = bkey | |
1165 | } else { | |
1166 | gkey, err := bucket.c.CreateKey(ctx, "whee", opts...) | |
1167 | if err != nil { | |
1168 | t.Errorf("Client.CreateKey(%v): %v", e, err) | |
1169 | continue | |
1170 | } | |
1171 | key = gkey | |
1172 | } | |
1173 | if err := key.Delete(ctx); err != nil { | |
1174 | t.Errorf("key.Delete(): %v", err) | |
1175 | } | |
1176 | } | |
1177 | } | |
1178 | ||
1179 | func TestListKeys(t *testing.T) { | |
1180 | ctx := context.Background() | |
1181 | bucket, done := startLiveTest(ctx, t) | |
1182 | defer done() | |
1183 | ||
1184 | n := 20 | |
1185 | ||
1186 | for i := 0; i < n; i++ { | |
1187 | key, err := bucket.CreateKey(ctx, fmt.Sprintf("%d-list-key-test", i), Capabilities("listBuckets")) | |
1188 | if err != nil { | |
1189 | t.Fatalf("CreateKey(%d): %v", i, err) | |
1190 | } | |
1191 | defer key.Delete(ctx) | |
1192 | } | |
1193 | ||
1194 | var got []string | |
1195 | var cur string | |
1196 | for { | |
1197 | ks, c, err := bucket.c.ListKeys(ctx, 10, cur) | |
1198 | if err != nil && err != io.EOF { | |
1199 | t.Fatalf("ListKeys(): %v", err) | |
1200 | } | |
1201 | for _, k := range ks { | |
1202 | if strings.HasSuffix(k.Name(), "list-key-test") { | |
1203 | got = append(got, k.Name()) | |
1204 | } | |
1205 | } | |
1206 | cur = c | |
1207 | if err == io.EOF { | |
1208 | break | |
1209 | } | |
1210 | } | |
1211 | if len(got) != n { | |
1212 | t.Errorf("ListKeys(): got %d, want %d: %v", len(got), n, got) | |
1213 | } | |
1214 | } | |
1215 | ||
1216 | func TestEmptyObject(t *testing.T) { | |
1217 | ctx := context.Background() | |
1218 | bucket, done := startLiveTest(ctx, t) | |
1219 | defer done() | |
1220 | ||
1221 | obj := bucket.Object("empty") | |
1222 | w := obj.NewWriter(ctx) | |
1223 | if _, err := w.Write([]byte{}); err != nil { | |
1224 | t.Fatalf("Write: %v", err) | |
1225 | } | |
1226 | if err := w.Close(); err != nil { | |
1227 | t.Fatalf("Close: %v", err) | |
1228 | } | |
1229 | ||
1230 | attrs, err := obj.Attrs(ctx) | |
1231 | if err != nil { | |
1232 | t.Fatalf("Attrs: %v", err) | |
1233 | } | |
1234 | if attrs.Size != 0 { | |
1235 | t.Fatalf("Unexpected object size: got %d, want 0", attrs.Size) | |
888 | 1236 | } |
889 | 1237 | } |
890 | 1238 |
0 | // Copyright 2018, Google | |
0 | // Copyright 2018, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
49 | 49 | final bool |
50 | 50 | err error |
51 | 51 | idx int |
52 | c *Cursor | |
52 | c *cursor | |
53 | 53 | opts objectIteratorOptions |
54 | 54 | objs []*Object |
55 | 55 | init sync.Once |
57 | 57 | count int |
58 | 58 | } |
59 | 59 | |
// lister is the shared signature of the per-page listing methods
// (listObjects, listCurrentObjects, listUnfinishedLargeFiles): it takes
// a page size and a resume cursor and returns the page, the next
// cursor, and io.EOF at the end of the listing.
type lister func(context.Context, int, *cursor) ([]*Object, *cursor, error)
61 | 61 | |
62 | 62 | func (o *ObjectIterator) page(ctx context.Context) error { |
63 | 63 | if o.opts.locker != nil { |
95 | 95 | } |
96 | 96 | switch { |
97 | 97 | case o.opts.unfinished: |
98 | o.l = o.bucket.ListUnfinishedLargeFiles | |
98 | o.l = o.bucket.listUnfinishedLargeFiles | |
99 | 99 | if o.count > 100 { |
100 | 100 | o.count = 100 |
101 | 101 | } |
102 | 102 | case o.opts.hidden: |
103 | o.l = o.bucket.ListObjects | |
103 | o.l = o.bucket.listObjects | |
104 | 104 | default: |
105 | o.l = o.bucket.ListCurrentObjects | |
106 | } | |
107 | o.c = &Cursor{ | |
108 | Prefix: o.opts.prefix, | |
109 | Delimiter: o.opts.delimiter, | |
105 | o.l = o.bucket.listCurrentObjects | |
106 | } | |
107 | o.c = &cursor{ | |
108 | prefix: o.opts.prefix, | |
109 | delimiter: o.opts.delimiter, | |
110 | 110 | } |
111 | 111 | }) |
112 | 112 | if o.err != nil { |
214 | 214 | o.locker = l |
215 | 215 | } |
216 | 216 | } |
217 | ||
// cursor tracks the position within a bucket listing so that iteration
// can resume across pages.
type cursor struct {
	// prefix limits the listed objects to those that begin with this string.
	prefix string

	// delimiter denotes the path separator. If set, object listings will be
	// truncated at this character.
	//
	// For example, if the bucket contains objects foo/bar, foo/baz, and foo,
	// then a delimiter of "/" will cause the listing to return "foo" and "foo/".
	// Otherwise, the listing would have returned all object names.
	//
	// Note that objects returned that end in the delimiter may not be actual
	// objects, e.g. you cannot read from (or write to, or delete) an object "foo/",
	// both because no actual object exists and because B2 disallows object names
	// that end with "/". If you want to ensure that all objects returned are
	// actual objects, leave this unset.
	delimiter string

	// name and id record the next file name/version at which to resume.
	name string
	id   string
}
239 | ||
240 | func (b *Bucket) listObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) { | |
241 | if c == nil { | |
242 | c = &cursor{} | |
243 | } | |
244 | fs, name, id, err := b.b.listFileVersions(ctx, count, c.name, c.id, c.prefix, c.delimiter) | |
245 | if err != nil { | |
246 | return nil, nil, err | |
247 | } | |
248 | var next *cursor | |
249 | if name != "" && id != "" { | |
250 | next = &cursor{ | |
251 | prefix: c.prefix, | |
252 | delimiter: c.delimiter, | |
253 | name: name, | |
254 | id: id, | |
255 | } | |
256 | } | |
257 | var objects []*Object | |
258 | for _, f := range fs { | |
259 | objects = append(objects, &Object{ | |
260 | name: f.name(), | |
261 | f: f, | |
262 | b: b, | |
263 | }) | |
264 | } | |
265 | var rtnErr error | |
266 | if len(objects) == 0 || next == nil { | |
267 | rtnErr = io.EOF | |
268 | } | |
269 | return objects, next, rtnErr | |
270 | } | |
271 | ||
272 | func (b *Bucket) listCurrentObjects(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) { | |
273 | if c == nil { | |
274 | c = &cursor{} | |
275 | } | |
276 | fs, name, err := b.b.listFileNames(ctx, count, c.name, c.prefix, c.delimiter) | |
277 | if err != nil { | |
278 | return nil, nil, err | |
279 | } | |
280 | var next *cursor | |
281 | if name != "" { | |
282 | next = &cursor{ | |
283 | prefix: c.prefix, | |
284 | delimiter: c.delimiter, | |
285 | name: name, | |
286 | } | |
287 | } | |
288 | var objects []*Object | |
289 | for _, f := range fs { | |
290 | objects = append(objects, &Object{ | |
291 | name: f.name(), | |
292 | f: f, | |
293 | b: b, | |
294 | }) | |
295 | } | |
296 | var rtnErr error | |
297 | if len(objects) == 0 || next == nil { | |
298 | rtnErr = io.EOF | |
299 | } | |
300 | return objects, next, rtnErr | |
301 | } | |
302 | ||
303 | func (b *Bucket) listUnfinishedLargeFiles(ctx context.Context, count int, c *cursor) ([]*Object, *cursor, error) { | |
304 | if c == nil { | |
305 | c = &cursor{} | |
306 | } | |
307 | fs, name, err := b.b.listUnfinishedLargeFiles(ctx, count, c.name) | |
308 | if err != nil { | |
309 | return nil, nil, err | |
310 | } | |
311 | var next *cursor | |
312 | if name != "" { | |
313 | next = &cursor{ | |
314 | name: name, | |
315 | } | |
316 | } | |
317 | var objects []*Object | |
318 | for _, f := range fs { | |
319 | objects = append(objects, &Object{ | |
320 | name: f.name(), | |
321 | f: f, | |
322 | b: b, | |
323 | }) | |
324 | } | |
325 | var rtnErr error | |
326 | if len(objects) == 0 || next == nil { | |
327 | rtnErr = io.EOF | |
328 | } | |
329 | return objects, next, rtnErr | |
330 | } |
0 | // Copyright 2018, the Blazer authors | |
1 | // | |
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | // you may not use this file except in compliance with the License. | |
4 | // You may obtain a copy of the License at | |
5 | // | |
6 | // http://www.apache.org/licenses/LICENSE-2.0 | |
7 | // | |
8 | // Unless required by applicable law or agreed to in writing, software | |
9 | // distributed under the License is distributed on an "AS IS" BASIS, | |
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | // See the License for the specific language governing permissions and | |
12 | // limitations under the License. | |
13 | ||
14 | package b2 | |
15 | ||
16 | import ( | |
17 | "context" | |
18 | "errors" | |
19 | "io" | |
20 | "time" | |
21 | ) | |
22 | ||
// Key is a B2 application key. A Key grants limited access on a global or
// per-bucket basis.
type Key struct {
	c *Client
	k beKeyInterface
}

// Capabilities returns the list of capabilities granted by this application
// key.
func (k *Key) Capabilities() []string { return k.k.caps() }

// Name returns the user-supplied name of this application key. Key names are
// useless.
func (k *Key) Name() string { return k.k.name() }

// Expires returns the expiration date of this application key.
func (k *Key) Expires() time.Time { return k.k.expires() }

// Delete removes the key from B2.
func (k *Key) Delete(ctx context.Context) error { return k.k.del(ctx) }

// Secret returns the value that should be passed into NewClient(). It is only
// available on newly created keys; it is not available from ListKey
// operations.
func (k *Key) Secret() string { return k.k.secret() }

// ID returns the application key ID. This, plus the secret, is necessary to
// authenticate to B2.
func (k *Key) ID() string { return k.k.id() }
52 | ||
// keyOptions collects the optional properties requested via KeyOption
// arguments.
type keyOptions struct {
	caps     []string      // capabilities to grant
	prefix   string        // object-name prefix restriction (bucket-scoped keys only)
	lifetime time.Duration // requested validity period (zero if unset)
}

// KeyOption specifies desired properties for application keys.
type KeyOption func(*keyOptions)

// Lifetime requests a key with the given lifetime.
func Lifetime(d time.Duration) KeyOption {
	return func(k *keyOptions) {
		k.lifetime = d
	}
}
68 | ||
69 | // Deadline requests a key that expires after the given date. | |
70 | func Deadline(t time.Time) KeyOption { | |
71 | d := t.Sub(time.Now()) | |
72 | return Lifetime(d) | |
73 | } | |
74 | ||
// Capabilities requests a key with the given capabilities. Repeated
// uses accumulate rather than overwrite.
func Capabilities(caps ...string) KeyOption {
	return func(k *keyOptions) {
		k.caps = append(k.caps, caps...)
	}
}

// Prefix limits the requested application key to be valid only for objects
// that begin with prefix. This can only be used when requesting an
// application key within a specific bucket.
func Prefix(prefix string) KeyOption {
	return func(k *keyOptions) {
		k.prefix = prefix
	}
}
90 | ||
91 | // CreateKey creates a global application key that is valid for all buckets in | |
92 | // this project. The key's secret will only be accessible on the object | |
93 | // returned from this call. | |
94 | func (c *Client) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) { | |
95 | var ko keyOptions | |
96 | for _, o := range opts { | |
97 | o(&ko) | |
98 | } | |
99 | if ko.prefix != "" { | |
100 | return nil, errors.New("Prefix is not a valid option for global application keys") | |
101 | } | |
102 | ki, err := c.backend.createKey(ctx, name, ko.caps, ko.lifetime, "", "") | |
103 | if err != nil { | |
104 | return nil, err | |
105 | } | |
106 | return &Key{ | |
107 | c: c, | |
108 | k: ki, | |
109 | }, nil | |
110 | } | |
111 | ||
112 | // ListKeys lists all the keys associated with this project. It takes the | |
113 | // maximum number of keys it should return in a call, as well as a cursor | |
114 | // (which should be empty for the initial call). It will return up to count | |
115 | // keys, as well as the cursor for the next invocation. | |
116 | // | |
117 | // ListKeys returns io.EOF when there are no more keys, although it may do so | |
118 | // concurrently with the final set of keys. | |
119 | func (c *Client) ListKeys(ctx context.Context, count int, cursor string) ([]*Key, string, error) { | |
120 | ks, next, err := c.backend.listKeys(ctx, count, cursor) | |
121 | if err != nil { | |
122 | return nil, "", err | |
123 | } | |
124 | if len(ks) == 0 { | |
125 | return nil, "", io.EOF | |
126 | } | |
127 | var keys []*Key | |
128 | for _, k := range ks { | |
129 | keys = append(keys, &Key{ | |
130 | c: c, | |
131 | k: k, | |
132 | }) | |
133 | } | |
134 | var rerr error | |
135 | if next == "" { | |
136 | rerr = io.EOF | |
137 | } | |
138 | return keys, next, rerr | |
139 | } | |
140 | ||
141 | // CreateKey creates a scoped application key that is valid only for this bucket. | |
142 | func (b *Bucket) CreateKey(ctx context.Context, name string, opts ...KeyOption) (*Key, error) { | |
143 | var ko keyOptions | |
144 | for _, o := range opts { | |
145 | o(&ko) | |
146 | } | |
147 | ki, err := b.r.createKey(ctx, name, ko.caps, ko.lifetime, b.b.id(), ko.prefix) | |
148 | if err != nil { | |
149 | return nil, err | |
150 | } | |
151 | return &Key{ | |
152 | c: b.c, | |
153 | k: ki, | |
154 | }, nil | |
155 | } |
0 | // Copyright 2017, Google | |
0 | // Copyright 2017, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
16 | 16 | import ( |
17 | 17 | "bytes" |
18 | 18 | "context" |
19 | "crypto/sha1" | |
19 | 20 | "errors" |
21 | "fmt" | |
22 | "hash" | |
20 | 23 | "io" |
21 | 24 | "sync" |
25 | "time" | |
22 | 26 | |
23 | 27 | "github.com/kurin/blazer/internal/blog" |
24 | 28 | ) |
37 | 41 | // 10MB. |
38 | 42 | ChunkSize int |
39 | 43 | |
40 | ctx context.Context | |
41 | cancel context.CancelFunc // cancels ctx | |
42 | o *Object | |
43 | name string | |
44 | offset int64 // the start of the file | |
45 | length int64 // the length to read, or -1 | |
46 | csize int // chunk size | |
47 | read int // amount read | |
48 | chwid int // chunks written | |
49 | chrid int // chunks read | |
50 | chbuf chan *rchunk | |
51 | init sync.Once | |
52 | rmux sync.Mutex // guards rcond | |
53 | rcond *sync.Cond | |
54 | chunks map[int]*rchunk | |
44 | ctx context.Context | |
45 | cancel context.CancelFunc // cancels ctx | |
46 | o *Object | |
47 | name string | |
48 | offset int64 // the start of the file | |
49 | length int64 // the length to read, or -1 | |
50 | csize int // chunk size | |
51 | read int // amount read | |
52 | chwid int // chunks written | |
53 | chrid int // chunks read | |
54 | chbuf chan *rchunk | |
55 | init sync.Once | |
56 | chunks map[int]*rchunk | |
57 | vrfy hash.Hash | |
58 | readOffEnd bool | |
59 | sha1 string | |
60 | ||
61 | rmux sync.Mutex // guards rcond | |
62 | rcond *sync.Cond | |
55 | 63 | |
56 | 64 | emux sync.RWMutex // guards err, believe it or not |
57 | 65 | err error |
121 | 129 | } |
122 | 130 | r.length -= size |
123 | 131 | } |
132 | var b backoff | |
124 | 133 | redo: |
125 | fr, err := r.o.b.b.downloadFileByName(r.ctx, r.name, offset, size) | |
134 | fr, err := r.o.b.b.downloadFileByName(r.ctx, r.name, offset, size, false) | |
126 | 135 | if err == errNoMoreContent { |
127 | 136 | // this read generated a 416 so we are entirely past the end of the object |
137 | r.readOffEnd = true | |
128 | 138 | buf.final = true |
129 | 139 | r.rmux.Lock() |
130 | 140 | r.chunks[chunkID] = buf |
137 | 147 | r.rcond.Broadcast() |
138 | 148 | return |
139 | 149 | } |
140 | rsize, _, _, _ := fr.stats() | |
150 | rsize, _, sha1, _ := fr.stats() | |
151 | if len(sha1) == 40 && r.sha1 != sha1 { | |
152 | r.sha1 = sha1 | |
153 | } | |
141 | 154 | mr := &meteredReader{r: noopResetter{fr}, size: int(rsize)} |
142 | 155 | r.smux.Lock() |
143 | 156 | r.smap[chunkID] = mr |
149 | 162 | r.smux.Unlock() |
150 | 163 | if i < int64(rsize) || err == io.ErrUnexpectedEOF { |
151 | 164 | // Probably the network connection was closed early. Retry. |
152 | blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying", chunkID, i, rsize) | |
165 | blog.V(1).Infof("b2 reader %d: got %dB of %dB; retrying after %v", chunkID, i, rsize, b) | |
166 | if err := b.wait(r.ctx); err != nil { | |
167 | r.setErr(err) | |
168 | r.rcond.Broadcast() | |
169 | return | |
170 | } | |
153 | 171 | buf.Reset() |
154 | 172 | goto redo |
155 | 173 | } |
210 | 228 | r.thread() |
211 | 229 | r.chbuf <- &rchunk{} |
212 | 230 | } |
231 | r.vrfy = sha1.New() | |
213 | 232 | } |
214 | 233 | |
215 | 234 | func (r *Reader) Read(p []byte) (int, error) { |
216 | 235 | if err := r.getErr(); err != nil { |
217 | 236 | return 0, err |
218 | 237 | } |
219 | // TODO: check the SHA1 hash here and verify it on Close. | |
220 | 238 | r.init.Do(r.initFunc) |
221 | 239 | chunk, err := r.curChunk() |
222 | 240 | if err != nil { |
224 | 242 | return 0, err |
225 | 243 | } |
226 | 244 | n, err := chunk.Read(p) |
245 | r.vrfy.Write(p[:n]) // Hash.Write never returns an error. | |
227 | 246 | r.read += n |
228 | 247 | if err == io.EOF { |
229 | 248 | if chunk.final { |
255 | 274 | return rs |
256 | 275 | } |
257 | 276 | |
258 | // copied from io.Copy, basically. | |
259 | func copyContext(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) { | |
260 | buf := make([]byte, 32*1024) | |
261 | for { | |
262 | if ctx.Err() != nil { | |
263 | err = ctx.Err() | |
264 | return | |
265 | } | |
266 | nr, er := src.Read(buf) | |
267 | if nr > 0 { | |
268 | nw, ew := dst.Write(buf[0:nr]) | |
269 | if nw > 0 { | |
270 | written += int64(nw) | |
271 | } | |
272 | if ew != nil { | |
273 | err = ew | |
274 | break | |
275 | } | |
276 | if nr != nw { | |
277 | err = io.ErrShortWrite | |
278 | break | |
279 | } | |
280 | } | |
281 | if er == io.EOF { | |
282 | break | |
283 | } | |
284 | if er != nil { | |
285 | err = er | |
286 | break | |
287 | } | |
288 | } | |
289 | return written, err | |
277 | // Verify checks the SHA1 hash on download and compares it to the SHA1 hash | |
278 | // submitted on upload. If the two differ, this returns an error. If the | |
279 | // correct hash could not be calculated (if, for example, the entire object was | |
280 | // not read, or if the object was uploaded as a "large file" and thus the SHA1 | |
281 | // hash was not sent), this returns (nil, false). | |
282 | func (r *Reader) Verify() (error, bool) { | |
283 | got := fmt.Sprintf("%x", r.vrfy.Sum(nil)) | |
284 | if r.sha1 == got { | |
285 | return nil, true | |
286 | } | |
287 | // TODO: if the exact length of the file is requested AND the checksum is | |
288 | // bad, this will return (nil, false) instead of (an error, true). This is | |
289 | // because there's no good way that I can tell to determine that we've hit | |
290 | // the end of the file without reading off the end. Consider reading N+1 | |
291 | // bytes at the very end to close this hole. | |
292 | if r.offset > 0 || !r.readOffEnd || len(r.sha1) != 40 { | |
293 | return nil, false | |
294 | } | |
295 | return fmt.Errorf("bad hash: got %v, want %v", got, r.sha1), true | |
296 | } | |
297 | ||
298 | // strip a writer of any non-Write methods | |
299 | type onlyWriter struct{ w io.Writer } | |
300 | ||
301 | func (ow onlyWriter) Write(p []byte) (int, error) { return ow.w.Write(p) } | |
302 | ||
303 | func copyContext(ctx context.Context, w io.Writer, r io.Reader) (int64, error) { | |
304 | var n int64 | |
305 | var err error | |
306 | done := make(chan struct{}) | |
307 | go func() { | |
308 | if _, ok := w.(*Writer); ok { | |
309 | w = onlyWriter{w} | |
310 | } | |
311 | n, err = io.Copy(w, r) | |
312 | close(done) | |
313 | }() | |
314 | select { | |
315 | case <-done: | |
316 | return n, err | |
317 | case <-ctx.Done(): | |
318 | return 0, ctx.Err() | |
319 | } | |
290 | 320 | } |
291 | 321 | |
292 | 322 | type noopResetter struct { |
294 | 324 | } |
295 | 325 | |
296 | 326 | func (noopResetter) Reset() error { return nil } |
327 | ||
328 | type backoff time.Duration | |
329 | ||
330 | func (b *backoff) wait(ctx context.Context) error { | |
331 | if *b == 0 { | |
332 | *b = backoff(time.Millisecond) | |
333 | } | |
334 | select { | |
335 | case <-time.After(time.Duration(*b)): | |
336 | if time.Duration(*b) < time.Second*10 { | |
337 | *b <<= 1 | |
338 | } | |
339 | return nil | |
340 | case <-ctx.Done(): | |
341 | return ctx.Err() | |
342 | } | |
343 | } | |
344 | ||
345 | func (b backoff) String() string { | |
346 | return time.Duration(b).String() | |
347 | } |
0 | // Copyright 2017, Google | |
0 | // Copyright 2017, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
64 | 64 | |
65 | 65 | csize int |
66 | 66 | ctx context.Context |
67 | cancel context.CancelFunc | |
67 | cancel context.CancelFunc // cancels ctx | |
68 | ctxf func() context.Context | |
69 | errf func(error) | |
68 | 70 | ready chan chunk |
71 | cdone chan struct{} | |
69 | 72 | wg sync.WaitGroup |
70 | 73 | start sync.Once |
71 | 74 | once sync.Once |
99 | 102 | } |
100 | 103 | w.emux.Lock() |
101 | 104 | defer w.emux.Unlock() |
102 | if w.err == nil { | |
103 | blog.V(1).Infof("error writing %s: %v", w.name, err) | |
104 | w.err = err | |
105 | w.cancel() | |
106 | } | |
105 | if w.err != nil { | |
106 | return | |
107 | } | |
108 | blog.V(1).Infof("error writing %s: %v", w.name, err) | |
109 | w.err = err | |
110 | w.cancel() | |
111 | if w.ctxf == nil { | |
112 | return | |
113 | } | |
114 | if w.errf == nil { | |
115 | w.errf = func(error) {} | |
116 | } | |
117 | w.errf(w.file.cancel(w.ctxf())) | |
107 | 118 | } |
108 | 119 | |
109 | 120 | func (w *Writer) getErr() error { |
125 | 136 | } |
126 | 137 | |
127 | 138 | var gid int32 |
139 | ||
140 | func sleepCtx(ctx context.Context, d time.Duration) error { | |
141 | select { | |
142 | case <-ctx.Done(): | |
143 | return ctx.Err() | |
144 | case <-time.After(d): | |
145 | return nil | |
146 | } | |
147 | } | |
128 | 148 | |
129 | 149 | func (w *Writer) thread() { |
130 | 150 | w.wg.Add(1) |
137 | 157 | return |
138 | 158 | } |
139 | 159 | for { |
140 | chunk, ok := <-w.ready | |
141 | if !ok { | |
160 | var cnk chunk | |
161 | select { | |
162 | case cnk = <-w.ready: | |
163 | case <-w.cdone: | |
142 | 164 | return |
143 | 165 | } |
144 | if sha, ok := w.seen[chunk.id]; ok { | |
145 | if sha != chunk.buf.Hash() { | |
166 | if sha, ok := w.seen[cnk.id]; ok { | |
167 | if sha != cnk.buf.Hash() { | |
146 | 168 | w.setErr(errors.New("resumable upload was requested, but chunks don't match")) |
147 | 169 | return |
148 | 170 | } |
149 | chunk.buf.Close() | |
150 | w.completeChunk(chunk.id) | |
151 | blog.V(2).Infof("skipping chunk %d", chunk.id) | |
171 | cnk.buf.Close() | |
172 | w.completeChunk(cnk.id) | |
173 | blog.V(2).Infof("skipping chunk %d", cnk.id) | |
152 | 174 | continue |
153 | 175 | } |
154 | blog.V(2).Infof("thread %d handling chunk %d", id, chunk.id) | |
155 | r, err := chunk.buf.Reader() | |
176 | blog.V(2).Infof("thread %d handling chunk %d", id, cnk.id) | |
177 | r, err := cnk.buf.Reader() | |
156 | 178 | if err != nil { |
157 | 179 | w.setErr(err) |
158 | 180 | return |
159 | 181 | } |
160 | mr := &meteredReader{r: r, size: chunk.buf.Len()} | |
161 | w.registerChunk(chunk.id, mr) | |
182 | mr := &meteredReader{r: r, size: cnk.buf.Len()} | |
183 | w.registerChunk(cnk.id, mr) | |
162 | 184 | sleep := time.Millisecond * 15 |
163 | 185 | redo: |
164 | n, err := fc.uploadPart(w.ctx, mr, chunk.buf.Hash(), chunk.buf.Len(), chunk.id) | |
165 | if n != chunk.buf.Len() || err != nil { | |
186 | n, err := fc.uploadPart(w.ctx, mr, cnk.buf.Hash(), cnk.buf.Len(), cnk.id) | |
187 | if n != cnk.buf.Len() || err != nil { | |
166 | 188 | if w.o.b.r.reupload(err) { |
167 | time.Sleep(sleep) | |
189 | if err := sleepCtx(w.ctx, sleep); err != nil { | |
190 | w.setErr(err) | |
191 | w.completeChunk(cnk.id) | |
192 | cnk.buf.Close() // TODO: log error | |
193 | } | |
168 | 194 | sleep *= 2 |
169 | 195 | if sleep > time.Second*15 { |
170 | 196 | sleep = time.Second * 15 |
171 | 197 | } |
172 | blog.V(1).Infof("b2 writer: wrote %d of %d: error: %v; retrying", n, chunk.buf.Len(), err) | |
198 | blog.V(1).Infof("b2 writer: wrote %d of %d: error: %v; retrying", n, cnk.buf.Len(), err) | |
173 | 199 | f, err := w.file.getUploadPartURL(w.ctx) |
174 | 200 | if err != nil { |
175 | 201 | w.setErr(err) |
176 | w.completeChunk(chunk.id) | |
177 | chunk.buf.Close() // TODO: log error | |
202 | w.completeChunk(cnk.id) | |
203 | cnk.buf.Close() // TODO: log error | |
178 | 204 | return |
179 | 205 | } |
180 | 206 | fc = f |
181 | 207 | goto redo |
182 | 208 | } |
183 | 209 | w.setErr(err) |
184 | w.completeChunk(chunk.id) | |
185 | chunk.buf.Close() // TODO: log error | |
210 | w.completeChunk(cnk.id) | |
211 | cnk.buf.Close() // TODO: log error | |
186 | 212 | return |
187 | 213 | } |
188 | w.completeChunk(chunk.id) | |
189 | chunk.buf.Close() // TODO: log error | |
190 | blog.V(2).Infof("chunk %d handled", chunk.id) | |
214 | w.completeChunk(cnk.id) | |
215 | cnk.buf.Close() // TODO: log error | |
216 | blog.V(2).Infof("chunk %d handled", cnk.id) | |
191 | 217 | } |
192 | 218 | }() |
193 | 219 | } |
220 | 246 | |
221 | 247 | // Write satisfies the io.Writer interface. |
222 | 248 | func (w *Writer) Write(p []byte) (int, error) { |
249 | if len(p) == 0 { | |
250 | return 0, nil | |
251 | } | |
223 | 252 | w.init() |
224 | 253 | if err := w.getErr(); err != nil { |
225 | 254 | return 0, err |
299 | 328 | } |
300 | 329 | return w.o.b.b.startLargeFile(w.ctx, w.name, ctype, w.info) |
301 | 330 | } |
331 | var got bool | |
332 | iter := w.o.b.List(w.ctx, ListPrefix(w.name), ListUnfinished()) | |
333 | var fi beFileInterface | |
334 | for iter.Next() { | |
335 | obj := iter.Object() | |
336 | if obj.Name() == w.name { | |
337 | got = true | |
338 | fi = obj.f | |
339 | } | |
340 | } | |
341 | if iter.Err() != nil { | |
342 | return nil, iter.Err() | |
343 | } | |
344 | if !got { | |
345 | w.Resume = false | |
346 | return w.getLargeFile() | |
347 | } | |
348 | ||
302 | 349 | next := 1 |
303 | 350 | seen := make(map[int]string) |
304 | 351 | var size int64 |
305 | var fi beFileInterface | |
306 | 352 | for { |
307 | cur := &Cursor{name: w.name} | |
308 | objs, _, err := w.o.b.ListObjects(w.ctx, 1, cur) | |
309 | if err != nil { | |
310 | return nil, err | |
311 | } | |
312 | if len(objs) < 1 || objs[0].name != w.name { | |
313 | w.Resume = false | |
314 | return w.getLargeFile() | |
315 | } | |
316 | fi = objs[0].f | |
317 | 353 | parts, n, err := fi.listParts(w.ctx, next, 100) |
318 | 354 | if err != nil { |
319 | 355 | return nil, err |
347 | 383 | } |
348 | 384 | w.file = lf |
349 | 385 | w.ready = make(chan chunk) |
386 | w.cdone = make(chan struct{}) | |
350 | 387 | if w.ConcurrentUploads < 1 { |
351 | 388 | w.ConcurrentUploads = 1 |
352 | 389 | } |
358 | 395 | return err |
359 | 396 | } |
360 | 397 | select { |
398 | case <-w.cdone: | |
399 | return nil | |
361 | 400 | case w.ready <- chunk{ |
362 | 401 | id: w.cidx + 1, |
363 | 402 | buf: w.w, |
442 | 481 | func (w *Writer) Close() error { |
443 | 482 | w.done.Do(func() { |
444 | 483 | if !w.everStarted { |
484 | w.init() | |
485 | w.setErr(w.simpleWriteFile()) | |
445 | 486 | return |
446 | 487 | } |
447 | 488 | defer w.o.b.c.removeWriter(w) |
461 | 502 | return |
462 | 503 | } |
463 | 504 | } |
464 | close(w.ready) | |
505 | // See https://github.com/kurin/blazer/issues/60 for why we use a special | |
506 | // channel for this. | |
507 | close(w.cdone) | |
465 | 508 | w.wg.Wait() |
466 | 509 | f, err := w.file.finishLargeFile(w.ctx) |
467 | 510 | if err != nil { |
473 | 516 | return w.getErr() |
474 | 517 | } |
475 | 518 | |
476 | // WithAttrs sets the writable attributes of the resulting file to given | |
477 | // values. WithAttrs must be called before the first call to Write. | |
478 | func (w *Writer) WithAttrs(attrs *Attrs) *Writer { | |
519 | func (w *Writer) withAttrs(attrs *Attrs) *Writer { | |
479 | 520 | w.contentType = attrs.ContentType |
480 | 521 | w.info = make(map[string]string) |
481 | 522 | for k, v := range attrs.Info { |
482 | 523 | w.info[k] = v |
483 | 524 | } |
525 | if len(w.info) < 10 && attrs.SHA1 != "" { | |
526 | w.info["large_file_sha1"] = attrs.SHA1 | |
527 | } | |
484 | 528 | if len(w.info) < 10 && !attrs.LastModified.IsZero() { |
485 | 529 | w.info["src_last_modified_millis"] = fmt.Sprintf("%d", attrs.LastModified.UnixNano()/1e6) |
486 | 530 | } |
487 | 531 | return w |
532 | } | |
533 | ||
534 | // A WriterOption sets Writer-specific behavior. | |
535 | type WriterOption func(*Writer) | |
536 | ||
537 | // WithAttrs attaches the given Attrs to the writer. | |
538 | func WithAttrsOption(attrs *Attrs) WriterOption { | |
539 | return func(w *Writer) { | |
540 | w.withAttrs(attrs) | |
541 | } | |
542 | } | |
543 | ||
544 | // WithCancelOnError requests the writer, if it has started a large file | |
545 | // upload, to call b2_cancel_large_file on any permanent error. It calls ctxf | |
546 | // to obtain a context with which to cancel the file; this is to allow callers | |
547 | // to set specific timeouts. If errf is non-nil, then it is called with the | |
548 | // (possibly nil) output of b2_cancel_large_file. | |
549 | func WithCancelOnError(ctxf func() context.Context, errf func(error)) WriterOption { | |
550 | return func(w *Writer) { | |
551 | w.ctxf = ctxf | |
552 | w.errf = errf | |
553 | } | |
554 | } | |
555 | ||
556 | // DefaultWriterOptions returns a ClientOption that will apply the given | |
557 | // WriterOptions to every Writer. These options can be overridden by passing | |
558 | // new options to NewWriter. | |
559 | func DefaultWriterOptions(opts ...WriterOption) ClientOption { | |
560 | return func(c *clientOptions) { | |
561 | c.writerOpts = opts | |
562 | } | |
488 | 563 | } |
489 | 564 | |
490 | 565 | func (w *Writer) status() *WriterStatus { |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
41 | 41 | |
42 | 42 | const ( |
43 | 43 | APIBase = "https://api.backblazeb2.com" |
44 | DefaultUserAgent = "blazer/0.4.4" | |
44 | DefaultUserAgent = "blazer/0.5.3" | |
45 | 45 | ) |
46 | 46 | |
47 | 47 | type b2err struct { |
48 | msg string | |
49 | method string | |
50 | retry int | |
51 | code int | |
48 | msg string | |
49 | method string | |
50 | retry int | |
51 | code int | |
52 | msgCode string | |
52 | 53 | } |
53 | 54 | |
54 | 55 | func (e b2err) Error() string { |
106 | 107 | return e.code, e.msg |
107 | 108 | } |
108 | 109 | |
110 | // MsgCode returns the error code, msgCode and message. | |
111 | func MsgCode(err error) (int, string, string) { | |
112 | e, ok := err.(b2err) | |
113 | if !ok { | |
114 | return 0, "", "" | |
115 | } | |
116 | return e.code, e.msgCode, e.msg | |
117 | } | |
118 | ||
109 | 119 | const ( |
110 | 120 | // ReAuthenticate indicates that the B2 account authentication tokens have |
111 | 121 | // expired, and should be refreshed with a new call to AuthorizeAccount. |
152 | 162 | retryAfter = int(r) |
153 | 163 | } |
154 | 164 | return b2err{ |
155 | msg: msgBody, | |
156 | retry: retryAfter, | |
157 | code: resp.StatusCode, | |
158 | method: resp.Request.Header.Get("X-Blazer-Method"), | |
165 | msg: msgBody, | |
166 | retry: retryAfter, | |
167 | code: resp.StatusCode, | |
168 | msgCode: msg.Code, | |
169 | method: resp.Request.Header.Get("X-Blazer-Method"), | |
159 | 170 | } |
160 | 171 | } |
161 | 172 | |
185 | 196 | hstr := strings.Join(headers, ";") |
186 | 197 | method := req.Header.Get("X-Blazer-Method") |
187 | 198 | if args != nil { |
188 | blog.V(2).Infof(">> %s uri: %v headers: {%s} args: (%s)", method, req.URL, hstr, string(args)) | |
199 | blog.V(2).Infof(">> %s %v: %v headers: {%s} args: (%s)", method, req.Method, req.URL, hstr, string(args)) | |
189 | 200 | return |
190 | 201 | } |
191 | blog.V(2).Infof(">> %s uri: %v {%s} (no args)", method, req.URL, hstr) | |
202 | blog.V(2).Infof(">> %s %v: %v {%s} (no args)", method, req.Method, req.URL, hstr) | |
192 | 203 | } |
193 | 204 | |
194 | 205 | var authRegexp = regexp.MustCompile(`"authorizationToken": ".[^"]*"`) |
267 | 278 | downloadURI string |
268 | 279 | minPartSize int |
269 | 280 | opts *b2Options |
281 | bucket string // restricted to this bucket if present | |
282 | pfx string // restricted to objects with this prefix if present | |
270 | 283 | } |
271 | 284 | |
272 | 285 | // Update replaces the B2 object with a new one, in-place. |
318 | 331 | if rb == nil { |
319 | 332 | return nil |
320 | 333 | } |
334 | if rb.getSize() == 0 { | |
335 | // https://github.com/kurin/blazer/issues/57 | |
336 | // When body is non-nil, but the request's ContentLength is 0, it is | |
337 | // replaced with -1, which causes the client to send a chunked encoding, | |
338 | // which confuses B2. | |
339 | return http.NoBody | |
340 | } | |
321 | 341 | return rb.body |
322 | 342 | } |
323 | 343 | |
426 | 446 | authToken: b2resp.AuthToken, |
427 | 447 | apiURI: b2resp.URI, |
428 | 448 | downloadURI: b2resp.DownloadURI, |
429 | minPartSize: b2resp.MinPartSize, | |
449 | minPartSize: b2resp.PartSize, | |
450 | bucket: b2resp.Allowed.Bucket, | |
451 | pfx: b2resp.Allowed.Prefix, | |
430 | 452 | opts: b2opts, |
431 | 453 | }, nil |
432 | 454 | } |
478 | 500 | } |
479 | 501 | } |
480 | 502 | |
503 | // SetAPIBase returns an AuthOption that uses the given URL as the base for API | |
504 | // requests. | |
505 | func SetAPIBase(url string) AuthOption { | |
506 | return func(o *b2Options) { | |
507 | o.apiBase = url | |
508 | } | |
509 | } | |
510 | ||
481 | 511 | type LifecycleRule struct { |
482 | 512 | Prefix string |
483 | 513 | DaysNewUntilHidden int |
523 | 553 | Name: name, |
524 | 554 | Info: b2resp.Info, |
525 | 555 | LifecycleRules: respRules, |
526 | id: b2resp.BucketID, | |
556 | ID: b2resp.BucketID, | |
527 | 557 | rev: b2resp.Revision, |
528 | 558 | b2: b, |
529 | 559 | }, nil |
533 | 563 | func (b *Bucket) DeleteBucket(ctx context.Context) error { |
534 | 564 | b2req := &b2types.DeleteBucketRequest{ |
535 | 565 | AccountID: b.b2.accountID, |
536 | BucketID: b.id, | |
566 | BucketID: b.ID, | |
537 | 567 | } |
538 | 568 | headers := map[string]string{ |
539 | 569 | "Authorization": b.b2.authToken, |
547 | 577 | Type string |
548 | 578 | Info map[string]string |
549 | 579 | LifecycleRules []LifecycleRule |
550 | id string | |
580 | ID string | |
551 | 581 | rev int |
552 | 582 | b2 *B2 |
553 | 583 | } |
564 | 594 | } |
565 | 595 | b2req := &b2types.UpdateBucketRequest{ |
566 | 596 | AccountID: b.b2.accountID, |
567 | BucketID: b.id, | |
597 | BucketID: b.ID, | |
568 | 598 | // Name: b.Name, |
569 | 599 | Type: b.Type, |
570 | 600 | Info: b.Info, |
591 | 621 | Type: b2resp.Type, |
592 | 622 | Info: b2resp.Info, |
593 | 623 | LifecycleRules: respRules, |
594 | id: b2resp.BucketID, | |
624 | ID: b2resp.BucketID, | |
595 | 625 | b2: b.b2, |
596 | 626 | }, nil |
597 | 627 | } |
601 | 631 | return b.b2.downloadURI |
602 | 632 | } |
603 | 633 | |
604 | // ListBuckets wraps b2_list_buckets. | |
605 | func (b *B2) ListBuckets(ctx context.Context) ([]*Bucket, error) { | |
634 | // ListBuckets wraps b2_list_buckets. If name is non-empty, only that bucket | |
635 | // will be returned if it exists; else nothing will be returned. | |
636 | func (b *B2) ListBuckets(ctx context.Context, name string) ([]*Bucket, error) { | |
606 | 637 | b2req := &b2types.ListBucketsRequest{ |
607 | 638 | AccountID: b.accountID, |
639 | Bucket: b.bucket, | |
640 | Name: name, | |
608 | 641 | } |
609 | 642 | b2resp := &b2types.ListBucketsResponse{} |
610 | 643 | headers := map[string]string{ |
628 | 661 | Type: bucket.Type, |
629 | 662 | Info: bucket.Info, |
630 | 663 | LifecycleRules: rules, |
631 | id: bucket.BucketID, | |
664 | ID: bucket.BucketID, | |
632 | 665 | rev: bucket.Revision, |
633 | 666 | b2: b, |
634 | 667 | }) |
659 | 692 | // GetUploadURL wraps b2_get_upload_url. |
660 | 693 | func (b *Bucket) GetUploadURL(ctx context.Context) (*URL, error) { |
661 | 694 | b2req := &b2types.GetUploadURLRequest{ |
662 | BucketID: b.id, | |
695 | BucketID: b.ID, | |
663 | 696 | } |
664 | 697 | b2resp := &b2types.GetUploadURLResponse{} |
665 | 698 | headers := map[string]string{ |
683 | 716 | Status string |
684 | 717 | Timestamp time.Time |
685 | 718 | Info *FileInfo |
686 | id string | |
719 | ID string | |
687 | 720 | b2 *B2 |
688 | 721 | } |
689 | 722 | |
690 | 723 | // File returns a bare File struct, but with the appropriate id and b2 |
691 | 724 | // interfaces. |
692 | 725 | func (b *Bucket) File(id, name string) *File { |
693 | return &File{id: id, b2: b.b2, Name: name} | |
726 | return &File{ID: id, b2: b.b2, Name: name} | |
694 | 727 | } |
695 | 728 | |
696 | 729 | // UploadFile wraps b2_upload_file. |
714 | 747 | Size: int64(size), |
715 | 748 | Timestamp: millitime(b2resp.Timestamp), |
716 | 749 | Status: b2resp.Action, |
717 | id: b2resp.FileID, | |
750 | ID: b2resp.FileID, | |
718 | 751 | b2: url.b2, |
719 | 752 | }, nil |
720 | 753 | } |
723 | 756 | func (f *File) DeleteFileVersion(ctx context.Context) error { |
724 | 757 | b2req := &b2types.DeleteFileVersionRequest{ |
725 | 758 | Name: f.Name, |
726 | FileID: f.id, | |
759 | FileID: f.ID, | |
727 | 760 | } |
728 | 761 | headers := map[string]string{ |
729 | 762 | "Authorization": f.b2.authToken, |
733 | 766 | |
734 | 767 | // LargeFile holds information necessary to implement B2 large file support. |
735 | 768 | type LargeFile struct { |
736 | id string | |
769 | ID string | |
737 | 770 | b2 *B2 |
738 | 771 | |
739 | 772 | mu sync.Mutex |
744 | 777 | // StartLargeFile wraps b2_start_large_file. |
745 | 778 | func (b *Bucket) StartLargeFile(ctx context.Context, name, contentType string, info map[string]string) (*LargeFile, error) { |
746 | 779 | b2req := &b2types.StartLargeFileRequest{ |
747 | BucketID: b.id, | |
780 | BucketID: b.ID, | |
748 | 781 | Name: name, |
749 | 782 | ContentType: contentType, |
750 | 783 | Info: info, |
757 | 790 | return nil, err |
758 | 791 | } |
759 | 792 | return &LargeFile{ |
760 | id: b2resp.ID, | |
793 | ID: b2resp.ID, | |
761 | 794 | b2: b.b2, |
762 | 795 | hashes: make(map[int]string), |
763 | 796 | }, nil |
766 | 799 | // CancelLargeFile wraps b2_cancel_large_file. |
767 | 800 | func (l *LargeFile) CancelLargeFile(ctx context.Context) error { |
768 | 801 | b2req := &b2types.CancelLargeFileRequest{ |
769 | ID: l.id, | |
802 | ID: l.ID, | |
770 | 803 | } |
771 | 804 | headers := map[string]string{ |
772 | 805 | "Authorization": l.b2.authToken, |
784 | 817 | // ListParts wraps b2_list_parts. |
785 | 818 | func (f *File) ListParts(ctx context.Context, next, count int) ([]*FilePart, int, error) { |
786 | 819 | b2req := &b2types.ListPartsRequest{ |
787 | ID: f.id, | |
820 | ID: f.ID, | |
788 | 821 | Start: next, |
789 | 822 | Count: count, |
790 | 823 | } |
815 | 848 | s[k] = v |
816 | 849 | } |
817 | 850 | return &LargeFile{ |
818 | id: f.id, | |
851 | ID: f.ID, | |
819 | 852 | b2: f.b2, |
820 | 853 | size: size, |
821 | 854 | hashes: s, |
841 | 874 | // GetUploadPartURL wraps b2_get_upload_part_url. |
842 | 875 | func (l *LargeFile) GetUploadPartURL(ctx context.Context) (*FileChunk, error) { |
843 | 876 | b2req := &getUploadPartURLRequest{ |
844 | ID: l.id, | |
877 | ID: l.ID, | |
845 | 878 | } |
846 | 879 | b2resp := &getUploadPartURLResponse{} |
847 | 880 | headers := map[string]string{ |
897 | 930 | l.mu.Lock() |
898 | 931 | defer l.mu.Unlock() |
899 | 932 | b2req := &b2types.FinishLargeFileRequest{ |
900 | ID: l.id, | |
933 | ID: l.ID, | |
901 | 934 | Hashes: make([]string, len(l.hashes)), |
902 | 935 | } |
903 | 936 | b2resp := &b2types.FinishLargeFileResponse{} |
918 | 951 | Size: l.size, |
919 | 952 | Timestamp: millitime(b2resp.Timestamp), |
920 | 953 | Status: b2resp.Action, |
921 | id: b2resp.FileID, | |
954 | ID: b2resp.FileID, | |
922 | 955 | b2: l.b2, |
923 | 956 | }, nil |
924 | 957 | } |
926 | 959 | // ListUnfinishedLargeFiles wraps b2_list_unfinished_large_files. |
927 | 960 | func (b *Bucket) ListUnfinishedLargeFiles(ctx context.Context, count int, continuation string) ([]*File, string, error) { |
928 | 961 | b2req := &b2types.ListUnfinishedLargeFilesRequest{ |
929 | BucketID: b.id, | |
962 | BucketID: b.ID, | |
930 | 963 | Continuation: continuation, |
931 | 964 | Count: count, |
932 | 965 | } |
944 | 977 | Name: f.Name, |
945 | 978 | Timestamp: millitime(f.Timestamp), |
946 | 979 | b2: b.b2, |
947 | id: f.FileID, | |
980 | ID: f.FileID, | |
948 | 981 | Info: &FileInfo{ |
949 | 982 | Name: f.Name, |
950 | 983 | ContentType: f.ContentType, |
958 | 991 | |
959 | 992 | // ListFileNames wraps b2_list_file_names. |
960 | 993 | func (b *Bucket) ListFileNames(ctx context.Context, count int, continuation, prefix, delimiter string) ([]*File, string, error) { |
994 | if prefix == "" { | |
995 | prefix = b.b2.pfx | |
996 | } | |
961 | 997 | b2req := &b2types.ListFileNamesRequest{ |
962 | 998 | Count: count, |
963 | 999 | Continuation: continuation, |
964 | BucketID: b.id, | |
1000 | BucketID: b.ID, | |
965 | 1001 | Prefix: prefix, |
966 | 1002 | Delimiter: delimiter, |
967 | 1003 | } |
983 | 1019 | Info: &FileInfo{ |
984 | 1020 | Name: f.Name, |
985 | 1021 | SHA1: f.SHA1, |
1022 | MD5: f.MD5, | |
986 | 1023 | Size: f.Size, |
987 | 1024 | ContentType: f.ContentType, |
988 | 1025 | Info: f.Info, |
989 | 1026 | Status: f.Action, |
990 | 1027 | Timestamp: millitime(f.Timestamp), |
991 | 1028 | }, |
992 | id: f.FileID, | |
1029 | ID: f.FileID, | |
993 | 1030 | b2: b.b2, |
994 | 1031 | }) |
995 | 1032 | } |
998 | 1035 | |
999 | 1036 | // ListFileVersions wraps b2_list_file_versions. |
1000 | 1037 | func (b *Bucket) ListFileVersions(ctx context.Context, count int, startName, startID, prefix, delimiter string) ([]*File, string, string, error) { |
1038 | if prefix == "" { | |
1039 | prefix = b.b2.pfx | |
1040 | } | |
1001 | 1041 | b2req := &b2types.ListFileVersionsRequest{ |
1002 | BucketID: b.id, | |
1042 | BucketID: b.ID, | |
1003 | 1043 | Count: count, |
1004 | 1044 | StartName: startName, |
1005 | 1045 | StartID: startID, |
1023 | 1063 | Info: &FileInfo{ |
1024 | 1064 | Name: f.Name, |
1025 | 1065 | SHA1: f.SHA1, |
1066 | MD5: f.MD5, | |
1026 | 1067 | Size: f.Size, |
1027 | 1068 | ContentType: f.ContentType, |
1028 | 1069 | Info: f.Info, |
1029 | 1070 | Status: f.Action, |
1030 | 1071 | Timestamp: millitime(f.Timestamp), |
1031 | 1072 | }, |
1032 | id: f.FileID, | |
1073 | ID: f.FileID, | |
1033 | 1074 | b2: b.b2, |
1034 | 1075 | }) |
1035 | 1076 | } |
1037 | 1078 | } |
1038 | 1079 | |
1039 | 1080 | // GetDownloadAuthorization wraps b2_get_download_authorization. |
1040 | func (b *Bucket) GetDownloadAuthorization(ctx context.Context, prefix string, valid time.Duration) (string, error) { | |
1081 | func (b *Bucket) GetDownloadAuthorization(ctx context.Context, prefix string, valid time.Duration, contentDisposition string) (string, error) { | |
1041 | 1082 | b2req := &b2types.GetDownloadAuthorizationRequest{ |
1042 | BucketID: b.id, | |
1043 | Prefix: prefix, | |
1044 | Valid: int(valid.Seconds()), | |
1083 | BucketID: b.ID, | |
1084 | Prefix: prefix, | |
1085 | Valid: int(valid.Seconds()), | |
1086 | ContentDisposition: contentDisposition, | |
1045 | 1087 | } |
1046 | 1088 | b2resp := &b2types.GetDownloadAuthorizationResponse{} |
1047 | 1089 | headers := map[string]string{ |
1074 | 1116 | } |
1075 | 1117 | |
1076 | 1118 | // DownloadFileByName wraps b2_download_file_by_name. |
1077 | func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, size int64) (*FileReader, error) { | |
1119 | func (b *Bucket) DownloadFileByName(ctx context.Context, name string, offset, size int64, header bool) (*FileReader, error) { | |
1078 | 1120 | uri := fmt.Sprintf("%s/file/%s/%s", b.b2.downloadURI, b.Name, escape(name)) |
1079 | req, err := http.NewRequest("GET", uri, nil) | |
1121 | method := "GET" | |
1122 | if header { | |
1123 | method = "HEAD" | |
1124 | } | |
1125 | req, err := http.NewRequest(method, uri, nil) | |
1080 | 1126 | if err != nil { |
1081 | 1127 | return nil, err |
1082 | 1128 | } |
1120 | 1166 | } |
1121 | 1167 | info[name] = val |
1122 | 1168 | } |
1169 | sha1 := resp.Header.Get("X-Bz-Content-Sha1") | |
1170 | if sha1 == "none" && info["Large_file_sha1"] != "" { | |
1171 | sha1 = info["Large_file_sha1"] | |
1172 | } | |
1123 | 1173 | return &FileReader{ |
1124 | 1174 | ReadCloser: resp.Body, |
1125 | SHA1: resp.Header.Get("X-Bz-Content-Sha1"), | |
1175 | SHA1: sha1, | |
1126 | 1176 | ID: resp.Header.Get("X-Bz-File-Id"), |
1127 | 1177 | ContentType: resp.Header.Get("Content-Type"), |
1128 | 1178 | ContentLength: int(clen), |
1133 | 1183 | // HideFile wraps b2_hide_file. |
1134 | 1184 | func (b *Bucket) HideFile(ctx context.Context, name string) (*File, error) { |
1135 | 1185 | b2req := &b2types.HideFileRequest{ |
1136 | BucketID: b.id, | |
1186 | BucketID: b.ID, | |
1137 | 1187 | File: name, |
1138 | 1188 | } |
1139 | 1189 | b2resp := &b2types.HideFileResponse{} |
1148 | 1198 | Name: name, |
1149 | 1199 | Timestamp: millitime(b2resp.Timestamp), |
1150 | 1200 | b2: b.b2, |
1151 | id: b2resp.ID, | |
1201 | ID: b2resp.ID, | |
1152 | 1202 | }, nil |
1153 | 1203 | } |
1154 | 1204 | |
1156 | 1206 | type FileInfo struct { |
1157 | 1207 | Name string |
1158 | 1208 | SHA1 string |
1209 | MD5 string | |
1159 | 1210 | Size int64 |
1160 | 1211 | ContentType string |
1161 | 1212 | Info map[string]string |
1166 | 1217 | // GetFileInfo wraps b2_get_file_info. |
1167 | 1218 | func (f *File) GetFileInfo(ctx context.Context) (*FileInfo, error) { |
1168 | 1219 | b2req := &b2types.GetFileInfoRequest{ |
1169 | ID: f.id, | |
1220 | ID: f.ID, | |
1170 | 1221 | } |
1171 | 1222 | b2resp := &b2types.GetFileInfoResponse{} |
1172 | 1223 | headers := map[string]string{ |
1181 | 1232 | f.Info = &FileInfo{ |
1182 | 1233 | Name: b2resp.Name, |
1183 | 1234 | SHA1: b2resp.SHA1, |
1235 | MD5: b2resp.MD5, | |
1184 | 1236 | Size: b2resp.Size, |
1185 | 1237 | ContentType: b2resp.ContentType, |
1186 | 1238 | Info: b2resp.Info, |
1189 | 1241 | } |
1190 | 1242 | return f.Info, nil |
1191 | 1243 | } |
1244 | ||
// Key is a B2 application key.
type Key struct {
	ID           string    // the application key ID
	Secret       string    // the key secret; only populated by CreateKey, not by ListKeys
	Name         string    // the human-readable key name
	Capabilities []string  // the capability strings granted to this key
	Expires      time.Time // expiration time; the zero-ish epoch value when the key has no expiry
	b2           *B2       // the client used to issue follow-up requests (e.g. Delete)
}
1254 | ||
1255 | // CreateKey wraps b2_create_key. | |
1256 | func (b *B2) CreateKey(ctx context.Context, name string, caps []string, valid time.Duration, bucketID string, prefix string) (*Key, error) { | |
1257 | b2req := &b2types.CreateKeyRequest{ | |
1258 | AccountID: b.accountID, | |
1259 | Capabilities: caps, | |
1260 | Name: name, | |
1261 | Valid: int(valid.Seconds()), | |
1262 | BucketID: bucketID, | |
1263 | Prefix: prefix, | |
1264 | } | |
1265 | b2resp := &b2types.CreateKeyResponse{} | |
1266 | headers := map[string]string{ | |
1267 | "Authorization": b.authToken, | |
1268 | } | |
1269 | if err := b.opts.makeRequest(ctx, "b2_create_key", "POST", b.apiURI+b2types.V1api+"b2_create_key", b2req, b2resp, headers, nil); err != nil { | |
1270 | return nil, err | |
1271 | } | |
1272 | return &Key{ | |
1273 | Name: b2resp.Name, | |
1274 | ID: b2resp.ID, | |
1275 | Secret: b2resp.Secret, | |
1276 | Capabilities: b2resp.Capabilities, | |
1277 | Expires: millitime(b2resp.Expires), | |
1278 | b2: b, | |
1279 | }, nil | |
1280 | } | |
1281 | ||
1282 | // Delete wraps b2_delete_key. | |
1283 | func (k *Key) Delete(ctx context.Context) error { | |
1284 | b2req := &b2types.DeleteKeyRequest{ | |
1285 | KeyID: k.ID, | |
1286 | } | |
1287 | headers := map[string]string{ | |
1288 | "Authorization": k.b2.authToken, | |
1289 | } | |
1290 | return k.b2.opts.makeRequest(ctx, "b2_delete_key", "POST", k.b2.apiURI+b2types.V1api+"b2_delete_key", b2req, nil, headers, nil) | |
1291 | } | |
1292 | ||
1293 | // ListKeys wraps b2_list_keys. | |
1294 | func (b *B2) ListKeys(ctx context.Context, max int, next string) ([]*Key, string, error) { | |
1295 | b2req := &b2types.ListKeysRequest{ | |
1296 | AccountID: b.accountID, | |
1297 | Max: max, | |
1298 | Next: next, | |
1299 | } | |
1300 | headers := map[string]string{ | |
1301 | "Authorization": b.authToken, | |
1302 | } | |
1303 | b2resp := &b2types.ListKeysResponse{} | |
1304 | if err := b.opts.makeRequest(ctx, "b2_list_keys", "POST", b.apiURI+b2types.V1api+"b2_list_keys", b2req, b2resp, headers, nil); err != nil { | |
1305 | return nil, "", err | |
1306 | } | |
1307 | var keys []*Key | |
1308 | for _, key := range b2resp.Keys { | |
1309 | keys = append(keys, &Key{ | |
1310 | Name: key.Name, | |
1311 | ID: key.ID, | |
1312 | Capabilities: key.Capabilities, | |
1313 | Expires: millitime(key.Expires), | |
1314 | b2: b, | |
1315 | }) | |
1316 | } | |
1317 | return keys, b2resp.Next, nil | |
1318 | } |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
107 | 107 | } |
108 | 108 | |
109 | 109 | // b2_list_buckets |
110 | buckets, err := b2.ListBuckets(ctx) | |
110 | buckets, err := b2.ListBuckets(ctx, "") | |
111 | 111 | if err != nil { |
112 | 112 | t.Fatal(err) |
113 | 113 | } |
120 | 120 | } |
121 | 121 | if !found { |
122 | 122 | t.Errorf("%s: new bucket not found", bname) |
123 | } | |
124 | ||
125 | buckets, err = b2.ListBuckets(ctx, bname) | |
126 | if len(buckets) != 1 { | |
127 | t.Errorf("excpected exactly 1 bucket, got %d", len(buckets)) | |
128 | } else { | |
129 | if buckets[0].Name != bname { | |
130 | t.Errorf("got %s, want %s", bname, buckets[0].Name) | |
131 | } | |
123 | 132 | } |
124 | 133 | |
125 | 134 | // b2_get_upload_url |
228 | 237 | } |
229 | 238 | |
230 | 239 | // b2_download_file_by_name |
231 | fr, err := bucket.DownloadFileByName(ctx, smallFileName, 0, 0) | |
240 | fr, err := bucket.DownloadFileByName(ctx, smallFileName, 0, 0, false) | |
232 | 241 | if err != nil { |
233 | 242 | t.Fatal(err) |
234 | 243 | } |
264 | 273 | } |
265 | 274 | |
266 | 275 | // b2_get_download_authorization |
267 | if _, err := bucket.GetDownloadAuthorization(ctx, "foo/", 24*time.Hour); err != nil { | |
276 | if _, err := bucket.GetDownloadAuthorization(ctx, "foo/", 24*time.Hour, "attachment"); err != nil { | |
268 | 277 | t.Errorf("failed to get download auth token: %v", err) |
269 | 278 | } |
270 | 279 | } |
279 | 288 | |
280 | 289 | hung := make(chan struct{}) |
281 | 290 | |
282 | // An http.RoundTripper that dies after sending ~10k bytes. | |
291 | // An http.RoundTripper that dies and hangs after sending ~10k bytes. | |
283 | 292 | hang := func() { |
284 | 293 | close(hung) |
285 | 294 | select {} |
316 | 325 | |
317 | 326 | go func() { |
318 | 327 | ue.UploadFile(ctx, buf, buf.Len(), smallFileName, "application/octet-stream", smallSHA1, nil) |
319 | t.Fatal("this ought not to be reachable") | |
320 | 328 | }() |
321 | 329 | |
322 | 330 | <-hung |
638 | 646 | }() |
639 | 647 | |
640 | 648 | // b2_download_file_by_name |
641 | fr, err := bucket.DownloadFileByName(ctx, filename, 0, 0) | |
649 | fr, err := bucket.DownloadFileByName(ctx, filename, 0, 0, false) | |
642 | 650 | if err != nil { |
643 | 651 | t.Fatal(err) |
644 | 652 | } |
0 | // Copyright 2017, Google | |
0 | // Copyright 2017, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
0 | // b2keys is a small utility for managing Backblaze B2 keys. | |
1 | package main | |
2 | ||
3 | import ( | |
4 | "context" | |
5 | "flag" | |
6 | "fmt" | |
7 | "os" | |
8 | "time" | |
9 | ||
10 | "github.com/google/subcommands" | |
11 | "github.com/kurin/blazer/b2" | |
12 | ) | |
13 | ||
// Environment variables from which the B2 credentials are read.
const (
	apiID  = "B2_ACCOUNT_ID"
	apiKey = "B2_SECRET_KEY"
)
18 | ||
// main registers the available subcommands and dispatches to the one named
// on the command line, exiting with its status code.
func main() {
	subcommands.Register(&create{}, "")
	flag.Parse()
	ctx := context.Background()
	os.Exit(int(subcommands.Execute(ctx)))
}
25 | ||
// create implements the "create" subcommand, which makes a new B2
// application key.
type create struct {
	d      *time.Duration // optional key lifetime; 0 means no expiry
	bucket *string        // optional bucket name the key is restricted to
	pfx    *string        // optional object-name prefix the key is restricted to
}

// Name returns the subcommand's name.
func (c *create) Name() string { return "create" }

// Synopsis returns a one-line description for help output.
func (c *create) Synopsis() string { return "create a new application key" }

// Usage returns the full usage string for help output.
func (c *create) Usage() string {
	return "b2keys create [-bucket bucket] [-duration duration] [-prefix pfx] name capability [capability ...]"
}

// SetFlags registers the subcommand's flags on fs.
func (c *create) SetFlags(fs *flag.FlagSet) {
	c.d = fs.Duration("duration", 0, "the lifetime of the new key")
	c.bucket = fs.String("bucket", "", "limit the key to the given bucket")
	c.pfx = fs.String("prefix", "", "limit the key to the objects starting with prefix")
}
43 | ||
44 | func (c *create) Execute(ctx context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus { | |
45 | id := os.Getenv(apiID) | |
46 | key := os.Getenv(apiKey) | |
47 | if id == "" || key == "" { | |
48 | fmt.Fprintf(os.Stderr, "both %s and %s must be set in the environment", apiID, apiKey) | |
49 | return subcommands.ExitUsageError | |
50 | } | |
51 | ||
52 | args := f.Args() | |
53 | if len(args) < 2 { | |
54 | fmt.Fprintf(os.Stderr, "%s\n", c.Usage()) | |
55 | return subcommands.ExitUsageError | |
56 | } | |
57 | name := args[0] | |
58 | caps := args[1:] | |
59 | ||
60 | var opts []b2.KeyOption | |
61 | if *c.d > 0 { | |
62 | opts = append(opts, b2.Lifetime(*c.d)) | |
63 | } | |
64 | if *c.pfx != "" { | |
65 | opts = append(opts, b2.Prefix(*c.pfx)) | |
66 | } | |
67 | opts = append(opts, b2.Capabilities(caps...)) | |
68 | ||
69 | client, err := b2.NewClient(ctx, id, key, b2.UserAgent("b2keys")) | |
70 | if err != nil { | |
71 | fmt.Fprintf(os.Stderr, "%v\n", err) | |
72 | return subcommands.ExitFailure | |
73 | } | |
74 | ||
75 | var cr creater = client | |
76 | ||
77 | if *c.bucket != "" { | |
78 | bucket, err := client.Bucket(ctx, *c.bucket) | |
79 | if err != nil { | |
80 | fmt.Fprintf(os.Stderr, "%v\n", err) | |
81 | return subcommands.ExitFailure | |
82 | } | |
83 | cr = bucket | |
84 | } | |
85 | ||
86 | b2key, err := cr.CreateKey(ctx, name, opts...) | |
87 | if err != nil { | |
88 | fmt.Fprintf(os.Stderr, "%v\n", err) | |
89 | return subcommands.ExitFailure | |
90 | } | |
91 | fmt.Printf("key=%s, secret=%s\n", b2key.ID(), b2key.Secret()) | |
92 | return subcommands.ExitSuccess | |
93 | } | |
94 | ||
// creater is satisfied by any value that can create a B2 application key;
// in this program that is both the account-wide client and a single
// bucket (see Execute).
type creater interface {
	CreateKey(context.Context, string, ...b2.KeyOption) (*b2.Key, error)
}
0 | package main | |
1 | ||
2 | import ( | |
3 | "context" | |
4 | "fmt" | |
5 | "net/http" | |
6 | ||
7 | "github.com/kurin/blazer/bonfire" | |
8 | "github.com/kurin/blazer/internal/pyre" | |
9 | ) | |
10 | ||
// superManager combines bucket management (LocalBucket) with on-disk file
// storage (FS) so a single value can serve as pyre's download manager.
type superManager struct {
	*bonfire.LocalBucket
	bonfire.FS
}
15 | ||
// main starts a local B2 API test server on localhost:8822, backed by
// on-disk file storage under /tmp/b2 and in-memory bucket metadata.
func main() {
	ctx := context.Background()
	mux := http.NewServeMux()

	// File contents are kept on disk; bucket metadata lives in memory.
	fs := bonfire.FS("/tmp/b2")
	bm := &bonfire.LocalBucket{Port: 8822}

	if err := pyre.RegisterServerOnMux(ctx, &pyre.Server{
		Account:   bonfire.Localhost(8822),
		LargeFile: fs,
		Bucket:    bm,
	}, mux); err != nil {
		fmt.Println(err)
		return
	}

	sm := superManager{
		LocalBucket: bm,
		FS:          fs,
	}

	pyre.RegisterLargeFileManagerOnMux(fs, mux)
	pyre.RegisterSimpleFileManagerOnMux(fs, mux)
	pyre.RegisterDownloadManagerOnMux(sm, mux)
	fmt.Println("ok")
	fmt.Println(http.ListenAndServe("localhost:8822", mux))
}
0 | // Copyright 2018, the Blazer authors | |
1 | // | |
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | // you may not use this file except in compliance with the License. | |
4 | // You may obtain a copy of the License at | |
5 | // | |
6 | // http://www.apache.org/licenses/LICENSE-2.0 | |
7 | // | |
8 | // Unless required by applicable law or agreed to in writing, software | |
9 | // distributed under the License is distributed on an "AS IS" BASIS, | |
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | // See the License for the specific language governing permissions and | |
12 | // limitations under the License. | |
13 | ||
14 | // Package bonfire implements the B2 service. | |
15 | package bonfire | |
16 | ||
17 | import ( | |
18 | "crypto/sha1" | |
19 | "encoding/json" | |
20 | "errors" | |
21 | "fmt" | |
22 | "io" | |
23 | "os" | |
24 | "path/filepath" | |
25 | "sort" | |
26 | "strconv" | |
27 | "sync" | |
28 | ||
29 | "github.com/kurin/blazer/internal/pyre" | |
30 | ) | |
31 | ||
32 | type FS string | |
33 | ||
34 | func (f FS) open(fp string) (io.WriteCloser, error) { | |
35 | if err := os.MkdirAll(filepath.Dir(fp), 0755); err != nil { | |
36 | return nil, err | |
37 | } | |
38 | return os.Create(fp) | |
39 | } | |
40 | ||
41 | func (f FS) PartWriter(id string, part int) (io.WriteCloser, error) { | |
42 | fp := filepath.Join(string(f), id, fmt.Sprintf("%d", part)) | |
43 | return f.open(fp) | |
44 | } | |
45 | ||
46 | func (f FS) Writer(bucket, name, id string) (io.WriteCloser, error) { | |
47 | fp := filepath.Join(string(f), bucket, name, id) | |
48 | return f.open(fp) | |
49 | } | |
50 | ||
51 | func (f FS) Parts(id string) ([]string, error) { | |
52 | dir := filepath.Join(string(f), id) | |
53 | file, err := os.Open(dir) | |
54 | if err != nil { | |
55 | return nil, err | |
56 | } | |
57 | defer file.Close() | |
58 | fs, err := file.Readdir(0) | |
59 | if err != nil { | |
60 | return nil, err | |
61 | } | |
62 | shas := make([]string, len(fs)-1) | |
63 | for _, fi := range fs { | |
64 | if fi.Name() == "info" { | |
65 | continue | |
66 | } | |
67 | i, err := strconv.ParseInt(fi.Name(), 10, 32) | |
68 | if err != nil { | |
69 | return nil, err | |
70 | } | |
71 | p, err := os.Open(filepath.Join(dir, fi.Name())) | |
72 | if err != nil { | |
73 | return nil, err | |
74 | } | |
75 | sha := sha1.New() | |
76 | if _, err := io.Copy(sha, p); err != nil { | |
77 | p.Close() | |
78 | return nil, err | |
79 | } | |
80 | p.Close() | |
81 | shas[int(i)-1] = fmt.Sprintf("%x", sha.Sum(nil)) | |
82 | } | |
83 | return shas, nil | |
84 | } | |
85 | ||
// fi is the metadata recorded in a large file's "info" file: the object's
// name and the bucket it belongs to.
type fi struct {
	Name   string
	Bucket string
}
90 | ||
91 | func (f FS) Start(bucketId, fileName, fileId string, bs []byte) error { | |
92 | w, err := f.open(filepath.Join(string(f), fileId, "info")) | |
93 | if err != nil { | |
94 | return err | |
95 | } | |
96 | if err := json.NewEncoder(w).Encode(fi{Name: fileName, Bucket: bucketId}); err != nil { | |
97 | w.Close() | |
98 | return err | |
99 | } | |
100 | return w.Close() | |
101 | } | |
102 | ||
// Finish assembles a completed large-file upload: it reads the "info"
// metadata, concatenates the numbered part files in order into the final
// object at <root>/<bucket>/<name>/<fileId>, and then removes the staging
// directory.
func (f FS) Finish(fileId string) error {
	r, err := os.Open(filepath.Join(string(f), fileId, "info"))
	if err != nil {
		return err
	}
	defer r.Close()
	var info fi
	if err := json.NewDecoder(r).Decode(&info); err != nil {
		return err
	}
	// Parts re-reads and re-hashes every part just to learn how many there
	// are, hence the author's lament below.
	shas, err := f.Parts(fileId) // oh my god this is terrible
	if err != nil {
		return err
	}
	w, err := f.open(filepath.Join(string(f), info.Bucket, info.Name, fileId))
	if err != nil {
		return err
	}
	// Concatenate parts in part-number order (parts are 1-indexed on disk).
	for i := 1; i <= len(shas); i++ {
		r, err := os.Open(filepath.Join(string(f), fileId, fmt.Sprintf("%d", i)))
		if err != nil {
			w.Close()
			return err
		}
		if _, err := io.Copy(w, r); err != nil {
			w.Close()
			r.Close()
			return err
		}
		r.Close()
	}
	if err := w.Close(); err != nil {
		return err
	}
	// The staging directory (parts + info) is only removed once the final
	// object has been written and closed successfully.
	return os.RemoveAll(filepath.Join(string(f), fileId))
}
139 | ||
140 | func (f FS) ObjectByName(bucket, name string) (pyre.DownloadableObject, error) { | |
141 | dir := filepath.Join(string(f), bucket, name) | |
142 | d, err := os.Open(dir) | |
143 | if err != nil { | |
144 | return nil, err | |
145 | } | |
146 | defer d.Close() | |
147 | fis, err := d.Readdir(0) | |
148 | if err != nil { | |
149 | return nil, err | |
150 | } | |
151 | sort.Slice(fis, func(i, j int) bool { return fis[i].ModTime().Before(fis[j].ModTime()) }) | |
152 | o, err := os.Open(filepath.Join(dir, fis[0].Name())) | |
153 | if err != nil { | |
154 | return nil, err | |
155 | } | |
156 | return do{ | |
157 | o: o, | |
158 | size: fis[0].Size(), | |
159 | }, nil | |
160 | } | |
161 | ||
// do is a downloadable object backed by an open file.
type do struct {
	size int64
	o    *os.File
}

// Size returns the object's length in bytes.
func (d do) Size() int64 { return d.size }

// Reader returns a random-access reader over the file contents.
func (d do) Reader() io.ReaderAt { return d.o }

// Close closes the underlying file.
func (d do) Close() error { return d.o.Close() }

// Get is a stub: this backend never returns large-file metadata.
func (f FS) Get(fileId string) ([]byte, error) { return nil, nil }
172 | ||
// Localhost implements account management for a test server on the given
// localhost port: every credential is accepted, and all API, download and
// upload URLs point back at http://localhost:<port>.
type Localhost int

func (l Localhost) String() string                         { return fmt.Sprintf("http://localhost:%d", l) }
func (l Localhost) UploadHost(id string) (string, error)   { return l.String(), nil }
func (Localhost) Authorize(string, string) (string, error) { return "ok", nil }
func (Localhost) CheckCreds(string, string) error          { return nil }
func (l Localhost) APIRoot(string) string                  { return l.String() }
func (l Localhost) DownloadRoot(string) string             { return l.String() }

// Sizes reports a recommended part size of 1e5 bytes and a minimum of 1.
func (Localhost) Sizes(string) (int32, int32)                    { return 1e5, 1 }
func (l Localhost) UploadPartHost(fileId string) (string, error) { return l.String(), nil }
183 | ||
// LocalBucket is an in-memory bucket store for a local test server.  The
// zero value is ready to use; maps are allocated lazily under the mutex.
type LocalBucket struct {
	Port int

	mux sync.Mutex
	b   map[string][]byte // bucket ID -> serialized bucket
	nti map[string]string // bucket name -> bucket ID
}

// AddBucket records a bucket's serialized form under both its ID and name.
func (lb *LocalBucket) AddBucket(id, name string, bs []byte) error {
	lb.mux.Lock()
	defer lb.mux.Unlock()

	if lb.b == nil {
		lb.b = make(map[string][]byte)
	}
	if lb.nti == nil {
		lb.nti = make(map[string]string)
	}
	lb.b[id] = bs
	lb.nti[name] = id
	return nil
}

// RemoveBucket deletes the bucket with the given ID; removing an unknown
// ID is not an error.
func (lb *LocalBucket) RemoveBucket(id string) error {
	lb.mux.Lock()
	defer lb.mux.Unlock()

	if lb.b == nil {
		lb.b = make(map[string][]byte)
	}
	delete(lb.b, id)
	return nil
}

// UpdateBucket is unsupported and always fails.
func (lb *LocalBucket) UpdateBucket(id string, rev int, bs []byte) error {
	return errors.New("no")
}

// ListBuckets returns the serialized form of every stored bucket.  The
// acct argument is ignored by this implementation.
func (lb *LocalBucket) ListBuckets(acct string) ([][]byte, error) {
	lb.mux.Lock()
	defer lb.mux.Unlock()

	var out [][]byte
	for _, blob := range lb.b {
		out = append(out, blob)
	}
	return out, nil
}

// GetBucket returns the serialized bucket stored under id.
func (lb *LocalBucket) GetBucket(id string) ([]byte, error) {
	lb.mux.Lock()
	defer lb.mux.Unlock()

	if blob, ok := lb.b[id]; ok {
		return blob, nil
	}
	return nil, errors.New("not found")
}

// GetBucketID resolves a bucket name to its ID.
func (lb *LocalBucket) GetBucketID(name string) (string, error) {
	lb.mux.Lock()
	defer lb.mux.Unlock()

	if id, ok := lb.nti[name]; ok {
		return id, nil
	}
	return "", errors.New("not found")
}
0 | // Copyright 2017, Google | |
0 | // Copyright 2017, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
0 | // Copyright 2018, Google | |
0 | // Copyright 2018, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
28 | 28 | } |
29 | 29 | |
30 | 30 | type AuthorizeAccountResponse struct { |
31 | AccountID string `json:"accountId"` | |
32 | AuthToken string `json:"authorizationToken"` | |
33 | URI string `json:"apiUrl"` | |
34 | DownloadURI string `json:"downloadUrl"` | |
35 | MinPartSize int `json:"minimumPartSize"` | |
31 | AccountID string `json:"accountId"` | |
32 | AuthToken string `json:"authorizationToken"` | |
33 | URI string `json:"apiUrl"` | |
34 | DownloadURI string `json:"downloadUrl"` | |
35 | MinPartSize int `json:"minimumPartSize"` | |
36 | PartSize int `json:"recommendedPartSize"` | |
37 | AbsMinPartSize int `json:"absoluteMinimumPartSize"` | |
38 | Allowed Allowance `json:"allowed"` | |
39 | } | |
40 | ||
41 | type Allowance struct { | |
42 | Capabilities []string `json:"capabilities"` | |
43 | Bucket string `json:"bucketId"` | |
44 | Prefix string `json:"namePrefix"` | |
36 | 45 | } |
37 | 46 | |
38 | 47 | type LifecycleRule struct { |
65 | 74 | |
66 | 75 | type ListBucketsRequest struct { |
67 | 76 | AccountID string `json:"accountId"` |
77 | Bucket string `json:"bucketId,omitempty"` | |
78 | Name string `json:"bucketName,omitempty"` | |
68 | 79 | } |
69 | 80 | |
70 | 81 | type ListBucketsResponse struct { |
72 | 83 | } |
73 | 84 | |
74 | 85 | type UpdateBucketRequest struct { |
75 | AccountID string `json:"accountId"` | |
76 | BucketID string `json:"bucketId"` | |
77 | // bucketName is a required field according to | |
78 | // https://www.backblaze.com/b2/docs/b2_update_bucket.html. | |
79 | // | |
80 | // However, actually setting it returns 400: unknown field in | |
81 | // com.backblaze.modules.b2.data.UpdateBucketRequest: bucketName | |
82 | // | |
83 | //Name string `json:"bucketName"` | |
86 | AccountID string `json:"accountId"` | |
87 | BucketID string `json:"bucketId"` | |
84 | 88 | Type string `json:"bucketType,omitempty"` |
85 | 89 | Info map[string]string `json:"bucketInfo,omitempty"` |
86 | 90 | LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"` |
98 | 102 | Token string `json:"authorizationToken"` |
99 | 103 | } |
100 | 104 | |
101 | type UploadFileResponse struct { | |
102 | FileID string `json:"fileId"` | |
103 | Timestamp int64 `json:"uploadTimestamp"` | |
104 | Action string `json:"action"` | |
105 | } | |
105 | type UploadFileResponse GetFileInfoResponse | |
106 | 106 | |
107 | 107 | type DeleteFileVersionRequest struct { |
108 | 108 | Name string `json:"fileName"` |
205 | 205 | } |
206 | 206 | |
207 | 207 | type GetFileInfoResponse struct { |
208 | FileID string `json:"fileId"` | |
209 | Name string `json:"fileName"` | |
210 | SHA1 string `json:"contentSha1"` | |
211 | Size int64 `json:"contentLength"` | |
212 | ContentType string `json:"contentType"` | |
213 | Info map[string]string `json:"fileInfo"` | |
214 | Action string `json:"action"` | |
215 | Timestamp int64 `json:"uploadTimestamp"` | |
208 | FileID string `json:"fileId,omitempty"` | |
209 | Name string `json:"fileName,omitempty"` | |
210 | AccountID string `json:"accountId,omitempty"` | |
211 | BucketID string `json:"bucketId,omitempty"` | |
212 | Size int64 `json:"contentLength,omitempty"` | |
213 | SHA1 string `json:"contentSha1,omitempty"` | |
214 | MD5 string `json:"contentMd5,omitempty"` | |
215 | ContentType string `json:"contentType,omitempty"` | |
216 | Info map[string]string `json:"fileInfo,omitempty"` | |
217 | Action string `json:"action,omitempty"` | |
218 | Timestamp int64 `json:"uploadTimestamp,omitempty"` | |
216 | 219 | } |
217 | 220 | |
218 | 221 | type GetDownloadAuthorizationRequest struct { |
219 | BucketID string `json:"bucketId"` | |
220 | Prefix string `json:"fileNamePrefix"` | |
221 | Valid int `json:"validDurationInSeconds"` | |
222 | BucketID string `json:"bucketId"` | |
223 | Prefix string `json:"fileNamePrefix"` | |
224 | Valid int `json:"validDurationInSeconds"` | |
225 | ContentDisposition string `json:"b2ContentDisposition,omitempty"` | |
222 | 226 | } |
223 | 227 | |
224 | 228 | type GetDownloadAuthorizationResponse struct { |
237 | 241 | Files []GetFileInfoResponse `json:"files"` |
238 | 242 | Continuation string `json:"nextFileId"` |
239 | 243 | } |
244 | ||
// CreateKeyRequest is the request body of b2_create_key.
type CreateKeyRequest struct {
	AccountID    string   `json:"accountId"`
	Capabilities []string `json:"capabilities"`
	Name         string   `json:"keyName"`
	Valid        int      `json:"validDurationInSeconds,omitempty"` // 0 (omitted) means the key does not expire
	BucketID     string   `json:"bucketId,omitempty"`               // restricts the key to one bucket when set
	Prefix       string   `json:"namePrefix,omitempty"`             // restricts the key to matching object names when set
}

// Key is the JSON representation of a B2 application key, shared by the
// create, delete, and list responses.
type Key struct {
	ID           string   `json:"applicationKeyId"`
	Secret       string   `json:"applicationKey"`
	AccountID    string   `json:"accountId"`
	Capabilities []string `json:"capabilities"`
	Name         string   `json:"keyName"`
	Expires      int64    `json:"expirationTimestamp"` // milliseconds since the epoch
	BucketID     string   `json:"bucketId"`
	Prefix       string   `json:"namePrefix"`
}

// CreateKeyResponse is the response body of b2_create_key.
type CreateKeyResponse Key

// DeleteKeyRequest is the request body of b2_delete_key.
type DeleteKeyRequest struct {
	KeyID string `json:"applicationKeyId"`
}

// DeleteKeyResponse is the response body of b2_delete_key.
type DeleteKeyResponse Key

// ListKeysRequest is the request body of b2_list_keys.
type ListKeysRequest struct {
	AccountID string `json:"accountId"`
	Max       int    `json:"maxKeyCount,omitempty"`
	Next      string `json:"startApplicationKeyId,omitempty"` // continuation token from a previous page
}

// ListKeysResponse is the response body of b2_list_keys.
type ListKeysResponse struct {
	Keys []Key  `json:"keys"`
	Next string `json:"nextApplicationKeyId"` // empty when the listing is exhausted
}
0 | // Copyright 2017, Google | |
0 | // Copyright 2017, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
0 | // Copyright 2018, the Blazer authors | |
1 | // | |
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | // you may not use this file except in compliance with the License. | |
4 | // You may obtain a copy of the License at | |
5 | // | |
6 | // http://www.apache.org/licenses/LICENSE-2.0 | |
7 | // | |
8 | // Unless required by applicable law or agreed to in writing, software | |
9 | // distributed under the License is distributed on an "AS IS" BASIS, | |
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | // See the License for the specific language governing permissions and | |
12 | // limitations under the License. | |
13 | ||
14 | package pyre | |
15 | ||
16 | import ( | |
17 | "context" | |
18 | "encoding/base64" | |
19 | "errors" | |
20 | "fmt" | |
21 | "net" | |
22 | "net/http" | |
23 | "os" | |
24 | "reflect" | |
25 | "strings" | |
26 | ||
27 | "github.com/golang/protobuf/proto" | |
28 | "github.com/google/uuid" | |
29 | "github.com/grpc-ecosystem/grpc-gateway/runtime" | |
30 | "google.golang.org/grpc" | |
31 | "google.golang.org/grpc/metadata" | |
32 | ||
33 | pb "github.com/kurin/blazer/internal/pyre/proto" | |
34 | ) | |
35 | ||
// apiErr is the JSON error body returned to B2 API clients.
type apiErr struct {
	Status  int    `json:"status"`
	Code    string `json:"code"`
	Message string `json:"message"`
}
41 | ||
// serveMuxOptions configures the grpc-gateway mux: JSON marshaling for all
// content types, plus an error handler that renders failures as B2-style
// JSON error bodies.
func serveMuxOptions() []runtime.ServeMuxOption {
	return []runtime.ServeMuxOption{
		runtime.WithMarshalerOption("*", &runtime.JSONPb{}),
		runtime.WithProtoErrorHandler(func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, rw http.ResponseWriter, req *http.Request, err error) {
			// Every error is currently reported as a 400; the code and
			// message here are placeholders.
			aErr := apiErr{
				Status:  400,
				Code:    "uh oh",
				Message: err.Error(),
			}
			rw.WriteHeader(aErr.Status)
			if err := m.NewEncoder(rw).Encode(aErr); err != nil {
				fmt.Fprintln(os.Stdout, err)
			}
		}),
	}
}
58 | ||
59 | func getAuth(ctx context.Context) (string, error) { | |
60 | md, ok := metadata.FromIncomingContext(ctx) | |
61 | if !ok { | |
62 | return "", errors.New("no metadata") | |
63 | } | |
64 | data := md.Get("authorization") | |
65 | if len(data) == 0 { | |
66 | return "", nil | |
67 | } | |
68 | return data[0], nil | |
69 | } | |
70 | ||
// RegisterServerOnMux exposes srv on mux under /b2api/v1/.  It starts an
// internal gRPC server on an ephemeral localhost port and proxies
// HTTP/JSON requests to it through grpc-gateway; the gRPC server is shut
// down gracefully when ctx is canceled.
func RegisterServerOnMux(ctx context.Context, srv *Server, mux *http.ServeMux) error {
	rmux := runtime.NewServeMux(serveMuxOptions()...)
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return err
	}
	gsrv := grpc.NewServer()
	if err := pb.RegisterPyreServiceHandlerFromEndpoint(ctx, rmux, l.Addr().String(), []grpc.DialOption{grpc.WithInsecure()}); err != nil {
		return err
	}
	pb.RegisterPyreServiceServer(gsrv, srv)
	mux.Handle("/b2api/v1/", rmux)
	go gsrv.Serve(l) // NOTE(review): Serve's error is discarded; consider logging it.
	go func() {
		<-ctx.Done()
		gsrv.GracefulStop()
	}()
	return nil
}
90 | ||
// AccountManager authenticates accounts and describes per-account service
// parameters: API/download endpoints, upload hosts, and part-size limits.
type AccountManager interface {
	Authorize(acct, key string) (string, error)
	CheckCreds(token, api string) error
	APIRoot(acct string) string
	DownloadRoot(acct string) string
	UploadPartHost(fileID string) (string, error)
	UploadHost(id string) (string, error)
	Sizes(acct string) (recommended, minimum int32)
}

// BucketManager stores serialized bucket metadata keyed by bucket ID.
type BucketManager interface {
	AddBucket(id, name string, bs []byte) error
	RemoveBucket(id string) error
	UpdateBucket(id string, rev int, bs []byte) error
	ListBuckets(acct string) ([][]byte, error)
	GetBucket(id string) ([]byte, error)
}

// LargeFileOrganizer tracks a large (multi-part) file upload from Start
// through Finish.
type LargeFileOrganizer interface {
	Start(bucketID, fileName, fileID string, bs []byte) error
	Get(fileID string) ([]byte, error)
	Parts(fileID string) ([]string, error)
	Finish(fileID string) error
}

// Server implements the B2 API surface; each field supplies one slice of
// backend behavior.
type Server struct {
	Account   AccountManager
	Bucket    BucketManager
	LargeFile LargeFileOrganizer
	List      ListManager
}
122 | ||
123 | func (s *Server) AuthorizeAccount(ctx context.Context, req *pb.AuthorizeAccountRequest) (*pb.AuthorizeAccountResponse, error) { | |
124 | auth, err := getAuth(ctx) | |
125 | if err != nil { | |
126 | return nil, err | |
127 | } | |
128 | if !strings.HasPrefix(auth, "Basic ") { | |
129 | return nil, errors.New("basic auth required") | |
130 | } | |
131 | auth = strings.TrimPrefix(auth, "Basic ") | |
132 | bs, err := base64.StdEncoding.DecodeString(auth) | |
133 | if err != nil { | |
134 | return nil, err | |
135 | } | |
136 | split := strings.Split(string(bs), ":") | |
137 | if len(split) != 2 { | |
138 | return nil, errors.New("bad auth") | |
139 | } | |
140 | acct, key := split[0], split[1] | |
141 | token, err := s.Account.Authorize(acct, key) | |
142 | if err != nil { | |
143 | return nil, err | |
144 | } | |
145 | rec, min := s.Account.Sizes(acct) | |
146 | return &pb.AuthorizeAccountResponse{ | |
147 | AuthorizationToken: token, | |
148 | ApiUrl: s.Account.APIRoot(acct), | |
149 | DownloadUrl: s.Account.DownloadRoot(acct), | |
150 | RecommendedPartSize: rec, | |
151 | MinimumPartSize: rec, | |
152 | AbsoluteMinimumPartSize: min, | |
153 | }, nil | |
154 | } | |
155 | ||
156 | func (s *Server) ListBuckets(ctx context.Context, req *pb.ListBucketsRequest) (*pb.ListBucketsResponse, error) { | |
157 | resp := &pb.ListBucketsResponse{} | |
158 | buckets, err := s.Bucket.ListBuckets(req.AccountId) | |
159 | if err != nil { | |
160 | return nil, err | |
161 | } | |
162 | for _, bs := range buckets { | |
163 | var bucket pb.Bucket | |
164 | if err := proto.Unmarshal(bs, &bucket); err != nil { | |
165 | return nil, err | |
166 | } | |
167 | resp.Buckets = append(resp.Buckets, &bucket) | |
168 | } | |
169 | return resp, nil | |
170 | } | |
171 | ||
172 | func (s *Server) CreateBucket(ctx context.Context, req *pb.Bucket) (*pb.Bucket, error) { | |
173 | req.BucketId = uuid.New().String() | |
174 | bs, err := proto.Marshal(req) | |
175 | if err != nil { | |
176 | return nil, err | |
177 | } | |
178 | if err := s.Bucket.AddBucket(req.BucketId, req.BucketName, bs); err != nil { | |
179 | return nil, err | |
180 | } | |
181 | return req, nil | |
182 | } | |
183 | ||
184 | func (s *Server) DeleteBucket(ctx context.Context, req *pb.Bucket) (*pb.Bucket, error) { | |
185 | bs, err := s.Bucket.GetBucket(req.BucketId) | |
186 | if err != nil { | |
187 | return nil, err | |
188 | } | |
189 | var bucket pb.Bucket | |
190 | if err := proto.Unmarshal(bs, &bucket); err != nil { | |
191 | return nil, err | |
192 | } | |
193 | if err := s.Bucket.RemoveBucket(req.BucketId); err != nil { | |
194 | return nil, err | |
195 | } | |
196 | return &bucket, nil | |
197 | } | |
198 | ||
199 | func (s *Server) GetUploadUrl(ctx context.Context, req *pb.GetUploadUrlRequest) (*pb.GetUploadUrlResponse, error) { | |
200 | host, err := s.Account.UploadHost(req.BucketId) | |
201 | if err != nil { | |
202 | return nil, err | |
203 | } | |
204 | return &pb.GetUploadUrlResponse{ | |
205 | UploadUrl: fmt.Sprintf("%s/b2api/v1/b2_upload_file/%s", host, req.BucketId), | |
206 | BucketId: req.BucketId, | |
207 | }, nil | |
208 | } | |
209 | ||
210 | func (s *Server) StartLargeFile(ctx context.Context, req *pb.StartLargeFileRequest) (*pb.StartLargeFileResponse, error) { | |
211 | fileID := uuid.New().String() | |
212 | resp := &pb.StartLargeFileResponse{ | |
213 | FileId: fileID, | |
214 | FileName: req.FileName, | |
215 | BucketId: req.BucketId, | |
216 | ContentType: req.ContentType, | |
217 | FileInfo: req.FileInfo, | |
218 | } | |
219 | bs, err := proto.Marshal(resp) | |
220 | if err != nil { | |
221 | return nil, err | |
222 | } | |
223 | if err := s.LargeFile.Start(req.BucketId, req.FileName, fileID, bs); err != nil { | |
224 | return nil, err | |
225 | } | |
226 | return resp, nil | |
227 | } | |
228 | ||
229 | func (s *Server) GetUploadPartUrl(ctx context.Context, req *pb.GetUploadPartUrlRequest) (*pb.GetUploadPartUrlResponse, error) { | |
230 | host, err := s.Account.UploadPartHost(req.FileId) | |
231 | if err != nil { | |
232 | return nil, err | |
233 | } | |
234 | return &pb.GetUploadPartUrlResponse{ | |
235 | UploadUrl: fmt.Sprintf("%s/b2api/v1/b2_upload_part/%s", host, req.FileId), | |
236 | }, nil | |
237 | } | |
238 | ||
239 | func (s *Server) FinishLargeFile(ctx context.Context, req *pb.FinishLargeFileRequest) (*pb.FinishLargeFileResponse, error) { | |
240 | parts, err := s.LargeFile.Parts(req.FileId) | |
241 | if err != nil { | |
242 | return nil, err | |
243 | } | |
244 | if !reflect.DeepEqual(parts, req.PartSha1Array) { | |
245 | return nil, errors.New("sha1 array mismatch") | |
246 | } | |
247 | if err := s.LargeFile.Finish(req.FileId); err != nil { | |
248 | return nil, err | |
249 | } | |
250 | return &pb.FinishLargeFileResponse{}, nil | |
251 | } | |
252 | ||
// ListFileVersions is not implemented yet; it currently returns (nil, nil)
// unconditionally.  See the commented-out listing helpers below for the
// work in progress.
func (s *Server) ListFileVersions(ctx context.Context, req *pb.ListFileVersionsRequest) (*pb.ListFileVersionsResponse, error) {
	return nil, nil
}
256 | ||
// objTuple pairs an object name with one of its version identifiers.  It
// is currently referenced only by the commented-out listing code below.
type objTuple struct {
	name, version string
}
260 | ||
// ListManager enumerates a bucket's objects in lexicographical name order.
type ListManager interface {
	// NextN returns the next n objects, sorted by lexicographical order by name,
	// beginning at and including, if it exists, fileName. If withPrefix is not
	// empty, it only returns names that begin with that prefix. If skipPrefix
	// is not empty, then no files with that prefix are returned. If the two
	// conflict, skipPrefix wins (i.e., do not return the entry).
	//
	// If fewer than n entries are returned, this signifies that no more names
	// exist that meet these criteria.
	NextN(bucketID, fileName, withPrefix, skipPrefix string, n int) ([]VersionedObject, error)
}
272 | ||
// VersionedObject is a named object with an ordered list of versions.
type VersionedObject interface {
	// Name returns the object's name.
	Name() string
	// NextNVersions returns up to n version identifiers starting at begin.
	// NOTE(review): per the testVersionedObject implementation, begin is
	// inclusive and "" means "from the first version" — confirm for other
	// implementations.
	NextNVersions(begin string, n int) ([]string, error)
}
277 | ||
// getDirNames returns up to n names under prefix, collapsing entries at
// delim the way delimiter-based bucket listings do: a name with no
// delimiter past the prefix is returned as-is, while a name that contains
// one is reported once as the truncated "folder" name (through the
// delimiter), with every other entry sharing that folder skipped.  The
// listing starts at name (inclusive).
func getDirNames(lm ListManager, bucket, name, prefix, delim string, n int) ([]string, error) {
	// sfx is the skip-prefix passed to the next NextN call; it holds the
	// folder most recently emitted so its remaining contents are skipped.
	var sfx string
	var out []string
	for n > 0 {
		vo, err := lm.NextN(bucket, name, prefix, sfx, 1)
		if err != nil {
			return nil, err
		}
		if len(vo) == 0 {
			// No more names satisfy the criteria; return what we have.
			return out, nil
		}
		v := vo[0]
		name = v.Name()
		suffix := name[len(prefix):]
		i := strings.Index(suffix, delim)
		if i < 0 {
			// Plain entry directly under prefix: emit it, then resume the
			// listing immediately after it ("\000" is the smallest
			// possible suffix, so name+"\000" is the next start point).
			sfx = ""
			out = append(out, name)
			name += "\000"
			n--
			continue
		}
		// Delimiter found: emit the folder name (up to and including the
		// delimiter) and skip everything else inside that folder on the
		// next NextN call.
		sfx = v.Name()[:len(prefix)+i+1]
		out = append(out, sfx)
		n--
	}
	return out, nil
}
306 | ||
307 | //func getNextObjects(lm ListManager, bucket, name, prefix, delimiter string, n int) ([]VersionedObject, error) { | |
308 | // if delimiter == "" { | |
309 | // return lm.NextN(bucket, name, prefix, "", n) | |
310 | // } | |
311 | // afterPfx := strings.TrimPrefix(name, prefix) | |
312 | // i := strings.Index(afterPfx, delimiter) | |
313 | // if i == 0 { | |
314 | // | |
315 | // } | |
316 | // if i < 0 { | |
317 | // return lm.NextN(bucket, name, prefix, "", n) | |
318 | // } | |
319 | // skipPfx := name[:len(prefix)+i] | |
320 | // // TO | |
321 | //} | |
322 | // | |
323 | //func listFileVersions(lm ListManager, bucket, name, version, prefix, delimiter string, n int) ([]objTuple, error) { | |
324 | // var tups []objTuple | |
325 | // var got int | |
326 | // for { | |
327 | // objs, err := getNextObjects(bucket, name, prefix, delimiter, n-got) | |
328 | // if err != nil { | |
329 | // return nil, err | |
330 | // } | |
331 | // if len(objs) == 0 { | |
332 | // break | |
333 | // } | |
334 | // for _, o := range objs { | |
335 | // var begin string | |
336 | // if len(tups) == 0 { | |
337 | // begin = version | |
338 | // } | |
339 | // vers, err := lm.NextNVersions(begin, n-got) | |
340 | // if err != nil { | |
341 | // return nil, err | |
342 | // } | |
343 | // got += len(vers) | |
344 | // for _, ver := range vers { | |
345 | // tups = append(tups, objTuple{name: o.Name(), version: ver}) | |
346 | // } | |
347 | // if got >= n { | |
348 | // return tups[:n], nil | |
349 | // } | |
350 | // } | |
351 | // } | |
352 | // return tups, nil | |
353 | //} |
0 | // Copyright 2018, the Blazer authors | |
1 | // | |
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | // you may not use this file except in compliance with the License. | |
4 | // You may obtain a copy of the License at | |
5 | // | |
6 | // http://www.apache.org/licenses/LICENSE-2.0 | |
7 | // | |
8 | // Unless required by applicable law or agreed to in writing, software | |
9 | // distributed under the License is distributed on an "AS IS" BASIS, | |
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | // See the License for the specific language governing permissions and | |
12 | // limitations under the License. | |
13 | ||
14 | package pyre | |
15 | ||
16 | import ( | |
17 | "reflect" | |
18 | "sort" | |
19 | "strings" | |
20 | "sync" | |
21 | "testing" | |
22 | ) | |
23 | ||
// testVersionedObject is a fake VersionedObject: a name plus an ordered
// list of version strings.
type testVersionedObject struct {
	name     string
	versions []string
}

// Name returns the object's name.
func (t testVersionedObject) Name() string { return t.name }

// NextNVersions returns up to n versions starting at b (inclusive); an
// empty b means "start from the first version".
func (t testVersionedObject) NextNVersions(b string, n int) ([]string, error) {
	var out []string
	started := b == ""
	for _, v := range t.versions {
		if v == b {
			started = true
		}
		if !started {
			continue
		}
		if len(out) >= n {
			return out, nil
		}
		out = append(out, v)
	}
	return out, nil
}
51 | ||
52 | type testListManager struct { | |
53 | objs map[string][]string | |
54 | m sync.Mutex | |
55 | } | |
56 | ||
57 | func (t *testListManager) NextN(b, fn, pfx, spfx string, n int) ([]VersionedObject, error) { | |
58 | t.m.Lock() | |
59 | defer t.m.Unlock() | |
60 | ||
61 | var out []VersionedObject | |
62 | var keys []string | |
63 | for k := range t.objs { | |
64 | keys = append(keys, k) | |
65 | } | |
66 | sort.Strings(keys) | |
67 | for _, k := range keys { | |
68 | if k < fn { | |
69 | continue | |
70 | } | |
71 | if !strings.HasPrefix(k, pfx) { | |
72 | continue | |
73 | } | |
74 | if spfx != "" && strings.HasPrefix(k, spfx) { | |
75 | continue | |
76 | } | |
77 | out = append(out, testVersionedObject{name: k, versions: t.objs[k]}) | |
78 | n-- | |
79 | if n <= 0 { | |
80 | return out, nil | |
81 | } | |
82 | } | |
83 | return out, nil | |
84 | } | |
85 | ||
// TestGetDirNames exercises getDirNames against an in-memory listing,
// checking that names containing the delimiter past the prefix collapse
// into a single "directory" entry while plain names come back unchanged.
func TestGetDirNames(t *testing.T) {
	table := []struct {
		lm    ListManager
		name  string
		pfx   string
		delim string
		num   int
		want  []string
	}{
		{
			// First two entries: the file "foo" and the collapsed
			// directory "foo/" (covering foo/bar and foo/baz).
			lm: &testListManager{
				objs: map[string][]string{
					"/usr/local/etc/foo/bar": {"a"},
					"/usr/local/etc/foo/baz": {"a"},
					"/usr/local/etc/foo":     {"a"},
					"/usr/local/etc/fool":    {"a"},
				},
			},
			num:   2,
			pfx:   "/usr/local/etc/",
			delim: "/",
			want:  []string{"/usr/local/etc/foo", "/usr/local/etc/foo/"},
		},
		{
			// All entries under the prefix, with foo/{bar,baz} collapsed
			// to "foo/" and the rest listed individually in sorted order.
			lm: &testListManager{
				objs: map[string][]string{
					"/usr/local/etc/foo/bar": {"a"},
					"/usr/local/etc/foo/baz": {"a"},
					"/usr/local/etc/foo":     {"a"},
					"/usr/local/etc/fool":    {"a"},
					"/usr/local/etc/bar":     {"a"},
				},
			},
			num:   4,
			pfx:   "/usr/local/etc/",
			delim: "/",
			want:  []string{"/usr/local/etc/bar", "/usr/local/etc/foo", "/usr/local/etc/foo/", "/usr/local/etc/fool"},
		},
	}

	for _, e := range table {
		got, err := getDirNames(e.lm, "", e.name, e.pfx, e.delim, e.num)
		if err != nil {
			t.Error(err)
			continue
		}
		if !reflect.DeepEqual(got, e.want) {
			t.Errorf("getDirNames(%v, %q, %q, %q, %d): got %v, want %v", e.lm, e.name, e.pfx, e.delim, e.num, got, e.want)
		}
	}
}
0 | // Copyright 2018, the Blazer authors | |
1 | // | |
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | // you may not use this file except in compliance with the License. | |
4 | // You may obtain a copy of the License at | |
5 | // | |
6 | // http://www.apache.org/licenses/LICENSE-2.0 | |
7 | // | |
8 | // Unless required by applicable law or agreed to in writing, software | |
9 | // distributed under the License is distributed on an "AS IS" BASIS, | |
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | // See the License for the specific language governing permissions and | |
12 | // limitations under the License. | |
13 | ||
14 | package pyre | |
15 | ||
16 | import ( | |
17 | "fmt" | |
18 | "io" | |
19 | "net/http" | |
20 | "strconv" | |
21 | "strings" | |
22 | ) | |
23 | ||
// DownloadableObject is a stored object that can be streamed back to a
// client.  Callers must Close it when finished.
type DownloadableObject interface {
	// Size returns the object's length in bytes.
	Size() int64
	// Reader provides random access to the object's contents.
	Reader() io.ReaderAt
	io.Closer
}
29 | ||
// DownloadManager resolves bucket names to IDs and opens stored objects
// for download.
type DownloadManager interface {
	// ObjectByName opens the named object within the given bucket ID.
	ObjectByName(bucketID, name string) (DownloadableObject, error)
	// GetBucketID maps a bucket name to its ID.
	GetBucketID(bucket string) (string, error)
	// GetBucket returns the serialized bucket record for the given ID.
	GetBucket(id string) ([]byte, error)
}
35 | ||
// downloadServer serves /file/<bucket>/<name> download requests backed by
// a DownloadManager.
type downloadServer struct {
	dm DownloadManager
}
39 | ||
40 | type downloadRequest struct { | |
41 | off, n int64 | |
42 | } | |
43 | ||
44 | func parseDownloadHeaders(r *http.Request) (*downloadRequest, error) { | |
45 | rang := r.Header.Get("Range") | |
46 | if rang == "" { | |
47 | return &downloadRequest{}, nil | |
48 | } | |
49 | if !strings.HasPrefix(rang, "bytes=") { | |
50 | return nil, fmt.Errorf("unknown range format: %q", rang) | |
51 | } | |
52 | rang = strings.TrimPrefix(rang, "bytes=") | |
53 | if !strings.Contains(rang, "-") { | |
54 | return nil, fmt.Errorf("unknown range format: %q", rang) | |
55 | } | |
56 | parts := strings.Split(rang, "-") | |
57 | off, err := strconv.ParseInt(parts[0], 10, 64) | |
58 | if err != nil { | |
59 | return nil, err | |
60 | } | |
61 | end, err := strconv.ParseInt(parts[1], 10, 64) | |
62 | if err != nil { | |
63 | return nil, err | |
64 | } | |
65 | return &downloadRequest{ | |
66 | off: off, | |
67 | n: (end + 1) - off, | |
68 | }, nil | |
69 | } | |
70 | ||
71 | func (fs *downloadServer) serveWholeObject(rw http.ResponseWriter, obj DownloadableObject) { | |
72 | rw.Header().Set("Content-Length", fmt.Sprintf("%d", obj.Size())) | |
73 | sr := io.NewSectionReader(obj.Reader(), 0, obj.Size()) | |
74 | if _, err := io.Copy(rw, sr); err != nil { | |
75 | http.Error(rw, err.Error(), 503) | |
76 | fmt.Println("no reader", err) | |
77 | } | |
78 | } | |
79 | ||
80 | func (fs *downloadServer) servePartialObject(rw http.ResponseWriter, obj DownloadableObject, off, len int64) { | |
81 | if off >= obj.Size() { | |
82 | http.Error(rw, "hell naw", 416) | |
83 | fmt.Printf("range not good (%d-%d for %d)\n", off, len, obj.Size()) | |
84 | return | |
85 | } | |
86 | if off+len > obj.Size() { | |
87 | len = obj.Size() - off | |
88 | } | |
89 | sr := io.NewSectionReader(obj.Reader(), off, len) | |
90 | rw.Header().Set("Content-Length", fmt.Sprintf("%d", len)) | |
91 | rw.WriteHeader(206) // this goes after headers are set | |
92 | if _, err := io.Copy(rw, sr); err != nil { | |
93 | fmt.Println("bad read:", err) | |
94 | } | |
95 | } | |
96 | ||
97 | func (fs *downloadServer) ServeHTTP(rw http.ResponseWriter, r *http.Request) { | |
98 | req, err := parseDownloadHeaders(r) | |
99 | if err != nil { | |
100 | http.Error(rw, err.Error(), 503) | |
101 | fmt.Println("weird header") | |
102 | return | |
103 | } | |
104 | path := strings.TrimPrefix(r.URL.Path, "/") | |
105 | parts := strings.Split(path, "/") | |
106 | if len(parts) < 3 { | |
107 | http.Error(rw, err.Error(), 404) | |
108 | fmt.Println("weird file") | |
109 | return | |
110 | } | |
111 | bucket := parts[1] | |
112 | bid, err := fs.dm.GetBucketID(bucket) | |
113 | if err != nil { | |
114 | http.Error(rw, err.Error(), 503) | |
115 | fmt.Println("no bucket:", err) | |
116 | return | |
117 | } | |
118 | file := strings.Join(parts[2:], "/") | |
119 | obj, err := fs.dm.ObjectByName(bid, file) | |
120 | if err != nil { | |
121 | http.Error(rw, err.Error(), 503) | |
122 | fmt.Println("no reader", err) | |
123 | return | |
124 | } | |
125 | defer obj.Close() | |
126 | if req.off == 0 && req.n == 0 { | |
127 | fs.serveWholeObject(rw, obj) | |
128 | return | |
129 | } | |
130 | fs.servePartialObject(rw, obj, req.off, req.n) | |
131 | } | |
132 | ||
// RegisterDownloadManagerOnMux mounts a download handler backed by d on
// mux under the /file/ URL prefix.
func RegisterDownloadManagerOnMux(d DownloadManager, mux *http.ServeMux) {
	mux.Handle("/file/", &downloadServer{dm: d})
}
0 | // Copyright 2018, the Blazer authors | |
1 | // | |
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | // you may not use this file except in compliance with the License. | |
4 | // You may obtain a copy of the License at | |
5 | // | |
6 | // http://www.apache.org/licenses/LICENSE-2.0 | |
7 | // | |
8 | // Unless required by applicable law or agreed to in writing, software | |
9 | // distributed under the License is distributed on an "AS IS" BASIS, | |
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | // See the License for the specific language governing permissions and | |
12 | // limitations under the License. | |
13 | ||
14 | package pyre | |
15 | ||
16 | import ( | |
17 | "encoding/json" | |
18 | "fmt" | |
19 | "io" | |
20 | "net/http" | |
21 | "strconv" | |
22 | "strings" | |
23 | ) | |
24 | ||
25 | const uploadFilePartPrefix = "/b2api/v1/b2_upload_part/" | |
26 | ||
27 | type LargeFileManager interface { | |
28 | PartWriter(id string, part int) (io.WriteCloser, error) | |
29 | } | |
30 | ||
31 | type largeFileServer struct { | |
32 | fm LargeFileManager | |
33 | } | |
34 | ||
35 | type uploadPartRequest struct { | |
36 | ID string `json:"fileId"` | |
37 | Part int `json:"partNumber"` | |
38 | Size int64 `json:"contentLength"` | |
39 | Hash string `json:"contentSha1"` | |
40 | } | |
41 | ||
42 | func parseUploadPartHeaders(r *http.Request) (uploadPartRequest, error) { | |
43 | var ur uploadPartRequest | |
44 | ur.Hash = r.Header.Get("X-Bz-Content-Sha1") | |
45 | part, err := strconv.ParseInt(r.Header.Get("X-Bz-Part-Number"), 10, 64) | |
46 | if err != nil { | |
47 | return ur, err | |
48 | } | |
49 | ur.Part = int(part) | |
50 | size, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64) | |
51 | if err != nil { | |
52 | return ur, err | |
53 | } | |
54 | ur.Size = size | |
55 | ur.ID = strings.TrimPrefix(r.URL.Path, uploadFilePartPrefix) | |
56 | return ur, nil | |
57 | } | |
58 | ||
59 | func (fs *largeFileServer) ServeHTTP(rw http.ResponseWriter, r *http.Request) { | |
60 | req, err := parseUploadPartHeaders(r) | |
61 | if err != nil { | |
62 | http.Error(rw, err.Error(), 500) | |
63 | fmt.Println("oh no") | |
64 | return | |
65 | } | |
66 | w, err := fs.fm.PartWriter(req.ID, req.Part) | |
67 | if err != nil { | |
68 | http.Error(rw, err.Error(), 500) | |
69 | fmt.Println("oh no") | |
70 | return | |
71 | } | |
72 | if _, err := io.Copy(w, io.LimitReader(r.Body, req.Size)); err != nil { | |
73 | w.Close() | |
74 | http.Error(rw, err.Error(), 500) | |
75 | fmt.Println("oh no") | |
76 | return | |
77 | } | |
78 | if err := w.Close(); err != nil { | |
79 | http.Error(rw, err.Error(), 500) | |
80 | fmt.Println("oh no") | |
81 | return | |
82 | } | |
83 | if err := json.NewEncoder(rw).Encode(req); err != nil { | |
84 | fmt.Println("oh no") | |
85 | } | |
86 | } | |
87 | ||
// RegisterLargeFileManagerOnMux mounts a part-upload handler backed by f
// on mux under the b2_upload_part URL prefix.
func RegisterLargeFileManagerOnMux(f LargeFileManager, mux *http.ServeMux) {
	mux.Handle(uploadFilePartPrefix, &largeFileServer{fm: f})
}
0 | // Code generated by protoc-gen-go. DO NOT EDIT. | |
1 | // source: proto/pyre.proto | |
2 | ||
3 | package pyre_proto | |
4 | ||
5 | import proto "github.com/golang/protobuf/proto" | |
6 | import fmt "fmt" | |
7 | import math "math" | |
8 | import _ "google.golang.org/genproto/googleapis/api/annotations" | |
9 | ||
10 | import ( | |
11 | context "golang.org/x/net/context" | |
12 | grpc "google.golang.org/grpc" | |
13 | ) | |
14 | ||
15 | // Reference imports to suppress errors if they are not otherwise used. | |
16 | var _ = proto.Marshal | |
17 | var _ = fmt.Errorf | |
18 | var _ = math.Inf | |
19 | ||
20 | // This is a compile-time assertion to ensure that this generated file | |
21 | // is compatible with the proto package it is being compiled against. | |
22 | // A compilation error at this line likely means your copy of the | |
23 | // proto package needs to be updated. | |
24 | const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package | |
25 | ||
// NOTE(review): everything from here down is protoc-gen-go output for
// proto/pyre.proto — regenerate from the .proto file rather than editing
// by hand.

// AuthorizeAccountRequest is the (empty) request message for
// b2_authorize_account.
type AuthorizeAccountRequest struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *AuthorizeAccountRequest) Reset()         { *m = AuthorizeAccountRequest{} }
func (m *AuthorizeAccountRequest) String() string { return proto.CompactTextString(m) }
func (*AuthorizeAccountRequest) ProtoMessage()    {}
func (*AuthorizeAccountRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pyre_492df08819220afa, []int{0}
}
func (m *AuthorizeAccountRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AuthorizeAccountRequest.Unmarshal(m, b)
}
func (m *AuthorizeAccountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AuthorizeAccountRequest.Marshal(b, m, deterministic)
}
func (dst *AuthorizeAccountRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AuthorizeAccountRequest.Merge(dst, src)
}
func (m *AuthorizeAccountRequest) XXX_Size() int {
	return xxx_messageInfo_AuthorizeAccountRequest.Size(m)
}
func (m *AuthorizeAccountRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_AuthorizeAccountRequest.DiscardUnknown(m)
}

var xxx_messageInfo_AuthorizeAccountRequest proto.InternalMessageInfo
55 | ||
// AuthorizeAccountResponse is the response message for
// b2_authorize_account.  (Generated code — do not edit by hand.)
type AuthorizeAccountResponse struct {
	// The identifier for the account.
	AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"`
	// An authorization token to use with all calls, other than
	// b2_authorize_account, that need an Authorization header. This
	// authorization token is valid for at most 24 hours.
	AuthorizationToken string `protobuf:"bytes,2,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"`
	// The base URL to use for all API calls except for uploading and downloading
	// files.
	ApiUrl string `protobuf:"bytes,3,opt,name=api_url,json=apiUrl,proto3" json:"api_url,omitempty"`
	// The base URL to use for downloading files.
	DownloadUrl string `protobuf:"bytes,4,opt,name=download_url,json=downloadUrl,proto3" json:"download_url,omitempty"`
	// The recommended size for each part of a large file. We recommend using
	// this part size for optimal upload performance.
	RecommendedPartSize int32 `protobuf:"varint,5,opt,name=recommended_part_size,json=recommendedPartSize,proto3" json:"recommended_part_size,omitempty"`
	// The smallest possible size of a part of a large file (except the last
	// one). This is smaller than the recommended part size. If you use it, you
	// may find that it takes longer overall to upload a large file.
	AbsoluteMinimumPartSize int32    `protobuf:"varint,6,opt,name=absolute_minimum_part_size,json=absoluteMinimumPartSize,proto3" json:"absolute_minimum_part_size,omitempty"`
	MinimumPartSize         int32    `protobuf:"varint,7,opt,name=minimum_part_size,json=minimumPartSize,proto3" json:"minimum_part_size,omitempty"`
	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
	XXX_unrecognized        []byte   `json:"-"`
	XXX_sizecache           int32    `json:"-"`
}

func (m *AuthorizeAccountResponse) Reset()         { *m = AuthorizeAccountResponse{} }
func (m *AuthorizeAccountResponse) String() string { return proto.CompactTextString(m) }
func (*AuthorizeAccountResponse) ProtoMessage()    {}
func (*AuthorizeAccountResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pyre_492df08819220afa, []int{1}
}
func (m *AuthorizeAccountResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AuthorizeAccountResponse.Unmarshal(m, b)
}
func (m *AuthorizeAccountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AuthorizeAccountResponse.Marshal(b, m, deterministic)
}
func (dst *AuthorizeAccountResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AuthorizeAccountResponse.Merge(dst, src)
}
func (m *AuthorizeAccountResponse) XXX_Size() int {
	return xxx_messageInfo_AuthorizeAccountResponse.Size(m)
}
func (m *AuthorizeAccountResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_AuthorizeAccountResponse.DiscardUnknown(m)
}

var xxx_messageInfo_AuthorizeAccountResponse proto.InternalMessageInfo

func (m *AuthorizeAccountResponse) GetAccountId() string {
	if m != nil {
		return m.AccountId
	}
	return ""
}

func (m *AuthorizeAccountResponse) GetAuthorizationToken() string {
	if m != nil {
		return m.AuthorizationToken
	}
	return ""
}

func (m *AuthorizeAccountResponse) GetApiUrl() string {
	if m != nil {
		return m.ApiUrl
	}
	return ""
}

func (m *AuthorizeAccountResponse) GetDownloadUrl() string {
	if m != nil {
		return m.DownloadUrl
	}
	return ""
}

func (m *AuthorizeAccountResponse) GetRecommendedPartSize() int32 {
	if m != nil {
		return m.RecommendedPartSize
	}
	return 0
}

func (m *AuthorizeAccountResponse) GetAbsoluteMinimumPartSize() int32 {
	if m != nil {
		return m.AbsoluteMinimumPartSize
	}
	return 0
}

func (m *AuthorizeAccountResponse) GetMinimumPartSize() int32 {
	if m != nil {
		return m.MinimumPartSize
	}
	return 0
}
153 | ||
// ListBucketsRequest is the request message for b2_list_buckets.
// (Generated code — do not edit by hand.)
type ListBucketsRequest struct {
	// The ID of your account.
	AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"`
	// When specified, the result will be a list containing just this bucket, if
	// it's present in the account, or no buckets if the account does not have a
	// bucket with this ID.
	BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
	// When specified, the result will be a list containing just this bucket, if
	// it's present in the account, or no buckets if the account does not have a
	// bucket with this ID.
	BucketName string `protobuf:"bytes,3,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"`
	// If present, B2 will use it as a filter for bucket types returned in the
	// list buckets response. If not present, only buckets with bucket types
	// "allPublic", "allPrivate" and "snapshot" will be returned. A special
	// filter value of ["all"] will return all bucket types.
	//
	// If present, it must be in the form of a json array of strings containing
	// valid bucket types in quotes and separated by a comma. Valid bucket types
	// include "allPrivate", "allPublic", "snapshot", and other values added in
	// the future.
	//
	// A bad request error will be returned if "all" is used with other bucket
	// types, this field is empty, or invalid bucket types are requested.
	BucketTypes          []string `protobuf:"bytes,4,rep,name=bucket_types,json=bucketTypes,proto3" json:"bucket_types,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ListBucketsRequest) Reset()         { *m = ListBucketsRequest{} }
func (m *ListBucketsRequest) String() string { return proto.CompactTextString(m) }
func (*ListBucketsRequest) ProtoMessage()    {}
func (*ListBucketsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_pyre_492df08819220afa, []int{2}
}
func (m *ListBucketsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ListBucketsRequest.Unmarshal(m, b)
}
func (m *ListBucketsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ListBucketsRequest.Marshal(b, m, deterministic)
}
func (dst *ListBucketsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ListBucketsRequest.Merge(dst, src)
}
func (m *ListBucketsRequest) XXX_Size() int {
	return xxx_messageInfo_ListBucketsRequest.Size(m)
}
func (m *ListBucketsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ListBucketsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ListBucketsRequest proto.InternalMessageInfo

func (m *ListBucketsRequest) GetAccountId() string {
	if m != nil {
		return m.AccountId
	}
	return ""
}

func (m *ListBucketsRequest) GetBucketId() string {
	if m != nil {
		return m.BucketId
	}
	return ""
}

func (m *ListBucketsRequest) GetBucketName() string {
	if m != nil {
		return m.BucketName
	}
	return ""
}

func (m *ListBucketsRequest) GetBucketTypes() []string {
	if m != nil {
		return m.BucketTypes
	}
	return nil
}
234 | ||
// LifecycleRule describes when files under a name prefix are hidden and
// deleted.  (Generated code — do not edit by hand.)
type LifecycleRule struct {
	// After a file is uploaded, the number of days before it can be hidden.
	DaysFromUploadingToHiding int32 `protobuf:"varint,1,opt,name=days_from_uploading_to_hiding,json=daysFromUploadingToHiding,proto3" json:"days_from_uploading_to_hiding,omitempty"`
	// After a file is hidden, the number of days before it can be deleted.
	DaysFromHidingToDeleting int32 `protobuf:"varint,2,opt,name=days_from_hiding_to_deleting,json=daysFromHidingToDeleting,proto3" json:"days_from_hiding_to_deleting,omitempty"`
	// The rule applies to files whose names start with this prefix.
	FileNamePrefix       string   `protobuf:"bytes,3,opt,name=file_name_prefix,json=fileNamePrefix,proto3" json:"file_name_prefix,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *LifecycleRule) Reset()         { *m = LifecycleRule{} }
func (m *LifecycleRule) String() string { return proto.CompactTextString(m) }
func (*LifecycleRule) ProtoMessage()    {}
func (*LifecycleRule) Descriptor() ([]byte, []int) {
	return fileDescriptor_pyre_492df08819220afa, []int{3}
}
func (m *LifecycleRule) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LifecycleRule.Unmarshal(m, b)
}
func (m *LifecycleRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LifecycleRule.Marshal(b, m, deterministic)
}
func (dst *LifecycleRule) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LifecycleRule.Merge(dst, src)
}
func (m *LifecycleRule) XXX_Size() int {
	return xxx_messageInfo_LifecycleRule.Size(m)
}
func (m *LifecycleRule) XXX_DiscardUnknown() {
	xxx_messageInfo_LifecycleRule.DiscardUnknown(m)
}

var xxx_messageInfo_LifecycleRule proto.InternalMessageInfo

func (m *LifecycleRule) GetDaysFromUploadingToHiding() int32 {
	if m != nil {
		return m.DaysFromUploadingToHiding
	}
	return 0
}

func (m *LifecycleRule) GetDaysFromHidingToDeleting() int32 {
	if m != nil {
		return m.DaysFromHidingToDeleting
	}
	return 0
}

func (m *LifecycleRule) GetFileNamePrefix() string {
	if m != nil {
		return m.FileNamePrefix
	}
	return ""
}
291 | ||
292 | type CorsRule struct { | |
293 | // A name for humans to recognize the rule in a user interface. Names must be | |
294 | // unique within a bucket. Names can consist of upper-case and lower-case | |
295 | // English letters, numbers, and "-". No other characters are allowed. A name | |
296 | // must be at least 6 characters long, and can be at most 50 characters long. | |
297 | // These are all allowed names: myPhotosSite, allowAnyHttps, | |
298 | // backblaze-images. Names that start with "b2-" are reserved for Backblaze | |
299 | // use. | |
300 | CorsRuleName string `protobuf:"bytes,1,opt,name=cors_rule_name,json=corsRuleName,proto3" json:"cors_rule_name,omitempty"` | |
301 | // A non-empty list specifying which origins the rule covers. Each value may | |
302 | // have one of many formats: | |
303 | // | |
304 | // * The origin can be fully specified, such as http://www.example.com:8180 | |
305 | // or https://www.example.com:4433. | |
306 | // | |
307 | // * The origin can omit a default port, such as https://www.example.com. | |
308 | // | |
309 | // * The origin may have a single '*' as part of the domain name, such as | |
310 | // https://*.example.com, https://*:8443 or https://*. | |
311 | // | |
312 | // * The origin may be 'https' to match any origin that uses HTTPS. (This is | |
313 | // broader than 'https://*' because it matches any port.) | |
314 | // | |
315 | // * Finally, the origin can be a single '*' to match any origin. | |
316 | // | |
317 | // If any entry is "*", it must be the only entry. There can be at most one | |
318 | // "https" entry and no entry after it may start with "https:". | |
319 | AllowedOrigins []string `protobuf:"bytes,2,rep,name=allowed_origins,json=allowedOrigins,proto3" json:"allowed_origins,omitempty"` | |
320 | // A list specifying which operations the rule allows. At least one value | |
321 | // must be specified. All values must be from the following list. More values | |
322 | // may be added to this list at any time. | |
323 | // | |
324 | // b2_download_file_by_name | |
325 | // b2_download_file_by_id | |
326 | // b2_upload_file | |
327 | // b2_upload_part | |
328 | AllowedOperations []string `protobuf:"bytes,3,rep,name=allowed_operations,json=allowedOperations,proto3" json:"allowed_operations,omitempty"` | |
329 | // If present, this is a list of headers that are allowed in a pre-flight | |
330 | // OPTIONS's request's Access-Control-Request-Headers header value. Each | |
331 | // value may have one of many formats: | |
332 | // | |
333 | // * It may be a complete header name, such as x-bz-content-sha1. | |
334 | // | |
335 | // * It may end with an asterisk, such as x-bz-info-*. | |
336 | // | |
337 | // * Finally, it may be a single '*' to match any header. | |
338 | // | |
339 | // If any entry is "*", it must be the only entry in the list. If this list | |
340 | // is missing, it is be treated as if it is a list with no entries. | |
341 | AllowedHeaders []string `protobuf:"bytes,4,rep,name=allowed_headers,json=allowedHeaders,proto3" json:"allowed_headers,omitempty"` | |
342 | // If present, this is a list of headers that may be exposed to an | |
343 | // application inside the client (eg. exposed to Javascript in a browser). | |
344 | // Each entry in the list must be a complete header name (eg. | |
345 | // "x-bz-content-sha1"). If this list is missing or empty, no headers will be | |
346 | // exposed. | |
347 | ExposeHeaders []string `protobuf:"bytes,5,rep,name=expose_headers,json=exposeHeaders,proto3" json:"expose_headers,omitempty"` | |
348 | // This specifies the maximum number of seconds that a browser may cache the | |
349 | // response to a preflight request. The value must not be negative and it | |
350 | // must not be more than 86,400 seconds (one day). | |
351 | MaxAgeSeconds int32 `protobuf:"varint,6,opt,name=max_age_seconds,json=maxAgeSeconds,proto3" json:"max_age_seconds,omitempty"` | |
352 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
353 | XXX_unrecognized []byte `json:"-"` | |
354 | XXX_sizecache int32 `json:"-"` | |
355 | } | |
356 | ||
357 | func (m *CorsRule) Reset() { *m = CorsRule{} } | |
358 | func (m *CorsRule) String() string { return proto.CompactTextString(m) } | |
359 | func (*CorsRule) ProtoMessage() {} | |
360 | func (*CorsRule) Descriptor() ([]byte, []int) { | |
361 | return fileDescriptor_pyre_492df08819220afa, []int{4} | |
362 | } | |
363 | func (m *CorsRule) XXX_Unmarshal(b []byte) error { | |
364 | return xxx_messageInfo_CorsRule.Unmarshal(m, b) | |
365 | } | |
366 | func (m *CorsRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
367 | return xxx_messageInfo_CorsRule.Marshal(b, m, deterministic) | |
368 | } | |
369 | func (dst *CorsRule) XXX_Merge(src proto.Message) { | |
370 | xxx_messageInfo_CorsRule.Merge(dst, src) | |
371 | } | |
372 | func (m *CorsRule) XXX_Size() int { | |
373 | return xxx_messageInfo_CorsRule.Size(m) | |
374 | } | |
375 | func (m *CorsRule) XXX_DiscardUnknown() { | |
376 | xxx_messageInfo_CorsRule.DiscardUnknown(m) | |
377 | } | |
378 | ||
379 | var xxx_messageInfo_CorsRule proto.InternalMessageInfo | |
380 | ||
381 | func (m *CorsRule) GetCorsRuleName() string { | |
382 | if m != nil { | |
383 | return m.CorsRuleName | |
384 | } | |
385 | return "" | |
386 | } | |
387 | ||
388 | func (m *CorsRule) GetAllowedOrigins() []string { | |
389 | if m != nil { | |
390 | return m.AllowedOrigins | |
391 | } | |
392 | return nil | |
393 | } | |
394 | ||
395 | func (m *CorsRule) GetAllowedOperations() []string { | |
396 | if m != nil { | |
397 | return m.AllowedOperations | |
398 | } | |
399 | return nil | |
400 | } | |
401 | ||
402 | func (m *CorsRule) GetAllowedHeaders() []string { | |
403 | if m != nil { | |
404 | return m.AllowedHeaders | |
405 | } | |
406 | return nil | |
407 | } | |
408 | ||
409 | func (m *CorsRule) GetExposeHeaders() []string { | |
410 | if m != nil { | |
411 | return m.ExposeHeaders | |
412 | } | |
413 | return nil | |
414 | } | |
415 | ||
416 | func (m *CorsRule) GetMaxAgeSeconds() int32 { | |
417 | if m != nil { | |
418 | return m.MaxAgeSeconds | |
419 | } | |
420 | return 0 | |
421 | } | |
422 | ||
423 | type Bucket struct { | |
424 | AccountId string `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` | |
425 | BucketId string `protobuf:"bytes,2,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` | |
426 | BucketName string `protobuf:"bytes,3,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"` | |
427 | BucketType string `protobuf:"bytes,4,opt,name=bucket_type,json=bucketType,proto3" json:"bucket_type,omitempty"` | |
428 | BucketInfo map[string]string `protobuf:"bytes,5,rep,name=bucket_info,json=bucketInfo,proto3" json:"bucket_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` | |
429 | CoresRules []*CorsRule `protobuf:"bytes,6,rep,name=cores_rules,json=coresRules,proto3" json:"cores_rules,omitempty"` | |
430 | LifecycleRules []*LifecycleRule `protobuf:"bytes,7,rep,name=lifecycle_rules,json=lifecycleRules,proto3" json:"lifecycle_rules,omitempty"` | |
431 | Revision int32 `protobuf:"varint,8,opt,name=revision,proto3" json:"revision,omitempty"` | |
432 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
433 | XXX_unrecognized []byte `json:"-"` | |
434 | XXX_sizecache int32 `json:"-"` | |
435 | } | |
436 | ||
437 | func (m *Bucket) Reset() { *m = Bucket{} } | |
438 | func (m *Bucket) String() string { return proto.CompactTextString(m) } | |
439 | func (*Bucket) ProtoMessage() {} | |
440 | func (*Bucket) Descriptor() ([]byte, []int) { | |
441 | return fileDescriptor_pyre_492df08819220afa, []int{5} | |
442 | } | |
443 | func (m *Bucket) XXX_Unmarshal(b []byte) error { | |
444 | return xxx_messageInfo_Bucket.Unmarshal(m, b) | |
445 | } | |
446 | func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
447 | return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) | |
448 | } | |
449 | func (dst *Bucket) XXX_Merge(src proto.Message) { | |
450 | xxx_messageInfo_Bucket.Merge(dst, src) | |
451 | } | |
452 | func (m *Bucket) XXX_Size() int { | |
453 | return xxx_messageInfo_Bucket.Size(m) | |
454 | } | |
455 | func (m *Bucket) XXX_DiscardUnknown() { | |
456 | xxx_messageInfo_Bucket.DiscardUnknown(m) | |
457 | } | |
458 | ||
459 | var xxx_messageInfo_Bucket proto.InternalMessageInfo | |
460 | ||
461 | func (m *Bucket) GetAccountId() string { | |
462 | if m != nil { | |
463 | return m.AccountId | |
464 | } | |
465 | return "" | |
466 | } | |
467 | ||
468 | func (m *Bucket) GetBucketId() string { | |
469 | if m != nil { | |
470 | return m.BucketId | |
471 | } | |
472 | return "" | |
473 | } | |
474 | ||
475 | func (m *Bucket) GetBucketName() string { | |
476 | if m != nil { | |
477 | return m.BucketName | |
478 | } | |
479 | return "" | |
480 | } | |
481 | ||
482 | func (m *Bucket) GetBucketType() string { | |
483 | if m != nil { | |
484 | return m.BucketType | |
485 | } | |
486 | return "" | |
487 | } | |
488 | ||
489 | func (m *Bucket) GetBucketInfo() map[string]string { | |
490 | if m != nil { | |
491 | return m.BucketInfo | |
492 | } | |
493 | return nil | |
494 | } | |
495 | ||
496 | func (m *Bucket) GetCoresRules() []*CorsRule { | |
497 | if m != nil { | |
498 | return m.CoresRules | |
499 | } | |
500 | return nil | |
501 | } | |
502 | ||
503 | func (m *Bucket) GetLifecycleRules() []*LifecycleRule { | |
504 | if m != nil { | |
505 | return m.LifecycleRules | |
506 | } | |
507 | return nil | |
508 | } | |
509 | ||
510 | func (m *Bucket) GetRevision() int32 { | |
511 | if m != nil { | |
512 | return m.Revision | |
513 | } | |
514 | return 0 | |
515 | } | |
516 | ||
517 | type ListBucketsResponse struct { | |
518 | Buckets []*Bucket `protobuf:"bytes,1,rep,name=buckets,proto3" json:"buckets,omitempty"` | |
519 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
520 | XXX_unrecognized []byte `json:"-"` | |
521 | XXX_sizecache int32 `json:"-"` | |
522 | } | |
523 | ||
524 | func (m *ListBucketsResponse) Reset() { *m = ListBucketsResponse{} } | |
525 | func (m *ListBucketsResponse) String() string { return proto.CompactTextString(m) } | |
526 | func (*ListBucketsResponse) ProtoMessage() {} | |
527 | func (*ListBucketsResponse) Descriptor() ([]byte, []int) { | |
528 | return fileDescriptor_pyre_492df08819220afa, []int{6} | |
529 | } | |
530 | func (m *ListBucketsResponse) XXX_Unmarshal(b []byte) error { | |
531 | return xxx_messageInfo_ListBucketsResponse.Unmarshal(m, b) | |
532 | } | |
533 | func (m *ListBucketsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
534 | return xxx_messageInfo_ListBucketsResponse.Marshal(b, m, deterministic) | |
535 | } | |
536 | func (dst *ListBucketsResponse) XXX_Merge(src proto.Message) { | |
537 | xxx_messageInfo_ListBucketsResponse.Merge(dst, src) | |
538 | } | |
539 | func (m *ListBucketsResponse) XXX_Size() int { | |
540 | return xxx_messageInfo_ListBucketsResponse.Size(m) | |
541 | } | |
542 | func (m *ListBucketsResponse) XXX_DiscardUnknown() { | |
543 | xxx_messageInfo_ListBucketsResponse.DiscardUnknown(m) | |
544 | } | |
545 | ||
546 | var xxx_messageInfo_ListBucketsResponse proto.InternalMessageInfo | |
547 | ||
548 | func (m *ListBucketsResponse) GetBuckets() []*Bucket { | |
549 | if m != nil { | |
550 | return m.Buckets | |
551 | } | |
552 | return nil | |
553 | } | |
554 | ||
555 | type GetUploadUrlRequest struct { | |
556 | BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` | |
557 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
558 | XXX_unrecognized []byte `json:"-"` | |
559 | XXX_sizecache int32 `json:"-"` | |
560 | } | |
561 | ||
562 | func (m *GetUploadUrlRequest) Reset() { *m = GetUploadUrlRequest{} } | |
563 | func (m *GetUploadUrlRequest) String() string { return proto.CompactTextString(m) } | |
564 | func (*GetUploadUrlRequest) ProtoMessage() {} | |
565 | func (*GetUploadUrlRequest) Descriptor() ([]byte, []int) { | |
566 | return fileDescriptor_pyre_492df08819220afa, []int{7} | |
567 | } | |
568 | func (m *GetUploadUrlRequest) XXX_Unmarshal(b []byte) error { | |
569 | return xxx_messageInfo_GetUploadUrlRequest.Unmarshal(m, b) | |
570 | } | |
571 | func (m *GetUploadUrlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
572 | return xxx_messageInfo_GetUploadUrlRequest.Marshal(b, m, deterministic) | |
573 | } | |
574 | func (dst *GetUploadUrlRequest) XXX_Merge(src proto.Message) { | |
575 | xxx_messageInfo_GetUploadUrlRequest.Merge(dst, src) | |
576 | } | |
577 | func (m *GetUploadUrlRequest) XXX_Size() int { | |
578 | return xxx_messageInfo_GetUploadUrlRequest.Size(m) | |
579 | } | |
580 | func (m *GetUploadUrlRequest) XXX_DiscardUnknown() { | |
581 | xxx_messageInfo_GetUploadUrlRequest.DiscardUnknown(m) | |
582 | } | |
583 | ||
584 | var xxx_messageInfo_GetUploadUrlRequest proto.InternalMessageInfo | |
585 | ||
586 | func (m *GetUploadUrlRequest) GetBucketId() string { | |
587 | if m != nil { | |
588 | return m.BucketId | |
589 | } | |
590 | return "" | |
591 | } | |
592 | ||
593 | type GetUploadUrlResponse struct { | |
594 | BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` | |
595 | UploadUrl string `protobuf:"bytes,2,opt,name=upload_url,json=uploadUrl,proto3" json:"upload_url,omitempty"` | |
596 | AuthorizationToken string `protobuf:"bytes,3,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"` | |
597 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
598 | XXX_unrecognized []byte `json:"-"` | |
599 | XXX_sizecache int32 `json:"-"` | |
600 | } | |
601 | ||
602 | func (m *GetUploadUrlResponse) Reset() { *m = GetUploadUrlResponse{} } | |
603 | func (m *GetUploadUrlResponse) String() string { return proto.CompactTextString(m) } | |
604 | func (*GetUploadUrlResponse) ProtoMessage() {} | |
605 | func (*GetUploadUrlResponse) Descriptor() ([]byte, []int) { | |
606 | return fileDescriptor_pyre_492df08819220afa, []int{8} | |
607 | } | |
608 | func (m *GetUploadUrlResponse) XXX_Unmarshal(b []byte) error { | |
609 | return xxx_messageInfo_GetUploadUrlResponse.Unmarshal(m, b) | |
610 | } | |
611 | func (m *GetUploadUrlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
612 | return xxx_messageInfo_GetUploadUrlResponse.Marshal(b, m, deterministic) | |
613 | } | |
614 | func (dst *GetUploadUrlResponse) XXX_Merge(src proto.Message) { | |
615 | xxx_messageInfo_GetUploadUrlResponse.Merge(dst, src) | |
616 | } | |
617 | func (m *GetUploadUrlResponse) XXX_Size() int { | |
618 | return xxx_messageInfo_GetUploadUrlResponse.Size(m) | |
619 | } | |
620 | func (m *GetUploadUrlResponse) XXX_DiscardUnknown() { | |
621 | xxx_messageInfo_GetUploadUrlResponse.DiscardUnknown(m) | |
622 | } | |
623 | ||
624 | var xxx_messageInfo_GetUploadUrlResponse proto.InternalMessageInfo | |
625 | ||
626 | func (m *GetUploadUrlResponse) GetBucketId() string { | |
627 | if m != nil { | |
628 | return m.BucketId | |
629 | } | |
630 | return "" | |
631 | } | |
632 | ||
633 | func (m *GetUploadUrlResponse) GetUploadUrl() string { | |
634 | if m != nil { | |
635 | return m.UploadUrl | |
636 | } | |
637 | return "" | |
638 | } | |
639 | ||
640 | func (m *GetUploadUrlResponse) GetAuthorizationToken() string { | |
641 | if m != nil { | |
642 | return m.AuthorizationToken | |
643 | } | |
644 | return "" | |
645 | } | |
646 | ||
647 | type UploadFileResponse struct { | |
648 | FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` | |
649 | FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` | |
650 | AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` | |
651 | BucketId string `protobuf:"bytes,4,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` | |
652 | ContentLength int32 `protobuf:"varint,5,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"` | |
653 | ContentSha1 string `protobuf:"bytes,6,opt,name=content_sha1,json=contentSha1,proto3" json:"content_sha1,omitempty"` | |
654 | ContentType string `protobuf:"bytes,7,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` | |
655 | FileInfo map[string]string `protobuf:"bytes,8,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` | |
656 | Action string `protobuf:"bytes,9,opt,name=action,proto3" json:"action,omitempty"` | |
657 | UploadTimestamp int64 `protobuf:"varint,10,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"` | |
658 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
659 | XXX_unrecognized []byte `json:"-"` | |
660 | XXX_sizecache int32 `json:"-"` | |
661 | } | |
662 | ||
663 | func (m *UploadFileResponse) Reset() { *m = UploadFileResponse{} } | |
664 | func (m *UploadFileResponse) String() string { return proto.CompactTextString(m) } | |
665 | func (*UploadFileResponse) ProtoMessage() {} | |
666 | func (*UploadFileResponse) Descriptor() ([]byte, []int) { | |
667 | return fileDescriptor_pyre_492df08819220afa, []int{9} | |
668 | } | |
669 | func (m *UploadFileResponse) XXX_Unmarshal(b []byte) error { | |
670 | return xxx_messageInfo_UploadFileResponse.Unmarshal(m, b) | |
671 | } | |
672 | func (m *UploadFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
673 | return xxx_messageInfo_UploadFileResponse.Marshal(b, m, deterministic) | |
674 | } | |
675 | func (dst *UploadFileResponse) XXX_Merge(src proto.Message) { | |
676 | xxx_messageInfo_UploadFileResponse.Merge(dst, src) | |
677 | } | |
678 | func (m *UploadFileResponse) XXX_Size() int { | |
679 | return xxx_messageInfo_UploadFileResponse.Size(m) | |
680 | } | |
681 | func (m *UploadFileResponse) XXX_DiscardUnknown() { | |
682 | xxx_messageInfo_UploadFileResponse.DiscardUnknown(m) | |
683 | } | |
684 | ||
685 | var xxx_messageInfo_UploadFileResponse proto.InternalMessageInfo | |
686 | ||
687 | func (m *UploadFileResponse) GetFileId() string { | |
688 | if m != nil { | |
689 | return m.FileId | |
690 | } | |
691 | return "" | |
692 | } | |
693 | ||
694 | func (m *UploadFileResponse) GetFileName() string { | |
695 | if m != nil { | |
696 | return m.FileName | |
697 | } | |
698 | return "" | |
699 | } | |
700 | ||
701 | func (m *UploadFileResponse) GetAccountId() string { | |
702 | if m != nil { | |
703 | return m.AccountId | |
704 | } | |
705 | return "" | |
706 | } | |
707 | ||
708 | func (m *UploadFileResponse) GetBucketId() string { | |
709 | if m != nil { | |
710 | return m.BucketId | |
711 | } | |
712 | return "" | |
713 | } | |
714 | ||
715 | func (m *UploadFileResponse) GetContentLength() int32 { | |
716 | if m != nil { | |
717 | return m.ContentLength | |
718 | } | |
719 | return 0 | |
720 | } | |
721 | ||
722 | func (m *UploadFileResponse) GetContentSha1() string { | |
723 | if m != nil { | |
724 | return m.ContentSha1 | |
725 | } | |
726 | return "" | |
727 | } | |
728 | ||
729 | func (m *UploadFileResponse) GetContentType() string { | |
730 | if m != nil { | |
731 | return m.ContentType | |
732 | } | |
733 | return "" | |
734 | } | |
735 | ||
736 | func (m *UploadFileResponse) GetFileInfo() map[string]string { | |
737 | if m != nil { | |
738 | return m.FileInfo | |
739 | } | |
740 | return nil | |
741 | } | |
742 | ||
743 | func (m *UploadFileResponse) GetAction() string { | |
744 | if m != nil { | |
745 | return m.Action | |
746 | } | |
747 | return "" | |
748 | } | |
749 | ||
750 | func (m *UploadFileResponse) GetUploadTimestamp() int64 { | |
751 | if m != nil { | |
752 | return m.UploadTimestamp | |
753 | } | |
754 | return 0 | |
755 | } | |
756 | ||
757 | type StartLargeFileRequest struct { | |
758 | BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` | |
759 | FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` | |
760 | ContentType string `protobuf:"bytes,3,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` | |
761 | FileInfo map[string]string `protobuf:"bytes,4,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` | |
762 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
763 | XXX_unrecognized []byte `json:"-"` | |
764 | XXX_sizecache int32 `json:"-"` | |
765 | } | |
766 | ||
767 | func (m *StartLargeFileRequest) Reset() { *m = StartLargeFileRequest{} } | |
768 | func (m *StartLargeFileRequest) String() string { return proto.CompactTextString(m) } | |
769 | func (*StartLargeFileRequest) ProtoMessage() {} | |
770 | func (*StartLargeFileRequest) Descriptor() ([]byte, []int) { | |
771 | return fileDescriptor_pyre_492df08819220afa, []int{10} | |
772 | } | |
773 | func (m *StartLargeFileRequest) XXX_Unmarshal(b []byte) error { | |
774 | return xxx_messageInfo_StartLargeFileRequest.Unmarshal(m, b) | |
775 | } | |
776 | func (m *StartLargeFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
777 | return xxx_messageInfo_StartLargeFileRequest.Marshal(b, m, deterministic) | |
778 | } | |
779 | func (dst *StartLargeFileRequest) XXX_Merge(src proto.Message) { | |
780 | xxx_messageInfo_StartLargeFileRequest.Merge(dst, src) | |
781 | } | |
782 | func (m *StartLargeFileRequest) XXX_Size() int { | |
783 | return xxx_messageInfo_StartLargeFileRequest.Size(m) | |
784 | } | |
785 | func (m *StartLargeFileRequest) XXX_DiscardUnknown() { | |
786 | xxx_messageInfo_StartLargeFileRequest.DiscardUnknown(m) | |
787 | } | |
788 | ||
789 | var xxx_messageInfo_StartLargeFileRequest proto.InternalMessageInfo | |
790 | ||
791 | func (m *StartLargeFileRequest) GetBucketId() string { | |
792 | if m != nil { | |
793 | return m.BucketId | |
794 | } | |
795 | return "" | |
796 | } | |
797 | ||
798 | func (m *StartLargeFileRequest) GetFileName() string { | |
799 | if m != nil { | |
800 | return m.FileName | |
801 | } | |
802 | return "" | |
803 | } | |
804 | ||
805 | func (m *StartLargeFileRequest) GetContentType() string { | |
806 | if m != nil { | |
807 | return m.ContentType | |
808 | } | |
809 | return "" | |
810 | } | |
811 | ||
812 | func (m *StartLargeFileRequest) GetFileInfo() map[string]string { | |
813 | if m != nil { | |
814 | return m.FileInfo | |
815 | } | |
816 | return nil | |
817 | } | |
818 | ||
819 | type StartLargeFileResponse struct { | |
820 | FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` | |
821 | FileName string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` | |
822 | AccountId string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"` | |
823 | BucketId string `protobuf:"bytes,4,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` | |
824 | ContentType string `protobuf:"bytes,5,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` | |
825 | FileInfo map[string]string `protobuf:"bytes,6,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` | |
826 | UploadTimestamp int64 `protobuf:"varint,7,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"` | |
827 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
828 | XXX_unrecognized []byte `json:"-"` | |
829 | XXX_sizecache int32 `json:"-"` | |
830 | } | |
831 | ||
832 | func (m *StartLargeFileResponse) Reset() { *m = StartLargeFileResponse{} } | |
833 | func (m *StartLargeFileResponse) String() string { return proto.CompactTextString(m) } | |
834 | func (*StartLargeFileResponse) ProtoMessage() {} | |
835 | func (*StartLargeFileResponse) Descriptor() ([]byte, []int) { | |
836 | return fileDescriptor_pyre_492df08819220afa, []int{11} | |
837 | } | |
838 | func (m *StartLargeFileResponse) XXX_Unmarshal(b []byte) error { | |
839 | return xxx_messageInfo_StartLargeFileResponse.Unmarshal(m, b) | |
840 | } | |
841 | func (m *StartLargeFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
842 | return xxx_messageInfo_StartLargeFileResponse.Marshal(b, m, deterministic) | |
843 | } | |
844 | func (dst *StartLargeFileResponse) XXX_Merge(src proto.Message) { | |
845 | xxx_messageInfo_StartLargeFileResponse.Merge(dst, src) | |
846 | } | |
847 | func (m *StartLargeFileResponse) XXX_Size() int { | |
848 | return xxx_messageInfo_StartLargeFileResponse.Size(m) | |
849 | } | |
850 | func (m *StartLargeFileResponse) XXX_DiscardUnknown() { | |
851 | xxx_messageInfo_StartLargeFileResponse.DiscardUnknown(m) | |
852 | } | |
853 | ||
854 | var xxx_messageInfo_StartLargeFileResponse proto.InternalMessageInfo | |
855 | ||
856 | func (m *StartLargeFileResponse) GetFileId() string { | |
857 | if m != nil { | |
858 | return m.FileId | |
859 | } | |
860 | return "" | |
861 | } | |
862 | ||
863 | func (m *StartLargeFileResponse) GetFileName() string { | |
864 | if m != nil { | |
865 | return m.FileName | |
866 | } | |
867 | return "" | |
868 | } | |
869 | ||
870 | func (m *StartLargeFileResponse) GetAccountId() string { | |
871 | if m != nil { | |
872 | return m.AccountId | |
873 | } | |
874 | return "" | |
875 | } | |
876 | ||
877 | func (m *StartLargeFileResponse) GetBucketId() string { | |
878 | if m != nil { | |
879 | return m.BucketId | |
880 | } | |
881 | return "" | |
882 | } | |
883 | ||
884 | func (m *StartLargeFileResponse) GetContentType() string { | |
885 | if m != nil { | |
886 | return m.ContentType | |
887 | } | |
888 | return "" | |
889 | } | |
890 | ||
891 | func (m *StartLargeFileResponse) GetFileInfo() map[string]string { | |
892 | if m != nil { | |
893 | return m.FileInfo | |
894 | } | |
895 | return nil | |
896 | } | |
897 | ||
898 | func (m *StartLargeFileResponse) GetUploadTimestamp() int64 { | |
899 | if m != nil { | |
900 | return m.UploadTimestamp | |
901 | } | |
902 | return 0 | |
903 | } | |
904 | ||
905 | type GetUploadPartUrlRequest struct { | |
906 | FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` | |
907 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
908 | XXX_unrecognized []byte `json:"-"` | |
909 | XXX_sizecache int32 `json:"-"` | |
910 | } | |
911 | ||
912 | func (m *GetUploadPartUrlRequest) Reset() { *m = GetUploadPartUrlRequest{} } | |
913 | func (m *GetUploadPartUrlRequest) String() string { return proto.CompactTextString(m) } | |
914 | func (*GetUploadPartUrlRequest) ProtoMessage() {} | |
915 | func (*GetUploadPartUrlRequest) Descriptor() ([]byte, []int) { | |
916 | return fileDescriptor_pyre_492df08819220afa, []int{12} | |
917 | } | |
918 | func (m *GetUploadPartUrlRequest) XXX_Unmarshal(b []byte) error { | |
919 | return xxx_messageInfo_GetUploadPartUrlRequest.Unmarshal(m, b) | |
920 | } | |
921 | func (m *GetUploadPartUrlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
922 | return xxx_messageInfo_GetUploadPartUrlRequest.Marshal(b, m, deterministic) | |
923 | } | |
924 | func (dst *GetUploadPartUrlRequest) XXX_Merge(src proto.Message) { | |
925 | xxx_messageInfo_GetUploadPartUrlRequest.Merge(dst, src) | |
926 | } | |
927 | func (m *GetUploadPartUrlRequest) XXX_Size() int { | |
928 | return xxx_messageInfo_GetUploadPartUrlRequest.Size(m) | |
929 | } | |
930 | func (m *GetUploadPartUrlRequest) XXX_DiscardUnknown() { | |
931 | xxx_messageInfo_GetUploadPartUrlRequest.DiscardUnknown(m) | |
932 | } | |
933 | ||
934 | var xxx_messageInfo_GetUploadPartUrlRequest proto.InternalMessageInfo | |
935 | ||
936 | func (m *GetUploadPartUrlRequest) GetFileId() string { | |
937 | if m != nil { | |
938 | return m.FileId | |
939 | } | |
940 | return "" | |
941 | } | |
942 | ||
943 | type GetUploadPartUrlResponse struct { | |
944 | FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` | |
945 | UploadUrl string `protobuf:"bytes,2,opt,name=upload_url,json=uploadUrl,proto3" json:"upload_url,omitempty"` | |
946 | AuthorizationToken string `protobuf:"bytes,3,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"` | |
947 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
948 | XXX_unrecognized []byte `json:"-"` | |
949 | XXX_sizecache int32 `json:"-"` | |
950 | } | |
951 | ||
952 | func (m *GetUploadPartUrlResponse) Reset() { *m = GetUploadPartUrlResponse{} } | |
953 | func (m *GetUploadPartUrlResponse) String() string { return proto.CompactTextString(m) } | |
954 | func (*GetUploadPartUrlResponse) ProtoMessage() {} | |
955 | func (*GetUploadPartUrlResponse) Descriptor() ([]byte, []int) { | |
956 | return fileDescriptor_pyre_492df08819220afa, []int{13} | |
957 | } | |
958 | func (m *GetUploadPartUrlResponse) XXX_Unmarshal(b []byte) error { | |
959 | return xxx_messageInfo_GetUploadPartUrlResponse.Unmarshal(m, b) | |
960 | } | |
961 | func (m *GetUploadPartUrlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
962 | return xxx_messageInfo_GetUploadPartUrlResponse.Marshal(b, m, deterministic) | |
963 | } | |
964 | func (dst *GetUploadPartUrlResponse) XXX_Merge(src proto.Message) { | |
965 | xxx_messageInfo_GetUploadPartUrlResponse.Merge(dst, src) | |
966 | } | |
967 | func (m *GetUploadPartUrlResponse) XXX_Size() int { | |
968 | return xxx_messageInfo_GetUploadPartUrlResponse.Size(m) | |
969 | } | |
970 | func (m *GetUploadPartUrlResponse) XXX_DiscardUnknown() { | |
971 | xxx_messageInfo_GetUploadPartUrlResponse.DiscardUnknown(m) | |
972 | } | |
973 | ||
974 | var xxx_messageInfo_GetUploadPartUrlResponse proto.InternalMessageInfo | |
975 | ||
976 | func (m *GetUploadPartUrlResponse) GetFileId() string { | |
977 | if m != nil { | |
978 | return m.FileId | |
979 | } | |
980 | return "" | |
981 | } | |
982 | ||
983 | func (m *GetUploadPartUrlResponse) GetUploadUrl() string { | |
984 | if m != nil { | |
985 | return m.UploadUrl | |
986 | } | |
987 | return "" | |
988 | } | |
989 | ||
990 | func (m *GetUploadPartUrlResponse) GetAuthorizationToken() string { | |
991 | if m != nil { | |
992 | return m.AuthorizationToken | |
993 | } | |
994 | return "" | |
995 | } | |
996 | ||
997 | type FinishLargeFileRequest struct { | |
998 | FileId string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"` | |
999 | PartSha1Array []string `protobuf:"bytes,2,rep,name=part_sha1_array,json=partSha1Array,proto3" json:"part_sha1_array,omitempty"` | |
1000 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
1001 | XXX_unrecognized []byte `json:"-"` | |
1002 | XXX_sizecache int32 `json:"-"` | |
1003 | } | |
1004 | ||
1005 | func (m *FinishLargeFileRequest) Reset() { *m = FinishLargeFileRequest{} } | |
1006 | func (m *FinishLargeFileRequest) String() string { return proto.CompactTextString(m) } | |
1007 | func (*FinishLargeFileRequest) ProtoMessage() {} | |
1008 | func (*FinishLargeFileRequest) Descriptor() ([]byte, []int) { | |
1009 | return fileDescriptor_pyre_492df08819220afa, []int{14} | |
1010 | } | |
1011 | func (m *FinishLargeFileRequest) XXX_Unmarshal(b []byte) error { | |
1012 | return xxx_messageInfo_FinishLargeFileRequest.Unmarshal(m, b) | |
1013 | } | |
1014 | func (m *FinishLargeFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
1015 | return xxx_messageInfo_FinishLargeFileRequest.Marshal(b, m, deterministic) | |
1016 | } | |
1017 | func (dst *FinishLargeFileRequest) XXX_Merge(src proto.Message) { | |
1018 | xxx_messageInfo_FinishLargeFileRequest.Merge(dst, src) | |
1019 | } | |
1020 | func (m *FinishLargeFileRequest) XXX_Size() int { | |
1021 | return xxx_messageInfo_FinishLargeFileRequest.Size(m) | |
1022 | } | |
1023 | func (m *FinishLargeFileRequest) XXX_DiscardUnknown() { | |
1024 | xxx_messageInfo_FinishLargeFileRequest.DiscardUnknown(m) | |
1025 | } | |
1026 | ||
1027 | var xxx_messageInfo_FinishLargeFileRequest proto.InternalMessageInfo | |
1028 | ||
1029 | func (m *FinishLargeFileRequest) GetFileId() string { | |
1030 | if m != nil { | |
1031 | return m.FileId | |
1032 | } | |
1033 | return "" | |
1034 | } | |
1035 | ||
1036 | func (m *FinishLargeFileRequest) GetPartSha1Array() []string { | |
1037 | if m != nil { | |
1038 | return m.PartSha1Array | |
1039 | } | |
1040 | return nil | |
1041 | } | |
1042 | ||
// FinishLargeFileResponse is the response message for FinishLargeFile.
// The fields mirror the JSON names carried in the protobuf/json tags
// (file_id, file_name, account_id, ...).
type FinishLargeFileResponse struct {
	FileId        string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
	FileName      string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
	AccountId     string `protobuf:"bytes,3,opt,name=account_id,json=accountId,proto3" json:"account_id,omitempty"`
	BucketId      string `protobuf:"bytes,4,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"`
	ContentLength int64  `protobuf:"varint,5,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"`
	ContentSha1   string `protobuf:"bytes,6,opt,name=content_sha1,json=contentSha1,proto3" json:"content_sha1,omitempty"`
	ContentType   string `protobuf:"bytes,7,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
	// FileInfo is a proto3 map field; it has its own registered map-entry type.
	FileInfo map[string]string `protobuf:"bytes,8,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	Action   string            `protobuf:"bytes,9,opt,name=action,proto3" json:"action,omitempty"`
	UploadTimestamp      int64    `protobuf:"varint,10,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *FinishLargeFileResponse) Reset() { *m = FinishLargeFileResponse{} }

// String renders the message in the compact proto text format.
func (m *FinishLargeFileResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*FinishLargeFileResponse) ProtoMessage() {}

// Descriptor returns the serialized file descriptor and this message's
// index (15) within it.
func (*FinishLargeFileResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_pyre_492df08819220afa, []int{15}
}
// XXX_Unmarshal decodes wire-format bytes into m via the cached message info.
func (m *FinishLargeFileResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_FinishLargeFileResponse.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *FinishLargeFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_FinishLargeFileResponse.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into dst.
func (dst *FinishLargeFileResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FinishLargeFileResponse.Merge(dst, src)
}
// XXX_Size reports the encoded size of the message in bytes.
func (m *FinishLargeFileResponse) XXX_Size() int {
	return xxx_messageInfo_FinishLargeFileResponse.Size(m)
}
// XXX_DiscardUnknown drops any retained unknown fields.
func (m *FinishLargeFileResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_FinishLargeFileResponse.DiscardUnknown(m)
}

// xxx_messageInfo_FinishLargeFileResponse caches marshaling metadata for the type.
var xxx_messageInfo_FinishLargeFileResponse proto.InternalMessageInfo

// The Get* accessors below are nil-safe: each returns the field's zero
// value when the receiver is nil.

func (m *FinishLargeFileResponse) GetFileId() string {
	if m != nil {
		return m.FileId
	}
	return ""
}

func (m *FinishLargeFileResponse) GetFileName() string {
	if m != nil {
		return m.FileName
	}
	return ""
}

func (m *FinishLargeFileResponse) GetAccountId() string {
	if m != nil {
		return m.AccountId
	}
	return ""
}

func (m *FinishLargeFileResponse) GetBucketId() string {
	if m != nil {
		return m.BucketId
	}
	return ""
}

func (m *FinishLargeFileResponse) GetContentLength() int64 {
	if m != nil {
		return m.ContentLength
	}
	return 0
}

func (m *FinishLargeFileResponse) GetContentSha1() string {
	if m != nil {
		return m.ContentSha1
	}
	return ""
}

func (m *FinishLargeFileResponse) GetContentType() string {
	if m != nil {
		return m.ContentType
	}
	return ""
}

func (m *FinishLargeFileResponse) GetFileInfo() map[string]string {
	if m != nil {
		return m.FileInfo
	}
	return nil
}

func (m *FinishLargeFileResponse) GetAction() string {
	if m != nil {
		return m.Action
	}
	return ""
}

func (m *FinishLargeFileResponse) GetUploadTimestamp() int64 {
	if m != nil {
		return m.UploadTimestamp
	}
	return 0
}
1152 | ||
1153 | type ListFileVersionsRequest struct { | |
1154 | BucketId string `protobuf:"bytes,1,opt,name=bucket_id,json=bucketId,proto3" json:"bucket_id,omitempty"` | |
1155 | StartFileName string `protobuf:"bytes,2,opt,name=start_file_name,json=startFileName,proto3" json:"start_file_name,omitempty"` | |
1156 | StartFileId string `protobuf:"bytes,3,opt,name=start_file_id,json=startFileId,proto3" json:"start_file_id,omitempty"` | |
1157 | MaxFileCount int32 `protobuf:"varint,4,opt,name=max_file_count,json=maxFileCount,proto3" json:"max_file_count,omitempty"` | |
1158 | Prefix string `protobuf:"bytes,5,opt,name=prefix,proto3" json:"prefix,omitempty"` | |
1159 | Delimiter string `protobuf:"bytes,6,opt,name=delimiter,proto3" json:"delimiter,omitempty"` | |
1160 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
1161 | XXX_unrecognized []byte `json:"-"` | |
1162 | XXX_sizecache int32 `json:"-"` | |
1163 | } | |
1164 | ||
1165 | func (m *ListFileVersionsRequest) Reset() { *m = ListFileVersionsRequest{} } | |
1166 | func (m *ListFileVersionsRequest) String() string { return proto.CompactTextString(m) } | |
1167 | func (*ListFileVersionsRequest) ProtoMessage() {} | |
1168 | func (*ListFileVersionsRequest) Descriptor() ([]byte, []int) { | |
1169 | return fileDescriptor_pyre_492df08819220afa, []int{16} | |
1170 | } | |
1171 | func (m *ListFileVersionsRequest) XXX_Unmarshal(b []byte) error { | |
1172 | return xxx_messageInfo_ListFileVersionsRequest.Unmarshal(m, b) | |
1173 | } | |
1174 | func (m *ListFileVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
1175 | return xxx_messageInfo_ListFileVersionsRequest.Marshal(b, m, deterministic) | |
1176 | } | |
1177 | func (dst *ListFileVersionsRequest) XXX_Merge(src proto.Message) { | |
1178 | xxx_messageInfo_ListFileVersionsRequest.Merge(dst, src) | |
1179 | } | |
1180 | func (m *ListFileVersionsRequest) XXX_Size() int { | |
1181 | return xxx_messageInfo_ListFileVersionsRequest.Size(m) | |
1182 | } | |
1183 | func (m *ListFileVersionsRequest) XXX_DiscardUnknown() { | |
1184 | xxx_messageInfo_ListFileVersionsRequest.DiscardUnknown(m) | |
1185 | } | |
1186 | ||
1187 | var xxx_messageInfo_ListFileVersionsRequest proto.InternalMessageInfo | |
1188 | ||
1189 | func (m *ListFileVersionsRequest) GetBucketId() string { | |
1190 | if m != nil { | |
1191 | return m.BucketId | |
1192 | } | |
1193 | return "" | |
1194 | } | |
1195 | ||
1196 | func (m *ListFileVersionsRequest) GetStartFileName() string { | |
1197 | if m != nil { | |
1198 | return m.StartFileName | |
1199 | } | |
1200 | return "" | |
1201 | } | |
1202 | ||
1203 | func (m *ListFileVersionsRequest) GetStartFileId() string { | |
1204 | if m != nil { | |
1205 | return m.StartFileId | |
1206 | } | |
1207 | return "" | |
1208 | } | |
1209 | ||
1210 | func (m *ListFileVersionsRequest) GetMaxFileCount() int32 { | |
1211 | if m != nil { | |
1212 | return m.MaxFileCount | |
1213 | } | |
1214 | return 0 | |
1215 | } | |
1216 | ||
1217 | func (m *ListFileVersionsRequest) GetPrefix() string { | |
1218 | if m != nil { | |
1219 | return m.Prefix | |
1220 | } | |
1221 | return "" | |
1222 | } | |
1223 | ||
1224 | func (m *ListFileVersionsRequest) GetDelimiter() string { | |
1225 | if m != nil { | |
1226 | return m.Delimiter | |
1227 | } | |
1228 | return "" | |
1229 | } | |
1230 | ||
1231 | type ListFileVersionsResponse struct { | |
1232 | Files []*File `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty"` | |
1233 | NextFileName string `protobuf:"bytes,2,opt,name=next_file_name,json=nextFileName,proto3" json:"next_file_name,omitempty"` | |
1234 | NextFileId string `protobuf:"bytes,3,opt,name=next_file_id,json=nextFileId,proto3" json:"next_file_id,omitempty"` | |
1235 | XXX_NoUnkeyedLiteral struct{} `json:"-"` | |
1236 | XXX_unrecognized []byte `json:"-"` | |
1237 | XXX_sizecache int32 `json:"-"` | |
1238 | } | |
1239 | ||
1240 | func (m *ListFileVersionsResponse) Reset() { *m = ListFileVersionsResponse{} } | |
1241 | func (m *ListFileVersionsResponse) String() string { return proto.CompactTextString(m) } | |
1242 | func (*ListFileVersionsResponse) ProtoMessage() {} | |
1243 | func (*ListFileVersionsResponse) Descriptor() ([]byte, []int) { | |
1244 | return fileDescriptor_pyre_492df08819220afa, []int{17} | |
1245 | } | |
1246 | func (m *ListFileVersionsResponse) XXX_Unmarshal(b []byte) error { | |
1247 | return xxx_messageInfo_ListFileVersionsResponse.Unmarshal(m, b) | |
1248 | } | |
1249 | func (m *ListFileVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
1250 | return xxx_messageInfo_ListFileVersionsResponse.Marshal(b, m, deterministic) | |
1251 | } | |
1252 | func (dst *ListFileVersionsResponse) XXX_Merge(src proto.Message) { | |
1253 | xxx_messageInfo_ListFileVersionsResponse.Merge(dst, src) | |
1254 | } | |
1255 | func (m *ListFileVersionsResponse) XXX_Size() int { | |
1256 | return xxx_messageInfo_ListFileVersionsResponse.Size(m) | |
1257 | } | |
1258 | func (m *ListFileVersionsResponse) XXX_DiscardUnknown() { | |
1259 | xxx_messageInfo_ListFileVersionsResponse.DiscardUnknown(m) | |
1260 | } | |
1261 | ||
1262 | var xxx_messageInfo_ListFileVersionsResponse proto.InternalMessageInfo | |
1263 | ||
1264 | func (m *ListFileVersionsResponse) GetFiles() []*File { | |
1265 | if m != nil { | |
1266 | return m.Files | |
1267 | } | |
1268 | return nil | |
1269 | } | |
1270 | ||
1271 | func (m *ListFileVersionsResponse) GetNextFileName() string { | |
1272 | if m != nil { | |
1273 | return m.NextFileName | |
1274 | } | |
1275 | return "" | |
1276 | } | |
1277 | ||
1278 | func (m *ListFileVersionsResponse) GetNextFileId() string { | |
1279 | if m != nil { | |
1280 | return m.NextFileId | |
1281 | } | |
1282 | return "" | |
1283 | } | |
1284 | ||
// File describes a single file version as returned inside
// ListFileVersionsResponse.Files.
type File struct {
	FileId        string `protobuf:"bytes,1,opt,name=file_id,json=fileId,proto3" json:"file_id,omitempty"`
	FileName      string `protobuf:"bytes,2,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
	ContentLength int64  `protobuf:"varint,3,opt,name=content_length,json=contentLength,proto3" json:"content_length,omitempty"`
	ContentType   string `protobuf:"bytes,4,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
	ContentSha1   string `protobuf:"bytes,5,opt,name=content_sha1,json=contentSha1,proto3" json:"content_sha1,omitempty"`
	// FileInfo is a proto3 map field; it has its own registered map-entry type.
	FileInfo map[string]string `protobuf:"bytes,6,rep,name=file_info,json=fileInfo,proto3" json:"file_info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	Action   string            `protobuf:"bytes,7,opt,name=action,proto3" json:"action,omitempty"`
	Size     int64             `protobuf:"varint,8,opt,name=size,proto3" json:"size,omitempty"`
	UploadTimestamp      int64    `protobuf:"varint,9,opt,name=upload_timestamp,json=uploadTimestamp,proto3" json:"upload_timestamp,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *File) Reset() { *m = File{} }

// String renders the message in the compact proto text format.
func (m *File) String() string { return proto.CompactTextString(m) }

// ProtoMessage tags the type as a protobuf message.
func (*File) ProtoMessage() {}

// Descriptor returns the serialized file descriptor and this message's
// index (18) within it.
func (*File) Descriptor() ([]byte, []int) {
	return fileDescriptor_pyre_492df08819220afa, []int{18}
}
// XXX_Unmarshal decodes wire-format bytes into m via the cached message info.
func (m *File) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_File.Unmarshal(m, b)
}
// XXX_Marshal appends the wire-format encoding of m to b.
func (m *File) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_File.Marshal(b, m, deterministic)
}
// XXX_Merge merges src into dst.
func (dst *File) XXX_Merge(src proto.Message) {
	xxx_messageInfo_File.Merge(dst, src)
}
// XXX_Size reports the encoded size of the message in bytes.
func (m *File) XXX_Size() int {
	return xxx_messageInfo_File.Size(m)
}
// XXX_DiscardUnknown drops any retained unknown fields.
func (m *File) XXX_DiscardUnknown() {
	xxx_messageInfo_File.DiscardUnknown(m)
}

// xxx_messageInfo_File caches marshaling metadata for the type.
var xxx_messageInfo_File proto.InternalMessageInfo

// The Get* accessors below are nil-safe: each returns the field's zero
// value when the receiver is nil.

func (m *File) GetFileId() string {
	if m != nil {
		return m.FileId
	}
	return ""
}

func (m *File) GetFileName() string {
	if m != nil {
		return m.FileName
	}
	return ""
}

func (m *File) GetContentLength() int64 {
	if m != nil {
		return m.ContentLength
	}
	return 0
}

func (m *File) GetContentType() string {
	if m != nil {
		return m.ContentType
	}
	return ""
}

func (m *File) GetContentSha1() string {
	if m != nil {
		return m.ContentSha1
	}
	return ""
}

func (m *File) GetFileInfo() map[string]string {
	if m != nil {
		return m.FileInfo
	}
	return nil
}

func (m *File) GetAction() string {
	if m != nil {
		return m.Action
	}
	return ""
}

func (m *File) GetSize() int64 {
	if m != nil {
		return m.Size
	}
	return 0
}

func (m *File) GetUploadTimestamp() int64 {
	if m != nil {
		return m.UploadTimestamp
	}
	return 0
}
1386 | ||
// init registers every message type in this file (and the map-entry types
// backing the map<string,string> fields) with the proto registry under its
// fully-qualified "pyre.proto.*" name, so the types can be looked up by
// name at runtime.
func init() {
	proto.RegisterType((*AuthorizeAccountRequest)(nil), "pyre.proto.AuthorizeAccountRequest")
	proto.RegisterType((*AuthorizeAccountResponse)(nil), "pyre.proto.AuthorizeAccountResponse")
	proto.RegisterType((*ListBucketsRequest)(nil), "pyre.proto.ListBucketsRequest")
	proto.RegisterType((*LifecycleRule)(nil), "pyre.proto.LifecycleRule")
	proto.RegisterType((*CorsRule)(nil), "pyre.proto.CorsRule")
	proto.RegisterType((*Bucket)(nil), "pyre.proto.Bucket")
	proto.RegisterMapType((map[string]string)(nil), "pyre.proto.Bucket.BucketInfoEntry")
	proto.RegisterType((*ListBucketsResponse)(nil), "pyre.proto.ListBucketsResponse")
	proto.RegisterType((*GetUploadUrlRequest)(nil), "pyre.proto.GetUploadUrlRequest")
	proto.RegisterType((*GetUploadUrlResponse)(nil), "pyre.proto.GetUploadUrlResponse")
	proto.RegisterType((*UploadFileResponse)(nil), "pyre.proto.UploadFileResponse")
	proto.RegisterMapType((map[string]string)(nil), "pyre.proto.UploadFileResponse.FileInfoEntry")
	proto.RegisterType((*StartLargeFileRequest)(nil), "pyre.proto.StartLargeFileRequest")
	proto.RegisterMapType((map[string]string)(nil), "pyre.proto.StartLargeFileRequest.FileInfoEntry")
	proto.RegisterType((*StartLargeFileResponse)(nil), "pyre.proto.StartLargeFileResponse")
	proto.RegisterMapType((map[string]string)(nil), "pyre.proto.StartLargeFileResponse.FileInfoEntry")
	proto.RegisterType((*GetUploadPartUrlRequest)(nil), "pyre.proto.GetUploadPartUrlRequest")
	proto.RegisterType((*GetUploadPartUrlResponse)(nil), "pyre.proto.GetUploadPartUrlResponse")
	proto.RegisterType((*FinishLargeFileRequest)(nil), "pyre.proto.FinishLargeFileRequest")
	proto.RegisterType((*FinishLargeFileResponse)(nil), "pyre.proto.FinishLargeFileResponse")
	proto.RegisterMapType((map[string]string)(nil), "pyre.proto.FinishLargeFileResponse.FileInfoEntry")
	proto.RegisterType((*ListFileVersionsRequest)(nil), "pyre.proto.ListFileVersionsRequest")
	proto.RegisterType((*ListFileVersionsResponse)(nil), "pyre.proto.ListFileVersionsResponse")
	proto.RegisterType((*File)(nil), "pyre.proto.File")
	proto.RegisterMapType((map[string]string)(nil), "pyre.proto.File.FileInfoEntry")
}
1414 | ||
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against:
// the constant only exists in grpc versions that support this generated
// code's API surface, so an incompatible module fails the build.
const _ = grpc.SupportPackageIsVersion4
1422 | ||
// PyreServiceClient is the client API for PyreService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type PyreServiceClient interface {
	// Used to log in to the B2 API. Returns an authorization token that can be
	// used for account-level operations, and a URL that should be used as the
	// base URL for subsequent API calls.
	AuthorizeAccount(ctx context.Context, in *AuthorizeAccountRequest, opts ...grpc.CallOption) (*AuthorizeAccountResponse, error)
	// Lists buckets associated with an account, in alphabetical order by bucket
	// name.
	ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error)
	// Creates a new bucket. A bucket belongs to the account used to create it.
	//
	// Buckets can be named. The name must be globally unique. No account can use
	// a bucket with the same name. Buckets are assigned a unique bucketId which
	// is used when uploading, downloading, or deleting files.
	//
	// There is a limit of 100 buckets per account.
	CreateBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error)
	// Deletes the bucket specified. Only buckets that contain no version of any
	// files can be deleted.
	DeleteBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error)
	// Obtains a URL (and token) to use for uploading files to a bucket.
	GetUploadUrl(ctx context.Context, in *GetUploadUrlRequest, opts ...grpc.CallOption) (*GetUploadUrlResponse, error)
	// Prepares for uploading the parts of a large file.
	StartLargeFile(ctx context.Context, in *StartLargeFileRequest, opts ...grpc.CallOption) (*StartLargeFileResponse, error)
	// Gets an URL to use for uploading parts of a large file.
	GetUploadPartUrl(ctx context.Context, in *GetUploadPartUrlRequest, opts ...grpc.CallOption) (*GetUploadPartUrlResponse, error)
	// Converts the parts that have been uploaded into a single B2 file.
	FinishLargeFile(ctx context.Context, in *FinishLargeFileRequest, opts ...grpc.CallOption) (*FinishLargeFileResponse, error)
	// Lists all of the versions of all of the files contained in one bucket, in
	// alphabetical order by file name, and by reverse of date/time uploaded for
	// versions of files with the same name.
	ListFileVersions(ctx context.Context, in *ListFileVersionsRequest, opts ...grpc.CallOption) (*ListFileVersionsResponse, error)
}

// pyreServiceClient is the concrete PyreServiceClient; every method issues
// a unary RPC over the wrapped *grpc.ClientConn.
type pyreServiceClient struct {
	cc *grpc.ClientConn
}

// NewPyreServiceClient returns a PyreService client stub backed by cc.
func NewPyreServiceClient(cc *grpc.ClientConn) PyreServiceClient {
	return &pyreServiceClient{cc}
}
1465 | ||
1466 | func (c *pyreServiceClient) AuthorizeAccount(ctx context.Context, in *AuthorizeAccountRequest, opts ...grpc.CallOption) (*AuthorizeAccountResponse, error) { | |
1467 | out := new(AuthorizeAccountResponse) | |
1468 | err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/AuthorizeAccount", in, out, opts...) | |
1469 | if err != nil { | |
1470 | return nil, err | |
1471 | } | |
1472 | return out, nil | |
1473 | } | |
1474 | ||
1475 | func (c *pyreServiceClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) { | |
1476 | out := new(ListBucketsResponse) | |
1477 | err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/ListBuckets", in, out, opts...) | |
1478 | if err != nil { | |
1479 | return nil, err | |
1480 | } | |
1481 | return out, nil | |
1482 | } | |
1483 | ||
1484 | func (c *pyreServiceClient) CreateBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error) { | |
1485 | out := new(Bucket) | |
1486 | err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/CreateBucket", in, out, opts...) | |
1487 | if err != nil { | |
1488 | return nil, err | |
1489 | } | |
1490 | return out, nil | |
1491 | } | |
1492 | ||
1493 | func (c *pyreServiceClient) DeleteBucket(ctx context.Context, in *Bucket, opts ...grpc.CallOption) (*Bucket, error) { | |
1494 | out := new(Bucket) | |
1495 | err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/DeleteBucket", in, out, opts...) | |
1496 | if err != nil { | |
1497 | return nil, err | |
1498 | } | |
1499 | return out, nil | |
1500 | } | |
1501 | ||
1502 | func (c *pyreServiceClient) GetUploadUrl(ctx context.Context, in *GetUploadUrlRequest, opts ...grpc.CallOption) (*GetUploadUrlResponse, error) { | |
1503 | out := new(GetUploadUrlResponse) | |
1504 | err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/GetUploadUrl", in, out, opts...) | |
1505 | if err != nil { | |
1506 | return nil, err | |
1507 | } | |
1508 | return out, nil | |
1509 | } | |
1510 | ||
1511 | func (c *pyreServiceClient) StartLargeFile(ctx context.Context, in *StartLargeFileRequest, opts ...grpc.CallOption) (*StartLargeFileResponse, error) { | |
1512 | out := new(StartLargeFileResponse) | |
1513 | err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/StartLargeFile", in, out, opts...) | |
1514 | if err != nil { | |
1515 | return nil, err | |
1516 | } | |
1517 | return out, nil | |
1518 | } | |
1519 | ||
1520 | func (c *pyreServiceClient) GetUploadPartUrl(ctx context.Context, in *GetUploadPartUrlRequest, opts ...grpc.CallOption) (*GetUploadPartUrlResponse, error) { | |
1521 | out := new(GetUploadPartUrlResponse) | |
1522 | err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/GetUploadPartUrl", in, out, opts...) | |
1523 | if err != nil { | |
1524 | return nil, err | |
1525 | } | |
1526 | return out, nil | |
1527 | } | |
1528 | ||
1529 | func (c *pyreServiceClient) FinishLargeFile(ctx context.Context, in *FinishLargeFileRequest, opts ...grpc.CallOption) (*FinishLargeFileResponse, error) { | |
1530 | out := new(FinishLargeFileResponse) | |
1531 | err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/FinishLargeFile", in, out, opts...) | |
1532 | if err != nil { | |
1533 | return nil, err | |
1534 | } | |
1535 | return out, nil | |
1536 | } | |
1537 | ||
1538 | func (c *pyreServiceClient) ListFileVersions(ctx context.Context, in *ListFileVersionsRequest, opts ...grpc.CallOption) (*ListFileVersionsResponse, error) { | |
1539 | out := new(ListFileVersionsResponse) | |
1540 | err := c.cc.Invoke(ctx, "/pyre.proto.PyreService/ListFileVersions", in, out, opts...) | |
1541 | if err != nil { | |
1542 | return nil, err | |
1543 | } | |
1544 | return out, nil | |
1545 | } | |
1546 | ||
// PyreServiceServer is the server API for PyreService service.
type PyreServiceServer interface {
	// Used to log in to the B2 API. Returns an authorization token that can be
	// used for account-level operations, and a URL that should be used as the
	// base URL for subsequent API calls.
	AuthorizeAccount(context.Context, *AuthorizeAccountRequest) (*AuthorizeAccountResponse, error)
	// Lists buckets associated with an account, in alphabetical order by bucket
	// name.
	ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error)
	// Creates a new bucket. A bucket belongs to the account used to create it.
	//
	// Buckets can be named. The name must be globally unique. No account can use
	// a bucket with the same name. Buckets are assigned a unique bucketId which
	// is used when uploading, downloading, or deleting files.
	//
	// There is a limit of 100 buckets per account.
	CreateBucket(context.Context, *Bucket) (*Bucket, error)
	// Deletes the bucket specified. Only buckets that contain no version of any
	// files can be deleted.
	DeleteBucket(context.Context, *Bucket) (*Bucket, error)
	// Obtains a URL (and token) to use for uploading files to a bucket.
	GetUploadUrl(context.Context, *GetUploadUrlRequest) (*GetUploadUrlResponse, error)
	// Prepares for uploading the parts of a large file.
	StartLargeFile(context.Context, *StartLargeFileRequest) (*StartLargeFileResponse, error)
	// Gets an URL to use for uploading parts of a large file.
	GetUploadPartUrl(context.Context, *GetUploadPartUrlRequest) (*GetUploadPartUrlResponse, error)
	// Converts the parts that have been uploaded into a single B2 file.
	FinishLargeFile(context.Context, *FinishLargeFileRequest) (*FinishLargeFileResponse, error)
	// Lists all of the versions of all of the files contained in one bucket, in
	// alphabetical order by file name, and by reverse of date/time uploaded for
	// versions of files with the same name.
	ListFileVersions(context.Context, *ListFileVersionsRequest) (*ListFileVersionsResponse, error)
}

// RegisterPyreServiceServer registers srv's PyreService implementation with
// the given grpc.Server so its methods are dispatched via the generated
// service descriptor.
func RegisterPyreServiceServer(s *grpc.Server, srv PyreServiceServer) {
	s.RegisterService(&_PyreService_serviceDesc, srv)
}
1583 | ||
1584 | func _PyreService_AuthorizeAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |
1585 | in := new(AuthorizeAccountRequest) | |
1586 | if err := dec(in); err != nil { | |
1587 | return nil, err | |
1588 | } | |
1589 | if interceptor == nil { | |
1590 | return srv.(PyreServiceServer).AuthorizeAccount(ctx, in) | |
1591 | } | |
1592 | info := &grpc.UnaryServerInfo{ | |
1593 | Server: srv, | |
1594 | FullMethod: "/pyre.proto.PyreService/AuthorizeAccount", | |
1595 | } | |
1596 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |
1597 | return srv.(PyreServiceServer).AuthorizeAccount(ctx, req.(*AuthorizeAccountRequest)) | |
1598 | } | |
1599 | return interceptor(ctx, in, info, handler) | |
1600 | } | |
1601 | ||
1602 | func _PyreService_ListBuckets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |
1603 | in := new(ListBucketsRequest) | |
1604 | if err := dec(in); err != nil { | |
1605 | return nil, err | |
1606 | } | |
1607 | if interceptor == nil { | |
1608 | return srv.(PyreServiceServer).ListBuckets(ctx, in) | |
1609 | } | |
1610 | info := &grpc.UnaryServerInfo{ | |
1611 | Server: srv, | |
1612 | FullMethod: "/pyre.proto.PyreService/ListBuckets", | |
1613 | } | |
1614 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |
1615 | return srv.(PyreServiceServer).ListBuckets(ctx, req.(*ListBucketsRequest)) | |
1616 | } | |
1617 | return interceptor(ctx, in, info, handler) | |
1618 | } | |
1619 | ||
1620 | func _PyreService_CreateBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |
1621 | in := new(Bucket) | |
1622 | if err := dec(in); err != nil { | |
1623 | return nil, err | |
1624 | } | |
1625 | if interceptor == nil { | |
1626 | return srv.(PyreServiceServer).CreateBucket(ctx, in) | |
1627 | } | |
1628 | info := &grpc.UnaryServerInfo{ | |
1629 | Server: srv, | |
1630 | FullMethod: "/pyre.proto.PyreService/CreateBucket", | |
1631 | } | |
1632 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |
1633 | return srv.(PyreServiceServer).CreateBucket(ctx, req.(*Bucket)) | |
1634 | } | |
1635 | return interceptor(ctx, in, info, handler) | |
1636 | } | |
1637 | ||
1638 | func _PyreService_DeleteBucket_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |
1639 | in := new(Bucket) | |
1640 | if err := dec(in); err != nil { | |
1641 | return nil, err | |
1642 | } | |
1643 | if interceptor == nil { | |
1644 | return srv.(PyreServiceServer).DeleteBucket(ctx, in) | |
1645 | } | |
1646 | info := &grpc.UnaryServerInfo{ | |
1647 | Server: srv, | |
1648 | FullMethod: "/pyre.proto.PyreService/DeleteBucket", | |
1649 | } | |
1650 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |
1651 | return srv.(PyreServiceServer).DeleteBucket(ctx, req.(*Bucket)) | |
1652 | } | |
1653 | return interceptor(ctx, in, info, handler) | |
1654 | } | |
1655 | ||
// _PyreService_GetUploadUrl_Handler adapts the generic gRPC server machinery
// to the typed PyreServiceServer.GetUploadUrl method (generated code).
func _PyreService_GetUploadUrl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetUploadUrlRequest)
	// Unmarshal the wire payload into the typed request message.
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor installed: invoke the service implementation directly.
		return srv.(PyreServiceServer).GetUploadUrl(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pyre.proto.PyreService/GetUploadUrl",
	}
	// Wrap the typed call so the interceptor can run around it.
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(PyreServiceServer).GetUploadUrl(ctx, req.(*GetUploadUrlRequest))
	}
	return interceptor(ctx, in, info, handler)
}
1673 | ||
// _PyreService_StartLargeFile_Handler adapts the generic gRPC server machinery
// to the typed PyreServiceServer.StartLargeFile method (generated code).
func _PyreService_StartLargeFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(StartLargeFileRequest)
	// Unmarshal the wire payload into the typed request message.
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor installed: invoke the service implementation directly.
		return srv.(PyreServiceServer).StartLargeFile(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pyre.proto.PyreService/StartLargeFile",
	}
	// Wrap the typed call so the interceptor can run around it.
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(PyreServiceServer).StartLargeFile(ctx, req.(*StartLargeFileRequest))
	}
	return interceptor(ctx, in, info, handler)
}
1691 | ||
// _PyreService_GetUploadPartUrl_Handler adapts the generic gRPC server
// machinery to the typed PyreServiceServer.GetUploadPartUrl method (generated code).
func _PyreService_GetUploadPartUrl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetUploadPartUrlRequest)
	// Unmarshal the wire payload into the typed request message.
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor installed: invoke the service implementation directly.
		return srv.(PyreServiceServer).GetUploadPartUrl(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pyre.proto.PyreService/GetUploadPartUrl",
	}
	// Wrap the typed call so the interceptor can run around it.
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(PyreServiceServer).GetUploadPartUrl(ctx, req.(*GetUploadPartUrlRequest))
	}
	return interceptor(ctx, in, info, handler)
}
1709 | ||
// _PyreService_FinishLargeFile_Handler adapts the generic gRPC server
// machinery to the typed PyreServiceServer.FinishLargeFile method (generated code).
func _PyreService_FinishLargeFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(FinishLargeFileRequest)
	// Unmarshal the wire payload into the typed request message.
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor installed: invoke the service implementation directly.
		return srv.(PyreServiceServer).FinishLargeFile(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pyre.proto.PyreService/FinishLargeFile",
	}
	// Wrap the typed call so the interceptor can run around it.
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(PyreServiceServer).FinishLargeFile(ctx, req.(*FinishLargeFileRequest))
	}
	return interceptor(ctx, in, info, handler)
}
1727 | ||
// _PyreService_ListFileVersions_Handler adapts the generic gRPC server
// machinery to the typed PyreServiceServer.ListFileVersions method (generated code).
func _PyreService_ListFileVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListFileVersionsRequest)
	// Unmarshal the wire payload into the typed request message.
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor installed: invoke the service implementation directly.
		return srv.(PyreServiceServer).ListFileVersions(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pyre.proto.PyreService/ListFileVersions",
	}
	// Wrap the typed call so the interceptor can run around it.
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(PyreServiceServer).ListFileVersions(ctx, req.(*ListFileVersionsRequest))
	}
	return interceptor(ctx, in, info, handler)
}
1745 | ||
// _PyreService_serviceDesc describes PyreService to the gRPC runtime: the
// fully-qualified service name, the server interface it binds to, and the
// dispatch table mapping each unary method name to its handler shim above.
var _PyreService_serviceDesc = grpc.ServiceDesc{
	ServiceName: "pyre.proto.PyreService",
	HandlerType: (*PyreServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "AuthorizeAccount",
			Handler:    _PyreService_AuthorizeAccount_Handler,
		},
		{
			MethodName: "ListBuckets",
			Handler:    _PyreService_ListBuckets_Handler,
		},
		{
			MethodName: "CreateBucket",
			Handler:    _PyreService_CreateBucket_Handler,
		},
		{
			MethodName: "DeleteBucket",
			Handler:    _PyreService_DeleteBucket_Handler,
		},
		{
			MethodName: "GetUploadUrl",
			Handler:    _PyreService_GetUploadUrl_Handler,
		},
		{
			MethodName: "StartLargeFile",
			Handler:    _PyreService_StartLargeFile_Handler,
		},
		{
			MethodName: "GetUploadPartUrl",
			Handler:    _PyreService_GetUploadPartUrl_Handler,
		},
		{
			MethodName: "FinishLargeFile",
			Handler:    _PyreService_FinishLargeFile_Handler,
		},
		{
			MethodName: "ListFileVersions",
			Handler:    _PyreService_ListFileVersions_Handler,
		},
	},
	// The service declares no streaming methods.
	Streams:  []grpc.StreamDesc{},
	Metadata: "proto/pyre.proto",
}
1790 | ||
// init registers the gzipped file descriptor for proto/pyre.proto with the
// protobuf runtime at program start.
func init() { proto.RegisterFile("proto/pyre.proto", fileDescriptor_pyre_492df08819220afa) }
1792 | ||
// fileDescriptor_pyre_492df08819220afa holds the compiled, gzip-compressed
// FileDescriptorProto for proto/pyre.proto. Generated data — do not edit by hand.
var fileDescriptor_pyre_492df08819220afa = []byte{
	// 1591 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcb, 0x6f, 0x1b, 0x55,
	0x17, 0xd7, 0xd8, 0x71, 0x1c, 0x1f, 0xc7, 0xb1, 0x7b, 0xd3, 0x36, 0x13, 0xb7, 0x4d, 0xdc, 0xdb,
	0x24, 0x5f, 0x9a, 0xaf, 0x5f, 0xfc, 0x25, 0x08, 0x09, 0xb5, 0x02, 0x91, 0x06, 0x42, 0x23, 0xa5,
	0xa5, 0x72, 0x52, 0x24, 0x16, 0x68, 0x74, 0xe3, 0xb9, 0xb1, 0xaf, 0x3a, 0x0f, 0x73, 0x67, 0x9c,
	0xc6, 0x5d, 0xf1, 0x50, 0x17, 0x74, 0x0b, 0xff, 0x00, 0x7f, 0x07, 0x12, 0x1b, 0x36, 0xec, 0xd9,
	0xb3, 0xea, 0x86, 0x7f, 0x81, 0x0d, 0xe8, 0x3e, 0xc6, 0x9e, 0x47, 0x9c, 0x14, 0x68, 0x45, 0x57,
	0x9e, 0x39, 0xf7, 0x77, 0xce, 0x3d, 0xf7, 0x77, 0x1e, 0x73, 0xae, 0xa1, 0xd6, 0xe3, 0x7e, 0xe8,
	0x37, 0x7b, 0x03, 0x4e, 0xd7, 0xe5, 0x23, 0x82, 0xd1, 0x73, 0xfd, 0x6a, 0xc7, 0xf7, 0x3b, 0x0e,
	0x6d, 0x92, 0x1e, 0x6b, 0x12, 0xcf, 0xf3, 0x43, 0x12, 0x32, 0xdf, 0x0b, 0xd4, 0x2a, 0x9e, 0x87,
	0xb9, 0xad, 0x7e, 0xd8, 0xf5, 0x39, 0x7b, 0x4a, 0xb7, 0xda, 0x6d, 0xbf, 0xef, 0x85, 0x2d, 0xfa,
	0x79, 0x9f, 0x06, 0x21, 0xfe, 0x29, 0x07, 0x66, 0x76, 0x2d, 0xe8, 0xf9, 0x5e, 0x40, 0xd1, 0x35,
	0x00, 0xa2, 0x44, 0x16, 0xb3, 0x4d, 0xa3, 0x61, 0xac, 0x96, 0x5a, 0x25, 0x2d, 0xd9, 0xb5, 0x51,
	0x13, 0x66, 0x89, 0x56, 0x95, 0xdb, 0x59, 0xa1, 0xff, 0x98, 0x7a, 0x66, 0x4e, 0xe2, 0x50, 0x62,
	0xe9, 0x40, 0xac, 0xa0, 0x39, 0x28, 0x92, 0x1e, 0xb3, 0xfa, 0xdc, 0x31, 0xf3, 0x12, 0x34, 0x49,
	0x7a, 0xec, 0x11, 0x77, 0xd0, 0x75, 0x98, 0xb6, 0xfd, 0x27, 0x9e, 0xe3, 0x13, 0x5b, 0xae, 0x4e,
	0xc8, 0xd5, 0x72, 0x24, 0x13, 0x90, 0x4d, 0xb8, 0xc4, 0x69, 0xdb, 0x77, 0x5d, 0xea, 0xd9, 0xd4,
	0xb6, 0x7a, 0x84, 0x87, 0x56, 0xc0, 0x9e, 0x52, 0xb3, 0xd0, 0x30, 0x56, 0x0b, 0xad, 0xd9, 0xd8,
	0xe2, 0x43, 0xc2, 0xc3, 0x7d, 0xf6, 0x94, 0xa2, 0x3b, 0x50, 0x27, 0x87, 0x81, 0xef, 0xf4, 0x43,
	0x6a, 0xb9, 0xcc, 0x63, 0x6e, 0xdf, 0x8d, 0x29, 0x4e, 0x4a, 0xc5, 0xb9, 0x08, 0x71, 0x5f, 0x01,
	0x86, 0xca, 0x6b, 0x70, 0x21, 0xab, 0x53, 0x94, 0x3a, 0x55, 0x37, 0x89, 0xc5, 0xdf, 0x19, 0x80,
	0xf6, 0x58, 0x10, 0xde, 0xed, 0xb7, 0x1f, 0xd3, 0x30, 0xd0, 0xe4, 0x9e, 0xc7, 0xdf, 0x15, 0x28,
	0x1d, 0x4a, 0x05, 0xb1, 0xaa, 0x58, 0x9b, 0x52, 0x82, 0x5d, 0x1b, 0x2d, 0x42, 0x59, 0x2f, 0x7a,
	0xc4, 0xa5, 0x9a, 0x2f, 0x50, 0xa2, 0x07, 0xc4, 0xa5, 0x82, 0x33, 0x0d, 0x08, 0x07, 0x3d, 0x1a,
	0x98, 0x13, 0x8d, 0xbc, 0xe0, 0x4c, 0xc9, 0x0e, 0x84, 0x08, 0xff, 0x60, 0x40, 0x65, 0x8f, 0x1d,
	0xd1, 0xf6, 0xa0, 0xed, 0xd0, 0x56, 0xdf, 0xa1, 0xe8, 0x7d, 0xb8, 0x66, 0x93, 0x41, 0x60, 0x1d,
	0x71, 0xdf, 0xb5, 0xfa, 0x3d, 0x41, 0x2e, 0xf3, 0x3a, 0x56, 0xe8, 0x5b, 0x5d, 0x26, 0x9e, 0xa4,
	0x93, 0x85, 0xd6, 0xbc, 0x00, 0xed, 0x70, 0xdf, 0x7d, 0x14, 0x41, 0x0e, 0xfc, 0x7b, 0x12, 0x80,
	0xde, 0x83, 0xab, 0x23, 0x0b, 0x4a, 0x49, 0xa8, 0xdb, 0xd4, 0xa1, 0xa1, 0x30, 0x90, 0x93, 0x06,
	0xcc, 0xc8, 0x80, 0xd2, 0x3a, 0xf0, 0x3f, 0xd0, 0xeb, 0x68, 0x15, 0x6a, 0x47, 0xcc, 0xa1, 0xf2,
	0x54, 0x56, 0x8f, 0xd3, 0x23, 0x76, 0xa2, 0x0f, 0x37, 0x23, 0xe4, 0xe2, 0x68, 0x0f, 0xa5, 0x14,
	0x7f, 0x91, 0x83, 0xa9, 0x6d, 0x9f, 0x07, 0xd2, 0xf1, 0x25, 0x98, 0x69, 0xfb, 0x3c, 0xb0, 0x78,
	0x5f, 0xeb, 0x6a, 0x3a, 0xa7, 0xdb, 0x1a, 0x21, 0x39, 0xf9, 0x0f, 0x54, 0x89, 0xe3, 0xf8, 0x4f,
	0xa8, 0x6d, 0xf9, 0x9c, 0x75, 0x98, 0x17, 0x98, 0x39, 0x49, 0xcb, 0x8c, 0x16, 0x7f, 0xac, 0xa4,
	0xe8, 0x7f, 0x80, 0x86, 0xc0, 0x1e, 0xe5, 0xaa, 0x5a, 0xcc, 0xbc, 0xc4, 0x5e, 0x88, 0xb0, 0xc3,
	0x85, 0xb8, 0xdd, 0x2e, 0x25, 0x36, 0xe5, 0x11, 0xdd, 0x91, 0xdd, 0x7b, 0x4a, 0x8a, 0x96, 0x61,
	0x86, 0x9e, 0xf4, 0xfc, 0x80, 0x0e, 0x71, 0x05, 0x89, 0xab, 0x28, 0x69, 0x04, 0x5b, 0x81, 0xaa,
	0x4b, 0x4e, 0x2c, 0xd2, 0xa1, 0x56, 0x40, 0xdb, 0xbe, 0x67, 0x07, 0x3a, 0x1b, 0x2b, 0x2e, 0x39,
	0xd9, 0xea, 0xd0, 0x7d, 0x25, 0xc4, 0xdf, 0xe7, 0x61, 0x52, 0xe5, 0xd4, 0xeb, 0xcd, 0xa5, 0x11,
	0x40, 0xe4, 0x92, 0x2e, 0x3f, 0x18, 0xa5, 0x12, 0xda, 0x1e, 0x02, 0x98, 0x77, 0xe4, 0xcb, 0x43,
	0x95, 0x37, 0xf1, 0xfa, 0xa8, 0x03, 0xad, 0x2b, 0x37, 0xf5, 0xcf, 0xae, 0x77, 0xe4, 0x7f, 0xe8,
	0x85, 0x7c, 0x10, 0x19, 0x11, 0x02, 0xf4, 0x36, 0x94, 0xdb, 0x3e, 0xa7, 0x2a, 0x88, 0xe2, 0xc4,
	0xc2, 0xc8, 0xc5, 0xb8, 0x91, 0x28, 0xdc, 0x2d, 0x90, 0x40, 0xf1, 0x18, 0xa0, 0xbb, 0x50, 0x75,
	0xa2, 0x24, 0xd6, 0xaa, 0x45, 0xa9, 0x3a, 0x1f, 0x57, 0x4d, 0xe4, 0x79, 0x6b, 0xc6, 0x89, 0xbf,
	0x06, 0xa8, 0x0e, 0x53, 0x9c, 0x1e, 0xb3, 0x80, 0xf9, 0x9e, 0x39, 0x25, 0x99, 0x1e, 0xbe, 0xd7,
	0xdf, 0x85, 0x6a, 0xca, 0x6b, 0x54, 0x83, 0xfc, 0x63, 0x3a, 0xd0, 0x2c, 0x8b, 0x47, 0x74, 0x11,
	0x0a, 0xc7, 0xc4, 0xe9, 0x53, 0xcd, 0xad, 0x7a, 0xb9, 0x9d, 0x7b, 0xc7, 0xc0, 0xdb, 0x30, 0x9b,
	0x28, 0x7d, 0xdd, 0x3b, 0x6f, 0x41, 0x51, 0x1d, 0x3d, 0x30, 0x0d, 0xe9, 0x2d, 0xca, 0xb2, 0xd5,
	0x8a, 0x20, 0x78, 0x13, 0x66, 0x3f, 0xa2, 0xa1, 0xaa, 0xb6, 0x47, 0xdc, 0x89, 0x1a, 0x48, 0x22,
	0xaa, 0x46, 0x32, 0xaa, 0xf8, 0x6b, 0x03, 0x2e, 0x26, 0x95, 0xf4, 0xd6, 0x67, 0x69, 0x89, 0x3c,
	0x52, 0x75, 0x2f, 0x1b, 0xad, 0x3a, 0x4d, 0xa9, 0x1f, 0xd9, 0x18, 0xd7, 0xd3, 0xf3, 0xe3, 0x7a,
	0x3a, 0xfe, 0x31, 0x0f, 0x48, 0xb9, 0xb0, 0xc3, 0x1c, 0x3a, 0xf4, 0x61, 0x0e, 0x8a, 0xb2, 0xcc,
	0x87, 0x1e, 0x4c, 0x8a, 0x57, 0x95, 0xa8, 0xc3, 0xfa, 0x8f, 0x12, 0x35, 0x2a, 0xfc, 0x54, 0x92,
	0xe7, 0xcf, 0x4c, 0xf2, 0x89, 0xd4, 0xc1, 0x96, 0x45, 0x87, 0xf0, 0x42, 0xea, 0x85, 0x96, 0x43,
	0xbd, 0x4e, 0xd8, 0xd5, 0x5f, 0x86, 0x8a, 0x96, 0xee, 0x49, 0xa1, 0x68, 0x9b, 0x11, 0x2c, 0xe8,
	0x92, 0x0d, 0x59, 0x77, 0xa5, 0x56, 0x59, 0xcb, 0xf6, 0xbb, 0x64, 0x23, 0x0e, 0x91, 0xe5, 0x50,
	0x4c, 0x40, 0x64, 0x3d, 0xec, 0xea, 0x53, 0xc8, 0x6a, 0x98, 0x92, 0xf1, 0xbd, 0x15, 0x8f, 0x6f,
	0x96, 0x91, 0x75, 0xf1, 0x32, 0xaa, 0x0b, 0x79, 0x66, 0x59, 0x15, 0x97, 0x61, 0x92, 0xb4, 0x05,
	0x9f, 0x66, 0x49, 0x7f, 0x13, 0xe5, 0x1b, 0xba, 0x09, 0x35, 0x1d, 0xa8, 0x90, 0xb9, 0x34, 0x08,
	0x89, 0xdb, 0x33, 0xa1, 0x61, 0xac, 0xe6, 0x5b, 0x55, 0x25, 0x3f, 0x88, 0xc4, 0xf5, 0x3b, 0x50,
	0x49, 0x58, 0xff, 0x4b, 0xf9, 0xfb, 0x87, 0x01, 0x97, 0xf6, 0x43, 0xc2, 0xc3, 0x3d, 0xc2, 0x3b,
	0x54, 0xb9, 0x7c, 0x7e, 0xf6, 0x9d, 0x1d, 0xc7, 0x34, 0x83, 0xf9, 0x2c, 0x83, 0x7b, 0x71, 0x06,
	0x27, 0x24, 0x83, 0xcd, 0x38, 0x83, 0xa7, 0xba, 0x34, 0x8e, 0xc4, 0x7f, 0xc6, 0xc0, 0x8b, 0x1c,
	0x5c, 0x4e, 0x6f, 0xf7, 0xaf, 0xa5, 0x71, 0x9a, 0xba, 0x42, 0x96, 0xba, 0xfb, 0x71, 0xea, 0x54,
	0x17, 0xfd, 0xff, 0x59, 0xd4, 0x9d, 0x93, 0x80, 0xa7, 0x25, 0x5a, 0xf1, 0x35, 0x24, 0xda, 0x26,
	0xcc, 0x0d, 0xdb, 0x95, 0x98, 0x9c, 0x62, 0x7d, 0x6e, 0x1c, 0xcd, 0xa2, 0xc7, 0x99, 0x59, 0xa5,
	0xf3, 0x82, 0xf3, 0xaa, 0x7b, 0xdc, 0xa7, 0x70, 0x79, 0x87, 0x79, 0x2c, 0xe8, 0x66, 0x4a, 0x64,
	0xac, 0x0b, 0x2b, 0x50, 0x55, 0x53, 0x63, 0x97, 0x6c, 0x58, 0x84, 0x73, 0x32, 0xd0, 0x93, 0x48,
	0x45, 0x88, 0x45, 0x9b, 0xd9, 0x12, 0x42, 0xfc, 0x73, 0x1e, 0xe6, 0x32, 0xb6, 0xdf, 0xb0, 0x1e,
	0x9a, 0x7f, 0x3d, 0x3d, 0xf4, 0x41, 0xb6, 0x87, 0x6e, 0xc4, 0xd3, 0x78, 0x0c, 0x2d, 0x6f, 0x6c,
	0x23, 0xfd, 0xd5, 0x80, 0x39, 0x31, 0x09, 0x08, 0x0b, 0x9f, 0x50, 0x2e, 0x66, 0x8b, 0xe0, 0xa5,
	0x5a, 0xe9, 0x0a, 0x54, 0x03, 0x51, 0xb2, 0x56, 0x3a, 0xa8, 0x15, 0x29, 0xde, 0x89, 0x22, 0x8b,
	0xa1, 0x12, 0xc3, 0x0d, 0x83, 0x5b, 0x1e, 0xa2, 0x76, 0x6d, 0x31, 0x27, 0x8b, 0xc9, 0x52, 0x22,
	0x64, 0xc8, 0x65, 0x8c, 0x0b, 0xad, 0x69, 0x97, 0x9c, 0x08, 0xc8, 0xb6, 0x90, 0x09, 0xaa, 0xf4,
	0xe8, 0xad, 0xda, 0x8b, 0x7e, 0x43, 0x57, 0xa1, 0x64, 0x53, 0x87, 0xb9, 0x2c, 0xa4, 0x5c, 0x47,
	0x75, 0x24, 0xc0, 0xcf, 0x0d, 0x30, 0xb3, 0x07, 0xd4, 0xc9, 0xba, 0x02, 0x05, 0xb1, 0x69, 0x34,
	0xed, 0xd4, 0x92, 0x91, 0x74, 0x68, 0x4b, 0x2d, 0x0b, 0x07, 0x3d, 0x7a, 0x92, 0x3d, 0xeb, 0xb4,
	0x90, 0x0e, 0x8f, 0xda, 0x80, 0xe9, 0x11, 0x6a, 0x78, 0x52, 0x88, 0x30, 0xbb, 0x36, 0xfe, 0x3d,
	0x07, 0x13, 0xe2, 0xf1, 0x6f, 0x56, 0x49, 0x36, 0xd3, 0xf3, 0xe7, 0x64, 0x7a, 0x6c, 0x32, 0x4e,
	0xa4, 0x71, 0xba, 0x18, 0x0a, 0xd9, 0x62, 0xb8, 0x93, 0x6d, 0xd8, 0x0b, 0x69, 0x7e, 0x5e, 0x22,
	0xad, 0x8b, 0x89, 0xb4, 0x46, 0x30, 0x21, 0xaf, 0xa4, 0x53, 0xd2, 0x6f, 0xf9, 0x7c, 0x6a, 0xaa,
	0x97, 0x5e, 0x7d, 0xaa, 0x6f, 0xfe, 0x36, 0x05, 0xe5, 0x87, 0x03, 0x4e, 0xf7, 0x29, 0x3f, 0x66,
	0x6d, 0x8a, 0x9e, 0x19, 0x50, 0x4b, 0xff, 0x8b, 0x80, 0x6e, 0xc4, 0x8f, 0x38, 0xe6, 0xff, 0x87,
	0xfa, 0xd2, 0xd9, 0x20, 0x95, 0x5c, 0x78, 0xe5, 0xab, 0x5f, 0x5e, 0x7c, 0x9b, 0x6b, 0xa0, 0x85,
	0xe6, 0xe1, 0x26, 0xe9, 0xb1, 0xe6, 0xf1, 0x46, 0xf3, 0x70, 0xd3, 0x8a, 0x3a, 0x35, 0xb5, 0x74,
	0x7b, 0x43, 0x21, 0x94, 0x63, 0xb3, 0x38, 0x5a, 0x48, 0x5e, 0x10, 0xd2, 0xf7, 0xf3, 0xfa, 0xe2,
	0xd8, 0x75, 0xbd, 0xef, 0x92, 0xdc, 0x77, 0x01, 0xcf, 0x27, 0xf6, 0x75, 0x58, 0x10, 0x5a, 0x7a,
	0x72, 0xbf, 0x6d, 0xac, 0xa1, 0xcf, 0x60, 0x7a, 0x9b, 0x53, 0x12, 0x52, 0x7d, 0x55, 0x3b, 0x65,
	0xd2, 0xaf, 0x9f, 0x22, 0xc3, 0xcb, 0xd2, 0xfa, 0x22, 0xae, 0x27, 0xac, 0xb7, 0xa5, 0x29, 0x6d,
	0x5f, 0x9b, 0x97, 0xb7, 0xe7, 0x57, 0x63, 0x5e, 0x5e, 0xd4, 0xe3, 0xe6, 0x07, 0x30, 0x1d, 0xbf,
	0x45, 0xa0, 0x04, 0x29, 0xa7, 0x5c, 0x4a, 0xea, 0x8d, 0xf1, 0x80, 0x64, 0xb8, 0xf0, 0x95, 0xc4,
	0xce, 0x1d, 0x1a, 0x5a, 0xa3, 0xcf, 0xb2, 0xd8, 0xfa, 0x4b, 0x03, 0x66, 0x92, 0xc3, 0x0a, 0xba,
	0x7e, 0xee, 0x0c, 0x58, 0xc7, 0xe7, 0xcf, 0x3a, 0x78, 0x55, 0x7a, 0x80, 0xf1, 0xb5, 0x84, 0x07,
	0xaa, 0x7b, 0x3a, 0x02, 0x2d, 0x3b, 0x8b, 0xf0, 0xe1, 0xb9, 0x01, 0xb5, 0xf4, 0x84, 0x91, 0x4c,
	0xdd, 0x31, 0x43, 0x4b, 0x32, 0x75, 0xc7, 0x0d, 0x29, 0xf8, 0xbf, 0xd2, 0x93, 0x65, 0xdc, 0x18,
	0xc7, 0x85, 0x1c, 0x13, 0x34, 0x21, 0xcf, 0x0c, 0xa8, 0xa6, 0x3e, 0x7b, 0x08, 0x9f, 0xf9, 0x4d,
	0x54, 0xae, 0xdc, 0x78, 0x89, 0xef, 0x26, 0xbe, 0x29, 0x3d, 0xb9, 0x81, 0x93, 0x45, 0x74, 0x24,
	0xd1, 0x29, 0x52, 0xbe, 0x31, 0xa0, 0x96, 0xee, 0xf4, 0x49, 0x52, 0xc6, 0x7c, 0xe8, 0x92, 0xa4,
	0x8c, 0xfb, 0x58, 0xe0, 0x35, 0xe9, 0xca, 0x12, 0x5e, 0xcc, 0xd6, 0x95, 0x6c, 0x94, 0xc7, 0x5a,
	0xe1, 0xb6, 0xb1, 0x76, 0x38, 0x29, 0x6d, 0xbd, 0xf5, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x09,
	0x84, 0xd9, 0x63, 0x01, 0x15, 0x00, 0x00,
}
0 | // Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. | |
1 | // source: proto/pyre.proto | |
2 | ||
3 | /* | |
4 | Package pyre_proto is a reverse proxy. | |
5 | ||
6 | It translates gRPC into RESTful JSON APIs. | |
7 | */ | |
8 | package pyre_proto | |
9 | ||
10 | import ( | |
11 | "io" | |
12 | "net/http" | |
13 | ||
14 | "github.com/golang/protobuf/proto" | |
15 | "github.com/grpc-ecosystem/grpc-gateway/runtime" | |
16 | "github.com/grpc-ecosystem/grpc-gateway/utilities" | |
17 | "golang.org/x/net/context" | |
18 | "google.golang.org/grpc" | |
19 | "google.golang.org/grpc/codes" | |
20 | "google.golang.org/grpc/grpclog" | |
21 | "google.golang.org/grpc/status" | |
22 | ) | |
23 | ||
// Reference imports to suppress "imported and not used" errors: not every
// generated code path uses every imported package.
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
29 | ||
// request_PyreService_AuthorizeAccount_0 forwards an HTTP request to the
// AuthorizeAccount RPC. No request body is decoded here (the route is a GET);
// the zero-value request message is sent, and the call's header/trailer
// metadata is captured for the HTTP response.
func request_PyreService_AuthorizeAccount_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq AuthorizeAccountRequest
	var metadata runtime.ServerMetadata

	msg, err := client.AuthorizeAccount(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}
38 | ||
// request_PyreService_ListBuckets_0 decodes the HTTP request body into a
// ListBucketsRequest and forwards it to the ListBuckets RPC, capturing the
// call's header/trailer metadata.
func request_PyreService_ListBuckets_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq ListBucketsRequest
	var metadata runtime.ServerMetadata

	// An empty body (io.EOF) is tolerated and leaves protoReq at its zero value.
	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.ListBuckets(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}
51 | ||
// request_PyreService_CreateBucket_0 decodes the HTTP request body into a
// Bucket and forwards it to the CreateBucket RPC, capturing the call's
// header/trailer metadata.
func request_PyreService_CreateBucket_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq Bucket
	var metadata runtime.ServerMetadata

	// An empty body (io.EOF) is tolerated and leaves protoReq at its zero value.
	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.CreateBucket(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}
64 | ||
// request_PyreService_DeleteBucket_0 decodes the HTTP request body into a
// Bucket and forwards it to the DeleteBucket RPC, capturing the call's
// header/trailer metadata.
func request_PyreService_DeleteBucket_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq Bucket
	var metadata runtime.ServerMetadata

	// An empty body (io.EOF) is tolerated and leaves protoReq at its zero value.
	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.DeleteBucket(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}
77 | ||
// request_PyreService_GetUploadUrl_0 decodes the HTTP request body into a
// GetUploadUrlRequest and forwards it to the GetUploadUrl RPC, capturing the
// call's header/trailer metadata.
func request_PyreService_GetUploadUrl_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq GetUploadUrlRequest
	var metadata runtime.ServerMetadata

	// An empty body (io.EOF) is tolerated and leaves protoReq at its zero value.
	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.GetUploadUrl(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}
90 | ||
// request_PyreService_StartLargeFile_0 decodes the HTTP request body into a
// StartLargeFileRequest and forwards it to the StartLargeFile RPC, capturing
// the call's header/trailer metadata.
func request_PyreService_StartLargeFile_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq StartLargeFileRequest
	var metadata runtime.ServerMetadata

	// An empty body (io.EOF) is tolerated and leaves protoReq at its zero value.
	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.StartLargeFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}
103 | ||
// request_PyreService_GetUploadPartUrl_0 decodes the HTTP request body into a
// GetUploadPartUrlRequest and forwards it to the GetUploadPartUrl RPC,
// capturing the call's header/trailer metadata.
func request_PyreService_GetUploadPartUrl_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq GetUploadPartUrlRequest
	var metadata runtime.ServerMetadata

	// An empty body (io.EOF) is tolerated and leaves protoReq at its zero value.
	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.GetUploadPartUrl(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}
116 | ||
// request_PyreService_FinishLargeFile_0 decodes the HTTP request body into a
// FinishLargeFileRequest and forwards it to the FinishLargeFile RPC,
// capturing the call's header/trailer metadata.
func request_PyreService_FinishLargeFile_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq FinishLargeFileRequest
	var metadata runtime.ServerMetadata

	// An empty body (io.EOF) is tolerated and leaves protoReq at its zero value.
	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.FinishLargeFile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}
129 | ||
// request_PyreService_ListFileVersions_0 decodes the HTTP request body into a
// ListFileVersionsRequest and forwards it to the ListFileVersions RPC,
// capturing the call's header/trailer metadata.
func request_PyreService_ListFileVersions_0(ctx context.Context, marshaler runtime.Marshaler, client PyreServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq ListFileVersionsRequest
	var metadata runtime.ServerMetadata

	// An empty body (io.EOF) is tolerated and leaves protoReq at its zero value.
	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.ListFileVersions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}
142 | ||
// RegisterPyreServiceHandlerFromEndpoint is the same as RegisterPyreServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterPyreServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
	conn, err := grpc.Dial(endpoint, opts...)
	if err != nil {
		return err
	}
	// The deferred closure inspects the named return value "err": if the
	// registration below failed, the connection is closed immediately;
	// otherwise ownership passes to a goroutine that closes it once ctx is
	// cancelled. Close failures are only logged — they cannot change err.
	defer func() {
		if err != nil {
			if cerr := conn.Close(); cerr != nil {
				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
			}
			return
		}
		go func() {
			<-ctx.Done()
			if cerr := conn.Close(); cerr != nil {
				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
			}
		}()
	}()

	return RegisterPyreServiceHandler(ctx, mux, conn)
}
167 | ||
// RegisterPyreServiceHandler registers the http handlers for service PyreService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
// It delegates to RegisterPyreServiceHandlerClient with a client bound to conn;
// the caller retains ownership of conn and is responsible for closing it.
func RegisterPyreServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	return RegisterPyreServiceHandlerClient(ctx, mux, NewPyreServiceClient(conn))
}
173 | ||
174 | // RegisterPyreServiceHandlerClient registers the http handlers for service PyreService | |
175 | // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "PyreServiceClient". | |
176 | // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "PyreServiceClient" | |
177 | // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in | |
178 | // "PyreServiceClient" to call the correct interceptors. | |
179 | func RegisterPyreServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client PyreServiceClient) error { | |
180 | ||
181 | mux.Handle("GET", pattern_PyreService_AuthorizeAccount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { | |
182 | ctx, cancel := context.WithCancel(req.Context()) | |
183 | defer cancel() | |
184 | if cn, ok := w.(http.CloseNotifier); ok { | |
185 | go func(done <-chan struct{}, closed <-chan bool) { | |
186 | select { | |
187 | case <-done: | |
188 | case <-closed: | |
189 | cancel() | |
190 | } | |
191 | }(ctx.Done(), cn.CloseNotify()) | |
192 | } | |
193 | inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) | |
194 | rctx, err := runtime.AnnotateContext(ctx, mux, req) | |
195 | if err != nil { | |
196 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
197 | return | |
198 | } | |
199 | resp, md, err := request_PyreService_AuthorizeAccount_0(rctx, inboundMarshaler, client, req, pathParams) | |
200 | ctx = runtime.NewServerMetadataContext(ctx, md) | |
201 | if err != nil { | |
202 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
203 | return | |
204 | } | |
205 | ||
206 | forward_PyreService_AuthorizeAccount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) | |
207 | ||
208 | }) | |
209 | ||
210 | mux.Handle("POST", pattern_PyreService_ListBuckets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { | |
211 | ctx, cancel := context.WithCancel(req.Context()) | |
212 | defer cancel() | |
213 | if cn, ok := w.(http.CloseNotifier); ok { | |
214 | go func(done <-chan struct{}, closed <-chan bool) { | |
215 | select { | |
216 | case <-done: | |
217 | case <-closed: | |
218 | cancel() | |
219 | } | |
220 | }(ctx.Done(), cn.CloseNotify()) | |
221 | } | |
222 | inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) | |
223 | rctx, err := runtime.AnnotateContext(ctx, mux, req) | |
224 | if err != nil { | |
225 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
226 | return | |
227 | } | |
228 | resp, md, err := request_PyreService_ListBuckets_0(rctx, inboundMarshaler, client, req, pathParams) | |
229 | ctx = runtime.NewServerMetadataContext(ctx, md) | |
230 | if err != nil { | |
231 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
232 | return | |
233 | } | |
234 | ||
235 | forward_PyreService_ListBuckets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) | |
236 | ||
237 | }) | |
238 | ||
239 | mux.Handle("POST", pattern_PyreService_CreateBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { | |
240 | ctx, cancel := context.WithCancel(req.Context()) | |
241 | defer cancel() | |
242 | if cn, ok := w.(http.CloseNotifier); ok { | |
243 | go func(done <-chan struct{}, closed <-chan bool) { | |
244 | select { | |
245 | case <-done: | |
246 | case <-closed: | |
247 | cancel() | |
248 | } | |
249 | }(ctx.Done(), cn.CloseNotify()) | |
250 | } | |
251 | inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) | |
252 | rctx, err := runtime.AnnotateContext(ctx, mux, req) | |
253 | if err != nil { | |
254 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
255 | return | |
256 | } | |
257 | resp, md, err := request_PyreService_CreateBucket_0(rctx, inboundMarshaler, client, req, pathParams) | |
258 | ctx = runtime.NewServerMetadataContext(ctx, md) | |
259 | if err != nil { | |
260 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
261 | return | |
262 | } | |
263 | ||
264 | forward_PyreService_CreateBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) | |
265 | ||
266 | }) | |
267 | ||
268 | mux.Handle("POST", pattern_PyreService_DeleteBucket_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { | |
269 | ctx, cancel := context.WithCancel(req.Context()) | |
270 | defer cancel() | |
271 | if cn, ok := w.(http.CloseNotifier); ok { | |
272 | go func(done <-chan struct{}, closed <-chan bool) { | |
273 | select { | |
274 | case <-done: | |
275 | case <-closed: | |
276 | cancel() | |
277 | } | |
278 | }(ctx.Done(), cn.CloseNotify()) | |
279 | } | |
280 | inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) | |
281 | rctx, err := runtime.AnnotateContext(ctx, mux, req) | |
282 | if err != nil { | |
283 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
284 | return | |
285 | } | |
286 | resp, md, err := request_PyreService_DeleteBucket_0(rctx, inboundMarshaler, client, req, pathParams) | |
287 | ctx = runtime.NewServerMetadataContext(ctx, md) | |
288 | if err != nil { | |
289 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
290 | return | |
291 | } | |
292 | ||
293 | forward_PyreService_DeleteBucket_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) | |
294 | ||
295 | }) | |
296 | ||
297 | mux.Handle("POST", pattern_PyreService_GetUploadUrl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { | |
298 | ctx, cancel := context.WithCancel(req.Context()) | |
299 | defer cancel() | |
300 | if cn, ok := w.(http.CloseNotifier); ok { | |
301 | go func(done <-chan struct{}, closed <-chan bool) { | |
302 | select { | |
303 | case <-done: | |
304 | case <-closed: | |
305 | cancel() | |
306 | } | |
307 | }(ctx.Done(), cn.CloseNotify()) | |
308 | } | |
309 | inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) | |
310 | rctx, err := runtime.AnnotateContext(ctx, mux, req) | |
311 | if err != nil { | |
312 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
313 | return | |
314 | } | |
315 | resp, md, err := request_PyreService_GetUploadUrl_0(rctx, inboundMarshaler, client, req, pathParams) | |
316 | ctx = runtime.NewServerMetadataContext(ctx, md) | |
317 | if err != nil { | |
318 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
319 | return | |
320 | } | |
321 | ||
322 | forward_PyreService_GetUploadUrl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) | |
323 | ||
324 | }) | |
325 | ||
326 | mux.Handle("POST", pattern_PyreService_StartLargeFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { | |
327 | ctx, cancel := context.WithCancel(req.Context()) | |
328 | defer cancel() | |
329 | if cn, ok := w.(http.CloseNotifier); ok { | |
330 | go func(done <-chan struct{}, closed <-chan bool) { | |
331 | select { | |
332 | case <-done: | |
333 | case <-closed: | |
334 | cancel() | |
335 | } | |
336 | }(ctx.Done(), cn.CloseNotify()) | |
337 | } | |
338 | inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) | |
339 | rctx, err := runtime.AnnotateContext(ctx, mux, req) | |
340 | if err != nil { | |
341 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
342 | return | |
343 | } | |
344 | resp, md, err := request_PyreService_StartLargeFile_0(rctx, inboundMarshaler, client, req, pathParams) | |
345 | ctx = runtime.NewServerMetadataContext(ctx, md) | |
346 | if err != nil { | |
347 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
348 | return | |
349 | } | |
350 | ||
351 | forward_PyreService_StartLargeFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) | |
352 | ||
353 | }) | |
354 | ||
355 | mux.Handle("POST", pattern_PyreService_GetUploadPartUrl_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { | |
356 | ctx, cancel := context.WithCancel(req.Context()) | |
357 | defer cancel() | |
358 | if cn, ok := w.(http.CloseNotifier); ok { | |
359 | go func(done <-chan struct{}, closed <-chan bool) { | |
360 | select { | |
361 | case <-done: | |
362 | case <-closed: | |
363 | cancel() | |
364 | } | |
365 | }(ctx.Done(), cn.CloseNotify()) | |
366 | } | |
367 | inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) | |
368 | rctx, err := runtime.AnnotateContext(ctx, mux, req) | |
369 | if err != nil { | |
370 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
371 | return | |
372 | } | |
373 | resp, md, err := request_PyreService_GetUploadPartUrl_0(rctx, inboundMarshaler, client, req, pathParams) | |
374 | ctx = runtime.NewServerMetadataContext(ctx, md) | |
375 | if err != nil { | |
376 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
377 | return | |
378 | } | |
379 | ||
380 | forward_PyreService_GetUploadPartUrl_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) | |
381 | ||
382 | }) | |
383 | ||
384 | mux.Handle("POST", pattern_PyreService_FinishLargeFile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { | |
385 | ctx, cancel := context.WithCancel(req.Context()) | |
386 | defer cancel() | |
387 | if cn, ok := w.(http.CloseNotifier); ok { | |
388 | go func(done <-chan struct{}, closed <-chan bool) { | |
389 | select { | |
390 | case <-done: | |
391 | case <-closed: | |
392 | cancel() | |
393 | } | |
394 | }(ctx.Done(), cn.CloseNotify()) | |
395 | } | |
396 | inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) | |
397 | rctx, err := runtime.AnnotateContext(ctx, mux, req) | |
398 | if err != nil { | |
399 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
400 | return | |
401 | } | |
402 | resp, md, err := request_PyreService_FinishLargeFile_0(rctx, inboundMarshaler, client, req, pathParams) | |
403 | ctx = runtime.NewServerMetadataContext(ctx, md) | |
404 | if err != nil { | |
405 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
406 | return | |
407 | } | |
408 | ||
409 | forward_PyreService_FinishLargeFile_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) | |
410 | ||
411 | }) | |
412 | ||
413 | mux.Handle("POST", pattern_PyreService_ListFileVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { | |
414 | ctx, cancel := context.WithCancel(req.Context()) | |
415 | defer cancel() | |
416 | if cn, ok := w.(http.CloseNotifier); ok { | |
417 | go func(done <-chan struct{}, closed <-chan bool) { | |
418 | select { | |
419 | case <-done: | |
420 | case <-closed: | |
421 | cancel() | |
422 | } | |
423 | }(ctx.Done(), cn.CloseNotify()) | |
424 | } | |
425 | inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) | |
426 | rctx, err := runtime.AnnotateContext(ctx, mux, req) | |
427 | if err != nil { | |
428 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
429 | return | |
430 | } | |
431 | resp, md, err := request_PyreService_ListFileVersions_0(rctx, inboundMarshaler, client, req, pathParams) | |
432 | ctx = runtime.NewServerMetadataContext(ctx, md) | |
433 | if err != nil { | |
434 | runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) | |
435 | return | |
436 | } | |
437 | ||
438 | forward_PyreService_ListFileVersions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) | |
439 | ||
440 | }) | |
441 | ||
442 | return nil | |
443 | } | |
444 | ||
var (
	// HTTP path patterns for the grpc-gateway REST endpoints. Each pattern
	// maps a B2-style URL (/b2api/v1/b2_*) onto the corresponding
	// PyreService RPC registered above. This block is generated code;
	// regenerate from proto/pyre.proto rather than editing by hand.
	pattern_PyreService_AuthorizeAccount_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_authorize_account"}, ""))

	pattern_PyreService_ListBuckets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_list_buckets"}, ""))

	pattern_PyreService_CreateBucket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_create_bucket"}, ""))

	pattern_PyreService_DeleteBucket_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_delete_bucket"}, ""))

	pattern_PyreService_GetUploadUrl_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_get_upload_url"}, ""))

	pattern_PyreService_StartLargeFile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_start_large_file"}, ""))

	pattern_PyreService_GetUploadPartUrl_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_get_upload_part_url"}, ""))

	pattern_PyreService_FinishLargeFile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_finish_large_file"}, ""))

	pattern_PyreService_ListFileVersions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"b2api", "v1", "b2_list_file_versions"}, ""))
)
464 | ||
var (
	// Response forwarders for each RPC. All of them use the gateway's
	// default ForwardResponseMessage, which marshals the RPC response and
	// writes it to the HTTP client. This block is generated code; regenerate
	// from proto/pyre.proto rather than editing by hand.
	forward_PyreService_AuthorizeAccount_0 = runtime.ForwardResponseMessage

	forward_PyreService_ListBuckets_0 = runtime.ForwardResponseMessage

	forward_PyreService_CreateBucket_0 = runtime.ForwardResponseMessage

	forward_PyreService_DeleteBucket_0 = runtime.ForwardResponseMessage

	forward_PyreService_GetUploadUrl_0 = runtime.ForwardResponseMessage

	forward_PyreService_StartLargeFile_0 = runtime.ForwardResponseMessage

	forward_PyreService_GetUploadPartUrl_0 = runtime.ForwardResponseMessage

	forward_PyreService_FinishLargeFile_0 = runtime.ForwardResponseMessage

	forward_PyreService_ListFileVersions_0 = runtime.ForwardResponseMessage
)
0 | // Copyright 2018, the Blazer authors | |
1 | // | |
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | // you may not use this file except in compliance with the License. | |
4 | // You may obtain a copy of the License at | |
5 | // | |
6 | // http://www.apache.org/licenses/LICENSE-2.0 | |
7 | // | |
8 | // Unless required by applicable law or agreed to in writing, software | |
9 | // distributed under the License is distributed on an "AS IS" BASIS, | |
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | // See the License for the specific language governing permissions and | |
12 | // limitations under the License. | |
13 | ||
14 | syntax = "proto3"; | |
15 | ||
16 | import "google/api/annotations.proto"; | |
17 | ||
18 | package pyre.proto; | |
19 | ||
// AuthorizeAccountRequest carries no fields; any credentials are presumably
// passed out of band (e.g. in request headers) — confirm against the server.
message AuthorizeAccountRequest {}

// AuthorizeAccountResponse is returned by b2_authorize_account and carries
// the session token and base URLs used by all subsequent calls.
message AuthorizeAccountResponse {
  // The identifier for the account.
  string account_id = 1;
  // An authorization token to use with all calls, other than
  // b2_authorize_account, that need an Authorization header. This
  // authorization token is valid for at most 24 hours.
  string authorization_token = 2;
  // The base URL to use for all API calls except for uploading and downloading
  // files.
  string api_url = 3;
  // The base URL to use for downloading files.
  string download_url = 4;
  // The recommended size for each part of a large file. We recommend using
  // this part size for optimal upload performance.
  int32 recommended_part_size = 5;
  // The smallest possible size of a part of a large file (except the last
  // one). This is smaller than the recommended part size. If you use it, you
  // may find that it takes longer overall to upload a large file.
  int32 absolute_minimum_part_size = 6;
  int32 minimum_part_size = 7; // alias for recommended_part_size
}
43 | ||
// ListBucketsRequest selects which of an account's buckets to list; the
// bucket_id and bucket_name filters are mutually independent ways to narrow
// the result to a single bucket.
message ListBucketsRequest {
  // The ID of your account.
  string account_id = 1;
  // When specified, the result will be a list containing just this bucket, if
  // it's present in the account, or no buckets if the account does not have a
  // bucket with this ID.
  string bucket_id = 2;
  // When specified, the result will be a list containing just this bucket, if
  // it's present in the account, or no buckets if the account does not have a
  // bucket with this ID.
  string bucket_name = 3;
  // If present, B2 will use it as a filter for bucket types returned in the
  // list buckets response. If not present, only buckets with bucket types
  // "allPublic", "allPrivate" and "snapshot" will be returned. A special
  // filter value of ["all"] will return all bucket types.
  //
  // If present, it must be in the form of a json array of strings containing
  // valid bucket types in quotes and separated by a comma. Valid bucket types
  // include "allPrivate", "allPublic", "snapshot", and other values added in
  // the future.
  //
  // A bad request error will be returned if "all" is used with other bucket
  // types, this field is empty, or invalid bucket types are requested.
  repeated string bucket_types = 4;
}
69 | ||
// LifecycleRule describes when files matching a name prefix may be hidden
// and subsequently deleted.
message LifecycleRule {
  // After a file is uploaded, the number of days before it can be hidden.
  int32 days_from_uploading_to_hiding = 1;
  // After a file is hidden, the number of days before it can be deleted.
  int32 days_from_hiding_to_deleting = 2;
  // The rule applies to files whose names start with this prefix.
  string file_name_prefix = 3;
}
78 | ||
// CorsRule describes a single named cross-origin resource sharing rule on a
// bucket: which origins, operations, and headers it permits.
message CorsRule {
  // A name for humans to recognize the rule in a user interface. Names must be
  // unique within a bucket. Names can consist of upper-case and lower-case
  // English letters, numbers, and "-". No other characters are allowed. A name
  // must be at least 6 characters long, and can be at most 50 characters long.
  // These are all allowed names: myPhotosSite, allowAnyHttps,
  // backblaze-images. Names that start with "b2-" are reserved for Backblaze
  // use.
  string cors_rule_name = 1;
  // A non-empty list specifying which origins the rule covers. Each value may
  // have one of many formats:
  //
  // * The origin can be fully specified, such as http://www.example.com:8180
  // or https://www.example.com:4433.
  //
  // * The origin can omit a default port, such as https://www.example.com.
  //
  // * The origin may have a single '*' as part of the domain name, such as
  // https://*.example.com, https://*:8443 or https://*.
  //
  // * The origin may be 'https' to match any origin that uses HTTPS. (This is
  // broader than 'https://*' because it matches any port.)
  //
  // * Finally, the origin can be a single '*' to match any origin.
  //
  // If any entry is "*", it must be the only entry. There can be at most one
  // "https" entry and no entry after it may start with "https:".
  repeated string allowed_origins = 2;
  // A list specifying which operations the rule allows. At least one value
  // must be specified. All values must be from the following list. More values
  // may be added to this list at any time.
  //
  // b2_download_file_by_name
  // b2_download_file_by_id
  // b2_upload_file
  // b2_upload_part
  repeated string allowed_operations = 3;
  // If present, this is a list of headers that are allowed in a pre-flight
  // OPTIONS's request's Access-Control-Request-Headers header value. Each
  // value may have one of many formats:
  //
  // * It may be a complete header name, such as x-bz-content-sha1.
  //
  // * It may end with an asterisk, such as x-bz-info-*.
  //
  // * Finally, it may be a single '*' to match any header.
  //
  // If any entry is "*", it must be the only entry in the list. If this list
  // is missing, it is be treated as if it is a list with no entries.
  repeated string allowed_headers = 4;
  // If present, this is a list of headers that may be exposed to an
  // application inside the client (eg. exposed to Javascript in a browser).
  // Each entry in the list must be a complete header name (eg.
  // "x-bz-content-sha1"). If this list is missing or empty, no headers will be
  // exposed.
  repeated string expose_headers = 5;
  // This specifies the maximum number of seconds that a browser may cache the
  // response to a preflight request. The value must not be negative and it
  // must not be more than 86,400 seconds (one day).
  int32 max_age_seconds = 6;
}
140 | ||
// Bucket describes a single B2 bucket and its configuration. It is both the
// request and response type of CreateBucket and DeleteBucket.
message Bucket {
  string account_id = 1;
  string bucket_id = 2;
  string bucket_name = 3;
  string bucket_type = 4;
  map<string, string> bucket_info = 5;
  // NOTE(review): "cores_rules" looks like a typo for "cors_rules". Renaming
  // the field would change its JSON name on the wire, so it is left as is.
  repeated CorsRule cores_rules = 6;
  repeated LifecycleRule lifecycle_rules = 7;
  int32 revision = 8;
}
151 | ||
// ListBucketsResponse holds the buckets that matched a ListBucketsRequest.
message ListBucketsResponse {
  repeated Bucket buckets = 1;
}

// GetUploadUrlRequest asks for an upload URL for the given bucket.
message GetUploadUrlRequest {
  string bucket_id = 1;
}

// GetUploadUrlResponse carries the URL and authorization token to use when
// uploading a file into the bucket.
message GetUploadUrlResponse {
  string bucket_id = 1;
  string upload_url = 2;
  string authorization_token = 3;
}
165 | ||
// UploadFileResponse describes a file that was stored via b2_upload_file.
message UploadFileResponse {
  string file_id = 1;
  string file_name = 2;
  string account_id = 3;
  string bucket_id = 4;
  int32 content_length = 5;
  string content_sha1 = 6;
  string content_type = 7;
  map<string, string> file_info = 8;
  string action = 9;
  int64 upload_timestamp = 10;
}
178 | ||
// StartLargeFileRequest begins a multi-part upload of a single large file
// into the given bucket.
message StartLargeFileRequest {
  string bucket_id = 1;
  string file_name = 2;
  string content_type = 3;
  map<string, string> file_info = 4;
}

// StartLargeFileResponse echoes the large file's metadata and assigns the
// file_id that subsequent part uploads refer to.
message StartLargeFileResponse {
  string file_id = 1;
  string file_name = 2;
  string account_id = 3;
  string bucket_id = 4;
  string content_type = 5;
  map<string, string> file_info = 6;
  int64 upload_timestamp = 7;
}
195 | ||
// GetUploadPartUrlRequest asks for an upload URL for parts of the large file
// identified by file_id.
message GetUploadPartUrlRequest {
  string file_id = 1;
}

// GetUploadPartUrlResponse carries the URL and authorization token to use
// when uploading parts of the large file.
message GetUploadPartUrlResponse {
  string file_id = 1;
  string upload_url = 2;
  string authorization_token = 3;
}
205 | ||
// FinishLargeFileRequest assembles the previously uploaded parts, identified
// by their SHA1 checksums in upload order, into the finished file.
message FinishLargeFileRequest {
  string file_id = 1;
  repeated string part_sha1_array = 2;
  // string sha1 = 3;
}

// FinishLargeFileResponse describes the completed large file.
message FinishLargeFileResponse {
  string file_id = 1;
  string file_name = 2;
  string account_id = 3;
  string bucket_id = 4;
  int64 content_length = 5;
  string content_sha1 = 6; // always "none"
  string content_type = 7;
  map<string, string> file_info = 8;
  string action = 9;
  int64 upload_timestamp = 10;
}
224 | ||
// ListFileVersionsRequest pages through all versions of all files in a
// bucket; start_file_name/start_file_id position the cursor and
// max_file_count bounds the page size.
message ListFileVersionsRequest {
  string bucket_id = 1;
  string start_file_name = 2;
  string start_file_id = 3;
  int32 max_file_count = 4;
  string prefix = 5;
  string delimiter = 6;
}

// ListFileVersionsResponse is one page of file versions. next_file_name and
// next_file_id, when set, are the cursor for the next page.
message ListFileVersionsResponse {
  repeated File files = 1;
  string next_file_name = 2;
  string next_file_id = 3;
}

// File describes a single version of a file.
message File {
  string file_id = 1;
  string file_name = 2;
  int64 content_length = 3;
  string content_type = 4;
  string content_sha1 = 5;
  map<string, string> file_info = 6;
  string action = 7;
  int64 size = 8;
  int64 upload_timestamp = 9;
}
251 | ||
// PyreService exposes a B2-compatible API surface. Each RPC is mapped by
// grpc-gateway onto the corresponding /b2api/v1/b2_* REST endpoint.
service PyreService {
  // Used to log in to the B2 API. Returns an authorization token that can be
  // used for account-level operations, and a URL that should be used as the
  // base URL for subsequent API calls.
  rpc AuthorizeAccount(AuthorizeAccountRequest) returns (AuthorizeAccountResponse) {
    option (google.api.http) = {
      get: "/b2api/v1/b2_authorize_account"
    };
  }

  // Lists buckets associated with an account, in alphabetical order by bucket
  // name.
  rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_list_buckets"
      body: "*"
    };
  }

  // Creates a new bucket. A bucket belongs to the account used to create it.
  //
  // Buckets can be named. The name must be globally unique. No account can use
  // a bucket with the same name. Buckets are assigned a unique bucketId which
  // is used when uploading, downloading, or deleting files.
  //
  // There is a limit of 100 buckets per account.
  rpc CreateBucket(Bucket) returns (Bucket) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_create_bucket"
      body: "*"
    };
  }

  // Deletes the bucket specified. Only buckets that contain no version of any
  // files can be deleted.
  rpc DeleteBucket(Bucket) returns (Bucket) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_delete_bucket"
      body: "*"
    };
  }

  // Gets an URL to use for uploading files to the given bucket.
  rpc GetUploadUrl(GetUploadUrlRequest) returns (GetUploadUrlResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_get_upload_url"
      body: "*"
    };
  }

  // Prepares for uploading the parts of a large file.
  rpc StartLargeFile(StartLargeFileRequest) returns (StartLargeFileResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_start_large_file"
      body: "*"
    };
  }

  // Gets an URL to use for uploading parts of a large file.
  rpc GetUploadPartUrl(GetUploadPartUrlRequest) returns (GetUploadPartUrlResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_get_upload_part_url"
      body: "*"
    };
  }

  // Converts the parts that have been uploaded into a single B2 file.
  rpc FinishLargeFile(FinishLargeFileRequest) returns (FinishLargeFileResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_finish_large_file"
      body: "*"
    };
  }

  // Lists all of the versions of all of the files contained in one bucket, in
  // alphabetical order by file name, and by reverse of date/time uploaded for
  // versions of files with the same name.
  rpc ListFileVersions(ListFileVersionsRequest) returns (ListFileVersionsResponse) {
    option (google.api.http) = {
      post: "/b2api/v1/b2_list_file_versions"
      body: "*"
    };
  }
}
0 | // Copyright 2018, the Blazer authors | |
1 | // | |
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | // you may not use this file except in compliance with the License. | |
4 | // You may obtain a copy of the License at | |
5 | // | |
6 | // http://www.apache.org/licenses/LICENSE-2.0 | |
7 | // | |
8 | // Unless required by applicable law or agreed to in writing, software | |
9 | // distributed under the License is distributed on an "AS IS" BASIS, | |
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | // See the License for the specific language governing permissions and | |
12 | // limitations under the License. | |
13 | ||
14 | // Package pyre provides a gRPC-based implementation of B2, as well as a | |
15 | // RESTful gateway on top of it. | |
16 | package pyre | |
17 | ||
18 | //go:generate protoc -I/usr/local/include -I. -I$GOPATH/src -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis --grpc-gateway_out=logtostderr=true:. proto/pyre.proto | |
19 | //go:generate protoc -I/usr/local/include -I. -I$GOPATH/src -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis --go_out=plugins=grpc:. proto/pyre.proto |
0 | // Copyright 2018, the Blazer authors | |
1 | // | |
2 | // Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | // you may not use this file except in compliance with the License. | |
4 | // You may obtain a copy of the License at | |
5 | // | |
6 | // http://www.apache.org/licenses/LICENSE-2.0 | |
7 | // | |
8 | // Unless required by applicable law or agreed to in writing, software | |
9 | // distributed under the License is distributed on an "AS IS" BASIS, | |
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | // See the License for the specific language governing permissions and | |
12 | // limitations under the License. | |
13 | ||
14 | package pyre | |
15 | ||
import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
	"strings"

	"github.com/google/uuid"
	"github.com/kurin/blazer/internal/b2types"
)
27 | ||
28 | const uploadFilePrefix = "/b2api/v1/b2_upload_file/" | |
29 | ||
30 | type SimpleFileManager interface { | |
31 | Writer(bucket, name, id string) (io.WriteCloser, error) | |
32 | } | |
33 | ||
34 | type simpleFileServer struct { | |
35 | fm SimpleFileManager | |
36 | } | |
37 | ||
38 | type uploadRequest struct { | |
39 | name string | |
40 | contentType string | |
41 | size int64 | |
42 | sha1 string | |
43 | bucket string | |
44 | info map[string]string | |
45 | } | |
46 | ||
47 | func parseUploadHeaders(r *http.Request) (*uploadRequest, error) { | |
48 | ur := &uploadRequest{info: make(map[string]string)} | |
49 | ur.name = r.Header.Get("X-Bz-File-Name") | |
50 | ur.contentType = r.Header.Get("Content-Type") | |
51 | ur.sha1 = r.Header.Get("X-Bz-Content-Sha1") | |
52 | size, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64) | |
53 | if err != nil { | |
54 | return nil, err | |
55 | } | |
56 | ur.size = size | |
57 | for k := range r.Header { | |
58 | if !strings.HasPrefix("X-Bz-Info-", k) { | |
59 | continue | |
60 | } | |
61 | name := strings.TrimPrefix("X-Bz-Info-", k) | |
62 | ur.info[name] = r.Header.Get(k) | |
63 | } | |
64 | ur.bucket = strings.TrimPrefix(r.URL.Path, uploadFilePrefix) | |
65 | return ur, nil | |
66 | } | |
67 | ||
68 | func (fs *simpleFileServer) ServeHTTP(rw http.ResponseWriter, r *http.Request) { | |
69 | req, err := parseUploadHeaders(r) | |
70 | if err != nil { | |
71 | http.Error(rw, err.Error(), 500) | |
72 | fmt.Println("oh no") | |
73 | return | |
74 | } | |
75 | id := uuid.New().String() | |
76 | w, err := fs.fm.Writer(req.bucket, req.name, id) | |
77 | if err != nil { | |
78 | http.Error(rw, err.Error(), 500) | |
79 | fmt.Println("oh no") | |
80 | return | |
81 | } | |
82 | if _, err := io.Copy(w, io.LimitReader(r.Body, req.size)); err != nil { | |
83 | w.Close() | |
84 | http.Error(rw, err.Error(), 500) | |
85 | fmt.Println("oh no") | |
86 | return | |
87 | } | |
88 | if err := w.Close(); err != nil { | |
89 | http.Error(rw, err.Error(), 500) | |
90 | fmt.Println("oh no") | |
91 | return | |
92 | } | |
93 | resp := &b2types.UploadFileResponse{ | |
94 | FileID: id, | |
95 | Name: req.name, | |
96 | SHA1: req.sha1, | |
97 | BucketID: req.bucket, | |
98 | } | |
99 | if err := json.NewEncoder(rw).Encode(resp); err != nil { | |
100 | http.Error(rw, err.Error(), 500) | |
101 | fmt.Println("oh no") | |
102 | return | |
103 | } | |
104 | } | |
105 | ||
106 | func RegisterSimpleFileManagerOnMux(f SimpleFileManager, mux *http.ServeMux) { | |
107 | mux.Handle(uploadFilePrefix, &simpleFileServer{fm: f}) | |
108 | } |
0 | // Copyright 2016, Google | |
0 | // Copyright 2016, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
26 | 26 | "io" |
27 | 27 | "io/ioutil" |
28 | 28 | "reflect" |
29 | "time" | |
29 | 30 | |
30 | 31 | "github.com/kurin/blazer/b2" |
31 | 32 | ) |
53 | 54 | name string |
54 | 55 | b *b2.Bucket |
55 | 56 | ba *b2.BucketAttrs |
57 | } | |
58 | ||
59 | // Mutex returns a new mutex on the given group. Only one caller can hold the | |
60 | // lock on a mutex with a given name, for a given group. | |
61 | func (g *Group) Mutex(ctx context.Context, name string) *Mutex { | |
62 | return &Mutex{ | |
63 | g: g, | |
64 | name: name, | |
65 | ctx: ctx, | |
66 | } | |
56 | 67 | } |
57 | 68 | |
58 | 69 | // Operate calls f with the contents of the group object given by name, and |
334 | 345 | return l, nil |
335 | 346 | } |
336 | 347 | |
348 | // A Mutex is a sync.Locker that is backed by data in B2. | |
349 | type Mutex struct { | |
350 | g *Group | |
351 | name string | |
352 | ctx context.Context | |
353 | } | |
354 | ||
355 | // Lock locks the mutex. If the mutex is already locked, lock will wait, | |
356 | // polling at 1 second intervals, until it can acquire the lock. | |
357 | func (m *Mutex) Lock() { | |
358 | cont := errors.New("continue") | |
359 | for { | |
360 | err := m.g.Operate(m.ctx, m.name, func(b []byte) ([]byte, error) { | |
361 | if len(b) != 0 { | |
362 | return nil, cont | |
363 | } | |
364 | return []byte{1}, nil | |
365 | }) | |
366 | if err == nil { | |
367 | return | |
368 | } | |
369 | if err != cont { | |
370 | panic(err) | |
371 | } | |
372 | time.Sleep(time.Second) | |
373 | } | |
374 | } | |
375 | ||
376 | // Unlock unconditionally unlocks the mutex. This allows programs to clear | |
377 | // stale locks. | |
378 | func (m *Mutex) Unlock() { | |
379 | if err := m.g.Operate(m.ctx, m.name, func([]byte) ([]byte, error) { | |
380 | return nil, nil | |
381 | }); err != nil { | |
382 | panic(err) | |
383 | } | |
384 | } | |
385 | ||
337 | 386 | type consistentInfo struct { |
338 | 387 | Version int |
339 | 388 | |
348 | 397 | // |
349 | 398 | // However, it is still necessary for higher level constructs to confirm that |
350 | 399 | // the serial number they expect is good. The writer does this, for example, |
351 | // but comparing the "key" of the file it is replacing. | |
400 | // by comparing the "key" of the file it is replacing. | |
352 | 401 | Serial int |
353 | 402 | Locations map[string]string |
354 | 403 | } |
5 | 5 | "os" |
6 | 6 | "strconv" |
7 | 7 | "sync" |
8 | "sync/atomic" | |
8 | 9 | "testing" |
10 | "time" | |
9 | 11 | |
10 | 12 | "github.com/kurin/blazer/b2" |
11 | 13 | ) |
29 | 31 | wg.Add(1) |
30 | 32 | i := i |
31 | 33 | go func() { |
32 | var n int | |
33 | 34 | defer wg.Done() |
34 | 35 | for j := 0; j < 10; j++ { |
36 | var n int | |
35 | 37 | if err := g.Operate(ctx, name, func(b []byte) ([]byte, error) { |
36 | 38 | if len(b) > 0 { |
37 | 39 | i, err := strconv.Atoi(string(b)) |
122 | 124 | } |
123 | 125 | } |
124 | 126 | |
127 | func TestMutex(t *testing.T) { | |
128 | ctx := context.Background() | |
129 | bucket, done := startLiveTest(ctx, t) | |
130 | defer done() | |
131 | ||
132 | g := NewGroup(bucket, "tester") | |
133 | m := g.Mutex(ctx, "mootex") | |
134 | var a int32 | |
135 | var wg sync.WaitGroup | |
136 | for i := 0; i < 5; i++ { | |
137 | wg.Add(1) | |
138 | go func(i int) { | |
139 | defer wg.Done() | |
140 | for j := 0; j < 5; j++ { | |
141 | m.Lock() | |
142 | new := atomic.AddInt32(&a, 1) | |
143 | if new != 1 { | |
144 | t.Fatalf("two threads locked at once") | |
145 | } | |
146 | time.Sleep(20 * time.Millisecond) | |
147 | new = atomic.AddInt32(&a, -1) | |
148 | if new != 0 { | |
149 | t.Fatalf("two threads locked at once") | |
150 | } | |
151 | t.Logf("thread %d: lock %d", i, j) | |
152 | m.Unlock() | |
153 | } | |
154 | }(i) | |
155 | } | |
156 | wg.Wait() | |
157 | } | |
158 | ||
125 | 159 | func startLiveTest(ctx context.Context, t *testing.T) (*b2.Bucket, func()) { |
126 | 160 | id := os.Getenv(apiID) |
127 | 161 | key := os.Getenv(apiKey) |
0 | // Copyright 2017, Google | |
0 | // Copyright 2017, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
0 | // Copyright 2018, Google | |
0 | // Copyright 2018, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |
0 | // Copyright 2018, Google | |
0 | // Copyright 2018, the Blazer authors | |
1 | 1 | // |
2 | 2 | // Licensed under the Apache License, Version 2.0 (the "License"); |
3 | 3 | // you may not use this file except in compliance with the License. |