0.61 - delete_keys(), refactoring, unit tests
Rob Lauer
1 year, 1 month ago
0 | Wed Mar 29 08:12:48 2023 Rob Lauer <rlauer6@comcast.net> | |
1 | ||
2 | [0.61 - delete_keys(), refactoring]: | |
3 | * s3-perl.pl | |
4 | - added ASCII table output, refactored | |
5 | * NEWS.md: updated | |
6 | * src/main/perl/lib/Amazon/S3.pm.in | |
7 | - some perlcritic refactoring | |
8 | - pod updates | |
9 | (list_bucket): use different markers for v1, v2 | |
10 | (list_bucket_all) | |
11 | - throw $EVAL_ERROR "The server has stopped responding" | |
12 | (_make_request): light refactoring | |
13 | (_sendrequest): accept keep_root and pass to _xpc_of_content() | |
14 | (_xpc_of_content): NoAttr => $TRUE | |
15 | (_remember_errors): return false if no error | |
16 | * src/main/perl/lib/Amazon/S3/Bucket.pm.in | |
17 | - some perlcritic refactoring | |
18 | (add_key): check reftype properly | |
19 | (set_acl): send content-length | |
20 | * src/main/perl/lib/Amazon/S3/Constants.pm.in | |
21 | - additional constants for refactoring | |
22 | - move subs from t/01-api.t to S3TestUtils.pm | |
23 | * src/main/perl/t/01-api.t: refactoring | |
24 | * src/main/perl/t/02-logger.t: likewise | |
25 | * src/main/perl/t/03-region.t: likewise | |
26 | * src/main/perl/t/04-list-buckets.t: likewise | |
27 | * src/main/perl/t/05-multipart-upload.t: likewise | |
28 | * src/main/perl/t/06-list-multipart-uploads.t: likewise | |
29 | * src/main/perl/test.localstack: converted to bash script | |
30 | * src/main/perl/S3TestUtils.pm: new | |
31 | * cpan/buildspec.yml: add above to distribution | |
32 | ||
0 | 33 | Mon Mar 27 10:47:54 2023 Rob Lauer <rlauer6@comcast.net> |
1 | 34 | |
2 | 35 | [0.61 - delete_keys()]: |
6 | 6 | notice of such bugs and the versions in which they were fixed will be |
7 | 7 | noted here, as well. |
8 | 8 | |
9 | # perl-Amazon-S3 0.61 (2023-03-27) | |
9 | # perl-Amazon-S3 0.61 (2023-03-30) | |
10 | 10 | |
11 | > This version adds a new method for bulk deletion of keys. | |
11 | > This version adds a new method for bulk deletion of keys. Some | |
12 | > refactoring of the code has been done and unit tests have been | |
13 | > cleaned up. | |
12 | 14 | |
13 | 15 | ## Enhancements |
14 | 16 |
6 | 6 | |
7 | 7 | # SYNOPSIS |
8 | 8 | |
9 | #!/usr/bin/perl | |
10 | use warnings; | |
11 | use strict; | |
12 | ||
13 | 9 | use Amazon::S3; |
14 | ||
15 | use vars qw/$OWNER_ID $OWNER_DISPLAYNAME/; | |
16 | 10 | |
17 | 11 | my $aws_access_key_id = "Fill me in!"; |
18 | 12 | my $aws_secret_access_key = "Fill me in too!"; |
28 | 22 | |
29 | 23 | # create a bucket |
30 | 24 | my $bucket_name = $aws_access_key_id . '-net-amazon-s3-test'; |
25 | ||
31 | 26 | my $bucket = $s3->add_bucket( { bucket => $bucket_name } ) |
32 | 27 | or die $s3->err . ": " . $s3->errstr; |
33 | 28 | |
34 | 29 | # store a key with a content-type and some optional metadata |
35 | 30 | my $keyname = 'testing.txt'; |
31 | ||
36 | 32 | my $value = 'T'; |
33 | ||
37 | 34 | $bucket->add_key( |
38 | 35 | $keyname, $value, |
39 | 36 | { content_type => 'text/plain', |
50 | 47 | # list keys in the bucket |
51 | 48 | $response = $bucket->list |
52 | 49 | or die $s3->err . ": " . $s3->errstr; |
50 | ||
53 | 51 | print $response->{bucket}."\n"; |
52 | ||
54 | 53 | for my $key (@{ $response->{keys} }) { |
55 | 54 | print "\t".$key->{key}."\n"; |
56 | 55 | } |
57 | 56 | |
58 | 57 | # delete key from bucket |
59 | 58 | $bucket->delete_key($keyname); |
59 | ||
60 | # delete multiple keys from bucket | |
61 | $bucket->delete_keys([$key1, $key2, $key3]); | |
60 | 62 | |
61 | 63 | # delete bucket |
62 | 64 | $bucket->delete_bucket; |
63 | 65 | |
64 | 66 | # DESCRIPTION |
65 | 67 | |
68 | This documentation refers to version 0.61. | |
69 | ||
66 | 70 | `Amazon::S3` provides a portable client interface to Amazon Simple |
67 | 71 | Storage System (S3). |
68 | 72 | |
69 | _This module is rather dated. For a much more robust and modern | |
70 | implementation of an S3 interface try `Net::Amazon::S3`. | |
71 | `Amazon::S3` ostensibly was intended to be a drop-in replacement for | |
72 | `Net:Amazon::S3` that "traded some performance in return for | |
73 | portability". That statement is no longer accurate as | |
74 | `Net::Amazon::S3` implements much more of the S3 API and may have | |
75 | changed the interface in ways that might break your | |
76 | applications. However, `Net::Amazon::S3` is today dependent on | |
73 | This module is rather dated; however, with some help from a few | |
74 | contributors it has had some recent updates. Recent changes include | |
75 | implementations of: | |
76 | ||
77 | - ListObjectsV2 | |
78 | - CopyObject | |
79 | - DeleteObjects | |
80 | ||
81 | Additionally, this module now implements Signature Version 4 signing, | |
82 | unit tests have been updated and more documentation has been added or | |
83 | corrected. Credentials are encrypted if you have encryption modules installed. | |
84 | ||
85 | ## Comparison to Other Perl S3 Modules | |
86 | ||
87 | Other implementations for accessing Amazon's S3 service include | |
88 | `Net::Amazon::S3` and the `Paws` project. `Amazon::S3` ostensibly | |
89 | was intended to be a drop-in replacement for `Net::Amazon::S3` that | |
90 | "traded some performance in return for portability". That statement is | |
91 | no longer accurate as `Amazon::S3` may have changed the interface in | |
92 | ways that might break your applications if you are relying on | |
93 | compatibility with `Net::Amazon::S3`. | |
94 | ||
95 | However, `Net::Amazon::S3` and `Paws::S3` today, are dependent on | |
77 | 96 | `Moose` which may in fact level the playing field in terms of |
78 | 97 | performance penalties that may have been introduced by recent updates |
79 | to `Amazon::S3`. YMMV, however, this module may still appeal to some | |
80 | that favor simplicity of the interface and a lower number of | |
81 | dependencies. Below is the original description of the module._ | |
98 | to `Amazon::S3`. Changes to `Amazon::S3` include the use of more | |
99 | Perl modules in lieu of raw Perl code to increase maintainability and | |
100 | stability as well as some refactoring. `Amazon::S3` also strives now | |
101 | to adhere to best practices as much as possible. | |
102 | ||
103 | `Paws::S3` is a much more robust implementation of | |
104 | a Perl S3 interface, however this module may still appeal to | |
105 | those that favor simplicity of the interface and a lower number of | |
106 | dependencies. Below is the original description of the module. | |
82 | 107 | |
83 | 108 | > Amazon S3 is storage for the Internet. It is designed to |
84 | 109 | > make web-scale computing easier for developers. Amazon S3 |
113 | 138 | # LIMITATIONS AND DIFFERENCES WITH EARLIER VERSIONS |
114 | 139 | |
115 | 140 | As noted, this module is no longer a _drop-in_ replacement for |
116 | `Net::Amazon::S3` and has limitations and differences that may make | |
117 | the use of this module in your applications | |
118 | questionable. Additionally, one of the original intents of this fork | |
119 | of `Net::Amazon::S3` was to reduce the dependencies and make it | |
120 | _easy to install_. Recent changes to this module have introduced new | |
121 | dependencies in order to improve the maintainability and provide | |
122 | additional features. Installing CPAN modules is never easy, especially | |
123 | when the dependencies of the dependencies are impossible to control | |
124 | and include XS modules. | |
141 | `Net::Amazon::S3` and has limitations and differences that may impact | |
142 | the use of this module in your applications. Additionally, one of the | |
143 | original intents of this fork of `Net::Amazon::S3` was to reduce the | |
144 | number of dependencies and make it _easy to install_. Recent changes | |
145 | to this module have introduced new dependencies in order to improve | |
146 | the maintainability and provide additional features. Installing CPAN | |
147 | modules is never easy, especially when the dependencies of the | |
148 | dependencies are impossible to control and include XS modules. | |
125 | 149 | |
126 | 150 | - MINIMUM PERL |
127 | 151 | |
146 | 170 | |
147 | 171 | HTML::HeadParser 2.14 |
148 | 172 | LWP 6.13 |
149 | Amazon::S3 0.55 | |
173 | Amazon::S3 | |
150 | 174 | |
151 | 175 | ...other versions _may_ work...YMMV. |
152 | 176 | |
171 | 195 | parameter. This implies that you need to supply the bucket's region |
172 | 196 | when signing requests for any API call that involves a specific |
173 | 197 | bucket. Starting with version 0.55 of this module, |
174 | `Amazon::S3::Bucket` provides a new method (`region()` and accepts | |
198 | `Amazon::S3::Bucket` provides a new method (`region()`) and accepts | |
175 | 199 | in the constructor a `region` parameter. If a region is not |
176 | 200 | supplied, the region for the bucket will be set to the region set in |
177 | 201 | the `account` object (`Amazon::S3`) that you passed to the bucket's |
194 | 218 | |
195 | 219 | - Multipart Upload Support |
196 | 220 | |
197 | There is limited testing for multipart uploads. | |
221 | There is some limited testing for multipart uploads. | |
198 | 222 | |
199 | 223 | For more information regarding multi-part uploads visit the link below. |
200 | 224 | |
335 | 359 | |
336 | 360 | Unfortunately, while this will prevent [Net::Amazon::Signature::V4](https://metacpan.org/pod/Net%3A%3AAmazon%3A%3ASignature%3A%3AV4) |
337 | 361 | from hanging on to your credentials, your credentials will be stored in |
338 | the [Amazon::S3](https://metacpan.org/pod/Amazon%3A%3AS3) object. | |
362 | the `Amazon::S3` object. | |
339 | 363 | |
340 | 364 | Starting with version 0.55 of this module, if you have installed |
341 | 365 | [Crypt::CBC](https://metacpan.org/pod/Crypt%3A%3ACBC) and [Crypt::Blowfish](https://metacpan.org/pod/Crypt%3A%3ABlowfish), your credentials will be |
353 | 377 | |
354 | 378 | - 5. Do nothing...send the credentials, use the default signer. |
355 | 379 | |
356 | In this case, both the [Amazon::S3](https://metacpan.org/pod/Amazon%3A%3AS3) class and the | |
380 | In this case, both the `Amazon::S3` class and the | |
357 | 381 | [Net::Amazon::Signature::V4](https://metacpan.org/pod/Net%3A%3AAmazon%3A%3ASignature%3A%3AV4) have your credentials. Caveat Emptor. |
358 | 382 | |
359 | 383 | See Also [Amazon::Credentials](https://metacpan.org/pod/Amazon%3A%3ACredentials) for more information about safely |
719 | 743 | |
720 | 744 | Your AWS access key |
721 | 745 | |
722 | - AWS\_ACCESS\_KEY\_SECRET | |
746 | - AWS\_SECRET\_ACCESS\_KEY | |
723 | 747 | |
724 | 748 | Your AWS sekkr1t passkey. Be forewarned that setting this environment variable |
725 | 749 | on a shared system might leak that information to another user. Be careful. |
729 | 753 | Doesn't matter what you set it to. Just has to be set if you want |
730 | 754 | to skip ACLs tests. |
731 | 755 | |
756 | - AMAZON\_S3\_SKIP\_PERMISSIONS | |
757 | ||
758 | Skip tests that check for enforcement of ACLs...as of this version, | |
759 | LocalStack for example does not support enforcement of ACLs. | |
760 | ||
732 | 761 | - AMAZON\_S3\_SKIP\_REGION\_CONSTRAINT\_TEST |
733 | 762 | |
734 | 763 | Doesn't matter what you set it to. Just has to be set if you want |
751 | 780 | |
752 | 781 | _Consider using an S3 mocking service like `minio` or `LocalStack` |
753 | 782 | if you want to create real tests for your applications or this module._ |
783 | ||
784 | Here's a bash script for testing using LocalStack | |
785 | ||
786 | #!/bin/bash | |
787 | # -*- mode: sh; -*- | |
788 | ||
789 | BUCKET=net-amazon-s3-test-test | |
790 | ENDPOINT_URL=s3.localhost.localstack.cloud:4566 | |
791 | ||
792 | AMAZON_S3_EXPENSIVE_TESTS=1 \ | |
793 | AMAZON_S3_HOST=$ENDPOINT_URL \ | |
794 | AMAZON_S3_LOCALSTACK=1 \ | |
795 | AWS_ACCESS_KEY_ID=test \ | |
796 | AWS_SECRET_ACCESS_KEY=test \ | |
797 | AMAZON_S3_DOMAIN_BUCKET_NAMES=1 make test 2>&1 | tee test.log | |
798 | ||
799 | To run the tests...clone the project and build the software. | |
800 | ||
801 | cd src/main/perl | |
802 | ./test.localstack | |
754 | 803 | |
755 | 804 | # ADDITIONAL INFORMATION |
756 | 805 |
12 | 12 | - ChangeLog |
13 | 13 | - README.md |
14 | 14 | - README-TESTING.md |
15 | - src/main/perl/S3TestUtils.pm | |
15 | 16 | path: |
16 | 17 | pm_module: src/main/perl/lib |
17 | 18 | tests: src/main/perl/t |
3 | 3 | |
4 | 4 | use strict; |
5 | 5 | use warnings; |
6 | ||
7 | use locale; # for proper sorting | |
6 | 8 | |
7 | 9 | use Amazon::Credentials; |
8 | 10 | use Amazon::S3; |
10 | 12 | use Data::Dumper; |
11 | 13 | use English qw(-no_match_vars); |
12 | 14 | use File::HomeDir; |
13 | use Getopt::Long qw(:config no_ignore_case); | |
15 | use Getopt::Long qw(:config no_ignore_case); | |
14 | 16 | use Log::Log4perl qw(:easy); |
15 | 17 | |
16 | 18 | use Readonly; |
18 | 20 | Readonly our $TRUE => 1; |
19 | 21 | Readonly our $FALSE => 0; |
20 | 22 | |
23 | Readonly our $DEFAULT_HOST => 's3.amazonaws.com'; | |
24 | ||
25 | Readonly our $EMPTY => q{}; | |
26 | ||
21 | 27 | ######################################################################## |
22 | 28 | sub _bucket { |
23 | 29 | ######################################################################## |
25 | 31 | |
26 | 32 | return $s3->bucket( |
27 | 33 | { bucket => $bucket_name, |
28 | verify_region => $TRUE | |
34 | verify_region => $TRUE, | |
29 | 35 | } |
30 | 36 | ); |
31 | 37 | } |
113 | 119 | ######################################################################## |
114 | 120 | my ( $s3, %options ) = @_; |
115 | 121 | |
116 | return $s3->list_bucket_all_v2( { bucket => $options{bucket} } ); | |
122 | my $prefix = $options{prefix}; | |
123 | my $response; | |
124 | ||
125 | if ( defined $prefix ) { | |
126 | ||
127 | $prefix =~ s/^\///xsm; | |
128 | ||
129 | $response = $s3->list_bucket_all_v2( | |
130 | { bucket => $options{bucket}, | |
131 | prefix => $prefix | |
132 | } | |
133 | ); | |
134 | } | |
135 | else { | |
136 | $response = $s3->list_bucket_all_v2( { bucket => $options{bucket} } ); | |
137 | } | |
138 | ||
139 | return $response | |
140 | if !$options{table}; | |
141 | ||
142 | my $data | |
143 | = [ reverse sort { $a->{key} cmp $b->{key} } @{ $response->{keys} } ]; | |
144 | ||
145 | my $cols = [qw(key size last_modified etag)]; | |
146 | ||
147 | my $heading = $response->{bucket}; | |
148 | ||
149 | if ( $options{prefix} ) { | |
150 | $heading = sprintf '%s/%s', $heading, $options{prefix}; | |
151 | } | |
152 | ||
153 | my $table = easy_table( | |
154 | data => $data, | |
155 | columns => $cols, | |
156 | table_options => { headingText => $heading }, | |
157 | fix_headings => $TRUE, | |
158 | ); | |
159 | ||
160 | return $table->drawit; | |
117 | 161 | } |
118 | 162 | |
119 | 163 | ######################################################################## |
121 | 165 | ######################################################################## |
122 | 166 | my ( $s3, %options ) = @_; |
123 | 167 | |
124 | return $s3->buckets(); | |
168 | my $buckets = $s3->buckets(); | |
169 | ||
170 | return | |
171 | if !$buckets; | |
172 | ||
173 | my $data = $buckets->{buckets}; | |
174 | ||
175 | my $table; | |
176 | ||
177 | if ( $options{table} ) { | |
178 | ||
179 | $table = eval { | |
180 | use Text::ASCIITable::EasyTable; | |
181 | ||
182 | return easy_table( | |
183 | data => [ sort { $a->{bucket} cmp $b->{bucket} } @{$data} ], | |
184 | columns => [qw( bucket region creation_date)], | |
185 | table_options => { headingText => 'Buckets' }, | |
186 | fix_headings => $TRUE, | |
187 | ); | |
188 | ||
189 | }; | |
190 | } | |
191 | ||
192 | return $table ? $table->drawit : $data; | |
125 | 193 | } |
126 | 194 | |
127 | 195 | ######################################################################## |
136 | 204 | -d, --debug debug output |
137 | 205 | -h, --help this |
138 | 206 | -H, --host default: s3.amazonaws.com |
207 | -o, --output json or keys when listing contents of a bucket, otherwise Dumper output | |
139 | 208 | -p, --profile AWS credentials profile, default is hunt for them |
140 | 209 | -r, --region region, default: us-east-1 |
210 | -t, --table output keys and bucket list as tables | |
141 | 211 | |
142 | 212 | Commands Args Description |
143 | 213 | -------- ---- ----------- |
144 | 214 | Buckets create(-bucket) - create a new bucket |
145 | list(-bucket) - list the contents of a bucket | |
215 | list(-bucket) prefix list the contents of a bucket (all) or just prefix | |
146 | 216 | remove(-bucket) - remove a bucket (must be empty) |
147 | 217 | show-(buckets) - |
148 | 218 | |
151 | 221 | delete(-key) key delete an object |
152 | 222 | get(-key) key [filename] fetch an object and optionally store to file |
153 | 223 | |
224 | Hint: output can be shown in ASCII tables if you have Text::ASCIITable::EasyTable installed. | |
154 | 225 | END_OF_HELP |
155 | 226 | |
156 | 227 | return; |
160 | 231 | sub main { |
161 | 232 | ######################################################################## |
162 | 233 | |
163 | my %options; | |
234 | my %options = ( output => $EMPTY ); | |
164 | 235 | |
165 | 236 | GetOptions( |
166 | \%options, 'bucket=s', 'debug', 'host|H=s', | |
167 | 'region=s', 'help|h', 'profile=s' | |
237 | \%options, 'bucket=s', 'debug', 'host|H=s', | |
238 | 'region=s', 'help|h', 'profile=s', 'output=s', | |
239 | 'table', | |
168 | 240 | ); |
169 | 241 | |
170 | 242 | if ( $options{help} ) { |
171 | 243 | help(); |
244 | ||
172 | 245 | exit 0; |
173 | 246 | } |
174 | 247 | |
180 | 253 | } |
181 | 254 | ); |
182 | 255 | |
183 | my $command = lc( shift @ARGV // q{} ); | |
256 | my $command = lc( shift @ARGV // $EMPTY ); | |
184 | 257 | $command =~ s/-(.*)$//xsm; |
185 | 258 | |
186 | 259 | my $args = [@ARGV]; # save for debugging |
187 | 260 | |
188 | $options{key} = shift @ARGV; | |
261 | $options{key} = shift @ARGV; | |
262 | $options{prefix} = $options{key}; | |
189 | 263 | |
190 | 264 | $options{file} = shift @ARGV; |
191 | 265 | $options{name} = $options{file}; # copy key |
192 | 266 | |
193 | my $host = $options{host} // q{}; | |
267 | my $host = $options{host} // $DEFAULT_HOST; | |
194 | 268 | $host =~ s/^https?:\/\///xsm; |
195 | 269 | |
196 | 270 | my $s3 = Amazon::S3->new( |
204 | 278 | |
205 | 279 | DEBUG( |
206 | 280 | sub { |
207 | return sprintf "%s, %s, %s\n", $s3->err // q{}, $s3->errstr // q{}, | |
281 | return sprintf "%s, %s, %s\n", $s3->err // $EMPTY, | |
282 | $s3->errstr // $EMPTY, | |
208 | 283 | Dumper( [ $s3->error ] ); |
209 | 284 | } |
210 | 285 | ); |
248 | 323 | ); |
249 | 324 | } |
250 | 325 | |
251 | print Dumper( [ 'result', $result ] ); | |
326 | if ( $options{output} eq 'json' ) { | |
327 | print JSON->new->pretty->encode($result); | |
328 | } | |
329 | elsif ( ref $result ) { | |
330 | print Dumper( [$result] ); | |
331 | } | |
332 | else { | |
333 | print $result; | |
334 | } | |
252 | 335 | } |
253 | 336 | |
254 | 337 | return; |
0 | package S3TestUtils; | |
1 | ||
2 | use strict; | |
3 | use warnings; | |
4 | ||
5 | use Data::Dumper; | |
6 | use English qw(-no_match_vars); | |
7 | use List::Util qw(any); | |
8 | use Readonly; | |
9 | use Test::More; | |
10 | ||
11 | use parent qw(Exporter); | |
12 | ||
13 | # chars | |
14 | Readonly our $EMPTY => q{}; | |
15 | Readonly our $SLASH => q{/}; | |
16 | ||
17 | # booleans | |
18 | Readonly our $TRUE => 1; | |
19 | Readonly our $FALSE => 0; | |
20 | ||
21 | # mocking services | |
22 | Readonly our $DEFAULT_LOCAL_STACK_HOST => 'localhost:4566'; | |
23 | Readonly our $DEFAULT_MINIO_HOST => 'localhost:9000'; | |
24 | ||
25 | # http codes | |
26 | Readonly our $HTTP_OK => '200'; | |
27 | Readonly our $HTTP_FORBIDDEN => '403'; | |
28 | Readonly our $HTTP_CONFLICT => '409'; | |
29 | ||
30 | # misc | |
31 | Readonly our $TEST_BUCKET_PREFIX => 'net-amazon-s3-test'; | |
32 | ||
33 | # create a domain name for this if AMAZON_S3_DNS_BUCKET_NAMES is true | |
34 | Readonly our $MOCK_SERVICES_BUCKET_NAME => $TEST_BUCKET_PREFIX . '-test'; | |
35 | ||
36 | Readonly our $PUBLIC_READ_POLICY => <<END_OF_POLICY; | |
37 | <Grant> | |
38 | <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" | |
39 | xsi:type="Group"> | |
40 | <URI>http://acs.amazonaws.com/groups/global/AllUsers</URI> | |
41 | </Grantee> | |
42 | <Permission>READ</Permission> | |
43 | </Grant> | |
44 | END_OF_POLICY | |
45 | ||
46 | our %EXPORT_TAGS = ( | |
47 | constants => [ | |
48 | qw( | |
49 | $EMPTY | |
50 | $SLASH | |
51 | $TRUE | |
52 | $FALSE | |
53 | $DEFAULT_LOCAL_STACK_HOST | |
54 | $HTTP_OK | |
55 | $HTTP_CONFLICT | |
56 | $HTTP_FORBIDDEN | |
57 | $TEST_BUCKET_PREFIX | |
58 | $MOCK_SERVICES_BUCKET_NAME | |
59 | $PUBLIC_READ_POLICY | |
60 | ) | |
61 | ], | |
62 | subs => [ | |
63 | qw( | |
64 | add_keys | |
65 | check_test_bucket | |
66 | create_bucket | |
67 | get_s3_service | |
68 | is_aws | |
69 | make_bucket_name | |
70 | set_s3_host | |
71 | ) | |
72 | ], | |
73 | ); | |
74 | ||
75 | our @EXPORT_OK = map { @{ $EXPORT_TAGS{$_} } } ( keys %EXPORT_TAGS ); | |
76 | ||
77 | ######################################################################## | |
78 | sub make_bucket_name { | |
79 | ######################################################################## | |
80 | return $MOCK_SERVICES_BUCKET_NAME | |
81 | if !is_aws(); | |
82 | ||
83 | my $suffix = eval { | |
84 | require Data::UUID; | |
85 | ||
86 | return lc Data::UUID->new->create_str(); | |
87 | }; | |
88 | ||
89 | $suffix //= join $EMPTY, map { ( 'A' .. 'Z', 'a' .. 'z', 0 .. 9 )[$_] } | |
90 | map { int rand 62 } ( 0 .. 15 ); | |
91 | ||
92 | my $bucket_name = sprintf '%s-%s', $TEST_BUCKET_PREFIX, $suffix; | |
93 | ||
94 | return $bucket_name; | |
95 | } | |
96 | ||
97 | ######################################################################## | |
98 | sub is_aws { | |
99 | ######################################################################## | |
100 | return ( $ENV{AMAZON_S3_LOCALSTACK} || $ENV{AMAZON_S3_MINIO} ) | |
101 | ? $FALSE | |
102 | : $TRUE; | |
103 | } | |
104 | ||
105 | ######################################################################## | |
106 | sub check_test_bucket { | |
107 | ######################################################################## | |
108 | my ($s3) = @_; | |
109 | ||
110 | # list all buckets that I own | |
111 | my $response = eval { return $s3->buckets; }; | |
112 | ||
113 | if ( $EVAL_ERROR || !$response ) { | |
114 | diag( | |
115 | Dumper( [ error => [ $response, $s3->err, $s3->errstr, $s3->error ] ] ) | |
116 | ); | |
117 | ||
118 | BAIL_OUT($EVAL_ERROR); | |
119 | } | |
120 | ||
121 | my ( $owner_id, $owner_displayname ) | |
122 | = @{$response}{qw(owner_id owner_displayname)}; | |
123 | ||
124 | my $bucket_name = make_bucket_name(); | |
125 | ||
126 | my @buckets = map { $_->{bucket} } @{ $response->{buckets} }; | |
127 | ||
128 | if ( any { $_ =~ /$bucket_name/xsm } @buckets ) { | |
129 | BAIL_OUT( 'test bucket already exists: ' . $bucket_name ); | |
130 | } | |
131 | ||
132 | return ( $owner_id, $owner_displayname ); | |
133 | } | |
134 | ||
135 | ######################################################################## | |
136 | sub set_s3_host { | |
137 | ######################################################################## | |
138 | my $host = $ENV{AMAZON_S3_HOST}; | |
139 | ||
140 | $host //= 's3.amazonaws.com'; | |
141 | ||
142 | ## no critic (RequireLocalizedPunctuationVars) | |
143 | ||
144 | if ( exists $ENV{AMAZON_S3_LOCALSTACK} ) { | |
145 | ||
146 | $host //= $DEFAULT_LOCAL_STACK_HOST; | |
147 | ||
148 | $ENV{AWS_ACCESS_KEY_ID} = 'test'; | |
149 | ||
150 | $ENV{AWS_SECRET_ACCESS_KEY} = 'test'; | |
151 | ||
152 | $ENV{AMAZON_S3_EXPENSIVE_TESTS} = $TRUE; | |
153 | ||
154 | $ENV{AMAZON_S3_SKIP_PERMISSIONS} = $TRUE; | |
155 | } | |
156 | elsif ( exists $ENV{AMAZON_S3_MINIO} ) { | |
157 | ||
158 | $host //= $DEFAULT_MINIO_HOST; | |
159 | ||
160 | $ENV{AMAZON_S3_SKIP_ACLS} = $TRUE; | |
161 | ||
162 | $ENV{AMAZON_S3_EXPENSIVE_TESTS} = $TRUE; | |
163 | ||
164 | $ENV{AMAZON_S3_SKIP_REGION_CONSTRAINT_TEST} = $TRUE; | |
165 | } | |
166 | ||
167 | return $host; | |
168 | } | |
169 | ||
170 | ######################################################################## | |
171 | sub get_s3_service { | |
172 | ######################################################################## | |
173 | my ($host) = @_; | |
174 | ||
175 | my $s3 = eval { | |
176 | ||
177 | if ( $ENV{AMAZON_S3_CREDENTIALS} ) { | |
178 | require Amazon::Credentials; | |
179 | ||
180 | return Amazon::S3->new( | |
181 | { credentials => Amazon::Credentials->new, | |
182 | host => $host, | |
183 | secure => is_aws(), | |
184 | dns_bucket_names => $ENV{AMAZON_S3_DNS_BUCKET_NAMES}, | |
185 | level => $ENV{DEBUG} ? 'trace' : 'error', | |
186 | } | |
187 | ); | |
188 | ||
189 | } | |
190 | else { | |
191 | return Amazon::S3->new( | |
192 | { aws_access_key_id => $ENV{AWS_ACCESS_KEY_ID}, | |
193 | aws_secret_access_key => $ENV{AWS_SECRET_ACCESS_KEY}, | |
194 | token => $ENV{AWS_SESSION_TOKEN}, | |
195 | host => $host, | |
196 | secure => is_aws(), | |
197 | dns_bucket_names => $ENV{AMAZON_S3_DNS_BUCKET_NAMES}, | |
198 | level => $ENV{DEBUG} ? 'trace' : 'error', | |
199 | } | |
200 | ); | |
201 | } | |
202 | }; | |
203 | ||
204 | return $s3; | |
205 | } | |
206 | ||
207 | ######################################################################## | |
208 | sub create_bucket { | |
209 | ######################################################################## | |
210 | my ( $s3, $bucket_name ) = @_; | |
211 | ||
212 | $bucket_name = $SLASH . $bucket_name; | |
213 | ||
214 | my $bucket_obj | |
215 | = eval { return $s3->add_bucket( { bucket => $bucket_name } ); }; | |
216 | ||
217 | return $bucket_obj; | |
218 | } | |
219 | ||
220 | ######################################################################## | |
221 | sub add_keys { | |
222 | ######################################################################## | |
223 | my ( $bucket_obj, $max_keys, $prefix ) = @_; | |
224 | ||
225 | $prefix //= q{}; | |
226 | ||
227 | foreach my $key ( 1 .. $max_keys ) { | |
228 | my $keyname = sprintf '%stesting-%02d.txt', $prefix, $key; | |
229 | my $value = 'T'; | |
230 | ||
231 | $bucket_obj->add_key( $keyname, $value ); | |
232 | } | |
233 | ||
234 | return $max_keys; | |
235 | } | |
236 | ||
237 | 1; |
2 | 2 | use strict; |
3 | 3 | use warnings; |
4 | 4 | |
5 | use Amazon::S3::Constants qw{:all}; | |
5 | use Amazon::S3::Constants qw(:all); | |
6 | 6 | use Carp; |
7 | 7 | use Data::Dumper; |
8 | use Digest::MD5 qw(md5 md5_hex); | |
8 | use Digest::MD5 qw(md5 md5_hex); | |
9 | 9 | use Digest::MD5::File qw(file_md5 file_md5_hex); |
10 | use English qw(-no_match_vars); | |
10 | use English qw(-no_match_vars); | |
11 | 11 | use File::stat; |
12 | 12 | use IO::File; |
13 | 13 | use IO::Scalar; |
14 | 14 | use MIME::Base64; |
15 | 15 | use Scalar::Util qw(reftype); |
16 | 16 | use URI; |
17 | use XML::Simple; | |
18 | ||
19 | use parent qw{Class::Accessor::Fast}; | |
20 | ||
21 | our $VERSION = '@PACKAGE_VERSION@'; ## no critic | |
17 | use XML::Simple; ## no critic (DiscouragedModules) | |
18 | ||
19 | use parent qw(Class::Accessor::Fast); | |
20 | ||
21 | our $VERSION = '@PACKAGE_VERSION@'; ## no critic (RequireInterpolation) | |
22 | 22 | |
23 | 23 | __PACKAGE__->mk_accessors( |
24 | 24 | qw( |
29 | 29 | region |
30 | 30 | logger |
31 | 31 | verify_region |
32 | ) | |
32 | ), | |
33 | 33 | ); |
34 | 34 | |
35 | 35 | ######################################################################## |
70 | 70 | } |
71 | 71 | |
72 | 72 | return $self; |
73 | } ## end sub new | |
73 | } | |
74 | 74 | |
75 | 75 | ######################################################################## |
76 | 76 | sub _uri { |
90 | 90 | |
91 | 91 | if ( $account->dns_bucket_names ) { |
92 | 92 | $uri =~ s/^\///xsm; |
93 | } ## end if ( $self->account->dns_bucket_names) | |
93 | } | |
94 | 94 | |
95 | 95 | return $uri; |
96 | } ## end sub _uri | |
96 | } | |
97 | 97 | |
98 | 98 | ######################################################################## |
99 | 99 | sub add_key { |
111 | 111 | $conf->{'x-amz-acl'} = $conf->{acl_short}; |
112 | 112 | |
113 | 113 | delete $conf->{acl_short}; |
114 | } ## end if ( $conf->{acl_short...}) | |
115 | ||
116 | if ( ref $value eq 'SCALAR' ) { | |
114 | } | |
115 | ||
116 | if ( ref($value) && reftype($value) eq 'SCALAR' ) { | |
117 | 117 | my $md5_hex = file_md5_hex( ${$value} ); |
118 | 118 | my $md5 = pack 'H*', $md5_hex; |
119 | 119 | |
126 | 126 | $value = _content_sub( ${$value}, $self->buffer_size ); |
127 | 127 | |
128 | 128 | $conf->{'x-amz-content-sha256'} = 'UNSIGNED-PAYLOAD'; |
129 | } ## end if ( ref $value eq 'SCALAR') | |
129 | } | |
130 | 130 | else { |
131 | 131 | $conf->{'Content-Length'} ||= length $value; |
132 | 132 | |
135 | 135 | my $md5_base64 = encode_base64($md5); |
136 | 136 | |
137 | 137 | $conf->{'Content-MD5'} = $md5_base64; |
138 | } ## end else [ if ( ref $value eq 'SCALAR')] | |
138 | } | |
139 | 139 | |
140 | 140 | # If we're pushing to a bucket that's under |
141 | 141 | # DNS flux, we might get a 307 Since LWP doesn't support actually |
145 | 145 | return $self->_add_key( |
146 | 146 | { headers => $conf, |
147 | 147 | data => $value, |
148 | key => $key | |
149 | } | |
148 | key => $key, | |
149 | }, | |
150 | 150 | ); |
151 | 151 | }; |
152 | 152 | |
156 | 156 | if ($EVAL_ERROR) { |
157 | 157 | my $rsp = $account->last_response; |
158 | 158 | |
159 | if ( $rsp->code eq '301' ) { | |
159 | if ( $rsp->code eq $HTTP_MOVED_PERMANENTLY ) { | |
160 | 160 | $self->region( $rsp->headers->{'x-amz-bucket-region'} ); |
161 | 161 | } |
162 | 162 | |
163 | 163 | $retval = $self->_add_key( |
164 | 164 | { headers => $conf, |
165 | 165 | data => $value, |
166 | key => $key | |
167 | } | |
166 | key => $key, | |
167 | }, | |
168 | 168 | ); |
169 | 169 | } |
170 | 170 | |
171 | 171 | return $retval; |
172 | } ## end sub add_key | |
172 | } | |
173 | 173 | |
174 | 174 | ######################################################################## |
175 | 175 | sub _add_key { |
186 | 186 | headers => $headers, |
187 | 187 | data => $data, |
188 | 188 | region => $self->region, |
189 | } | |
189 | }, | |
190 | 190 | ); |
191 | } ## end if ( ref $value ) | |
191 | } | |
192 | 192 | else { |
193 | 193 | return $account->_send_request_expect_nothing( |
194 | 194 | { method => 'PUT', |
196 | 196 | headers => $headers, |
197 | 197 | data => $data, |
198 | 198 | region => $self->region, |
199 | } | |
199 | }, | |
200 | 200 | ); |
201 | 201 | } |
202 | } ## end else [ if ( ref $value ) ] | |
202 | } | |
203 | 203 | |
204 | 204 | ######################################################################## |
205 | 205 | sub add_key_filename { |
207 | 207 | my ( $self, $key, $value, $conf ) = @_; |
208 | 208 | |
209 | 209 | return $self->add_key( $key, \$value, $conf ); |
210 | } ## end sub add_key_filename | |
210 | } | |
211 | 211 | |
212 | 212 | ######################################################################## |
213 | 213 | sub upload_multipart_object { |
288 | 288 | |
289 | 289 | return ( \$buffer, $bytes ); |
290 | 290 | }; |
291 | ||
292 | 291 | } |
293 | 292 | |
294 | 293 | my $headers = $parameters{headers} || {}; |
298 | 297 | $logger->trace( sprintf 'multipart id: %s', $id ); |
299 | 298 | |
300 | 299 | my $part = 1; |
300 | ||
301 | 301 | my %parts; |
302 | ||
302 | 303 | my $key = $parameters{key}; |
303 | 304 | |
304 | eval { | |
305 | my $retval = eval { | |
305 | 306 | while (1) { |
306 | 307 | my ( $buffer, $length ) = $parameters{callback}->(); |
307 | 308 | last if !$buffer; |
308 | 309 | |
309 | 310 | my $etag = $self->upload_part_of_multipart_upload( |
310 | { id => $id, key => $key, data => $buffer, part => $part } ); | |
311 | { id => $id, | |
312 | key => $key, | |
313 | data => $buffer, | |
314 | part => $part, | |
315 | }, | |
316 | ); | |
311 | 317 | |
312 | 318 | $parts{ $part++ } = $etag; |
313 | 319 | } |
342 | 348 | { region => $self->region, |
343 | 349 | method => 'POST', |
344 | 350 | path => $self->_uri($key) . '?uploads=', |
345 | headers => $conf | |
346 | } | |
351 | headers => $conf, | |
352 | }, | |
347 | 353 | ); |
348 | 354 | |
349 | 355 | my $response = $acct->_do_http($request); |
353 | 359 | my $r = $acct->_xpc_of_content( $response->content ); |
354 | 360 | |
355 | 361 | return $r->{UploadId}; |
356 | } ## end sub initiate_multipart_upload | |
362 | } | |
357 | 363 | |
358 | 364 | # |
359 | 365 | # Upload a part of a file as part of a multipart upload operation |
420 | 426 | method => 'PUT', |
421 | 427 | path => $self->_uri($key) . $params, |
422 | 428 | headers => $conf, |
423 | data => $data | |
424 | } | |
429 | data => $data, | |
430 | }, | |
425 | 431 | ); |
426 | 432 | |
427 | 433 | my $response = $acct->_do_http($request); |
434 | 440 | if ($etag) { |
435 | 441 | $etag =~ s/^"//xsm; |
436 | 442 | $etag =~ s/"$//xsm; |
437 | } ## end if ($etag) | |
443 | } | |
438 | 444 | |
439 | 445 | return $etag; |
440 | } ## end sub upload_part_of_multipart_upload | |
446 | } | |
441 | 447 | |
442 | 448 | ######################################################################## |
443 | 449 | sub make_xml_document_simple { |
447 | 453 | my $xml = q{<?xml version="1.0" encoding="UTF-8"?>}; |
448 | 454 | my $xml_template |
449 | 455 | = '<Part><PartNumber>%s</PartNumber><ETag>%s</ETag></Part>'; |
456 | ||
450 | 457 | my @parts; |
451 | 458 | |
452 | 459 | foreach my $part_num ( sort { $a <=> $b } keys %{$parts_hr} ) { |
496 | 503 | my $conf = { |
497 | 504 | 'Content-MD5' => $md5_base64, |
498 | 505 | 'Content-Length' => length $content, |
499 | 'Content-Type' => 'application/xml' | |
506 | 'Content-Type' => 'application/xml', | |
500 | 507 | }; |
501 | 508 | |
502 | 509 | my $acct = $self->account; |
507 | 514 | method => 'POST', |
508 | 515 | path => $self->_uri($key) . $params, |
509 | 516 | headers => $conf, |
510 | data => $content | |
511 | } | |
517 | data => $content, | |
518 | }, | |
512 | 519 | ); |
513 | 520 | |
514 | 521 | my $response = $acct->_do_http($request); |
519 | 526 | } |
520 | 527 | |
521 | 528 | return $TRUE; |
522 | } ## end sub complete_multipart_upload | |
529 | } | |
523 | 530 | |
524 | 531 | # |
525 | 532 | # Stop a multipart upload |
541 | 548 | my $request = $acct->_make_request( |
542 | 549 | { region => $self->region, |
543 | 550 | method => 'DELETE', |
544 | path => $self->_uri($key) . $params | |
545 | } | |
551 | path => $self->_uri($key) . $params, | |
552 | }, | |
546 | 553 | ); |
547 | 554 | |
548 | 555 | my $response = $acct->_do_http($request); |
550 | 557 | $acct->_croak_if_response_error($response); |
551 | 558 | |
552 | 559 | return $TRUE; |
553 | } ## end sub abort_multipart_upload | |
560 | } | |
554 | 561 | |
555 | 562 | # |
556 | 563 | # List all the uploaded parts for an ongoing multipart upload |
574 | 581 | { region => $self->region, |
575 | 582 | method => 'GET', |
576 | 583 | path => $self->_uri($key) . $params, |
577 | headers => $conf | |
578 | } | |
584 | headers => $conf, | |
585 | }, | |
579 | 586 | ); |
580 | 587 | |
581 | 588 | my $response = $acct->_do_http($request); |
584 | 591 | |
585 | 592 | # Just return the XML, let the caller figure out what to do with it |
586 | 593 | return $response->content; |
587 | } ## end sub list_multipart_upload_parts | |
594 | } | |
588 | 595 | |
589 | 596 | # |
590 | 597 | # List all the currently active multipart upload operations |
601 | 608 | { region => $self->region, |
602 | 609 | method => 'GET', |
603 | 610 | path => $self->_uri() . '?uploads', |
604 | headers => $conf | |
605 | } | |
611 | headers => $conf, | |
612 | }, | |
606 | 613 | ); |
607 | 614 | |
608 | 615 | my $response = $acct->_do_http($request); |
611 | 618 | |
612 | 619 | # Just return the XML, let the caller figure out what to do with it |
613 | 620 | return $response->content; |
614 | } ## end sub list_multipart_uploads | |
621 | } | |
615 | 622 | |
616 | 623 | ######################################################################## |
617 | 624 | sub head_key { |
619 | 626 | my ( $self, $key ) = @_; |
620 | 627 | |
621 | 628 | return $self->get_key( $key, 'HEAD' ); |
622 | } ## end sub head_key | |
629 | } | |
623 | 630 | |
624 | 631 | ######################################################################## |
625 | 632 | sub get_key { |
630 | 637 | |
631 | 638 | if ( ref $filename ) { |
632 | 639 | $filename = ${$filename}; |
633 | } ## end if ( ref $filename ) | |
640 | } | |
634 | 641 | |
635 | 642 | my $acct = $self->account; |
636 | 643 | |
640 | 647 | { region => $self->region, |
641 | 648 | method => $method, |
642 | 649 | path => $uri, |
643 | headers => {} | |
644 | } | |
650 | headers => {}, | |
651 | }, | |
645 | 652 | ); |
646 | 653 | |
647 | 654 | my $retval; |
649 | 656 | my $response = $acct->_do_http( $request, $filename ); |
650 | 657 | |
651 | 658 | return $retval |
652 | if $response->code == 404; | |
659 | if $response->code eq $HTTP_NOT_FOUND; | |
653 | 660 | |
654 | 661 | $acct->_croak_if_response_error($response); |
655 | 662 | |
658 | 665 | if ($etag) { |
659 | 666 | $etag =~ s/^"//xsm; |
660 | 667 | $etag =~ s/"$//xsm; |
661 | } ## end if ($etag) | |
668 | } | |
662 | 669 | |
663 | 670 | $retval = { |
664 | 671 | content_length => $response->content_length || 0, |
678 | 685 | # etag so it should be lc'd for comparison. |
679 | 686 | croak "Computed and Response MD5's do not match: $md5 : $etag" |
680 | 687 | if $md5 ne lc $etag; |
681 | } ## end if ( $method eq 'GET' ) | |
688 | } | |
682 | 689 | |
683 | 690 | foreach my $header ( $response->headers->header_field_names ) { |
684 | 691 | next if $header !~ /x-amz-meta-/ixsm; |
685 | 692 | $retval->{ lc $header } = $response->header($header); |
686 | } ## end foreach my $header ( $response...) | |
693 | } | |
687 | 694 | |
688 | 695 | return $retval; |
689 | } ## end sub get_key | |
696 | } | |
690 | 697 | |
691 | 698 | ######################################################################## |
692 | 699 | sub get_key_filename { |
695 | 702 | |
696 | 703 | if ( !defined $filename ) { |
697 | 704 | $filename = $key; |
698 | } ## end if ( !defined $filename) | |
705 | } | |
699 | 706 | |
700 | 707 | return $self->get_key( $key, $method, \$filename ); |
701 | } ## end sub get_key_filename | |
708 | } | |
702 | 709 | |
703 | 710 | ######################################################################## |
704 | 711 | # See: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html |
758 | 765 | } |
759 | 766 | |
760 | 767 | return $acct->_xpc_of_content( $response->content ); |
761 | } ## end sub copy_key | |
768 | } | |
762 | 769 | |
763 | 770 | ######################################################################## |
764 | 771 | sub delete_key { |
774 | 781 | { method => 'DELETE', |
775 | 782 | region => $self->region, |
776 | 783 | path => $self->_uri($key), |
777 | headers => {} | |
778 | } | |
784 | headers => {}, | |
785 | }, | |
779 | 786 | ); |
780 | } ## end sub delete_key | |
787 | } | |
781 | 788 | |
782 | 789 | ######################################################################## |
783 | 790 | sub _format_delete_keys { |
796 | 803 | Key => [ $key->{Key} ], |
797 | 804 | defined $key->{VersionId} |
798 | 805 | ? ( VersionId => [ $key->{VersionId} ] ) |
799 | : () | |
806 | : (), | |
800 | 807 | }; |
801 | 808 | } |
802 | 809 | else { # array of keys |
803 | push @keys, { Key => [$key] }; | |
810 | push @keys, { Key => [$key], }; | |
804 | 811 | } |
805 | 812 | } |
806 | 813 | } |
811 | 818 | push @keys, |
812 | 819 | { |
813 | 820 | Key => [ $object[0] ], |
814 | defined $object[1] ? ( VersionId => [ $object[1] ] ) : () | |
821 | defined $object[1] ? ( VersionId => [ $object[1] ] ) : (), | |
815 | 822 | }; |
816 | 823 | } |
817 | 824 | } |
864 | 871 | my $content = { |
865 | 872 | xmlns => $S3_XMLNS, |
866 | 873 | Quiet => [$quiet_mode], |
867 | Object => $keys | |
874 | Object => $keys, | |
868 | 875 | }; |
869 | 876 | |
870 | 877 | my $xml_content = XMLout( |
871 | 878 | $content, |
872 | 879 | RootName => 'Delete', |
873 | XMLDecl => $XMLDECL | |
880 | XMLDecl => $XMLDECL, | |
874 | 881 | ); |
875 | 882 | |
876 | 883 | my $conf = {}; |
889 | 896 | path => $self->_uri() . '?delete', |
890 | 897 | headers => $conf, |
891 | 898 | data => $xml_content, |
892 | } | |
899 | }, | |
893 | 900 | ); |
894 | } ## end sub delete_keys | |
901 | } | |
895 | 902 | |
896 | 903 | ######################################################################## |
897 | 904 | sub delete_bucket { |
902 | 909 | if @_ > 1; |
903 | 910 | |
904 | 911 | return $self->account->delete_bucket($self); |
905 | } ## end sub delete_bucket | |
912 | } | |
906 | 913 | |
907 | 914 | ######################################################################## |
908 | 915 | sub list_v2 { |
916 | 923 | |
917 | 924 | if ( $conf->{'marker'} ) { |
918 | 925 | $conf->{'continuation-token'} = delete $conf->{'marker'}; |
919 | } ## end if ( $conf->{'marker'}) | |
926 | } | |
920 | 927 | |
921 | 928 | return $self->list($conf); |
922 | } ## end sub list_v2 | |
929 | } | |
923 | 930 | |
924 | 931 | ######################################################################## |
925 | 932 | sub list { |
931 | 938 | $conf->{bucket} = $self->bucket; |
932 | 939 | |
933 | 940 | return $self->account->list_bucket($conf); |
934 | } ## end sub list | |
941 | } | |
935 | 942 | |
936 | 943 | ######################################################################## |
937 | 944 | sub list_all_v2 { |
943 | 950 | $conf->{bucket} = $self->bucket; |
944 | 951 | |
945 | 952 | return $self->account->list_bucket_all_v2($conf); |
946 | } ## end sub list_all_v2 | |
953 | } | |
947 | 954 | |
948 | 955 | ######################################################################## |
949 | 956 | sub list_all { |
955 | 962 | $conf->{bucket} = $self->bucket; |
956 | 963 | |
957 | 964 | return $self->account->list_bucket_all($conf); |
958 | } ## end sub list_all | |
965 | } | |
959 | 966 | |
960 | 967 | ######################################################################## |
961 | 968 | sub get_acl { |
968 | 975 | { region => $self->region, |
969 | 976 | method => 'GET', |
970 | 977 | path => $self->_uri($key) . '?acl=', |
971 | headers => {} | |
972 | } | |
978 | headers => {}, | |
979 | }, | |
973 | 980 | ); |
974 | 981 | |
975 | 982 | my $old_redirectable = $account->ua->requests_redirectable; |
984 | 991 | my $old_host = $account->host; |
985 | 992 | $account->host( $uri->host ); |
986 | 993 | |
987 | my $request = $account->_make_request( | |
994 | $request = $account->_make_request( | |
988 | 995 | { region => $self->region, |
989 | 996 | method => 'GET', |
990 | 997 | path => $uri->path, |
991 | headers => {} | |
992 | } | |
998 | headers => {}, | |
999 | }, | |
993 | 1000 | ); |
994 | 1001 | |
995 | 1002 | $response = $account->_do_http($request); |
996 | 1003 | |
997 | 1004 | $account->ua->requests_redirectable($old_redirectable); |
998 | 1005 | $account->host($old_host); |
999 | } ## end if ( $response->code =~...) | |
1006 | } | |
1000 | 1007 | |
1001 | 1008 | my $content; |
1002 | 1009 | |
1003 | 1010 | # do we test for NOT FOUND, returning undef? |
1004 | if ( $response->code ne '404' ) { | |
1011 | if ( $response->code ne $HTTP_NOT_FOUND ) { | |
1005 | 1012 | $account->_croak_if_response_error($response); |
1006 | 1013 | $content = $response->content; |
1007 | 1014 | } |
1008 | 1015 | |
1009 | 1016 | return $content; |
1010 | } ## end sub get_acl | |
1017 | } | |
1011 | 1018 | |
1012 | 1019 | ######################################################################## |
1013 | 1020 | sub set_acl { |
1024 | 1031 | |
1025 | 1032 | my $path = $self->_uri( $conf->{key} ) . '?acl='; |
1026 | 1033 | |
1027 | my $hash_ref | |
1028 | = ( $conf->{acl_short} ) | |
1029 | ? { 'x-amz-acl' => $conf->{acl_short} } | |
1030 | : {}; | |
1034 | my $headers = {}; | |
1035 | ||
1036 | if ( $conf->{acl_short} ) { | |
1037 | $headers->{'x-amz-acl'} = $conf->{acl_short}; | |
1038 | } | |
1031 | 1039 | |
1032 | 1040 | my $xml = $conf->{acl_xml} || $EMPTY; |
1033 | 1041 | |
1034 | 1042 | my $account = $self->account; |
1043 | ||
1044 | $headers->{'Content-Length'} = length $xml; | |
1035 | 1045 | |
1036 | 1046 | return $account->_send_request_expect_nothing( |
1037 | 1047 | { method => 'PUT', |
1038 | 1048 | path => $path, |
1039 | headers => $hash_ref, | |
1049 | headers => $headers, | |
1040 | 1050 | data => $xml, |
1041 | region => $self->region | |
1042 | } | |
1051 | region => $self->region, | |
1052 | }, | |
1043 | 1053 | ); |
1044 | } ## end sub set_acl | |
1054 | } | |
1045 | 1055 | |
1046 | 1056 | ######################################################################## |
1047 | 1057 | sub get_location_constraint { |
1053 | 1063 | my $xpc = $account->_send_request( |
1054 | 1064 | { region => $self->region, |
1055 | 1065 | method => 'GET', |
1056 | path => $self->bucket . '/?location=' | |
1057 | } | |
1066 | path => $self->bucket . '/?location=', | |
1067 | }, | |
1058 | 1068 | ); |
1059 | 1069 | |
1060 | 1070 | my $lc; |
1064 | 1074 | if $account->_remember_errors($xpc); |
1065 | 1075 | |
1066 | 1076 | return $lc; |
1067 | } ## end if ( !$xpc ) | |
1077 | } | |
1068 | 1078 | |
1069 | 1079 | $lc = $xpc->{content}; |
1070 | 1080 | |
1071 | 1081 | if ( defined $lc && $lc eq $EMPTY ) { |
1072 | 1082 | $lc = undef; |
1073 | } ## end if ( defined $lc && $lc...) | |
1083 | } | |
1074 | 1084 | |
1075 | 1085 | return $lc; |
1076 | } ## end sub get_location_constraint | |
1086 | } | |
1077 | 1087 | |
1078 | 1088 | # proxy up the err requests |
1079 | 1089 | |
1091 | 1101 | my ($self) = @_; |
1092 | 1102 | |
1093 | 1103 | return $self->account->err; |
1094 | } ## end sub err | |
1104 | } | |
1095 | 1105 | |
1096 | 1106 | ######################################################################## |
1097 | 1107 | sub errstr { |
1099 | 1109 | my ($self) = @_; |
1100 | 1110 | |
1101 | 1111 | return $self->account->errstr; |
1102 | } ## end sub errstr | |
1112 | } | |
1103 | 1113 | |
1104 | 1114 | ######################################################################## |
1105 | 1115 | sub error { |
1107 | 1117 | my ($self) = @_; |
1108 | 1118 | |
1109 | 1119 | return $self->account->error; |
1110 | } ## end sub err | |
1120 | } | |
1111 | 1121 | |
1112 | 1122 | ######################################################################## |
1113 | 1123 | sub _content_sub { |
1138 | 1148 | $fh->binmode; |
1139 | 1149 | |
1140 | 1150 | $remaining = $stat->size; |
1141 | } ## end if ( !$fh->opened ) | |
1151 | } | |
1142 | 1152 | |
1143 | 1153 | my $read = $fh->read( $buffer, $blksize ); |
1144 | 1154 | |
1151 | 1161 | or croak "close of upload content $filename failed: $OS_ERROR"; |
1152 | 1162 | |
1153 | 1163 | $buffer ||= $EMPTY; # LWP expects an empty string on finish, read returns 0 |
1154 | } ## end if ( !$read ) | |
1164 | } | |
1155 | 1165 | |
1156 | 1166 | $remaining -= length $buffer; |
1157 | 1167 | |
1158 | 1168 | return $buffer; |
1159 | 1169 | }; |
1160 | } ## end sub _content_sub | |
1170 | } | |
1161 | 1171 | |
1162 | 1172 | 1; |
1163 | 1173 | |
1238 | 1248 | |
1239 | 1249 | =item logger |
1240 | 1250 | |
1241 | Sets the logger object (should be an object capable of providing at | |
1242 | least a C<debug> and C<trace> method for recording log messages. If no | |
1243 | logger object is passed the C<account> object's logger object will be used. | |
1251 | Sets the logger. The logger should be a blessed reference capable of | |
1252 | providing at least a C<debug> and C<trace> method for recording log | |
1253 | messages. If no logger object is passed the C<account> object's logger | |
1254 | object will be used. | |
1244 | 1255 | |
1245 | 1256 | =item verify_region |
1246 | 1257 | |
1274 | 1285 | |
1275 | 1286 | =item value |
1276 | 1287 | |
1277 | A SCALAR string representing the contents of the object.. | |
1288 | A SCALAR string representing the contents of the object. | |
1278 | 1289 | |
1279 | 1290 | =item configuration |
1280 | 1291 |
2 | 2 | use strict; |
3 | 3 | use warnings; |
4 | 4 | |
5 | use parent qw{Exporter}; | |
5 | use parent qw(Exporter); | |
6 | 6 | |
7 | 7 | use Readonly; |
8 | 8 | |
20 | 20 | Readonly our $MIN_MULTIPART_UPLOAD_CHUNK_SIZE => 5 * 1024 * 1024; |
21 | 21 | Readonly our $DEFAULT_LOG_LEVEL => 'error'; |
22 | 22 | Readonly our $MAX_DELETE_KEYS => 1000; |
23 | Readonly our $MAX_RETRIES => 5; | |
24 | Readonly our $DEFAULT_REGION => 'us-east-1'; | |
23 | 25 | |
24 | 26 | Readonly our $XMLDECL => '<?xml version="1.0" encoding="UTF-8"?>'; |
25 | 27 | Readonly our $S3_XMLNS => 'http://s3.amazonaws.com/doc/2006-03-01/'; |
31 | 33 | warn => 2, |
32 | 34 | error => 1, |
33 | 35 | fatal => 0, |
36 | ); | |
37 | ||
38 | Readonly::Hash our %LIST_OBJECT_MARKERS => ( | |
39 | '2' => [qw(ContinuationToken NextContinuationToken continuation-token)], | |
40 | '1' => [qw(Marker NextMarker marker)], | |
34 | 41 | ); |
35 | 42 | |
36 | 43 | # booleans |
48 | 55 | Readonly our $AMPERSAND => q{&}; |
49 | 56 | Readonly our $EQUAL_SIGN => q{=}; |
50 | 57 | |
58 | # HTTP codes | |
59 | ||
60 | Readonly our $HTTP_BAD_REQUEST => 400; | |
61 | Readonly our $HTTP_UNAUTHORIZED => 401; | |
62 | Readonly our $HTTP_PAYMENT_RQUIRED => 402; | |
63 | Readonly our $HTTP_FORBIDDEN => 403; | |
64 | Readonly our $HTTP_NOT_FOUND => 404; | |
65 | Readonly our $HTTP_CONFLICT => 409; | |
66 | Readonly our $HTTP_MOVED_PERMANENTLY => 301; | |
67 | Readonly our $HTTP_FOUND => 302; | |
68 | Readonly our $HTTP_SEE_OTHER => 303; | |
69 | Readonly our $HTTP_NOT_MODIFIED => 304; | |
70 | ||
51 | 71 | our %EXPORT_TAGS = ( |
52 | 72 | chars => [ |
53 | qw{ | |
73 | qw( | |
54 | 74 | $AMPERSAND |
55 | 75 | $COLON |
56 | 76 | $DOUBLE_COLON |
60 | 80 | $EQUAL_SIGN |
61 | 81 | $QUESTION_MARK |
62 | 82 | $SLASH |
63 | } | |
83 | ) | |
64 | 84 | ], |
65 | 85 | booleans => [ |
66 | qw{ | |
86 | qw( | |
67 | 87 | $TRUE |
68 | 88 | $FALSE |
69 | } | |
89 | ) | |
70 | 90 | ], |
71 | 91 | defaults => [ |
72 | qw{ | |
92 | qw( | |
73 | 93 | $AMAZON_HEADER_PREFIX |
74 | 94 | $METADATA_PREFIX |
75 | 95 | $KEEP_ALIVE_CACHESIZE |
76 | 96 | $DEFAULT_TIMEOUT |
77 | 97 | $DEFAULT_BUFFER_SIZE |
78 | 98 | $DEFAULT_LOG_LEVEL |
79 | %LOG_LEVELS | |
80 | 99 | $DEFAULT_HOST |
100 | $DEFAULT_REGION | |
81 | 101 | $MAX_BUCKET_NAME_LENGTH |
82 | 102 | $MAX_DELETE_KEYS |
83 | 103 | $MIN_BUCKET_NAME_LENGTH |
84 | 104 | $MIN_MULTIPART_UPLOAD_CHUNK_SIZE |
105 | $MAX_RETRIES | |
106 | ) | |
107 | ], | |
108 | misc => [ | |
109 | qw( | |
85 | 110 | $S3_XMLNS |
86 | 111 | $XMLDECL |
87 | } | |
112 | %LIST_OBJECT_MARKERS | |
113 | %LOG_LEVELS | |
114 | $NOT_FOUND | |
115 | ) | |
116 | ], | |
117 | http => [ | |
118 | qw( | |
119 | $HTTP_BAD_REQUEST | |
120 | $HTTP_CONFLICT | |
121 | $HTTP_UNAUTHORIZED | |
122 | $HTTP_PAYMENT_RQUIRED | |
123 | $HTTP_FORBIDDEN | |
124 | $HTTP_NOT_FOUND | |
125 | $HTTP_MOVED_PERMANENTLY | |
126 | $HTTP_FOUND | |
127 | $HTTP_SEE_OTHER | |
128 | $HTTP_NOT_MODIFIED | |
129 | ) | |
130 | ||
88 | 131 | ], |
89 | 132 | ); |
90 | 133 |
3 | 3 | use warnings; |
4 | 4 | |
5 | 5 | use Amazon::S3::Bucket; |
6 | use Amazon::S3::Constants qw{:all}; | |
6 | use Amazon::S3::Constants qw(:all); | |
7 | 7 | use Amazon::S3::Logger; |
8 | 8 | use Amazon::S3::Signature::V4; |
9 | 9 | |
10 | 10 | use Carp; |
11 | 11 | use Data::Dumper; |
12 | 12 | use Digest::HMAC_SHA1; |
13 | use Digest::MD5 qw{md5_hex}; | |
14 | use English qw{-no_match_vars}; | |
13 | use Digest::MD5 qw(md5_hex); | |
14 | use English qw(-no_match_vars); | |
15 | 15 | use HTTP::Date; |
16 | 16 | use URI; |
17 | 17 | use LWP::UserAgent::Determined; |
18 | use MIME::Base64 qw{encode_base64 decode_base64}; | |
19 | use Scalar::Util qw{ reftype blessed }; | |
20 | use List::Util qw{ any pairs }; | |
21 | use URI::Escape qw{uri_escape_utf8}; | |
22 | use XML::Simple qw{XMLin}; ## no critic (Community::DiscouragedModules) | |
23 | ||
24 | use parent qw{Class::Accessor::Fast}; | |
18 | use MIME::Base64 qw(encode_base64 decode_base64); | |
19 | use Scalar::Util qw( reftype blessed ); | |
20 | use List::Util qw( any pairs ); | |
21 | use URI::Escape qw(uri_escape_utf8); | |
22 | use XML::Simple qw(XMLin); ## no critic (Community::DiscouragedModules) | |
23 | ||
24 | use parent qw(Class::Accessor::Fast); | |
25 | 25 | |
26 | 26 | __PACKAGE__->mk_accessors( |
27 | qw{ | |
27 | qw( | |
28 | 28 | aws_access_key_id |
29 | 29 | aws_secret_access_key |
30 | 30 | token |
47 | 47 | _signer |
48 | 48 | timeout |
49 | 49 | ua |
50 | } | |
50 | ), | |
51 | 51 | ); |
52 | 52 | |
53 | our $VERSION = '@PACKAGE_VERSION@'; ## no critic (ValuesAndExpressions::RequireInterpolationOfMetachars) | |
53 | our $VERSION = '@PACKAGE_VERSION@'; ## no critic (RequireInterpolation) | |
54 | 54 | |
55 | 55 | ######################################################################## |
56 | 56 | sub new { |
99 | 99 | $safe_options{aws_access_key_id} = '****'; |
100 | 100 | } |
101 | 101 | |
102 | return Dumper( [ 'options: ', \%safe_options ] ); | |
103 | } | |
102 | return Dumper( [ options => \%safe_options ] ); | |
103 | }, | |
104 | 104 | ); |
105 | 105 | |
106 | 106 | if ( !$self->credentials ) { |
125 | 125 | requests_redirectable => [qw(GET HEAD DELETE)], |
126 | 126 | ); |
127 | 127 | |
128 | $ua->timing( join $COMMA, map { 2**$_ } 0 .. 5 ); | |
128 | $ua->timing( join $COMMA, map { 2**$_ } 0 .. $MAX_RETRIES ); | |
129 | 129 | } |
130 | 130 | else { |
131 | 131 | $ua = LWP::UserAgent->new( |
138 | 138 | $ua->env_proxy; |
139 | 139 | $self->ua($ua); |
140 | 140 | |
141 | $self->region( $self->_region // 'us-east-1' ); | |
141 | $self->region( $self->_region // $DEFAULT_REGION ); | |
142 | 142 | |
143 | 143 | if ( !$self->_signer && $self->cache_signer ) { |
144 | 144 | $self->_signer( $self->signer ); |
161 | 161 | return $text if !$text; |
162 | 162 | |
163 | 163 | if ( !defined $encryption_key ) { |
164 | eval { | |
165 | require Crypt::Blowfish; | |
166 | require Crypt::CBC; | |
164 | $encryption_key = eval { | |
165 | if ( !defined $encryption_key ) { | |
166 | require Crypt::Blowfish; | |
167 | require Crypt::CBC; | |
168 | ||
169 | return md5_hex( rand $PID ); | |
170 | } | |
167 | 171 | }; |
168 | ||
169 | if ($EVAL_ERROR) { | |
170 | $encryption_key = $EMPTY; | |
171 | } | |
172 | else { | |
173 | $encryption_key = md5_hex( rand $PID ); | |
174 | } | |
175 | 172 | } |
176 | 173 | |
177 | return $text | |
178 | if !$encryption_key; | |
174 | if ( !$encryption_key || $EVAL_ERROR ) { | |
175 | return $text; | |
176 | } | |
179 | 177 | |
180 | 178 | my $cipher = Crypt::CBC->new( |
181 | 179 | -pass => $encryption_key, |
197 | 195 | |
198 | 196 | my $cipher = Crypt::CBC->new( |
199 | 197 | -pass => $encryption_key, |
200 | -cipher => 'Crypt::Blowfish' | |
198 | -cipher => 'Crypt::Blowfish', | |
201 | 199 | ); |
202 | 200 | |
203 | 201 | return $cipher->decrypt($secret); |
216 | 214 | $bucket = Amazon::S3::Bucket->new( bucket => $bucket, account => $self ); |
217 | 215 | } |
218 | 216 | |
219 | return $bucket->get_location_constraint // 'us-east-1'; | |
217 | return $bucket->get_location_constraint // $DEFAULT_REGION; | |
220 | 218 | } |
221 | 219 | |
222 | 220 | ######################################################################## |
225 | 223 | my ($self) = @_; |
226 | 224 | |
227 | 225 | my $region = $ENV{AWS_REGION} || $ENV{AWS_DEFAULT_REGION}; |
226 | ||
228 | 227 | return $region |
229 | 228 | if $region; |
230 | 229 | |
244 | 243 | } |
245 | 244 | } |
246 | 245 | |
247 | return $region || 'us-east-1'; | |
246 | return $region || $DEFAULT_REGION; | |
248 | 247 | } |
249 | 248 | |
250 | 249 | # Amazon::Credentials compatibility methods |
285 | 284 | # /Backups<path>?partNumber=27&uploadId=<id> - HTTP/1.1" 400 |
286 | 285 | # RequestTimeout 360 20971520 20478 - "-" "libwww-perl/6.15" |
287 | 286 | my $http_codes_hr = $self->ua->codes_to_determinate(); |
288 | $http_codes_hr->{400} = 1; | |
287 | $http_codes_hr->{$HTTP_BAD_REQUEST} = $TRUE; | |
289 | 288 | } |
290 | 289 | |
291 | 290 | return; |
304 | 303 | # /Backups<path>?partNumber=27&uploadId=<id> - HTTP/1.1" 400 |
305 | 304 | # RequestTimeout 360 20971520 20478 - "-" "libwww-perl/6.15" |
306 | 305 | my $http_codes_hr = $self->ua->codes_to_determinate(); |
307 | delete $http_codes_hr->{400}; | |
306 | delete $http_codes_hr->{$HTTP_BAD_REQUEST}; | |
308 | 307 | } |
309 | 308 | |
310 | 309 | return; |
347 | 346 | my $region = $self->_region; |
348 | 347 | my $bucket_list; |
349 | 348 | |
350 | $self->reset_signer_region('us-east-1'); # default region for buckets op | |
349 | $self->reset_signer_region($DEFAULT_REGION); # default region for buckets op | |
351 | 350 | |
352 | 351 | my $r = $self->_send_request( |
353 | 352 | { method => 'GET', |
354 | 353 | path => $EMPTY, |
355 | 354 | headers => {}, |
356 | region => 'us-east-1', | |
357 | } | |
355 | region => $DEFAULT_REGION, | |
356 | }, | |
358 | 357 | ); |
359 | 358 | |
360 | 359 | return $bucket_list |
380 | 379 | account => $self, |
381 | 380 | buffer_size => $self->buffer_size, |
382 | 381 | verify_region => $verify_region // $FALSE, |
383 | } | |
382 | }, | |
384 | 383 | ); |
385 | 384 | |
386 | 385 | } |
406 | 405 | # is probably not needed anymore since bucket operations now send |
407 | 406 | # the region of the bucket to the signer |
408 | 407 | if ( $self->cache_signer ) { |
409 | if ( $self->region && $self->region ne 'us-east-1' ) { | |
408 | if ( $self->region && $self->region ne $DEFAULT_REGION ) { | |
410 | 409 | if ( $self->signer->can('region') ) { |
411 | 410 | $self->signer->region($region); |
412 | 411 | } |
427 | 426 | my $region = $conf->{location_constraint} // $conf->{region} |
428 | 427 | // $self->region; |
429 | 428 | |
430 | if ( $region && $region eq 'us-east-1' ) { | |
429 | if ( $region && $region eq $DEFAULT_REGION ) { | |
431 | 430 | undef $region; |
432 | 431 | } |
433 | 432 | |
458 | 457 | headers => { %header_ref, 'Content-Length' => length $data }, |
459 | 458 | data => $data, |
460 | 459 | region => $region, |
461 | } | |
460 | }, | |
462 | 461 | ); |
463 | 462 | |
464 | 463 | my $bucket_obj = $retval ? $self->bucket($bucket) : undef; |
475 | 474 | |
476 | 475 | if ( ref $args[0] && reftype( $args[0] ) eq 'HASH' ) { |
477 | 476 | ( $bucketname, $region, $verify_region ) |
478 | = @{ $args[0] }{qw{bucket region verify_region}}; | |
477 | = @{ $args[0] }{qw(bucket region verify_region)}; | |
479 | 478 | } |
480 | 479 | else { |
481 | 480 | ( $bucketname, $region ) = @args; |
492 | 491 | account => $self, |
493 | 492 | region => $region, |
494 | 493 | verify_region => $verify_region, |
495 | } | |
494 | }, | |
496 | 495 | ); |
497 | 496 | } |
498 | 497 | |
521 | 520 | path => $bucket . $SLASH, |
522 | 521 | headers => {}, |
523 | 522 | region => $region, |
524 | } | |
523 | }, | |
525 | 524 | ); |
526 | 525 | } |
527 | 526 | |
549 | 548 | my $bucket_list; # return this |
550 | 549 | my $path = $bucket . $SLASH; |
551 | 550 | |
551 | my $list_type = $conf->{'list-type'} // '1'; | |
552 | ||
553 | my ( $marker, $next_marker, $query_next ) | |
554 | = @{ $LIST_OBJECT_MARKERS{$list_type} }; | |
555 | ||
556 | if ( $conf->{marker} ) { | |
557 | $conf->{$query_next} = delete $conf->{marker}; | |
558 | } | |
559 | ||
552 | 560 | if ( %{$conf} ) { |
561 | ||
553 | 562 | my @vars = keys %{$conf}; |
554 | 563 | |
555 | 564 | # remove undefined elements |
574 | 583 | path => $path, |
575 | 584 | headers => {}, # { 'Content-Length' => 0 }, |
576 | 585 | region => $self->region, |
577 | } | |
586 | }, | |
578 | 587 | ); |
579 | 588 | |
580 | 589 | return $bucket_list |
581 | 590 | if $self->_remember_errors($r); |
582 | 591 | |
583 | $self->get_logger->debug( sub { return Dumper($r); } ); | |
584 | ||
585 | my ( $marker, $next_marker ) = qw{ Marker NextMarker }; | |
586 | ||
587 | if ( $conf->{'list-type'} && $conf->{'list-type'} eq '2' ) { | |
588 | $marker = 'ContinuationToken'; | |
589 | $next_marker = 'NextContinuationToken'; | |
590 | } | |
592 | $self->get_logger->debug( | |
593 | sub { | |
594 | return Dumper( | |
595 | [ marker => $marker, | |
596 | next_marker => $next_marker, | |
597 | response => $r, | |
598 | ], | |
599 | ); | |
600 | }, | |
601 | ); | |
591 | 602 | |
592 | 603 | $bucket_list = { |
593 | 604 | bucket => $r->{Name}, |
622 | 633 | owner_displayname => $node->{Owner}{DisplayName}, |
623 | 634 | }; |
624 | 635 | } |
636 | ||
625 | 637 | $bucket_list->{keys} = \@keys; |
626 | 638 | |
627 | 639 | if ( $conf->{delimiter} ) { |
645 | 657 | push @common_prefixes, $prefix; |
646 | 658 | } |
647 | 659 | } |
660 | ||
648 | 661 | $bucket_list->{common_prefixes} = \@common_prefixes; |
649 | 662 | } |
650 | 663 | |
674 | 687 | if !$bucket; |
675 | 688 | |
676 | 689 | my $response = $self->list_bucket($conf); |
677 | croak 'The server has stopped responding' | |
690 | ||
691 | croak $EVAL_ERROR | |
678 | 692 | if !$response; |
679 | 693 | |
680 | 694 | return $response |
690 | 704 | $conf->{bucket} = $bucket; |
691 | 705 | |
692 | 706 | $response = $self->list_bucket($conf); |
693 | croak 'The server has stopped responding' | |
707 | ||
708 | croak $EVAL_ERROR | |
694 | 709 | if !$response; |
695 | 710 | |
696 | 711 | push @{ $all->{keys} }, @{ $response->{keys} }; |
766 | 781 | region => $self->region || $self->get_default_region, |
767 | 782 | service => 's3', |
768 | 783 | $self->get_token ? ( security_token => $creds->get_token ) : (), |
769 | } | |
784 | }, | |
770 | 785 | ); |
771 | 786 | |
772 | 787 | if ( $self->cache_signer ) { |
823 | 838 | |
824 | 839 | if ( ref $args[0] && reftype( $args[0] ) eq 'HASH' ) { |
825 | 840 | ( $method, $path, $headers, $data, $metadata, $region ) |
826 | = @{ $args[0] }{qw{method path headers data metadata region}}; | |
841 | = @{ $args[0] }{qw(method path headers data metadata region)}; | |
827 | 842 | } |
828 | 843 | else { |
829 | 844 | ( $method, $path, $headers, $data, $metadata, $region ) = @args; |
863 | 878 | |
864 | 879 | if ( $host =~ /([^:]+):([^:]\d+)$/xsm ) { |
865 | 880 | |
866 | eval { | |
881 | $url = eval { | |
867 | 882 | my $port = $2; |
868 | 883 | $host = $1; |
869 | 884 | |
870 | my $uri = URI->new("http://$bucket.host"); | |
885 | my $uri = URI->new; | |
886 | ||
871 | 887 | $uri->scheme('http'); |
872 | 888 | $uri->host("$bucket.$host"); |
873 | 889 | $uri->port($port); |
874 | 890 | $uri->path($path); |
875 | $url = $uri . $query_string; | |
876 | ||
891 | ||
892 | return $uri . $query_string; | |
877 | 893 | }; |
878 | 894 | |
895 | die "could not a uri for bucket: $bucket, host: $host, path: $path\n" | |
896 | if !$url || $EVAL_ERROR; | |
879 | 897 | } |
880 | 898 | else { |
881 | 899 | $url = "$protocol://$bucket.$host$path$query_string"; |
909 | 927 | $self->get_logger->trace( |
910 | 928 | sub { |
911 | 929 | return Dumper( [ 'REQUEST' => \@args ] ); |
912 | } | |
930 | }, | |
913 | 931 | ); |
914 | 932 | |
915 | 933 | my $request; |
934 | my $keep_root = $FALSE; | |
916 | 935 | |
917 | 936 | if ( @args == 1 && ref( $args[0] ) =~ /HTTP::Request/xsm ) { |
918 | 937 | $request = $args[0]; |
919 | 938 | } |
920 | 939 | else { |
940 | if ( ref $args[0] ) { | |
941 | $keep_root = delete $args[0]->{keep_root}; | |
942 | } | |
943 | ||
921 | 944 | $request = $self->_make_request(@args); |
922 | 945 | } |
923 | 946 | |
934 | 957 | $content = undef; |
935 | 958 | } |
936 | 959 | elsif ( $content && $response->content_type eq 'application/xml' ) { |
937 | $content = $self->_xpc_of_content($content); | |
960 | $content = $self->_xpc_of_content( $content, $keep_root ); | |
938 | 961 | } |
939 | 962 | |
940 | 963 | return $content; |
1108 | 1131 | my $request = $self->_make_request(@args); |
1109 | 1132 | |
1110 | 1133 | my $response = $self->_do_http($request); |
1134 | ||
1111 | 1135 | $self->get_logger->debug( Dumper( [$response] ) ); |
1112 | 1136 | |
1113 | 1137 | my $content = $response->content; |
1129 | 1153 | # first time we used it. Thus, we need to probe first to find out what's going on, |
1130 | 1154 | # before we start sending any actual data. |
1131 | 1155 | ######################################################################## |
1132 | sub _send_request_expect_nothing_probed { | |
1156 | sub _send_request_expect_nothing_probed { ## no critic (ProhibitUnusedPrivateSubroutines) | |
1133 | 1157 | ######################################################################## |
1134 | 1158 | my ( $self, @args ) = @_; |
1135 | 1159 | |
1137 | 1161 | |
1138 | 1162 | if ( @args == 1 && ref $args[0] ) { |
1139 | 1163 | ( $method, $path, $conf, $value, $region ) |
1140 | = @{ $args[0] }{qw{method path headers data region}}; | |
1164 | = @{ $args[0] }{qw(method path headers data region)}; | |
1141 | 1165 | } |
1142 | 1166 | else { |
1143 | 1167 | ( $method, $path, $conf, $value, $region ) |
1144 | = @{ $args[0] }{qw{method path headers data region}}; | |
1168 | = @{ $args[0] }{qw(method path headers data region)}; | |
1145 | 1169 | } |
1146 | 1170 | |
1147 | 1171 | $region = $region // $self->region; |
1149 | 1173 | my $request = $self->_make_request( |
1150 | 1174 | { method => 'HEAD', |
1151 | 1175 | path => $path, |
1152 | region => $region | |
1153 | } | |
1176 | region => $region, | |
1177 | }, | |
1154 | 1178 | ); |
1155 | 1179 | |
1156 | 1180 | my $override_uri = undef; |
1176 | 1200 | path => $path, |
1177 | 1201 | headers => $conf, |
1178 | 1202 | data => $value, |
1179 | region => $region | |
1180 | } | |
1203 | region => $region, | |
1204 | }, | |
1181 | 1205 | ); |
1182 | 1206 | |
1183 | 1207 | if ( defined $override_uri ) { |
1221 | 1245 | ######################################################################## |
1222 | 1246 | my ( $self, $src, $keep_root ) = @_; |
1223 | 1247 | |
1224 | my $xml_hr; | |
1225 | ||
1226 | eval { | |
1227 | $xml_hr = XMLin( | |
1248 | my $xml_hr = eval { | |
1249 | XMLin( | |
1228 | 1250 | $src, |
1229 | 'SuppressEmpty' => $EMPTY, | |
1230 | 'ForceArray' => ['Contents'], | |
1231 | 'KeepRoot' => $keep_root | |
1251 | SuppressEmpty => $EMPTY, | |
1252 | ForceArray => ['Contents'], | |
1253 | KeepRoot => $keep_root, | |
1254 | NoAttr => $TRUE, | |
1232 | 1255 | ); |
1233 | 1256 | }; |
1234 | 1257 | |
1235 | if ($EVAL_ERROR) { | |
1258 | if ( !$xml_hr && $EVAL_ERROR ) { | |
1236 | 1259 | confess "Error parsing $src: $EVAL_ERROR"; |
1237 | 1260 | } |
1238 | 1261 | |
1245 | 1268 | ######################################################################## |
1246 | 1269 | my ( $self, $src, $keep_root ) = @_; |
1247 | 1270 | |
1248 | return $TRUE if !$src; # this should not happen | |
1271 | return $src | |
1272 | if !$src; | |
1249 | 1273 | |
1250 | 1274 | if ( !ref $src && $src !~ /^[[:space:]]*</xsm ) { # if not xml |
1251 | 1275 | ( my $code = $src ) =~ s/^[[:space:]]*[(][\d]*[)].*$/$1/xsm; |
1279 | 1303 | # Deprecated - this adds a header for the old V2 auth signatures |
1280 | 1304 | # |
1281 | 1305 | ######################################################################## |
1282 | sub _add_auth_header { | |
1306 | sub _add_auth_header { ## no critic (ProhibitUnusedPrivateSubroutines) | |
1283 | 1307 | ######################################################################## |
1284 | 1308 | my ( $self, $headers, $method, $path ) = @_; |
1285 | 1309 | |
1389 | 1413 | $buf .= "/$1"; |
1390 | 1414 | |
1391 | 1415 | # ...unless there any parameters we're interested in... |
1392 | if ( $path =~ /[&?](acl|torrent|location|uploads|delete)($|=|&)/xsm ) { | |
1416 | if ( $path =~ /[&?](acl|torrent|location|uploads|delete)([=&]|$)/xsm ) { | |
1393 | 1417 | # if ( $path =~ /[&?](acl|torrent|location|uploads|delete)([=&])?/xsm ) { |
1394 | 1418 | $buf .= "?$1"; |
1395 | 1419 | } |
1444 | 1468 | ######################################################################## |
1445 | 1469 | my ( $self, $unencoded ) = @_; |
1446 | 1470 | |
1447 | return uri_escape_utf8( $unencoded, '^A-Za-z0-9\-\._~\x2f' ); | |
1471 | return uri_escape_utf8( $unencoded, '^A-Za-z0-9\-\._~\x2f' ); ## no critic (RequireInterpolation) | |
1448 | 1472 | } |
1449 | 1473 | |
1450 | 1474 | 1; |
1466 | 1490 | |
1467 | 1491 | =head1 SYNOPSIS |
1468 | 1492 | |
1469 | #!/usr/bin/perl | |
1470 | use warnings; | |
1471 | use strict; | |
1472 | ||
1473 | 1493 | use Amazon::S3; |
1474 | ||
1475 | use vars qw/$OWNER_ID $OWNER_DISPLAYNAME/; | |
1476 | 1494 | |
1477 | 1495 | my $aws_access_key_id = "Fill me in!"; |
1478 | 1496 | my $aws_secret_access_key = "Fill me in too!"; |
1488 | 1506 | |
1489 | 1507 | # create a bucket |
1490 | 1508 | my $bucket_name = $aws_access_key_id . '-net-amazon-s3-test'; |
1509 | ||
1491 | 1510 | my $bucket = $s3->add_bucket( { bucket => $bucket_name } ) |
1492 | 1511 | or die $s3->err . ": " . $s3->errstr; |
1493 | 1512 | |
1494 | 1513 | # store a key with a content-type and some optional metadata |
1495 | 1514 | my $keyname = 'testing.txt'; |
1515 | ||
1496 | 1516 | my $value = 'T'; |
1517 | ||
1497 | 1518 | $bucket->add_key( |
1498 | 1519 | $keyname, $value, |
1499 | 1520 | { content_type => 'text/plain', |
1510 | 1531 | # list keys in the bucket |
1511 | 1532 | $response = $bucket->list |
1512 | 1533 | or die $s3->err . ": " . $s3->errstr; |
1534 | ||
1513 | 1535 | print $response->{bucket}."\n"; |
1536 | ||
1514 | 1537 | for my $key (@{ $response->{keys} }) { |
1515 | 1538 | print "\t".$key->{key}."\n"; |
1516 | 1539 | } |
1517 | 1540 | |
1518 | 1541 | # delete key from bucket |
1519 | 1542 | $bucket->delete_key($keyname); |
1543 | ||
1544 | # delete multiple keys from bucket | |
1545 | $bucket->delete_keys([$key1, $key2, $key3]); | |
1520 | 1546 | |
1521 | 1547 | # delete bucket |
1522 | 1548 | $bucket->delete_bucket; |
1523 | 1549 | |
1524 | 1550 | =head1 DESCRIPTION |
1525 | 1551 | |
1552 | This documentation refers to version @PACKAGE_VERSION@. | |
1553 | ||
1526 | 1554 | C<Amazon::S3> provides a portable client interface to Amazon Simple |
1527 | 1555 | Storage System (S3). |
1528 | 1556 | |
1529 | I<This module is rather dated. For a much more robust and modern | |
1530 | implementation of an S3 interface try C<Net::Amazon::S3>. | |
1531 | C<Amazon::S3> ostensibly was intended to be a drop-in replacement for | |
1532 | C<Net:Amazon::S3> that "traded some performance in return for | |
1533 | portability". That statement is no longer accurate as | |
1534 | C<Net::Amazon::S3> implements much more of the S3 API and may have | |
1535 | changed the interface in ways that might break your | |
1536 | applications. However, C<Net::Amazon::S3> is today dependent on | |
1557 | This module is rather dated; however, with some help from a few | 
1558 | contributors it has had some recent updates. Recent changes include | 
1559 | implementations of: | |
1560 | ||
1561 | =over 5 | |
1562 | ||
1563 | =item ListObjectsV2 | |
1564 | ||
1565 | =item CopyObject | |
1566 | ||
1567 | =item DeleteObjects | |
1568 | ||
1569 | =back | |
1570 | ||
1571 | Additionally, this module now implements Signature Version 4 signing, | |
1572 | unit tests have been updated and more documentation has been added or | |
1573 | corrected. Credentials are encrypted if you have encryption modules installed. | |
1574 | ||
1575 | =head2 Comparison to Other Perl S3 Modules | |
1576 | ||
1577 | Other implementations for accessing Amazon's S3 service include | |
1578 | C<Net::Amazon::S3> and the C<Paws> project. C<Amazon::S3> ostensibly | |
1579 | was intended to be a drop-in replacement for C<Net:Amazon::S3> that | |
1580 | "traded some performance in return for portability". That statement is | |
1581 | no longer accurate as C<Amazon::S3> may have changed the interface in | |
1582 | ways that might break your applications if you are relying on | |
1583 | compatibility with C<Net::Amazon::S3>. | |
1584 | ||
1585 | However, C<Net::Amazon::S3> and C<Paws::S3> today, are dependent on | |
1537 | 1586 | C<Moose> which may in fact level the playing field in terms of |
1538 | 1587 | performance penalties that may have been introduced by recent updates |
1539 | to C<Amazon::S3>. YMMV, however, this module may still appeal to some | |
1540 | that favor simplicity of the interface and a lower number of | |
1541 | dependencies. Below is the original description of the module.> | |
1588 | to C<Amazon::S3>. Changes to C<Amazon::S3> include the use of more | |
1589 | Perl modules in lieu of raw Perl code to increase maintainability and | |
1590 | stability as well as some refactoring. C<Amazon::S3> also strives now | |
1591 | to adhere to best practices as much as possible. | |
1592 | ||
1593 | C<Paws::S3> is a much more robust implementation of | |
1594 | a Perl S3 interface, however this module may still appeal to | |
1595 | those that favor simplicity of the interface and a lower number of | |
1596 | dependencies. Below is the original description of the module. | |
1542 | 1597 | |
1543 | 1598 | =over 10 |
1544 | 1599 | |
1577 | 1632 | =head1 LIMITATIONS AND DIFFERENCES WITH EARLIER VERSIONS |
1578 | 1633 | |
1579 | 1634 | As noted, this module is no longer a I<drop-in> replacement for |
1580 | C<Net::Amazon::S3> and has limitations and differences that may make | |
1581 | the use of this module in your applications | |
1582 | questionable. Additionally, one of the original intents of this fork | |
1583 | of C<Net::Amazon::S3> was to reduce the dependencies and make it | |
1584 | I<easy to install>. Recent changes to this module have introduced new | |
1585 | dependencies in order to improve the maintainability and provide | |
1586 | additional features. Installing CPAN modules is never easy, especially | |
1587 | when the dependencies of the dependencies are impossible to control | |
1588 | and include XS modules. | |
1635 | C<Net::Amazon::S3> and has limitations and differences that may impact | |
1636 | the use of this module in your applications. Additionally, one of the | |
1637 | original intents of this fork of C<Net::Amazon::S3> was to reduce the | |
1638 | number of dependencies and make it I<easy to install>. Recent changes | |
1639 | to this module have introduced new dependencies in order to improve | |
1640 | the maintainability and provide additional features. Installing CPAN | |
1641 | modules is never easy, especially when the dependencies of the | |
1642 | dependencies are impossible to control and include XS modules. | |
1589 | 1643 | |
1590 | 1644 | =over 5 |
1591 | 1645 | |
1612 | 1666 | |
1613 | 1667 | HTML::HeadParser 2.14 |
1614 | 1668 | LWP 6.13 |
1615 | Amazon::S3 0.55 | |
1669 | Amazon::S3 | |
1616 | 1670 | |
1617 | 1671 | ...other versions I<may> work...YMMV. |
1618 | 1672 | |
1639 | 1693 | parameter. This implies that you need to supply the bucket's region |
1640 | 1694 | when signing requests for any API call that involves a specific |
1641 | 1695 | bucket. Starting with version 0.55 of this module, |
1642 | C<Amazon::S3::Bucket> provides a new method (C<region()> and accepts | |
1696 | C<Amazon::S3::Bucket> provides a new method (C<region()>) and accepts | |
1643 | 1697 | in the constructor a C<region> parameter. If a region is not |
1644 | 1698 | supplied, the region for the bucket will be set to the region set in |
1645 | 1699 | the C<account> object (C<Amazon::S3>) that you passed to the bucket's |
1664 | 1718 | |
1665 | 1719 | =item Multipart Upload Support |
1666 | 1720 | |
1667 | There is limited testing for multipart uploads. | |
1721 | There is some limited testing for multipart uploads. | |
1668 | 1722 | |
1669 | 1723 | For more information regarding multi-part uploads visit the link below. |
1670 | 1724 | |
1813 | 1867 | |
1814 | 1868 | Unfortunately, while this will prevent L<Net::Amazon::Signature::V4> |
1815 | 1869 | from hanging on to your credentials, your credentials will be stored in |
1816 | the L<Amazon::S3> object. | |
1870 | the C<Amazon::S3> object. | |
1817 | 1871 | |
1818 | 1872 | Starting with version 0.55 of this module, if you have installed |
1819 | 1873 | L<Crypt::CBC> and L<Crypt::Blowfish>, your credentials will be |
1831 | 1885 | |
1832 | 1886 | =item 5. Do nothing...send the credentials, use the default signer. |
1833 | 1887 | |
1834 | In this case, both the L<Amazon::S3> class and the | |
1888 | In this case, both the C<Amazon::S3> class and the | |
1835 | 1889 | L<Net::Amazon::Signature::V4> have your credentials. Caveat Emptor. |
1836 | 1890 | |
1837 | 1891 | See Also L<Amazon::Credentials> for more information about safely |
2230 | 2284 | |
2231 | 2285 | Your AWS access key |
2232 | 2286 | |
2233 | =item AWS_ACCESS_KEY_SECRET | |
2287 | =item AWS_SECRET_ACCESS_KEY | |
2234 | 2288 | |
2235 | 2289 | Your AWS sekkr1t passkey. Be forewarned that setting this environment variable |
2236 | 2290 | on a shared system might leak that information to another user. Be careful. |
2240 | 2294 | Doesn't matter what you set it to. Just has to be set if you want |
2241 | 2295 | to skip ACLs tests. |
2242 | 2296 | |
2297 | =item AMAZON_S3_SKIP_PERMISSIONS | |
2298 | ||
2299 | Skip tests that check for enforcement of ACLs...as of this version, | |
2300 | LocalStack for example does not support enforcement of ACLs. | |
2301 | ||
2243 | 2302 | =item AMAZON_S3_SKIP_REGION_CONSTRAINT_TEST |
2244 | 2303 | |
2245 | 2304 | Doesn't matter what you set it to. Just has to be set if you want |
2264 | 2323 | |
2265 | 2324 | I<Consider using an S3 mocking service like C<minio> or C<LocalStack> |
2266 | 2325 | if you want to create real tests for your applications or this module.> |
2326 | ||
2327 | Here's a bash script for testing using LocalStack | 
2328 | ||
2329 | #!/bin/bash | |
2330 | # -*- mode: sh; -*- | |
2331 | ||
2332 | BUCKET=net-amazon-s3-test-test | |
2333 | ENDPOINT_URL=s3.localhost.localstack.cloud:4566 | |
2334 | ||
2335 | AMAZON_S3_EXPENSIVE_TESTS=1 \ | |
2336 | AMAZON_S3_HOST=$ENDPOINT_URL \ | |
2337 | AMAZON_S3_LOCALSTACK=1 \ | |
2338 | AWS_ACCESS_KEY_ID=test \ | |
2339 | AWS_ACCESS_SECRET_KEY=test \ | |
2340 | AMAZON_S3_DOMAIN_BUCKET_NAMES=1 make test 2>&1 | tee test.log | |
2341 | ||
2342 | To run the tests...clone the project and build the software. | |
2343 | ||
2344 | cd src/main/perl | |
2345 | ./test.localstack | |
2267 | 2346 | |
2268 | 2347 | =head1 ADDITIONAL INFORMATION |
2269 | 2348 | |
2357 | 2436 | http://www.perl.com/language/misc/Artistic.html. Except |
2358 | 2437 | where otherwise noted, C<Amazon::S3> is Copyright 2008, Timothy |
2359 | 2438 | Appnel, tima@cpan.org. All rights reserved. |
2439 | ||
2440 | =cut |
0 | 0 | #!/usr/bin/env perl -w |
1 | ||
2 | ## no critic | |
3 | 1 | |
4 | 2 | use warnings; |
5 | 3 | use strict; |
6 | 4 | |
7 | use lib 'lib'; | |
5 | use lib qw( . lib); | |
8 | 6 | |
9 | 7 | use Data::Dumper; |
10 | 8 | use Digest::MD5::File qw(file_md5_hex); |
11 | use English qw{-no_match_vars}; | |
12 | use File::Temp qw{ tempfile }; | |
9 | use English qw{-no_match_vars}; | |
10 | use File::Temp qw{ tempfile }; | |
11 | use List::Util qw(any); | |
13 | 12 | use Test::More; |
14 | 13 | |
15 | our $OWNER_ID; | |
16 | our $OWNER_DISPLAYNAME; | |
14 | use S3TestUtils qw(:constants :subs); | |
15 | ||
17 | 16 | our @REGIONS = (undef); |
18 | 17 | |
19 | 18 | if ( $ENV{AMAZON_S3_REGIONS} ) { |
20 | 19 | push @REGIONS, split /\s*,\s*/xsm, $ENV{AMAZON_S3_REGIONS}; |
21 | 20 | } |
22 | 21 | |
23 | my $host; | |
24 | ||
25 | my $skip_owner_id; | |
26 | my $skip_permissions; | |
27 | my $skip_acls; | |
28 | ||
29 | if ( exists $ENV{AMAZON_S3_LOCALSTACK} ) { | |
30 | $host = 'localhost:4566'; | |
31 | ||
32 | $ENV{'AWS_ACCESS_KEY_ID'} = 'test'; | |
33 | $ENV{'AWS_ACCESS_KEY_SECRET'} = 'test'; | |
34 | ||
35 | $ENV{'AMAZON_S3_EXPENSIVE_TESTS'} = 1; | |
36 | ||
37 | $skip_owner_id = 1; | |
38 | $skip_permissions = 1; | |
39 | $skip_acls = 1; | |
40 | } | |
41 | else { | |
42 | $host = $ENV{AMAZON_S3_HOST}; | |
43 | } | |
44 | ||
45 | my $secure = $host ? 0 : 1; | |
46 | ||
47 | # - do not use DNS bucket names for testing if a mocking service is used | |
48 | # - override this by setting AMAZON_S3_DNS_BUCKET_NAMES to any value | |
49 | # - your tests may fail unless you have DNS entry for the bucket name | |
50 | # e.g 127.0.0.1 net-amazon-s3-test-test.localhost | |
51 | ||
52 | my $dns_bucket_names | |
53 | = ( $host && !exists $ENV{AMAZON_S3_DNS_BUCKET_NAMES} ) ? 0 : 1; | |
54 | ||
55 | $skip_acls //= exists $ENV{AMAZON_S3_MINIO} | |
56 | || exists $ENV{AMAZON_S3_SKIP_ACL_TESTS}; | |
57 | ||
58 | my $no_region_constraint //= exists $ENV{AMAZON_S3_MINIO} | |
59 | || exists $ENV{AMAZON_S3_SKIP_REGION_CONSTRAINT_TEST}; | |
60 | ||
61 | my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'}; | |
62 | my $aws_secret_access_key = $ENV{'AWS_ACCESS_KEY_SECRET'}; | |
63 | my $token = $ENV{'AWS_SESSION_TOKEN'}; | |
64 | ||
65 | if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) { | |
22 | my $host = set_s3_host(); | |
23 | ||
24 | my $bucket_name = make_bucket_name(); | |
25 | ||
26 | if ( !$ENV{AMAZON_S3_EXPENSIVE_TESTS} ) { | |
66 | 27 | plan skip_all => 'Testing this module for real costs money.'; |
67 | 28 | } |
68 | 29 | else { |
76 | 37 | use_ok('Amazon::S3'); |
77 | 38 | use_ok('Amazon::S3::Bucket'); |
78 | 39 | |
79 | my $s3; | |
80 | ||
81 | if ( $ENV{AMAZON_S3_CREDENTIALS} ) { | |
82 | require Amazon::Credentials; | |
83 | ||
84 | $s3 = Amazon::S3->new( | |
85 | { credentials => Amazon::Credentials->new, | |
86 | host => $host, | |
87 | secure => $secure, | |
88 | dns_bucket_names => $dns_bucket_names, | |
89 | level => $ENV{DEBUG} ? 'trace' : 'error', | |
90 | } | |
91 | ); | |
92 | ( $aws_access_key_id, $aws_secret_access_key, $token ) | |
93 | = $s3->get_credentials; | |
94 | } | |
95 | else { | |
96 | $s3 = Amazon::S3->new( | |
97 | { aws_access_key_id => $aws_access_key_id, | |
98 | aws_secret_access_key => $aws_secret_access_key, | |
99 | token => $token, | |
100 | host => $host, | |
101 | secure => $secure, | |
102 | dns_bucket_names => $dns_bucket_names, | |
103 | level => $ENV{DEBUG} ? 'trace' : 'error', | |
104 | } | |
105 | ); | |
106 | } | |
107 | ||
108 | # list all buckets that i own | |
109 | my $response = eval { return $s3->buckets; }; | |
110 | ||
111 | if ( $EVAL_ERROR || !$response ) { | |
112 | BAIL_OUT($EVAL_ERROR); | |
113 | } | |
114 | ||
115 | $OWNER_ID = $response->{owner_id}; | |
116 | $OWNER_DISPLAYNAME = $response->{owner_displayname}; | |
40 | my $s3 = get_s3_service($host); | |
41 | ||
42 | if ( !$s3 || $EVAL_ERROR ) { | |
43 | BAIL_OUT( 'could not initialize s3 object: ' . $EVAL_ERROR ); | |
44 | } | |
45 | ||
46 | # bail if test bucket already exists | |
47 | our ( $OWNER_ID, $OWNER_DISPLAYNAME ) = check_test_bucket($s3); | |
117 | 48 | |
118 | 49 | for my $location (@REGIONS) { |
119 | 50 | # this test formerly used the same bucket name for both regions, |
122 | 53 | # To test the bucket constraint policy below then we need to use a |
123 | 54 | # different bucket name. The old comment here was... |
124 | 55 | # |
125 | # create a bucket | |
126 | # make sure it's a valid hostname for EU testing | |
127 | # we use the same bucket name for both in order to force one or the | |
128 | # other to have stale DNS | |
56 | # > create a bucket | |
57 | # > make sure it's a valid hostname for EU testing | |
58 | # > we use the same bucket name for both in order to force one or the | |
59 | # > other to have stale DNS | |
129 | 60 | |
130 | 61 | $s3->region($location); |
131 | 62 | $host = $s3->host; |
132 | 63 | |
133 | my $bucketname_raw; | |
134 | my $bucketname; | |
64 | my $bucket_name_raw; | |
65 | my $bucket_name; | |
135 | 66 | my $bucket_obj; |
136 | 67 | my $bucket_suffix; |
137 | 68 | |
138 | while (1) { | |
139 | ||
140 | $bucketname_raw = sprintf 'net-amazon-s3-test-%s%s', | |
141 | lc($aws_access_key_id), $bucket_suffix // ''; | |
142 | ||
143 | $bucketname = '/' . $bucketname_raw; | |
69 | while ($TRUE) { | |
70 | ||
71 | $bucket_name_raw = make_bucket_name(); | |
72 | $bucket_name = $SLASH . $bucket_name_raw; | |
144 | 73 | |
145 | 74 | $bucket_obj = eval { |
146 | 75 | $s3->add_bucket( |
147 | { bucket => $bucketname, | |
76 | { bucket => $bucket_name, | |
148 | 77 | acl_short => 'public-read', |
149 | 78 | location_constraint => $location |
150 | 79 | } |
158 | 87 | last if $bucket_obj; |
159 | 88 | |
160 | 89 | # 409 indicates bucket name not yet available... |
161 | if ( $s3->last_response->code ne '409' ) { | |
162 | BAIL_OUT("could not create $bucketname"); | |
90 | if ( $s3->last_response->code ne $HTTP_CONFLICT ) { | |
91 | BAIL_OUT("could not create $bucket_name"); | |
163 | 92 | } |
164 | 93 | |
165 | 94 | $bucket_suffix = '-2'; |
166 | 95 | } |
167 | 96 | |
168 | is( ref $bucket_obj, | |
169 | 'Amazon::S3::Bucket', | |
170 | 'create bucket in ' . ( $location // 'DEFAULT_REGION' ) ) | |
171 | or BAIL_OUT("could not create bucket $bucketname"); | |
97 | is( | |
98 | ref $bucket_obj, | |
99 | 'Amazon::S3::Bucket', sprintf 'create bucket (%s) in %s ', | |
100 | $bucket_name, $location // 'DEFAULT_REGION' | |
101 | ) or BAIL_OUT("could not create bucket $bucket_name"); | |
172 | 102 | |
173 | 103 | SKIP: { |
174 | if ($no_region_constraint) { | |
175 | skip "No region constraints", 1; | |
104 | if ( $ENV{AMAZON_S3_SKIP_REGION_CONSTRAINT_TEST} ) { | |
105 | skip 'No region constraints', 1; | |
176 | 106 | } |
177 | 107 | |
178 | 108 | is( $bucket_obj->get_location_constraint, $location ); |
180 | 110 | |
181 | 111 | SKIP: { |
182 | 112 | |
183 | if ( $skip_acls || !$bucket_obj ) { | |
184 | skip "ACLs only for Amazon S3", 3; | |
113 | if ( $ENV{AMAZON_S3_SKIP_ACLS} || !$bucket_obj ) { | |
114 | skip 'ACLs only for Amazon S3', 3; | |
185 | 115 | } |
186 | 116 | |
187 | 117 | like_acl_allusers_read($bucket_obj); |
188 | 118 | |
189 | ok( $bucket_obj->set_acl( { acl_short => 'private' } ) ); | |
119 | my $rsp = $bucket_obj->set_acl( { acl_short => 'private' } ); | |
120 | ||
121 | ok( $rsp, 'set_acl - private' ) | |
122 | or diag( | |
123 | Dumper( [ response => $rsp, $s3->err, $s3->errstr, $s3->error ] ) ); | |
124 | ||
190 | 125 | unlike_acl_allusers_read($bucket_obj); |
191 | ||
192 | 126 | } |
193 | 127 | |
194 | 128 | # another way to get a bucket object (does no network I/O, |
195 | 129 | # assumes it already exists). Read Amazon::S3::Bucket. |
196 | $bucket_obj = $s3->bucket($bucketname); | |
197 | is( ref $bucket_obj, "Amazon::S3::Bucket" ); | |
130 | $bucket_obj = $s3->bucket($bucket_name); | |
131 | is( ref $bucket_obj, 'Amazon::S3::Bucket' ); | |
198 | 132 | |
199 | 133 | # fetch contents of the bucket |
200 | 134 | # note prefix, marker, max_keys options can be passed in |
201 | 135 | |
202 | $response = $bucket_obj->list | |
203 | or BAIL_OUT( $s3->err . ": " . $s3->errstr ); | |
136 | my $response = $bucket_obj->list | |
137 | or BAIL_OUT( $s3->err . ': ' . $s3->errstr ); | |
204 | 138 | |
205 | 139 | SKIP: { |
206 | skip "invalid response to 'list'" | |
207 | if !$response; | |
208 | ||
209 | is( $response->{bucket}, $bucketname_raw ) | |
140 | if ( !$response ) { | |
141 | skip 'invalid response to "list"'; | |
142 | } | |
143 | ||
144 | is( $response->{bucket}, $bucket_name_raw ) | |
210 | 145 | or BAIL_OUT( Dumper [$response] ); |
211 | 146 | |
212 | 147 | ok( !$response->{prefix} ); |
220 | 155 | is_deeply( $response->{keys}, [] ) |
221 | 156 | or diag( Dumper( [$response] ) ); |
222 | 157 | |
223 | is( undef, $bucket_obj->get_key("non-existing-key") ); | |
158 | is( undef, $bucket_obj->get_key('non-existing-key') ); | |
224 | 159 | } |
225 | 160 | |
226 | 161 | my $keyname = 'testing.txt'; |
230 | 165 | # Create a publicly readable key, then turn it private with a short acl. |
231 | 166 | # This key will persist past the end of the block. |
232 | 167 | my $value = 'T'; |
168 | ||
233 | 169 | $bucket_obj->add_key( |
234 | 170 | $keyname, $value, |
235 | 171 | { content_type => 'text/plain', |
240 | 176 | |
241 | 177 | my $url |
242 | 178 | = $s3->dns_bucket_names |
243 | ? "http://$bucketname_raw.$host/$keyname" | |
244 | : "http://$host/$bucketname/$keyname"; | |
179 | ? "http://$bucket_name_raw.$host/$keyname" | |
180 | : "http://$host/$bucket_name/$keyname"; | |
245 | 181 | |
246 | 182 | SKIP: { |
247 | if ($skip_acls) { | |
248 | skip "ACLs only for Amazon S3", 3; | |
249 | } | |
250 | ||
251 | is_request_response_code( $url, 200, | |
252 | "can access the publicly readable key" ); | |
183 | if ( $ENV{AMAZON_S3_SKIP_ACLS} ) { | |
184 | skip 'ACLs only for Amazon S3', 3; | |
185 | } | |
186 | ||
187 | is_request_response_code( $url, $HTTP_OK, | |
188 | 'can access the publicly readable key' ); | |
253 | 189 | |
254 | 190 | like_acl_allusers_read( $bucket_obj, $keyname ); |
255 | 191 | |
256 | 192 | ok( |
257 | $bucket_obj->set_acl( { key => $keyname, acl_short => 'private' } ) ); | |
193 | $bucket_obj->set_acl( | |
194 | { key => $keyname, | |
195 | acl_short => 'private' | |
196 | } | |
197 | ) | |
198 | ); | |
258 | 199 | } |
259 | 200 | |
260 | 201 | SKIP: { |
261 | if ($skip_acls) { | |
262 | skip 'ACLs only for Amazon S3', 1; | |
263 | } | |
264 | ||
265 | is_request_response_code( $url, 403, "cannot access the private key" ); | |
202 | if ( $ENV{AMAZON_S3_SKIP_PERMISSIONS} ) { | |
203 | skip 'Mocking service does not enforce ACLs', 1; | |
204 | } | |
205 | ||
206 | is_request_response_code( $url, $HTTP_FORBIDDEN, | |
207 | 'cannot access the private key' ); | |
266 | 208 | } |
267 | 209 | |
268 | 210 | SKIP: { |
269 | if ($skip_acls) { | |
211 | if ( $ENV{AMAZON_S3_SKIP_ACLS} ) { | |
270 | 212 | skip 'ACLs only for Amazon S3', 5; |
271 | 213 | } |
272 | 214 | |
281 | 223 | ); |
282 | 224 | |
283 | 225 | is_request_response_code( $url, |
284 | 200, "can access the publicly readable key after acl_xml set" ); | |
226 | $HTTP_OK, 'can access the publicly readable key after acl_xml set' ); | |
285 | 227 | |
286 | 228 | like_acl_allusers_read( $bucket_obj, $keyname ); |
287 | 229 | |
295 | 237 | } |
296 | 238 | |
297 | 239 | SKIP: { |
298 | if ( $skip_acls || $ENV{LOCALSTACK} ) { | |
299 | skip 'LocalStack does not enforce ACLs', 2; | |
240 | if ( $ENV{AMAZON_S3_SKIP_PERMISSIONS} ) { | |
241 | skip 'Mocking service does not enforce ACLs', 2; | |
300 | 242 | } |
301 | 243 | |
302 | 244 | is_request_response_code( $url, |
303 | 403, 'cannot access the private key after acl_xml set' ); | |
245 | $HTTP_FORBIDDEN, 'cannot access the private key after acl_xml set' ); | |
304 | 246 | |
305 | 247 | unlike_acl_allusers_read( $bucket_obj, $keyname ); |
306 | 248 | } |
325 | 267 | |
326 | 268 | my $url |
327 | 269 | = $s3->dns_bucket_names |
328 | ? "http://$bucketname_raw.$host/$keyname2" | |
329 | : "http://$host/$bucketname/$keyname2"; | |
270 | ? "http://$bucket_name_raw.$host/$keyname2" | |
271 | : "http://$host/$bucket_name/$keyname2"; | |
330 | 272 | |
331 | 273 | SKIP: { |
332 | skip 'LocalStack does not enforce ACLs', 1 | |
333 | if $skip_permissions || $skip_acls; | |
334 | ||
335 | is_request_response_code( $url, 403, "cannot access the private key" ); | |
274 | if ( $ENV{AMAZON_S3_SKIP_PERMISSIONS} ) { | |
275 | skip 'Mocking service does not enforce ACLs', 1; | |
276 | } | |
277 | ||
278 | is_request_response_code( $url, $HTTP_FORBIDDEN, | |
279 | 'cannot access the private key' ); | |
336 | 280 | } |
337 | 281 | |
338 | 282 | SKIP: { |
339 | skip 'ACLs only for Amazon S3', 4 if $skip_acls; | |
283 | if ( $ENV{AMAZON_S3_SKIP_ACLS} ) { | |
284 | skip 'ACLs only for Amazon S3', 4; | |
285 | } | |
340 | 286 | |
341 | 287 | unlike_acl_allusers_read( $bucket_obj, $keyname2 ); |
342 | 288 | |
349 | 295 | ); |
350 | 296 | |
351 | 297 | is_request_response_code( $url, |
352 | 200, "can access the publicly readable key" ); | |
298 | $HTTP_OK, 'can access the publicly readable key' ); | |
353 | 299 | |
354 | 300 | like_acl_allusers_read( $bucket_obj, $keyname2 ); |
355 | 301 | |
369 | 315 | } |
370 | 316 | |
371 | 317 | if ( !$response ) { |
372 | BAIL_OUT( $s3->err . ": " . $s3->errstr ); | |
373 | } | |
374 | ||
375 | is( $response->{bucket}, $bucketname_raw, "list($v) - bucketname " ); | |
318 | BAIL_OUT( $s3->err . ': ' . $s3->errstr ); | |
319 | } | |
320 | ||
321 | is( $response->{bucket}, $bucket_name_raw, sprintf 'list(%s) - %s', | |
322 | $v, $bucket_name ); | |
376 | 323 | |
377 | 324 | ok( !$response->{prefix}, "list($v) - prefix empty" ) |
378 | 325 | or diag( Dumper [$response] ); |
396 | 343 | is( $key->{size}, 1, "list($v) - size == 1" ); |
397 | 344 | |
398 | 345 | SKIP: { |
399 | skip 'LocalStack has different owner for bucket', 1 if $skip_owner_id; | |
346 | if ( $ENV{AMAZON_S3_SKIP_OWNER_ID_TEST} ) { | |
347 | skip 'mocking service has different owner for bucket', 1; | |
348 | } | |
349 | ||
400 | 350 | is( $key->{owner_id}, $OWNER_ID, "list($v) - owner id " ) |
401 | 351 | or diag( Dumper [$key] ); |
402 | 352 | } |
412 | 362 | |
413 | 363 | # now play with the file methods |
414 | 364 | my ( $fh, $lorem_ipsum ) = tempfile(); |
365 | ||
415 | 366 | print {$fh} <<'EOT'; |
416 | 367 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do |
417 | 368 | eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad |
427 | 378 | my $lorem_ipsum_md5 = file_md5_hex($lorem_ipsum); |
428 | 379 | my $lorem_ipsum_size = -s $lorem_ipsum; |
429 | 380 | |
430 | $keyname .= "2"; | |
381 | $keyname .= '2'; | |
431 | 382 | |
432 | 383 | $bucket_obj->add_key_filename( |
433 | 384 | $keyname, |
440 | 391 | $response = $bucket_obj->get_key($keyname); |
441 | 392 | |
442 | 393 | is( $response->{content_type}, 'text/plain', 'get_key - content_type' ); |
443 | like( $response->{value}, qr/Lorem ipsum/, 'get_key - Lorem ipsum' ); | |
394 | like( $response->{value}, qr/Lorem\sipsum/xsm, 'get_key - Lorem ipsum' ); | |
444 | 395 | |
445 | 396 | is( $response->{etag}, $lorem_ipsum_md5, 'get_key - etag' ) |
446 | 397 | or diag( Dumper [$response] ); |
456 | 407 | is( $response->{content_type}, |
457 | 408 | 'text/plain', 'get_key_filename - content_type' ); |
458 | 409 | |
459 | is( $response->{value}, '', 'get_key_filename - value empty' ); | |
410 | is( $response->{value}, $EMPTY, 'get_key_filename - value empty' ); | |
460 | 411 | |
461 | 412 | is( $response->{etag}, $lorem_ipsum_md5, 'get_key_filename - etag == md5' ); |
462 | 413 | |
478 | 429 | |
479 | 430 | isa_ok( $copy_result, 'HASH', 'copy_object returns a hash reference' ); |
480 | 431 | |
481 | $bucket_obj->delete_key($keyname); | |
482 | 432 | $response = $bucket_obj->list; |
483 | 433 | |
484 | 434 | ok( ( grep {"$keyname.bak"} @{ $response->{keys} } ), 'found the copy' ); |
485 | 435 | |
486 | $bucket_obj->delete_key($keyname); | |
487 | $bucket_obj->delete_key("$keyname.bak"); | |
436 | if ( !$ENV{AMAZON_S3_KEEP_BUCKET} ) { | |
437 | $bucket_obj->delete_key($keyname); | |
438 | $bucket_obj->delete_key("$keyname.bak"); | |
439 | } | |
488 | 440 | |
489 | 441 | # try empty files |
490 | 442 | $keyname .= '3'; |
491 | $bucket_obj->add_key( $keyname, '' ); | |
443 | $bucket_obj->add_key( $keyname, $EMPTY ); | |
492 | 444 | $response = $bucket_obj->get_key($keyname); |
493 | 445 | |
494 | is( $response->{value}, '', 'empty object - value empty' ); | |
446 | is( $response->{value}, $EMPTY, 'empty object - value empty' ); | |
495 | 447 | |
496 | 448 | is( |
497 | 449 | $response->{etag}, |
509 | 461 | # fetch contents of the bucket |
510 | 462 | # note prefix, marker, max_keys options can be passed in |
511 | 463 | $response = $bucket_obj->list |
512 | or die $s3->err . ": " . $s3->errstr; | |
513 | ||
514 | $bucketname =~ s/^\///; | |
515 | ||
516 | is( $response->{bucket}, $bucketname, | |
517 | 'delete key from bucket - bucketname' ); | |
464 | or die $s3->err . ': ' . $s3->errstr; | |
465 | ||
466 | $bucket_name =~ s/^\///xsm; | |
467 | ||
468 | is( $response->{bucket}, $bucket_name, | |
469 | 'delete key from bucket - ' . $bucket_name ); | |
518 | 470 | |
519 | 471 | ok( !$response->{prefix}, 'delete key from bucket - prefix empty' ); |
520 | 472 | |
533 | 485 | # delete multiple keys from bucket |
534 | 486 | # TODO: test deleting specific versions |
535 | 487 | # |
536 | $keyname = 'foo-'; | |
537 | ||
538 | for ( 1 .. 8 ) { | |
539 | $bucket_obj->add_key( "$keyname$_", q{} ); | |
540 | } | |
541 | ||
542 | $response = $bucket_obj->list | |
543 | or die $s3->err . ": " . $s3->errstr; | |
544 | ||
545 | my @key_list = @{ $response->{keys} }; | |
546 | ||
547 | is( 8, scalar @key_list, 'wrote 8 keys for delete_keys() test' ); | |
548 | ||
549 | ###################################################################### | |
550 | # quietly delete version keys - first two | |
551 | ###################################################################### | |
552 | my $delete_rsp = $bucket_obj->delete_keys( | |
553 | { quiet => 1, | |
554 | keys => [ map { $_->{key} } @key_list[ ( 0, 1 ) ] ] | |
555 | } | |
556 | ); | |
557 | ||
558 | # diag( Dumper( [ QUIET => $delete_rsp ] ) ); | |
559 | ||
560 | ok( !$delete_rsp, 'delete_keys() quiet response' ); | |
561 | ||
562 | $response = $bucket_obj->list | |
563 | or die $s3->err . ": " . $s3->errstr; | |
564 | ||
565 | is( scalar @{ $response->{keys} }, 6, 'delete versioned keys' ); | |
566 | ||
567 | shift @key_list; | |
568 | shift @key_list; | |
569 | ||
570 | ###################################################################### | |
571 | # delete list of keys - next two keys | |
572 | ###################################################################### | |
573 | $delete_rsp | |
574 | = $bucket_obj->delete_keys( map { $_->{key} } @key_list[ ( 0, 1 ) ] ); | |
575 | ||
576 | ok( $delete_rsp, 'delete_keys() response' ); | |
577 | ||
578 | $response = $bucket_obj->list | |
579 | or die $s3->err . ": " . $s3->errstr; | |
580 | ||
581 | is( scalar @{ $response->{keys} }, 4, 'delete list of keys' ); | |
582 | ||
583 | shift @key_list; | |
584 | shift @key_list; | |
585 | ||
586 | ###################################################################### | |
587 | # delete array of keys - last two keys | |
588 | ##################################################################### | |
589 | $delete_rsp | |
590 | = $bucket_obj->delete_keys( map { $_->{key} } @key_list[ ( 0, 1 ) ] ); | |
591 | ||
592 | ok( $delete_rsp, 'delete_keys() response' ); | |
593 | ||
594 | $response = $bucket_obj->list | |
595 | or die $s3->err . ": " . $s3->errstr; | |
596 | ||
597 | is( scalar @{ $response->{keys} }, 2, 'delete array of keys' ); | |
598 | ||
599 | ###################################################################### | |
600 | # callback | |
601 | ###################################################################### | |
602 | $delete_rsp = $bucket_obj->delete_keys( | |
603 | sub { | |
604 | my $key = shift @key_list; | |
605 | return ( $key->{key}, '1' ); | |
606 | } | |
607 | ); | |
608 | ||
609 | ok( $delete_rsp, 'delete_keys() response' ); | |
610 | ||
611 | $response = $bucket_obj->list | |
612 | or die $s3->err . ": " . $s3->errstr; | |
613 | ||
614 | ok( !scalar @{ $response->{keys} }, 'delete keys from callback' ); | |
615 | ||
616 | # | |
617 | # delete multiple keys from bucket | |
618 | ###################################################################### | |
619 | ||
620 | ok( $bucket_obj->delete_bucket(), 'delete bucket' ); | |
488 | ||
489 | SKIP: { | |
490 | if ( $ENV{AMAZON_S3_KEEP_BUCKET} ) { | |
491 | skip 'keeping bucket', 9; | |
492 | } | |
493 | ||
494 | $keyname = 'foo-'; | |
495 | ||
496 | for ( 1 .. 8 ) { | |
497 | $bucket_obj->add_key( "$keyname$_", $EMPTY ); | |
498 | } | |
499 | ||
500 | $response = $bucket_obj->list | |
501 | or die $s3->err . ': ' . $s3->errstr; | |
502 | ||
503 | my @key_list = @{ $response->{keys} }; | |
504 | ||
505 | is( 8, scalar @key_list, 'wrote 8 keys for delete_keys() test' ); | |
506 | ||
507 | ###################################################################### | |
508 | # quietly delete version keys - first two | |
509 | ###################################################################### | |
510 | my $delete_rsp = $bucket_obj->delete_keys( | |
511 | { quiet => 1, | |
512 | keys => [ map { $_->{key} } @key_list[ ( 0, 1 ) ] ] | |
513 | } | |
514 | ); | |
515 | ||
516 | ok( !$delete_rsp, 'delete_keys() quiet response - empty' ) | |
517 | or BAIL_OUT( | |
518 | 'could not delete quietly ' | |
519 | . Dumper( | |
520 | [ response => $delete_rsp, | |
521 | last_request => $s3->get_last_request, | |
522 | last_response => $s3->get_last_response, | |
523 | ] | |
524 | ) | |
525 | ); | |
526 | ||
527 | $response = $bucket_obj->list | |
528 | or die $s3->err . ': ' . $s3->errstr; | |
529 | ||
530 | is( | |
531 | scalar @{ $response->{keys} }, | |
532 | -2 + scalar(@key_list), | |
533 | 'delete versioned keys' | |
534 | ); | |
535 | ||
536 | shift @key_list; | |
537 | shift @key_list; | |
538 | ||
539 | ###################################################################### | |
540 | # delete list of keys - next two keys | |
541 | ###################################################################### | |
542 | $delete_rsp | |
543 | = $bucket_obj->delete_keys( map { $_->{key} } @key_list[ ( 0, 1 ) ] ); | |
544 | ||
545 | ok( $delete_rsp, 'delete_keys() response' ); | |
546 | ||
547 | $response = $bucket_obj->list | |
548 | or die $s3->err . ': ' . $s3->errstr; | |
549 | ||
550 | is( | |
551 | scalar @{ $response->{keys} }, | |
552 | -2 + scalar(@key_list), | |
553 | 'delete list of keys' | |
554 | ); | |
555 | ||
556 | shift @key_list; | |
557 | shift @key_list; | |
558 | ||
559 | ###################################################################### | |
560 | # delete array of keys - next two keys | |
561 | ##################################################################### | |
562 | $delete_rsp | |
563 | = $bucket_obj->delete_keys( map { $_->{key} } @key_list[ ( 0, 1 ) ] ); | |
564 | ||
565 | ok( $delete_rsp, 'delete_keys() response' ); | |
566 | ||
567 | $response = $bucket_obj->list | |
568 | or die $s3->err . ': ' . $s3->errstr; | |
569 | ||
570 | is( | |
571 | scalar @{ $response->{keys} }, | |
572 | -2 + scalar(@key_list), | |
573 | 'delete array of keys' | |
574 | ); | |
575 | ||
576 | shift @key_list; | |
577 | shift @key_list; | |
578 | ||
579 | ###################################################################### | |
580 | # callback - last two keys | |
581 | ###################################################################### | |
582 | $delete_rsp = $bucket_obj->delete_keys( | |
583 | sub { | |
584 | my $key = shift @key_list; | |
585 | return ( $key->{key} ); | |
586 | } | |
587 | ); | |
588 | ||
589 | ok( $delete_rsp, 'delete_keys() response' ); | |
590 | ||
591 | $response = $bucket_obj->list | |
592 | or die $s3->err . ': ' . $s3->errstr; | |
593 | ||
594 | is( scalar @{ $response->{keys} }, 0, 'delete keys from callback' ) | |
595 | or diag( Dumper( [ response => $response, key_list => \@key_list ] ) ); | |
596 | ||
597 | # | |
598 | # delete multiple keys from bucket | |
599 | ###################################################################### | |
600 | } | |
601 | ||
602 | SKIP: { | |
603 | if ( $ENV{AMAZON_S3_KEEP_BUCKET} ) { | |
604 | skip 'keeping bucket', 1; | |
605 | } | |
606 | ||
607 | ok( $bucket_obj->delete_bucket(), 'delete bucket' ); | |
608 | } | |
621 | 609 | } |
622 | 610 | |
623 | 611 | # see more docs in Amazon::S3::Bucket |
624 | 612 | |
625 | 613 | # local test methods |
614 | ######################################################################## | |
626 | 615 | sub is_request_response_code { |
616 | ######################################################################## | |
627 | 617 | my ( $url, $code, $message ) = @_; |
618 | ||
628 | 619 | my $request = HTTP::Request->new( 'GET', $url ); |
629 | 620 | |
630 | #warn $request->as_string(); | |
631 | 621 | my $response = $s3->ua->request($request); |
632 | 622 | |
633 | 623 | is( $response->code, $code, $message ) |
634 | or diag( Dumper($response) ); | |
635 | } | |
636 | ||
624 | or diag( Dumper( [ response_code => $response ] ) ); | |
625 | ||
626 | return; | |
627 | } | |
628 | ||
629 | ######################################################################## | |
637 | 630 | sub like_acl_allusers_read { |
638 | my ( $bucketobj, $keyname ) = @_; | |
639 | ||
640 | my $message = acl_allusers_read_message( 'like', @_ ); | |
641 | ||
642 | my $acl = $bucketobj->get_acl($keyname); | |
643 | ||
644 | like( $acl, qr(AllUsers.+READ), $message ) | |
645 | or diag( Dumper [$acl] ); | |
646 | ||
647 | } | |
648 | ||
631 | ######################################################################## | |
632 | my ( $bucket_obj, $keyname ) = @_; | |
633 | ||
634 | my $message = acl_allusers_read_message( 'like', $bucket_obj, $keyname ); | |
635 | ||
636 | my $acl = $bucket_obj->get_acl($keyname); | |
637 | ||
638 | like( $acl, qr/AllUsers.+READ/xsm, $message ) | |
639 | or diag( Dumper( [ acl => $acl ] ) ); | |
640 | ||
641 | return; | |
642 | } | |
643 | ||
644 | ######################################################################## | |
649 | 645 | sub unlike_acl_allusers_read { |
650 | my ( $bucketobj, $keyname ) = @_; | |
651 | my $message = acl_allusers_read_message( 'unlike', @_ ); | |
652 | unlike( $bucketobj->get_acl($keyname), qr(AllUsers.+READ), $message ); | |
653 | } | |
654 | ||
646 | ######################################################################## | |
647 | my ( $bucket_obj, $keyname ) = @_; | |
648 | ||
649 | my $message = acl_allusers_read_message( 'unlike', $bucket_obj, $keyname ); | |
650 | ||
651 | my $acl = $bucket_obj->get_acl($keyname); | |
652 | ||
653 | unlike( $bucket_obj->get_acl($keyname), qr/AllUsers.+READ/xsm, $message ) | |
654 | or diag( Dumper( [ acl => $acl ] ) ); | |
655 | ||
656 | return; | |
657 | } | |
658 | ||
659 | ######################################################################## | |
655 | 660 | sub acl_allusers_read_message { |
656 | my ( $like_or_unlike, $bucketobj, $keyname ) = @_; | |
657 | my $message = $like_or_unlike . "_acl_allusers_read: " . $bucketobj->bucket; | |
658 | $message .= " - $keyname" if $keyname; | |
661 | ######################################################################## | |
662 | my ( $like_or_unlike, $bucket_obj, $keyname ) = @_; | |
663 | ||
664 | my $message = sprintf '%s_acl_allusers_read: %s', $like_or_unlike, | |
665 | $bucket_obj->bucket; | |
666 | ||
667 | if ($keyname) { | |
668 | $message .= " - $keyname"; | |
669 | } | |
670 | ||
659 | 671 | return $message; |
660 | 672 | } |
661 | 673 | |
674 | ######################################################################## | |
662 | 675 | sub acl_xml_from_acl_short { |
663 | my $acl_short = shift || 'private'; | |
664 | ||
665 | my $public_read = ''; | |
666 | if ( $acl_short eq 'public-read' ) { | |
667 | $public_read = qq~ | |
668 | <Grant> | |
669 | <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" | |
670 | xsi:type="Group"> | |
671 | <URI>http://acs.amazonaws.com/groups/global/AllUsers</URI> | |
672 | </Grantee> | |
673 | <Permission>READ</Permission> | |
674 | </Grant> | |
675 | ~; | |
676 | } | |
677 | ||
678 | return qq~<?xml version="1.0" encoding="UTF-8"?> | |
679 | <AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> | |
680 | <Owner> | |
681 | <ID>$OWNER_ID</ID> | |
682 | <DisplayName>$OWNER_DISPLAYNAME</DisplayName> | |
683 | </Owner> | |
684 | <AccessControlList> | |
685 | <Grant> | |
686 | <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" | |
687 | xsi:type="CanonicalUser"> | |
688 | <ID>$OWNER_ID</ID> | |
689 | <DisplayName>$OWNER_DISPLAYNAME</DisplayName> | |
690 | </Grantee> | |
691 | <Permission>FULL_CONTROL</Permission> | |
692 | </Grant> | |
693 | $public_read | |
694 | </AccessControlList> | |
695 | </AccessControlPolicy>~; | |
696 | } | |
697 | ||
676 | ######################################################################## | |
677 | my ($acl_short) = @_; | |
678 | ||
679 | $acl_short //= 'private'; | |
680 | ||
681 | my $public_read | |
682 | = $acl_short eq 'public-read' ? $PUBLIC_READ_POLICY : $EMPTY; | |
683 | ||
684 | my $policy = <<"END_OF_POLICY"; | |
685 | <?xml version="1.0" encoding="UTF-8"?> | |
686 | <AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> | |
687 | <Owner> | |
688 | <ID>$OWNER_ID</ID> | |
689 | <DisplayName>$OWNER_DISPLAYNAME</DisplayName> | |
690 | </Owner> | |
691 | <AccessControlList> | |
692 | <Grant> | |
693 | <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" | |
694 | xsi:type="CanonicalUser"> | |
695 | <ID>$OWNER_ID</ID> | |
696 | <DisplayName>$OWNER_DISPLAYNAME</DisplayName> | |
697 | </Grantee> | |
698 | <Permission>FULL_CONTROL</Permission> | |
699 | </Grant> | |
700 | $public_read | |
701 | </AccessControlList> | |
702 | </AccessControlPolicy> | |
703 | END_OF_POLICY | |
704 | ||
705 | return $policy; | |
706 | } | |
707 | ||
708 | 1; |
4 | 4 | use warnings; |
5 | 5 | use strict; |
6 | 6 | |
7 | use lib 'lib'; | |
7 | use lib qw(lib); | |
8 | 8 | |
9 | 9 | use English qw{-no_match_vars}; |
10 | 10 | |
15 | 15 | |
16 | 16 | use_ok('Amazon::S3'); |
17 | 17 | |
18 | ######################################################################## | |
18 | 19 | sub test_levels { |
20 | ######################################################################## | |
19 | 21 | my ($s3) = @_; |
20 | 22 | |
21 | 23 | print {*STDERR} "\n---[" . $s3->level . "]---\n"; |
30 | 32 | return; |
31 | 33 | } ## end sub test_levels |
32 | 34 | |
35 | ######################################################################## | |
33 | 36 | sub test_all_levels { |
37 | ######################################################################## | |
34 | 38 | my ($s3) = @_; |
35 | 39 | |
36 | 40 | $s3->level('trace'); |
37 | 41 | stderr_like( sub { test_levels($s3); }, |
38 | qr/trace\n.*debug\n.*info\n.*warn\n.*error\n.*fatal\n/xsm ); | |
42 | qr/trace\n.*debug\n.*info\n.*warn\n.*error\n.*fatal\n/xsm, 'trace' ); | |
39 | 43 | |
40 | 44 | $s3->level('debug'); |
41 | 45 | stderr_like( sub { test_levels($s3); }, |
42 | qr/debug\n.*info\n.*warn\n.*error\n.*fatal\n/xsm ); | |
43 | stderr_unlike( sub { test_levels($s3); }, qr/trace/ ); | |
46 | qr/debug\n.*info\n.*warn\n.*error\n.*fatal\n/xsm, 'debug' ); | |
47 | stderr_unlike( sub { test_levels($s3); }, | |
48 | qr/trace/, 'debug - not like trace' ); | |
44 | 49 | |
45 | 50 | $s3->level('info'); |
46 | 51 | stderr_like( sub { test_levels($s3); }, |
47 | qr/info\n.*warn\n.*error\n.*fatal\n/xsm ); | |
48 | stderr_unlike( sub { test_levels($s3); }, qr/trace|debug/ ); | |
52 | qr/info\n.*warn\n.*error\n.*fatal\n/xsm, 'info' ); | |
53 | stderr_unlike( sub { test_levels($s3); }, | |
54 | qr/trace|debug/, 'info - not like trace, debug' ); | |
49 | 55 | |
50 | 56 | $s3->level('warn'); |
51 | stderr_like( sub { test_levels($s3); }, qr/warn\n.*error\n.*fatal\n/xsm ); | |
52 | stderr_unlike( sub { test_levels($s3); }, qr/trace|debug|info/ ); | |
57 | stderr_like( sub { test_levels($s3); }, | |
58 | qr/warn\n.*error\n.*fatal\n/xsm, 'warn' ); | |
59 | stderr_unlike( sub { test_levels($s3); }, | |
60 | qr/trace|debug|info/, 'warn - not like trace, debug, info' ); | |
53 | 61 | |
54 | 62 | $s3->level('error'); |
55 | stderr_like( sub { test_levels($s3); }, qr/error\n.*fatal\n/xsm ); | |
56 | stderr_unlike( sub { test_levels($s3); }, qr/trace|debug|info|warn/ ); | |
63 | stderr_like( sub { test_levels($s3); }, qr/error\n.*fatal\n/xsm, 'error' ); | |
64 | stderr_unlike( sub { test_levels($s3); }, | |
65 | qr/trace|debug|info|warn/, 'error - not like trace, debug, info, warn' ); | |
57 | 66 | |
58 | 67 | $s3->level('fatal'); |
59 | stderr_like( sub { test_levels($s3); }, qr/fatal\n/xsm ); | |
60 | stderr_unlike( sub { test_levels($s3); }, qr/trace|debug|info|warn|error/ ); | |
68 | stderr_like( sub { test_levels($s3); }, qr/fatal\n/xsm, 'fatal' ); | |
69 | stderr_unlike( | |
70 | sub { test_levels($s3); }, | |
71 | qr/trace|debug|info|warn|error/, | |
72 | 'fatal - not like trace, debug, info, warn, error' | |
73 | ); | |
61 | 74 | |
62 | 75 | } ## end sub test_all_levels |
76 | ||
77 | ######################################################################## | |
63 | 78 | |
64 | 79 | my $s3 = Amazon::S3->new( |
65 | 80 | { aws_access_key_id => 'test', |
4 | 4 | use warnings; |
5 | 5 | use strict; |
6 | 6 | |
7 | use lib 'lib'; | |
7 | use lib qw(lib); | |
8 | 8 | |
9 | 9 | use English qw{-no_match_vars}; |
10 | 10 | |
11 | 11 | use Test::More; |
12 | ||
12 | 13 | plan tests => 7; |
13 | 14 | |
14 | 15 | use_ok('Amazon::S3'); |
21 | 22 | ); |
22 | 23 | |
23 | 24 | ok( $s3->region, 'us-east-1' ); |
25 | ||
24 | 26 | is( $s3->host, 's3.us-east-1.amazonaws.com', |
25 | 27 | 'default host is s3.us-east-1.amazonaws.com' ); |
26 | 28 | |
33 | 35 | ); |
34 | 36 | |
35 | 37 | is( $s3->region, 'us-west-2', 'region is set' ); |
38 | ||
36 | 39 | is( $s3->host, 's3.us-west-2.amazonaws.com', |
37 | 40 | 'host is modified during creation' ); |
38 | 41 | |
39 | 42 | $s3->region('us-east-1'); |
40 | 43 | |
41 | 44 | is( $s3->region, 'us-east-1', 'region is set' ); |
45 | ||
42 | 46 | is( $s3->host, 's3.us-east-1.amazonaws.com', |
43 | 47 | 'host is modified when region changes' ); |
44 | 48 |
4 | 4 | use warnings; |
5 | 5 | use strict; |
6 | 6 | |
7 | use lib 'lib'; | |
7 | use lib qw(. lib); | |
8 | 8 | |
9 | 9 | use English qw{-no_match_vars}; |
10 | ||
11 | use S3TestUtils qw(:constants :subs); | |
10 | 12 | |
11 | 13 | use Test::More; |
12 | 14 | use Data::Dumper; |
13 | 15 | |
14 | my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'} // 'foo'; | |
15 | my $aws_secret_access_key = $ENV{'AWS_ACCESS_KEY_SECRET'} // 'foo'; | |
16 | my $token = $ENV{'AWS_SESSION_TOKEN'}; | |
17 | ||
18 | my $host = $ENV{AMAZON_S3_HOST}; | |
16 | my $host = set_s3_host(); | |
19 | 17 | |
20 | 18 | if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) { |
21 | 19 | plan skip_all => 'Testing this module for real costs money.'; |
22 | } ## end if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'...}) | |
20 | } | |
23 | 21 | else { |
24 | plan tests => 16; | |
25 | } ## end else [ if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'...})] | |
22 | plan tests => 9; | |
23 | } | |
26 | 24 | |
27 | 25 | ######################################################################## |
28 | 26 | # BEGIN TESTS |
31 | 29 | use_ok('Amazon::S3'); |
32 | 30 | use_ok('Amazon::S3::Bucket'); |
33 | 31 | |
34 | my $s3; | |
35 | ||
36 | if ( $ENV{AMAZON_S3_CREDENTIALS} ) { | |
37 | require Amazon::Credentials; | |
38 | ||
39 | $s3 = Amazon::S3->new( | |
40 | { credentials => Amazon::Credentials->new, | |
41 | host => $host, | |
42 | log_level => $ENV{DEBUG} ? 'debug' : undef, | |
43 | } | |
44 | ); | |
45 | ( $aws_access_key_id, $aws_secret_access_key, $token ) | |
46 | = $s3->get_credentials; | |
47 | } ## end if ( $ENV{AMAZON_S3_CREDENTIALS...}) | |
48 | else { | |
49 | $s3 = Amazon::S3->new( | |
50 | { aws_access_key_id => $aws_access_key_id, | |
51 | aws_secret_access_key => $aws_secret_access_key, | |
52 | token => $token, | |
53 | debug => $ENV{DEBUG}, | |
54 | host => $host, | |
55 | secure => $host ? 0 : 1, # if host then probably container | |
56 | } | |
57 | ); | |
58 | } ## end else [ if ( $ENV{AMAZON_S3_CREDENTIALS...})] | |
59 | ||
60 | my $bucketname_raw = sprintf 'net-amazon-s3-test-%s', lc $aws_access_key_id; | |
61 | ||
62 | my $bucketname = '/' . $bucketname_raw; | |
63 | ||
64 | my $bucket_obj = eval { $s3->add_bucket( { bucket => $bucketname } ); }; | |
32 | my $s3 = get_s3_service($host); | |
33 | ||
34 | my $bucket_name = make_bucket_name(); | |
35 | ||
36 | my $bucket_obj = create_bucket( $s3, $bucket_name ); | |
37 | ||
38 | ok( ref $bucket_obj, 'created bucket - ' . $bucket_name ); | |
65 | 39 | |
66 | 40 | if ( $EVAL_ERROR || !$bucket_obj ) { |
67 | 41 | BAIL_OUT( $s3->err . ": " . $s3->errstr ); |
68 | } ## end if ( $EVAL_ERROR || !$bucket_obj) | |
69 | ||
70 | is( ref $bucket_obj, 'Amazon::S3::Bucket', 'created bucket' . $bucketname ) | |
71 | or BAIL_OUT("could not create bucket $bucketname"); | |
72 | ||
73 | my $response = $bucket_obj->list | |
74 | or BAIL_OUT( $s3->err . ": " . $s3->errstr ); | |
75 | ||
76 | is( $response->{bucket}, $bucketname_raw, 'no bucket name in list response' ) | |
77 | or do { | |
78 | diag( Dumper( [$response] ) ); | |
79 | BAIL_OUT( Dumper [$response] ); | |
80 | }; | |
81 | ||
82 | ok( !$response->{prefix}, 'no prefix in list response' ); | |
83 | ok( !$response->{marker}, 'no marker in list response' ); | |
84 | ||
85 | is( $response->{max_keys}, 1_000, 'max keys default = 1000' ) | |
86 | or BAIL_OUT( Dumper [$response] ); | |
87 | ||
88 | is( $response->{is_truncated}, 0, 'is_truncated 0' ); | |
89 | ||
90 | is_deeply( $response->{keys}, [], 'no keys in bucket yet' ) | |
91 | or BAIL_OUT( Dumper( [$response] ) ); | |
92 | ||
93 | foreach my $key ( 0 .. 9 ) { | |
94 | my $keyname = sprintf 'testing-%02d.txt', $key; | |
95 | my $value = 'T'; | |
96 | ||
97 | $bucket_obj->add_key( $keyname, $value ); | |
98 | } ## end foreach my $key ( 0 .. 9 ) | |
99 | ||
42 | } | |
43 | ||
44 | my $max_keys = 25; | |
45 | ||
46 | ######################################################################## | |
47 | subtest 'list (check response elements)' => sub { | |
48 | ######################################################################## | |
49 | my $response = $bucket_obj->list | |
50 | or BAIL_OUT( $s3->err . ": " . $s3->errstr ); | |
51 | ||
52 | is( $response->{bucket}, $bucket_name, 'no bucket name in list response' ) | |
53 | or do { | |
54 | diag( Dumper( [$response] ) ); | |
55 | BAIL_OUT( Dumper [$response] ); | |
56 | }; | |
57 | ||
58 | ok( !$response->{prefix}, 'no prefix in list response' ); | |
59 | ok( !$response->{marker}, 'no marker in list response' ); | |
60 | ||
61 | is( $response->{max_keys}, 1_000, 'max keys default = 1000' ) | |
62 | or BAIL_OUT( Dumper [$response] ); | |
63 | ||
64 | is( $response->{is_truncated}, 0, 'is_truncated 0' ); | |
65 | ||
66 | is_deeply( $response->{keys}, [], 'no keys in bucket yet' ) | |
67 | or BAIL_OUT( Dumper( [$response] ) ); | |
68 | }; | |
69 | ||
70 | ######################################################################## | |
100 | 71 | subtest 'list_all' => sub { |
72 | ######################################################################## | |
73 | ||
74 | add_keys( $bucket_obj, $max_keys ); | |
75 | ||
101 | 76 | my $response = $bucket_obj->list_all; |
102 | 77 | |
103 | 78 | is( ref $response, 'HASH', 'response isa HASH' ) |
106 | 81 | is( ref $response->{keys}, 'ARRAY', 'keys element is an ARRAY' ) |
107 | 82 | or diag( Dumper( [$response] ) ); |
108 | 83 | |
109 | is( @{ $response->{keys} }, 10, '10 keys returned' ) | |
84 | is( @{ $response->{keys} }, $max_keys, $max_keys . ' keys returned' ) | |
110 | 85 | or diag( Dumper( [$response] ) ); |
111 | 86 | |
112 | 87 | foreach my $key ( @{ $response->{keys} } ) { |
113 | 88 | is( ref $key, 'HASH', 'array element isa HASH' ) |
114 | 89 | or diag( Dumper( [$key] ) ); |
115 | 90 | |
116 | like( $key->{key}, qr/testing-\d{2}.txt/, 'keyname' ) | |
91 | like( $key->{key}, qr/testing-\d{2}[.]txt/xsm, 'keyname' ) | |
117 | 92 | or diag( Dumper( [$key] ) ); |
118 | 93 | |
119 | } ## end foreach my $key ( @{ $response...}) | |
120 | }; | |
121 | ||
94 | } | |
95 | }; | |
96 | ||
97 | ######################################################################## | |
122 | 98 | subtest 'list' => sub { |
99 | ######################################################################## | |
123 | 100 | |
124 | 101 | my $marker = ''; |
125 | 102 | my $iter = 0; # so we don't loop forever if this is busted |
126 | 103 | |
127 | 104 | my @key_list; |
105 | my $page_size = int $max_keys / 2; | |
128 | 106 | |
129 | 107 | while ( $marker || !$iter ) { |
130 | last if $iter++ > 5; | |
131 | ||
132 | $response = $bucket_obj->list( | |
133 | { 'max-keys' => 3, | |
108 | last if $iter++ > $max_keys; | |
109 | ||
110 | my $response = $bucket_obj->list( | |
111 | { 'max-keys' => $page_size, | |
134 | 112 | marker => $marker, |
135 | delimiter => '/' | |
113 | delimiter => '/', | |
136 | 114 | } |
137 | 115 | ); |
138 | 116 | |
139 | 117 | if ( !$response ) { |
140 | 118 | BAIL_OUT( $s3->err . ": " . $s3->errstr ); |
141 | } ## end if ( !$response ) | |
142 | ||
143 | is( $response->{bucket}, $bucketname_raw, 'no bucket name' ); | |
119 | } | |
120 | ||
121 | is( $response->{bucket}, $bucket_name, 'no bucket name' ); | |
144 | 122 | |
145 | 123 | ok( !$response->{prefix}, 'no prefix' ) |
146 | 124 | or diag( Dumper [$response] ); |
147 | 125 | |
148 | is( $response->{max_keys}, 3, 'max-keys 3' ); | |
126 | is( $response->{max_keys}, $page_size, 'max-keys ' . $page_size ); | |
149 | 127 | |
150 | 128 | is( ref $response->{keys}, 'ARRAY' ) |
151 | 129 | or BAIL_OUT( Dumper( [$response] ) ); |
153 | 131 | push @key_list, @{ $response->{keys} }; |
154 | 132 | |
155 | 133 | $marker = $response->{next_marker}; |
156 | } ## end while ( $marker || !$iter) | |
157 | ||
158 | is( @key_list, 10, 'got 10 keys' ) | |
159 | or diag( Dumper( \@key_list ) ); | |
160 | }; | |
161 | ||
162 | subtest 'list-v2' => sub { | |
134 | ||
135 | last if !$marker; | |
136 | } | |
137 | ||
138 | is( @key_list, $max_keys, $max_keys . ' returned' ) | |
139 | or diag( Dumper( [ key_list => \@key_list ] ) ); | |
140 | }; | |
141 | ||
142 | ######################################################################## | |
143 | subtest 'list_v2' => sub { | |
144 | ######################################################################## | |
163 | 145 | |
164 | 146 | my $marker = ''; |
165 | 147 | my $iter = 0; # so we don't loop forever if this is busted |
166 | 148 | |
167 | 149 | my @key_list; |
150 | my $page_size = int $max_keys / 2; | |
168 | 151 | |
169 | 152 | while ( $marker || !$iter ) { |
170 | last if $iter++ > 5; | |
171 | ||
172 | $response = $bucket_obj->list_v2( | |
173 | { 'max-keys' => 3, | |
153 | last if $iter++ > $max_keys; | |
154 | ||
155 | my $response = $bucket_obj->list_v2( | |
156 | { 'max-keys' => $page_size, | |
174 | 157 | $marker ? ( 'marker' => $marker ) : (), |
175 | 158 | delimiter => '/', |
176 | 159 | } |
178 | 161 | |
179 | 162 | if ( !$response ) { |
180 | 163 | BAIL_OUT( $s3->err . ": " . $s3->errstr ); |
181 | } ## end if ( !$response ) | |
182 | ||
183 | is( $response->{bucket}, $bucketname_raw, 'no bucket name' ); | |
164 | } | |
165 | ||
166 | is( $response->{bucket}, $bucket_name, 'no bucket name' ); | |
184 | 167 | |
185 | 168 | ok( !$response->{prefix}, 'no prefix' ) |
186 | 169 | or diag( Dumper [$response] ); |
187 | 170 | |
188 | is( $response->{max_keys}, 3, 'max-keys 3' ); | |
171 | is( $response->{max_keys}, $page_size, 'max-keys ' . $page_size ); | |
189 | 172 | |
190 | 173 | is( ref $response->{keys}, 'ARRAY' ) |
191 | 174 | or BAIL_OUT( Dumper( [$response] ) ); |
193 | 176 | push @key_list, @{ $response->{keys} }; |
194 | 177 | |
195 | 178 | $marker = $response->{next_marker}; |
196 | } ## end while ( $marker || !$iter) | |
197 | ||
198 | is( @key_list, 10, 'got 10 keys' ) | |
179 | ||
180 | last if !$marker; | |
181 | } | |
182 | ||
183 | is( @key_list, $max_keys, $max_keys . ' returned' ) | |
199 | 184 | or diag( Dumper( \@key_list ) ); |
200 | 185 | }; |
201 | 186 | |
202 | $response = $s3->list_bucket_all( { bucket => $bucketname } ); | |
203 | ||
204 | is( ref $response, 'HASH', 'list_bucket_all response is a HASH' ); | |
205 | is( @{ $response->{keys} }, 10, 'got all 10 keys' ); | |
206 | ||
207 | $response = $s3->list_bucket_all_v2( { bucket => $bucketname } ); | |
208 | is( ref $response, 'HASH', 'list_bucket_all_v2 response is a HASH' ); | |
209 | is( @{ $response->{keys} }, 10, 'got all 10 keys' ); | |
210 | ||
211 | foreach my $key ( 0 .. 9 ) { | |
212 | my $keyname = sprintf 'testing-%02d.txt', $key; | |
213 | ||
214 | $bucket_obj->delete_key($keyname); | |
215 | } ## end foreach my $key ( 0 .. 9 ) | |
187 | ######################################################################## | |
188 | subtest 'list_bucket_all' => sub { | |
189 | ######################################################################## | |
190 | ||
191 | $max_keys += add_keys( $bucket_obj, $max_keys, 'foo/' ); | |
192 | ||
193 | my $response = $s3->list_bucket_all( { bucket => $bucket_name } ); | |
194 | ||
195 | is( ref $response, 'HASH', 'list_bucket_all response is a HASH' ); | |
196 | ||
197 | is( @{ $response->{keys} }, $max_keys, $max_keys . ' returned' ); | |
198 | }; | |
199 | ||
200 | ######################################################################## | |
201 | subtest 'list_bucket_all_v2' => sub { | |
202 | ######################################################################## | |
203 | ||
204 | my $response = $s3->list_bucket_all_v2( { bucket => $bucket_name } ); | |
205 | ||
206 | is( ref $response, 'HASH', 'list_bucket_all_v2 response is a HASH' ); | |
207 | ||
208 | is( @{ $response->{keys} }, $max_keys, $max_keys . ' returned' ); | |
209 | ||
210 | foreach ( @{ $response->{keys} } ) { | |
211 | $bucket_obj->delete_key( $_->{key} ); | |
212 | } | |
213 | }; | |
216 | 214 | |
217 | 215 | $bucket_obj->delete_bucket; |
218 | 216 |
4 | 4 | use warnings; |
5 | 5 | use strict; |
6 | 6 | |
7 | use lib 'lib'; | |
7 | use lib qw( . lib); | |
8 | 8 | |
9 | 9 | use Carp; |
10 | 10 | |
11 | 11 | use Data::Dumper; |
12 | 12 | use Digest::MD5::File qw(file_md5_hex); |
13 | use English qw{-no_match_vars}; | |
14 | use File::Temp qw{ tempfile }; | |
13 | use English qw{-no_match_vars}; | |
14 | use File::Temp qw{ tempfile }; | |
15 | 15 | use Test::More; |
16 | 16 | |
17 | my $host; | |
17 | use S3TestUtils qw(:constants :subs); | |
18 | 18 | |
19 | if ( exists $ENV{AMAZON_S3_LOCALSTACK} ) { | |
20 | $host = 'localhost:4566'; | |
21 | ||
22 | $ENV{'AWS_ACCESS_KEY_ID'} = 'test'; | |
23 | $ENV{'AWS_ACCESS_KEY_SECRET'} = 'test'; | |
24 | ||
25 | $ENV{'AMAZON_S3_EXPENSIVE_TESTS'} = 1; | |
26 | ||
27 | } ## end if ( exists $ENV{AMAZON_S3_LOCALSTACK...}) | |
28 | else { | |
29 | $host = $ENV{AMAZON_S3_HOST}; | |
30 | } ## end else [ if ( exists $ENV{AMAZON_S3_LOCALSTACK...})] | |
31 | ||
32 | my $secure = $host ? 0 : 1; | |
33 | ||
34 | # do not use DNS bucket names for testing if a mocking service is used | |
35 | # override this by setting AMAZON_S3_DNS_BUCKET_NAMES to any value | |
36 | # your tests may fail unless you have DNS entry for the bucket name | |
37 | # e.g 127.0.0.1 net-amazon-s3-test-test.localhost | |
38 | ||
39 | my $dns_bucket_names | |
40 | = ( $host && !exists $ENV{AMAZON_S3_DNS_BUCKET_NAMES} ) ? 0 : 1; | |
41 | ||
42 | my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'}; | |
43 | my $aws_secret_access_key = $ENV{'AWS_ACCESS_KEY_SECRET'}; | |
44 | my $token = $ENV{'AWS_SESSION_TOKEN'}; | |
19 | my $host = set_s3_host(); | |
45 | 20 | |
46 | 21 | if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) { |
47 | 22 | plan skip_all => 'Testing this module for real costs money.'; |
48 | } ## end if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'...}) | |
23 | } | |
49 | 24 | else { |
50 | 25 | plan tests => 7; |
51 | 26 | } |
53 | 28 | use_ok('Amazon::S3'); |
54 | 29 | use_ok('Amazon::S3::Bucket'); |
55 | 30 | |
56 | my $s3; | |
31 | my $s3 = get_s3_service($host); | |
57 | 32 | |
58 | if ( $ENV{AMAZON_S3_CREDENTIALS} ) { | |
59 | require Amazon::Credentials; | |
60 | ||
61 | $s3 = Amazon::S3->new( | |
62 | { credentials => Amazon::Credentials->new, | |
63 | host => $host, | |
64 | secure => $secure, | |
65 | dns_bucket_names => $dns_bucket_names, | |
66 | level => $ENV{DEBUG} ? 'trace' : 'error', | |
67 | } | |
68 | ); | |
69 | ( $aws_access_key_id, $aws_secret_access_key, $token ) | |
70 | = $s3->get_credentials; | |
71 | } ## end if ( $ENV{AMAZON_S3_CREDENTIALS...}) | |
72 | else { | |
73 | $s3 = Amazon::S3->new( | |
74 | { aws_access_key_id => $aws_access_key_id, | |
75 | aws_secret_access_key => $aws_secret_access_key, | |
76 | token => $token, | |
77 | host => $host, | |
78 | secure => $secure, | |
79 | dns_bucket_names => $dns_bucket_names, | |
80 | level => $ENV{DEBUG} ? 'trace' : 'error', | |
81 | } | |
82 | ); | |
83 | } ## end else [ if ( $ENV{AMAZON_S3_CREDENTIALS...})] | |
84 | ||
85 | sub create_bucket { | |
86 | my ($bucket_name) = @_; | |
87 | ||
88 | $bucket_name = '/' . $bucket_name; | |
89 | my $bucket_obj | |
90 | = eval { return $s3->add_bucket( { bucket => $bucket_name } ); }; | |
91 | ||
92 | return $bucket_obj; | |
33 | if ( !$s3 ) { | |
34 | BAIL_OUT('could not initialize s3 object'); | |
93 | 35 | } |
94 | 36 | |
95 | my $bucket_obj = create_bucket sprintf 'net-amazon-s3-test-%s', | |
96 | lc $aws_access_key_id; | |
37 | my $bucket_name = make_bucket_name(); | |
38 | my $bucket_obj = create_bucket( $s3, $bucket_name ); | |
97 | 39 | |
98 | ok( ref $bucket_obj, 'created bucket' ); | |
40 | ok( ref $bucket_obj, 'created bucket - ' . $bucket_name ); | |
99 | 41 | |
100 | 42 | if ( $EVAL_ERROR || !$bucket_obj ) { |
101 | 43 | BAIL_OUT( $s3->err . ": " . $s3->errstr ); |
102 | 44 | } ## end if ( $EVAL_ERROR || !$bucket_obj) |
103 | 45 | |
46 | ######################################################################## | |
104 | 47 | subtest 'multipart-manual' => sub { |
48 | ######################################################################## | |
105 | 49 | my $key = 'big-object-1'; |
106 | 50 | |
107 | 51 | my $id = $bucket_obj->initiate_multipart_upload($key); |
129 | 73 | ok( $bucket_obj->delete_key($key) ); |
130 | 74 | }; |
131 | 75 | |
76 | ######################################################################## | |
132 | 77 | subtest 'multipart-file' => sub { |
78 | ######################################################################## | |
133 | 79 | my ( $fh, $file ) = tempfile(); |
134 | 80 | |
135 | 81 | my $buffer = 'x' x ( 1024 * 1024 ); |
166 | 112 | unlink $file; |
167 | 113 | }; |
168 | 114 | |
115 | ######################################################################## | |
169 | 116 | subtest 'multipart-2-parts' => sub { |
117 | ######################################################################## | |
170 | 118 | my $length = 1024 * 1024 * 7; |
171 | 119 | |
172 | 120 | my $data = 'x' x $length; |
189 | 137 | $bucket_obj->delete_key($key); |
190 | 138 | }; |
191 | 139 | |
140 | ######################################################################## | |
192 | 141 | subtest 'multipart-callback' => sub { |
142 | ######################################################################## | |
193 | 143 | my $key = 'big-object-4'; |
194 | 144 | |
195 | 145 | my @part = ( 5, 5, 5, 1 ); |
222 | 172 | $bucket_obj->delete_key($key); |
223 | 173 | }; |
224 | 174 | |
175 | ######################################################################## | |
176 | ||
225 | 177 | $bucket_obj->delete_bucket() |
226 | 178 | or diag( $s3->errstr ); |
227 | 179 | |
180 | 1; |
4 | 4 | use warnings; |
5 | 5 | use strict; |
6 | 6 | |
7 | use lib 'lib'; | |
7 | use lib qw(. lib); | |
8 | 8 | |
9 | 9 | use Carp; |
10 | 10 | |
11 | 11 | use Data::Dumper; |
12 | 12 | use Digest::MD5::File qw(file_md5_hex); |
13 | use English qw{-no_match_vars}; | |
14 | use File::Temp qw{ tempfile }; | |
13 | use English qw(-no_match_vars); | |
14 | use File::Temp qw( tempfile ); | |
15 | use S3TestUtils qw(:constants :subs); | |
15 | 16 | use Test::More; |
16 | 17 | use XML::Simple qw{XMLin}; |
17 | 18 | |
18 | my $host; | |
19 | ||
20 | if ( exists $ENV{AMAZON_S3_LOCALSTACK} ) { | |
21 | $host = 'localhost:4566'; | |
22 | ||
23 | $ENV{'AWS_ACCESS_KEY_ID'} = 'test'; | |
24 | $ENV{'AWS_ACCESS_KEY_SECRET'} = 'test'; | |
25 | ||
26 | $ENV{'AMAZON_S3_EXPENSIVE_TESTS'} = 1; | |
27 | ||
28 | } ## end if ( exists $ENV{AMAZON_S3_LOCALSTACK...}) | |
29 | else { | |
30 | $host = $ENV{AMAZON_S3_HOST}; | |
31 | } ## end else [ if ( exists $ENV{AMAZON_S3_LOCALSTACK...})] | |
32 | ||
33 | my $secure = $host ? 0 : 1; | |
34 | ||
35 | # do not use DNS bucket names for testing if a mocking service is used | |
36 | # override this by setting AMAZON_S3_DNS_BUCKET_NAMES to any value | |
37 | # your tests may fail unless you have DNS entry for the bucket name | |
38 | # e.g 127.0.0.1 net-amazon-s3-test-test.localhost | |
39 | ||
40 | my $dns_bucket_names | |
41 | = ( $host && !exists $ENV{AMAZON_S3_DNS_BUCKET_NAMES} ) ? 0 : 1; | |
42 | ||
43 | my $aws_access_key_id = $ENV{'AWS_ACCESS_KEY_ID'}; | |
44 | my $aws_secret_access_key = $ENV{'AWS_ACCESS_KEY_SECRET'}; | |
45 | my $token = $ENV{'AWS_SESSION_TOKEN'}; | |
19 | my $host = set_s3_host(); | |
46 | 20 | |
47 | 21 | if ( !$ENV{'AMAZON_S3_EXPENSIVE_TESTS'} ) { |
48 | 22 | plan skip_all => 'Testing this module for real costs money.'; |
54 | 28 | use_ok('Amazon::S3'); |
55 | 29 | use_ok('Amazon::S3::Bucket'); |
56 | 30 | |
57 | my $s3; | |
31 | my $s3 = get_s3_service($host); | |
58 | 32 | |
59 | if ( $ENV{AMAZON_S3_CREDENTIALS} ) { | |
60 | require Amazon::Credentials; | |
33 | my $bucket_name = make_bucket_name(); | |
61 | 34 | |
62 | $s3 = Amazon::S3->new( | |
63 | { credentials => Amazon::Credentials->new, | |
64 | host => $host, | |
65 | secure => $secure, | |
66 | dns_bucket_names => $dns_bucket_names, | |
67 | level => $ENV{DEBUG} ? 'trace' : 'error', | |
68 | } | |
69 | ); | |
70 | ( $aws_access_key_id, $aws_secret_access_key, $token ) | |
71 | = $s3->get_credentials; | |
72 | } ## end if ( $ENV{AMAZON_S3_CREDENTIALS...}) | |
73 | else { | |
74 | $s3 = Amazon::S3->new( | |
75 | { aws_access_key_id => $aws_access_key_id, | |
76 | aws_secret_access_key => $aws_secret_access_key, | |
77 | token => $token, | |
78 | host => $host, | |
79 | secure => $secure, | |
80 | dns_bucket_names => $dns_bucket_names, | |
81 | level => $ENV{DEBUG} ? 'trace' : 'error', | |
82 | } | |
83 | ); | |
84 | } ## end else [ if ( $ENV{AMAZON_S3_CREDENTIALS...})] | |
85 | ||
86 | ######################################################################## | |
87 | sub list_multipart_uploads { | |
88 | ######################################################################## | |
89 | my ($bucket_obj) = @_; | |
90 | ||
91 | my $xml = $bucket_obj->list_multipart_uploads; | |
92 | ||
93 | ok( $xml =~ /^</xms, 'is xml result' ); | |
94 | ||
95 | my $uploads = XMLin( $xml, KeepRoot => 1 ); | |
96 | ||
97 | isa_ok( $uploads, 'HASH', 'made a hash object' ) | |
98 | or diag($uploads); | |
99 | ||
100 | ok( defined $uploads->{ListMultipartUploadsResult}, | |
101 | 'looks like a results object' ) | |
102 | or diag($xml); | |
103 | ||
104 | my $upload_list = $uploads->{ListMultipartUploadsResult}->{Upload}; | |
105 | ||
106 | return $upload_list; | |
107 | } | |
108 | ||
109 | ######################################################################## | |
110 | sub partial_upload { | |
111 | ######################################################################## | |
112 | my ( $key, $bucket_obj, $size_in_mb ) = @_; | |
113 | ||
114 | my $id = $bucket_obj->initiate_multipart_upload($key); | |
115 | my $length = ( $size_in_mb || 5 ) * 1024 * 1024; | |
116 | ||
117 | my $data = 'x' x $length; | |
118 | ||
119 | my $etag | |
120 | = $bucket_obj->upload_part_of_multipart_upload( $key, $id, 1, $data, | |
121 | $length ); | |
122 | ||
123 | return $id; | |
124 | } | |
125 | ||
126 | ######################################################################## | |
127 | sub create_bucket { | |
128 | ######################################################################## | |
129 | my ($bucket_name) = @_; | |
130 | ||
131 | $bucket_name = '/' . $bucket_name; | |
132 | my $bucket_obj | |
133 | = eval { return $s3->add_bucket( { bucket => $bucket_name } ); }; | |
134 | ||
135 | return $bucket_obj; | |
136 | } | |
137 | ||
138 | my $bucket_name = sprintf 'net-amazon-s3-test-%s', lc $aws_access_key_id; | |
139 | my $bucket_obj = create_bucket $bucket_name; | |
35 | my $bucket_obj = create_bucket( $s3, $bucket_name ); | |
140 | 36 | |
141 | 37 | ok( ref $bucket_obj, 'created bucket - ' . $bucket_name ); |
142 | 38 | |
147 | 43 | my $id; |
148 | 44 | my $key = 'big-object-1'; |
149 | 45 | |
46 | ######################################################################## | |
150 | 47 | subtest 'list-multipart-uploads' => sub { |
48 | ######################################################################## | |
151 | 49 | |
152 | 50 | my $upload_list = list_multipart_uploads($bucket_obj); |
51 | ||
153 | 52 | ok( !defined $upload_list, 'no in-progress uploads' ) |
154 | 53 | or diag( Dumper( [$upload_list] ) ); |
155 | 54 | |
160 | 59 | ok( $upload_list->{UploadId} eq $id, 'UploadId eq $id' ); |
161 | 60 | }; |
162 | 61 | |
62 | ######################################################################## | |
163 | 63 | subtest 'abort-multipart-upload' => sub { |
64 | ######################################################################## | |
164 | 65 | |
165 | 66 | $bucket_obj->abort_multipart_upload( $key, $id ); |
166 | 67 | |
169 | 70 | ok( !defined $upload_list, 'aborted upload' ); |
170 | 71 | }; |
171 | 72 | |
73 | ######################################################################## | |
172 | 74 | subtest 'abort-on-error' => sub { |
75 | ######################################################################## | |
173 | 76 | my $id = $bucket_obj->initiate_multipart_upload($key); |
174 | 77 | |
175 | 78 | my $part_list = {}; |
194 | 97 | $bucket_obj->abort_multipart_upload( $key, $id ); |
195 | 98 | }; |
196 | 99 | |
100 | ######################################################################## | |
101 | ||
197 | 102 | $bucket_obj->delete_bucket() |
198 | 103 | or diag( $s3->errstr ); |
199 | 104 | |
105 | ######################################################################## | |
106 | sub partial_upload { | |
107 | ######################################################################## | |
108 | my ( $key, $bucket_obj, $size_in_mb ) = @_; | |
109 | ||
110 | my $id = $bucket_obj->initiate_multipart_upload($key); | |
111 | my $length = ( $size_in_mb || 5 ) * 1024 * 1024; | |
112 | ||
113 | my $data = 'x' x $length; | |
114 | ||
115 | my $etag | |
116 | = $bucket_obj->upload_part_of_multipart_upload( $key, $id, 1, $data, | |
117 | $length ); | |
118 | ||
119 | return $id; | |
120 | } | |
121 | ||
122 | ######################################################################## | |
123 | sub list_multipart_uploads { | |
124 | ######################################################################## | |
125 | my ($bucket_obj) = @_; | |
126 | ||
127 | my $xml = $bucket_obj->list_multipart_uploads; | |
128 | ||
129 | ok( $xml =~ /^</xms, 'is xml result' ); | |
130 | ||
131 | my $uploads = XMLin( $xml, KeepRoot => $TRUE ); | |
132 | ||
133 | isa_ok( $uploads, 'HASH', 'made a hash object' ) | |
134 | or diag($uploads); | |
135 | ||
136 | ok( defined $uploads->{ListMultipartUploadsResult}, | |
137 | 'looks like a results object' ) | |
138 | or diag($xml); | |
139 | ||
140 | my $upload_list = $uploads->{ListMultipartUploadsResult}->{Upload}; | |
141 | ||
142 | return $upload_list; | |
143 | } | |
144 | ||
145 | 1; |
0 | AMAZON_S3_EXPENSIVE_TESTS=1 \ | |
1 | AMAZON_S3_HOST=s3.localhost.localstack.cloud:4566 \ | |
2 | AMAZON_S3_LOCALSTACK=1 \ | |
3 | AWS_ACCESS_KEY_ID=test \ | |
4 | AWS_ACCESS_SECRET_KEY=test \ | |
5 | AMAZON_S3_DOMAIN_BUCKET_NAMES=1 make test >test.log 2>&1 | |
0 | #!/bin/bash | |
1 | # -*- mode: sh; -*- | |
6 | 2 | |
3 | BUCKET=net-amazon-s3-test-test | |
4 | ENDPOINT_URL=s3.localhost.localstack.cloud:4566 | |
5 | ||
6 | AMAZON_S3_EXPENSIVE_TESTS=1 \ | |
7 | AMAZON_S3_HOST=$ENDPOINT_URL \ | |
8 | AMAZON_S3_LOCALSTACK=1 \ | |
9 | AWS_ACCESS_KEY_ID=test \ | |
10 | AWS_ACCESS_SECRET_KEY=test \ | |
11 | AMAZON_S3_DOMAIN_BUCKET_NAMES=1 make test 2>&1 | tee test.log | |
12 |