diff --git a/.gitignore b/.gitignore deleted file mode 100644 index ea76ee0..0000000 --- a/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -*.pyc -*.swp -testsuite -/MANIFEST -/dist -build/* -s3cmd.spec diff --git a/.svnignore b/.svnignore deleted file mode 100644 index cb4611a..0000000 --- a/.svnignore +++ /dev/null @@ -1,8 +0,0 @@ -## Run 'svn propset svn:ignore -F .svnignore .' after you change this list -*.pyc -tst.* -MANIFEST -dist -build -.*.swp -s3cmd.1.gz diff --git a/ChangeLog b/ChangeLog deleted file mode 100644 index 106e9c4..0000000 --- a/ChangeLog +++ /dev/null @@ -1,1462 +0,0 @@ -2011-06-06 Michal Ludvig - -===== Migrated to GIT ===== - -No longer keeping ChangeLog up to date, use git log instead! - -Two "official" repositories (both the same content): - -* git://github.com/s3tools/s3cmd.git (primary) -* git://s3tools.git.sourceforge.net/gitroot/s3tools/s3cmd.git - -2011-04-11 Michal Ludvig - - * S3/S3Uri.py: Fixed cf:// uri parsing. - * S3/CloudFront.py: Don't fail if there are no cfinval - requests. - -2011-04-11 Michal Ludvig - - * S3/PkgInfo.py: Updated to 1.1.0-beta1 - * NEWS: Updated. - * s3cmd.1: Regenerated. - -2011-04-11 Michal Ludvig - - * S3/Config.py: Increase socket_timeout from 10 secs to 5 mins. - -2011-04-10 Michal Ludvig - - * s3cmd, S3/CloudFront.py, S3/S3Uri.py: Support for checking - status of CF Invalidation Requests [cfinvalinfo]. - * s3cmd, S3/CloudFront.py, S3/Config.py: Support for CloudFront - invalidation using [sync --cf-invalidate] command. - * S3/Utils.py: getDictFromTree() now recurses into - sub-trees. - -2011-03-30 Michal Ludvig - - * S3/CloudFront.py: Fix warning with Python 2.7 - * S3/CloudFront.py: Cmd._get_dist_name_for_bucket() moved to - CloudFront class. - -2011-01-13 Michal Ludvig - - * s3cmd, S3/FileLists.py: Move file/object listing functions - to S3/FileLists.py - -2011-01-09 Michal Ludvig - - * Released version 1.0.0 - ---------------------- - - * S3/PkgInfo.py: Updated to 1.0.0 - * NEWS: Updated. - -2011-01-02 Michal Ludvig - - * s3cmd: Improved r457 (Don't crash when file disappears - before checking MD5). - * s3cmd, s3cmd.1, format-manpage.pl: Improved --help text - and manpage. - * s3cmd: Removed explicit processing of --follow-symlinks - (is cought by the default / main loop). - -2010-12-24 Michal Ludvig - - * s3cmd: Set 10s socket timeout for read()/write(). - * s3cmd: Added --(no-)check-md5 for [sync]. - * run-tests.py, testsuite.tar.gz: Added testsuite for - the above. - * NEWS: Document the above. - * s3cmd: Don't crash when file disappears before - checking MD5. - -2010-12-09 Michal Ludvig - - * Released version 1.0.0-rc2 - -------------------------- - - * S3/PkgInfo.py: Updated to 1.0.0-rc2 - * NEWS, TODO, s3cmd.1: Updated. - -2010-11-13 Michal Ludvig - - * s3cmd: Added support for remote-to-remote sync. - (Based on patch from Sundar Raman - thanks!) - * run-tests.py: Testsuite for the above. - -2010-11-12 Michal Ludvig - - * s3cmd: Fixed typo in "s3cmd du" error path. - -2010-11-12 Michal Ludvig - - * format-manpage.pl: new manpage auto-formatter - * s3cmd.1: Updated using the above helper script - * setup.py: Warn if manpage is too old. - -2010-10-27 Michal Ludvig - - * run-tests.py, testsuite.tar.gz: Keep the testsuite in - SVN as a tarball. There's too many "strange" things - in the directory for it to be kept in SVN. - -2010-10-27 Michal Ludvig - - * TODO: Updated. 
- * upload-to-sf.sh: Updated for new SF.net system - -2010-10-26 Michal Ludvig - - * Released version 1.0.0-rc1 - -------------------------- - - * S3/PkgInfo.py: Updated to 1.0.0-rc1 - * NEWS, TODO: Updated. - -2010-10-26 Michal Ludvig - - * s3cmd, S3/CloudFront.py, S3/Config.py: Added support - for CloudFront DefaultRootObject. Thanks to Luke Andrew. - -2010-10-25 Michal Ludvig - - * s3cmd: Improved 'fixbucket' command. Thanks to Srinivasa - Moorthy. - * s3cmd: Read config file even if User Profile directory on - Windows contains non-ascii symbols. Thx Slava Vishnyakov - -2010-10-25 Michal Ludvig - - * s3cmd: Don't fail when a local node is a directory - and we expected a file. (as if for example /etc/passwd - was a dir) - -2010-10-25 Michal Ludvig - - * s3cmd, S3/S3.py: Ignore inaccessible (and missing) files - on upload. - * run-tests.py: Extended [sync] test to verify correct - handling of inaccessible files. - * testsuite/permission-tests: New testsuite files. - -2010-10-24 Michal Ludvig - - * S3/S3.py: "Stringify" all headers. Httplib should do - it but some Python 2.7 users reported problems that should - now be fixed. - * run-tests.py: Fixed test #6 - -2010-07-25 Aaron Maxwell - - * S3/Config.py, testsuite/etc/, run-tests.py, s3cmd.1, s3cmd: - Option to follow local symlinks for sync and - put (--follow-symlinks option), including tests and documentation - * run-tests.py: --bucket-prefix option, to allow different - developers to run tests in their own sandbox - -2010-07-08 Michal Ludvig - - * run-tests.py, testsuite/crappy-file-name.tar.gz: - Updated testsuite, work around a problem with [s3cmd cp] - when the source file contains '?' or '\x7f' - (where the inability to copy '?' is especially annoying). - -2010-07-08 Michal Ludvig - - * S3/Utils.py, S3/S3Uri.py: Fixed names after moving - functions between modules. - -2010-06-29 Timothee Groleau - - * S3/ACL.py: Fix isAnonRead method on Grantees - * ChangeLog: Update name of contributor for Timothee Groleau - -2010-06-13 Michal Ludvig - - * s3cmd, S3/CloudFront.py: Both [accesslog] and [cfmodify] - access logging can now be disabled with --no-access-logging - -2010-06-13 Michal Ludvig - - * S3/CloudFront.py: Allow s3:// URI as well as cf:// URI - for most CloudFront-related commands. - -2010-06-12 Michal Ludvig - - * s3cmd, S3/CloudFront.py, S3/Config.py: Support access - logging for CloudFront distributions. - * S3/S3.py, S3/Utils.py: Moved some functions to Utils.py - to make them available to CloudFront.py - * NEWS: Document the above. - -2010-05-27 Michal Ludvig - - * S3/S3.py: Fix bucket listing for buckets with - over 1000 prefixes. (contributed by Timothee Groleau) - * S3/S3.py: Fixed code formating. - -2010-05-21 Michal Ludvig - - * s3cmd, S3/S3.py: Added support for bucket locations - outside US/EU (i.e. us-west-1 and ap-southeast-1 as of now). - -2010-05-21 Michal Ludvig - - * s3cmd, S3/S3.py, S3/Config.py: Added --reduced-redundancy - switch for Reduced Redundancy Storage. - -2010-05-20 Michal Ludvig - - * s3cmd, S3/ACL.py, S3/Config.py: Support for --acl-grant - and --acl-revoke (contributed by Timothee Groleau) - * s3cmd: Couple of fixes on top of the above commit. - * s3cmd: Pre-parse ACL parameters in OptionS3ACL() - -2010-05-20 Michal Ludvig - - * S3/Exceptions.py, S3/S3.py: Some HTTP_400 exceptions - are retriable. 
- -2010-03-19 Michal Ludvig - - * s3cmd, S3/ACL.py: Print all ACLs for a Grantee - (one Grantee can have multiple different Grant entries) - -2010-03-19 Michal Ludvig - - * s3cmd: Enable bucket-level ACL setting - * s3cmd, S3/AccessLog.py, ...: Added [accesslog] command. - * s3cmd: Fix imports from S3.Utils - -2009-12-10 Michal Ludvig - - * s3cmd: Path separator conversion on Windows hosts. - -2009-10-08 Michal Ludvig - - * Released version 0.9.9.91 - ------------------------- - - * S3/PkgInfo.py: Updated to 0.9.9.91 - * NEWS: News for 0.9.9.91 - -2009-10-08 Michal Ludvig - - * S3/S3.py: fixed reference to _max_retries. - -2009-10-06 Michal Ludvig - - * Released version 0.9.9.90 - ------------------------- - - * S3/PkgInfo.py: Updated to 0.9.9.90 - * NEWS: News for 0.9.9.90 - -2009-10-06 Michal Ludvig - - * S3/S3.py: Introduce throttling on upload only after - second failure. I.e. first retry at full speed. - * TODO: Updated with new ideas. - -2009-06-02 Michal Ludvig - - * s3cmd: New [fixbucket] command for fixing invalid object - names in a given Bucket. For instance names with  in - them (not sure how people manage to upload them but they do). - * S3/S3.py, S3/Utils.py, S3/Config.py: Support methods for - the above, plus advise user to run 'fixbucket' when XML parsing - fails. - * NEWS: Updated. - -2009-05-29 Michal Ludvig - - * S3/Utils.py: New function replace_nonprintables() - * s3cmd: Filter local filenames through the above function - to avoid problems with uploaded filenames containing invalid - XML entities, eg  - * S3/S3.py: Warn if a non-printables char is passed to - urlencode_string() - they should have been replaced earlier - in the processing. - * run-tests.py, TODO, NEWS: Updated. - * testsuite/crappy-file-name.tar.gz: Tarball with a crappy-named - file. Untar for the testsuite. - -2009-05-29 Michal Ludvig - - * testsuite/blahBlah/*: Added files needed for run-tests.py - -2009-05-28 Michal Ludvig - - * S3/Utils.py (dateS3toPython): Be more relaxed about - timestamps format. - -2009-05-28 Michal Ludvig - - * s3cmd, run-test.py, TODO, NEWS: Added --dry-run - and --exclude/--include for [setacl]. - * s3cmd, run-test.py, TODO, NEWS: Added --dry-run - and --exclude/--include for [del]. - -2009-05-28 Michal Ludvig - - * s3cmd: Support for recursive [cp] and [mv], including - multiple-source arguments, --include/--exclude, - --dry-run, etc. - * run-tests.py: Tests for the above. - * S3/S3.py: Preserve metadata (eg ACL or MIME type) - during [cp] and [mv]. - * NEWS, TODO: Updated. - -2009-05-28 Michal Ludvig - - * run-tests.py: Added --verbose mode. - -2009-05-27 Michal Ludvig - - * NEWS: Added info about --verbatim. - * TODO: Added more tasks. - -2009-05-27 Michal Ludvig - - * S3/SortedDict.py: Add case-sensitive mode. - * s3cmd, S3/S3.py, S3/Config.py: Use SortedDict() in - case-sensitive mode to avoid dropping filenames - differing only in capitalisation - * run-tests.py: Testsuite for the above. - * NEWS: Updated. - -2009-03-20 Michal Ludvig - - * S3/S3.py: Re-sign requests before retrial to avoid - RequestTimeTooSkewed errors on failed long-running - uploads. - BTW 'request' now has its own class S3Request. - -2009-03-04 Michal Ludvig - - * s3cmd, S3/Config.py, S3/S3.py: Support for --verbatim. - -2009-02-25 Michal Ludvig - - * s3cmd: Fixed "put file.ext s3://bkt" (ie just the bucket name). - * s3cmd: Fixed reporting of ImportError of S3 modules. 
- * s3cmd: Fixed Error: global name 'real_filename' is not defined - -2009-02-24 Michal Ludvig - - * s3cmd: New command [sign] - * S3/Utils.py: New function sign_string() - * S3/S3.py, S3/CloudFront.py: Use sign_string(). - * NEWS: Updated. - -2009-02-17 Michal Ludvig - - * Released version 0.9.9 - ---------------------- - - * S3/PkgInfo.py: Updated to 0.9.9 - * NEWS: Compile a big news list for 0.9.9 - -2009-02-17 Michal Ludvig - - * s3cmd.1: Document all the new options and commands. - * s3cmd, S3/Config.py: Updated some help texts. Removed - option --debug-syncmatch along the way (because --dry-run - with --debug is good enough). - * TODO: Updated. - -2009-02-16 Michal Ludvig - - * s3cmd: Check Python version >= 2.4 as soon as possible. - -2009-02-14 Michal Ludvig - - * s3cmd, S3/Config.py, S3/S3.py: Added --add-header option. - * NEWS: Documented --add-header. - * run-tests.py: Fixed for new messages. - -2009-02-14 Michal Ludvig - - * README: Updated for 0.9.9 - * s3cmd, S3/PkgInfo.py, s3cmd.1: Replaced project - URLs with http://s3tools.org - * NEWS: Improved message. - -2009-02-12 Michal Ludvig - - * s3cmd: Added --list-md5 for 'ls' command. - * S3/Config.py: New setting list_md5 - -2009-02-12 Michal Ludvig - - * s3cmd: Set Content-Length header for requests with 'body'. - * s3cmd: And send it for requests with no body as well... - -2009-02-02 Michal Ludvig - - * Released version 0.9.9-rc3 - -------------------------- - - * S3/PkgInfo.py, NEWS: Updated for 0.9.9-rc3 - -2009-02-01 Michal Ludvig - - * S3/Exceptions.py: Correct S3Exception.__str__() to - avoid crash in S3Error() subclass. Reported by '~t2~'. - * NEWS: Updated. - -2009-01-30 Michal Ludvig - - * Released version 0.9.9-rc2 - -------------------------- - - * S3/PkgInfo.py, NEWS, TODO: Updated for 0.9.9-rc2 - -2009-01-30 Michal Ludvig - - * s3cmd: Under some circumstance s3cmd crashed - when put/get/sync had 0 files to transmit. Fixed now. - -2009-01-28 Michal Ludvig - - * s3cmd: Output 'delete:' in --dry-run only when - used together with --delete-removed. Otherwise - the user will think that without --dry-run it - would really delete the files. - -2009-01-27 Michal Ludvig - - * Released version 0.9.9-rc1 - -------------------------- - - * S3/PkgInfo.py, NEWS, TODO: Updated for 0.9.9-rc1 - -2009-01-26 Michal Ludvig - - * Merged CloudFront support from branches/s3cmd-airlock - See the ChangeLog in that branch for details. - -2009-01-25 W. Tell - - * s3cmd: Implemented --include and friends. - -2009-01-25 Michal Ludvig - - * s3cmd: Enabled --dry-run and --exclude for 'put' and 'get'. - * S3/Exceptions.py: Remove DeprecationWarning about - BaseException.message in Python 2.6 - * s3cmd: Rewritten gpg_command() to use subprocess.Popen() - instead of os.popen4() deprecated in 2.6 - * TODO: Note about failing GPG. - -2009-01-22 Michal Ludvig - - * S3/Config.py: guess_mime_type = True (will affect new - installations only). - -2009-01-22 Michal Ludvig - - * Released version 0.9.9-pre5 - --------------------------- - - * S3/PkgInfo.py, NEWS, TODO: Updated for 0.9.9-pre5 - -2009-01-22 Michal Ludvig - - * run-tests.py: Updated paths for the new sync - semantics. - * s3cmd, S3/S3.py: Small fixes to make testsuite happy. - -2009-01-21 Michal Ludvig - - * s3cmd: Migrated 'sync' local->remote to the new - scheme with fetch_{local,remote}_list(). - Enabled --dry-run for 'sync'. - -2009-01-20 Michal Ludvig - - * s3cmd: Migrated 'sync' remote->local to the new - scheme with fetch_{local,remote}_list(). 
- Changed fetch_remote_list() to return dict() compatible - with fetch_local_list(). - Re-implemented --exclude / --include processing. - * S3/Utils.py: functions for parsing RFC822 dates (for HTTP - header responses). - * S3/Config.py: placeholders for --include. - -2009-01-15 Michal Ludvig - - * s3cmd, S3/S3Uri.py, NEWS: Support for recursive 'put'. - -2009-01-13 Michal Ludvig - - * TODO: Updated. - * s3cmd: renamed (fetch_)remote_keys to remote_list and - a few other renames for consistency. - -2009-01-08 Michal Ludvig - - * S3/S3.py: Some errors during file upload were incorrectly - interpreted as MD5 mismatch. (bug #2384990) - * S3/ACL.py: Move attributes from class to instance. - * run-tests.py: Tests for ACL. - * s3cmd: Minor messages changes. - -2009-01-07 Michal Ludvig - - * s3cmd: New command 'setacl'. - * S3/S3.py: Implemented set_acl(). - * S3/ACL.py: Fill in tag in ACL XML. - * NEWS: Info about 'setacl'. - -2009-01-07 Michal Ludvig - - * s3cmd: Factored remote_keys generation from cmd_object_get() - to fetch_remote_keys(). - * s3cmd: Display Public URL in 'info' for AnonRead objects. - * S3/ACL.py: Generate XML from a current list of Grantees - -2009-01-07 Michal Ludvig - - * S3/ACL.py: Keep ACL internally as a list of of 'Grantee' objects. - * S3/Utils.py: Fix crash in stripNameSpace() when the XML has no NS. - -2009-01-07 Michal Ludvig - - * S3/ACL.py: New object for handling ACL issues. - * S3/S3.py: Moved most of S3.get_acl() to ACL class. - * S3/Utils.py: Reworked XML helpers - remove XMLNS before - parsing the input XML to avoid having all Tags prefixed - with {XMLNS} by ElementTree. - -2009-01-03 Michal Ludvig - - * s3cmd: Don't fail when neither $HOME nor %USERPROFILE% is set. - (fixes #2483388) - -2009-01-01 W. Tell - - * S3/S3.py, S3/Utils.py: Use 'hashlib' instead of md5 and sha - modules to avoid Python 2.6 warnings. - -2008-12-31 Michal Ludvig - - * Released version 0.9.9-pre4 - --------------------------- - -2008-12-31 Michal Ludvig - - * s3cmd: Reworked internal handling of unicode vs encoded filenames. - Should replace unknown characters with '?' instead of baling out. - -2008-12-31 Michal Ludvig - - * run-tests.py: Display system encoding in use. - * s3cmd: Print a nice error message when --exclude-from - file is not readable. - * S3/PkgInfo.py: Bumped up version to 0.9.9-pre4 - * S3/Exceptions.py: Added missing imports. - * NEWS: Updated. - * testsuite: reorganised UTF-8 files, added GBK encoding files, - moved encoding-specific files to 'tar.gz' archives, removed - unicode dir. - * run-tests.py: Adapted to the above change. - * run-tests.sh: removed. - * testsuite/exclude.encodings: Added. - * run-tests.py: Don't assume utf-8, use preferred encoding - instead. - * s3cmd, S3/Utils.py, S3/Exceptions.py, S3/Progress.py, - S3/Config.py, S3/S3.py: Added --encoding switch and - Config.encoding variable. Don't assume utf-8 for filesystem - and terminal output anymore. - * s3cmd: Avoid ZeroDivisionError on fast links. - * s3cmd: Unicodised all info() output. - -2008-12-30 Michal Ludvig - - * s3cmd: Replace unknown Unicode characters with '?' - to avoid UnicodeEncodeError's. Also make all output strings - unicode. - * run-tests.py: Exit on failed test. Fixed order of tests. - -2008-12-29 Michal Ludvig - - * TODO, NEWS: Updated - * s3cmd: Improved wildcard get. - * run-tests.py: Improved testsuite, added parameters support - to run only specified tests, cleaned up win/posix integration. - * S3/Exception.py: Python 2.4 doesn't automatically set - Exception.message. 
- -2008-12-29 Michal Ludvig - - * s3cmd, run-tests.py: Make it work on Windows. - -2008-12-26 Michal Ludvig - - * setup.cfg: Remove explicit install prefix. That should fix - Mac OS X and Windows "setup.py install" runs. - -2008-12-22 Michal Ludvig - - * s3cmd, S3/S3.py, S3/Progress.py: Display "[X of Y]" - in --progress mode. - * s3cmd, S3/Config.py: Implemented recursive [get]. - Added --skip-existing option for [get] and [sync]. - -2008-12-17 Michal Ludvig - - * TODO: Updated - -2008-12-14 Michal Ludvig - - * S3/Progress.py: Restructured import Utils to avoid import - conflicts. - -2008-12-12 Michal Ludvig - - * s3cmd: Better Exception output. Print sys.path on ImportError, - don't print backtrace on KeyboardInterrupt - -2008-12-11 Michal Ludvig - - * s3cmd: Support for multiple sources in 'get' command. - -2008-12-10 Michal Ludvig - - * TODO: Updated list. - * s3cmd: Don't display download/upload completed message - in --progress mode. - * S3/S3.py: Pass src/dst names down to Progress class. - * S3/Progress.py: added new class ProgressCR - apparently - ProgressANSI doesn't work on MacOS-X (and perhaps elsewhere). - * S3/Config.py: Default progress meter is now ProgressCR - * s3cmd: Updated email address for reporting bugs. - -2008-12-02 Michal Ludvig - - * s3cmd, S3/S3.py, NEWS: Support for (non-)recursive 'ls' - -2008-12-01 Michal Ludvig - - * Released version 0.9.9-pre3 - --------------------------- - - * S3/PkgInfo.py: Bumped up version to 0.9.9-pre3 - -2008-12-01 Michal Ludvig - - * run-tests.py: Added a lot of new tests. - * testsuite/etc/logo.png: New file. - -2008-11-30 Michal Ludvig - - * S3/S3.py: object_get() -- make start_position argument optional. - -2008-11-29 Michal Ludvig - - * s3cmd: Delete local files with "sync --delete-removed" - -2008-11-25 Michal Ludvig - - * s3cmd, S3/Progress.py: Fixed Unicode output in Progress meter. - * s3cmd: Fixed 'del --recursive' without prefix (i.e. all objects). - * TODO: Updated list. - * upload-to-sf.sh: Helper script. - * S3/PkgInfo.py: Bumped up version to 0.9.9-pre2+svn - -2008-11-24 Michal Ludvig - - * Released version 0.9.9-pre2 - ------------------------ - - * S3/PkgInfo.py: Bumped up version to 0.9.9-pre2 - * NEWS: Added 0.9.9-pre2 - -2008-11-24 Michal Ludvig - - * s3cmd, s3cmd.1, S3/S3.py: Display or don't display progress meter - default depends on whether we're on TTY (console) or not. - -2008-11-24 Michal Ludvig - - * s3cmd: Fixed 'get' conflict. - * s3cmd.1, TODO: Document 'mv' command. - -2008-11-24 Michal Ludvig - - * S3/S3.py, s3cmd, S3/Config.py, s3cmd.1: Added --continue for - 'get' command, improved 'get' failure resiliency. - * S3/Progress.py: Support for progress meter not starting in 0. - * S3/S3.py: improved retrying in send_request() and send_file() - -2008-11-24 Michal Ludvig - - * s3cmd, S3/S3.py, NEWS: "s3cmd mv" for moving objects - -2008-11-24 Michal Ludvig - - * S3/Utils.py: Common XML parser. - * s3cmd, S3/Exeptions.py: Print info message on Error. - -2008-11-21 Michal Ludvig - - * s3cmd: Support for 'cp' command. - * S3/S3.py: Added S3.object.copy() method. - * s3cmd.1: Document 'cp' command. - * NEWS: Let everyone know ;-) - Thanks Andrew Ryan for a patch proposal! - https://sourceforge.net/forum/forum.php?thread_id=2346987&forum_id=618865 - -2008-11-17 Michal Ludvig - - * S3/Progress.py: Two progress meter implementations. - * S3/Config.py, s3cmd: New --progress / --no-progress parameters - and Config() members. 
- * S3/S3.py: Call Progress() in send_file()/recv_file() - * NEWS: Let everyone know ;-) - -2008-11-16 Michal Ludvig - - * NEWS: Fetch 0.9.8.4 release news from 0.9.8.x branch. - -2008-11-16 Michal Ludvig - - Merge from 0.9.8.x branch, rel 251: - * S3/S3.py: Adjusting previous commit (orig 249) - it's not a good idea - to retry ALL failures. Especially not those code=4xx where AmazonS3 - servers are not happy with our requests. - Merge from 0.9.8.x branch, rel 249: - * S3/S3.py, S3/Exception.py: Re-issue failed requests in S3.send_request() - Merge from 0.9.8.x branch, rel 248: - * s3cmd: Don't leak open filehandles in sync. Thx Patrick Linskey for report. - Merge from 0.9.8.x branch, rel 247: - * s3cmd: Re-raise the right exception. - Merge from 0.9.8.x branch, rel 246: - * s3cmd, S3/S3.py, S3/Exceptions.py: Don't abort 'sync' or 'put' on files - that can't be open (e.g. Permision denied). Print a warning and skip over - instead. - Merge from 0.9.8.x branch, rel 245: - * S3/S3.py: Escape parameters in strings. Fixes sync to and - ls of directories with spaces. (Thx Lubomir Rintel from Fedora Project) - Merge from 0.9.8.x branch, rel 244: - * s3cmd: Unicode brainfuck again. This time force all output - in UTF-8, will see how many complaints we'll get... - -2008-09-16 Michal Ludvig - - * NEWS: s3cmd 0.9.8.4 released from branches/0.9.8.x SVN branch. - -2008-09-16 Michal Ludvig - - * S3/S3.py: Don't run into ZeroDivisionError when speed counter - returns 0s elapsed on upload/download file. - -2008-09-15 Michal Ludvig - - * s3cmd, S3/S3.py, S3/Utils.py, S3/S3Uri.py, S3/Exceptions.py: - Yet anoter Unicode round. Unicodised all command line arguments - before processing. - -2008-09-15 Michal Ludvig - - * S3/S3.py: "s3cmd mb" can create upper-case buckets again - in US. Non-US (e.g. EU) bucket names must conform to strict - DNS-rules. - * S3/S3Uri.py: Display public URLs correctly for non-DNS buckets. - -2008-09-10 Michal Ludvig - - * testsuite, run-tests.py: Added testsuite with first few tests. - -2008-09-10 Michal Ludvig - - * s3cmd, S3/S3Uri.py, S3/S3.py: All internal representations of - S3Uri()s are Unicode (i.e. not UTF-8 but type()==unicode). It - still doesn't work on non-UTF8 systems though. - -2008-09-04 Michal Ludvig - - * s3cmd: Rework UTF-8 output to keep sys.stdout untouched (or it'd - break 's3cmd get' to stdout for binary files). - -2008-09-03 Michal Ludvig - - * s3cmd, S3/S3.py, S3/Config.py: Removed --use-old-connect-method - again. Autodetect the need for old connect method instead. - -2008-09-03 Michal Ludvig - - * s3cmd, S3/S3.py: Make --verbose mode more useful and default - mode less verbose. - -2008-09-03 Michal Ludvig - - * s3cmd, S3/Config.py: [rb] Allow removal of non-empty buckets - with --force. - [mb, rb] Allow multiple arguments, i.e. create or remove - multiple buckets at once. - [del] Perform recursive removal with --recursive (or -r). - -2008-09-01 Michal Ludvig - - * s3cmd: Refuse 'sync' together with '--encrypt'. - * S3/S3.py: removed object_{get,put,delete}_uri() functions - and made object_{get,put,delete}() accept URI instead of - bucket/object parameters. 
- -2008-09-01 Michal Ludvig - - * S3/PkgInfo.py: Bumped up version to 0.9.9-pre1 - -2008-09-01 Michal Ludvig - - * s3cmd, S3/S3.py, S3/Config.py: Allow access to upper-case - named buckets again with --use-old-connect-method - (uses http://s3.amazonaws.com/bucket/object instead of - http://bucket.s3.amazonaws.com/object) - -2008-08-19 Michal Ludvig - - * s3cmd: Always output UTF-8, even on output redirects. - -2008-08-01 Michal Ludvig - - * TODO: Add some items - -2008-07-29 Michal Ludvig - - * Released version 0.9.8.3 - ------------------------ - -2008-07-29 Michal Ludvig - - * S3/PkgInfo.py: Bumped up version to 0.9.8.3 - * NEWS: Added 0.9.8.3 - -2008-07-29 Michal Ludvig - - * S3/Utils.py (hash_file_md5): Hash files in 32kB chunks - instead of reading it all up to a memory first to avoid - OOM on large files. - -2008-07-07 Michal Ludvig - - * s3cmd.1: couple of syntax fixes from Mikhail Gusarov - -2008-07-03 Michal Ludvig - - * Released version 0.9.8.2 - ------------------------ - -2008-07-03 Michal Ludvig - - * S3/PkgInfo.py: Bumped up version to 0.9.8.2 - * NEWS: Added 0.9.8.2 - * s3cmd: Print version info on 'unexpected error' output. - -2008-06-30 Michal Ludvig - - * S3/S3.py: Re-upload when Amazon doesn't send ETag - in PUT response. It happens from time to time for - unknown reasons. Thanks "Burtc" for report and - "hermzz" for fix. - -2008-06-27 Michal Ludvig - - * Released version 0.9.8.1 - ------------------------ - -2008-06-27 Michal Ludvig - - * S3/PkgInfo.py: Bumped up version to 0.9.8.1 - * NEWS: Added 0.9.8.1 - * s3cmd: make 'cfg' global - * run-tests.sh: Sort-of testsuite - -2008-06-23 Michal Ludvig - - * Released version 0.9.8 - ---------------------- - -2008-06-23 Michal Ludvig - - * S3/PkgInfo.py: Bumped up version to 0.9.8 - * NEWS: Added 0.9.8 - * TODO: Removed completed tasks - -2008-06-23 Michal Ludvig - - * s3cmd: Last-minute compatibility fixes for Python 2.4 - * s3cmd, s3cmd.1: --debug-exclude is an alias for --debug-syncmatch - * s3cmd: Don't require $HOME env variable to be set. - Fixes #2000133 - * s3cmd: Wrapped all execution in a try/except block - to catch all exceptions and ask for a report. - -2008-06-18 Michal Ludvig - - * S3/PkgInfo.py: Version 0.9.8-rc3 - -2008-06-18 Michal Ludvig - - * S3/S3.py: Bucket name can't contain upper-case letters (S3/DNS limitation). - -2008-06-12 Michal Ludvig - - * S3/PkgInfo.py: Version 0.9.8-rc2 - -2008-06-12 Michal Ludvig - - * s3cmd, s3cmd.1: Added GLOB (shell-style wildcard) exclude, renamed - orig regexp-style --exclude to --rexclude - -2008-06-11 Michal Ludvig - - * S3/PkgInfo.py: Version 0.9.8-rc1 - -2008-06-11 Michal Ludvig - - * s3cmd: Remove python 2.5 specific code (try/except/finally - block) and make s3cmd compatible with python 2.4 again. - * s3cmd, S3/Config.py, s3cmd.1: Added --exclude-from and --debug-syncmatch - switches for sync. - -2008-06-10 Michal Ludvig - - * s3cmd: Added --exclude switch for sync. - * s3cmd.1, NEWS: Document --exclude - -2008-06-05 Michal Ludvig - - * Released version 0.9.7 - ---------------------- - -2008-06-05 Michal Ludvig - - * S3/PkgInfo.py: Bumped up version to 0.9.7 - * NEWS: Added 0.9.7 - * TODO: Removed completed tasks - * s3cmd, s3cmd.1: Updated help texts, - removed --dry-run option as it's not implemented. - -2008-06-05 Michal Ludvig - - * S3/Config.py: Store more file attributes in sync to S3. - * s3cmd: Make sync remote2local more error-resilient. 
- -2008-06-04 Michal Ludvig - - * s3cmd: Implemented cmd_sync_remote2local() for restoring - backup from S3 to a local filesystem - * S3/S3.py: S3.object_get_uri() now requires writable stream - and not a path name. - * S3/Utils.py: Added mkdir_with_parents() - -2008-06-04 Michal Ludvig - - * s3cmd: Refactored cmd_sync() in preparation - for remote->local sync. - -2008-04-30 Michal Ludvig - - * s3db, S3/SimpleDB.py: Implemented almost full SimpleDB API. - -2008-04-29 Michal Ludvig - - * s3db, S3/SimpleDB.py: Initial support for Amazon SimpleDB. - For now implements ListDomains() call and most of the - infrastructure required for request creation. - -2008-04-29 Michal Ludvig - - * S3/Exceptions.py: Exceptions moved out of S3.S3 - * S3/SortedDict.py: rewritten from scratch to preserve - case of keys while still sorting in case-ignore mode. - -2008-04-28 Michal Ludvig - - * S3/S3.py: send_file() now computes MD5 sum of the file - being uploaded, compares with ETag returned by Amazon - and retries upload if they don't match. - -2008-03-05 Michal Ludvig - - * s3cmd, S3/S3.py, S3/Utils.py: Throttle upload speed and retry - when upload failed. - Report download/upload speed and time elapsed. - -2008-02-28 Michal Ludvig - - * Released version 0.9.6 - ---------------------- - -2008-02-28 Michal Ludvig - - * S3/PkgInfo.py: bumped up version to 0.9.6 - * NEWS: What's new in 0.9.6 - -2008-02-27 Michal Ludvig - - * s3cmd, s3cmd.1: Updated help and man page. - * S3/S3.py, S3/Utils.py, s3cmd: Support for 's3cmd info' command. - * s3cmd: Fix crash when 'sync'ing files with unresolvable owner uid/gid. - * S3/S3.py, S3/Utils.py: open files in binary mode (otherwise windows - users have problems). - * S3/S3.py: modify 'x-amz-date' format (problems reported on MacOS X). - Thanks Jon Larkowski for fix. - -2008-02-27 Michal Ludvig - - * TODO: Updated wishlist. - -2008-02-11 Michal Ludvig - - * S3/S3.py: Properly follow RedirectPermanent responses for EU buckets - * S3/S3.py: Create public buckets with -P (#1837328) - * S3/S3.py, s3cmd: Correctly display public URL on uploads. - * S3/S3.py, S3/Config.py: Support for MIME types. Both - default and guessing. Fixes bug #1872192 (Thanks Martin Herr) - -2007-11-13 Michal Ludvig - - * Released version 0.9.5 - ---------------------- - -2007-11-13 Michal Ludvig - - * S3/S3.py: Support for buckets stored in Europe, access now - goes via <bucket>.s3.amazonaws.com where possible. - -2007-11-12 Michal Ludvig - - * s3cmd: Support for storing file attributes (like ownership, - mode, etc) in sync operation. - * s3cmd, S3/S3.py: New command 'ib' to get information about - bucket (only 'LocationConstraint' supported for now). - -2007-10-01 Michal Ludvig - - * s3cmd: Fix typo in argument name (patch - from Kim-Minh KAPLAN, SF #1804808) - -2007-09-25 Michal Ludvig - - * s3cmd: Exit with error code on error (patch - from Kim-Minh KAPLAN, SF #1800583) - -2007-09-25 Michal Ludvig - - * S3/S3.py: Don't fail if bucket listing doesn't have - <IsTruncated> node. - * s3cmd: Create ~/.s3cfg with 0600 permissions. - -2007-09-13 Michal Ludvig - - * s3cmd: Improved 'sync' - * S3/S3.py: Support for buckets with over 1000 objects. - -2007-09-03 Michal Ludvig - - * s3cmd: Small tweaks to --configure workflow. - -2007-09-02 Michal Ludvig - - * s3cmd: Initial support for 'sync' operation. For - now only local->s3 direction. In this version doesn't - work well with non-ASCII filenames and doesn't support - encryption.
- -2007-08-24 Michal Ludvig - - * s3cmd, S3/Util.py: More ElementTree imports cleanup - -2007-08-19 Michal Ludvig - - * NEWS: Added news for 0.9.5 - -2007-08-19 Michal Ludvig - - * s3cmd: Better handling of multiple arguments for put, get and del - -2007-08-14 Michal Ludvig - - * setup.py, S3/Utils.py: Try import xml.etree.ElementTree - or elementtree.ElementTree module. - -2007-08-14 Michal Ludvig - - * s3cmd.1: Add info about --encrypt parameter. - -2007-08-14 Michal Ludvig - - * S3/PkgInfo.py: Bump up version to 0.9.5-pre - -2007-08-13 Michal Ludvig - - * Released version 0.9.4 - ---------------------- - -2007-08-13 Michal Ludvig - - * S3/S3.py: Added function urlencode_string() that encodes - non-ascii characters in object name before sending it to S3. - -2007-08-13 Michal Ludvig - - * README: Updated Amazon S3 pricing overview - -2007-08-13 Michal Ludvig - - * s3cmd, S3/Config.py, S3/S3.py: HTTPS support - -2007-07-20 Michal Ludvig - - * setup.py: Check correct Python version and ElementTree availability. - -2007-07-05 Michal Ludvig - - * s3cmd: --configure support for Proxy - * S3/S3.py: HTTP proxy support from - John D. Rowell - -2007-06-19 Michal Ludvig - - * setup.py: Check for S3CMD_PACKAGING and don't install - manpages and docs if defined. - * INSTALL: Document the above change. - * MANIFEST.in: Include uncompressed manpage - -2007-06-17 Michal Ludvig - - * s3cmd: Added encryption key support to --configure - * S3/PkgInfo.py: Bump up version to 0.9.4-pre - * setup.py: Cleaned up some rpm-specific stuff that - caused problems to Debian packager Mikhail Gusarov - * setup.cfg: Removed [bdist_rpm] section - * MANIFEST.in: Include S3/*.py - -2007-06-16 Michal Ludvig - - * s3cmd.1: Syntax fixes from Mikhail Gusarov - -2007-05-27 Michal Ludvig - - * Support for on-the-fly GPG encryption. - -2007-05-26 Michal Ludvig - - * s3cmd.1: Add info about "s3cmd du" command. - -2007-05-26 Michal Ludvig - - * Released version 0.9.3 - ---------------------- - -2007-05-26 Michal Ludvig - - * s3cmd: Patch from Basil Shubin - adding support for "s3cmd du" command. - * s3cmd: Modified output format of "s3cmd du" to conform - with unix "du". - * setup.cfg: Require Python 2.5 in RPM. Otherwise it needs - to require additional python modules (e.g. ElementTree) - which may have different names in different distros. It's - indeed still possible to manually install s3cmd with - Python 2.4 and appropriate modules. - -2007-04-09 Michal Ludvig - - * Released version 0.9.2 - ---------------------- - -2007-04-09 Michal Ludvig - - * s3cmd.1: Added manpage - * Updated infrastructure files to create "better" - distribution archives. - -2007-03-26 Michal Ludvig - - * setup.py, S3/PkgInfo.py: Move package info out of setup.py - * s3cmd: new parameter --version - * s3cmd, S3/S3Uri.py: Output public HTTP URL for objects - stored with Public ACL. - -2007-02-28 Michal Ludvig - - * s3cmd: Verify supplied accesskey and secretkey - in interactive configuration path. - * S3/Config.py: Hide access key and secret key - from debug output. - * S3/S3.py: Modify S3Error exception to work - in python 2.4 (=> don't expect Exception is - a new-style class). - * s3cmd: Updated for the above change. - -2007-02-19 Michal Ludvig - - * NEWS, INSTALL, README, setup.py: Added - more documentation. - -2007-02-19 Michal Ludvig - - * S3/S3.py, s3cmd: New feature - allow "get" to stdout - -2007-02-19 Michal Ludvig - - * S3/S3fs.py: Removed (development moved to branch s3fs-devel). 
- -2007-02-08 Michal Ludvig - - * S3/S3fs.py: - - Implemented mknod() - - Can create directory structure - - Rewritten to use SQLite3. Currently can create - the filesystem, and a root inode. - -2007-02-07 Michal Ludvig - - * s3cmd (from /s3py:74): Renamed SVN top-level project - s3py to s3cmd - -2007-02-07 Michal Ludvig - - * setup.cfg: Only require Python 2.4, not 2.5 - * S3/Config.py: Removed show_uri - no longer needed, - it's now default - -2007-02-07 Michal Ludvig - - * setup.py - - Version 0.9.1 - -2007-02-07 Michal Ludvig - - * s3cmd: Change all "exit()" calls to "sys.exit()" - and allow for python 2.4 - * S3/S3.py: Removed dependency on hashlib -> allow for python 2.4 - -2007-01-27 Michal Ludvig - - * S3/S3.py, S3/S3Uri.py: Case insensitive regex in S3Uri.py - -2007-01-26 Michal Ludvig - - * S3/S3fs.py: Added support for stroing/loading inodes. - No data yet however. - -2007-01-26 Michal Ludvig - - * S3/S3fs.py: Initial version of S3fs module. - Can create filesystem via "S3fs.mkfs()" - -2007-01-26 Michal Ludvig - - * S3/BidirMap.py, S3/Config.py, S3/S3.py, S3/S3Uri.py, - S3/SortedDict.py, S3/Utils.py, s3cmd: Added headers with - copyright to all files - * S3/S3.py, S3/S3Uri.py: Removed S3.compose_uri(), introduced - S3UriS3.compose_uri() instead. - -2007-01-26 Michal Ludvig - - * S3/S3.py, S3/S3Uri.py, s3cmd: - - Converted all users of parse_uri to S3Uri class API - - Removed "cp" command again. Will have to use 'put' - and 'get' for now. - -2007-01-25 Michal Ludvig - - * S3/S3Uri.py: New module S3/S3Uri.py - * S3/S3.py, s3cmd: Converted "put" operation to use - the new S3Uri class. - -2007-01-24 Michal Ludvig - - * S3/S3.py - * s3cmd - - Added 'cp' command - - Renamed parse_s3_uri to parse_uri (this will go away anyway) - -2007-01-19 Michal Ludvig - - * setup.cfg - * setup.py - - Include README into tarballs - -2007-01-19 Michal Ludvig - - * README - - Added comprehensive README file - -2007-01-19 Michal Ludvig - - * setup.cfg - * setup.py - - Added configuration for setup.py sdist - -2007-01-19 Michal Ludvig - - * S3/Config.py - * s3cmd - - Added interactive configurator (--configure) - - Added config dumper (--dump-config) - - Improved --help output - -2007-01-19 Michal Ludvig - - * setup.cfg - * setup.py - Added info for building RPM packages. - -2007-01-18 Michal Ludvig - - * S3/Config.py - * S3/S3.py - * s3cmd - Moved class Config from S3/S3.py to S3/Config.py - -2007-01-18 Michal Ludvig - - * S3/Config.py (from /s3py/trunk/S3/ConfigParser.py:47) - * S3/ConfigParser.py - * S3/S3.py - Renamed S3/ConfigParser.py to S3/Config.py - -2007-01-18 Michal Ludvig - - * s3cmd - Added info about homepage - -2007-01-17 Michal Ludvig - - * S3/S3.py - * s3cmd - - Use prefix for listings if specified. - - List all commands in --help - -2007-01-16 Michal Ludvig - - * S3/S3.py - * s3cmd - Major rework of Config class: - - Renamed from AwsConfig to Config - - Converted to Singleton (see Config.__new__() and an article on - Wikipedia) - - No more explicit listing of options - use introspection to get them - (class variables that of type str, int or bool that don't start with - underscore) - - Check values read from config file and verify their type. - - Added OptionMimeType and -m/-M options. Not yet implemented - functionality in the rest of S3/S3.py - -2007-01-15 Michal Ludvig - - * S3/S3.py - * s3cmd - - Merged list-buckets and bucket-list-objects operations into - a single 'ls' command. 
- - New parameter -P for uploading publicly readable objects - -2007-01-14 Michal Ludvig - - * s3.py - * setup.py - Renamed s3.py to s3cmd (take 2) - -2007-01-14 Michal Ludvig - - * s3cmd (from /s3py/trunk/s3.py:45) - Renamed s3.py to s3cmd - -2007-01-14 Michal Ludvig - - * S3 - * S3/S3.py - * s3.py - * setup.py - All classes from s3.py go to S3/S3.py - Added setup.py - -2007-01-14 Michal Ludvig - - * s3.py - Minor fix S3.utils -> S3.Utils - -2007-01-14 Michal Ludvig - - * .svnignore - * BidirMap.py - * ConfigParser.py - * S3 - * S3/BidirMap.py (from /s3py/trunk/BidirMap.py:35) - * S3/ConfigParser.py (from /s3py/trunk/ConfigParser.py:38) - * S3/SortedDict.py (from /s3py/trunk/SortedDict.py:35) - * S3/Utils.py (from /s3py/trunk/utils.py:39) - * S3/__init__.py - * SortedDict.py - * s3.py - * utils.py - Moved modules to their own package - -2007-01-12 Michal Ludvig - - * s3.py - Added "del" command - Converted all (?) commands to accept s3-uri - Added -u/--show-uri parameter - -2007-01-11 Michal Ludvig - - * s3.py - Verify MD5 on received files - Improved upload of multiple files - Initial S3-URI support (more tbd) - -2007-01-11 Michal Ludvig - - * s3.py - Minor fixes: - - store names of parsed files in AwsConfig - - Print total size with upload/download - -2007-01-11 Michal Ludvig - - * s3.py - * utils.py - Added support for sending and receiving files. - -2007-01-11 Michal Ludvig - - * ConfigParser.py - * s3.py - List all Objects in all Buckets command - Yet another logging improvement - Version check for Python 2.5 or higher - -2007-01-11 Michal Ludvig - - * ConfigParser.py - * s3.py - * utils.py - Added ConfigParser - Improved setting logging levels - It can now quite reliably list buckets and objects - -2007-01-11 Michal Ludvig - - * .svnignore - Added ignore list - -2007-01-11 Michal Ludvig - - * .svnignore - * BidirMap.py - * SortedDict.py - * s3.py - * utils.py - Initial import diff --git a/INSTALL b/INSTALL index 96bda33..f3cba51 100644 --- a/INSTALL +++ b/INSTALL @@ -1,14 +1,11 @@ Installation of s3cmd package ============================= -Author: - Michal Ludvig +Copyright: + TGRMN Software and contributors S3tools / S3cmd project homepage: - http://s3tools.sourceforge.net - -Amazon S3 homepage: - http://aws.amazon.com/s3 + http://s3tools.org !!! !!! Please consult README file for setup, usage and examples! @@ -17,10 +14,11 @@ Package formats --------------- S3cmd is distributed in two formats: + 1) Prebuilt RPM file - should work on most RPM-based distributions + 2) Source .tar.gz package - Installation of RPM package @@ -36,12 +34,13 @@ distribution documentation on ways to solve the problem. -Installation of source .tar.gz package --------------------------------------- +Installation from zip file +-------------------------- There are three options to run s3cmd from source tarball: -1) S3cmd program as distributed in s3cmd-X.Y.Z.tar.gz - can be run directly from where you untar'ed the package. +1) The S3cmd program, as distributed in s3cmd-X.Y.Z.tar.gz + on SourceForge or in master.zip on GitHub, can be run directly + from where you unzipped the package. 2) Or you may want to move "s3cmd" file and "S3" subdirectory to some other path. Make sure that "S3" subdirectory ends up @@ -51,7 +50,8 @@ you will have $HOME/bin/s3cmd file and $HOME/bin/S3 directory with a number of support files. 
-3) The cleanest and most recommended approach is to run +3) The cleanest and most recommended approach is to unzip the + package and then just run: python setup.py install @@ -65,8 +65,15 @@ Again, consult your distribution documentation on how to find out the actual package name and how to install it then. + Note that on Linux, if you are not "root" already, you may + need to run: + + sudo python setup.py install -Note to distibutions package maintainers + instead. + + +Note to distributions package maintainers ---------------------------------------- Define shell environment variable S3CMD_PACKAGING=yes if you don't want setup.py to install manpages and doc files. You'll @@ -86,12 +93,6 @@ s3tools-general@lists.sourceforge.net -For more information refer to: -* S3cmd / S3tools homepage at http://s3tools.sourceforge.net +or visit the S3cmd / S3tools homepage at: -Enjoy! - -Michal Ludvig -* michal@logix.cz -* http://www.logix.cz/michal - + http://s3tools.org diff --git a/LICENSE b/LICENSE deleted file mode 100644 index d159169..0000000 --- a/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. 
- - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. 
If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. 
-However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. 
Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details.
- - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - <signature of Ty Coon>, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 4d3d4be..0000000 --- a/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include INSTALL README NEWS -include s3cmd.1 diff --git a/Makefile b/Makefile deleted file mode 100644 index ee07f82..0000000 --- a/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -VERSION := 1.5.0 -SHELL := /bin/bash -SPEC := s3cmd.spec -COMMIT := $(shell git rev-parse HEAD) -SHORTCOMMIT := $(shell git rev-parse --short=8 HEAD) -TARBALL = s3cmd-$(VERSION)-$(SHORTCOMMIT).tar.gz - -release: - python setup.py register sdist upload - -clean: - -rm -rf s3cmd-*.tar.gz *.rpm *~ $(SPEC) - -find . -name \*.pyc -exec rm \{\} \; - -find . -name \*.pyo -exec rm \{\} \; - -$(SPEC): $(SPEC).in - sed -e 's/##VERSION##/$(VERSION)/' \ - -e 's/##COMMIT##/$(COMMIT)/' \ - -e 's/##SHORTCOMMIT##/$(SHORTCOMMIT)/' \ - $(SPEC).in > $(SPEC) - -tarball: - git archive --format tar --prefix s3cmd-$(COMMIT)/ HEAD | gzip -c > $(TARBALL) - -# Use older digest algorithms for local rpmbuilds, as EPEL5 and -# earlier releases need this. When building using mock for a -# particular target, it will use the proper (newer) digests if that -# target supports it. -rpm: clean tarball $(SPEC) - tmp_dir=`mktemp -d` ; \ - mkdir -p $${tmp_dir}/{BUILD,RPMS,SRPMS,SPECS,SOURCES} ; \ - cp $(TARBALL) $${tmp_dir}/SOURCES ; \ - cp $(SPEC) $${tmp_dir}/SPECS ; \ - cd $${tmp_dir} > /dev/null 2>&1; \ - rpmbuild -ba --define "_topdir $${tmp_dir}" \ - --define "_source_filedigest_algorithm 0" \ - --define "_binary_filedigest_algorithm 0" \ - --define "dist %{nil}" \ - SPECS/$(SPEC) ; \ - cd - > /dev/null 2>&1; \ - cp $${tmp_dir}/RPMS/noarch/* $${tmp_dir}/SRPMS/* .
; \ - rm -rf $${tmp_dir} ; \ - rpmlint *.rpm *.spec diff --git a/NEWS b/NEWS index affddc5..a4b449c 100644 --- a/NEWS +++ b/NEWS @@ -1,3 +1,7 @@ +s3cmd 1.5.0-rc1 - 2014-06-29 +=============== +[TODO - extract from: git log --no-merges v1.5.0-beta1..] + s3cmd 1.5.0-beta1 - 2013-12-02 ================= * Brought to you by Matt Domsch and contributors, thanks guys! :) diff --git a/PKG-INFO b/PKG-INFO new file mode 100644 index 0000000..f9c3ddc --- /dev/null +++ b/PKG-INFO @@ -0,0 +1,23 @@ +Metadata-Version: 1.1 +Name: s3cmd +Version: 1.5.0-rc1 +Summary: Command line tool for managing Amazon S3 and CloudFront services +Home-page: http://s3tools.org +Author: Michal Ludvig +Author-email: michal@logix.cz +License: GPL version 2 +Description: + + S3cmd lets you copy files from/to Amazon S3 + (Simple Storage Service) using a simple-to-use + command line client. Supports rsync-like backup, + GPG encryption, and more. Also supports management + of Amazon's CloudFront content delivery network. + + + Authors: + -------- + Michal Ludvig + +Platform: UNKNOWN +Requires: dateutil diff --git a/README b/README index feae6d9..5fed0ee 100644 --- a/README +++ b/README @@ -3,11 +3,13 @@ Author: Michal Ludvig + Copyright (c) TGRMN Software - http://www.tgrmn.com - and contributors S3tools / S3cmd project homepage: http://s3tools.org S3tools / S3cmd mailing lists: + * Announcements of new releases: s3tools-announce@lists.sourceforge.net @@ -16,31 +18,49 @@ * Bug reports s3tools-bugs@lists.sourceforge.net - -Amazon S3 homepage: - http://aws.amazon.com/s3 !!! !!! Please consult INSTALL file for installation instructions! !!! + +What is S3cmd +-------------- +S3cmd is a free command line tool and client for uploading, +retrieving and managing data in Amazon S3 and other cloud +storage service providers that use the S3 protocol, such as +Google Cloud Storage or DreamHost DreamObjects. It is best +suited for power users who are familiar with command line +programs. It is also ideal for batch scripts and automated +backup to S3, triggered from cron, etc. + +S3cmd is written in Python. It's an open source project +available under the GNU General Public License v2 (GPLv2) and is free +for both commercial and private use. You will only have +to pay Amazon for using their storage. + +Lots of features and options have been added to S3cmd +since its very first release in 2008. We recently counted +more than 60 command line options, including multipart +uploads, encryption, incremental backup, s3 sync, ACL and +Metadata management, S3 bucket size, bucket policies, and +more! What is Amazon S3 ----------------- Amazon S3 provides a managed internet-accessible storage service where anyone can store any amount of data and -retrieve it later again. Maximum amount of data in one -"object" is 5GB, maximum number of objects is not limited. - -S3 is a paid service operated by the well known Amazon.com -internet book shop. Before storing anything into S3 you -must sign up for an "AWS" account (where AWS = Amazon Web -Services) to obtain a pair of identifiers: Access Key and -Secret Key. You will need to give these keys to S3cmd. +retrieve it again later. + +S3 is a paid service operated by Amazon. Before storing +anything into S3 you must sign up for an "AWS" account +(where AWS = Amazon Web Services) to obtain a pair of +identifiers: Access Key and Secret Key. You will need to +give these keys to S3cmd. Think of them as if they were a username and password for your S3 account.
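Since this release the keys do not have to come from the configuration file alone. The S3/Config.py changes later in this patch resolve them in a fixed order: explicit access_key/secret_key values passed in from the command line win, then the values read from the config file, then the AWS_ACCESS_KEY/AWS_ACCESS_KEY_ID and AWS_SECRET_KEY/AWS_SECRET_ACCESS_KEY environment variables, and finally IAM role credentials. A minimal sketch of that lookup order (resolve_keys() is an illustrative helper, not a function that exists in s3cmd; cfg stands for the loaded Config singleton, whose role_config() method performs the IAM instance-profile lookup):

    import os

    def resolve_keys(cli_access, cli_secret, cfg):
        # Order implemented by the patched S3/Config.py:
        # command line > config file > environment > IAM role.
        if cli_access and cli_secret:          # passed on the command line
            return cli_access, cli_secret
        if cfg.access_key:                     # values read from ~/.s3cfg
            return cfg.access_key, cfg.secret_key
        access = os.environ.get("AWS_ACCESS_KEY") or os.environ.get("AWS_ACCESS_KEY_ID")
        secret = os.environ.get("AWS_SECRET_KEY") or os.environ.get("AWS_SECRET_ACCESS_KEY")
        if access:                             # fall back to the environment
            return access, secret
        cfg.role_config()                      # last resort: IAM instance-profile lookup
        return cfg.access_key, cfg.secret_key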
-Pricing explained ------------------ +Amazon S3 pricing explained +--------------------------- At the time of this writing the costs of using S3 are (in USD): $0.15 per GB per month of storage space used @@ -335,11 +355,16 @@ For more information refer to: * S3cmd / S3tools homepage at http://s3tools.org -* Amazon S3 homepage at http://aws.amazon.com/s3 - -Enjoy! - -Michal Ludvig -* michal@logix.cz -* http://www.logix.cz/michal - + +=========================================================================== +Copyright (C) 2014 TGRMN Software - http://www.tgrmn.com - and contributors + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. \ No newline at end of file diff --git a/S3/ACL.py b/S3/ACL.py index 2408d06..71a29ae 100644 --- a/S3/ACL.py +++ b/S3/ACL.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors from Utils import getTreeFromXml @@ -159,14 +160,14 @@ grantee.permission = permission if name.find('@') > -1: - grantee.name = grantee.name.lower + grantee.name = grantee.name.lower() grantee.xsi_type = "AmazonCustomerByEmail" grantee.tag = "EmailAddress" elif name.find('http://acs.amazonaws.com/groups/') > -1: grantee.xsi_type = "Group" grantee.tag = "URI" else: - grantee.name = grantee.name.lower + grantee.name = grantee.name.lower() grantee.xsi_type = "CanonicalUser" grantee.tag = "ID" diff --git a/S3/AccessLog.py b/S3/AccessLog.py index 7ae99ef..6db70b1 100644 --- a/S3/AccessLog.py +++ b/S3/AccessLog.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors import S3Uri from Exceptions import ParameterError diff --git a/S3/BidirMap.py b/S3/BidirMap.py index 0d2849a..c0417cc 100644 --- a/S3/BidirMap.py +++ b/S3/BidirMap.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors class BidirMap(object): def __init__(self, **map): diff --git a/S3/CloudFront.py b/S3/CloudFront.py index 83ca07d..eb81ea9 100644 --- a/S3/CloudFront.py +++ b/S3/CloudFront.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors import sys import time @@ -15,6 +16,7 @@ except ImportError: import elementtree.ElementTree as ET +from S3 import S3 from Config import Config from Exceptions import * from Utils import getTreeFromXml, appendXmlTextNode, getDictFromTree, dateS3toPython, sign_string, getBucketFromHostname, getHostnameFromBucket @@ -280,11 +282,12 @@ def __str__(self): tree = ET.Element("InvalidationBatch") + s3 = S3(Config()) for path in self.paths: if len(path) < 1 or path[0] != "/": path = "/" + path - appendXmlTextNode("Path", path, tree) + appendXmlTextNode("Path", s3.urlencode_string(path), tree) appendXmlTextNode("CallerReference", self.reference, tree) return ET.tostring(tree) @@ -432,7 +435,7 @@ new_paths = [] default_index_suffix = '/' + default_index_file for path in paths: - if path.endswith(default_index_suffix) 
or path == default_index_file: + if path.endswith(default_index_suffix) or path == default_index_file: if invalidate_default_index_on_cf: new_paths.append(path) if invalidate_default_index_root_on_cf: diff --git a/S3/Config.py b/S3/Config.py index 302b9d0..7a55589 100644 --- a/S3/Config.py +++ b/S3/Config.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors import logging from logging import debug, info, warning, error @@ -92,7 +93,6 @@ debug_exclude = {} debug_include = {} encoding = "utf-8" - add_content_encoding = True urlencoding_mode = "normal" log_target_prefix = "" reduced_redundancy = False @@ -110,22 +110,37 @@ cache_file = "" add_headers = "" ignore_failed_copy = False + expiry_days = "" + expiry_date = "" + expiry_prefix = "" ## Creating a singleton - def __new__(self, configfile = None): + def __new__(self, configfile = None, access_key=None, secret_key=None): if self._instance is None: self._instance = object.__new__(self) return self._instance - def __init__(self, configfile = None): + def __init__(self, configfile = None, access_key=None, secret_key=None): if configfile: try: self.read_config_file(configfile) except IOError, e: if 'AWS_CREDENTIAL_FILE' in os.environ: self.env_config() + + # override these if passed on the command-line + if access_key and secret_key: + self.access_key = access_key + self.secret_key = secret_key + if len(self.access_key)==0: - self.role_config() + env_access_key = os.environ.get("AWS_ACCESS_KEY", None) or os.environ.get("AWS_ACCESS_KEY_ID", None) + env_secret_key = os.environ.get("AWS_SECRET_KEY", None) or os.environ.get("AWS_SECRET_ACCESS_KEY", None) + if env_access_key: + self.access_key = env_access_key + self.secret_key = env_secret_key + else: + self.role_config() def role_config(self): if sys.version_info[0] * 10 + sys.version_info[1] < 26: @@ -223,31 +238,43 @@ def update_option(self, option, value): if value is None: return + #### Handle environment reference if str(value).startswith("$"): return self.update_option(option, os.getenv(str(value)[1:])) + #### Special treatment of some options ## verbosity must be known to "logging" module if option == "verbosity": + # support integer verbosities try: - setattr(Config, "verbosity", logging._levelNames[value]) - except KeyError: - error("Config: verbosity level '%s' is not valid" % value) + value = int(value) + except ValueError, e: + try: + # otherwise it must be a key known to the logging module + value = logging._levelNames[value] + except KeyError: + error("Config: verbosity level '%s' is not valid" % value) + return + ## allow yes/no, true/false, on/off and 1/0 for boolean options elif type(getattr(Config, option)) is type(True): # bool if str(value).lower() in ("true", "yes", "on", "1"): - setattr(Config, option, True) + value = True elif str(value).lower() in ("false", "no", "off", "0"): - setattr(Config, option, False) + value = False else: error("Config: value of option '%s' must be Yes or No, not '%s'" % (option, value)) + return + elif type(getattr(Config, option)) is type(42): # int try: - setattr(Config, option, int(value)) + value = int(value) except ValueError, e: error("Config: value of option '%s' must be an integer, not '%s'" % (option, value)) - else: # string - setattr(Config, option, value) + return + + setattr(Config, option, value) class ConfigParser(object): def __init__(self, file, sections = []): @@ -305,6 +332,12 @@ def dump(self, section, config):
self.stream.write("[%s]\n" % section) for option in config.option_list(): - self.stream.write("%s = %s\n" % (option, getattr(config, option))) + value = getattr(config, option) + if option == "verbosity": + # we turn level numbers back into strings if possible + if isinstance(value,int) and value in logging._levelNames: + value = logging._levelNames[value] + + self.stream.write("%s = %s\n" % (option, value)) # vim:et:ts=4:sts=4:ai diff --git a/S3/ConnMan.py b/S3/ConnMan.py index fbec921..681b76a 100644 --- a/S3/ConnMan.py +++ b/S3/ConnMan.py @@ -1,3 +1,9 @@ +## Amazon S3 manager +## Author: Michal Ludvig +## http://www.logix.cz/michal +## License: GPL Version 2 +## Copyright: TGRMN Software and contributors + import httplib from urlparse import urlparse from threading import Semaphore @@ -34,7 +40,7 @@ conn = None if cfg.proxy_host != "": if ssl: - raise ParameterError("use_ssl=True can't be used with proxy") + raise ParameterError("use_https=True can't be used with proxy") conn_id = "proxy://%s:%s" % (cfg.proxy_host, cfg.proxy_port) else: conn_id = "http%s://%s" % (ssl and "s" or "", hostname) diff --git a/S3/Exceptions.py b/S3/Exceptions.py index b0671e5..4486ce1 100644 --- a/S3/Exceptions.py +++ b/S3/Exceptions.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors from Utils import getTreeFromXml, unicodise, deunicodise from logging import debug, info, warning, error @@ -45,14 +46,13 @@ for header in response["headers"]: debug("HttpHeader: %s: %s" % (header, response["headers"][header])) if response.has_key("data") and response["data"]: - tree = getTreeFromXml(response["data"]) - error_node = tree - if not error_node.tag == "Error": - error_node = tree.find(".//Error") - for child in error_node.getchildren(): - if child.text != "": - debug("ErrorXML: " + child.tag + ": " + repr(child.text)) - self.info[child.tag] = child.text + try: + tree = getTreeFromXml(response["data"]) + except ET.ParseError: + debug("Not an XML response") + else: + self.info.update(self.parse_error_xml(tree)) + self.code = self.info["Code"] self.message = self.info["Message"] self.resource = self.info["Resource"] @@ -63,6 +63,20 @@ if self.info.has_key("Message"): retval += (u": %s" % self.info["Message"]) return retval + + @staticmethod + def parse_error_xml(tree): + info = {} + error_node = tree + if not error_node.tag == "Error": + error_node = tree.find(".//Error") + for child in error_node.getchildren(): + if child.text != "": + debug("ErrorXML: " + child.tag + ": " + repr(child.text)) + info[child.tag] = child.text + + return info + class CloudFrontError(S3Error): pass diff --git a/S3/ExitCodes.py b/S3/ExitCodes.py new file mode 100644 index 0000000..7cfb108 --- /dev/null +++ b/S3/ExitCodes.py @@ -0,0 +1,16 @@ +# patterned on /usr/include/sysexits.h + +EX_OK = 0 +EX_GENERAL = 1 +EX_SOMEFAILED = 2 # some parts of the command succeeded, while others failed +EX_USAGE = 64 # The command was used incorrectly (e.g. bad command line syntax) +EX_SOFTWARE = 70 # internal software error (e.g. S3 error of unknown specificity) +EX_OSERR = 71 # system error (e.g. out of memory) +EX_OSFILE = 72 # OS error (e.g. invalid Python version) +EX_IOERR = 74 # An error occurred while doing I/O on some file. 
+EX_TEMPFAIL = 75 # temporary failure (S3DownloadError or similar, retry later) +EX_NOPERM = 77 # Insufficient permissions to perform the operation on S3 +EX_CONFIG = 78 # Configuration file error +_EX_SIGNAL = 128 +_EX_SIGINT = 2 +EX_BREAK = _EX_SIGNAL + _EX_SIGINT # Control-C (KeyboardInterrupt raised) diff --git a/S3/FileDict.py b/S3/FileDict.py index 7bf9368..4f2ab6f 100644 --- a/S3/FileDict.py +++ b/S3/FileDict.py @@ -2,9 +2,15 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors +import logging from SortedDict import SortedDict import Utils +import Config + +zero_length_md5 = "d41d8cd98f00b204e9800998ecf8427e" +cfg = Config.Config() class FileDict(SortedDict): def __init__(self, mapping = {}, ignore_case = True, **kwargs): @@ -13,11 +19,14 @@ self.by_md5 = dict() # {md5: set(relative_files)} def record_md5(self, relative_file, md5): + if md5 is None: return + if md5 == zero_length_md5: return if md5 not in self.by_md5: self.by_md5[md5] = set() self.by_md5[md5].add(relative_file) def find_md5_one(self, md5): + if md5 is None: return None try: return list(self.by_md5.get(md5, set()))[0] except: @@ -29,13 +38,16 @@ if 'md5' in self[relative_file]: return self[relative_file]['md5'] md5 = self.get_hardlink_md5(relative_file) - if md5 is None: + if md5 is None and 'md5' in cfg.sync_checks: + logging.debug(u"doing file I/O to read md5 of %s" % relative_file) md5 = Utils.hash_file_md5(self[relative_file]['full_name']) self.record_md5(relative_file, md5) self[relative_file]['md5'] = md5 return md5 - def record_hardlink(self, relative_file, dev, inode, md5): + def record_hardlink(self, relative_file, dev, inode, md5, size): + if md5 is None: return + if size == 0: return # don't record 0-length files if dev == 0 or inode == 0: return # Windows if dev not in self.hardlinks: self.hardlinks[dev] = dict() @@ -45,10 +57,10 @@ def get_hardlink_md5(self, relative_file): md5 = None - dev = self[relative_file]['dev'] - inode = self[relative_file]['inode'] try: + dev = self[relative_file]['dev'] + inode = self[relative_file]['inode'] md5 = self.hardlinks[dev][inode]['md5'] - except: + except KeyError: pass return md5 diff --git a/S3/FileLists.py b/S3/FileLists.py index 66d00b4..f8602e9 100644 --- a/S3/FileLists.py +++ b/S3/FileLists.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors from S3 import S3 from Config import Config @@ -20,7 +21,7 @@ import re import errno -__all__ = ["fetch_local_list", "fetch_remote_list", "compare_filelists", "filter_exclude_include"] +__all__ = ["fetch_local_list", "fetch_remote_list", "compare_filelists"] def _fswalk_follow_symlinks(path): ''' @@ -59,7 +60,7 @@ yield (dirpath, dirnames, filenames) def filter_exclude_include(src_list): - info(u"Applying --exclude/--include") + debug(u"Applying --exclude/--include") cfg = Config() exclude_list = FileDict(ignore_case = False) for file in src_list.keys(): @@ -90,8 +91,6 @@ def handle_exclude_include_walk(root, dirs, files): cfg = Config() copydirs = copy.copy(dirs) - copyfiles = copy.copy(files) - # exclude dir matches in the current directory # this prevents us from recursing down trees we know we want to ignore for x in copydirs: @@ -99,6 +98,7 @@ debug(u"CHECK: %r" % d) excluded = False for r in cfg.exclude: + if not r.pattern.endswith(u'/'): continue # we only check for directories here if r.search(d): excluded = True debug(u"EXCL-MATCH: '%s'" % 
(cfg.debug_exclude[r])) @@ -106,6 +106,8 @@ if excluded: ## No need to check for --include if not excluded for r in cfg.include: + if not r.pattern.endswith(u'/'): continue # we only check for directories here + debug(u"INCL-TEST: %s ~ %s" % (d, r.pattern)) if r.search(d): excluded = False debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[r])) @@ -118,31 +120,6 @@ else: debug(u"PASS: %r" % (d)) - # exclude file matches in the current directory - for x in copyfiles: - file = os.path.join(root, x) - debug(u"CHECK: %r" % file) - excluded = False - for r in cfg.exclude: - if r.search(file): - excluded = True - debug(u"EXCL-MATCH: '%s'" % (cfg.debug_exclude[r])) - break - if excluded: - ## No need to check for --include if not excluded - for r in cfg.include: - if r.search(file): - excluded = False - debug(u"INCL-MATCH: '%s'" % (cfg.debug_include[r])) - break - if excluded: - ## Still excluded - ok, action it - debug(u"EXCLUDE: %s" % file) - files.remove(x) - continue - else: - debug(u"PASS: %r" % (file)) - def _get_filelist_from_file(cfg, local_path): def _append(d, key, value): @@ -182,6 +159,48 @@ return result def fetch_local_list(args, is_src = False, recursive = None): + + def _fetch_local_list_info(loc_list): + len_loc_list = len(loc_list) + info(u"Running stat() and reading/calculating MD5 values on %d files, this may take some time..." % len_loc_list) + counter = 0 + for relative_file in loc_list: + counter += 1 + if counter % 1000 == 0: + info(u"[%d/%d]" % (counter, len_loc_list)) + + if relative_file == '-': continue + + full_name = loc_list[relative_file]['full_name'] + try: + sr = os.stat_result(os.stat(full_name)) + except OSError, e: + if e.errno == errno.ENOENT: + # file was removed async to us getting the list + continue + else: + raise + loc_list[relative_file].update({ + 'size' : sr.st_size, + 'mtime' : sr.st_mtime, + 'dev' : sr.st_dev, + 'inode' : sr.st_ino, + 'uid' : sr.st_uid, + 'gid' : sr.st_gid, + 'sr': sr # save it all, may need it in preserve_attrs_list + ## TODO: Possibly more to save here... + }) + if 'md5' in cfg.sync_checks: + md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size) + if md5 is None: + try: + md5 = loc_list.get_md5(relative_file) # this does the file I/O + except IOError: + continue + cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5) + loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5, sr.st_size) + + def _get_filelist_local(loc_list, local_uri, cache): info(u"Compiling list of local files...") @@ -237,35 +256,11 @@ relative_file = replace_nonprintables(relative_file) if relative_file.startswith('./'): relative_file = relative_file[2:] - try: - sr = os.stat_result(os.stat(full_name)) - except OSError, e: - if e.errno == errno.ENOENT: - # file was removed async to us getting the list - continue - else: - raise loc_list[relative_file] = { 'full_name_unicode' : unicodise(full_name), 'full_name' : full_name, - 'size' : sr.st_size, - 'mtime' : sr.st_mtime, - 'dev' : sr.st_dev, - 'inode' : sr.st_ino, - 'uid' : sr.st_uid, - 'gid' : sr.st_gid, - 'sr': sr # save it all, may need it in preserve_attrs_list - ## TODO: Possibly more to save here... 
} - if 'md5' in cfg.sync_checks: - md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size) - if md5 is None: - try: - md5 = loc_list.get_md5(relative_file) # this does the file I/O - except IOError: - continue - cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5) - loc_list.record_hardlink(relative_file, sr.st_dev, sr.st_ino, md5) + return loc_list, single_file def _maintain_cache(cache, local_list): @@ -318,11 +313,12 @@ if len(local_list) > 1: single_file = False + local_list, exclude_list = filter_exclude_include(local_list) + _fetch_local_list_info(local_list) _maintain_cache(cache, local_list) - - return local_list, single_file - -def fetch_remote_list(args, require_attribs = False, recursive = None): + return local_list, single_file, exclude_list + +def fetch_remote_list(args, require_attribs = False, recursive = None, uri_params = {}): def _get_remote_attribs(uri, remote_item): response = S3(cfg).object_info(uri) remote_item.update({ @@ -354,9 +350,11 @@ ## { 'xyz/blah.txt' : {} } info(u"Retrieving list of remote files for %s ..." % remote_uri) + empty_fname_re = re.compile(r'\A\s*\Z') s3 = S3(Config()) - response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), recursive = recursive) + response = s3.bucket_list(remote_uri.bucket(), prefix = remote_uri.object(), + recursive = recursive, uri_params = uri_params) rem_base_original = rem_base = remote_uri.object() remote_uri_original = remote_uri @@ -376,6 +374,10 @@ else: key = object['Key'][rem_base_len:] ## Beware - this may be '' if object['Key']==rem_base !! object_uri_str = remote_uri.uri() + key + if empty_fname_re.match(key): + # Objects may exist on S3 with empty names (''), which don't map so well to common filesystems. + warning(u"Empty object name on S3 found, ignoring.") + continue rem_list[key] = { 'size' : int(object['Size']), 'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-( @@ -412,13 +414,13 @@ if recursive: for uri in remote_uris: - objectlist = _get_filelist_remote(uri) + objectlist = _get_filelist_remote(uri, recursive = True) for key in objectlist: remote_list[key] = objectlist[key] remote_list.record_md5(key, objectlist.get_md5(key)) else: for uri in remote_uris: - uri_str = str(uri) + uri_str = unicode(uri) ## Wildcards used in remote URI? ## If yes we'll need a bucket listing... 
wildcard_split_result = re.split("\*|\?", uri_str, maxsplit=1) @@ -449,7 +451,9 @@ md5 = remote_item.get('md5') if md5: remote_list.record_md5(key, md5) - return remote_list + + remote_list, exclude_list = filter_exclude_include(remote_list) + return remote_list, exclude_list def compare_filelists(src_list, dst_list, src_remote, dst_remote, delay_updates = False): diff --git a/S3/MultiPart.py b/S3/MultiPart.py index ed67130..21f0cae 100644 --- a/S3/MultiPart.py +++ b/S3/MultiPart.py @@ -8,7 +8,6 @@ from logging import debug, info, warning, error from Utils import getTextFromXml, getTreeFromXml, formatSize, unicodise, calculateChecksum, parseNodes from Exceptions import S3UploadError -from collections import defaultdict class MultiPartUpload(object): @@ -28,7 +27,7 @@ multipart_response = self.s3.list_multipart(uri, upload_id) tree = getTreeFromXml(multipart_response['data']) - parts = defaultdict(lambda: None) + parts = dict() for elem in parseNodes(tree): try: parts[int(elem['PartNumber'])] = {'checksum': elem['ETag'], 'size': elem['Size']} @@ -93,7 +92,7 @@ else: debug("MultiPart: Uploading from %s" % (self.file.name)) - remote_statuses = defaultdict(lambda: None) + remote_statuses = dict() if self.s3.config.put_continue: remote_statuses = self.get_parts_information(self.uri, self.upload_id) @@ -109,7 +108,7 @@ 'extra' : "[part %d of %d, %s]" % (seq, nr_parts, "%d%sB" % formatSize(current_chunk_size, human_readable = True)) } try: - self.upload_part(seq, offset, current_chunk_size, labels, remote_status = remote_statuses[seq]) + self.upload_part(seq, offset, current_chunk_size, labels, remote_status = remote_statuses.get(seq)) except: error(u"\nUpload of '%s' part %d failed. Use\n %s abortmp %s %s\nto abort the upload, or\n %s --upload-id %s put ...\nto continue the upload." % (self.file.name, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id)) @@ -128,7 +127,7 @@ if len(buffer) == 0: # EOF break try: - self.upload_part(seq, offset, current_chunk_size, labels, buffer, remote_status = remote_statuses[seq]) + self.upload_part(seq, offset, current_chunk_size, labels, buffer, remote_status = remote_statuses.get(seq)) except: error(u"\nUpload of '%s' part %d failed. Use\n %s abortmp %s %s\nto abort, or\n %s --upload-id %s put ...\nto continue the upload." 
% (self.file.name, seq, sys.argv[0], self.uri, self.upload_id, sys.argv[0], self.upload_id)) diff --git a/S3/PkgInfo.py b/S3/PkgInfo.py index 7bd3569..0c8970e 100644 --- a/S3/PkgInfo.py +++ b/S3/PkgInfo.py @@ -1,5 +1,11 @@ +## Amazon S3 manager +## Author: Michal Ludvig +## http://www.logix.cz/michal +## License: GPL Version 2 +## Copyright: TGRMN Software and contributors + package = "s3cmd" -version = "1.5.0-beta1" +version = "1.5.0-rc1" url = "http://s3tools.org" license = "GPL version 2" short_description = "Command line tool for managing Amazon S3 and CloudFront services" diff --git a/S3/Progress.py b/S3/Progress.py index ea776b8..5354004 100644 --- a/S3/Progress.py +++ b/S3/Progress.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors import sys import datetime diff --git a/S3/S3.py b/S3/S3.py index 4dffd6d..1589f7b 100644 --- a/S3/S3.py +++ b/S3/S3.py @@ -2,15 +2,19 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors import sys import os, os.path import time import errno +import base64 import httplib import logging import mimetypes import re +from xml.sax import saxutils +import base64 from logging import debug, info, warning, error from stat import ST_SIZE @@ -31,46 +35,30 @@ from ConnMan import ConnMan try: - import magic, gzip + import magic try: ## https://github.com/ahupp/python-magic magic_ = magic.Magic(mime=True) def mime_magic_file(file): return magic_.from_file(file) - def mime_magic_buffer(buffer): - return magic_.from_buffer(buffer) except TypeError: ## http://pypi.python.org/pypi/filemagic try: magic_ = magic.Magic(flags=magic.MAGIC_MIME) def mime_magic_file(file): return magic_.id_filename(file) - def mime_magic_buffer(buffer): - return magic_.id_buffer(buffer) except TypeError: ## file-5.11 built-in python bindings magic_ = magic.open(magic.MAGIC_MIME) magic_.load() def mime_magic_file(file): return magic_.file(file) - def mime_magic_buffer(buffer): - return magic_.buffer(buffer) - except AttributeError: ## Older python-magic versions magic_ = magic.open(magic.MAGIC_MIME) magic_.load() def mime_magic_file(file): return magic_.file(file) - def mime_magic_buffer(buffer): - return magic_.buffer(buffer) - - def mime_magic(file): - type = mime_magic_file(file) - if type != "application/x-gzip; charset=binary": - return (type, None) - else: - return (mime_magic_buffer(gzip.open(file).read(8192)), 'gzip') except ImportError, e: if str(e).find("magic") >= 0: @@ -79,12 +67,37 @@ magic_message = "Module python-magic can't be used (%s)." % e.message magic_message += " Guessing MIME types based on file extensions." magic_warned = False - def mime_magic(file): + def mime_magic_file(file): global magic_warned if (not magic_warned): warning(magic_message) magic_warned = True - return mimetypes.guess_type(file) + return mimetypes.guess_type(file)[0] + +def mime_magic(file): + # we can't tell if a given copy of the magic library will take a + # filesystem-encoded string or a unicode value, so try first + # with the encoded string, then unicode.
+ def _mime_magic(file): + magictype = None + try: + magictype = mime_magic_file(file) + except UnicodeDecodeError: + magictype = mime_magic_file(unicodise(file)) + return magictype + + result = _mime_magic(file) + if result is not None: + if isinstance(result, str): + if ';' in result: + mimetype, charset = result.split(';') + charset = charset[len(' charset='):] # drop the " charset=" prefix left by split() + result = (mimetype, charset) + else: + result = (result, None) + if result is None: + result = (None, None) + return result __all__ = [] class S3Request(object): @@ -161,6 +174,7 @@ SERVICE = 0x0100, BUCKET = 0x0200, OBJECT = 0x0400, + BATCH = 0x0800, MASK = 0x0700, ) @@ -175,6 +189,7 @@ OBJECT_HEAD = targets["OBJECT"] | http_methods["HEAD"], OBJECT_DELETE = targets["OBJECT"] | http_methods["DELETE"], OBJECT_POST = targets["OBJECT"] | http_methods["POST"], + BATCH_DELETE = targets["BATCH"] | http_methods["POST"], ) codes = { @@ -223,7 +238,7 @@ response["list"] = getListFromXml(response["data"], "Bucket") return response - def bucket_list(self, bucket, prefix = None, recursive = None): + def bucket_list(self, bucket, prefix = None, recursive = None, uri_params = {}): def _list_truncated(data): ## can either be "true" or "false" or be missing completely is_truncated = getTextFromXml(data, ".//IsTruncated") or "false" @@ -235,7 +250,7 @@ def _get_common_prefixes(data): return getListFromXml(data, "CommonPrefixes") - uri_params = {} + uri_params = uri_params.copy() truncated = True list = [] prefixes = [] @@ -367,6 +382,62 @@ return response + def expiration_info(self, uri, bucket_location = None): + headers = SortedDict(ignore_case = True) + bucket = uri.bucket() + body = "" + + request = self.create_request("BUCKET_LIST", bucket = bucket, extra="?lifecycle") + try: + response = self.send_request(request, body) + response['prefix'] = getTextFromXml(response['data'], ".//Rule//Prefix") + response['date'] = getTextFromXml(response['data'], ".//Rule//Expiration//Date") + response['days'] = getTextFromXml(response['data'], ".//Rule//Expiration//Days") + return response + except S3Error, e: + if e.status == 404: + debug("Could not get /?lifecycle - lifecycle probably not configured for this bucket") + return None + raise + + def expiration_set(self, uri, bucket_location = None): + if self.config.expiry_date and self.config.expiry_days: + raise ParameterError("Expect either --expiry-days or --expiry-date") + if not (self.config.expiry_date or self.config.expiry_days): + if self.config.expiry_prefix: + raise ParameterError("Expect either --expiry-days or --expiry-date") + debug("del bucket lifecycle") + bucket = uri.bucket() + body = "" + request = self.create_request("BUCKET_DELETE", bucket = bucket, extra="?lifecycle") + else: + request, body = self._expiration_set(uri) + debug("About to send request '%s' with body '%s'" % (request, body)) + response = self.send_request(request, body) + debug("Received response '%s'" % (response)) + return response + + def _expiration_set(self, uri): + debug("put bucket lifecycle") + body = '<LifecycleConfiguration>' + body += ' <Rule>' + body += (' <Prefix>%s</Prefix>' % self.config.expiry_prefix) + body += (' <Status>Enabled</Status>') + body += (' <Expiration>') + if self.config.expiry_date: + body += (' <Date>%s</Date>' % self.config.expiry_date) + elif self.config.expiry_days: + body += (' <Days>%s</Days>' % self.config.expiry_days) + body += (' </Expiration>') + body += ' </Rule>' + body += '</LifecycleConfiguration>' + + headers = SortedDict(ignore_case = True) + headers['content-md5'] = compute_content_md5(body) + bucket = uri.bucket() + request = self.create_request("BUCKET_CREATE", bucket = bucket, headers = headers, extra="?lifecycle") + + return (request, body)
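The lifecycle payload assembled by _expiration_set() above is easiest to read as one whole document. Below is a sketch of the request body and the mandatory Content-MD5 header for a hypothetical rule that expires everything under logs/ after 90 days (both values invented for illustration; in s3cmd they come from the expiry_prefix and expiry_days settings). S3 rejects PUT ?lifecycle requests that lack Content-MD5, which is why the code above calls compute_content_md5(), defined further down in this patch:

    import base64
    from hashlib import md5

    # Hypothetical rule: expire objects under "logs/" after 90 days.
    body = ('<LifecycleConfiguration><Rule>'
            '<Prefix>logs/</Prefix>'
            '<Status>Enabled</Status>'
            '<Expiration><Days>90</Days></Expiration>'
            '</Rule></LifecycleConfiguration>')

    # Same recipe as compute_content_md5(): base64 of the raw MD5 digest.
    headers = {'content-md5': base64.b64encode(md5(body).digest())}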
def add_encoding(self, filename, content_type): if content_type.find("charset=") != -1: return False @@ -410,22 +481,22 @@ ## MIME-type handling content_type = self.config.mime_type - content_encoding = None + content_charset = None if filename != "-" and not content_type and self.config.guess_mime_type: if self.config.use_mime_magic: - (content_type, content_encoding) = mime_magic(filename) - else: - (content_type, content_encoding) = mimetypes.guess_type(filename) + (content_type, content_charset) = mime_magic(filename) + else: + (content_type, content_charset) = mimetypes.guess_type(filename) if not content_type: content_type = self.config.default_mime_type + if not content_charset: + content_charset = self.config.encoding.upper() ## add charset to content type - if self.add_encoding(filename, content_type): - content_type = content_type + "; charset=" + self.config.encoding.upper() + if self.add_encoding(filename, content_type) and content_charset is not None: + content_type = content_type + "; charset=" + content_charset headers["content-type"] = content_type - if content_encoding is not None and self.config.add_content_encoding: - headers["content-encoding"] = content_encoding ## Other Amazon S3 attributes if self.config.acl_public: @@ -484,13 +555,42 @@ response = self.recv_file(request, stream, labels, start_position) return response + def object_batch_delete(self, remote_list): + def compose_batch_del_xml(bucket, key_list): + body = u"<?xml version=\"1.0\" encoding=\"UTF-8\"?><Delete>" + for key in key_list: + uri = S3Uri(key) + if uri.type != "s3": + raise ValueError("Expected URI type 's3', got '%s'" % uri.type) + if not uri.has_object(): + raise ValueError("URI '%s' has no object" % key) + if uri.bucket() != bucket: + raise ValueError("The batch should contain keys from the same bucket") + object = saxutils.escape(uri.object()) + body += u"<Object><Key>%s</Key></Object>" % object + body += u"</Delete>" + body = body.encode('utf-8') + return body + + batch = [remote_list[item]['object_uri_str'] for item in remote_list] + if len(batch) == 0: + raise ValueError("Key list is empty") + bucket = S3Uri(batch[0]).bucket() + request_body = compose_batch_del_xml(bucket, batch) + md5_hash = md5() + md5_hash.update(request_body) + headers = {'content-md5': base64.b64encode(md5_hash.digest())} + request = self.create_request("BATCH_DELETE", bucket = bucket, extra = '?delete', headers = headers) + response = self.send_request(request, request_body) + return response + def object_delete(self, uri): if uri.type != "s3": raise ValueError("Expected URI type 's3', got '%s'" % uri.type) request = self.create_request("OBJECT_DELETE", uri = uri) response = self.send_request(request) return response - + def object_restore(self, uri): if uri.type != "s3": raise ValueError("Expected URI type 's3', got '%s'" % uri.type) @@ -516,13 +616,14 @@ headers["x-amz-acl"] = "public-read" if self.config.reduced_redundancy: headers["x-amz-storage-class"] = "REDUCED_REDUNDANCY" - # if extra_headers: - # headers.update(extra_headers) ## Set server side encryption if self.config.server_side_encryption: headers["x-amz-server-side-encryption"] = "AES256" + if extra_headers: + headers['x-amz-metadata-directive'] = "REPLACE" + headers.update(extra_headers) request = self.create_request("OBJECT_PUT", uri = dst_uri, headers = headers) response = self.send_request(request) return response @@ -581,6 +682,23 @@ def delete_policy(self, uri): request = self.create_request("BUCKET_DELETE", uri = uri, extra = "?policy") debug(u"delete_policy(%s)" % uri) + response =
self.send_request(request) + return response + + def set_lifecycle_policy(self, uri, policy): + headers = SortedDict(ignore_case = True) + headers['content-md5'] = compute_content_md5(policy) + request = self.create_request("BUCKET_CREATE", uri = uri, + extra = "?lifecycle", headers=headers) + body = policy + debug(u"set_lifecycle_policy(%s): policy-xml: %s" % (uri, body)) + request.sign() + response = self.send_request(request, body=body) + return response + + def delete_lifecycle_policy(self, uri): + request = self.create_request("BUCKET_DELETE", uri = uri, extra = "?lifecycle") + debug(u"delete_lifecycle_policy(%s)" % uri) response = self.send_request(request) return response @@ -742,6 +860,8 @@ ConnMan.put(conn) except ParameterError, e: raise + except (IOError, OSError), e: + raise except Exception, e: if retries: warning("Retrying failed request: %s (%s)" % (resource['uri'], e)) @@ -949,6 +1069,8 @@ debug("Response: %s" % response) except ParameterError, e: raise + except (IOError, OSError), e: + raise except Exception, e: if self.config.progress_meter: progress.done("failed") @@ -1001,6 +1123,8 @@ if self.config.progress_meter: progress.update(delta_position = len(data)) ConnMan.put(conn) + except (IOError, OSError), e: + raise except Exception, e: if self.config.progress_meter: progress.done("failed") @@ -1037,10 +1161,14 @@ response["md5"] = response["headers"]["etag"] md5_hash = response["headers"]["etag"] - try: - md5_hash = response["s3cmd-attrs"]["md5"] - except KeyError: - pass + if not 'x-amz-meta-s3tools-gpgenc' in response["headers"]: + # we can't trust our stored md5 because we + # encrypted the file after calculating it but before + # uploading it. + try: + md5_hash = response["s3cmd-attrs"]["md5"] + except KeyError: + pass response["md5match"] = md5_hash.find(response["md5"]) >= 0 response["elapsed"] = timestamp_end - timestamp_start @@ -1062,4 +1190,11 @@ key, val = attr.split(":") attrs[key] = val return attrs + +def compute_content_md5(body): + m = md5(body) + base64md5 = base64.encodestring(m.digest()) + if base64md5[-1] == '\n': + base64md5 = base64md5[0:-1] + return base64md5 # vim:et:ts=4:sts=4:ai diff --git a/S3/S3Uri.py b/S3/S3Uri.py index 81e8d47..23defe2 100644 --- a/S3/S3Uri.py +++ b/S3/S3Uri.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors import os import re @@ -74,7 +75,7 @@ return bool(self._object) def uri(self): - return "/".join(["s3:/", self._bucket, self._object]) + return u"/".join([u"s3:/", self._bucket, self._object]) def is_dns_compatible(self): return check_bucket_name_dns_conformity(self._bucket) diff --git a/S3/SortedDict.py b/S3/SortedDict.py index b7b2247..0cddc0e 100644 --- a/S3/SortedDict.py +++ b/S3/SortedDict.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors from BidirMap import BidirMap import Utils @@ -46,6 +47,12 @@ def __iter__(self): return SortedDictIterator(self, self.keys()) + def __getslice__(self, i=0, j=-1): + keys = self.keys()[i:j] + r = SortedDict(ignore_case = self.ignore_case) + for k in keys: + r[k] = self[k] + return r if __name__ == "__main__": diff --git a/S3/Utils.py b/S3/Utils.py index bfebf9a..e67e672 100644 --- a/S3/Utils.py +++ b/S3/Utils.py @@ -2,6 +2,7 @@ ## Author: Michal Ludvig ## http://www.logix.cz/michal ## License: GPL Version 2 +## Copyright: TGRMN Software and contributors import datetime import os @@ -15,9 
+16,25 @@ import base64 import errno import urllib - +from calendar import timegm from logging import debug, info, warning, error - +from ExitCodes import EX_OSFILE +try: + import dateutil.parser +except ImportError: + sys.stderr.write(u""" +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +ImportError trying to import dateutil.parser. +Please install the python dateutil module: +$ sudo apt-get install python-dateutil + or +$ sudo yum install python-dateutil + or +$ pip install python-dateutil +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +""") + sys.stderr.flush() + sys.exit(EX_OSFILE) import Config import Exceptions @@ -76,6 +93,11 @@ except ExpatError, e: error(e) raise Exceptions.ParameterError("Bucket contains invalid filenames. Please run: s3cmd fixbucket s3://your-bucket/") + except Exception, e: + error(e) + error(xml) + raise + __all__.append("getTreeFromXml") def getListFromXml(xml, node): @@ -133,23 +155,22 @@ __all__.append("appendXmlTextNode") def dateS3toPython(date): - date = re.compile("(\.\d*)?Z").sub(".000Z", date) - return time.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z") + # Reset milliseconds to 000 + date = re.compile('\.[0-9]*(?:[Z\\-\\+]*?)').sub(".000", date) + return dateutil.parser.parse(date, fuzzy=True) __all__.append("dateS3toPython") def dateS3toUnix(date): - ## FIXME: This should be timezone-aware. - ## Currently the argument to strptime() is GMT but mktime() - ## treats it as "localtime". Anyway... - return time.mktime(dateS3toPython(date)) + ## NOTE: This is timezone-aware and returns the timestamp in GMT + return timegm(dateS3toPython(date).utctimetuple()) __all__.append("dateS3toUnix") def dateRFC822toPython(date): - return rfc822.parsedate(date) + return dateutil.parser.parse(date, fuzzy=True) __all__.append("dateRFC822toPython") def dateRFC822toUnix(date): - return time.mktime(dateRFC822toPython(date)) + return timegm(dateRFC822toPython(date).utctimetuple()) __all__.append("dateRFC822toUnix") def formatSize(size, human_readable = False, floating_point = False): @@ -166,20 +187,8 @@ __all__.append("formatSize") def formatDateTime(s3timestamp): - try: - import pytz - timezone = pytz.timezone(os.environ.get('TZ', 'UTC')) - tz = pytz.timezone('UTC') - ## Can't unpack args and follow that with kwargs in python 2.5 - ## So we pass them all as kwargs - params = zip(('year', 'month', 'day', 'hour', 'minute', 'second', 'tzinfo'), - dateS3toPython(s3timestamp)[0:6] + (tz,)) - params = dict(params) - utc_dt = datetime.datetime(**params) - dt_object = utc_dt.astimezone(timezone) - except ImportError: - dt_object = datetime.datetime(*dateS3toPython(s3timestamp)[0:6]) - return dt_object.strftime("%Y-%m-%d %H:%M") + date_obj = dateutil.parser.parse(s3timestamp, fuzzy=True) + return date_obj.strftime("%Y-%m-%d %H:%M") __all__.append("formatDateTime") def convertTupleListToDict(list): @@ -364,12 +373,13 @@ """Shared implementation of sign_url methods.
Takes a hash of 'bucket', 'object' and 'expiry' as args.""" parms['expiry']=time_to_epoch(parms['expiry']) parms['access_key']=Config.Config().access_key + parms['host_base']=Config.Config().host_base debug("Expiry interpreted as epoch time %s", parms['expiry']) signtext = 'GET\n\n\n%(expiry)d\n/%(bucket)s/%(object)s' % parms debug("Signing plaintext: %r", signtext) parms['sig'] = urllib.quote_plus(sign_string(signtext)) debug("Urlencoded signature: %s", parms['sig']) - return "http://%(bucket)s.s3.amazonaws.com/%(object)s?AWSAccessKeyId=%(access_key)s&Expires=%(expiry)d&Signature=%(sig)s" % parms + return "http://%(bucket)s.%(host_base)s/%(object)s?AWSAccessKeyId=%(access_key)s&Expires=%(expiry)d&Signature=%(sig)s" % parms def time_to_epoch(t): """Convert time specified in a variety of forms into UNIX epoch time. @@ -478,13 +488,14 @@ __all__.append("calculateChecksum") -# Deal with the fact that pwd and grp modules don't exist for Windos +# Deal with the fact that pwd and grp modules don't exist for Windows try: import pwd def getpwuid_username(uid): """returns a username from the password database for the given uid""" return pwd.getpwuid(uid).pw_name except ImportError: + import getpass def getpwuid_username(uid): return getpass.getuser() __all__.append("getpwuid_username") diff --git a/TODO b/TODO deleted file mode 100644 index 48bc7f0..0000000 --- a/TODO +++ /dev/null @@ -1,52 +0,0 @@ -TODO list for s3cmd project -=========================== - -- Before 1.0.0 (or asap after 1.0.0) - - Make 'sync s3://bkt/some-filename local/other-filename' work - (at the moment it'll always download). - - Enable --exclude for [ls]. - - Allow change /tmp to somewhere else - - With --guess-mime use 'magic' module if available. - - Support --preserve for [put] and [get]. Update manpage. - - Don't let --continue fail if the file is already fully downloaded. - - Option --mime-type should set mime type with 'cp' and 'mv'. - If possible --guess-mime-type should do as well. - - Make upload throttling configurable. - - Allow removing 'DefaultRootObject' from CloudFront distributions. - - Get s3://bucket/non-existent creates empty local file 'non-existent' - - Add 'geturl' command, both Unicode and urlencoded output. - - Add a command for generating "Query String Authentication" URLs. - - Support --acl-grant (together with --acl-public/private) for [put] and [sync] - - Filter 's3cmd ls' output by --bucket-location= - -- After 1.0.0 - - Sync must backup non-files as well. At least directories, - symlinks and device nodes. - - Speed up upload / download with multiple threads. - (see http://blog.50projects.com/p/s3cmd-modifications.html) - - Sync should be able to update metadata (UID, timstamps, etc) - if only these change (i.e. same content, different metainfo). - - If GPG fails error() and exit. If un-GPG fails save the - file with .gpg extension. - - Keep backup files remotely on put/sync-to if requested - (move the old 'object' to e.g. 'object~' and only then upload - the new one). Could be more advanced to keep, say, last 5 - copies, etc. - - Memory consumption on very large upload sets is terribly high. - - Implement per-bucket (or per-regexp?) default settings. For - example regarding ACLs, encryption, etc. - -- Implement GPG for sync - (it's not that easy since it won't be easy to compare - the encrypted-remote-object size with local file.
- either we can store the metadata in a dedicated file - where we face a risk of inconsistencies, or we'll store - the metadata encrypted in each object header where we'll - have to do large number for object/HEAD requests. tough - call). - Or we can only compare local timestamps with remote object - timestamps. If the local one is older we'll *assume* it - hasn't been changed. But what to do about remote2local sync? - -- Keep man page up to date and write some more documentation - - Yeah, right ;-) diff --git a/artwork/AtomicClockRadio.ttf b/artwork/AtomicClockRadio.ttf deleted file mode 100644 index e70d025..0000000 Binary files a/artwork/AtomicClockRadio.ttf and /dev/null differ diff --git a/artwork/TypeRa.ttf b/artwork/TypeRa.ttf deleted file mode 100644 index f6ad985..0000000 Binary files a/artwork/TypeRa.ttf and /dev/null differ diff --git a/artwork/site-top-full-size.xcf b/artwork/site-top-full-size.xcf deleted file mode 100644 index 2a7a038..0000000 Binary files a/artwork/site-top-full-size.xcf and /dev/null differ diff --git a/artwork/site-top-label-download.png b/artwork/site-top-label-download.png deleted file mode 100644 index 4d5090e..0000000 Binary files a/artwork/site-top-label-download.png and /dev/null differ diff --git a/artwork/site-top-label-s3cmd.png b/artwork/site-top-label-s3cmd.png deleted file mode 100644 index 3c52213..0000000 Binary files a/artwork/site-top-label-s3cmd.png and /dev/null differ diff --git a/artwork/site-top-label-s3sync.png b/artwork/site-top-label-s3sync.png deleted file mode 100644 index d408562..0000000 Binary files a/artwork/site-top-label-s3sync.png and /dev/null differ diff --git a/artwork/site-top-s3tools-logo.png b/artwork/site-top-s3tools-logo.png deleted file mode 100644 index cbb6357..0000000 Binary files a/artwork/site-top-s3tools-logo.png and /dev/null differ diff --git a/artwork/site-top.jpg b/artwork/site-top.jpg deleted file mode 100644 index d6c80d9..0000000 Binary files a/artwork/site-top.jpg and /dev/null differ diff --git a/artwork/site-top.png b/artwork/site-top.png deleted file mode 100644 index a38dcee..0000000 Binary files a/artwork/site-top.png and /dev/null differ diff --git a/artwork/site-top.xcf b/artwork/site-top.xcf deleted file mode 100644 index b75e5ee..0000000 Binary files a/artwork/site-top.xcf and /dev/null differ diff --git a/format-manpage.pl b/format-manpage.pl deleted file mode 100755 index 15191b3..0000000 --- a/format-manpage.pl +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/perl - -# Format s3cmd.1 manpage -# Usage: -# s3cmd --help | format-manpage.pl > s3cmd.1 - -use strict; - -my $commands = ""; -my $cfcommands = ""; -my $wscommands = ""; -my $options = ""; - -while (<>) { - if (/^Commands:/) { - while (<>) { - last if (/^\s*$/); - my ($desc, $cmd, $cmdline); - ($desc = $_) =~ s/^\s*(.*?)\s*$/$1/; - ($cmdline = <>) =~ s/^\s*s3cmd (.*?) 
(.*?)\s*$/s3cmd \\fB$1\\fR \\fI$2\\fR/; - $cmd = $1; - if ($cmd =~ /^cf/) { - $cfcommands .= ".TP\n$cmdline\n$desc\n"; - } elsif ($cmd =~ /^ws/) { - $wscommands .= ".TP\n$cmdline\n$desc\n"; - } else { - $commands .= ".TP\n$cmdline\n$desc\n"; - } - } - } - if (/^Options:/) { - my ($opt, $desc); - while (<>) { - last if (/^\s*$/); - $_ =~ s/(.*?)\s*$/$1/; - $desc = ""; - $opt = ""; - if (/^ (-.*)/) { - $opt = $1; - if ($opt =~ / /) { - ($opt, $desc) = split(/\s\s+/, $opt, 2); - } - $opt =~ s/(-[^ ,=\.]+)/\\fB$1\\fR/g; - $opt =~ s/-/\\-/g; - $options .= ".TP\n$opt\n"; - } else { - $_ =~ s/\s*(.*?)\s*$/$1/; - $_ =~ s/(--[^ ,=\.]+)/\\fB$1\\fR/g; - $desc .= $_; - } - if ($desc) { - $options .= "$desc\n"; - } - } - } -} -print " -.\\\" !!! IMPORTANT: This file is generated from s3cmd --help output using format-manpage.pl -.\\\" !!! Do your changes either in s3cmd file or in 'format-manpage.pl' otherwise -.\\\" !!! they will be overwritten! - -.TH s3cmd 1 -.SH NAME -s3cmd \\- tool for managing Amazon S3 storage space and Amazon CloudFront content delivery network -.SH SYNOPSIS -.B s3cmd -[\\fIOPTIONS\\fR] \\fICOMMAND\\fR [\\fIPARAMETERS\\fR] -.SH DESCRIPTION -.PP -.B s3cmd -is a command line client for copying files to/from -Amazon S3 (Simple Storage Service) and performing other -related tasks, for instance creating and removing buckets, -listing objects, etc. - -.SH COMMANDS -.PP -.B s3cmd -can do several \\fIactions\\fR specified by the following \\fIcommands\\fR. -$commands - -.PP -Commands for static WebSites configuration -$wscommands - -.PP -Commands for CloudFront management -$cfcommands - -.SH OPTIONS -.PP -Some of the below specified options can have their default -values set in -.B s3cmd -config file (by default \$HOME/.s3cmd). As it's a simple text file -feel free to open it with your favorite text editor and do any -changes you like. -$options - -.SH EXAMPLES -One of the most powerful commands of \\fIs3cmd\\fR is \\fBs3cmd sync\\fR used for -synchronising complete directory trees to or from remote S3 storage. To some extent -\\fBs3cmd put\\fR and \\fBs3cmd get\\fR share a similar behaviour with \\fBsync\\fR. -.PP -Basic usage common in backup scenarios is as simple as: -.nf - s3cmd sync /local/path/ s3://test-bucket/backup/ -.fi -.PP -This command will find all files under /local/path directory and copy them -to corresponding paths under s3://test-bucket/backup on the remote side. -For example: -.nf - /local/path/\\fBfile1.ext\\fR \\-> s3://bucket/backup/\\fBfile1.ext\\fR - /local/path/\\fBdir123/file2.bin\\fR \\-> s3://bucket/backup/\\fBdir123/file2.bin\\fR -.fi -.PP -However if the local path doesn't end with a slash the last directory's name -is used on the remote side as well. 
Compare these with the previous example: -.nf - s3cmd sync /local/path s3://test-bucket/backup/ -.fi -will sync: -.nf - /local/\\fBpath/file1.ext\\fR \\-> s3://bucket/backup/\\fBpath/file1.ext\\fR - /local/\\fBpath/dir123/file2.bin\\fR \\-> s3://bucket/backup/\\fBpath/dir123/file2.bin\\fR -.fi -.PP -To retrieve the files back from S3 use inverted syntax: -.nf - s3cmd sync s3://test-bucket/backup/ /tmp/restore/ -.fi -that will download files: -.nf - s3://bucket/backup/\\fBfile1.ext\\fR \\-> /tmp/restore/\\fBfile1.ext\\fR - s3://bucket/backup/\\fBdir123/file2.bin\\fR \\-> /tmp/restore/\\fBdir123/file2.bin\\fR -.fi -.PP -Without the trailing slash on source the behaviour is similar to -what has been demonstrated with upload: -.nf - s3cmd sync s3://test-bucket/backup /tmp/restore/ -.fi -will download the files as: -.nf - s3://bucket/\\fBbackup/file1.ext\\fR \\-> /tmp/restore/\\fBbackup/file1.ext\\fR - s3://bucket/\\fBbackup/dir123/file2.bin\\fR \\-> /tmp/restore/\\fBbackup/dir123/file2.bin\\fR -.fi -.PP -All source file names, the bold ones above, are matched against \\fBexclude\\fR -rules and those that match are then re\\-checked against \\fBinclude\\fR rules to see -whether they should be excluded or kept in the source list. -.PP -For the purpose of \\fB\\-\\-exclude\\fR and \\fB\\-\\-include\\fR matching only the -bold file names above are used. For instance only \\fBpath/file1.ext\\fR is tested -against the patterns, not \\fI/local/\\fBpath/file1.ext\\fR -.PP -Both \\fB\\-\\-exclude\\fR and \\fB\\-\\-include\\fR work with shell-style wildcards (a.k.a. GLOB). -For a greater flexibility s3cmd provides Regular-expression versions of the two exclude options -named \\fB\\-\\-rexclude\\fR and \\fB\\-\\-rinclude\\fR. -The options with ...\\fB\\-from\\fR suffix (eg \\-\\-rinclude\\-from) expect a filename as -an argument. Each line of such a file is treated as one pattern. -.PP -There is only one set of patterns built from all \\fB\\-\\-(r)exclude(\\-from)\\fR options -and similarly for include variant. Any file excluded with eg \\-\\-exclude can -be put back with a pattern found in \\-\\-rinclude\\-from list. -.PP -Run s3cmd with \\fB\\-\\-dry\\-run\\fR to verify that your rules work as expected. -Use together with \\fB\\-\\-debug\\fR get detailed information -about matching file names against exclude and include rules. -.PP -For example to exclude all files with \".jpg\" extension except those beginning with a number use: -.PP - \\-\\-exclude '*.jpg' \\-\\-rinclude '[0-9].*\\.jpg' -.SH SEE ALSO -For the most up to date list of options run -.B s3cmd \\-\\-help -.br -For more info about usage, examples and other related info visit project homepage at -.br -.B http://s3tools.org -.SH DONATIONS -Please consider a donation if you have found s3cmd useful: -.br -.B http://s3tools.org/donate -.SH AUTHOR -Written by Michal Ludvig and 15+ contributors -.SH CONTACT, SUPPORT -Preferred way to get support is our mailing list: -.I s3tools\\-general\@lists.sourceforge.net -.SH REPORTING BUGS -Report bugs to -.I s3tools\\-bugs\@lists.sourceforge.net -.SH COPYRIGHT -Copyright \\(co 2007,2008,2009,2010,2011,2012 Michal Ludvig -.br -This is free software. You may redistribute copies of it under the terms of -the GNU General Public License version 2 . -There is NO WARRANTY, to the extent permitted by law. 
-"; diff --git a/magic b/magic deleted file mode 100644 index 7eda929..0000000 --- a/magic +++ /dev/null @@ -1,63 +0,0 @@ -# Additional magic for common web file types - -0 string/b {\ " JSON data -!:mime application/json -0 string/b {\ } JSON data -!:mime application/json -0 string/b [ JSON data -!:mime application/json - -0 search/4000 function ->&0 search/32/b )\ { JavaScript program -!:mime application/javascript - -0 search/4000 @media CSS stylesheet -!:mime text/css -0 search/4000 @import CSS stylesheet -!:mime text/css -0 search/4000 @namespace CSS stylesheet -!:mime text/css -0 search/4000/b {\ background CSS stylesheet -!:mime text/css -0 search/4000/b {\ border CSS stylesheet -!:mime text/css -0 search/4000/b {\ bottom CSS stylesheet -!:mime text/css -0 search/4000/b {\ color CSS stylesheet -!:mime text/css -0 search/4000/b {\ cursor CSS stylesheet -!:mime text/css -0 search/4000/b {\ direction CSS stylesheet -!:mime text/css -0 search/4000/b {\ display CSS stylesheet -!:mime text/css -0 search/4000/b {\ float CSS stylesheet -!:mime text/css -0 search/4000/b {\ font CSS stylesheet -!:mime text/css -0 search/4000/b {\ height CSS stylesheet -!:mime text/css -0 search/4000/b {\ left CSS stylesheet -!:mime text/css -0 search/4000/b {\ line- CSS stylesheet -!:mime text/css -0 search/4000/b {\ margin CSS stylesheet -!:mime text/css -0 search/4000/b {\ padding CSS stylesheet -!:mime text/css -0 search/4000/b {\ position CSS stylesheet -!:mime text/css -0 search/4000/b {\ right CSS stylesheet -!:mime text/css -0 search/4000/b {\ text- CSS stylesheet -!:mime text/css -0 search/4000/b {\ top CSS stylesheet -!:mime text/css -0 search/4000/b {\ width CSS stylesheet -!:mime text/css -0 search/4000/b {\ visibility CSS stylesheet -!:mime text/css -0 search/4000/b {\ -moz- CSS stylesheet -!:mime text/css -0 search/4000/b {\ -webkit- CSS stylesheet -!:mime text/css diff --git a/run-tests.py b/run-tests.py deleted file mode 100755 index f3184ca..0000000 --- a/run-tests.py +++ /dev/null @@ -1,540 +0,0 @@ -#!/usr/bin/env python -# -*- coding=utf-8 -*- - -## Amazon S3cmd - testsuite -## Author: Michal Ludvig -## http://www.logix.cz/michal -## License: GPL Version 2 - -import sys -import os -import re -from subprocess import Popen, PIPE, STDOUT -import locale -import getpass - -count_pass = 0 -count_fail = 0 -count_skip = 0 - -test_counter = 0 -run_tests = [] -exclude_tests = [] - -verbose = False - -if os.name == "posix": - have_wget = True -elif os.name == "nt": - have_wget = False -else: - print "Unknown platform: %s" % os.name - sys.exit(1) - -## Unpack testsuite/ directory -if not os.path.isdir('testsuite') and os.path.isfile('testsuite.tar.gz'): - os.system("tar -xz -f testsuite.tar.gz") -if not os.path.isdir('testsuite'): - print "Something went wrong while unpacking testsuite.tar.gz" - sys.exit(1) - -os.system("tar -xf testsuite/checksum.tar -C testsuite") -if not os.path.isfile('testsuite/checksum/cksum33.txt'): - print "Something went wrong while unpacking testsuite/checkum.tar" - sys.exit(1) - -## Fix up permissions for permission-denied tests -os.chmod("testsuite/permission-tests/permission-denied-dir", 0444) -os.chmod("testsuite/permission-tests/permission-denied.txt", 0000) - -## Patterns for Unicode tests -patterns = {} -patterns['UTF-8'] = u"ŪņЇЌœđЗ/☺ unicode € rocks ™" -patterns['GBK'] = u"12月31日/1-特色條目" - -encoding = locale.getpreferredencoding() -if not encoding: - print "Guessing current system encoding failed. Consider setting $LANG variable." 
- sys.exit(1) -else: - print "System encoding: " + encoding - -have_encoding = os.path.isdir('testsuite/encodings/' + encoding) -if not have_encoding and os.path.isfile('testsuite/encodings/%s.tar.gz' % encoding): - os.system("tar xvz -C testsuite/encodings -f testsuite/encodings/%s.tar.gz" % encoding) - have_encoding = os.path.isdir('testsuite/encodings/' + encoding) - -if have_encoding: - #enc_base_remote = "%s/xyz/%s/" % (pbucket(1), encoding) - enc_pattern = patterns[encoding] -else: - print encoding + " specific files not found." - -if not os.path.isdir('testsuite/crappy-file-name'): - os.system("tar xvz -C testsuite -f testsuite/crappy-file-name.tar.gz") - # TODO: also unpack if the tarball is newer than the directory timestamp - # for instance when a new version was pulled from SVN. - -def test(label, cmd_args = [], retcode = 0, must_find = [], must_not_find = [], must_find_re = [], must_not_find_re = []): - def command_output(): - print "----" - print " ".join([arg.find(" ")>=0 and "'%s'" % arg or arg for arg in cmd_args]) - print "----" - print stdout - print "----" - - def failure(message = ""): - global count_fail - if message: - message = " (%r)" % message - print "\x1b[31;1mFAIL%s\x1b[0m" % (message) - count_fail += 1 - command_output() - #return 1 - sys.exit(1) - def success(message = ""): - global count_pass - if message: - message = " (%r)" % message - print "\x1b[32;1mOK\x1b[0m%s" % (message) - count_pass += 1 - if verbose: - command_output() - return 0 - def skip(message = ""): - global count_skip - if message: - message = " (%r)" % message - print "\x1b[33;1mSKIP\x1b[0m%s" % (message) - count_skip += 1 - return 0 - def compile_list(_list, regexps = False): - if regexps == False: - _list = [re.escape(item.encode(encoding, "replace")) for item in _list] - - return [re.compile(item, re.MULTILINE) for item in _list] - - global test_counter - test_counter += 1 - print ("%3d %s " % (test_counter, label)).ljust(30, "."), - sys.stdout.flush() - - if run_tests.count(test_counter) == 0 or exclude_tests.count(test_counter) > 0: - return skip() - - if not cmd_args: - return skip() - - p = Popen(cmd_args, stdout = PIPE, stderr = STDOUT, universal_newlines = True) - stdout, stderr = p.communicate() - if retcode != p.returncode: - return failure("retcode: %d, expected: %d" % (p.returncode, retcode)) - - if type(must_find) not in [ list, tuple ]: must_find = [must_find] - if type(must_find_re) not in [ list, tuple ]: must_find_re = [must_find_re] - if type(must_not_find) not in [ list, tuple ]: must_not_find = [must_not_find] - if type(must_not_find_re) not in [ list, tuple ]: must_not_find_re = [must_not_find_re] - - find_list = [] - find_list.extend(compile_list(must_find)) - find_list.extend(compile_list(must_find_re, regexps = True)) - find_list_patterns = [] - find_list_patterns.extend(must_find) - find_list_patterns.extend(must_find_re) - - not_find_list = [] - not_find_list.extend(compile_list(must_not_find)) - not_find_list.extend(compile_list(must_not_find_re, regexps = True)) - not_find_list_patterns = [] - not_find_list_patterns.extend(must_not_find) - not_find_list_patterns.extend(must_not_find_re) - - for index in range(len(find_list)): - match = find_list[index].search(stdout) - if not match: - return failure("pattern not found: %s" % find_list_patterns[index]) - for index in range(len(not_find_list)): - match = not_find_list[index].search(stdout) - if match: - return failure("pattern found: %s (match: %s)" % (not_find_list_patterns[index], match.group(0))) - - return 
success() - -def test_s3cmd(label, cmd_args = [], **kwargs): - if not cmd_args[0].endswith("s3cmd"): - cmd_args.insert(0, "python") - cmd_args.insert(1, "s3cmd") - - return test(label, cmd_args, **kwargs) - -def test_mkdir(label, dir_name): - if os.name in ("posix", "nt"): - cmd = ['mkdir', '-p'] - else: - print "Unknown platform: %s" % os.name - sys.exit(1) - cmd.append(dir_name) - return test(label, cmd) - -def test_rmdir(label, dir_name): - if os.path.isdir(dir_name): - if os.name == "posix": - cmd = ['rm', '-rf'] - elif os.name == "nt": - cmd = ['rmdir', '/s/q'] - else: - print "Unknown platform: %s" % os.name - sys.exit(1) - cmd.append(dir_name) - return test(label, cmd) - else: - return test(label, []) - -def test_flushdir(label, dir_name): - test_rmdir(label + "(rm)", dir_name) - return test_mkdir(label + "(mk)", dir_name) - -def test_copy(label, src_file, dst_file): - if os.name == "posix": - cmd = ['cp', '-f'] - elif os.name == "nt": - cmd = ['copy'] - else: - print "Unknown platform: %s" % os.name - sys.exit(1) - cmd.append(src_file) - cmd.append(dst_file) - return test(label, cmd) - -bucket_prefix = u"%s-" % getpass.getuser() -print "Using bucket prefix: '%s'" % bucket_prefix - -argv = sys.argv[1:] -while argv: - arg = argv.pop(0) - if arg.startswith('--bucket-prefix='): - print "Usage: '--bucket-prefix PREFIX', not '--bucket-prefix=PREFIX'" - sys.exit(0) - if arg in ("-h", "--help"): - print "%s A B K..O -N" % sys.argv[0] - print "Run tests number A, B and K through to O, except for N" - sys.exit(0) - if arg in ("-l", "--list"): - exclude_tests = range(0, 999) - break - if arg in ("-v", "--verbose"): - verbose = True - continue - if arg in ("-p", "--bucket-prefix"): - try: - bucket_prefix = argv.pop(0) - except IndexError: - print "Bucket prefix option must explicitly supply a bucket name prefix" - sys.exit(0) - continue - if arg.find("..") >= 0: - range_idx = arg.find("..") - range_start = arg[:range_idx] or 0 - range_end = arg[range_idx+2:] or 999 - run_tests.extend(range(int(range_start), int(range_end) + 1)) - elif arg.startswith("-"): - exclude_tests.append(int(arg[1:])) - else: - run_tests.append(int(arg)) - -if not run_tests: - run_tests = range(0, 999) - -# helper functions for generating bucket names -def bucket(tail): - '''Test bucket name''' - label = 'autotest' - if str(tail) == '3': - label = 'Autotest' - return '%ss3cmd-%s-%s' % (bucket_prefix, label, tail) - -def pbucket(tail): - '''Like bucket(), but prepends "s3://" for you''' - return 's3://' + bucket(tail) - -## ====== Remove test buckets -test_s3cmd("Remove test buckets", ['rb', '-r', pbucket(1), pbucket(2), pbucket(3)], - must_find = [ "Bucket '%s/' removed" % pbucket(1), - "Bucket '%s/' removed" % pbucket(2), - "Bucket '%s/' removed" % pbucket(3) ]) - - -## ====== Create one bucket (EU) -test_s3cmd("Create one bucket (EU)", ['mb', '--bucket-location=EU', pbucket(1)], - must_find = "Bucket '%s/' created" % pbucket(1)) - - - -## ====== Create multiple buckets -test_s3cmd("Create multiple buckets", ['mb', pbucket(2), pbucket(3)], - must_find = [ "Bucket '%s/' created" % pbucket(2), "Bucket '%s/' created" % pbucket(3)]) - - -## ====== Invalid bucket name -test_s3cmd("Invalid bucket name", ["mb", "--bucket-location=EU", pbucket('EU')], - retcode = 1, - must_find = "ERROR: Parameter problem: Bucket name '%s' contains disallowed character" % bucket('EU'), - must_not_find_re = "Bucket.*created") - - -## ====== Buckets list -test_s3cmd("Buckets list", ["ls"], - must_find = [ "autotest-1", "autotest-2", "Autotest-3" 
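- # 'Autotest-3' is deliberately capitalised - see the bucket() helper above, which uses the 'Autotest' label for tail 3.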
], must_not_find_re = "autotest-EU") - - -## ====== Sync to S3 -test_s3cmd("Sync to S3", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings' ], - must_find = [ "WARNING: 32 non-printable characters replaced in: crappy-file-name/non-printables ^A^B^C^D^E^F^G^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z^[^\^]^^^_^? +-[\]^<>%%\"'#{}`&?.end", - "WARNING: File can not be uploaded: testsuite/permission-tests/permission-denied.txt: Permission denied", - "stored as '%s/xyz/crappy-file-name/non-printables ^A^B^C^D^E^F^G^H^I^J^K^L^M^N^O^P^Q^R^S^T^U^V^W^X^Y^Z^[^\^]^^^_^? +-[\\]^<>%%%%\"'#{}`&?.end'" % pbucket(1) ], - must_not_find_re = [ "demo/", "\.png$", "permission-denied-dir" ]) - -if have_encoding: - ## ====== Sync UTF-8 / GBK / ... to S3 - test_s3cmd("Sync %s to S3" % encoding, ['sync', 'testsuite/encodings/' + encoding, '%s/xyz/encodings/' % pbucket(1), '--exclude', 'demo/*', '--no-encrypt' ], - must_find = [ u"File 'testsuite/encodings/%(encoding)s/%(pattern)s' stored as '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s'" % { 'encoding' : encoding, 'pattern' : enc_pattern , 'pbucket' : pbucket(1)} ]) - - -## ====== List bucket content -test_s3cmd("List bucket content", ['ls', '%s/xyz/' % pbucket(1) ], - must_find_re = [ u"DIR %s/xyz/binary/$" % pbucket(1) , u"DIR %s/xyz/etc/$" % pbucket(1) ], - must_not_find = [ u"random-crap.md5", u"/demo" ]) - - -## ====== List bucket recursive -must_find = [ u"%s/xyz/binary/random-crap.md5" % pbucket(1) ] -if have_encoding: - must_find.append(u"%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s" % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) }) - -test_s3cmd("List bucket recursive", ['ls', '--recursive', pbucket(1)], - must_find = must_find, - must_not_find = [ "logo.png" ]) - -## ====== FIXME -# test_s3cmd("Recursive put", ['put', '--recursive', 'testsuite/etc', '%s/xyz/' % pbucket(1) ]) - - -## ====== Clean up local destination dir -test_flushdir("Clean testsuite-out/", "testsuite-out") - - -## ====== Sync from S3 -must_find = [ "File '%s/xyz/binary/random-crap.md5' stored as 'testsuite-out/xyz/binary/random-crap.md5'" % pbucket(1) ] -if have_encoding: - must_find.append(u"File '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s' stored as 'testsuite-out/xyz/encodings/%(encoding)s/%(pattern)s' " % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) }) -test_s3cmd("Sync from S3", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'], - must_find = must_find) - - -## ====== Remove 'demo' directory -test_rmdir("Remove 'dir-test/'", "testsuite-out/xyz/dir-test/") - - -## ====== Create dir with name of a file -test_mkdir("Create file-dir dir", "testsuite-out/xyz/dir-test/file-dir") - - -## ====== Skip dst dirs -test_s3cmd("Skip over dir", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'], - must_find = "WARNING: testsuite-out/xyz/dir-test/file-dir is a directory - skipping over") - - -## ====== Clean up local destination dir -test_flushdir("Clean testsuite-out/", "testsuite-out") - - -## ====== Put public, guess MIME -test_s3cmd("Put public, guess MIME", ['put', '--guess-mime-type', '--acl-public', 'testsuite/etc/logo.png', '%s/xyz/etc/logo.png' % pbucket(1)], - must_find = [ "stored as '%s/xyz/etc/logo.png'" % pbucket(1) ]) - - -## ====== Retrieve from URL -if have_wget: - test("Retrieve from URL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)], - must_find_re = [ 
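- # The saved [22059/22059] pattern presumably matches the byte size of testsuite/etc/logo.png.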
'logo.png.*saved \[22059/22059\]' ]) - - -## ====== Change ACL to Private -test_s3cmd("Change ACL to Private", ['setacl', '--acl-private', '%s/xyz/etc/l*.png' % pbucket(1)], - must_find = [ "logo.png: ACL set to Private" ]) - - -## ====== Verify Private ACL -if have_wget: - test("Verify Private ACL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)], - retcode = 8, - must_find_re = [ 'ERROR 403: Forbidden' ]) - - -## ====== Change ACL to Public -test_s3cmd("Change ACL to Public", ['setacl', '--acl-public', '--recursive', '%s/xyz/etc/' % pbucket(1) , '-v'], - must_find = [ "logo.png: ACL set to Public" ]) - - -## ====== Verify Public ACL -if have_wget: - test("Verify Public ACL", ['wget', '-O', 'testsuite-out/logo.png', 'http://%s.s3.amazonaws.com/xyz/etc/logo.png' % bucket(1)], - must_find_re = [ 'logo.png.*saved \[22059/22059\]' ]) - - -## ====== Sync more to S3 -test_s3cmd("Sync more to S3", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt' ], - must_find = [ "File 'testsuite/demo/some-file.xml' stored as '%s/xyz/demo/some-file.xml' " % pbucket(1) ], - must_not_find = [ "File 'testsuite/etc/linked.png' stored as '%s/xyz/etc/linked.png" % pbucket(1) ]) - - -## ====== Don't check MD5 sum on Sync -test_copy("Change file cksum1.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum1.txt") -test_copy("Change file cksum33.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum33.txt") -test_s3cmd("Don't check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--no-check-md5'], - must_find = [ "cksum33.txt" ], - must_not_find = [ "cksum1.txt" ]) - - -## ====== Check MD5 sum on Sync -test_s3cmd("Check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--check-md5'], - must_find = [ "cksum1.txt" ]) - - -## ====== Rename within S3 -test_s3cmd("Rename within S3", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)], - must_find = [ 'File %s/xyz/etc/logo.png moved to %s/xyz/etc2/Logo.PNG' % (pbucket(1), pbucket(1))]) - - -## ====== Rename (NoSuchKey) -test_s3cmd("Rename (NoSuchKey)", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)], - retcode = 1, - must_find_re = [ 'ERROR:.*NoSuchKey' ], - must_not_find = [ 'File %s/xyz/etc/logo.png moved to %s/xyz/etc2/Logo.PNG' % (pbucket(1), pbucket(1)) ]) - -## ====== Sync more from S3 (invalid src) -test_s3cmd("Sync more from S3 (invalid src)", ['sync', '--delete-removed', '%s/xyz/DOESNOTEXIST' % pbucket(1), 'testsuite-out'], - must_not_find = [ "deleted: testsuite-out/logo.png" ]) - -## ====== Sync more from S3 -test_s3cmd("Sync more from S3", ['sync', '--delete-removed', '%s/xyz' % pbucket(1), 'testsuite-out'], - must_find = [ "deleted: testsuite-out/logo.png", - "File '%s/xyz/etc2/Logo.PNG' stored as 'testsuite-out/xyz/etc2/Logo.PNG' (22059 bytes" % pbucket(1), - "File '%s/xyz/demo/some-file.xml' stored as 'testsuite-out/xyz/demo/some-file.xml' " % pbucket(1) ], - must_not_find_re = [ "not-deleted.*etc/logo.png" ]) - - -## ====== Make dst dir for get -test_rmdir("Remove dst dir for get", "testsuite-out") - - -## ====== Get multiple files -test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'], - retcode = 1, - must_find = [ 'Destination must be a directory or stdout when downloading multiple sources.' 
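- # s3cmd refuses to download several objects into a single file, hence retcode 1 and the error text above.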
]) - - -## ====== Make dst dir for get -test_mkdir("Make dst dir for get", "testsuite-out") - - -## ====== Get multiple files -test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'], - must_find = [ u"saved as 'testsuite-out/Logo.PNG'", u"saved as 'testsuite-out/AtomicClockRadio.ttf'" ]) - -## ====== Upload files differing in capitalisation -test_s3cmd("blah.txt / Blah.txt", ['put', '-r', 'testsuite/blahBlah', pbucket(1)], - must_find = [ '%s/blahBlah/Blah.txt' % pbucket(1), '%s/blahBlah/blah.txt' % pbucket(1)]) - -## ====== Copy between buckets -test_s3cmd("Copy between buckets", ['cp', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc2/logo.png' % pbucket(3)], - must_find = [ "File %s/xyz/etc2/Logo.PNG copied to %s/xyz/etc2/logo.png" % (pbucket(1), pbucket(3)) ]) - -## ====== Recursive copy -test_s3cmd("Recursive copy, set ACL", ['cp', '-r', '--acl-public', '%s/xyz/' % pbucket(1), '%s/copy' % pbucket(2), '--exclude', 'demo/dir?/*.txt', '--exclude', 'non-printables*'], - must_find = [ "File %s/xyz/etc2/Logo.PNG copied to %s/copy/etc2/Logo.PNG" % (pbucket(1), pbucket(2)), - "File %s/xyz/blahBlah/Blah.txt copied to %s/copy/blahBlah/Blah.txt" % (pbucket(1), pbucket(2)), - "File %s/xyz/blahBlah/blah.txt copied to %s/copy/blahBlah/blah.txt" % (pbucket(1), pbucket(2)) ], - must_not_find = [ "demo/dir1/file1-1.txt" ]) - -## ====== Verify ACL and MIME type -test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ], - must_find_re = [ "MIME type:.*image/png", - "ACL:.*\*anon\*: READ", - "URL:.*http://%s.s3.amazonaws.com/copy/etc2/Logo.PNG" % bucket(2) ]) - -## ====== Rename within S3 -test_s3cmd("Rename within S3", ['mv', '%s/copy/etc2/Logo.PNG' % pbucket(2), '%s/copy/etc/logo.png' % pbucket(2)], - must_find = [ 'File %s/copy/etc2/Logo.PNG moved to %s/copy/etc/logo.png' % (pbucket(2), pbucket(2))]) - -## ====== Sync between buckets -test_s3cmd("Sync remote2remote", ['sync', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--delete-removed', '--exclude', 'non-printables*'], - must_find = [ "File %s/xyz/demo/dir1/file1-1.txt copied to %s/copy/demo/dir1/file1-1.txt" % (pbucket(1), pbucket(2)), - "remote copy: etc/logo.png -> etc2/Logo.PNG", - "deleted: '%s/copy/etc/logo.png'" % pbucket(2) ], - must_not_find = [ "blah.txt" ]) - -## ====== Don't Put symbolic link -test_s3cmd("Don't put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),], - must_not_find_re = [ "linked1.png"]) - -## ====== Put symbolic link -test_s3cmd("Put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),'--follow-symlinks' ], - must_find = [ "File 'testsuite/etc/linked1.png' stored as '%s/xyz/linked1.png'" % pbucket(1)]) - -## ====== Sync symbolic links -test_s3cmd("Sync symbolic links", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--follow-symlinks' ], - must_find = ["remote copy: etc2/Logo.PNG -> etc/linked.png"], - # Don't want to recursively copy linked directories! 
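- # etc/linked.png has the same content as the already-uploaded etc2/Logo.PNG, so a remote copy is expected instead of a fresh upload.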
- must_not_find_re = ["etc/more/linked-dir/more/give-me-more.txt", - "etc/brokenlink.png"], - ) - -## ====== Multi source move -test_s3cmd("Multi-source move", ['mv', '-r', '%s/copy/blahBlah/Blah.txt' % pbucket(2), '%s/copy/etc/' % pbucket(2), '%s/moved/' % pbucket(2)], - must_find = [ "File %s/copy/blahBlah/Blah.txt moved to %s/moved/Blah.txt" % (pbucket(2), pbucket(2)), - "File %s/copy/etc/AtomicClockRadio.ttf moved to %s/moved/AtomicClockRadio.ttf" % (pbucket(2), pbucket(2)), - "File %s/copy/etc/TypeRa.ttf moved to %s/moved/TypeRa.ttf" % (pbucket(2), pbucket(2)) ], - must_not_find = [ "blah.txt" ]) - -## ====== Verify move -test_s3cmd("Verify move", ['ls', '-r', pbucket(2)], - must_find = [ "%s/moved/Blah.txt" % pbucket(2), - "%s/moved/AtomicClockRadio.ttf" % pbucket(2), - "%s/moved/TypeRa.ttf" % pbucket(2), - "%s/copy/blahBlah/blah.txt" % pbucket(2) ], - must_not_find = [ "%s/copy/blahBlah/Blah.txt" % pbucket(2), - "%s/copy/etc/AtomicClockRadio.ttf" % pbucket(2), - "%s/copy/etc/TypeRa.ttf" % pbucket(2) ]) - -## ====== Simple delete -test_s3cmd("Simple delete", ['del', '%s/xyz/etc2/Logo.PNG' % pbucket(1)], - must_find = [ "File %s/xyz/etc2/Logo.PNG deleted" % pbucket(1) ]) - - -## ====== Recursive delete maximum exceeed -test_s3cmd("Recursive delete maximum exceeded", ['del', '--recursive', '--max-delete=1', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)], - must_not_find = [ "File %s/xyz/etc/TypeRa.ttf deleted" % pbucket(1) ]) - -## ====== Recursive delete -test_s3cmd("Recursive delete", ['del', '--recursive', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)], - must_find = [ "File %s/xyz/etc/TypeRa.ttf deleted" % pbucket(1) ], - must_find_re = [ "File .*/etc/logo.png deleted" ], - must_not_find = [ "AtomicClockRadio.ttf" ]) - -## ====== Recursive delete all -test_s3cmd("Recursive delete all", ['del', '--recursive', '--force', pbucket(1)], - must_find_re = [ "File .*binary/random-crap deleted" ]) - - -## ====== Remove empty bucket -test_s3cmd("Remove empty bucket", ['rb', pbucket(1)], - must_find = [ "Bucket '%s/' removed" % pbucket(1) ]) - - -## ====== Remove remaining buckets -test_s3cmd("Remove remaining buckets", ['rb', '--recursive', pbucket(2), pbucket(3)], - must_find = [ "Bucket '%s/' removed" % pbucket(2), - "Bucket '%s/' removed" % pbucket(3) ]) - -# vim:et:ts=4:sts=4:ai diff --git a/s3cmd b/s3cmd index e696a30..b6abacc 100755 --- a/s3cmd +++ b/s3cmd @@ -1,15 +1,28 @@ #!/usr/bin/env python -## Amazon S3 manager -## Author: Michal Ludvig -## http://www.logix.cz/michal -## License: GPL Version 2 +## -------------------------------------------------------------------- +## s3cmd - S3 client +## +## Authors : Michal Ludvig and contributors +## Copyright : TGRMN Software - http://www.tgrmn.com - and contributors +## Website : http://s3tools.org +## License : GPL Version 2 +## -------------------------------------------------------------------- +## This program is free software; you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation; either version 2 of the License, or +## (at your option) any later version. +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. 
+## -------------------------------------------------------------------- import sys if float("%d.%d" %(sys.version_info[0], sys.version_info[1])) < 2.4: - sys.stderr.write("ERROR: Python 2.4 or higher required, sorry.\n") - sys.exit(1) + sys.stderr.write(u"ERROR: Python 2.4 or higher required, sorry.\n") + sys.exit(EX_OSFILE) import logging import time @@ -46,10 +59,15 @@ uri = S3Uri(args[0]) if uri.type == "s3" and uri.has_bucket(): subcmd_bucket_usage(s3, uri) - return + return EX_OK subcmd_bucket_usage_all(s3) + return EX_OK def subcmd_bucket_usage_all(s3): + """ + Returns: sum of bucket sizes as integer + Raises: S3Error + """ response = s3.list_all_buckets() buckets_size = 0 @@ -61,8 +79,14 @@ total_size_str = str(total_size) + size_coeff output(u"".rjust(8, "-")) output(u"%s Total" % (total_size_str.ljust(8))) + return size def subcmd_bucket_usage(s3, uri): + """ + Returns: bucket size as integer + Raises: S3Error + """ + bucket = uri.bucket() object = uri.object() @@ -78,9 +102,7 @@ except S3Error, e: if S3.codes.has_key(e.info["Code"]): error(S3.codes[e.info["Code"]] % bucket) - return - else: - raise + raise # objects in the current scope: for obj in response["list"]: @@ -101,8 +123,9 @@ uri = S3Uri(args[0]) if uri.type == "s3" and uri.has_bucket(): subcmd_bucket_list(s3, uri) - return + return EX_OK subcmd_buckets_list_all(s3) + return EX_OK def cmd_buckets_list_all_all(args): s3 = S3(Config()) @@ -112,7 +135,7 @@ for bucket in response["list"]: subcmd_bucket_list(s3, S3Uri("s3://" + bucket["Name"])) output(u"") - + return EX_OK def subcmd_buckets_list_all(s3): response = s3.list_all_buckets() @@ -134,9 +157,7 @@ except S3Error, e: if S3.codes.has_key(e.info["Code"]): error(S3.codes[e.info["Code"]] % bucket) - return - else: - raise + raise if cfg.list_md5: format_string = u"%(timestamp)16s %(size)9s%(coeff)1s %(md5)32s %(uri)s" @@ -183,9 +204,8 @@ except S3Error, e: if S3.codes.has_key(e.info["Code"]): error(S3.codes[e.info["Code"]] % uri.bucket()) - return - else: - raise + raise + return EX_OK def cmd_website_info(args): s3 = S3(Config()) @@ -205,9 +225,8 @@ except S3Error, e: if S3.codes.has_key(e.info["Code"]): error(S3.codes[e.info["Code"]] % uri.bucket()) - return - else: - raise + raise + return EX_OK def cmd_website_create(args): s3 = S3(Config()) @@ -221,9 +240,8 @@ except S3Error, e: if S3.codes.has_key(e.info["Code"]): error(S3.codes[e.info["Code"]] % uri.bucket()) - return - else: - raise + raise + return EX_OK def cmd_website_delete(args): s3 = S3(Config()) @@ -237,32 +255,59 @@ except S3Error, e: if S3.codes.has_key(e.info["Code"]): error(S3.codes[e.info["Code"]] % uri.bucket()) - return - else: - raise - -def cmd_bucket_delete(args): - def _bucket_delete_one(uri): - try: - response = s3.bucket_delete(uri.bucket()) - except S3Error, e: - if e.info['Code'] == 'BucketNotEmpty' and (cfg.force or cfg.recursive): - warning(u"Bucket is not empty. Removing all the objects from it first. 
This may take some time...") - subcmd_object_del_uri(uri.uri(), recursive = True) - return _bucket_delete_one(uri) - elif S3.codes.has_key(e.info["Code"]): - error(S3.codes[e.info["Code"]] % uri.bucket()) - return - else: - raise - + raise + return EX_OK + +def cmd_expiration_set(args): s3 = S3(Config()) for arg in args: uri = S3Uri(arg) if not uri.type == "s3" or not uri.has_bucket() or uri.has_object(): raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg) - _bucket_delete_one(uri) - output(u"Bucket '%s' removed" % uri.uri()) + try: + response = s3.expiration_set(uri, cfg.bucket_location) + if response["status"] is 200: + output(u"Bucket '%s': expiration configuration is set." % (uri.uri())) + elif response["status"] is 204: + output(u"Bucket '%s': expiration configuration is deleted." % (uri.uri())) + except S3Error, e: + if S3.codes.has_key(e.info["Code"]): + error(S3.codes[e.info["Code"]] % uri.bucket()) + raise + return EX_OK + +def cmd_bucket_delete(args): + def _bucket_delete_one(uri): + try: + response = s3.bucket_delete(uri.bucket()) + output(u"Bucket '%s' removed" % uri.uri()) + except S3Error, e: + if e.info['Code'] == 'NoSuchBucket': + if cfg.force: + return EX_OK + else: + return EX_USAGE + if e.info['Code'] == 'BucketNotEmpty' and (cfg.force or cfg.recursive): + warning(u"Bucket is not empty. Removing all the objects from it first. This may take some time...") + rc = subcmd_batch_del(uri_str = uri.uri()) + if rc == EX_OK: + return _bucket_delete_one(uri) + else: + output(u"Bucket was not removed") + elif S3.codes.has_key(e.info["Code"]): + error(S3.codes[e.info["Code"]] % uri.bucket()) + raise + return EX_OK + + s3 = S3(Config()) + for arg in args: + uri = S3Uri(arg) + if not uri.type == "s3" or not uri.has_bucket() or uri.has_object(): + raise ParameterError("Expecting S3 URI with just the bucket name set instead of '%s'" % arg) + rc = _bucket_delete_one(uri) + if rc != EX_OK: + return rc + return EX_OK def cmd_object_put(args): cfg = Config() @@ -275,18 +320,19 @@ destination_base_uri = S3Uri(args.pop()) if destination_base_uri.type != 's3': raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri) - destination_base = str(destination_base_uri) + destination_base = unicode(destination_base_uri) if len(args) == 0: raise ParameterError("Nothing to upload. 
Expecting a local file or directory.") - local_list, single_file_local = fetch_local_list(args, is_src = True) - - local_list, exclude_list = filter_exclude_include(local_list) + local_list, single_file_local, exclude_list = fetch_local_list(args, is_src = True) local_count = len(local_list) info(u"Summary: %d local files to upload" % local_count) + + if local_count == 0: + raise ParameterError("Nothing to upload.") if local_count > 0: if not single_file_local and '-' in local_list.keys(): @@ -312,7 +358,7 @@ output(u"upload: %s -> %s" % (nicekey, local_list[key]['remote_uri'])) warning(u"Exiting now because of --dry-run") - return + return EX_OK seq = 0 for key in local_list: @@ -325,7 +371,7 @@ full_name = full_name_orig seq_label = "[%d of %d]" % (seq, local_count) if Config().encrypt: - exitcode, full_name, extra_headers["x-amz-meta-s3tools-gpgenc"] = gpg_encrypt(full_name_orig) + gpg_exitcode, full_name, extra_headers["x-amz-meta-s3tools-gpgenc"] = gpg_encrypt(full_name_orig) if cfg.preserve_attrs or local_list[key]['size'] > (cfg.multipart_chunk_size_mb * 1024 * 1024): attr_header = _build_attr_header(local_list, key) debug(u"attr_header: %s" % attr_header) @@ -350,6 +396,7 @@ if Config().encrypt and full_name != full_name_orig: debug(u"Removing temporary encrypted file: %s" % unicodise(full_name)) os.remove(full_name) + return EX_OK def cmd_object_get(args): cfg = Config() @@ -398,8 +445,7 @@ if len(args) == 0: raise ParameterError("Nothing to download. Expecting S3 URI.") - remote_list = fetch_remote_list(args, require_attribs = False) - remote_list, exclude_list = filter_exclude_include(remote_list) + remote_list, exclude_list = fetch_remote_list(args, require_attribs = False) remote_count = len(remote_list) @@ -430,7 +476,7 @@ output(u"download: %s -> %s" % (remote_list[key]['object_uri_str'], remote_list[key]['local_filename'])) warning(u"Exiting now because of --dry-run") - return + return EX_OK seq = 0 for key in remote_list: @@ -479,6 +525,12 @@ continue try: response = s3.object_get(uri, dst_stream, start_position = start_position, extra_label = seq_label) + except S3DownloadError, e: + error(u"%s: Skipping that file. This is usually a transient error, please try again later." % e) + if not file_exists: # Delete, only if file didn't exist before! + debug(u"object_get failed for '%s', deleting..." % (destination,)) + os.unlink(destination) + continue except S3Error, e: if not file_exists: # Delete, only if file didn't exist before! debug(u"object_get failed for '%s', deleting..." 
% (destination,)) @@ -488,6 +540,10 @@ if response["headers"].has_key("x-amz-meta-s3tools-gpgenc"): gpg_decrypt(destination, response["headers"]["x-amz-meta-s3tools-gpgenc"]) response["size"] = os.stat(destination)[6] + if response["headers"].has_key("last-modified") and destination != "-": + last_modified = time.mktime(time.strptime(response["headers"]["last-modified"], "%a, %d %b %Y %H:%M:%S GMT")) + os.utime(destination, (last_modified, last_modified)) + debug("set mtime to %s" % last_modified) if not Config().progress_meter and destination != "-": speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True) output(u"File %s saved as '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s)" % @@ -495,34 +551,90 @@ if Config().delete_after_fetch: s3.object_delete(uri) output(u"File %s removed after fetch" % (uri)) + return EX_OK def cmd_object_del(args): + recursive = Config().recursive for uri_str in args: uri = S3Uri(uri_str) if uri.type != "s3": raise ParameterError("Expecting S3 URI instead of '%s'" % uri_str) if not uri.has_object(): - if Config().recursive and not Config().force: + if recursive and not Config().force: raise ParameterError("Please use --force to delete ALL contents of %s" % uri_str) - elif not Config().recursive: + elif not recursive: raise ParameterError("File name required, not only the bucket name. Alternatively use --recursive") - subcmd_object_del_uri(uri_str) + + if not recursive: + rc = subcmd_object_del_uri(uri_str) + else: + rc = subcmd_batch_del(uri_str = uri_str) + if not rc: + return rc + return EX_OK + +def subcmd_batch_del(uri_str = None, bucket = None, remote_list = None): + """ + Returns: EX_OK + Raises: ValueError + """ + + def _batch_del(remote_list): + s3 = S3(cfg) + to_delete = remote_list[:1000] + remote_list = remote_list[1000:] + while len(to_delete): + debug(u"Batch delete %d, remaining %d" % (len(to_delete), len(remote_list))) + if not cfg.dry_run: + response = s3.object_batch_delete(to_delete) + output('\n'.join((u"File %s deleted" % to_delete[p]['object_uri_str']) for p in to_delete)) + to_delete = remote_list[:1000] + remote_list = remote_list[1000:] + + if remote_list is not None and len(remote_list) == 0: + return False + + if len([item for item in [uri_str, bucket, remote_list] if item]) != 1: + raise ValueError("One and only one of 'uri_str', 'bucket', 'remote_list' can be specified.") + + if bucket: # bucket specified + uri_str = "s3://%s" % bucket + if remote_list is None: # uri_str specified + remote_list, exclude_list = fetch_remote_list(uri_str, require_attribs = False) + + if len(remote_list) == 0: + warning(u"Remote list is empty.") + return EX_OK + + if cfg.max_delete > 0 and len(remote_list) > cfg.max_delete: + warning(u"delete: maximum requested number of deletes would be exceeded, none performed.") + return EX_OK + + _batch_del(remote_list) + + if cfg.dry_run: + warning(u"Exiting now because of --dry-run") + return EX_OK + return EX_OK def subcmd_object_del_uri(uri_str, recursive = None): + """ + Returns: True if XXX, False if XXX + Raises: ValueError + """ s3 = S3(cfg) if recursive is None: recursive = cfg.recursive - remote_list = fetch_remote_list(uri_str, require_attribs = False, recursive = recursive) - remote_list, exclude_list = filter_exclude_include(remote_list) + remote_list, exclude_list = fetch_remote_list(uri_str, require_attribs = False, recursive = recursive) remote_count = len(remote_list) info(u"Summary: %d remote files to delete" % remote_count) if cfg.max_delete > 0 and remote_count > 
cfg.max_delete: warning(u"delete: maximum requested number of deletes would be exceeded, none performed.") - return + return False if cfg.dry_run: for key in exclude_list: @@ -531,21 +643,21 @@ output(u"delete: %s" % remote_list[key]['object_uri_str']) warning(u"Exiting now because of --dry-run") - return + return True for key in remote_list: item = remote_list[key] response = s3.object_delete(S3Uri(item['object_uri_str'])) output(u"File %s deleted" % item['object_uri_str']) - + return True + def cmd_object_restore(args): s3 = S3(cfg) - + if cfg.restore_days < 1: raise ParameterError("You must restore a file for 1 or more days") - remote_list = fetch_remote_list(args, require_attribs = False, recursive = cfg.recursive) - remote_list, exclude_list = filter_exclude_include(remote_list) + remote_list, exclude_list = fetch_remote_list(args, require_attribs = False, recursive = cfg.recursive) remote_count = len(remote_list) @@ -558,29 +670,35 @@ output(u"restore: %s" % remote_list[key]['object_uri_str']) warning(u"Exiting now because of --dry-run") - return + return EX_OK for key in remote_list: item = remote_list[key] - + uri = S3Uri(item['object_uri_str']) if not item['object_uri_str'].endswith("/"): response = s3.object_restore(S3Uri(item['object_uri_str'])) output(u"File %s restoration started" % item['object_uri_str']) else: debug(u"Skipping directory since only files may be restored") - + return EX_OK + def subcmd_cp_mv(args, process_fce, action_str, message): - if len(args) < 2: + if action_str != 'modify' and len(args) < 2: raise ParameterError("Expecting two or more S3 URIs for " + action_str) - dst_base_uri = S3Uri(args.pop()) + if action_str == 'modify' and len(args) < 1: + raise ParameterError("Expecting one or more S3 URIs for " + action_str) + if action_str != 'modify': + dst_base_uri = S3Uri(args.pop()) + else: + dst_base_uri = S3Uri(args[-1]) + if dst_base_uri.type != "s3": raise ParameterError("Destination must be S3 URI. 
To download a file use 'get' or 'sync'.") destination_base = dst_base_uri.uri() - remote_list = fetch_remote_list(args, require_attribs = False) - remote_list, exclude_list = filter_exclude_include(remote_list) + remote_list, exclude_list = fetch_remote_list(args, require_attribs = False) remote_count = len(remote_list) @@ -605,7 +723,7 @@ output(u"%s: %s -> %s" % (action_str, remote_list[key]['object_uri_str'], remote_list[key]['dest_name'])) warning(u"Exiting now because of --dry-run") - return + return EX_OK seq = 0 for key in remote_list: @@ -627,14 +745,19 @@ warning(u"Key not found %s" % item['object_uri_str']) else: raise + return EX_OK def cmd_cp(args): s3 = S3(Config()) - subcmd_cp_mv(args, s3.object_copy, "copy", "File %(src)s copied to %(dst)s") + return subcmd_cp_mv(args, s3.object_copy, "copy", u"File %(src)s copied to %(dst)s") + +def cmd_modify(args): + s3 = S3(Config()) + return subcmd_cp_mv(args, s3.object_copy, "modify", u"File %(src)s modified") def cmd_mv(args): s3 = S3(Config()) - subcmd_cp_mv(args, s3.object_move, "move", "File %(src)s moved to %(dst)s") + return subcmd_cp_mv(args, s3.object_move, "move", u"File %(src)s moved to %(dst)s") def cmd_info(args): s3 = S3(Config()) @@ -667,9 +790,23 @@ info = s3.bucket_info(uri) output(u"%s (bucket):" % uri.uri()) output(u" Location: %s" % info['bucket-location']) + try: + expiration = s3.expiration_info(uri, cfg.bucket_location) + expiration_desc = "Expiration Rule: " + if expiration['prefix'] == "": + expiration_desc += "all objects in this bucket " + else: + expiration_desc += "objects with key prefix '" + expiration['prefix'] + "' " + expiration_desc += "will expire in '" + if expiration['days']: + expiration_desc += expiration['days'] + "' day(s) after creation" + elif expiration['date']: + expiration_desc += expiration['date'] + "' " + output(u" %s" % expiration_desc) + except: + output(u" Expiration Rule: none") acl = s3.get_acl(uri) acl_grant_list = acl.getGrantList() - try: policy = s3.get_policy(uri) output(u" policy: %s" % policy) @@ -684,9 +821,8 @@ except S3Error, e: if S3.codes.has_key(e.info["Code"]): error(S3.codes[e.info["Code"]] % uri.bucket()) - return - else: - raise + raise + return EX_OK def filedicts_to_keys(*args): keys = set() @@ -697,36 +833,19 @@ return keys def cmd_sync_remote2remote(args): - def _do_deletes(s3, dst_list): - if cfg.max_delete > 0 and len(dst_list) > cfg.max_delete: - warning(u"delete: maximum requested number of deletes would be exceeded, none performed.") - return - # Delete items in destination that are not in source - if cfg.dry_run: - for key in dst_list: - output(u"delete: %s" % dst_list[key]['object_uri_str']) - else: - for key in dst_list: - uri = S3Uri(dst_list[key]['object_uri_str']) - s3.object_delete(uri) - output(u"deleted: '%s'" % uri) - s3 = S3(Config()) # Normalise s3://uri (e.g. 
assert trailing slash) destination_base = unicode(S3Uri(args[-1])) - src_list = fetch_remote_list(args[:-1], recursive = True, require_attribs = True) - dst_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True) + src_list, src_exclude_list = fetch_remote_list(args[:-1], recursive = True, require_attribs = True) + dst_list, dst_exclude_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True) src_count = len(src_list) orig_src_count = src_count dst_count = len(dst_list) info(u"Found %d source files, %d destination files" % (src_count, dst_count)) - - src_list, src_exclude_list = filter_exclude_include(src_list) - dst_list, dst_exclude_list = filter_exclude_include(dst_list) src_list, dst_list, update_list, copy_pairs = compare_filelists(src_list, dst_list, src_remote = True, dst_remote = True, delay_updates = cfg.delay_updates) @@ -752,7 +871,7 @@ for key in src_list: output(u"Sync: %s -> %s" % (src_list[key]['object_uri_str'], src_list[key]['target_uri'])) warning(u"Exiting now because of --dry-run") - return + return EX_OK # if there are copy pairs, we can't do delete_before, on the chance # we need one of the to-be-deleted files as a copy source. @@ -765,7 +884,7 @@ # Delete items in destination that are not in source if cfg.delete_removed and not cfg.delete_after: - _do_deletes(s3, dst_list) + subcmd_batch_del(remote_list = dst_list) def _upload(src_list, seq, src_count): file_list = src_list.keys() @@ -809,7 +928,8 @@ # Delete items in destination that are not in source if cfg.delete_removed and cfg.delete_after: - _do_deletes(s3, dst_list) + subcmd_batch_del(remote_list = dst_list) + return EX_OK def cmd_sync_remote2local(args): def _do_deletes(local_list): @@ -823,17 +943,14 @@ s3 = S3(Config()) destination_base = args[-1] - local_list, single_file_local = fetch_local_list(destination_base, is_src = False, recursive = True) - remote_list = fetch_remote_list(args[:-1], recursive = True, require_attribs = True) + local_list, single_file_local, dst_exclude_list = fetch_local_list(destination_base, is_src = False, recursive = True) + remote_list, src_exclude_list = fetch_remote_list(args[:-1], recursive = True, require_attribs = True) local_count = len(local_list) remote_count = len(remote_list) orig_remote_count = remote_count info(u"Found %d remote files, %d local files" % (remote_count, local_count)) - - remote_list, src_exclude_list = filter_exclude_include(remote_list) - local_list, dst_exclude_list = filter_exclude_include(local_list) remote_list, local_list, update_list, copy_pairs = compare_filelists(remote_list, local_list, src_remote = True, dst_remote = False, delay_updates = cfg.delay_updates) @@ -844,7 +961,6 @@ info(u"Summary: %d remote files to download, %d local files to delete, %d local files to hardlink" % (remote_count + update_count, local_count, copy_pairs_count)) - empty_fname_re = re.compile(r'\A\s*\Z') def _set_local_filename(remote_list, destination_base): if len(remote_list) == 0: return @@ -857,12 +973,7 @@ if destination_base[-1] != os.path.sep: destination_base += os.path.sep for key in remote_list: - local_basename = key - if empty_fname_re.match(key): - # Objects may exist on S3 with empty names (''), which don't map so well to common filesystems. 
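- # With an empty basename, local_filename would equal destination_base itself, hence the placeholder name below.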
- local_basename = '__AWS-EMPTY-OBJECT-NAME__' - warning(u"Empty object name on S3 found, saving locally as %s" % (local_basename)) - local_filename = destination_base + local_basename + local_filename = destination_base + key if os.path.sep != "/": local_filename = os.path.sep.join(local_filename.split("/")) remote_list[key]['local_filename'] = deunicodise(local_filename) @@ -883,7 +994,7 @@ output(u"download: %s -> %s" % (update_list[key]['object_uri_str'], update_list[key]['local_filename'])) warning(u"Exiting now because of --dry-run") - return + return EX_OK # if there are copy pairs, we can't do delete_before, on the chance # we need one of the to-be-deleted files as a copy source. @@ -898,6 +1009,8 @@ _do_deletes(local_list) def _download(remote_list, seq, total, total_size, dir_cache): + original_umask = os.umask(0); + os.umask(original_umask); file_list = remote_list.keys() file_list.sort() for file in file_list: @@ -905,33 +1018,61 @@ item = remote_list[file] uri = S3Uri(item['object_uri_str']) dst_file = item['local_filename'] + is_empty_directory = dst_file.endswith('/') seq_label = "[%d of %d]" % (seq, total) try: dst_dir = os.path.dirname(dst_file) if not dir_cache.has_key(dst_dir): dir_cache[dst_dir] = Utils.mkdir_with_parents(dst_dir) if dir_cache[dst_dir] == False: - warning(u"%s: destination directory not writable: %s" % (file, dst_dir)) + warning(u"%s: destination directory not writable: %s" % (unicodise(file), unicodise(dst_dir))) continue + try: - debug(u"dst_file=%s" % unicodise(dst_file)) - # create temporary files (of type .s3cmd.XXXX.tmp) in the same directory - # for downloading and then rename once downloaded - chkptfd, chkptfname = tempfile.mkstemp(".tmp",".s3cmd.",os.path.dirname(dst_file)) - debug(u"created chkptfname=%s" % unicodise(chkptfname)) - dst_stream = os.fdopen(chkptfd, "wb") - response = s3.object_get(uri, dst_stream, extra_label = seq_label) - dst_stream.close() - # download completed, rename the file to destination - os.rename(chkptfname, dst_file) - + if not is_empty_directory: # ignore empty directory at S3: + debug(u"dst_file=%s" % unicodise(dst_file)) + # create temporary files (of type .s3cmd.XXXX.tmp) in the same directory + # for downloading and then rename once downloaded + chkptfd, chkptfname = tempfile.mkstemp(".tmp",".s3cmd.",os.path.dirname(dst_file)) + debug(u"created chkptfname=%s" % unicodise(chkptfname)) + dst_stream = os.fdopen(chkptfd, "wb") + response = s3.object_get(uri, dst_stream, extra_label = seq_label) + dst_stream.close() + # download completed, rename the file to destination + os.rename(chkptfname, dst_file) + debug(u"renamed chkptfname=%s to dst_file=%s" % (unicodise(chkptfname), unicodise(dst_file))) + except OSError, e: + if e.errno == errno.EISDIR: + warning(u"%s is a directory - skipping over" % unicodise(dst_file)) + continue + else: + raise + except S3DownloadError, e: + error(u"%s: Skipping that file. This is usually a transient error, please try again later." 
% e) + os.unlink(chkptfname) + continue + except S3Error, e: + warning(u"Remote file %s S3Error: %s" % (e.resource, e)) + continue + + try: # set permissions on destination file - original_umask = os.umask(0); - os.umask(original_umask); - mode = 0777 - original_umask; + if not is_empty_directory: # a normal file + mode = 0777 - original_umask; + else: # an empty directory, make them readable/executable + mode = 0775 debug(u"mode=%s" % oct(mode)) os.chmod(dst_file, mode); - debug(u"renamed chkptfname=%s to dst_file=%s" % (unicodise(chkptfname), unicodise(dst_file))) + except: + raise + + # because we don't upload empty directories, + # we can continue the loop here, we won't be setting stat info. + # if we do start to upload empty directories, we'll have to reconsider this. + if is_empty_directory: + continue + + try: if response.has_key('s3cmd-attrs') and cfg.preserve_attrs: attrs = response['s3cmd-attrs'] if attrs.has_key('mode'): @@ -944,19 +1085,20 @@ uid = int(attrs['uid']) gid = int(attrs['gid']) os.lchown(dst_file,uid,gid) + elif response["headers"].has_key("last-modified"): + last_modified = time.mktime(time.strptime(response["headers"]["last-modified"], "%a, %d %b %Y %H:%M:%S GMT")) + os.utime(dst_file, (last_modified, last_modified)) + debug("set mtime to %s" % last_modified) except OSError, e: try: dst_stream.close() os.remove(chkptfname) except: pass if e.errno == errno.EEXIST: - warning(u"%s exists - not overwriting" % (dst_file)) + warning(u"%s exists - not overwriting" % unicodise(dst_file)) continue if e.errno in (errno.EPERM, errno.EACCES): - warning(u"%s not writable: %s" % (dst_file, e.strerror)) - continue - if e.errno == errno.EISDIR: - warning(u"%s is a directory - skipping over" % dst_file) + warning(u"%s not writable: %s" % (unicodise(dst_file), e.strerror)) continue raise e except KeyboardInterrupt: @@ -981,7 +1123,7 @@ os.remove(chkptfname) except: pass except S3DownloadError, e: - error(u"%s: download failed too many times. Skipping that file." % file) + error(u"%s: download failed too many times. Skipping that file. This is usually a transient error, please try again later." % file) continue speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True) if not Config().progress_meter: @@ -1019,6 +1161,7 @@ if cfg.delete_removed and cfg.delete_after: _do_deletes(local_list) + return EX_OK def local_copy(copy_pairs, destination_base): # Do NOT hardlink local files by default, that'd be silly @@ -1065,17 +1208,19 @@ if attr == 'uname': try: val = Utils.getpwuid_username(local_list[src]['uid']) - except KeyError: + except (KeyError, TypeError): attr = "uid" val = local_list[src].get('uid') - warning(u"%s: Owner username not known. Storing UID=%d instead." % (src, val)) + if val: + warning(u"%s: Owner username not known. Storing UID=%d instead." % (src, val)) elif attr == 'gname': try: val = Utils.getgrgid_grpname(local_list[src].get('gid')) - except KeyError: + except (KeyError, TypeError): attr = "gid" val = local_list[src].get('gid') - warning(u"%s: Owner groupname not known. Storing GID=%d instead." % (src, val)) + if val: + warning(u"%s: Owner groupname not known. Storing GID=%d instead." 
% (src, val)) elif attr == 'md5': try: val = local_list.get_md5(src) @@ -1098,31 +1243,25 @@ def cmd_sync_local2remote(args): - - def _do_deletes(s3, remote_list): - if cfg.max_delete > 0 and len(remote_list) > cfg.max_delete: - warning(u"delete: maximum requested number of deletes would be exceeded, none performed.") - return - for key in remote_list: - uri = S3Uri(remote_list[key]['object_uri_str']) - s3.object_delete(uri) - output(u"deleted: '%s'" % uri) - def _single_process(local_list): + any_child_failed = False for dest in destinations: ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash) destination_base_uri = S3Uri(dest) if destination_base_uri.type != 's3': raise ParameterError("Destination must be S3Uri. Got: %s" % destination_base_uri) destination_base = str(destination_base_uri) - _child(destination_base, local_list) - return destination_base_uri + rc = _child(destination_base, local_list) + if rc: + any_child_failed = True + return any_child_failed def _parent(): # Now that we've done all the disk I/O to look at the local file system and # calculate the md5 for each file, fork for each destination to upload to them separately # and in parallel child_pids = [] + any_child_failed = False for dest in destinations: ## Normalize URI to convert s3://bkt to s3://bkt/ (trailing slash) @@ -1140,8 +1279,10 @@ while len(child_pids): (pid, status) = os.wait() child_pids.remove(pid) - - return + if status: + any_child_failed = True + + return any_child_failed def _child(destination_base, local_list): def _set_remote_uri(local_list, destination_base, single_file_local): @@ -1186,16 +1327,13 @@ uploaded_objects_list.append(uri.object()) return seq, total_size - remote_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True) + remote_list, dst_exclude_list = fetch_remote_list(destination_base, recursive = True, require_attribs = True) local_count = len(local_list) orig_local_count = local_count remote_count = len(remote_list) info(u"Found %d local files, %d remote files" % (local_count, remote_count)) - - local_list, src_exclude_list = filter_exclude_include(local_list) - remote_list, dst_exclude_list = filter_exclude_include(remote_list) if single_file_local and len(local_list) == 1 and len(remote_list) == 1: ## Make remote_key same as local_key for comparison if we're dealing with only one file @@ -1210,8 +1348,9 @@ update_count = len(update_list) copy_count = len(copy_pairs) remote_count = len(remote_list) - - info(u"Summary: %d local files to upload, %d files to remote copy, %d remote files to delete" % (local_count + update_count, copy_count, remote_count)) + upload_count = local_count + update_count + + info(u"Summary: %d local files to upload, %d files to remote copy, %d remote files to delete" % (upload_count, copy_count, remote_count)) _set_remote_uri(local_list, destination_base, single_file_local) _set_remote_uri(update_list, destination_base, single_file_local) @@ -1231,7 +1370,7 @@ output(u"delete: %s" % remote_list[key]['object_uri_str']) warning(u"Exiting now because of --dry-run") - return + return EX_OK # if there are copy pairs, we can't do delete_before, on the chance # we need one of the to-be-deleted files as a copy source. @@ -1242,14 +1381,14 @@ warning(u"delete: cowardly refusing to delete because no source files were found. 
Use --force to override.") cfg.delete_removed = False - if cfg.delete_removed and not cfg.delete_after: - _do_deletes(s3, remote_list) + if cfg.delete_removed and not cfg.delete_after and remote_list: + subcmd_batch_del(remote_list = remote_list) total_size = 0 total_elapsed = 0.0 timestamp_start = time.time() - n, total_size = _upload(local_list, 0, local_count, total_size) - n, total_size = _upload(update_list, n, local_count, total_size) + n, total_size = _upload(local_list, 0, upload_count, total_size) + n, total_size = _upload(update_list, n, upload_count, total_size) n_copies, saved_bytes, failed_copy_files = remote_copy(s3, copy_pairs, destination_base) #upload file that could not be copied @@ -1258,8 +1397,8 @@ _set_remote_uri(failed_copy_files, destination_base, single_file_local) n, total_size = _upload(failed_copy_files, n, failed_copy_count, total_size) - if cfg.delete_removed and cfg.delete_after: - _do_deletes(s3, remote_list) + if cfg.delete_removed and cfg.delete_after and remote_list: + subcmd_batch_del(remote_list = remote_list) total_elapsed = time.time() - timestamp_start total_speed = total_elapsed and total_size/total_elapsed or 0.0 speed_fmt = formatSize(total_speed, human_readable = True, floating_point = True) @@ -1272,7 +1411,7 @@ else: info(outstr) - return + return EX_OK def _invalidate_on_cf(destination_base_uri): cf = CloudFront(cfg) @@ -1298,25 +1437,32 @@ error(u"S3cmd 'sync' doesn't yet support GPG encryption, sorry.") error(u"Either use unconditional 's3cmd put --recursive'") error(u"or disable encryption with --no-encrypt parameter.") - sys.exit(1) - - local_list, single_file_local = fetch_local_list(args[:-1], is_src = True, recursive = True) + sys.exit(EX_USAGE) + + local_list, single_file_local, src_exclude_list = fetch_local_list(args[:-1], is_src = True, recursive = True) destinations = [args[-1]] if cfg.additional_destinations: destinations = destinations + cfg.additional_destinations if 'fork' not in os.__all__ or len(destinations) < 2: - destination_base_uri = _single_process(local_list) + any_child_failed = _single_process(local_list) + destination_base_uri = S3Uri(destinations[-1]) if cfg.invalidate_on_cf: if len(uploaded_objects_list) == 0: info("Nothing to invalidate in CloudFront") else: _invalidate_on_cf(destination_base_uri) else: - _parent() + any_child_failed = _parent() if cfg.invalidate_on_cf: error(u"You cannot use both --cf-invalidate and --add-destination.") + return(EX_USAGE) + + if any_child_failed: + return EX_SOFTWARE + else: + return EX_OK def cmd_sync(args): if (len(args) < 2): @@ -1350,8 +1496,7 @@ else: args.append(arg) - remote_list = fetch_remote_list(args) - remote_list, exclude_list = filter_exclude_include(remote_list) + remote_list, exclude_list = fetch_remote_list(args) remote_count = len(remote_list) @@ -1364,7 +1509,7 @@ output(u"setacl: %s" % remote_list[key]['object_uri_str']) warning(u"Exiting now because of --dry-run") - return + return EX_OK seq = 0 for key in remote_list: @@ -1372,6 +1517,7 @@ seq_label = "[%d of %d]" % (seq, remote_count) uri = S3Uri(remote_list[key]['object_uri_str']) update_acl(s3, uri, seq_label) + return EX_OK def cmd_setpolicy(args): s3 = S3(cfg) @@ -1379,7 +1525,7 @@ policy_file = args[0] policy = open(policy_file, 'r').read() - if cfg.dry_run: return + if cfg.dry_run: return EX_OK response = s3.set_policy(uri, policy) @@ -1387,18 +1533,45 @@ debug(u"response - %s" % response['status']) if response['status'] == 204: output(u"%s: Policy updated" % uri) + return EX_OK def 
cmd_delpolicy(args): s3 = S3(cfg) uri = S3Uri(args[0]) - if cfg.dry_run: return + if cfg.dry_run: return EX_OK response = s3.delete_policy(uri) #if retsponse['status'] == 200: debug(u"response - %s" % response['status']) output(u"%s: Policy deleted" % uri) - + return EX_OK + +def cmd_setlifecycle(args): + s3 = S3(cfg) + uri = S3Uri(args[1]) + lifecycle_policy_file = args[0] + lifecycle_policy = open(lifecycle_policy_file, 'r').read() + + if cfg.dry_run: return EX_OK + + response = s3.set_lifecycle_policy(uri, lifecycle_policy) + + debug(u"response - %s" % response['status']) + if response['status'] == 204: + output(u"%s: Lifecycle Policy updated" % uri) + return EX_OK + +def cmd_dellifecycle(args): + s3 = S3(cfg) + uri = S3Uri(args[0]) + if cfg.dry_run: return EX_OK + + response = s3.delete_lifecycle_policy(uri) + + debug(u"response - %s" % response['status']) + output(u"%s: Lifecycle Policy deleted" % uri) + return EX_OK def cmd_multipart(args): s3 = S3(cfg) @@ -1418,6 +1591,7 @@ output("%s\t%s\t%s" % (mpupload['Initiated'], "s3://" + uri.bucket() + "/" + mpupload['Key'], mpupload['UploadId'])) except KeyError: pass + return EX_OK def cmd_abort_multipart(args): '''{"cmd":"abortmp", "label":"abort a multipart upload", "param":"s3://BUCKET Id", "func":cmd_abort_multipart, "argc":2},''' @@ -1427,6 +1601,7 @@ response = s3.abort_multipart(uri, id) debug(u"response - %s" % response['status']) output(u"%s" % uri) + return EX_OK def cmd_list_multipart(args): '''{"cmd":"abortmp", "label":"list a multipart upload", "param":"s3://BUCKET Id", "func":cmd_list_multipart, "argc":2},''' @@ -1443,6 +1618,7 @@ output("%s\t%s\t%s\t%s" % (mpupload['LastModified'], mpupload['PartNumber'], mpupload['ETag'], mpupload['Size'])) except: pass + return EX_OK def cmd_accesslog(args): s3 = S3(cfg) @@ -1464,12 +1640,14 @@ if accesslog.isLoggingEnabled(): output(u" Target prefix: %s" % accesslog.targetPrefix().uri()) #output(u" Public Access: %s" % accesslog.isAclPublic()) + return EX_OK def cmd_sign(args): string_to_sign = args.pop() debug("string-to-sign: %r" % string_to_sign) signature = Utils.sign_string(string_to_sign) output("Signature: %s" % signature) + return EX_OK def cmd_signurl(args): expiry = args.pop() @@ -1479,6 +1657,7 @@ debug("url to sign: %r" % url_to_sign) signed_url = Utils.sign_url(url_to_sign, expiry) output(signed_url) + return EX_OK def cmd_fixbucket(args): def _unescape(text): @@ -1546,6 +1725,7 @@ warning("Fixed %d files' names. Their ACL were reset to Private." % count) warning("Use 's3cmd setacl --acl-public s3://...' to make") warning("them publicly readable if required.") + return EX_OK def resolve_list(lst, args): retval = [] @@ -1571,7 +1751,7 @@ "input_file" : filename, "output_file" : tmp_filename, } - info(u"Encrypting file %(input_file)s to %(output_file)s..." % args) + info(u"Encrypting file %s to %s..." % (unicodise(filename), tmp_filename)) command = resolve_list(cfg.gpg_encrypt.split(" "), args) code = gpg_command(command, cfg.gpg_passphrase) return (code, tmp_filename, "gpg") @@ -1584,11 +1764,11 @@ "input_file" : filename, "output_file" : tmp_filename, } - info(u"Decrypting file %(input_file)s to %(output_file)s..." % args) + info(u"Decrypting file %s to %s..." 
@@ -1584,11 +1764,11 @@ "input_file" : filename, "output_file" : tmp_filename, } - info(u"Decrypting file %(input_file)s to %(output_file)s..." % args) + info(u"Decrypting file %s to %s..." % (unicodise(filename), tmp_filename)) command = resolve_list(cfg.gpg_decrypt.split(" "), args) code = gpg_command(command, cfg.gpg_passphrase) if code == 0 and in_place: - debug(u"Renaming %s to %s" % (tmp_filename, filename)) + debug(u"Renaming %s to %s" % (tmp_filename, unicodise(filename))) os.unlink(filename) os.rename(tmp_filename, filename) tmp_filename = filename @@ -1597,7 +1777,7 @@ def run_configure(config_file, args): cfg = Config() options = [ - ("access_key", "Access Key", "Access key and Secret key are your identifiers for Amazon S3"), + ("access_key", "Access Key", "Access key and Secret key are your identifiers for Amazon S3. Leave them empty to use environment variables."), ("secret_key", "Secret Key"), ("gpg_passphrase", "Encryption password", "Encryption password is used to protect your files from reading\nby unauthorized persons while in transfer to S3"), ("gpg_command", "Path to GPG program"), @@ -1698,10 +1878,15 @@ else: raise Exception("Encryption verification error.") + except S3Error, e: + error(u"Test failed: %s" % (e)) + if e.code == "AccessDenied": + error(u"Are you sure your keys have ListAllMyBuckets permissions?") + val = raw_input("\nRetry configuration? [Y/n] ") + if val.lower().startswith("y") or val == "": + continue except Exception, e: error(u"Test failed: %s" % (e)) - if e.find('403') != -1: - error(u"Are you sure your keys have ListAllMyBuckets permissions?") val = raw_input("\nRetry configuration? [Y/n] ") if val.lower().startswith("y") or val == "": continue @@ -1733,14 +1918,14 @@ except IOError, e: error(u"Writing config file failed: %s: %s" % (config_file, e.strerror)) - sys.exit(1) + sys.exit(EX_IOERR) def process_patterns_from_file(fname, patterns_list): try: fn = open(fname, "rt") except IOError, e: error(e) - sys.exit(1) + sys.exit(EX_IOERR) for pattern in fn: pattern = pattern.strip() if re.match("^#", pattern) or re.match("^\s*$", pattern):
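process_patterns_from_file above is what backs the --exclude-from and --include-from options: one glob pattern per line, with blank lines and lines starting with '#' skipped. An illustrative patterns file for --exclude-from (the patterns themselves are examples, not defaults):

    # editor and build leftovers
    *.swp
    *.pyc

    # a whole local subtree; note the trailing-slash convention
    # described in the manpage additions further down
    build/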
s3://BUCKET[/PREFIX]", "func":cmd_object_put, "argc":2}, {"cmd":"get", "label":"Get file from bucket", "param":"s3://BUCKET/OBJECT LOCAL_FILE", "func":cmd_object_get, "argc":1}, {"cmd":"del", "label":"Delete file from bucket", "param":"s3://BUCKET/OBJECT", "func":cmd_object_del, "argc":1}, + {"cmd":"rm", "label":"Delete file from bucket (alias for del)", "param":"s3://BUCKET/OBJECT", "func":cmd_object_del, "argc":1}, #{"cmd":"mkdir", "label":"Make a virtual S3 directory", "param":"s3://BUCKET/path/to/dir", "func":cmd_mkdir, "argc":1}, {"cmd":"restore", "label":"Restore file from Glacier storage", "param":"s3://BUCKET/OBJECT", "func":cmd_object_restore, "argc":1}, {"cmd":"sync", "label":"Synchronize a directory tree to S3", "param":"LOCAL_DIR s3://BUCKET[/PREFIX] or s3://BUCKET[/PREFIX] LOCAL_DIR", "func":cmd_sync, "argc":2}, {"cmd":"du", "label":"Disk usage by buckets", "param":"[s3://BUCKET[/PREFIX]]", "func":cmd_du, "argc":0}, {"cmd":"info", "label":"Get various information about Buckets or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_info, "argc":1}, {"cmd":"cp", "label":"Copy object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_cp, "argc":2}, + {"cmd":"modify", "label":"Modify object metadata", "param":"s3://BUCKET1/OBJECT", "func":cmd_modify, "argc":1}, {"cmd":"mv", "label":"Move object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_mv, "argc":2}, {"cmd":"setacl", "label":"Modify Access control list for Bucket or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_setacl, "argc":1}, @@ -1815,6 +2002,11 @@ {"cmd":"ws-create", "label":"Create Website from bucket", "param":"s3://BUCKET", "func":cmd_website_create, "argc":1}, {"cmd":"ws-delete", "label":"Delete Website", "param":"s3://BUCKET", "func":cmd_website_delete, "argc":1}, {"cmd":"ws-info", "label":"Info about Website", "param":"s3://BUCKET", "func":cmd_website_info, "argc":1}, + + ## Lifecycle commands + {"cmd":"expire", "label":"Set or delete expiration rule for the bucket", "param":"s3://BUCKET", "func":cmd_expiration_set, "argc":1}, + {"cmd":"setlifecycle", "label":"Upload a lifecycle policy for the bucket", "param":"s3://BUCKET", "func":cmd_setlifecycle, "argc":1}, + {"cmd":"dellifecycle", "label":"Remove a lifecycle policy for the bucket", "param":"s3://BUCKET", "func":cmd_dellifecycle, "argc":1}, ## CloudFront commands {"cmd":"cflist", "label":"List CloudFront distribution points", "param":"", "func":CfCmd.info, "argc":0}, @@ -1918,21 +2110,22 @@ if cmd.has_key("cmd"): commands[cmd["cmd"]] = cmd - default_verbosity = Config().verbosity optparser = OptionParser(option_class=OptionAll, formatter=MyHelpFormatter()) #optparser.disable_interspersed_args() config_file = None - if os.getenv("HOME"): - config_file = os.path.join(os.getenv("HOME"), ".s3cfg") + if os.getenv("S3CMD_CONFIG"): + config_file = os.getenv("S3CMD_CONFIG") elif os.name == "nt" and os.getenv("USERPROFILE"): - config_file = os.path.join(os.getenv("USERPROFILE").decode('mbcs'), "Application Data", "s3cmd.ini") + config_file = os.path.join(os.getenv("USERPROFILE").decode('mbcs'), os.getenv("APPDATA").decode('mbcs') or 'Application Data', "s3cmd.ini") + else: + from os.path import expanduser + config_file = os.path.join(expanduser("~"), ".s3cfg") preferred_encoding = locale.getpreferredencoding() or "UTF-8" optparser.set_defaults(encoding = preferred_encoding) optparser.set_defaults(config = config_file) - optparser.set_defaults(verbosity = default_verbosity) optparser.add_option( "--configure", 
dest="run_configure", action="store_true", help="Invoke interactive (re)configuration tool. Optionally use as '--configure s3://some-bucket' to test access to a specific bucket instead of attempting to list them all.") optparser.add_option("-c", "--config", dest="config", metavar="FILE", help="Config file name. Defaults to %default") @@ -1991,23 +2184,26 @@ optparser.add_option( "--no-mime-magic", dest="use_mime_magic", action="store_false", help="Don't use mime magic when guessing MIME-type.") optparser.add_option("-m", "--mime-type", dest="mime_type", type="mimetype", metavar="MIME/TYPE", help="Force MIME-type. Override both --default-mime-type and --guess-mime-type.") - optparser.add_option( "--add-header", dest="add_header", action="append", metavar="NAME:VALUE", help="Add a given HTTP header to the upload request. Can be used multiple times. For instance set 'Expires' or 'Cache-Control' headers (or both) using this options if you like.") + optparser.add_option( "--add-header", dest="add_header", action="append", metavar="NAME:VALUE", help="Add a given HTTP header to the upload request. Can be used multiple times. For instance set 'Expires' or 'Cache-Control' headers (or both) using this option.") optparser.add_option( "--server-side-encryption", dest="server_side_encryption", action="store_true", help="Specifies that server-side encryption will be used when putting objects.") optparser.add_option( "--encoding", dest="encoding", metavar="ENCODING", help="Override autodetected terminal and filesystem encoding (character set). Autodetected: %s" % preferred_encoding) - optparser.add_option( "--disable-content-encoding", dest="add_content_encoding", action="store_false", help="Don't include a Content-encoding header to the the uploaded objects") optparser.add_option( "--add-encoding-exts", dest="add_encoding_exts", metavar="EXTENSIONs", help="Add encoding to these comma delimited extensions i.e. (css,js,html) when uploading to S3 )") optparser.add_option( "--verbatim", dest="urlencoding_mode", action="store_const", const="verbatim", help="Use the S3 name as given on the command line. No pre-processing, encoding, etc. Use with caution!") optparser.add_option( "--disable-multipart", dest="enable_multipart", action="store_false", help="Disable multipart upload on files bigger than --multipart-chunk-size-mb") - optparser.add_option( "--multipart-chunk-size-mb", dest="multipart_chunk_size_mb", type="int", action="store", metavar="SIZE", help="Size of each chunk of a multipart upload. Files bigger than SIZE are automatically uploaded as multithreaded-multipart, smaller files are uploaded using the traditional method. SIZE is in Mega-Bytes, default chunk size is %defaultMB, minimum allowed chunk size is 5MB, maximum is 5GB.") + optparser.add_option( "--multipart-chunk-size-mb", dest="multipart_chunk_size_mb", type="int", action="store", metavar="SIZE", help="Size of each chunk of a multipart upload. Files bigger than SIZE are automatically uploaded as multithreaded-multipart, smaller files are uploaded using the traditional method. 
- optparser.add_option( "--multipart-chunk-size-mb", dest="multipart_chunk_size_mb", type="int", action="store", metavar="SIZE", help="Size of each chunk of a multipart upload. Files bigger than SIZE are automatically uploaded as multithreaded-multipart, smaller files are uploaded using the traditional method. SIZE is in Mega-Bytes, default chunk size is %defaultMB, minimum allowed chunk size is 5MB, maximum is 5GB.") + optparser.add_option( "--multipart-chunk-size-mb", dest="multipart_chunk_size_mb", type="int", action="store", metavar="SIZE", help="Size of each chunk of a multipart upload. Files bigger than SIZE are automatically uploaded as multithreaded-multipart, smaller files are uploaded using the traditional method. SIZE is in Mega-Bytes, default chunk size is 15MB, minimum allowed chunk size is 5MB, maximum is 5GB.") optparser.add_option( "--list-md5", dest="list_md5", action="store_true", help="Include MD5 sums in bucket listings (only for 'ls' command).") optparser.add_option("-H", "--human-readable-sizes", dest="human_readable_sizes", action="store_true", help="Print sizes in human readable form (eg 1kB instead of 1234).") optparser.add_option( "--ws-index", dest="website_index", action="store", help="Name of index-document (only for [ws-create] command)") optparser.add_option( "--ws-error", dest="website_error", action="store", help="Name of error-document (only for [ws-create] command)") + + optparser.add_option( "--expiry-date", dest="expiry_date", action="store", help="Indicates when the expiration rule takes effect. (only for [expire] command)") + optparser.add_option( "--expiry-days", dest="expiry_days", action="store", help="Indicates the number of days after object creation the expiration rule takes effect. (only for [expire] command)") + optparser.add_option( "--expiry-prefix", dest="expiry_prefix", action="store", help="Identifies one or more objects by the prefix to which the expiration rule applies. (only for [expire] command)") optparser.add_option( "--progress", dest="progress_meter", action="store_true", help="Display progress meter (default on TTY).") optparser.add_option( "--no-progress", dest="progress_meter", action="store_false", help="Don't display progress meter (default on non-TTY).") @@ -2035,20 +2231,20 @@ '"buckets" and uploading, downloading and removing '+ '"objects" from these buckets.') optparser.epilog = format_commands(optparser.get_prog_name(), commands_list) - optparser.epilog += ("\nFor more information see the project homepage:\n%s\n" % PkgInfo.url) + optparser.epilog += ("\nFor more information, updates and news, visit the s3cmd website:\n%s\n" % PkgInfo.url) optparser.epilog += ("\nConsider a donation if you have found s3cmd useful:\n%s/donate\n" % PkgInfo.url) (options, args) = optparser.parse_args() ## Some mucking with logging levels to enable ## debugging/verbose output for config file parser on request - logging.basicConfig(level=options.verbosity, + logging.basicConfig(level=options.verbosity or Config().verbosity, format='%(levelname)s: %(message)s', stream = sys.stderr) if options.show_version: output(u"s3cmd version %s" % PkgInfo.version) - sys.exit(0) + sys.exit(EX_OK) if options.quiet: try: @@ -2061,10 +2257,10 @@ ## Now finally parse the config file if not options.config: error(u"Can't find a config file. 
Please use --config option.") - sys.exit(1) + sys.exit(EX_CONFIG) try: - cfg = Config(options.config) + cfg = Config(options.config, options.access_key, options.secret_key) except IOError, e: if options.run_configure: cfg = Config() @@ -2072,11 +2268,10 @@ error(u"%s: %s" % (options.config, e.strerror)) error(u"Configuration file not available.") error(u"Consider using --configure parameter to create one.") - sys.exit(1) - - ## And again some logging level adjustments - ## according to configfile and command line parameters - if options.verbosity != default_verbosity: + sys.exit(EX_CONFIG) + + # allow commandline verbosity config to override config file + if options.verbosity is not None: cfg.verbosity = options.verbosity logging.root.setLevel(cfg.verbosity) @@ -2209,20 +2404,20 @@ if cfg.encrypt and cfg.gpg_passphrase == "": error(u"Encryption requested but no passphrase set in config file.") error(u"Please re-run 's3cmd --configure' and supply it.") - sys.exit(1) + sys.exit(EX_CONFIG) if options.dump_config: cfg.dump_config(sys.stdout) - sys.exit(0) + sys.exit(EX_OK) if options.run_configure: # 'args' may contain the test-bucket URI run_configure(options.config, args) - sys.exit(0) + sys.exit(EX_OK) if len(args) < 1: - error(u"Missing command. Please run with --help for more information.") - sys.exit(1) + optparser.print_help() + sys.exit(EX_USAGE) ## Unicodise all remaining arguments: args = [unicodise(arg) for arg in args] @@ -2236,20 +2431,23 @@ cmd_func = commands[command]["func"] except KeyError, e: error(u"Invalid command: %s" % e) - sys.exit(1) + sys.exit(EX_USAGE) if len(args) < commands[command]["argc"]: error(u"Not enough parameters for command '%s'" % command) - sys.exit(1) + sys.exit(EX_USAGE) try: - cmd_func(args) + rc = cmd_func(args) + if rc is None: # if we missed any cmd_*() returns + rc = EX_GENERAL + return rc except S3Error, e: error(u"S3 error: %s" % e) - sys.exit(1) + sys.exit(EX_SOFTWARE) def report_exception(e, msg=''): - sys.stderr.write(""" + sys.stderr.write(u""" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! An unexpected error has occurred. Please try reproducing the error using @@ -2264,20 +2462,20 @@ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """ % msg) - s = ' '.join(sys.argv) - sys.stderr.write("""Invoked as: %s""" % s) + s = u' '.join([unicodise(a) for a in sys.argv]) + sys.stderr.write(u"Invoked as: %s\n" % s) tb = traceback.format_exc(sys.exc_info()) e_class = str(e.__class__) e_class = e_class[e_class.rfind(".")+1 : -2] sys.stderr.write(u"Problem: %s: %s\n" % (e_class, e)) try: - sys.stderr.write("S3cmd: %s\n" % PkgInfo.version) + sys.stderr.write(u"S3cmd: %s\n" % PkgInfo.version) except NameError: - sys.stderr.write("S3cmd: unknown version. Module import problem?\n") - sys.stderr.write("python: %s\n" % sys.version) - sys.stderr.write("environment LANG=%s\n" % os.getenv("LANG")) - sys.stderr.write("\n") + sys.stderr.write(u"S3cmd: unknown version. 
Module import problem?\n") + sys.stderr.write(u"python: %s\n" % sys.version) + sys.stderr.write(u"environment LANG=%s\n" % os.getenv("LANG")) + sys.stderr.write(u"\n") sys.stderr.write(unicode(tb, errors="replace")) if type(e) == ImportError: @@ -2306,6 +2504,7 @@ ## Our modules ## Keep them in try/except block to ## detect any syntax errors in there + from S3.ExitCodes import * from S3.Exceptions import * from S3 import PkgInfo from S3.S3 import S3 @@ -2321,23 +2520,39 @@ from S3.FileLists import * from S3.MultiPart import MultiPartUpload - main() - sys.exit(0) + rc = main() + sys.exit(rc) except ImportError, e: report_exception(e) - sys.exit(1) - - except ParameterError, e: + sys.exit(EX_GENERAL) + + except (ParameterError, InvalidFileError), e: error(u"Parameter problem: %s" % e) - sys.exit(1) + sys.exit(EX_USAGE) + + except (S3DownloadError, S3UploadError, S3RequestError), e: + error(u"S3 Temporary Error: %s. Please try again later." % e) + sys.exit(EX_TEMPFAIL) + + except (S3Error, S3Exception, S3ResponseError, CloudFrontError), e: + report_exception(e) + sys.exit(EX_SOFTWARE) except SystemExit, e: sys.exit(e.code) except KeyboardInterrupt: sys.stderr.write("See ya!\n") - sys.exit(1) + sys.exit(EX_BREAK) + + except IOError, e: + error(e) + sys.exit(EX_IOERR) + + except OSError, e: + error(e) + sys.exit(EX_OSERR) except MemoryError: msg = """ @@ -2348,7 +2563,7 @@ 2) use a 64-bit python on a 64-bit OS with >8GB RAM """ sys.stderr.write(msg) - sys.exit(1) + sys.exit(EX_OSERR) except UnicodeEncodeError, e: lang = os.getenv("LANG") @@ -2359,10 +2574,10 @@ invoking s3cmd. """ % lang report_exception(e, msg) - sys.exit(1) + sys.exit(EX_GENERAL) except Exception, e: report_exception(e) - sys.exit(1) + sys.exit(EX_GENERAL) # vim:et:ts=4:sts=4:ai diff --git a/s3cmd.1 b/s3cmd.1 index 29d13e0..385275b 100644 --- a/s3cmd.1 +++ b/s3cmd.1 @@ -43,6 +43,12 @@ s3cmd \fBdel\fR \fIs3://BUCKET/OBJECT\fR Delete file from bucket .TP +s3cmd \fBrm\fR \fIs3://BUCKET/OBJECT\fR +Delete file from bucket (alias for del) +.TP +s3cmd \fBrestore\fR \fIs3://BUCKET/OBJECT\fR +Restore file from Glacier storage +.TP s3cmd \fBsync\fR \fILOCAL_DIR s3://BUCKET[/PREFIX] or s3://BUCKET[/PREFIX] LOCAL_DIR\fR Synchronize a directory tree to S3 .TP @@ -55,6 +61,9 @@ s3cmd \fBcp\fR \fIs3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]\fR Copy object .TP +s3cmd \fBmodify\fR \fIs3://BUCKET1/OBJECT\fR +Modify object metadata +.TP s3cmd \fBmv\fR \fIs3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]\fR Move object .TP @@ -87,6 +96,15 @@ .TP s3cmd \fBfixbucket\fR \fIs3://BUCKET[/PREFIX]\fR Fix invalid file names in a bucket +.TP +s3cmd \fBexpire\fR \fIs3://BUCKET\fR +Set or delete expiration rule for the bucket +.TP +s3cmd \fBsetlifecycle\fR \fIs3://BUCKET\fR +Upload a lifecycle policy for the bucket +.TP +s3cmd \fBdellifecycle\fR \fIs3://BUCKET\fR +Remove a lifecycle policy for the bucket .PP @@ -222,6 +240,10 @@ Permission is one of: read, write, read_acp, wr ite_acp, full_control, all .TP +\fB\-D\fR NUM, \fB\-\-restore\-days\fR=NUM +Number of days to keep restored file available (only +for 'restore' command). +.TP \fB\-\-delete\-removed\fR Delete remote objects with no corresponding local file [sync] @@ -329,8 +351,7 @@ \fB\-\-add\-header\fR=NAME:VALUE Add a given HTTP header to the upload request. Can be used multiple times. For instance set 'Expires' or -'Cache-Control' headers (or both) using this options -if you like. +'Cache-Control' headers (or both) using this option. 
.TP \fB\-\-server\-side\-encryption\fR Specifies that server-side encryption will be used @@ -339,10 +360,6 @@ \fB\-\-encoding\fR=ENCODING Override autodetected terminal and filesystem encoding (character set). Autodetected: UTF-8 -.TP -\fB\-\-disable\-content\-encoding\fR -Don't include a Content-encoding header to the the -uploaded objects .TP \fB\-\-add\-encoding\-exts\fR=EXTENSIONs Add encoding to these comma delimited extensions i.e. @@ -361,8 +378,8 @@ than SIZE are automatically uploaded as multithreaded- multipart, smaller files are uploaded using the traditional method. SIZE is in Mega-Bytes, default -chunk size is noneMB, minimum allowed chunk size is -5MB, maximum is 5GB. +chunk size is 15MB, minimum allowed chunk size is 5MB, +maximum is 5GB. .TP \fB\-\-list\-md5\fR Include MD5 sums in bucket listings (only for 'ls' @@ -377,6 +394,20 @@ .TP \fB\-\-ws\-error\fR=WEBSITE_ERROR Name of error-document (only for [ws-create] command) +.TP +\fB\-\-expiry\-date\fR=EXPIRY_DATE +Indicates when the expiration rule takes effect. (only +for [expire] command) +.TP +\fB\-\-expiry\-days\fR=EXPIRY_DAYS +Indicates the number of days after object creation the +expiration rule takes effect. (only for [expire] +command) +.TP +\fB\-\-expiry\-prefix\fR=EXPIRY_PREFIX +Identifies one or more objects by the prefix to +which the expiration rule applies. (only for [expire] +command) .TP \fB\-\-progress\fR Display progress meter (default on TTY). @@ -430,7 +461,7 @@ Enable debug output. .TP \fB\-\-version\fR -Show s3cmd version (1.5.0-beta1) and exit. +Show s3cmd version (1.5.0-rc1) and exit. .TP \fB\-F\fR, \fB\-\-follow\-symlinks\fR Follow symbolic links as if they are regular files @@ -517,28 +548,49 @@ For example to exclude all files with ".jpg" extension except those beginning with a number use: .PP \-\-exclude '*.jpg' \-\-rinclude '[0-9].*\.jpg' +.PP +To exclude all files except those matching "*.jpg", use: +.PP + \-\-exclude '*' \-\-include '*.jpg' +.PP +To exclude the local directory 'somedir', be sure to use a trailing forward slash, like this: +.PP + \-\-exclude 'somedir/' +.PP + .SH SEE ALSO -For the most up to date list of options run +For the most up to date list of options run: .B s3cmd \-\-help .br -For more info about usage, examples and other related info visit project homepage at -.br +For more info about usage, examples and other related info visit project homepage at: .B http://s3tools.org .SH DONATIONS Please consider a donation if you have found s3cmd useful: .br .B http://s3tools.org/donate .SH AUTHOR -Written by Michal Ludvig and 15+ contributors +Written by Michal Ludvig and contributors .SH CONTACT, SUPPORT Preferred way to get support is our mailing list: +.br .I s3tools\-general@lists.sourceforge.net +.br +or visit the project homepage: +.br +.B http://s3tools.org .SH REPORTING BUGS Report bugs to .I s3tools\-bugs@lists.sourceforge.net .SH COPYRIGHT -Copyright \(co 2007,2008,2009,2010,2011,2012 Michal Ludvig -.br -This is free software. You may redistribute copies of it under the terms of -the GNU General Public License version 2 . -There is NO WARRANTY, to the extent permitted by law. +Copyright \(co 2007-2014 TGRMN Software - http://www.tgrmn.com - and contributors +.br +.SH LICENSE +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. 
+This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. +.br diff --git a/s3cmd.spec.in b/s3cmd.spec.in deleted file mode 100644 index 8868fda..0000000 --- a/s3cmd.spec.in +++ /dev/null @@ -1,155 +0,0 @@ -%{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} - -%global commit ##COMMIT## -%global shortcommit ##SHORTCOMMIT## - -Name: s3cmd -Version: ##VERSION## -Release: 0.3.git%{shortcommit}%{?dist} -Summary: Tool for accessing Amazon Simple Storage Service - -Group: Applications/Internet -License: GPLv2 -URL: http://s3tools.logix.cz/s3cmd -# git clone git@github.com:mdomsch/s3cmd.git -# git checkout -b origin/merge -#git archive --format tar --prefix s3cmd-1.1.0-beta3-2dfe4a65/ HEAD | gzip -c > s3cmd-1.1.0-beta1-2dfe4a65.tar.gz - -Source0: https://github.com/s3tools/s3cmd/archive/%{commit}/%{name}-%{version}-%{shortcommit}.tar.gz -BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) -BuildArch: noarch - -%if %{!?fedora:16}%{?fedora} < 16 || %{!?rhel:7}%{?rhel} < 7 -BuildRequires: python-devel -%else -BuildRequires: python2-devel -%endif -%if %{!?fedora:8}%{?fedora} < 8 || %{!?rhel:6}%{?rhel} < 6 -# This is in standard library since 2.5 -Requires: python-elementtree -%endif - -%description -S3cmd lets you copy files from/to Amazon S3 -(Simple Storage Service) using a simple to use -command line client. - - -%prep -%setup -q -n s3cmd-%{commit} - -%build - - -%install -rm -rf $RPM_BUILD_ROOT -S3CMD_PACKAGING=Yes python setup.py install --prefix=%{_prefix} --root=$RPM_BUILD_ROOT -install -d $RPM_BUILD_ROOT%{_mandir}/man1 -install -m 644 s3cmd.1 $RPM_BUILD_ROOT%{_mandir}/man1 - - -%clean -rm -rf $RPM_BUILD_ROOT - - -%files -%defattr(-,root,root,-) -%{_bindir}/s3cmd -%{_mandir}/man1/s3cmd.1* -%{python_sitelib}/S3 -%if 0%{?fedora} >= 9 || 0%{?rhel} >= 6 -%{python_sitelib}/s3cmd*.egg-info -%endif -%doc NEWS README - - -%changelog -* Sun Feb 02 2014 Matt Domsch - 1.5.0-0.3.git -- upstream 1.5.0-beta1 plus newer upstream fixes - -* Wed May 29 2013 Matt Domsch - 1.5.0-0.2.gita122d97 -- more upstream bugfixes -- drop pyxattr dep, that codepath got dropped in this release - -* Mon May 20 2013 Matt Domsch - 1.5.0-0.1.gitb1ae0fbe -- upstream 1.5.0-alpha3 plus fixes -- add dep on pyxattr for the --xattr option - -* Tue Jun 19 2012 Matt Domsch - 1.1.0-0.4.git11e5755e -- add local MD5 cache - -* Mon Jun 18 2012 Matt Domsch - 1.1.0-0.3.git7de0789d -- parallelize local->remote syncs - -* Mon Jun 18 2012 Matt Domsch - 1.1.0-0.2.gitf881b162 -- add hardlink / duplicate file detection support - -* Fri Mar 9 2012 Matt Domsch - 1.1.0-0.1.git2dfe4a65 -- build from git for mdomsch patches to s3cmd sync - -* Thu Feb 23 2012 Dennis Gilmore - 1.0.1-1 -- update to 1.0.1 release - -* Sat Jan 14 2012 Fedora Release Engineering - 1.0.0-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild - -* Thu May 05 2011 Lubomir Rintel (GoodData) - 1.0.0-3 -- No hashlib hackery - -* Wed Feb 09 2011 Fedora Release Engineering - 1.0.0-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild - -* Tue Jan 11 2011 Lubomir Rintel (GoodData) - 1.0.0-1 -- New upstream release - -* Mon Nov 29 2010 Lubomir Rintel (GoodData) - 0.9.9.91-3 -- Patch for broken f14 httplib - -* Thu Jul 22 2010 David Malcolm - 0.9.9.91-2.1 -- Rebuilt for 
https://fedoraproject.org/wiki/Features/Python_2.7/MassRebuild - -* Wed Apr 28 2010 Lubomir Rintel (GoodData) - 0.9.9.91-1.1 -- Do not use sha1 from hashlib - -* Sun Feb 21 2010 Lubomir Rintel (Good Data) - 0.9.9.91-1 -- New upstream release - -* Sun Jul 26 2009 Fedora Release Engineering - 0.9.9-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild - -* Tue Feb 24 2009 Lubomir Rintel (Good Data) - 0.9.9-1 -- New upstream release - -* Sat Nov 29 2008 Ignacio Vazquez-Abrams - 0.9.8.4-2 -- Rebuild for Python 2.6 - -* Tue Nov 11 2008 Lubomir Rintel (Good Data) - 0.9.8.4-1 -- New upstream release, URI encoding patch upstreamed - -* Fri Sep 26 2008 Lubomir Rintel (Good Data) - 0.9.8.3-4 -- Try 3/65536 - -* Fri Sep 26 2008 Lubomir Rintel (Good Data) - 0.9.8.3-3 -- Whoops, forgot to actually apply the patch. - -* Fri Sep 26 2008 Lubomir Rintel (Good Data) - 0.9.8.3-2 -- Fix listing of directories with special characters in names - -* Thu Jul 31 2008 Lubomir Rintel (Good Data) - 0.9.8.3-1 -- New upstream release: Avoid running out-of-memory in MD5'ing large files. - -* Fri Jul 25 2008 Lubomir Rintel (Good Data) - 0.9.8.2-1.1 -- Fix a typo - -* Tue Jul 15 2008 Lubomir Rintel (Good Data) - 0.9.8.2-1 -- New upstream - -* Fri Jul 04 2008 Lubomir Rintel (Good Data) - 0.9.8.1-3 -- Be satisfied with ET provided by 2.5 python - -* Fri Jul 04 2008 Lubomir Rintel (Good Data) - 0.9.8.1-2 -- Added missing python-devel BR, thanks to Marek Mahut -- Packaged the Python egg file - -* Wed Jul 02 2008 Lubomir Rintel (Good Data) - 0.9.8.1-1 -- Initial packaging attempt diff --git a/setup.py b/setup.py index 7478a76..6055d60 100644 --- a/setup.py +++ b/setup.py @@ -74,7 +74,8 @@ Authors: -------- Michal Ludvig -""" % (S3.PkgInfo.long_description) +""" % (S3.PkgInfo.long_description), + requires=["dateutil"] ) # vim:et:ts=4:sts=4:ai diff --git a/testsuite.tar.gz b/testsuite.tar.gz deleted file mode 100644 index 80c9dbb..0000000 Binary files a/testsuite.tar.gz and /dev/null differ diff --git a/upload-to-sf.sh b/upload-to-sf.sh deleted file mode 100755 index 176f4db..0000000 --- a/upload-to-sf.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/sh - -VERSION=$(./s3cmd --version | awk '{print $NF}') -echo -e "Uploading \033[32ms3cmd \033[31m${VERSION}\033[0m ..." -#rsync -avP dist/s3cmd-${VERSION}.* ludvigm@frs.sourceforge.net:uploads/ -ln -f NEWS README.txt -rsync -avP dist/s3cmd-${VERSION}.* README.txt ludvigm,s3tools@frs.sourceforge.net:/home/frs/project/s/s3/s3tools/s3cmd/${VERSION}/
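The exit-code constants used throughout this patch come from the new S3/ExitCodes.py module (imported near the top of the __main__ block), which is not itself shown in this diff. A plausible sketch of it, assuming the usual BSD sysexits.h numbering; the exact values are illustrative, not taken from the patch:

    # S3/ExitCodes.py (sketch; values follow sysexits.h conventions)
    EX_OK       = 0     # everything went fine
    EX_GENERAL  = 1     # some unspecified error
    EX_USAGE    = 64    # command line usage error
    EX_SOFTWARE = 70    # internal software error, e.g. an S3 API failure
    EX_OSERR    = 71    # system error, e.g. out of memory
    EX_IOERR    = 74    # local input/output error
    EX_TEMPFAIL = 75    # temporary failure, safe to retry later
    EX_CONFIG   = 78    # configuration error
    EX_BREAK    = 130   # interrupted (Ctrl-C), 128 + SIGINT by shell convention

Compared to the old blanket sys.exit(1), this lets wrapper scripts tell retryable failures (EX_TEMPFAIL) apart from usage or configuration mistakes.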