New Upstream Snapshot - s3cmd

Ready changes

Summary

Merged new upstream version: 2.3.0+git20221120.1.6f3e1ba (was: 2.3.0).

Resulting package

Built on 2023-01-19T14:06 (took 7m10s)

The resulting binary package can be installed (if you have the apt repository enabled) by running:

apt install -t fresh-snapshots s3cmd

Lintian Result

Diff

diff --git a/.ci.s3cfg b/.ci.s3cfg
deleted file mode 100644
index af99e1b..0000000
--- a/.ci.s3cfg
+++ /dev/null
@@ -1,76 +0,0 @@
-[default]
-access_key = Q3AM3UQ867SPQQA43P2F
-access_token = 
-add_encoding_exts = 
-add_headers = 
-bucket_location = us-east-1
-ca_certs_file = 
-cache_file = 
-check_ssl_certificate = True
-check_ssl_hostname = True
-cloudfront_host = cloudfront.amazonaws.com
-default_mime_type = binary/octet-stream
-delay_updates = False
-delete_after = False
-delete_after_fetch = False
-delete_removed = False
-dry_run = False
-enable_multipart = True
-encoding = UTF-8
-encrypt = False
-expiry_date = 
-expiry_days = 
-expiry_prefix = 
-follow_symlinks = False
-force = False
-get_continue = False
-gpg_command = /usr/bin/gpg
-gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
-gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
-gpg_passphrase = 
-guess_mime_type = True
-host_base = localhost:9000
-host_bucket = localhost:9000
-human_readable_sizes = False
-invalidate_default_index_on_cf = False
-invalidate_default_index_root_on_cf = True
-invalidate_on_cf = False
-kms_key = 
-limit = -1
-limitrate = 0
-list_md5 = False
-list_allow_unordered = False
-log_target_prefix = 
-long_listing = False
-max_delete = -1
-mime_type = 
-multipart_chunk_size_mb = 15
-multipart_max_chunks = 10000
-preserve_attrs = True
-progress_meter = True
-proxy_host = 
-proxy_port = 0
-put_continue = False
-recursive = False
-recv_chunk = 65536
-reduced_redundancy = False
-requester_pays = False
-restore_days = 1
-secret_key = zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
-send_chunk = 65536
-server_side_encryption = False
-signature_v2 = False
-simpledb_host = sdb.amazonaws.com
-skip_existing = False
-socket_timeout = 300
-stats = False
-stop_on_error = False
-storage_class = 
-urlencoding_mode = normal
-use_http_expect = False
-use_https = False
-use_mime_magic = True
-verbosity = WARNING
-website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/
-website_error = 
-website_index = index.html
diff --git a/.dockerignore b/.dockerignore
deleted file mode 100644
index f94d468..0000000
--- a/.dockerignore
+++ /dev/null
@@ -1,4 +0,0 @@
-testsuite
-testsuite-out
-/dist
-build/*
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
deleted file mode 100644
index ad1c501..0000000
--- a/.github/FUNDING.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-# These are supported funding model platforms
-
-github: [fviard]
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
deleted file mode 100644
index ac408ac..0000000
--- a/.github/workflows/test.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: Test
-
-on: [push, pull_request, workflow_dispatch]
-
-jobs:
-  test:
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9', '3.10-dev']
-    env:
-      cache-revision: 1
-    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.python-version }}
-      - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install .
-      - name: Cache minio
-        id: cache-minio
-        uses: actions/cache@v2
-        env:
-          cache-name: cache-minio
-        with:
-          path: ~/cache
-          key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ env.cache-revision }}
-      - name: Download minio on cache miss
-        if: steps.cache-minio.outputs.cache-hit != 'true'
-        run: |
-          mkdir -p ~/cache
-          test ! -e ~/cache/minio && wget -O ~/cache/minio https://dl.minio.io/server/minio/release/linux-amd64/minio || echo "Minio already in cache"
-      - name: Start a local instance of minio
-        run: |
-          export AWS_ACCESS_KEY_ID=Q3AM3UQ867SPQQA43P2F
-          export AWS_SECRET_ACCESS_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
-          export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
-          export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
-          chmod +x ~/cache/minio
-          mkdir -p ~/minio_tmp
-          ~/cache/minio server ~/minio_tmp &
-          sleep 4 # give minio some time to start
-      - name: Run tests
-        ## Tests stopped at test 23 because minio doesn't support "quote_plus" used in signatures.
-        run: python ./run-tests-minio.py -c .ci.s3cfg -p baseauto
-      - name: Terminate
-        if: always()
-        continue-on-error: true
-        run: killall minio
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 06a228f..0000000
--- a/.gitignore
+++ /dev/null
@@ -1,11 +0,0 @@
-*.pyc
-*.swp
-testsuite
-testsuite-out
-/MANIFEST
-/dist
-build/*
-s3cmd.egg-info
-s3cmd.spec
-.idea
-.s3cfg
diff --git a/.svnignore b/.svnignore
deleted file mode 100644
index 5dcd3f4..0000000
--- a/.svnignore
+++ /dev/null
@@ -1,12 +0,0 @@
-## Run 'svn propset svn:ignore -F .svnignore .' after you change this list
-*.pyc
-.*.swp
-testsuite
-testsuite-out
-MANIFEST
-dist
-build
-s3cmd.egg-info
-s3cmd.spec
-.idea
-.s3cfg
diff --git a/Makefile b/Makefile
deleted file mode 100644
index a62e269..0000000
--- a/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-SHELL  := /bin/bash
-VERSION := $(shell /usr/bin/env python2 -c 'from S3 import PkgInfo;print PkgInfo.version')
-SPEC   := s3cmd.spec
-COMMIT := $(shell git rev-parse HEAD)
-SHORTCOMMIT := $(shell git rev-parse --short=8 HEAD)
-TARBALL = s3cmd-$(VERSION)-$(SHORTCOMMIT).tar.gz
-
-release:
-	python2 setup.py register sdist upload --sign
-
-clean:
-	-rm -rf s3cmd-*.tar.gz *.rpm *~ $(SPEC)
-	-find . -name \*.pyc -exec rm \{\} \;
-	-find . -name \*.pyo -exec rm \{\} \;
-
-$(SPEC): $(SPEC).in
-	sed -e 's/##VERSION##/$(VERSION)/' \
-            -e 's/##COMMIT##/$(COMMIT)/' \
-            -e 's/##SHORTCOMMIT##/$(SHORTCOMMIT)/' \
-            $(SPEC).in > $(SPEC)
-
-# fixme: python setup.py sdist also generates a PKG-INFO file which we don't have using straight git archive
-git-tarball:
-	git archive --format tar --prefix s3cmd-$(COMMIT)/ HEAD S3/ s3cmd NEWS README.md LICENSE INSTALL.md setup.cfg s3cmd.1 setup.py| gzip -c > $(TARBALL)
-
-# Use older digest algorithms for local rpmbuilds, as EPEL5 and
-# earlier releases need this.  When building using mock for a
-# particular target, it will use the proper (newer) digests if that
-# target supports it.
-git-rpm: clean git-tarball $(SPEC)
-	tmp_dir=`mktemp -d` ; \
-	mkdir -p $${tmp_dir}/{BUILD,RPMS,SRPMS,SPECS,SOURCES} ; \
-	cp $(TARBALL) $${tmp_dir}/SOURCES ; \
-	cp $(SPEC) $${tmp_dir}/SPECS ; \
-	cd $${tmp_dir} > /dev/null 2>&1; \
-	rpmbuild -ba --define "_topdir $${tmp_dir}" \
-	  --define "_source_filedigest_algorithm 0" \
-	  --define "_binary_filedigest_algorithm 0" \
-	  --define "dist %{nil}" \
-          SPECS/$(SPEC) ; \
-	cd - > /dev/null 2>&1; \
-	cp $${tmp_dir}/RPMS/noarch/* $${tmp_dir}/SRPMS/* . ; \
-	rm -rf $${tmp_dir} ; \
-	rpmlint *.rpm *.spec
diff --git a/ObsoleteChangeLog b/ObsoleteChangeLog
deleted file mode 100644
index f11fad4..0000000
--- a/ObsoleteChangeLog
+++ /dev/null
@@ -1,1459 +0,0 @@
-2011-06-06  Michal Ludvig  <mludvig@logix.net.nz>
-
-===== Migrated to GIT =====
-
-No longer keeping ChangeLog up to date, use git log instead!
-
-* git://github.com/s3tools/s3cmd.git
-
-2011-04-11  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/S3Uri.py: Fixed cf:// uri parsing.
-	* S3/CloudFront.py: Don't fail if there are no cfinval
-	  requests.
-
-2011-04-11  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/PkgInfo.py: Updated to 1.1.0-beta1
-	* NEWS: Updated.
-	* s3cmd.1: Regenerated.
-
-2011-04-11  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/Config.py: Increase socket_timeout from 10 secs to 5 mins.
-
-2011-04-10  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/CloudFront.py, S3/S3Uri.py: Support for checking 
-	  status of CF Invalidation Requests [cfinvalinfo].
-	* s3cmd, S3/CloudFront.py, S3/Config.py: Support for CloudFront
-	  invalidation using [sync --cf-invalidate] command.
-	* S3/Utils.py: getDictFromTree() now recurses into
-	  sub-trees.
-
-2011-03-30  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/CloudFront.py: Fix warning with Python 2.7
-	* S3/CloudFront.py: Cmd._get_dist_name_for_bucket() moved to
-	  CloudFront class.
-
-2011-01-13  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/FileLists.py: Move file/object listing functions
-	  to S3/FileLists.py
-
-2011-01-09  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* Released version 1.0.0
-	  ----------------------
-
-	* S3/PkgInfo.py: Updated to 1.0.0
-	* NEWS: Updated.
-
-2011-01-02  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd: Improved r457 (Don't crash when file disappears
-	  before checking MD5).
-	* s3cmd, s3cmd.1, format-manpage.pl: Improved --help text
-	  and manpage.
-	* s3cmd: Removed explicit processing of --follow-symlinks
-	  (is cought by the default / main loop).
-
-2010-12-24  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd: Set 10s socket timeout for read()/write().
-	* s3cmd: Added --(no-)check-md5 for [sync].
-	* run-tests.py, testsuite.tar.gz: Added testsuite for
-	  the above.
-	* NEWS: Document the above.
-	* s3cmd: Don't crash when file disappears before
-	  checking MD5.
-
-2010-12-09  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* Released version 1.0.0-rc2
-	  --------------------------
-
-	* S3/PkgInfo.py: Updated to 1.0.0-rc2
-	* NEWS, TODO, s3cmd.1: Updated.
-
-2010-11-13  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd: Added support for remote-to-remote sync.
-	  (Based on patch from Sundar Raman - thanks!)
-	* run-tests.py: Testsuite for the above.
-
-2010-11-12  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd: Fixed typo in "s3cmd du" error path.
-
-2010-11-12  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* format-manpage.pl: new manpage auto-formatter
-	* s3cmd.1: Updated using the above helper script
-	* setup.py: Warn if manpage is too old.
-
-2010-10-27  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* run-tests.py, testsuite.tar.gz: Keep the testsuite in
-	  SVN as a tarball. There's too many "strange" things 
-	  in the directory for it to be kept in SVN.
-
-2010-10-27  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* TODO: Updated.
-	* upload-to-sf.sh: Updated for new SF.net system
-
-2010-10-26  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* Released version 1.0.0-rc1
-	  --------------------------
-
-	* S3/PkgInfo.py: Updated to 1.0.0-rc1
-	* NEWS, TODO: Updated.
-
-2010-10-26  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/CloudFront.py, S3/Config.py: Added support
-	  for CloudFront DefaultRootObject. Thanks to Luke Andrew.
-
-2010-10-25  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd: Improved 'fixbucket' command. Thanks to Srinivasa
-	  Moorthy.
-	* s3cmd: Read config file even if User Profile directory on 
-	  Windows contains non-ascii symbols. Thx Slava Vishnyakov
-
-2010-10-25  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd: Don't fail when a local node is a directory
-	  and we expected a file. (as if for example /etc/passwd 
-	  was a dir)
-
-2010-10-25  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/S3.py: Ignore inaccessible (and missing) files
-	  on upload.
-	* run-tests.py: Extended [sync] test to verify correct
-	  handling of inaccessible files.
-	* testsuite/permission-tests: New testsuite files.
-
-2010-10-24  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/S3.py: "Stringify" all headers. Httplib should do
-	  it but some Python 2.7 users reported problems that should
-	  now be fixed.
-	* run-tests.py: Fixed test #6
-
-2010-07-25  Aaron Maxwell  <amax@resymbol.net>
-
-	* S3/Config.py, testsuite/etc/, run-tests.py, s3cmd.1, s3cmd:
-	  Option to follow local symlinks for sync and 
-	  put (--follow-symlinks option), including tests and documentation
-	* run-tests.py: --bucket-prefix option, to allow different 
-	  developers to run tests in their own sandbox
-
-2010-07-08  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* run-tests.py, testsuite/crappy-file-name.tar.gz:
-	  Updated testsuite, work around a problem with [s3cmd cp]
-	  when the source file contains '?' or '\x7f' 
-	  (where the inability to copy '?' is especially annoying).
-
-2010-07-08  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/Utils.py, S3/S3Uri.py: Fixed names after moving 
-	  functions between modules.
-
-2010-06-29  Timothee Groleau <kde@timotheegroleau.com>
-
-	* S3/ACL.py: Fix isAnonRead method on Grantees
-	* ChangeLog: Update name of contributor for Timothee Groleau
-
-2010-06-13  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/CloudFront.py: Both [accesslog] and [cfmodify] 
-	  access logging can now be disabled with --no-access-logging
-
-2010-06-13  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/CloudFront.py: Allow s3:// URI as well as cf:// URI 
-	  for most CloudFront-related commands.
-
-2010-06-12  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/CloudFront.py, S3/Config.py: Support access 
-	  logging for CloudFront distributions.
-	* S3/S3.py, S3/Utils.py: Moved some functions to Utils.py
-	  to make them available to CloudFront.py
-	* NEWS: Document the above.
-
-2010-05-27  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/S3.py: Fix bucket listing for buckets with
-	  over 1000 prefixes. (contributed by Timothee Groleau)
-	* S3/S3.py: Fixed code formating.
-
-2010-05-21  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/S3.py: Added support for bucket locations
-	  outside US/EU (i.e. us-west-1 and ap-southeast-1 as of now).
-
-2010-05-21  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/S3.py, S3/Config.py: Added --reduced-redundancy
-	  switch for Reduced Redundancy Storage.
-
-2010-05-20  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/ACL.py, S3/Config.py: Support for --acl-grant
-	  and --acl-revoke (contributed by Timothee Groleau)
-	* s3cmd: Couple of fixes on top of the above commit.
-	* s3cmd: Pre-parse ACL parameters in OptionS3ACL()
-
-2010-05-20  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/Exceptions.py, S3/S3.py: Some HTTP_400 exceptions 
-	  are retriable.
-
-2010-03-19  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd, S3/ACL.py: Print all ACLs for a Grantee
-	(one Grantee can have multiple different Grant entries)
-
-2010-03-19  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd: Enable bucket-level ACL setting
-	* s3cmd, S3/AccessLog.py, ...: Added [accesslog] command.
-	* s3cmd: Fix imports from S3.Utils
-
-2009-12-10  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* s3cmd: Path separator conversion on Windows hosts.
-
-2009-10-08  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* Released version 0.9.9.91
-	  -------------------------
-
-	* S3/PkgInfo.py: Updated to 0.9.9.91
-	* NEWS: News for 0.9.9.91
-
-2009-10-08  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/S3.py: fixed reference to _max_retries.
-
-2009-10-06  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* Released version 0.9.9.90
-	  -------------------------
-
-	* S3/PkgInfo.py: Updated to 0.9.9.90
-	* NEWS: News for 0.9.9.90
-
-2009-10-06  Michal Ludvig  <mludvig@logix.net.nz>
-
-	* S3/S3.py: Introduce throttling on upload only after
-	  second failure. I.e. first retry at full speed.
-	* TODO: Updated with new ideas.
-
-2009-06-02  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: New [fixbucket] command for fixing invalid object
-	  names in a given Bucket. For instance names with &#x08; in
-	  them (not sure how people manage to upload them but they do).
-	* S3/S3.py, S3/Utils.py, S3/Config.py: Support methods for 
-	  the above, plus advise user to run 'fixbucket' when XML parsing 
-	  fails.
-	* NEWS: Updated.
-	
-2009-05-29  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Utils.py: New function replace_nonprintables()
-	* s3cmd: Filter local filenames through the above function
-	  to avoid problems with uploaded filenames containing invalid 
-	  XML entities, eg &#08; 
-	* S3/S3.py: Warn if a non-printables char is passed to
-	  urlencode_string() - they should have been replaced earlier 
-	  in the processing.
-	* run-tests.py, TODO, NEWS: Updated.
-	* testsuite/crappy-file-name.tar.gz: Tarball with a crappy-named
-	  file. Untar for the testsuite.
-
-2009-05-29  Michal Ludvig  <michal@logix.cz>
-
-	* testsuite/blahBlah/*: Added files needed for run-tests.py
-
-2009-05-28  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Utils.py (dateS3toPython): Be more relaxed about
-	  timestamps format.
-
-2009-05-28  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, run-test.py, TODO, NEWS: Added --dry-run
-	  and --exclude/--include for [setacl].
-	* s3cmd, run-test.py, TODO, NEWS: Added --dry-run
-	  and --exclude/--include for [del].
-
-2009-05-28  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Support for recursive [cp] and [mv], including
-	  multiple-source arguments, --include/--exclude,
-	  --dry-run, etc.
-	* run-tests.py: Tests for the above.
-	* S3/S3.py: Preserve metadata (eg ACL or MIME type) 
-	  during [cp] and [mv].
-	* NEWS, TODO: Updated.
-
-2009-05-28  Michal Ludvig  <michal@logix.cz>
-
-	* run-tests.py: Added --verbose mode.
-
-2009-05-27  Michal Ludvig  <michal@logix.cz>
-
-	* NEWS: Added info about --verbatim.
-	* TODO: Added more tasks.
-
-2009-05-27  Michal Ludvig  <michal@logix.cz>
-
-	* S3/SortedDict.py: Add case-sensitive mode.
-	* s3cmd, S3/S3.py, S3/Config.py: Use SortedDict() in 
-	  case-sensitive mode to avoid dropping filenames
-	  differing only in capitalisation
-	* run-tests.py: Testsuite for the above.
-	* NEWS: Updated.
-
-2009-03-20  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: Re-sign requests before retrial to avoid 
-	  RequestTimeTooSkewed errors on failed long-running
-	  uploads.
-	  BTW 'request' now has its own class S3Request.
-
-2009-03-04  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/Config.py, S3/S3.py: Support for --verbatim.
-
-2009-02-25  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Fixed "put file.ext s3://bkt" (ie just the bucket name).
-	* s3cmd: Fixed reporting of ImportError of S3 modules.
-	* s3cmd: Fixed Error: global name 'real_filename' is not defined
-
-2009-02-24  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: New command [sign]
-	* S3/Utils.py: New function sign_string()
-	* S3/S3.py, S3/CloudFront.py: Use sign_string().
-	* NEWS: Updated.
-
-2009-02-17  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.9
-	  ----------------------
-
-	* S3/PkgInfo.py: Updated to 0.9.9
-	* NEWS: Compile a big news list for 0.9.9
-
-2009-02-17  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd.1: Document all the new options and commands.
-	* s3cmd, S3/Config.py: Updated some help texts. Removed
-	  option --debug-syncmatch along the way (because --dry-run
-	  with --debug is good enough).
-	* TODO: Updated.
-
-2009-02-16  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Check Python version >= 2.4 as soon as possible.
-
-2009-02-14  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/Config.py, S3/S3.py: Added --add-header option.
-	* NEWS: Documented --add-header.
-	* run-tests.py: Fixed for new messages.
-
-2009-02-14  Michal Ludvig  <michal@logix.cz>
-
-	* README: Updated for 0.9.9
-	* s3cmd, S3/PkgInfo.py, s3cmd.1: Replaced project 
-	  URLs with http://s3tools.org
-	* NEWS: Improved message.
-
-2009-02-12  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Added --list-md5 for 'ls' command.
-	* S3/Config.py: New setting list_md5
-
-2009-02-12  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Set Content-Length header for requests with 'body'.
-	* s3cmd: And send it for requests with no body as well...
-
-2009-02-02  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.9-rc3
-	  --------------------------
-
-	* S3/PkgInfo.py, NEWS: Updated for 0.9.9-rc3
-
-2009-02-01  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Exceptions.py: Correct S3Exception.__str__() to
-	  avoid crash in S3Error() subclass. Reported by '~t2~'.
-	* NEWS: Updated.
-
-2009-01-30  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.9-rc2
-	  --------------------------
-
-	* S3/PkgInfo.py, NEWS, TODO: Updated for 0.9.9-rc2
-
-2009-01-30  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Under some circumstance s3cmd crashed
-	  when put/get/sync had 0 files to transmit. Fixed now.
-
-2009-01-28  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Output 'delete:' in --dry-run only when
-	  used together with --delete-removed. Otherwise
-	  the user will think that without --dry-run it
-	  would really delete the files.
-
-2009-01-27  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.9-rc1
-	  --------------------------
-
-	* S3/PkgInfo.py, NEWS, TODO: Updated for 0.9.9-rc1
-
-2009-01-26  Michal Ludvig  <michal@logix.cz>
-
-	* Merged CloudFront support from branches/s3cmd-airlock
-	  See the ChangeLog in that branch for details.
-
-2009-01-25  W. Tell  <w_tell -at- sourceforge>
-
-	* s3cmd: Implemented --include and friends.
-
-2009-01-25  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Enabled --dry-run and --exclude for 'put' and 'get'.
-	* S3/Exceptions.py: Remove DeprecationWarning about 
-	  BaseException.message in Python 2.6
-	* s3cmd: Rewritten gpg_command() to use subprocess.Popen()
-	  instead of os.popen4() deprecated in 2.6
-	* TODO: Note about failing GPG.
-
-2009-01-22  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Config.py: guess_mime_type = True (will affect new 
-	  installations only).
-
-2009-01-22  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.9-pre5
-	  ---------------------------
-
-	* S3/PkgInfo.py, NEWS, TODO: Updated for 0.9.9-pre5
-
-2009-01-22  Michal Ludvig  <michal@logix.cz>
-
-	* run-tests.py: Updated paths for the new sync
-	  semantics.
-	* s3cmd, S3/S3.py: Small fixes to make testsuite happy.
-
-2009-01-21  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Migrated 'sync' local->remote to the new
-	  scheme with fetch_{local,remote}_list().
-	  Enabled --dry-run for 'sync'.
-
-2009-01-20  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Migrated 'sync' remote->local to the new
-	  scheme with fetch_{local,remote}_list().
-	  Changed fetch_remote_list() to return dict() compatible
-	  with fetch_local_list().
-	  Re-implemented --exclude / --include processing.
-	* S3/Utils.py: functions for parsing RFC822 dates (for HTTP
-	  header responses).
-	* S3/Config.py: placeholders for --include.
-
-2009-01-15  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3Uri.py, NEWS: Support for recursive 'put'.
-
-2009-01-13  Michal Ludvig  <michal@logix.cz>
-
-	* TODO: Updated.
-	* s3cmd: renamed (fetch_)remote_keys to remote_list and
-	  a few other renames for consistency.
-
-2009-01-08  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: Some errors during file upload were incorrectly 
-	  interpreted as MD5 mismatch. (bug #2384990)
-	* S3/ACL.py: Move attributes from class to instance.
-	* run-tests.py: Tests for ACL.
-	* s3cmd: Minor messages changes.
-
-2009-01-07  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: New command 'setacl'.
-	* S3/S3.py: Implemented set_acl().
-	* S3/ACL.py: Fill in <Owner/> tag in ACL XML.
-	* NEWS: Info about 'setacl'.
-
-2009-01-07  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Factored remote_keys generation from cmd_object_get()
-	  to fetch_remote_keys().
-	* s3cmd: Display Public URL in 'info' for AnonRead objects.
-	* S3/ACL.py: Generate XML from a current list of Grantees
-
-2009-01-07  Michal Ludvig  <michal@logix.cz>
-
-	* S3/ACL.py: Keep ACL internally as a list of of 'Grantee' objects.
-	* S3/Utils.py: Fix crash in stripNameSpace() when the XML has no NS.
-
-2009-01-07  Michal Ludvig  <michal@logix.cz>
-
-	* S3/ACL.py: New object for handling ACL issues.
-	* S3/S3.py: Moved most of S3.get_acl() to ACL class.
-	* S3/Utils.py: Reworked XML helpers - remove XMLNS before 
-	  parsing the input XML to avoid having all Tags prefixed
-	  with {XMLNS} by ElementTree.
-
-2009-01-03  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Don't fail when neither $HOME nor %USERPROFILE% is set.
-	  (fixes #2483388)
-
-2009-01-01  W. Tell  <w_tell -at- sourceforge>
-
-	* S3/S3.py, S3/Utils.py: Use 'hashlib' instead of md5 and sha 
-	  modules to avoid Python 2.6 warnings.
-
-2008-12-31  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.9-pre4
-	  ---------------------------
-
-2008-12-31  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Reworked internal handling of unicode vs encoded filenames.
-	  Should replace unknown characters with '?' instead of baling out.
-
-2008-12-31  Michal Ludvig  <michal@logix.cz>
-
-	* run-tests.py: Display system encoding in use.
-	* s3cmd: Print a nice error message when --exclude-from
-	  file is not readable.
-	* S3/PkgInfo.py: Bumped up version to 0.9.9-pre4
-	* S3/Exceptions.py: Added missing imports.
-	* NEWS: Updated.
-	* testsuite: reorganised UTF-8 files, added GBK encoding files,
-	  moved encoding-specific files to 'tar.gz' archives, removed 
-	  unicode dir.
-	* run-tests.py: Adapted to the above change.
-	* run-tests.sh: removed.
-	* testsuite/exclude.encodings: Added.
-	* run-tests.py: Don't assume utf-8, use preferred encoding 
-	  instead.
-	* s3cmd, S3/Utils.py, S3/Exceptions.py, S3/Progress.py,
-	  S3/Config.py, S3/S3.py: Added --encoding switch and 
-	  Config.encoding variable. Don't assume utf-8 for filesystem
-	  and terminal output anymore.
-	* s3cmd: Avoid ZeroDivisionError on fast links.
-	* s3cmd: Unicodised all info() output.
-
-2008-12-30  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Replace unknown Unicode characters with '?'
-	  to avoid UnicodeEncodeError's. Also make all output strings
-	  unicode.
-	* run-tests.py: Exit on failed test. Fixed order of tests.
-
-2008-12-29  Michal Ludvig  <michal@logix.cz>
-
-	* TODO, NEWS: Updated
-	* s3cmd: Improved wildcard get.
-	* run-tests.py: Improved testsuite, added parameters support
-	  to run only specified tests, cleaned up win/posix integration.
-	* S3/Exception.py: Python 2.4 doesn't automatically set 
-	  Exception.message.
-
-2008-12-29  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, run-tests.py: Make it work on Windows.
-
-2008-12-26  Michal Ludvig  <michal@logix.cz>
-
-	* setup.cfg: Remove explicit install prefix. That should fix
-	  Mac OS X and Windows "setup.py install" runs.
-
-2008-12-22  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3.py, S3/Progress.py: Display "[X of Y]"
-	  in --progress mode.
-	* s3cmd, S3/Config.py: Implemented recursive [get].
-	  Added --skip-existing option for [get] and [sync]. 
-
-2008-12-17  Michal Ludvig  <michal@logix.cz>
-
-	* TODO: Updated
-
-2008-12-14  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Progress.py: Restructured import Utils to avoid import
-	  conflicts.
-
-2008-12-12  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Better Exception output. Print sys.path on ImportError,
-	  don't print backtrace on KeyboardInterrupt
-
-2008-12-11  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Support for multiple sources in 'get' command.
-
-2008-12-10  Michal Ludvig  <michal@logix.cz>
-
-	* TODO: Updated list.
-	* s3cmd: Don't display download/upload completed message
-	  in --progress mode.
-	* S3/S3.py: Pass src/dst names down to Progress class.
-	* S3/Progress.py: added new class ProgressCR - apparently 
-	  ProgressANSI doesn't work on MacOS-X (and perhaps elsewhere).
-	* S3/Config.py: Default progress meter is now ProgressCR
-	* s3cmd: Updated email address for reporting bugs.
-
-2008-12-02  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3.py, NEWS: Support for (non-)recursive 'ls'
-
-2008-12-01  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.9-pre3
-	  ---------------------------
-
-	* S3/PkgInfo.py: Bumped up version to 0.9.9-pre3
-
-2008-12-01  Michal Ludvig  <michal@logix.cz>
-
-	* run-tests.py: Added a lot of new tests.
-	* testsuite/etc/logo.png: New file.
-
-2008-11-30  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: object_get() -- make start_position argument optional.
-
-2008-11-29  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Delete local files with "sync --delete-removed"
-
-2008-11-25  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/Progress.py: Fixed Unicode output in Progress meter.
-	* s3cmd: Fixed 'del --recursive' without prefix (i.e. all objects).
-	* TODO: Updated list.
-	* upload-to-sf.sh: Helper script.
-	* S3/PkgInfo.py: Bumped up version to 0.9.9-pre2+svn
-
-2008-11-24  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.9-pre2
-	  ------------------------
-
-	* S3/PkgInfo.py: Bumped up version to 0.9.9-pre2
-	* NEWS: Added 0.9.9-pre2
-
-2008-11-24  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, s3cmd.1, S3/S3.py: Display or don't display progress meter
-	  default depends on whether we're on TTY (console) or not.
-
-2008-11-24  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Fixed 'get' conflict.
-	* s3cmd.1, TODO: Document 'mv' command.
-
-2008-11-24  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py, s3cmd, S3/Config.py, s3cmd.1: Added --continue for
-	  'get' command, improved 'get' failure resiliency.
-	* S3/Progress.py: Support for progress meter not starting in 0.
-	* S3/S3.py: improved retrying in send_request() and send_file()
-
-2008-11-24  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3.py, NEWS: "s3cmd mv" for moving objects
-
-2008-11-24  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Utils.py: Common XML parser.
-	* s3cmd, S3/Exeptions.py: Print info message on Error.
-
-2008-11-21  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Support for 'cp' command.
-	* S3/S3.py: Added S3.object.copy() method.
-	* s3cmd.1: Document 'cp' command.
-	* NEWS: Let everyone know ;-)
-	Thanks Andrew Ryan for a patch proposal!
-	https://sourceforge.net/forum/forum.php?thread_id=2346987&forum_id=618865
-
-2008-11-17  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Progress.py: Two progress meter implementations.
-	* S3/Config.py, s3cmd: New --progress / --no-progress parameters
-	  and Config() members.
-	* S3/S3.py: Call Progress() in send_file()/recv_file()
-	* NEWS: Let everyone know ;-)
-
-2008-11-16  Michal Ludvig  <michal@logix.cz>
-
-	* NEWS: Fetch 0.9.8.4 release news from 0.9.8.x branch.
-
-2008-11-16  Michal Ludvig  <michal@logix.cz>
-
-	Merge from 0.9.8.x branch, rel 251:
-	* S3/S3.py: Adjusting previous commit (orig 249) - it's not a good idea 
-	  to retry ALL failures. Especially not those code=4xx where AmazonS3 
-	  servers are not happy with our requests.
-	Merge from 0.9.8.x branch, rel 249:
-	* S3/S3.py, S3/Exception.py: Re-issue failed requests in S3.send_request()
-	Merge from 0.9.8.x branch, rel 248:
-	* s3cmd: Don't leak open filehandles in sync. Thx Patrick Linskey for report.
-	Merge from 0.9.8.x branch, rel 247:
-	* s3cmd: Re-raise the right exception.
-	Merge from 0.9.8.x branch, rel 246:
-	* s3cmd, S3/S3.py, S3/Exceptions.py: Don't abort 'sync' or 'put' on files
-	  that can't be open (e.g. Permision denied). Print a warning and skip over
-	  instead.
-	Merge from 0.9.8.x branch, rel 245:
-	* S3/S3.py: Escape parameters in strings. Fixes sync to and 
-	  ls of directories with spaces. (Thx Lubomir Rintel from Fedora Project)
-	Merge from 0.9.8.x branch, rel 244:
-	* s3cmd: Unicode brainfuck again. This time force all output
-	  in UTF-8, will see how many complaints we'll get...
-
-2008-09-16  Michal Ludvig  <michal@logix.cz>
-
-	* NEWS: s3cmd 0.9.8.4 released from branches/0.9.8.x SVN branch.
-
-2008-09-16  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: Don't run into ZeroDivisionError when speed counter
-	  returns 0s elapsed on upload/download file.
-
-2008-09-15  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3.py, S3/Utils.py, S3/S3Uri.py, S3/Exceptions.py:
-	  Yet anoter Unicode round. Unicodised all command line arguments 
-	  before processing.
-
-2008-09-15  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: "s3cmd mb" can create upper-case buckets again
-	  in US. Non-US (e.g. EU) bucket names must conform to strict
-	  DNS-rules.
-	* S3/S3Uri.py: Display public URLs correctly for non-DNS buckets.
-
-2008-09-10  Michal Ludvig  <michal@logix.cz>
-
-	* testsuite, run-tests.py: Added testsuite with first few tests.
-
-2008-09-10  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3Uri.py, S3/S3.py: All internal representations of
-	  S3Uri()s are Unicode (i.e. not UTF-8 but type()==unicode). It 
-	  still doesn't work on non-UTF8 systems though.
-
-2008-09-04  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Rework UTF-8 output to keep sys.stdout untouched (or it'd
-	  break 's3cmd get' to stdout for binary files).
-
-2008-09-03  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3.py, S3/Config.py: Removed --use-old-connect-method
-	  again. Autodetect the need for old connect method instead.
-
-2008-09-03  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3.py: Make --verbose mode more useful and default 
-	  mode less verbose.
-
-2008-09-03  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/Config.py: [rb] Allow removal of non-empty buckets
-	  with --force.
-	  [mb, rb] Allow multiple arguments, i.e. create or remove
-	  multiple buckets at once.
-	  [del] Perform recursive removal with --recursive (or -r).
-
-2008-09-01  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Refuse 'sync' together with '--encrypt'.
-	* S3/S3.py: removed object_{get,put,delete}_uri() functions
-	  and made object_{get,put,delete}() accept URI instead of 
-	  bucket/object parameters.
-
-2008-09-01  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Bumped up version to 0.9.9-pre1
-
-2008-09-01  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3.py, S3/Config.py: Allow access to upper-case
-	  named buckets again with --use-old-connect-method 
-	  (uses http://s3.amazonaws.com/bucket/object instead of
-	  http://bucket.s3.amazonaws.com/object)
-
-2008-08-19  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Always output UTF-8, even on output redirects.
-
-2008-08-01  Michal Ludvig  <michal@logix.cz>
-
-	* TODO: Add some items
-
-2008-07-29  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.8.3
-	  ------------------------
-
-2008-07-29  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Bumped up version to 0.9.8.3
-	* NEWS: Added 0.9.8.3
-
-2008-07-29  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Utils.py (hash_file_md5): Hash files in 32kB chunks
-	  instead of reading it all up to a memory first to avoid
-	  OOM on large files.
-
-2008-07-07  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd.1: couple of syntax fixes from Mikhail Gusarov
-
-2008-07-03  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.8.2
-	  ------------------------
-
-2008-07-03  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Bumped up version to 0.9.8.2
-	* NEWS: Added 0.9.8.2
-	* s3cmd: Print version info on 'unexpected error' output.
-
-2008-06-30  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: Re-upload when Amazon doesn't send ETag
-	  in PUT response. It happens from time to time for
-	  unknown reasons. Thanks "Burtc" for report and
-	  "hermzz" for fix.
-
-2008-06-27  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.8.1
-	  ------------------------
-
-2008-06-27  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Bumped up version to 0.9.8.1
-	* NEWS: Added 0.9.8.1
-	* s3cmd: make 'cfg' global
-	* run-tests.sh: Sort-of testsuite
-
-2008-06-23  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.8
-	  ----------------------
-
-2008-06-23  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Bumped up version to 0.9.8
-	* NEWS: Added 0.9.8
-	* TODO: Removed completed tasks
-
-2008-06-23  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Last-minute compatibility fixes for Python 2.4
-	* s3cmd, s3cmd.1: --debug-exclude is an alias for --debug-syncmatch
-	* s3cmd: Don't require $HOME env variable to be set.
-	  Fixes #2000133
-	* s3cmd: Wrapped all execution in a try/except block
-	  to catch all exceptions and ask for a report.
-
-2008-06-18  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Version 0.9.8-rc3
-
-2008-06-18  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: Bucket name can't contain upper-case letters (S3/DNS limitation).
-
-2008-06-12  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Version 0.9.8-rc2
-
-2008-06-12  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, s3cmd.1: Added GLOB (shell-style wildcard) exclude, renamed
-	  orig regexp-style --exclude to --rexclude
-
-2008-06-11  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Version 0.9.8-rc1
-
-2008-06-11  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Remove python 2.5 specific code (try/except/finally 
-	  block) and make s3cmd compatible with python 2.4 again.
-	* s3cmd, S3/Config.py, s3cmd.1: Added --exclude-from and --debug-syncmatch
-	  switches for sync.
-
-2008-06-10  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Added --exclude switch for sync.
-	* s3cmd.1, NEWS: Document --exclude
-
-2008-06-05  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.7
-	  ----------------------
-
-2008-06-05  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Bumped up version to 0.9.7
-	* NEWS: Added 0.9.7
-	* TODO: Removed completed tasks
-	* s3cmd, s3cmd.1: Updated help texts, 
-	  removed --dry-run option as it's not implemented.
-	
-2008-06-05  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Config.py: Store more file attributes in sync to S3.
-	* s3cmd: Make sync remote2local more error-resilient.
-
-2008-06-04  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Implemented cmd_sync_remote2local() for restoring
-	  backup from S3 to a local filesystem
-	* S3/S3.py: S3.object_get_uri() now requires writable stream 
-	  and not a path name.
-	* S3/Utils.py: Added mkdir_with_parents()
-
-2008-06-04  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Refactored cmd_sync() in preparation 
-	  for remote->local sync.
-
-2008-04-30  Michal Ludvig  <michal@logix.cz>
-
-	* s3db, S3/SimpleDB.py: Implemented almost full SimpleDB API.
-
-2008-04-29  Michal Ludvig  <michal@logix.cz>
-
-	* s3db, S3/SimpleDB.py: Initial support for Amazon SimpleDB. 
-	  For now implements ListDomains() call and most of the 
-	  infrastructure required for request creation.
-
-2008-04-29  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Exceptions.py: Exceptions moved out of S3.S3
-	* S3/SortedDict.py: rewritten from scratch to preserve
-	  case of keys while still sorting in case-ignore mode.
-
-2008-04-28  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: send_file() now computes MD5 sum of the file
-	  being uploaded, compares with ETag returned by Amazon
-	  and retries upload if they don't match.
-
-2008-03-05  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/S3.py, S3/Utils.py: Throttle upload speed and retry 
-	  when upload failed.
-	  Report download/upload speed and time elapsed.
-
-2008-02-28  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.6
-	  ----------------------
-
-2008-02-28  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: bumped up version to 0.9.6
-	* NEWS: What's new in 0.9.6
-
-2008-02-27  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, s3cmd.1: Updated help and man page.
-	* S3/S3.py, S3/Utils.py, s3cmd: Support for 's3cmd info' command.
-	* s3cmd: Fix crash when 'sync'ing files with unresolvable owner uid/gid.
-	* S3/S3.py, S3/Utils.py: open files in binary mode (otherwise windows
-	  users have problems).
-	* S3/S3.py: modify 'x-amz-date' format (problems reported on MacOS X). 
-	  Thanks Jon Larkowski for fix.
-
-2008-02-27  Michal Ludvig  <michal@logix.cz>
-
-	* TODO: Updated wishlist.
-
-2008-02-11  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: Properly follow RedirectPermanent responses for EU buckets
-	* S3/S3.py: Create public buckets with -P (#1837328)
-	* S3/S3.py, s3cmd: Correctly display public URL on uploads.
-	* S3/S3.py, S3/Config.py: Support for MIME types. Both 
-	default and guessing. Fixes bug #1872192 (Thanks Martin Herr)
-
-2007-11-13  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.5
-	  ----------------------
-
-2007-11-13  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: Support for buckets stored in Europe, access now 
-	  goes via <bucket>.s3.amazonaws.com where possible.
-
-2007-11-12  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Support for storing file attributes (like ownership, 
-	  mode, etc) in sync operation.
-	* s3cmd, S3/S3.py: New command 'ib' to get information about 
-	  bucket (only 'LocationConstraint' supported for now).
-
-2007-10-01  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Fix typo in argument name (patch
-	  from Kim-Minh KAPLAN, SF #1804808)
-
-2007-09-25  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Exit with error code on error (patch
-	  from Kim-Minh KAPLAN, SF #1800583)
-
-2007-09-25  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: Don't fail if bucket listing doesn't have
-	  <IsTruncated> node.
-	* s3cmd: Create ~/.s3cfg with 0600 permissions.
-
-2007-09-13  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Improved 'sync'
-	* S3/S3.py: Support for buckets with over 1000 objects.
-
-2007-09-03  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Small tweaks to --configure workflow.
-
-2007-09-02  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Initial support for 'sync' operation. For
-	  now only local->s3 direction. In this version doesn't
-	  work well with non-ASCII filenames and doesn't support
-	  encryption.
-
-2007-08-24  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/Util.py: More ElementTree imports cleanup
-
-2007-08-19  Michal Ludvig  <michal@logix.cz>
-
-	* NEWS: Added news for 0.9.5
-
-2007-08-19  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Better handling of multiple arguments for put, get and del
-
-2007-08-14  Michal Ludvig  <michal@logix.cz>
-
-	* setup.py, S3/Utils.py: Try import xml.etree.ElementTree
-	  or elementtree.ElementTree module.
-
-2007-08-14  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd.1: Add info about --encrypt parameter.
-
-2007-08-14  Michal Ludvig  <michal@logix.cz>
-
-	* S3/PkgInfo.py: Bump up version to 0.9.5-pre
-
-2007-08-13  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.4
-	  ----------------------
-
-2007-08-13  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py: Added function urlencode_string() that encodes
-	  non-ascii characters in object name before sending it to S3.
-
-2007-08-13  Michal Ludvig  <michal@logix.cz>
-
-	* README: Updated Amazon S3 pricing overview
-
-2007-08-13  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd, S3/Config.py, S3/S3.py: HTTPS support
-
-2007-07-20  Michal Ludvig  <michal@logix.cz>
-
-	* setup.py: Check correct Python version and ElementTree availability.
-
-2007-07-05  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: --configure support for Proxy
-	* S3/S3.py: HTTP proxy support from
-	  John D. Rowell <jdrowell@exerciseyourbrain.com>
-
-2007-06-19  Michal Ludvig  <michal@logix.cz>
-
-	* setup.py: Check for S3CMD_PACKAGING and don't install
-	  manpages and docs if defined.
-	* INSTALL: Document the above change.
-	* MANIFEST.in: Include uncompressed manpage
-
-2007-06-17  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Added encryption key support to --configure
-	* S3/PkgInfo.py: Bump up version to 0.9.4-pre
-	* setup.py: Cleaned up some rpm-specific stuff that 
-	  caused problems to Debian packager Mikhail Gusarov
-	* setup.cfg: Removed [bdist_rpm] section
-	* MANIFEST.in: Include S3/*.py
-
-2007-06-16  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd.1: Syntax fixes from Mikhail Gusarov <dottedmag@dottedmag.net>
-
-2007-05-27  Michal Ludvig  <michal@logix.cz>
-
-	* Support for on-the-fly GPG encryption.
-
-2007-05-26  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd.1: Add info about "s3cmd du" command.
-
-2007-05-26  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.3
-	  ----------------------
-
-2007-05-26  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Patch from Basil Shubin <basil.shubin@gmail.com>
-	  adding support for "s3cmd du" command.
-	* s3cmd: Modified output format of "s3cmd du" to conform
-	  with unix "du".
-	* setup.cfg: Require Python 2.5 in RPM. Otherwise it needs
-	  to require additional python modules (e.g. ElementTree)
-	  which may have different names in different distros. It's 
-	  indeed still possible to manually install s3cmd with 
-	  Python 2.4 and appropriate modules.
-
-2007-04-09  Michal Ludvig  <michal@logix.cz>
-
-	* Released version 0.9.2
-	  ----------------------
-
-2007-04-09  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd.1: Added manpage
-	* Updated infrastructure files to create "better"
-	  distribution archives.
-
-2007-03-26  Michal Ludvig  <michal@logix.cz>
-
-	* setup.py, S3/PkgInfo.py: Move package info out of setup.py
-	* s3cmd: new parameter --version
-	* s3cmd, S3/S3Uri.py: Output public HTTP URL for objects
-	  stored with Public ACL.
-	  
-2007-02-28  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Verify supplied accesskey and secretkey
-	  in interactive configuration path.
-	* S3/Config.py: Hide access key and secret key
-	  from debug output.
-	* S3/S3.py: Modify S3Error exception to work
-	  in python 2.4 (=> don't expect Exception is
-	  a new-style class).
-	* s3cmd: Updated for the above change.
-
-2007-02-19  Michal Ludvig  <michal@logix.cz>
-
-	* NEWS, INSTALL, README, setup.py: Added
-	  more documentation.
-
-2007-02-19  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py, s3cmd: New feature - allow "get" to stdout
-
-2007-02-19  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3fs.py: Removed (development moved to branch s3fs-devel).
-
-2007-02-08  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3fs.py: 
-	  - Implemented mknod()
-	  - Can create directory structure
-	  - Rewritten to use SQLite3. Currently can create
-	    the filesystem, and a root inode.
-
-2007-02-07  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd (from /s3py:74): Renamed SVN top-level project
-	  s3py to s3cmd
-
-2007-02-07  Michal Ludvig  <michal@logix.cz>
-
-	* setup.cfg: Only require Python 2.4, not 2.5
-	* S3/Config.py: Removed show_uri - no longer needed,
-	  it's now default
-
-2007-02-07  Michal Ludvig  <michal@logix.cz>
-
-	* setup.py
-	  - Version 0.9.1
-
-2007-02-07  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd: Change all "exit()" calls to "sys.exit()"
-	  and allow for python 2.4
-	* S3/S3.py: Removed dependency on hashlib -> allow for python 2.4
-
-2007-01-27  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py, S3/S3Uri.py: Case insensitive regex in S3Uri.py
-
-2007-01-26  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3fs.py: Added support for stroing/loading inodes.
-	  No data yet however.
-
-2007-01-26  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3fs.py: Initial version of S3fs module. 
-	  Can create filesystem via "S3fs.mkfs()"
-
-2007-01-26  Michal Ludvig  <michal@logix.cz>
-
-	* S3/BidirMap.py, S3/Config.py, S3/S3.py, S3/S3Uri.py,
-	  S3/SortedDict.py, S3/Utils.py, s3cmd: Added headers with
-	  copyright to all files
-	* S3/S3.py, S3/S3Uri.py: Removed S3.compose_uri(), introduced
-	  S3UriS3.compose_uri() instead.
-
-2007-01-26  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py, S3/S3Uri.py, s3cmd: 
-	  - Converted all users of parse_uri to S3Uri class API
-	  - Removed "cp" command again. Will have to use 'put'
-	    and 'get' for now.
-
-2007-01-25  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3Uri.py: New module S3/S3Uri.py
-	* S3/S3.py, s3cmd: Converted "put" operation to use
-	  the new S3Uri class.
-
-2007-01-24  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py
-	* s3cmd
-	  - Added 'cp' command
-	  - Renamed parse_s3_uri to parse_uri (this will go away anyway)
-
-2007-01-19  Michal Ludvig  <michal@logix.cz>
-
-	* setup.cfg
-	* setup.py
-	  - Include README into tarballs
-
-2007-01-19  Michal Ludvig  <michal@logix.cz>
-
-	* README
-	  - Added comprehensive README file
-
-2007-01-19  Michal Ludvig  <michal@logix.cz>
-
-	* setup.cfg
-	* setup.py
-	  - Added configuration for setup.py sdist
-
-2007-01-19  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Config.py
-	* s3cmd
-	  - Added interactive configurator (--configure)
-	  - Added config dumper (--dump-config)
-	  - Improved --help output
-
-2007-01-19  Michal Ludvig  <michal@logix.cz>
-
-	* setup.cfg
-	* setup.py
-	  Added info for building RPM packages.
-
-2007-01-18  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Config.py
-	* S3/S3.py
-	* s3cmd
-	  Moved class Config from S3/S3.py to S3/Config.py
-
-2007-01-18  Michal Ludvig  <michal@logix.cz>
-
-	* S3/Config.py (from /s3py/trunk/S3/ConfigParser.py:47)
-	* S3/ConfigParser.py
-	* S3/S3.py
-	  Renamed S3/ConfigParser.py to S3/Config.py
-
-2007-01-18  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd
-	  Added info about homepage
-
-2007-01-17  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py
-	* s3cmd
-	  - Use prefix for listings if specified.
-	  - List all commands in --help
-
-2007-01-16  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py
-	* s3cmd
-	  Major rework of Config class:
-	  - Renamed from AwsConfig to Config
-	  - Converted to Singleton (see Config.__new__() and an article on
-	    Wikipedia)
-	  - No more explicit listing of options - use introspection to get them
-	    (class variables that of type str, int or bool that don't start with
-	    underscore)
-	  - Check values read from config file and verify their type.
-	  
-	  Added OptionMimeType and -m/-M options. Not yet implemented
-	  functionality in the rest of S3/S3.py
-
-2007-01-15  Michal Ludvig  <michal@logix.cz>
-
-	* S3/S3.py
-	* s3cmd
-	  - Merged list-buckets and bucket-list-objects operations into
-	    a single 'ls' command.
-	  - New parameter -P for uploading publicly readable objects
-
-2007-01-14  Michal Ludvig  <michal@logix.cz>
-
-	* s3.py
-	* setup.py
-	  Renamed s3.py to s3cmd (take 2)
-
-2007-01-14  Michal Ludvig  <michal@logix.cz>
-
-	* s3cmd (from /s3py/trunk/s3.py:45)
-	  Renamed s3.py to s3cmd
-
-2007-01-14  Michal Ludvig  <michal@logix.cz>
-
-	* S3
-	* S3/S3.py
-	* s3.py
-	* setup.py
-	  All classes from s3.py go to S3/S3.py
-	  Added setup.py
-
-2007-01-14  Michal Ludvig  <michal@logix.cz>
-
-	* s3.py
-	  Minor fix S3.utils -> S3.Utils
-
-2007-01-14  Michal Ludvig  <michal@logix.cz>
-
-	* .svnignore
-	* BidirMap.py
-	* ConfigParser.py
-	* S3
-	* S3/BidirMap.py (from /s3py/trunk/BidirMap.py:35)
-	* S3/ConfigParser.py (from /s3py/trunk/ConfigParser.py:38)
-	* S3/SortedDict.py (from /s3py/trunk/SortedDict.py:35)
-	* S3/Utils.py (from /s3py/trunk/utils.py:39)
-	* S3/__init__.py
-	* SortedDict.py
-	* s3.py
-	* utils.py
-	  Moved modules to their own package
-
-2007-01-12  Michal Ludvig  <michal@logix.cz>
-
-	* s3.py
-	  Added "del" command
-	  Converted all (?) commands to accept s3-uri
-	  Added -u/--show-uri parameter
-
-2007-01-11  Michal Ludvig  <michal@logix.cz>
-
-	* s3.py
-	  Verify MD5 on received files
-	  Improved upload of multiple files
-	  Initial S3-URI support (more tbd)
-
-2007-01-11  Michal Ludvig  <michal@logix.cz>
-
-	* s3.py
-	  Minor fixes:
-	  - store names of parsed files in AwsConfig
-	  - Print total size with upload/download
-
-2007-01-11  Michal Ludvig  <michal@logix.cz>
-
-	* s3.py
-	* utils.py
-	  Added support for sending and receiving files.
-
-2007-01-11  Michal Ludvig  <michal@logix.cz>
-
-	* ConfigParser.py
-	* s3.py
-	  List all Objects in all Buckets command
-	  Yet another logging improvement
-	  Version check for Python 2.5 or higher
-
-2007-01-11  Michal Ludvig  <michal@logix.cz>
-
-	* ConfigParser.py
-	* s3.py
-	* utils.py
-	  Added ConfigParser
-	  Improved setting logging levels
-	  It can now quite reliably list buckets and objects
-
-2007-01-11  Michal Ludvig  <michal@logix.cz>
-
-	* .svnignore
-	  Added ignore list
-
-2007-01-11  Michal Ludvig  <michal@logix.cz>
-
-	* .svnignore
-	* BidirMap.py
-	* SortedDict.py
-	* s3.py
-	* utils.py
-	  Initial import
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..77bac57
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,54 @@
+Metadata-Version: 2.1
+Name: s3cmd
+Version: 2.3.0.dev0
+Summary: Command line tool for managing Amazon S3 and CloudFront services
+Home-page: http://s3tools.org
+Author: Michal Ludvig
+Author-email: michal@logix.cz
+Maintainer: github.com/fviard, github.com/matteobar
+Maintainer-email: s3tools-bugs@lists.sourceforge.net
+License: GNU GPL v2+
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Environment :: MacOS X
+Classifier: Environment :: Win32 (MS Windows)
+Classifier: Intended Audience :: End Users/Desktop
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
+Classifier: Natural Language :: English
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: System :: Archiving
+Classifier: Topic :: Utilities
+License-File: LICENSE
+
+
+
+S3cmd lets you copy files from/to Amazon S3
+(Simple Storage Service) using a simple to use
+command line client. Supports rsync-like backup,
+GPG encryption, and more. Also supports management
+of Amazon's CloudFront content delivery network.
+
+
+Authors:
+--------
+    Florent Viard <florent@sodria.com>
+
+    Michal Ludvig  <michal@logix.cz>
+
+    Matt Domsch (github.com/mdomsch)
diff --git a/RELEASE_INSTRUCTIONS b/RELEASE_INSTRUCTIONS
deleted file mode 100644
index 193bd04..0000000
--- a/RELEASE_INSTRUCTIONS
+++ /dev/null
@@ -1,98 +0,0 @@
-Instructions for s3cmd maintainers for doing a tagged release and publishing on sourceforge.net.
-In the below, 2.1.0 is the example version being released.  Salt to taste.
-
-Dependency that could be needed for the release:
-    pip install --user twine
-
-1.  Make a fresh clone of the repo:
-    git clone ssh+git://git@github.com/s3tools/s3cmd s3cmd-release
-
-2.  Run ./run-tests.py to verify it all works OK.
-
-3.  Update version to 2.1.0 in S3/PkgInfo.py
-
-4.  Update manpage with ./s3cmd --help | ./format-manpage.pl > s3cmd.1
-
-5.  Update NEWS with info about new features. Best to extract from git with:
-    git log --abbrev-commit --no-merges v2.0.2..
-    (list all tags with: "git tag")
-
-6.  Verify the above changes:
-    git diff --check && git diff
-    git status
-    (The only changed files should be NEWS, s3cmd.1, S3/PkgInfo.py)
-
-7.  Remove testsuite (intentionally inaccessible files break the next
-    step):
-    chmod -R +rwx testsuite/permission-tests/permission-denied-dir && rm -rf testsuite
-
-8.  If everything worked fine commit the above changes:
-    git commit -a -m "Update version to 2.1.0"
-
-9.  Tag it:
-    git tag --sign -a v2.1.0 -m "Tag v2.1.0"
-
-10. Push back to github:
-    git push --tags
-
-11. Build the "Source Distribution" and the universal "Wheel" package:
-    python setup.py sdist bdist_wheel --universal
-    -> Creates dist/s3cmd-2.1.0.tar.gz , dist/s3cmd-2.1.0.zip and dist/s3cmd-2.1.0-py2.py3-none-any.whl
-
-12. Generate the GPG signatures for the previously generated artefacts
-    gpg2 --default-key XXXX --detach-sign -a dist/s3cmd-2.1.0.tar.gz
-    gpg2 --default-key XXXX --detach-sign -a dist/s3cmd-2.1.0.zip
-    gpg2 --default-key XXXX --detach-sign -a dist/s3cmd-2.1.0-py2.py3-none-any.whl
-
-13. Publish to PyPi, so 'pip install s3cmd' downloads the new version.
-    twine upload dist/s3cmd-2.1.0.tar.gz* dist/s3cmd-2.1.0-py2.py3-none-any.whl*
-
-Note: we only publish the .tar.gz and the .whl (+.asc signatures) to Pypi, and
-not the .zip as Pypi only accepts a single "sdist" source file for a given version.
-
-
-GitHub releases
-
-1. Login to github.com/s3tools/s3cmd
-
-2.  You will see your new tag in the Tags tab.  Click "Draft a new
-    release".
-
-3.  In the 'Tag version' drop-down, select your new tag.
-
-4.  In the 'Release title' field, name it v2.1.0.
-
-5.  In the 'Describe this release' text box, add in this release's
-    notes from the NEWS file.
-
-6.  Upload all 4 files from dist/.
-
-7.  Click "Publish release"
-
-
-
-SourceForge releases
-
-1.  Login to sf.net
-
-2.  Go to https://sourceforge.net/p/s3tools/admin/
-
-3.  Files -> s3cmd -> Add Folder -> Enter "2.1.0" -> Create
-
-4.  Go into 2.1.0 -> Add File -> upload dist/s3cmd-2.1.0.tar.gz
-
-5.  Once uploaded click the little "i" icon on the right and click
-    "Select all" under "Default Download For:" to update the default
-    download button to this new version.
-
-6.  Give it a few minutes and verify on the Summary page that the
-    download button has been updated to s3cmd-2.1.0.tar.gz
-
-Now it's time to send out an announcement email to
-s3tools-announce@lists.sourceforge.net and
-s3tools-general@lists.sourceforge.net (check out the s3cmd-announce
-archive for an inspiration :)
-
-And the last step is to ask the respective distribution maintainers
-(Fedora, Debian, Ubuntu, OpenSuse, ...?) to update the package in
-their builds.
diff --git a/S3/BaseUtils.py b/S3/BaseUtils.py
index fd9e3ca..b6b7908 100644
--- a/S3/BaseUtils.py
+++ b/S3/BaseUtils.py
@@ -8,10 +8,13 @@
 
 from __future__ import absolute_import, division
 
+import functools
 import re
+import posixpath
 import sys
 
 from calendar import timegm
+from hashlib import md5
 from logging import debug, warning, error
 
 import xml.dom.minidom
@@ -43,7 +46,7 @@ except ImportError:
     from urllib.parse import quote
 
 try:
-    unicode
+    unicode = unicode
 except NameError:
     # python 3 support
     # In python 3, unicode -> str, and str -> bytes
@@ -52,6 +55,24 @@ except NameError:
 
 __all__ = []
 
+s3path = posixpath
+__all__.append("s3path")
+
+try:
+    md5()
+except ValueError as exc:
+    # md5 is disabled for FIPS-compliant Python builds.
+    # Since s3cmd does not use md5 in a security context,
+    # it is safe to allow its use by setting usedforsecurity to False.
+    try:
+        md5(usedforsecurity=False)
+        md5 = functools.partial(md5, usedforsecurity=False)
+    except Exception:
+        # "usedforsecurity" is only available on python >= 3.9 or RHEL distributions
+        raise exc
+__all__.append("md5")
+
+
 
 RE_S3_DATESTRING = re.compile('\.[0-9]*(?:[Z\\-\\+]*?)')
 RE_XML_NAMESPACE = re.compile(b'^(<?[^>]+?>\s*|\s*)(<\w+) xmlns=[\'"](https?://[^\'"]+)[\'"]', re.MULTILINE)
diff --git a/S3/CloudFront.py b/S3/CloudFront.py
index 008794c..7b93b3b 100644
--- a/S3/CloudFront.py
+++ b/S3/CloudFront.py
@@ -11,6 +11,7 @@ from __future__ import absolute_import
 import sys
 import time
 import random
+from collections import defaultdict
 from datetime import datetime
 from logging import debug, info, warning, error
 
@@ -21,11 +22,11 @@ except ImportError:
 
 from .S3 import S3
 from .Config import Config
-from .Exceptions import *
+from .Exceptions import CloudFrontError, ParameterError
+from .ExitCodes import EX_OK, EX_GENERAL, EX_PARTIAL
 from .BaseUtils import (getTreeFromXml, appendXmlTextNode, getDictFromTree,
                         dateS3toPython, encode_to_s3, decode_from_s3)
-from .Utils import (getBucketFromHostname, getHostnameFromBucket, deunicodise,
-                    urlencode_string, convertHeaderTupleListToDict)
+from .Utils import (getBucketFromHostname, getHostnameFromBucket, deunicodise, convertHeaderTupleListToDict)
 from .Crypto import sign_string_v2
 from .S3Uri import S3Uri, S3UriS3
 from .ConnMan import ConnMan
@@ -306,7 +307,7 @@ class InvalidationBatch(object):
         for path in self.paths:
             if len(path) < 1 or path[0] != "/":
                 path = "/" + path
-            appendXmlTextNode("Path", urlencode_string(path), tree)
+            appendXmlTextNode("Path", path, tree)
         appendXmlTextNode("CallerReference", self.reference, tree)
         return tree
 
@@ -334,8 +335,6 @@ class CloudFront(object):
         "GetInvalInfo" : { 'method' : "GET", 'resource' : "/%(dist_id)s/invalidation/%(request_id)s" },
     }
 
-    ## Maximum attempts of re-issuing failed requests
-    _max_retries = 5
     dist_list = None
 
     def __init__(self, config):
@@ -523,7 +522,9 @@ class CloudFront(object):
     ## Low-level methods for handling CloudFront requests
     ## --------------------------------------------------
 
-    def send_request(self, op_name, dist_id = None, request_id = None, body = None, headers = None, retries = _max_retries):
+    def send_request(self, op_name, dist_id = None, request_id = None, body = None, headers = None, retries = None):
+        if retries is None:
+            retries = self.config.max_retries
         if headers is None:
             headers = SortedDict(ignore_case = True)
         operation = self.operations[op_name]
@@ -546,8 +547,7 @@ class CloudFront(object):
         if response["status"] >= 500:
             e = CloudFrontError(response)
             if retries:
-                warning(u"Retrying failed request: %s" % op_name)
-                warning(unicode(e))
+                warning(u"Retrying failed request: %s (%s)" % (op_name, e))
                 warning("Waiting %d sec..." % self._fail_wait(retries))
                 time.sleep(self._fail_wait(retries))
                 return self.send_request(op_name, dist_id, body = body, retries = retries - 1)
@@ -600,7 +600,7 @@ class CloudFront(object):
 
     def _fail_wait(self, retries):
         # Wait a few seconds. The more it fails the more we wait.
-        return (self._max_retries - retries + 1) * 3
+        return (self.config.max_retries - retries + 1) * 3
 
     def get_dist_name_for_bucket(self, uri):
         if uri.type == "cf":
@@ -818,4 +818,90 @@ class Cmd(object):
             pretty_output("Reference", st['InvalidationBatch']['CallerReference'])
             output("")
 
+    @staticmethod
+    def invalidate(args):
+        cfg = Config()
+        cf = CloudFront(cfg)
+        s3 = S3(cfg)
+
+        bucket_paths = defaultdict(list)
+        for arg in args:
+            uri = S3Uri(arg)
+            uobject = uri.object()
+            if not uobject:
+                # If object is not defined, we want to invalidate the whole bucket
+                uobject = '*'
+            elif uobject[-1] == '/':
+                # If object is a folder (i.e. a prefix), we want to invalidate its whole content
+                uobject += '*'
+            bucket_paths[uri.bucket()].append(uobject)
+
+        ret = EX_OK
+
+        params = []
+        for bucket, paths in bucket_paths.items():
+            base_uri = S3Uri(u's3://%s' % bucket)
+            cfuri = next(iter(cf.get_dist_name_for_bucket(base_uri)))
+
+            default_index_file = None
+            if cfg.invalidate_default_index_on_cf or cfg.invalidate_default_index_root_on_cf:
+                info_response = s3.website_info(base_uri, cfg.bucket_location)
+                if info_response:
+                    default_index_file = info_response['index_document']
+                    if not default_index_file:
+                        default_index_file = None
+
+            if cfg.dry_run:
+                fulluri_paths = [S3UriS3.compose_uri(bucket, path) for path in paths]
+                output(u"[--dry-run] Would invalidate %r" % fulluri_paths)
+                continue
+            params.append((bucket, paths, base_uri, cfuri, default_index_file))
+
+        if cfg.dry_run:
+            warning(u"Exiting now because of --dry-run")
+            return EX_OK
+
+        nb_success = 0
+        first = True
+        for bucket, paths, base_uri, cfuri, default_index_file in params:
+            if not first:
+                output("")
+            else:
+                first = False
+
+            results = cf.InvalidateObjects(
+                cfuri, paths, default_index_file,
+                cfg.invalidate_default_index_on_cf, cfg.invalidate_default_index_root_on_cf
+            )
+
+            dist_id = cfuri.dist_id()
+            pretty_output("URI", str(base_uri))
+            pretty_output("DistId", dist_id)
+            pretty_output("Nr of paths", len(paths))
+
+            for result in results:
+                result_code = result['status']
+
+                if result_code != 201:
+                    pretty_output("Status", "Failed: %d" % result_code)
+                    ret = EX_GENERAL
+                    continue
+
+                request_id = result['request_id']
+                nb_success += 1
+
+                pretty_output("Status", "Created")
+                pretty_output("RequestId", request_id)
+                pretty_output("Info", u"Check progress with: s3cmd cfinvalinfo %s/%s"
+                              % (dist_id, request_id))
+
+            if ret != EX_OK and cfg.stop_on_error:
+                error(u"Exiting now because of --stop-on-error")
+                break
+
+        if ret != EX_OK and nb_success:
+            ret = EX_PARTIAL
+
+        return ret
+
 # vim:et:ts=4:sts=4:ai
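
The new Cmd.invalidate() above turns each s3:// argument into one CloudFront invalidation path: no object means the whole bucket ('*'), a trailing '/' means everything under that prefix, and anything else is invalidated as-is. A hedged sketch of just that rule (the helper name is made up for illustration):

    def cf_invalidation_path(object_key):
        # Mirrors the per-URI logic in Cmd.invalidate() above.
        if not object_key:
            return '*'               # whole bucket
        if object_key.endswith('/'):
            return object_key + '*'  # whole "folder" (prefix)
        return object_key            # single object

    assert cf_invalidation_path('') == '*'
    assert cf_invalidation_path('images/') == 'images/*'
    assert cf_invalidation_path('images/logo.png') == 'images/logo.png'
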
diff --git a/S3/Config.py b/S3/Config.py
index ef409f2..1642c4b 100644
--- a/S3/Config.py
+++ b/S3/Config.py
@@ -161,6 +161,7 @@ class Config(object):
         u'md5',      # File MD5 (if known)
         #u'acl',     # Full ACL (not yet supported)
     ]
+    keep_dirs = False
     delete_removed = False
     delete_after = False
     delete_after_fetch = False
@@ -223,6 +224,7 @@ class Config(object):
     expiry_days = u""
     expiry_date = u""
     expiry_prefix = u""
+    skip_destination_validation = False
     signature_v2 = False
     limitrate = 0
     requester_pays = False
@@ -248,6 +250,8 @@ class Config(object):
     # allow the listing results to be returned in unsorted order.
     # This may be faster when listing very large buckets.
     list_allow_unordered = False
+    # Maximum attempts of re-issuing failed requests
+    max_retries = 5
 
     ## Creating a singleton
     def __new__(self, configfile = None, access_key=None, secret_key=None, access_token=None):
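
With max_retries now a Config option, both S3._fail_wait() and CloudFront._fail_wait() compute their back-off from the same value: (max_retries - retries_left + 1) * 3 seconds, i.e. 3, 6, 9, 12, 15 s with the default of 5 retries. A tiny standalone illustration of that formula (not the actual classes):

    MAX_RETRIES = 5  # default value of Config.max_retries

    def fail_wait(retries_left, max_retries=MAX_RETRIES):
        # The more attempts have already failed, the longer we wait.
        return (max_retries - retries_left + 1) * 3

    print([fail_wait(r) for r in range(MAX_RETRIES, 0, -1)])  # [3, 6, 9, 12, 15]
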
diff --git a/S3/Crypto.py b/S3/Crypto.py
index 10bfadb..14961b4 100644
--- a/S3/Crypto.py
+++ b/S3/Crypto.py
@@ -18,7 +18,7 @@ except ImportError:
 
 from . import Config
 from logging import debug
-from .BaseUtils import encode_to_s3, decode_from_s3, s3_quote
+from .BaseUtils import encode_to_s3, decode_from_s3, s3_quote, md5, unicode
 from .Utils import time_to_epoch, deunicodise, check_bucket_name_dns_support
 from .SortedDict import SortedDict
 
@@ -29,6 +29,7 @@ from hashlib import sha1, sha256
 
 __all__ = []
 
+
 def format_param_str(params, always_have_equal=False, limited_keys=None):
     """
     Format URL parameters from a params dict and returns
@@ -56,6 +57,7 @@ def format_param_str(params, always_have_equal=False, limited_keys=None):
     return param_str and "?" + param_str[1:]
 __all__.append("format_param_str")
 
+
 ### AWS Version 2 signing
 def sign_string_v2(string_to_sign):
     """Sign a string with the secret key, returning base64 encoded results.
@@ -71,6 +73,7 @@ def sign_string_v2(string_to_sign):
     return signature
 __all__.append("sign_string_v2")
 
+
 def sign_request_v2(method='GET', canonical_uri='/', params=None, cur_headers=None):
     """Sign a string with the secret key, returning base64 encoded results.
     By default the configured secret key is used, but may be overridden as
@@ -122,6 +125,7 @@ def sign_request_v2(method='GET', canonical_uri='/', params=None, cur_headers=No
     return new_headers
 __all__.append("sign_request_v2")
 
+
 def sign_url_v2(url_to_sign, expiry):
     """Sign a URL in s3://bucket/object form with the given expiry
     time. The object will be accessible via the signed URL until the
@@ -137,6 +141,7 @@ def sign_url_v2(url_to_sign, expiry):
     )
 __all__.append("sign_url_v2")
 
+
 def sign_url_base_v2(**parms):
     """Shared implementation of sign_url methods. Takes a hash of 'bucket', 'object' and 'expiry' as args."""
     content_disposition=Config.Config().content_disposition
@@ -171,10 +176,13 @@ def sign_url_base_v2(**parms):
     if content_type:
         url += "&response-content-type=" + s3_quote(content_type, unicode_output=True)
     return url
+__all__.append("sign_url_base_v2")
+
 
 def sign(key, msg):
     return hmac.new(key, encode_to_s3(msg), sha256).digest()
 
+
 def getSignatureKey(key, dateStamp, regionName, serviceName):
     """
     Input: unicode params
@@ -186,6 +194,7 @@ def getSignatureKey(key, dateStamp, regionName, serviceName):
     kSigning = sign(kService, 'aws4_request')
     return kSigning
 
+
 def sign_request_v4(method='GET', host='', canonical_uri='/', params=None,
                     region='us-east-1', cur_headers=None, body=b''):
     service = 's3'
@@ -251,36 +260,85 @@ def sign_request_v4(method='GET', host='', canonical_uri='/', params=None,
     return new_headers
 __all__.append("sign_request_v4")
 
-def checksum_sha256_file(filename, offset=0, size=None):
-    try:
-        hash = sha256()
-    except Exception:
-        # fallback to Crypto SHA256 module
-        hash = sha256.new()
-    with open(deunicodise(filename),'rb') as f:
-        if size is None:
-            for chunk in iter(lambda: f.read(8192), b''):
-                hash.update(chunk)
-        else:
-            f.seek(offset)
-            size_left = size
-            while size_left > 0:
-                chunk = f.read(min(8192, size_left))
-                if not chunk:
-                    break
-                size_left -= len(chunk)
-                hash.update(chunk)
+
+def checksum_file_descriptor(file_desc, offset=0, size=None, hash_func=sha256):
+    hash = hash_func()
+
+    if size is None:
+        for chunk in iter(lambda: file_desc.read(8192), b''):
+            hash.update(chunk)
+    else:
+        file_desc.seek(offset)
+        size_left = size
+        while size_left > 0:
+            chunk = file_desc.read(min(8192, size_left))
+            if not chunk:
+                break
+            size_left -= len(chunk)
+            hash.update(chunk)
 
     return hash
+__all__.append("checksum_file_descriptor")
+
+
+def checksum_sha256_file(file, offset=0, size=None):
+    if not isinstance(file, unicode):
+        # file is directly a file descriptor
+        return checksum_file_descriptor(file, offset, size, sha256)
+
+    # Otherwise, we expect file to be a filename
+    with open(deunicodise(file),'rb') as fp:
+        return checksum_file_descriptor(fp, offset, size, sha256)
+
+__all__.append("checksum_sha256_file")
+
 
 def checksum_sha256_buffer(buffer, offset=0, size=None):
-    try:
-        hash = sha256()
-    except Exception:
-        # fallback to Crypto SHA256 module
-        hash = sha256.new()
+    hash = sha256()
     if size is None:
         hash.update(buffer)
     else:
         hash.update(buffer[offset:offset+size])
     return hash
+__all__.append("checksum_sha256_buffer")
+
+
+def generate_content_md5(body):
+    m = md5(encode_to_s3(body))
+    base64md5 = encodestring(m.digest())
+    base64md5 = decode_from_s3(base64md5)
+    if base64md5[-1] == '\n':
+        base64md5 = base64md5[0:-1]
+    return decode_from_s3(base64md5)
+__all__.append("generate_content_md5")
+
+
+def hash_file_md5(filename):
+    h = md5()
+    with open(deunicodise(filename), "rb") as fp:
+        while True:
+            # Hash 32kB chunks
+            data = fp.read(32*1024)
+            if not data:
+                break
+            h.update(data)
+    return h.hexdigest()
+__all__.append("hash_file_md5")
+
+
+def calculateChecksum(buffer, mfile, offset, chunk_size, send_chunk):
+    md5_hash = md5()
+    size_left = chunk_size
+    if buffer == '':
+        mfile.seek(offset)
+        while size_left > 0:
+            data = mfile.read(min(send_chunk, size_left))
+            if not data:
+                break
+            md5_hash.update(data)
+            size_left -= len(data)
+    else:
+        md5_hash.update(buffer)
+
+    return md5_hash.hexdigest()
+__all__.append("calculateChecksum")
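
All MD5 helpers (generate_content_md5, hash_file_md5, calculateChecksum) now live in S3/Crypto.py and build on the FIPS-aware md5 from BaseUtils, and checksum_sha256_file() accepts either a filename or an already-open file object. For orientation, the Content-MD5 value is simply the base64-encoded raw MD5 digest of the body; a rough standalone equivalent (stdlib only, not the s3cmd code path):

    import base64
    import hashlib

    def content_md5(body):
        # Same idea as generate_content_md5(): base64 of the binary digest,
        # without a trailing newline.
        if isinstance(body, str):
            body = body.encode('utf-8')
        return base64.b64encode(hashlib.md5(body).digest()).decode('ascii')

    print(content_md5('<LifecycleConfiguration/>'))
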
diff --git a/S3/Exceptions.py b/S3/Exceptions.py
index 99f5358..350fce0 100644
--- a/S3/Exceptions.py
+++ b/S3/Exceptions.py
@@ -65,7 +65,7 @@ class S3Exception(Exception):
     message = property(_get_message, _set_message)
 
 
-class S3Error (S3Exception):
+class S3Error(S3Exception):
     def __init__(self, response):
         self.status = response["status"]
         self.reason = response["reason"]
diff --git a/S3/FileDict.py b/S3/FileDict.py
index 3890248..de017c3 100644
--- a/S3/FileDict.py
+++ b/S3/FileDict.py
@@ -10,6 +10,7 @@ from __future__ import absolute_import
 
 import logging
 from .SortedDict import SortedDict
+from .Crypto import hash_file_md5
 from . import Utils
 from . import Config
 
@@ -45,7 +46,7 @@ class FileDict(SortedDict):
         md5 = self.get_hardlink_md5(relative_file)
         if md5 is None and 'md5' in cfg.sync_checks:
             logging.debug(u"doing file I/O to read md5 of %s" % relative_file)
-            md5 = Utils.hash_file_md5(self[relative_file]['full_name'])
+            md5 = hash_file_md5(self[relative_file]['full_name'])
         self.record_md5(relative_file, md5)
         self[relative_file]['md5'] = md5
         return md5
diff --git a/S3/FileLists.py b/S3/FileLists.py
index 3b194f0..823b4ce 100644
--- a/S3/FileLists.py
+++ b/S3/FileLists.py
@@ -12,7 +12,7 @@ from .S3 import S3
 from .Config import Config
 from .S3Uri import S3Uri
 from .FileDict import FileDict
-from .BaseUtils import dateS3toUnix, dateRFC822toUnix
+from .BaseUtils import dateS3toUnix, dateRFC822toUnix, s3path
 from .Utils import unicodise, deunicodise, deunicodise_s, replace_nonprintables
 from .Exceptions import ParameterError
 from .HashCache import HashCache
@@ -26,6 +26,8 @@ import re
 import errno
 import io
 
+from stat import S_ISDIR
+
 PY3 = (sys.version_info >= (3, 0))
 
 __all__ = ["fetch_local_list", "fetch_remote_list", "compare_filelists"]
@@ -196,7 +198,7 @@ def _get_filelist_from_file(cfg, local_path):
         result.append((key, [], values))
     return result
 
-def fetch_local_list(args, is_src = False, recursive = None):
+def fetch_local_list(args, is_src = False, recursive = None, with_dirs=False):
 
     def _fetch_local_list_info(loc_list):
         len_loc_list = len(loc_list)
@@ -211,7 +213,9 @@ def fetch_local_list(args, is_src = False, recursive = None):
             if relative_file == '-':
                 continue
 
-            full_name = loc_list[relative_file]['full_name']
+            loc_list_item = loc_list[relative_file]
+            full_name = loc_list_item['full_name']
+            is_dir = loc_list_item['is_dir']
             try:
                 sr = os.stat_result(os.stat(deunicodise(full_name)))
             except OSError as e:
@@ -220,22 +224,34 @@ def fetch_local_list(args, is_src = False, recursive = None):
                     continue
                 else:
                     raise
+
+            if is_dir:
+                size = 0
+            else:
+                size = sr.st_size
+
             loc_list[relative_file].update({
-                'size' : sr.st_size,
+                'size' : size,
                 'mtime' : sr.st_mtime,
                 'dev'   : sr.st_dev,
                 'inode' : sr.st_ino,
                 'uid' : sr.st_uid,
                 'gid' : sr.st_gid,
-                'sr': sr # save it all, may need it in preserve_attrs_list
+                'sr': sr, # save it all, may need it in preserve_attrs_list
                 ## TODO: Possibly more to save here...
             })
             total_size += sr.st_size
+
+            if is_dir:
+                # An MD5 can't be calculated for a directory path
+                continue
+
             if 'md5' in cfg.sync_checks:
                 md5 = cache.md5(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size)
                 if md5 is None:
                         try:
-                            md5 = loc_list.get_md5(relative_file) # this does the file I/O
+                            # this does the file I/O
+                            md5 = loc_list.get_md5(relative_file)
                         except IOError:
                             continue
                         cache.add(sr.st_dev, sr.st_ino, sr.st_mtime, sr.st_size, md5)
@@ -243,7 +259,7 @@ def fetch_local_list(args, is_src = False, recursive = None):
         return total_size
 
 
-    def _get_filelist_local(loc_list, local_uri, cache):
+    def _get_filelist_local(loc_list, local_uri, cache, with_dirs):
         info(u"Compiling list of local files...")
 
         if local_uri.basename() == "-":
@@ -261,8 +277,10 @@ def fetch_local_list(args, is_src = False, recursive = None):
                 'gid' : gid,
                 'dev' : 0,
                 'inode': 0,
+                'is_dir': False,
             }
             return loc_list, True
+
         if local_uri.isdir():
             local_base = local_uri.basename()
             local_path = local_uri.path()
@@ -280,29 +298,39 @@ def fetch_local_list(args, is_src = False, recursive = None):
             local_path = local_uri.dirname()
             filelist = [( local_path, [], [local_uri.basename()] )]
             single_file = True
+
         for root, dirs, files in filelist:
             rel_root = root.replace(local_path, local_base, 1)
-            for f in files:
-                full_name = os.path.join(root, f)
-                if not os.path.isfile(deunicodise(full_name)):
-                    if os.path.exists(deunicodise(full_name)):
-                        warning(u"Skipping over non regular file: %s" % full_name)
-                    continue
-                if os.path.islink(deunicodise(full_name)):
-                    if not cfg.follow_symlinks:
-                        warning(u"Skipping over symbolic link: %s" % full_name)
+            if not with_dirs:
+                iter_elements = ((files, False),)
+            else:
+                iter_elements = ((dirs, True), (files, False))
+            for elements, is_dir in iter_elements:
+                for f in elements:
+                    full_name = os.path.join(root, f)
+                    if not is_dir and not os.path.isfile(deunicodise(full_name)):
+                        if os.path.exists(deunicodise(full_name)):
+                            warning(u"Skipping over non regular file: %s" % full_name)
                         continue
-                relative_file = os.path.join(rel_root, f)
-                if os.path.sep != "/":
-                    # Convert non-unix dir separators to '/'
-                    relative_file = "/".join(relative_file.split(os.path.sep))
-                if cfg.urlencoding_mode == "normal":
-                    relative_file = replace_nonprintables(relative_file)
-                if relative_file.startswith('./'):
-                    relative_file = relative_file[2:]
-                loc_list[relative_file] = {
-                    'full_name' : full_name,
-                }
+                    if os.path.islink(deunicodise(full_name)):
+                        if not cfg.follow_symlinks:
+                            warning(u"Skipping over symbolic link: %s" % full_name)
+                            continue
+                    relative_file = os.path.join(rel_root, f)
+                    if os.path.sep != "/":
+                        # Convert non-unix dir separators to '/'
+                        relative_file = "/".join(relative_file.split(os.path.sep))
+                    if cfg.urlencoding_mode == "normal":
+                        relative_file = replace_nonprintables(relative_file)
+                    if relative_file.startswith('./'):
+                        relative_file = relative_file[2:]
+                    if is_dir and relative_file and relative_file[-1] != '/':
+                        relative_file += '/'
+
+                    loc_list[relative_file] = {
+                        'full_name' : full_name,
+                        'is_dir': is_dir,
+                    }
 
         return loc_list, single_file
 
@@ -353,7 +381,7 @@ def fetch_local_list(args, is_src = False, recursive = None):
         local_uris.append(uri)
 
     for uri in local_uris:
-        list_for_uri, single_file = _get_filelist_local(local_list, uri, cache)
+        list_for_uri, single_file = _get_filelist_local(local_list, uri, cache, with_dirs)
 
     ## Single file is True if and only if the user
     ## specified one local URI and that URI represents
@@ -375,9 +403,9 @@ def fetch_remote_list(args, require_attribs = False, recursive = None, uri_param
             return
 
         remote_item.update({
-        'size': int(response['headers']['content-length']),
-        'md5': response['headers']['etag'].strip('"\''),
-        'timestamp': dateRFC822toUnix(response['headers']['last-modified'])
+            'size': int(response['headers']['content-length']),
+            'md5': response['headers']['etag'].strip('"\''),
+            'timestamp': dateRFC822toUnix(response['headers']['last-modified'])
         })
         try:
             md5 = response['s3cmd-attrs']['md5']
@@ -403,7 +431,6 @@ def fetch_remote_list(args, require_attribs = False, recursive = None, uri_param
         ## { 'xyz/blah.txt' : {} }
 
         info(u"Retrieving list of remote files for %s ..." % remote_uri)
-        empty_fname_re = re.compile(r'\A\s*\Z')
 
         total_size = 0
 
@@ -420,34 +447,43 @@ def fetch_remote_list(args, require_attribs = False, recursive = None, uri_param
         rem_list = FileDict(ignore_case = False)
         break_now = False
         for object in response['list']:
-            if object['Key'] == rem_base_original and object['Key'][-1] != "/":
+            object_key = object['Key']
+            object_size = int(object['Size'])
+            is_dir = (object_key[-1] == '/')
+
+            if object_key == rem_base_original and not is_dir:
                 ## We asked for one file and we got that file :-)
-                key = unicodise(os.path.basename(deunicodise(object['Key'])))
+                key = s3path.basename(object_key)
                 object_uri_str = remote_uri_original.uri()
                 break_now = True
-                rem_list = FileDict(ignore_case = False)   ## Remove whatever has already been put to rem_list
+                # Remove whatever has already been put to rem_list
+                rem_list = FileDict(ignore_case = False)
             else:
-                key = object['Key'][rem_base_len:]      ## Beware - this may be '' if object['Key']==rem_base !!
+                # Beware - this may be '' if object_key==rem_base !!
+                key = object_key[rem_base_len:]
                 object_uri_str = remote_uri.uri() + key
-            if empty_fname_re.match(key):
+
+            if not key:
                 # Objects may exist on S3 with empty names (''), which don't map so well to common filesystems.
-                warning(u"Empty object name on S3 found, ignoring.")
+                warning(u"Found empty root object name on S3, ignoring.")
                 continue
+
             rem_list[key] = {
-                'size' : int(object['Size']),
+                'size' : object_size,
                 'timestamp' : dateS3toUnix(object['LastModified']), ## Sadly it's upload time, not our lastmod time :-(
                 'md5' : object['ETag'].strip('"\''),
-                'object_key' : object['Key'],
+                'object_key' : object_key,
                 'object_uri_str' : object_uri_str,
                 'base_uri' : remote_uri,
                 'dev' : None,
                 'inode' : None,
+                'is_dir': is_dir,
             }
             if '-' in rem_list[key]['md5']: # always get it for multipart uploads
                 _get_remote_attribs(S3Uri(object_uri_str), rem_list[key])
             md5 = rem_list[key]['md5']
             rem_list.record_md5(key, md5)
-            total_size += int(object['Size'])
+            total_size += object_size
             if break_now:
                 break
         return rem_list, total_size
@@ -483,7 +519,9 @@ def fetch_remote_list(args, require_attribs = False, recursive = None, uri_param
             ## Wildcards used in remote URI?
             ## If yes we'll need a bucket listing...
             wildcard_split_result = re.split("\*|\?", uri_str, maxsplit=1)
-            if len(wildcard_split_result) == 2: # wildcards found
+
+            if len(wildcard_split_result) == 2:
+                ## If wildcards found
                 prefix, rest = wildcard_split_result
                 ## Only request recursive listing if the 'rest' of the URI,
                 ## i.e. the part after first wildcard, contains '/'
@@ -496,13 +534,15 @@ def fetch_remote_list(args, require_attribs = False, recursive = None, uri_param
                         remote_list[key] = objectlist[key]
             else:
                 ## No wildcards - simply append the given URI to the list
-                key = unicodise(os.path.basename(deunicodise(uri.object())))
+                key = s3path.basename(uri.object())
                 if not key:
                     raise ParameterError(u"Expecting S3 URI with a filename or --recursive: %s" % uri.uri())
+                is_dir = (key and key[-1] == '/')
                 remote_item = {
                     'base_uri': uri,
                     'object_uri_str': uri.uri(),
-                    'object_key': uri.object()
+                    'object_key': uri.object(),
+                    'is_dir': is_dir,
                 }
                 if require_attribs:
                     _get_remote_attribs(uri, remote_item)
@@ -524,24 +564,33 @@ def compare_filelists(src_list, dst_list, src_remote, dst_remote):
     def _compare(src_list, dst_lst, src_remote, dst_remote, file):
         """Return True if src_list[file] matches dst_list[file], else False"""
         attribs_match = True
-        if not (file in src_list and file in dst_list):
-            info(u"%s: does not exist in one side or the other: src_list=%s, dst_list=%s" % (file, file in src_list, file in dst_list))
+        src_file = src_list.get(file)
+        dst_file = dst_list.get(file)
+        if not src_file or not dst_file:
+            info(u"%s: does not exist in one side or the other: src_list=%s, dst_list=%s"
+                 % (file, bool(src_file), bool(dst_file)))
             return False
 
         ## check size first
         if 'size' in cfg.sync_checks:
-            if 'size' in dst_list[file] and 'size' in src_list[file]:
-                if dst_list[file]['size'] != src_list[file]['size']:
-                    debug(u"xfer: %s (size mismatch: src=%s dst=%s)" % (file, src_list[file]['size'], dst_list[file]['size']))
-                    attribs_match = False
+            src_size = src_file.get('size')
+            dst_size = dst_file.get('size')
+            if dst_size is not None and src_size is not None and dst_size != src_size:
+                debug(u"xfer: %s (size mismatch: src=%s dst=%s)" % (file, src_size, dst_size))
+                attribs_match = False
 
         ## check md5
         compare_md5 = 'md5' in cfg.sync_checks
         # Multipart-uploaded files don't have a valid md5 sum - it ends with "...-nn"
         if compare_md5:
-            if (src_remote == True and '-' in src_list[file]['md5']) or (dst_remote == True and '-' in dst_list[file]['md5']):
+            if (src_remote == True and '-' in src_file['md5']) or (dst_remote == True and '-' in dst_file['md5']):
                 compare_md5 = False
                 info(u"disabled md5 check for %s" % file)
+
+        if compare_md5 and src_file['is_dir'] == True:
+            # For directories, nothing to do if they already exist
+            compare_md5 = False
+
         if attribs_match and compare_md5:
             try:
                 src_md5 = src_list.get_md5(file)
@@ -569,13 +618,30 @@ def compare_filelists(src_list, dst_list, src_remote, dst_remote):
     ## Items left on copy_pairs will be copied from dst1 to dst2
     update_list = FileDict(ignore_case = False)
     ## Items left on dst_list will be deleted
-    copy_pairs = []
+    copy_pairs = {}
 
     debug("Comparing filelists (direction: %s -> %s)" % (__direction_str(src_remote), __direction_str(dst_remote)))
 
+    src_dir_cache = set()
+
     for relative_file in src_list.keys():
         debug(u"CHECK: '%s'" % relative_file)
 
+        if src_remote:
+            # Most of the time, there will not be dir objects on the remote side,
+            # but we still need a "virtual" list of them so that dirs existing on
+            # the local side are not considered unmatched.
+            dir_idx = relative_file.rfind('/')
+            if dir_idx > 0:
+                path = relative_file[:dir_idx+1]
+                while path and path not in src_dir_cache:
+                    src_dir_cache.add(path)
+                    # Also add to cache, all the parent dirs
+                    try:
+                        path = path[:path.rindex('/', 0, -1)+1]
+                    except ValueError:
+                        continue
+
         if relative_file in dst_list:
             ## Was --skip-existing requested?
             if cfg.skip_existing:
@@ -606,9 +672,12 @@ def compare_filelists(src_list, dst_list, src_remote, dst_remote):
                     md5 = None
                 if md5 is not None and md5 in dst_list.by_md5:
                     # Found one, we want to copy
-                    dst1 = dst_list.find_md5_one(md5)
-                    debug(u"DST COPY src: '%s' -> '%s'" % (dst1, relative_file))
-                    copy_pairs.append((src_list[relative_file], dst1, relative_file, md5))
+                    copy_src_file = dst_list.find_md5_one(md5)
+                    debug(u"DST COPY src: '%s' -> '%s'" % (copy_src_file, relative_file))
+                    src_item = src_list[relative_file]
+                    src_item["md5"] = md5
+                    src_item["copy_src"] = copy_src_file
+                    copy_pairs[relative_file] = src_item
                     del(src_list[relative_file])
                     del(dst_list[relative_file])
                 else:
@@ -626,12 +695,14 @@ def compare_filelists(src_list, dst_list, src_remote, dst_remote):
                 md5 = src_list.get_md5(relative_file)
             except IOError:
                 md5 = None
-            dst1 = dst_list.find_md5_one(md5)
-            if dst1 is not None:
+            copy_src_file = dst_list.find_md5_one(md5)
+            if copy_src_file is not None:
                 # Found one, we want to copy
-                debug(u"DST COPY dst: '%s' -> '%s'" % (dst1, relative_file))
-                copy_pairs.append((src_list[relative_file], dst1,
-                                   relative_file, md5))
+                debug(u"DST COPY dst: '%s' -> '%s'" % (copy_src_file, relative_file))
+                src_item = src_list[relative_file]
+                src_item["md5"] = md5
+                src_item["copy_src"] = copy_src_file
+                copy_pairs[relative_file] = src_item
                 del(src_list[relative_file])
             else:
                 # we don't have this file, and we don't have a copy of this file elsewhere.  Get it.
@@ -640,8 +711,8 @@ def compare_filelists(src_list, dst_list, src_remote, dst_remote):
                 dst_list.record_md5(relative_file, md5)
 
     for f in dst_list.keys():
-        if f in src_list or f in update_list:
-            # leave only those not on src_list + update_list
+        if f in src_list or f in update_list or f in src_dir_cache:
+            # leave only those not on src_list + update_list + src_dir_cache
             del dst_list[f]
 
     return src_list, dst_list, update_list, copy_pairs
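
When the source is remote, compare_filelists() now pre-populates src_dir_cache with every parent prefix of each key, so directory entries that only exist explicitly on the local side are not reported as unmatched and deleted. A small sketch of that prefix expansion (the helper name is illustrative):

    def parent_prefixes(key):
        # 'a/b/c.txt' -> {'a/', 'a/b/'}, mirroring the src_dir_cache loop above.
        prefixes = set()
        idx = key.rfind('/')
        if idx > 0:
            path = key[:idx + 1]
            while path and path not in prefixes:
                prefixes.add(path)
                try:
                    path = path[:path.rindex('/', 0, -1) + 1]
                except ValueError:
                    break
        return prefixes

    assert parent_prefixes('a/b/c.txt') == {'a/', 'a/b/'}
    assert parent_prefixes('top.txt') == set()
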
diff --git a/S3/MultiPart.py b/S3/MultiPart.py
index e78a1bb..4c48467 100644
--- a/S3/MultiPart.py
+++ b/S3/MultiPart.py
@@ -8,10 +8,11 @@ from __future__ import absolute_import
 
 import sys
 from logging import debug, info, warning, error
+from .Crypto import calculateChecksum
 from .Exceptions import ParameterError
 from .S3Uri import S3UriS3
 from .BaseUtils import getTextFromXml, getTreeFromXml, s3_quote, parseNodes
-from .Utils import formatSize, calculateChecksum
+from .Utils import formatSize
 
 SIZE_1MB = 1024 * 1024
 
diff --git a/S3/PkgInfo.py b/S3/PkgInfo.py
index 216c96e..e675410 100644
--- a/S3/PkgInfo.py
+++ b/S3/PkgInfo.py
@@ -7,7 +7,7 @@
 ## Copyright: TGRMN Software and contributors
 
 package = "s3cmd"
-version = "2.3.0"
+version = "2.3.0-dev"
 url = "http://s3tools.org"
 license = "GNU GPL v2+"
 short_description = "Command line tool for managing Amazon S3 and CloudFront services"
diff --git a/S3/S3.py b/S3/S3.py
index 4439893..9bf2e92 100644
--- a/S3/S3.py
+++ b/S3/S3.py
@@ -18,32 +18,20 @@ import pprint
 from xml.sax import saxutils
 from socket import timeout as SocketTimeoutException
 from logging import debug, info, warning, error
-from stat import ST_SIZE
+from stat import ST_SIZE, ST_MODE, S_ISDIR, S_ISREG
 try:
     # python 3 support
     from urlparse import urlparse
 except ImportError:
     from urllib.parse import urlparse
-try:
-    # Python 2 support
-    from base64 import encodestring
-except ImportError:
-    # Python 3.9.0+ support
-    from base64 import encodebytes as encodestring
 
 import select
 
-try:
-    from hashlib import md5
-except ImportError:
-    from md5 import md5
-
 from .BaseUtils import (getListFromXml, getTextFromXml, getRootTagName,
-                        decode_from_s3, encode_to_s3, s3_quote)
-from .Utils import (convertHeaderTupleListToDict, hash_file_md5, unicodise,
+                        decode_from_s3, encode_to_s3, md5, s3_quote)
+from .Utils import (convertHeaderTupleListToDict, unicodise,
                     deunicodise, check_bucket_name,
-                    check_bucket_name_dns_support, getHostnameFromBucket,
-                    calculateChecksum)
+                    check_bucket_name_dns_support, getHostnameFromBucket)
 from .SortedDict import SortedDict
 from .AccessLog import AccessLog
 from .ACL import ACL, GranteeLogDelivery
@@ -54,7 +42,8 @@ from .MultiPart import MultiPartUpload
 from .S3Uri import S3Uri
 from .ConnMan import ConnMan
 from .Crypto import (sign_request_v2, sign_request_v4, checksum_sha256_file,
-                     checksum_sha256_buffer, format_param_str)
+                     checksum_sha256_buffer, generate_content_md5,
+                     hash_file_md5, calculateChecksum, format_param_str)
 
 try:
     from ctypes import ArgumentError
@@ -257,9 +246,6 @@ class S3(object):
         "BucketAlreadyExists" : "Bucket '%s' already exists",
     }
 
-    ## Maximum attempts of re-issuing failed requests
-    _max_retries = 5
-
     def __init__(self, config):
         self.config = config
         self.fallback_to_signature_v2 = False
@@ -353,8 +339,8 @@ class S3(object):
         def _get_common_prefixes(data):
             return getListFromXml(data, "CommonPrefixes")
 
-        def _get_next_marker(data, current_list):
-            return getTextFromXml(response["data"], "NextMarker") or current_list[-1]["Key"]
+        def _get_next_marker(data, current_elts, key):
+            return getTextFromXml(response["data"], "NextMarker") or current_elts[-1][key]
 
         uri_params = uri_params and uri_params.copy() or {}
         truncated = True
@@ -377,9 +363,10 @@ class S3(object):
                 if limit == -1 or num_objects + num_prefixes < limit:
                     if current_list:
                         uri_params['marker'] = \
-                            _get_next_marker(response["data"], current_list)
+                            _get_next_marker(response["data"], current_list, "Key")
                     elif current_prefixes:
-                        uri_params['marker'] = current_prefixes[-1]["Prefix"]
+                        uri_params['marker'] = \
+                            _get_next_marker(response["data"], current_prefixes, "Prefix")
                     else:
                         # Unexpectedly, the server lied, and so the previous
                         # response was not truncated. So, no new key to get.
@@ -488,6 +475,10 @@ class S3(object):
             response['requester-pays'] = self.get_bucket_requester_pays(uri)
         except S3Error as e:
             response['requester-pays'] = None
+        try:
+            response['versioning'] = self.get_versioning(uri)
+        except S3Error as e:
+            response['versioning'] = None
         return response
 
     def website_info(self, uri, bucket_location = None):
@@ -606,7 +597,7 @@ class S3(object):
         body += '</LifecycleConfiguration>'
 
         headers = SortedDict(ignore_case = True)
-        headers['content-md5'] = compute_content_md5(body)
+        headers['content-md5'] = generate_content_md5(body)
         bucket = uri.bucket()
         request =  self.create_request("BUCKET_CREATE", bucket = bucket,
                                        headers = headers, body = body,
@@ -627,6 +618,7 @@ class S3(object):
                 (content_type, content_charset) = mime_magic(filename)
             else:
                 (content_type, content_charset) = mimetypes.guess_type(filename)
+
         if not content_type:
             content_type = self.config.default_mime_type
         return (content_type, content_charset)
@@ -639,14 +631,17 @@ class S3(object):
         content_type += "; charset=" + self.config.encoding.upper()
         return content_type
 
-    def content_type(self, filename=None):
+    def content_type(self, filename=None, is_dir=False):
         # explicit command line argument always wins
         content_type = self.config.mime_type
         content_charset = None
 
         if filename == u'-':
             return self.stdin_content_type()
-        if not content_type:
+
+        if is_dir:
+            content_type = 'application/x-directory'
+        elif not content_type:
             (content_type, content_charset) = self._guess_content_type(filename)
 
         ## add charset to content type
@@ -678,21 +673,36 @@ class S3(object):
         if uri.type != "s3":
             raise ValueError("Expected URI type 's3', got '%s'" % uri.type)
 
-        if filename != "-" and not os.path.isfile(deunicodise(filename)):
-            raise InvalidFileError(u"Not a regular file")
         try:
+            is_dir = False
+            size = 0
             if filename == "-":
+                is_stream = True
                 src_stream = io.open(sys.stdin.fileno(), mode='rb', closefd=False)
                 src_stream.stream_name = u'<stdin>'
-                size = 0
+
             else:
-                src_stream = io.open(deunicodise(filename), mode='rb')
+                is_stream = False
+                filename_bytes = deunicodise(filename)
+
+                stat = os.stat(filename_bytes)
+                mode = stat[ST_MODE]
+
+                if S_ISDIR(mode):
+                    is_dir = True
+                    # Dirs are represented as empty objects on S3
+                    src_stream = io.BytesIO(b'')
+                elif not S_ISREG(mode):
+                    raise InvalidFileError(u"Not a regular file")
+                else:
+                    # Standard normal file
+                    src_stream = io.open(filename_bytes, mode='rb')
+                    size = stat[ST_SIZE]
                 src_stream.stream_name = filename
-                size = os.stat(deunicodise(filename))[ST_SIZE]
         except (IOError, OSError) as e:
             raise InvalidFileError(u"%s" % e.strerror)
 
-        headers = SortedDict(ignore_case = True)
+        headers = SortedDict(ignore_case=True)
         if extra_headers:
             headers.update(extra_headers)
 
@@ -706,7 +716,7 @@ class S3(object):
             headers['x-amz-server-side-encryption-aws-kms-key-id'] = self.config.kms_key
 
         ## MIME-type handling
-        headers["content-type"] = self.content_type(filename=filename)
+        headers["content-type"] = self.content_type(filename=filename, is_dir=is_dir)
 
         ## Other Amazon S3 attributes
         if self.config.acl_public:
@@ -715,10 +725,10 @@ class S3(object):
 
         ## Multipart decision
         multipart = False
-        if not self.config.enable_multipart and filename == "-":
+        if not self.config.enable_multipart and is_stream:
             raise ParameterError("Multi-part upload is required to upload from stdin")
         if self.config.enable_multipart:
-            if size > self.config.multipart_chunk_size_mb * SIZE_1MB or filename == "-":
+            if size > self.config.multipart_chunk_size_mb * SIZE_1MB or is_stream:
                 multipart = True
                 if size > self.config.multipart_max_chunks * self.config.multipart_chunk_size_mb * SIZE_1MB:
                     raise ParameterError("Chunk size %d MB results in more than %d chunks. Please increase --multipart-chunk-size-mb" % \
@@ -770,7 +780,7 @@ class S3(object):
     def object_batch_delete(self, remote_list):
         """ Batch delete given a remote_list """
         uris = [remote_list[item]['object_uri_str'] for item in remote_list]
-        self.object_batch_delete_uri_strs(uris)
+        return self.object_batch_delete_uri_strs(uris)
 
     def object_batch_delete_uri_strs(self, uris):
         """ Batch delete given a list of object uris """
@@ -795,7 +805,7 @@ class S3(object):
             raise ValueError("Key list is empty")
         bucket = S3Uri(batch[0]).bucket()
         request_body = compose_batch_del_xml(bucket, batch)
-        headers = SortedDict({'content-md5': compute_content_md5(request_body),
+        headers = SortedDict({'content-md5': generate_content_md5(request_body),
                    'content-type': 'application/xml'}, ignore_case=True)
         request = self.create_request("BATCH_DELETE", bucket = bucket,
                                       headers = headers, body = request_body,
@@ -1071,6 +1081,27 @@ class S3(object):
         response = self.send_request(request)
         return response
 
+    def set_versioning(self, uri, enabled):
+        headers = SortedDict(ignore_case = True)
+        status = "Enabled" if enabled is True else "Suspended"
+        body = '<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
+        body += '<Status>%s</Status>' % status
+        body += '</VersioningConfiguration>'
+        debug(u"set_versioning(%s)" % body)
+        headers['content-md5'] = generate_content_md5(body)
+        request = self.create_request("BUCKET_CREATE", uri = uri,
+                                      headers = headers, body = body,
+                                      uri_params = {'versioning': None})
+        response = self.send_request(request)
+        return response
+
+    def get_versioning(self, uri):
+        request = self.create_request("BUCKET_LIST", uri = uri,
+                                      uri_params = {'versioning': None})
+        response = self.send_request(request)
+
+        return getTextFromXml(response['data'], "Status")
+
     def get_policy(self, uri):
         request = self.create_request("BUCKET_LIST", bucket = uri.bucket(),
                                       uri_params = {'policy': None})
@@ -1104,7 +1135,7 @@ class S3(object):
         headers = SortedDict(ignore_case = True)
         # TODO check cors is proper json string
         headers['content-type'] = 'application/xml'
-        headers['content-md5'] = compute_content_md5(cors)
+        headers['content-md5'] = generate_content_md5(cors)
         request = self.create_request("BUCKET_CREATE", uri = uri,
                                       headers=headers, body = cors,
                                       uri_params = {'cors': None})
@@ -1120,7 +1151,7 @@ class S3(object):
 
     def set_lifecycle_policy(self, uri, policy):
         headers = SortedDict(ignore_case = True)
-        headers['content-md5'] = compute_content_md5(policy)
+        headers['content-md5'] = generate_content_md5(policy)
         request = self.create_request("BUCKET_CREATE", uri = uri,
                                       headers=headers, body = policy,
                                       uri_params = {'lifecycle': None})
@@ -1159,8 +1190,11 @@ class S3(object):
         return response
 
     def set_notification_policy(self, uri, policy):
+        headers = SortedDict(ignore_case = True)
+        if self.config.skip_destination_validation:
+            headers["x-amz-skip-destination-validation"] = "True"
         request = self.create_request("BUCKET_CREATE", uri = uri,
-                                      body = policy,
+                                      headers = headers, body = policy,
                                       uri_params = {'notification': None})
         debug(u"set_notification_policy(%s): policy-xml: %s" % (uri, policy))
         response = self.send_request(request)
@@ -1376,7 +1410,7 @@ class S3(object):
 
     def _fail_wait(self, retries):
         # Wait a few seconds. The more it fails the more we wait.
-        return (self._max_retries - retries + 1) * 3
+        return (self.config.max_retries - retries + 1) * 3
 
     def _http_redirection_handler(self, request, response, fn, *args, **kwargs):
         # Region info might already be available through the x-amz-bucket-region header
@@ -1500,7 +1534,9 @@ class S3(object):
                 debug("===== FAILED Inner request to determine the bucket "
                       "region =====")
 
-    def send_request(self, request, retries = _max_retries):
+    def send_request(self, request, retries=None):
+        if retries is None:
+            retries = self.config.max_retries
         self.update_region_inner_request(request)
 
         request.body = encode_to_s3(request.body)
@@ -1627,8 +1663,10 @@ class S3(object):
         return response
 
     def send_file(self, request, stream, labels, buffer = '', throttle = 0,
-                  retries = _max_retries, offset = 0, chunk_size = -1,
+                  retries = None, offset = 0, chunk_size = -1,
                   use_expect_continue = None):
+        if retries is None:
+            retries = self.config.max_retries
         self.update_region_inner_request(request)
 
         if use_expect_continue is None:
@@ -1651,7 +1689,7 @@ class S3(object):
         if buffer:
             sha256_hash = checksum_sha256_buffer(buffer, offset, size_total)
         else:
-            sha256_hash = checksum_sha256_file(filename, offset, size_total)
+            sha256_hash = checksum_sha256_file(stream, offset, size_total)
         request.body = sha256_hash
 
         if use_expect_continue:
@@ -1835,6 +1873,10 @@ class S3(object):
                 if response["status"] == 503:
                     ## SlowDown error
                     throttle = throttle and throttle * 5 or 0.01
+                elif response["status"] == 507:
+                    # Not an AWS error, but s3 compatible server possible error:
+                    # InsufficientStorage
+                    try_retry = False
             elif response["status"] == 429:
                 # Not an AWS error, but s3 compatible server possible error:
                 # TooManyRequests/Busy/slowdown
@@ -1846,9 +1888,10 @@ class S3(object):
                 if err.code in ['BadDigest', 'OperationAborted', 'TokenRefreshRequired', 'RequestTimeout']:
                     try_retry = True
 
+            err = S3Error(response)
             if try_retry:
                 if retries:
-                    warning("Upload failed: %s (%s)" % (resource['uri'], S3Error(response)))
+                    warning("Upload failed: %s (%s)" % (resource['uri'], err))
                     if throttle:
                         warning("Retrying on lower speed (throttle=%0.2f)" % throttle)
                     warning("Waiting %d sec..." % self._fail_wait(retries))
@@ -1857,11 +1900,10 @@ class S3(object):
                                           retries - 1, offset, chunk_size, use_expect_continue)
                 else:
                     warning("Too many failures. Giving up on '%s'" % filename)
-                    raise S3UploadError("Too many failures. Giving up on '%s'"
-                                        % filename)
+                    raise S3UploadError("%s" % err)
 
             ## Non-recoverable error
-            raise S3Error(response)
+            raise err
 
         debug("MD5 sums: computed=%s, received=%s" % (md5_computed, response["headers"].get('etag', '').strip('"\'')))
         ## when using KMS encryption, MD5 etag value will not match
@@ -1873,9 +1915,8 @@ class S3(object):
                 return self.send_file(request, stream, labels, buffer, throttle,
                                       retries - 1, offset, chunk_size, use_expect_continue)
             else:
-                warning("Too many failures. Giving up on '%s'" % (filename))
-                raise S3UploadError("Too many failures. Giving up on '%s'"
-                                    % filename)
+                warning("Too many failures. Giving up on '%s'" % filename)
+                raise S3UploadError("MD5 sums of sent and received files don't match!")
 
         return response
 
@@ -1900,7 +1941,9 @@ class S3(object):
         return self.send_file_multipart(src_uri, headers, dst_uri, size,
                                         extra_label)
 
-    def recv_file(self, request, stream, labels, start_position = 0, retries = _max_retries):
+    def recv_file(self, request, stream, labels, start_position=0, retries=None):
+        if retries is None:
+            retries = self.config.max_retries
         self.update_region_inner_request(request)
 
         method_string, resource, headers = request.get_triplet()
@@ -1950,19 +1993,23 @@ class S3(object):
                 warning("Waiting %d sec..." % self._fail_wait(retries))
                 time.sleep(self._fail_wait(retries))
                 # Connection error -> same throttle value
-                return self.recv_file(request, stream, labels, start_position, retries - 1)
+                return self.recv_file(request, stream, labels, start_position,
+                                      retries=retries - 1)
             else:
                 raise S3DownloadError("Download failed for: %s" % resource['uri'])
 
+        if response["status"] < 200 or response["status"] > 299:
+            # In case of error, we still need to flush the read buffer to be able to re-use
+            # the connection
+            response['data'] = http_response.read()
+
         if response["status"] in [301, 307]:
             ## RedirectPermanent or RedirectTemporary
-            response['data'] = http_response.read()
             return self._http_redirection_handler(request, response,
                                                   self.recv_file, request,
                                                   stream, labels, start_position)
 
         if response["status"] == 400:
-            response['data'] = http_response.read()
             handler_fn = self._http_400_handler(request, response, self.recv_file,
                                                 request, stream, labels, start_position)
             if handler_fn:
@@ -1970,16 +2017,35 @@ class S3(object):
             raise S3Error(response)
 
         if response["status"] == 403:
-            response['data'] = http_response.read()
             return self._http_403_handler(request, response, self.recv_file,
                                           request, stream, labels, start_position)
 
-        if response["status"] == 405: # Method Not Allowed.  Don't retry.
-            response['data'] = http_response.read()
-            raise S3Error(response)
-
         if response["status"] < 200 or response["status"] > 299:
-            response['data'] = http_response.read()
+            try_retry = False
+            if response["status"] == 429:
+                # Not an AWS error, but s3 compatible server possible error:
+                # TooManyRequests/Busy/slowdown
+                try_retry = True
+
+            elif response["status"] == 503:
+                # SlowDown error
+                try_retry = True
+
+            if try_retry:
+                resource_uri = resource['uri']
+                if retries:
+                    retry_delay = self._fail_wait(retries)
+                    warning("Retrying failed request: %s (%s)"
+                            % (resource_uri, S3Error(response)))
+                    warning("Waiting %d sec..." % retry_delay)
+                    time.sleep(retry_delay)
+                    return self.recv_file(request, stream, labels, start_position,
+                                          retries=retries - 1)
+                else:
+                    warning("Too many failures. Giving up on '%s'" % resource_uri)
+                    raise S3DownloadError("Download failed for: %s" % resource_uri)
+
+            # Non-recoverable error
             raise S3Error(response)
 
         if start_position == 0:
@@ -2044,7 +2110,8 @@ class S3(object):
                 warning("Waiting %d sec..." % self._fail_wait(retries))
                 time.sleep(self._fail_wait(retries))
                 # Connection error -> same throttle value
-                return self.recv_file(request, stream, labels, current_position, retries - 1)
+                return self.recv_file(request, stream, labels, current_position,
+                                      retries=retries - 1)
             else:
                 raise S3DownloadError("Download failed for: %s" % resource['uri'])
 
@@ -2103,11 +2170,4 @@ def parse_attrs_header(attrs_header):
         attrs[key] = val
     return attrs
 
-def compute_content_md5(body):
-    m = md5(encode_to_s3(body))
-    base64md5 = encodestring(m.digest())
-    base64md5 = decode_from_s3(base64md5)
-    if base64md5[-1] == '\n':
-        base64md5 = base64md5[0:-1]
-    return decode_from_s3(base64md5)
 # vim:et:ts=4:sts=4:ai
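
The recv_file() changes above make the retry count default to config.max_retries and add retries with an escalating delay for HTTP 429 (TooManyRequests) and 503 (SlowDown) responses from S3-compatible servers. A minimal standalone sketch of that retry-with-backoff pattern, assuming a generic fetch callable; the helper names below are illustrative, not s3cmd's API:

import time
import logging

RETRIABLE_STATUSES = {429, 503}  # TooManyRequests / SlowDown

def fail_wait(retries_left, max_retries=5, base_delay=3):
    # Wait longer as fewer retries remain, similar to s3cmd's escalating delay.
    return base_delay * (max_retries - retries_left + 1)

def download_with_retries(fetch, max_retries=5):
    retries = max_retries
    while True:
        status, data = fetch()
        if 200 <= status <= 299:
            return data
        if status in RETRIABLE_STATUSES and retries > 0:
            delay = fail_wait(retries, max_retries)
            logging.warning("Retrying failed request (status %d), waiting %d sec...",
                            status, delay)
            time.sleep(delay)
            retries -= 1
            continue
        # Non-recoverable error, or retries exhausted.
        raise RuntimeError("Download failed with status %d" % status)

With max_retries=2, a fetch that keeps returning 503 is retried twice, waiting longer each time, before the error is raised.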
diff --git a/S3/S3Uri.py b/S3/S3Uri.py
index 6231179..10d4af5 100644
--- a/S3/S3Uri.py
+++ b/S3/S3Uri.py
@@ -109,26 +109,37 @@ class S3UriS3(S3Uri):
 
     @staticmethod
     def httpurl_to_s3uri(http_url):
-        m=re.match("(https?://)?([^/]+)/?(.*)", http_url, re.IGNORECASE | re.UNICODE)
+        m = re.match("(https?://)?([^/]+)/?(.*)", http_url, re.IGNORECASE | re.UNICODE)
         hostname, object = m.groups()[1:]
         hostname = hostname.lower()
-        if hostname == "s3.amazonaws.com":
+
+        # Worst case scenario, we would like to be able to match something like
+        # my.website.com.s3-fips.dualstack.us-west-1.amazonaws.com.cn
+        m = re.match("(.*\.)?s3(?:\-[^\.]*)?(?:\.dualstack)?(?:\.[^\.]*)?\.amazonaws\.com(?:\.cn)?$",
+                     hostname, re.IGNORECASE | re.UNICODE)
+        if not m:
+            raise ValueError("Unable to parse URL: %s" % http_url)
+
+        bucket = m.groups()[0]
+        if not bucket:
             ## old-style url: http://s3.amazonaws.com/bucket/object
-            if object.count("/") == 0:
+            if "/" not in object:
                 ## no object given
                 bucket = object
                 object = ""
             else:
                 ## bucket/object
                 bucket, object = object.split("/", 1)
-        elif hostname.endswith(".s3.amazonaws.com"):
-            ## new-style url: http://bucket.s3.amazonaws.com/object
-            bucket = hostname[:-(len(".s3.amazonaws.com"))]
         else:
-            raise ValueError("Unable to parse URL: %s" % http_url)
-        return S3Uri(u"s3://%(bucket)s/%(object)s" % {
-            'bucket' : bucket,
-            'object' : object })
+            ## new-style url: http://bucket.s3.amazonaws.com/object
+            bucket = bucket.rstrip('.')
+
+        return S3Uri(
+            u"s3://%(bucket)s/%(object)s" % {
+                'bucket' : bucket,
+                'object' : object
+            }
+        )
 
 class S3UriS3FS(S3Uri):
     type = "s3fs"
diff --git a/S3/SortedDict.py b/S3/SortedDict.py
index a2720e8..bb2d3e5 100644
--- a/S3/SortedDict.py
+++ b/S3/SortedDict.py
@@ -11,13 +11,20 @@ from __future__ import absolute_import, print_function
 from .BidirMap import BidirMap
 
 class SortedDictIterator(object):
-    def __init__(self, sorted_dict, keys):
+    def __init__(self, sorted_dict, keys, reverse=False):
         self.sorted_dict = sorted_dict
         self.keys = keys
+        if reverse:
+            self.pop_index = -1
+        else:
+            self.pop_index = 0
+
+    def __iter__(self):
+        return self
 
     def __next__(self):
         try:
-            return self.keys.pop(0)
+            return self.keys.pop(self.pop_index)
         except IndexError:
             raise StopIteration
 
@@ -54,6 +61,9 @@ class SortedDict(dict):
     def __iter__(self):
         return SortedDictIterator(self, self.keys())
 
+    def __reversed__(self):
+        return SortedDictIterator(self, self.keys(), reverse=True)
+
     def __getitem__(self, index):
         """Override to support the "get_slice" for python3 """
         if isinstance(index, slice):
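
SortedDict also gains a __reversed__ hook backed by the reverse-popping iterator above, so reversed(sorted_dict) walks keys from last to first. A toy stand-in class showing the effect; this is not the real S3.SortedDict:

class TinySortedDict(dict):
    def keys(self):
        return sorted(dict.keys(self))

    def __iter__(self):
        return iter(self.keys())

    def __reversed__(self):
        # Same idea as SortedDictIterator(..., reverse=True): walk from the end.
        return iter(self.keys()[::-1])

d = TinySortedDict(b=2, a=1, c=3)
print(list(d))            # ['a', 'b', 'c']
print(list(reversed(d)))  # ['c', 'b', 'a']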
diff --git a/S3/Utils.py b/S3/Utils.py
index a40f439..cf6e748 100644
--- a/S3/Utils.py
+++ b/S3/Utils.py
@@ -14,7 +14,6 @@ import re
 import string as string_mod
 import random
 import errno
-from hashlib import md5
 from logging import debug
 
 
@@ -30,7 +29,7 @@ import S3.Config
 import S3.Exceptions
 
 from S3.BaseUtils import (base_urlencode_string, base_replace_nonprintables,
-                          base_unicodise, base_deunicodise)
+                          base_unicodise, base_deunicodise, md5)
 
 
 __all__ = []
@@ -101,19 +100,6 @@ def mktmpfile(prefix = os.getenv('TMP','/tmp') + "/tmpfile-", randchars = 20):
 __all__.append("mktmpfile")
 
 
-def hash_file_md5(filename):
-    h = md5()
-    with open(deunicodise(filename), "rb") as fp:
-        while True:
-            # Hash 32kB chunks
-            data = fp.read(32*1024)
-            if not data:
-                break
-            h.update(data)
-    return h.hexdigest()
-__all__.append("hash_file_md5")
-
-
 def mkdir_with_parents(dir_name):
     """
     mkdir_with_parents(dst_dir)
@@ -309,24 +295,6 @@ def getHostnameFromBucket(bucket):
 __all__.append("getHostnameFromBucket")
 
 
-def calculateChecksum(buffer, mfile, offset, chunk_size, send_chunk):
-    md5_hash = md5()
-    size_left = chunk_size
-    if buffer == '':
-        mfile.seek(offset)
-        while size_left > 0:
-            data = mfile.read(min(send_chunk, size_left))
-            if not data:
-                break
-            md5_hash.update(data)
-            size_left -= len(data)
-    else:
-        md5_hash.update(buffer)
-
-    return md5_hash.hexdigest()
-__all__.append("calculateChecksum")
-
-
 # Deal with the fact that pwd and grp modules don't exist for Windows
 try:
     import pwd
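
The snapshot drops the MD5 helpers (hash_file_md5, calculateChecksum) from Utils.py and now imports md5 from S3.BaseUtils; this diff does not show where, or whether, equivalent helpers were relocated. For reference, the chunked-hashing idea behind the removed hash_file_md5(), as a standalone sketch:

from hashlib import md5

def hash_file_md5(filename, chunk_size=32 * 1024):
    h = md5()
    with open(filename, "rb") as fp:
        while True:
            data = fp.read(chunk_size)   # hash 32 kB chunks to bound memory use
            if not data:
                break
            h.update(data)
    return h.hexdigest()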
diff --git a/debian/changelog b/debian/changelog
index 9c5efd1..e7133a8 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,9 +1,10 @@
-s3cmd (2.3.0-2) UNRELEASED; urgency=medium
+s3cmd (2.3.0+git20221120.1.6f3e1ba-1) UNRELEASED; urgency=medium
 
   * Set upstream metadata fields: Bug-Submit, Repository-Browse.
   * Update standards version to 4.6.1, no changes needed.
+  * New upstream snapshot.
 
- -- Debian Janitor <janitor@jelmer.uk>  Wed, 05 Oct 2022 11:06:51 -0000
+ -- Debian Janitor <janitor@jelmer.uk>  Thu, 19 Jan 2023 14:01:05 -0000
 
 s3cmd (2.3.0-1) unstable; urgency=medium
 
diff --git a/format-manpage.pl b/format-manpage.pl
deleted file mode 100755
index 17695f4..0000000
--- a/format-manpage.pl
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/usr/bin/perl
-
-# Format s3cmd.1 manpage
-# Usage:
-#   s3cmd --help | format-manpage.pl > s3cmd.1
-
-use strict;
-
-my $commands = "";
-my $cfcommands = "";
-my $wscommands = "";
-my $options = "";
-
-while (<>) {
-	if (/^Commands:/) {
-		while (<>) {
-			last if (/^\s*$/);
-			my ($desc, $cmd, $cmdline);
-			($desc = $_) =~ s/^\s*(.*?)\s*$/$1/;
-			($cmdline = <>) =~ s/^\s*s3cmd (.*?) (.*?)\s*$/s3cmd \\fB$1\\fR \\fI$2\\fR/;
-			$cmd = $1;
-			$cmdline =~ s/-/\\-/g;
-			if ($cmd =~ /^cf/) {
-				$cfcommands .= ".TP\n$cmdline\n$desc\n";
-			} elsif ($cmd =~ /^ws/) {
-				$wscommands .= ".TP\n$cmdline\n$desc\n";
-			} else {
-				$commands .= ".TP\n$cmdline\n$desc\n";
-			}
-		}
-	}
-	if (/^Options:/) {
-		my ($opt, $desc);
-		while (<>) {
-			last if (/^\s*$/);
-			$_ =~ s/(.*?)\s*$/$1/;
-			$desc = "";
-			$opt = "";
-			if (/^  (-.*)/) {
-				$opt = $1;
-				if ($opt =~ /  /) {
-					($opt, $desc) = split(/\s\s+/, $opt, 2);
-				}
-				$opt =~ s/(-[^ ,=\.]+)/\\fB$1\\fR/g;
-				# escape all single dashes
-				$opt =~ s/-/\\-/g;
-				$options .= ".TP\n$opt\n";
-			} else {
-				$_ =~ s/\s*(.*?)\s*$/$1/;
-				$_ =~ s/(--[^ ,=\.]+)/\\fB$1\\fR/g;
-				# escape word 'Cache-Control'
-				$_ =~ s/'(\S+-\S+)'/\\&'$1'/g;
-				# escape all single dashes
-				$_ =~ s/-/\\-/g;
-				$desc .= $_;
-			}
-			if ($desc) {
-				$options .= "$desc\n";
-			}
-		}
-	}
-}
-print "
-.\\\" !!! IMPORTANT: This file is generated from s3cmd \\-\\-help output using format-manpage.pl
-.\\\" !!!            Do your changes either in s3cmd file or in 'format\\-manpage.pl' otherwise
-.\\\" !!!            they will be overwritten!
-
-.TH s3cmd 1
-.SH NAME
-s3cmd \\- tool for managing Amazon S3 storage space and Amazon CloudFront content delivery network
-.SH SYNOPSIS
-.B s3cmd
-[\\fIOPTIONS\\fR] \\fICOMMAND\\fR [\\fIPARAMETERS\\fR]
-.SH DESCRIPTION
-.PP
-.B s3cmd
-is a command line client for copying files to/from 
-Amazon S3 (Simple Storage Service) and performing other
-related tasks, for instance creating and removing buckets,
-listing objects, etc.
-
-.SH COMMANDS
-.PP
-.B s3cmd
-can do several \\fIactions\\fR specified by the following \\fIcommands\\fR.
-$commands
-
-.PP
-Commands for static WebSites configuration
-$wscommands
-
-.PP
-Commands for CloudFront management
-$cfcommands
-
-.SH OPTIONS
-.PP
-Some of the below specified options can have their default 
-values set in 
-.B s3cmd
-config file (by default \$HOME/.s3cmd). As it's a simple text file 
-feel free to open it with your favorite text editor and do any
-changes you like. 
-$options
-
-.SH EXAMPLES
-One of the most powerful commands of \\fIs3cmd\\fR is \\fBs3cmd sync\\fR used for 
-synchronising complete directory trees to or from remote S3 storage. To some extent 
-\\fBs3cmd put\\fR and \\fBs3cmd get\\fR share a similar behaviour with \\fBsync\\fR.
-.PP
-Basic usage common in backup scenarios is as simple as:
-.nf
-	s3cmd sync /local/path/ s3://test\\-bucket/backup/
-.fi
-.PP
-This command will find all files under /local/path directory and copy them 
-to corresponding paths under s3://test\\-bucket/backup on the remote side.
-For example:
-.nf
-	/local/path/\\fBfile1.ext\\fR         \\->  s3://bucket/backup/\\fBfile1.ext\\fR
-	/local/path/\\fBdir123/file2.bin\\fR  \\->  s3://bucket/backup/\\fBdir123/file2.bin\\fR
-.fi
-.PP
-However if the local path doesn't end with a slash the last directory's name
-is used on the remote side as well. Compare these with the previous example:
-.nf
-	s3cmd sync /local/path s3://test\\-bucket/backup/
-.fi
-will sync:
-.nf
-	/local/\\fBpath/file1.ext\\fR         \\->  s3://bucket/backup/\\fBpath/file1.ext\\fR
-	/local/\\fBpath/dir123/file2.bin\\fR  \\->  s3://bucket/backup/\\fBpath/dir123/file2.bin\\fR
-.fi
-.PP
-To retrieve the files back from S3 use inverted syntax:
-.nf
-	s3cmd sync s3://test\\-bucket/backup/ ~/restore/
-.fi
-that will download files:
-.nf
-	s3://bucket/backup/\\fBfile1.ext\\fR         \\->  ~/restore/\\fBfile1.ext\\fR
-	s3://bucket/backup/\\fBdir123/file2.bin\\fR  \\->  ~/restore/\\fBdir123/file2.bin\\fR
-.fi
-.PP
-Without the trailing slash on source the behaviour is similar to 
-what has been demonstrated with upload:
-.nf
-	s3cmd sync s3://test\\-bucket/backup ~/restore/
-.fi
-will download the files as:
-.nf
-	s3://bucket/\\fBbackup/file1.ext\\fR         \\->  ~/restore/\\fBbackup/file1.ext\\fR
-	s3://bucket/\\fBbackup/dir123/file2.bin\\fR  \\->  ~/restore/\\fBbackup/dir123/file2.bin\\fR
-.fi
-.PP
-All source file names, the bold ones above, are matched against \\fBexclude\\fR 
-rules and those that match are then re\\-checked against \\fBinclude\\fR rules to see
-whether they should be excluded or kept in the source list.
-.PP
-For the purpose of \\fB\\-\\-exclude\\fR and \\fB\\-\\-include\\fR matching only the 
-bold file names above are used. For instance only \\fBpath/file1.ext\\fR is tested
-against the patterns, not \\fI/local/\\fBpath/file1.ext\\fR
-.PP
-Both \\fB\\-\\-exclude\\fR and \\fB\\-\\-include\\fR work with shell\\-style wildcards (a.k.a. GLOB).
-For a greater flexibility s3cmd provides Regular\\-expression versions of the two exclude options 
-named \\fB\\-\\-rexclude\\fR and \\fB\\-\\-rinclude\\fR. 
-The options with ...\\fB\\-from\\fR suffix (eg \\-\\-rinclude\\-from) expect a filename as
-an argument. Each line of such a file is treated as one pattern.
-.PP
-There is only one set of patterns built from all \\fB\\-\\-(r)exclude(\\-from)\\fR options
-and similarly for include variant. Any file excluded with eg \\-\\-exclude can 
-be put back with a pattern found in \\-\\-rinclude\\-from list.
-.PP
-Run s3cmd with \\fB\\-\\-dry\\-run\\fR to verify that your rules work as expected. 
-Use together with \\fB\\-\\-debug\\fR get detailed information
-about matching file names against exclude and include rules.
-.PP
-For example to exclude all files with \".jpg\" extension except those beginning with a number use:
-.PP
-	\\-\\-exclude '*.jpg' \\-\\-rinclude '[0\\-9].*\\.jpg'
-.PP
-To exclude all files except \"*.jpg\" extension, use:
-.PP
-	\\-\\-exclude '*' \\-\\-include '*.jpg'
-.PP
-To exclude local directory 'somedir', be sure to use a trailing forward slash, as such:
-.PP
-	\\-\\-exclude 'somedir/'
-.PP
-
-.SH SEE ALSO
-For the most up to date list of options run: 
-.B s3cmd \\-\\-help
-.br
-For more info about usage, examples and other related info visit project homepage at:
-.B http://s3tools.org
-.SH AUTHOR
-Written by Michal Ludvig and contributors
-.SH CONTACT, SUPPORT
-Preferred way to get support is our mailing list:
-.br
-.I s3tools\\-general\@lists.sourceforge.net
-.br
-or visit the project homepage:
-.br
-.B http://s3tools.org
-.SH REPORTING BUGS
-Report bugs to 
-.I s3tools\\-bugs\@lists.sourceforge.net
-.SH COPYRIGHT
-Copyright \\(co 2007\\-2015 TGRMN Software \\- http://www.tgrmn.com \\- and contributors
-.br
-.SH LICENSE
-This program is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-.br
-";
diff --git a/magic b/magic
deleted file mode 100644
index 7eda929..0000000
--- a/magic
+++ /dev/null
@@ -1,63 +0,0 @@
-# Additional magic for common web file types
-
-0	string/b	{\ "	JSON data
-!:mime application/json
-0	string/b	{\ }	JSON data
-!:mime application/json
-0	string/b	[	JSON data
-!:mime application/json
-
-0	search/4000	function
->&0	search/32/b	)\ {	JavaScript program
-!:mime application/javascript
-
-0	search/4000	@media	CSS stylesheet
-!:mime text/css
-0	search/4000	@import	CSS stylesheet
-!:mime text/css
-0	search/4000	@namespace	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ background	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ border	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ bottom	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ color	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ cursor	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ direction	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ display	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ float	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ font	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ height	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ left	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ line-	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ margin	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ padding	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ position	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ right	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ text-	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ top	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ width	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ visibility	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ -moz-	CSS stylesheet
-!:mime text/css
-0	search/4000/b	{\ -webkit-	CSS stylesheet
-!:mime text/css
diff --git a/run-tests-minio.py b/run-tests-minio.py
deleted file mode 100755
index c493a81..0000000
--- a/run-tests-minio.py
+++ /dev/null
@@ -1,827 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-## Amazon S3cmd - testsuite
-## Author: Michal Ludvig <michal@logix.cz>
-##         http://www.logix.cz/michal
-## License: GPL Version 2
-## Copyright: TGRMN Software and contributors
-
-from __future__ import absolute_import, print_function
-
-import sys
-import os
-import re
-import time
-from subprocess import Popen, PIPE, STDOUT
-import locale
-import getpass
-import S3.Exceptions
-import S3.Config
-from S3.ExitCodes import *
-
-try:
-    unicode
-except NameError:
-    # python 3 support
-    # In python 3, unicode -> str, and str -> bytes
-    unicode = str
-
-count_pass = 0
-count_fail = 0
-count_skip = 0
-
-test_counter = 0
-run_tests = []
-exclude_tests = []
-
-verbose = False
-
-encoding = locale.getpreferredencoding()
-if not encoding:
-    print("Guessing current system encoding failed. Consider setting $LANG variable.")
-    sys.exit(1)
-else:
-    print("System encoding: " + encoding)
-
-try:
-    unicode
-except NameError:
-    # python 3 support
-    # In python 3, unicode -> str, and str -> bytes
-    unicode = str
-
-def unicodise(string, encoding = "utf-8", errors = "replace"):
-    """
-    Convert 'string' to Unicode or raise an exception.
-    Config can't use toolbox from Utils that is itself using Config
-    """
-    if type(string) == unicode:
-        return string
-
-    try:
-        return unicode(string, encoding, errors)
-    except UnicodeDecodeError:
-        raise UnicodeDecodeError("Conversion to unicode failed: %r" % string)
-
-# https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028
-def which(program):
-    def is_exe(fpath):
-        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
-    fpath, fname = os.path.split(program)
-    if fpath:
-        if is_exe(program):
-            return program
-    else:
-        for path in os.environ["PATH"].split(os.pathsep):
-            path = path.strip('"')
-            exe_file = os.path.join(path, program)
-            if is_exe(exe_file):
-                return exe_file
-
-    return None
-
-if which('curl') is not None:
-    have_curl = True
-else:
-    have_curl = False
-
-config_file = None
-if os.getenv("HOME"):
-    config_file = os.path.join(unicodise(os.getenv("HOME"), encoding),
-                               ".s3cfg")
-elif os.name == "nt" and os.getenv("USERPROFILE"):
-    config_file = os.path.join(
-        unicodise(os.getenv("USERPROFILE"), encoding),
-        os.getenv("APPDATA") and unicodise(os.getenv("APPDATA"), encoding)
-        or 'Application Data',
-        "s3cmd.ini")
-
-
-## Unpack testsuite/ directory
-if not os.path.isdir('testsuite') and os.path.isfile('testsuite.tar.gz'):
-    os.system("tar -xz -f testsuite.tar.gz")
-if not os.path.isdir('testsuite'):
-    print("Something went wrong while unpacking testsuite.tar.gz")
-    sys.exit(1)
-
-os.system("tar -xf testsuite/checksum.tar -C testsuite")
-if not os.path.isfile('testsuite/checksum/cksum33.txt'):
-    print("Something went wrong while unpacking testsuite/checkum.tar")
-    sys.exit(1)
-
-## Fix up permissions for permission-denied tests
-os.chmod("testsuite/permission-tests/permission-denied-dir", 0o444)
-os.chmod("testsuite/permission-tests/permission-denied.txt", 0o000)
-
-## Patterns for Unicode tests
-patterns = {}
-patterns['UTF-8'] = u"ŪņЇЌœđЗ/☺ unicode € rocks ™"
-patterns['GBK'] = u"12月31日/1-特色條目"
-
-have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
-if not have_encoding and os.path.isfile('testsuite/encodings/%s.tar.gz' % encoding):
-    os.system("tar xvz -C testsuite/encodings -f testsuite/encodings/%s.tar.gz" % encoding)
-    have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
-
-if have_encoding:
-    #enc_base_remote = "%s/xyz/%s/" % (pbucket(1), encoding)
-    enc_pattern = patterns[encoding]
-else:
-    print(encoding + " specific files not found.")
-
-def unicodise(string):
-    if type(string) == unicode:
-        return string
-
-    return unicode(string, "UTF-8", "replace")
-
-def deunicodise(string):
-    if type(string) != unicode:
-        return string
-
-    return string.encode("UTF-8", "replace")
-
-if not os.path.isdir('testsuite/crappy-file-name'):
-    os.system("tar xvz -C testsuite -f testsuite/crappy-file-name.tar.gz")
-    # TODO: also unpack if the tarball is newer than the directory timestamp
-    #       for instance when a new version was pulled from SVN.
-
-def test(label, cmd_args = [], retcode = 0, must_find = [], must_not_find = [], must_find_re = [], must_not_find_re = [], stdin = None):
-    def command_output():
-        print("----")
-        print(" ".join([" " in arg and "'%s'" % arg or arg for arg in cmd_args]))
-        print("----")
-        print(stdout)
-        print("----")
-
-    def failure(message = ""):
-        global count_fail
-        if message:
-            message = u"  (%r)" % message
-        print(u"\x1b[31;1mFAIL%s\x1b[0m" % (message))
-        count_fail += 1
-        command_output()
-        #return 1
-        sys.exit(1)
-    def success(message = ""):
-        global count_pass
-        if message:
-            message = "  (%r)" % message
-        print("\x1b[32;1mOK\x1b[0m%s" % (message))
-        count_pass += 1
-        if verbose:
-            command_output()
-        return 0
-    def skip(message = ""):
-        global count_skip
-        if message:
-            message = "  (%r)" % message
-        print("\x1b[33;1mSKIP\x1b[0m%s" % (message))
-        count_skip += 1
-        return 0
-    def compile_list(_list, regexps = False):
-        if regexps == False:
-            _list = [re.escape(item) for item in _list]
-
-        return [re.compile(item, re.MULTILINE) for item in _list]
-
-    global test_counter
-    test_counter += 1
-    print(("%3d  %s " % (test_counter, label)).ljust(30, "."), end=' ')
-    sys.stdout.flush()
-
-    if run_tests.count(test_counter) == 0 or exclude_tests.count(test_counter) > 0:
-        return skip()
-
-    if not cmd_args:
-        return skip()
-
-    p = Popen(cmd_args, stdin = stdin, stdout = PIPE, stderr = STDOUT, universal_newlines = True, close_fds = True)
-    stdout, stderr = p.communicate()
-    if type(retcode) not in [list, tuple]: retcode = [retcode]
-    if p.returncode not in retcode:
-        return failure("retcode: %d, expected one of: %s" % (p.returncode, retcode))
-
-    if type(must_find) not in [ list, tuple ]: must_find = [must_find]
-    if type(must_find_re) not in [ list, tuple ]: must_find_re = [must_find_re]
-    if type(must_not_find) not in [ list, tuple ]: must_not_find = [must_not_find]
-    if type(must_not_find_re) not in [ list, tuple ]: must_not_find_re = [must_not_find_re]
-
-    find_list = []
-    find_list.extend(compile_list(must_find))
-    find_list.extend(compile_list(must_find_re, regexps = True))
-    find_list_patterns = []
-    find_list_patterns.extend(must_find)
-    find_list_patterns.extend(must_find_re)
-
-    not_find_list = []
-    not_find_list.extend(compile_list(must_not_find))
-    not_find_list.extend(compile_list(must_not_find_re, regexps = True))
-    not_find_list_patterns = []
-    not_find_list_patterns.extend(must_not_find)
-    not_find_list_patterns.extend(must_not_find_re)
-
-    for index in range(len(find_list)):
-        stdout = unicodise(stdout)
-        match = find_list[index].search(stdout)
-        if not match:
-            return failure("pattern not found: %s" % find_list_patterns[index])
-    for index in range(len(not_find_list)):
-        match = not_find_list[index].search(stdout)
-        if match:
-            return failure("pattern found: %s (match: %s)" % (not_find_list_patterns[index], match.group(0)))
-
-    return success()
-
-def test_s3cmd(label, cmd_args = [], **kwargs):
-    if not cmd_args[0].endswith("s3cmd"):
-        cmd_args.insert(0, "python")
-        cmd_args.insert(1, "s3cmd")
-        if config_file:
-            cmd_args.insert(2, "-c")
-            cmd_args.insert(3, config_file)
-
-    return test(label, cmd_args, **kwargs)
-
-def test_mkdir(label, dir_name):
-    if os.name in ("posix", "nt"):
-        cmd = ['mkdir', '-p']
-    else:
-        print("Unknown platform: %s" % os.name)
-        sys.exit(1)
-    cmd.append(dir_name)
-    return test(label, cmd)
-
-def test_rmdir(label, dir_name):
-    if os.path.isdir(dir_name):
-        if os.name == "posix":
-            cmd = ['rm', '-rf']
-        elif os.name == "nt":
-            cmd = ['rmdir', '/s/q']
-        else:
-            print("Unknown platform: %s" % os.name)
-            sys.exit(1)
-        cmd.append(dir_name)
-        return test(label, cmd)
-    else:
-        return test(label, [])
-
-def test_flushdir(label, dir_name):
-    test_rmdir(label + "(rm)", dir_name)
-    return test_mkdir(label + "(mk)", dir_name)
-
-def test_copy(label, src_file, dst_file):
-    if os.name == "posix":
-        cmd = ['cp', '-f']
-    elif os.name == "nt":
-        cmd = ['copy']
-    else:
-        print("Unknown platform: %s" % os.name)
-        sys.exit(1)
-    cmd.append(src_file)
-    cmd.append(dst_file)
-    return test(label, cmd)
-
-def test_curl_HEAD(label, src_file, **kwargs):
-    cmd = ['curl', '--silent', '--head', '-include', '--location']
-    cmd.append(src_file)
-    return test(label, cmd, **kwargs)
-
-bucket_prefix = u"%s-" % getpass.getuser().lower()
-
-argv = sys.argv[1:]
-while argv:
-    arg = argv.pop(0)
-    if arg.startswith('--bucket-prefix='):
-        print("Usage: '--bucket-prefix PREFIX', not '--bucket-prefix=PREFIX'")
-        sys.exit(0)
-    if arg in ("-h", "--help"):
-        print("%s A B K..O -N" % sys.argv[0])
-        print("Run tests number A, B and K through to O, except for N")
-        sys.exit(0)
-
-    if arg in ("-c", "--config"):
-        config_file = argv.pop(0)
-        continue
-    if arg in ("-l", "--list"):
-        exclude_tests = range(0, 999)
-        break
-    if arg in ("-v", "--verbose"):
-        verbose = True
-        continue
-    if arg in ("-p", "--bucket-prefix"):
-        try:
-            bucket_prefix = argv.pop(0)
-        except IndexError:
-            print("Bucket prefix option must explicitly supply a bucket name prefix")
-            sys.exit(0)
-        continue
-    if ".." in arg:
-        range_idx = arg.find("..")
-        range_start = arg[:range_idx] or 0
-        range_end = arg[range_idx+2:] or 999
-        run_tests.extend(range(int(range_start), int(range_end) + 1))
-    elif arg.startswith("-"):
-        exclude_tests.append(int(arg[1:]))
-    else:
-        run_tests.append(int(arg))
-
-print("Using bucket prefix: '%s'" % bucket_prefix)
-
-cfg = S3.Config.Config(config_file)
-
-if not run_tests:
-    run_tests = range(0, 999)
-
-# helper functions for generating bucket names
-def bucket(tail):
-        '''Test bucket name'''
-        label = 'autotest'
-        if str(tail) == '3':
-                label = 'autotest'
-        return '%ss3cmd-%s-%s' % (bucket_prefix, label, tail)
-
-def pbucket(tail):
-        '''Like bucket(), but prepends "s3://" for you'''
-        return 's3://' + bucket(tail)
-
-## ====== Remove test buckets
-test_s3cmd("Remove test buckets", ['rb', '-r', '--force', pbucket(1), pbucket(2), pbucket(3)])
-
-## ====== verify they were removed
-test_s3cmd("Verify no test buckets", ['ls'],
-           must_not_find = [pbucket(1), pbucket(2), pbucket(3)])
-
-
-## ====== Create one bucket (EU)
-# Disabled for minio
-#test_s3cmd("Create one bucket (EU)", ['mb', '--bucket-location=EU', pbucket(1)],
-#    must_find = "Bucket '%s/' created" % pbucket(1))
-test_s3cmd("Create one bucket", ['mb', pbucket(1)],
-    must_find = "Bucket '%s/' created" % pbucket(1))
-
-
-
-## ====== Create multiple buckets
-test_s3cmd("Create multiple buckets", ['mb', pbucket(2), pbucket(3)],
-    must_find = [ "Bucket '%s/' created" % pbucket(2), "Bucket '%s/' created" % pbucket(3)])
-
-
-## ====== Invalid bucket name
-test_s3cmd("Invalid bucket name", ["mb", "--bucket-location=EU", pbucket('EU')],
-    retcode = EX_USAGE,
-    must_find = "ERROR: Parameter problem: Bucket name '%s' contains disallowed character" % bucket('EU'),
-    must_not_find_re = "Bucket.*created")
-
-
-## ====== Buckets list
-test_s3cmd("Buckets list", ["ls"],
-    must_find = [ pbucket(1), pbucket(2), pbucket(3) ], must_not_find_re = pbucket('EU'))
-
-## ====== Directory for cache
-test_flushdir("Create cache dir", "testsuite/cachetest")
-
-## ====== Sync to S3
-# Modified for Minio (exclude crappy dir)
-test_s3cmd("Sync to S3", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--exclude', 'crappy-file-name/*', '--exclude', 'testsuite/cachetest/.s3cmdcache', '--cache-file', 'testsuite/cachetest/.s3cmdcache'],
-           must_find = ["ERROR: Upload of 'testsuite/permission-tests/permission-denied.txt' is not possible (Reason: Permission denied)",
-           ],
-           must_not_find_re = ["demo/", "^(?!WARNING: Skipping).*\.png$", "permission-denied-dir"],
-           retcode = EX_PARTIAL)
-
-## ====== Create new file and sync with caching enabled
-test_mkdir("Create cache dir", "testsuite/cachetest/content")
-with open("testsuite/cachetest/content/testfile", "w"):
-    pass
-
-test_s3cmd("Sync to S3 with caching", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--exclude', 'crappy-file-name/*', '--exclude', 'cachetest/.s3cmdcache', '--cache-file', 'testsuite/cachetest/.s3cmdcache'],
-          must_find = "upload: 'testsuite/cachetest/content/testfile' -> '%s/xyz/cachetest/content/testfile'" % pbucket(1),
-          must_not_find = "upload 'testsuite/cachetest/.s3cmdcache'",
-          retcode = EX_PARTIAL)
-
-## ====== Remove content and retry cached sync with --delete-removed
-test_rmdir("Remove local file", "testsuite/cachetest/content")
-
-test_s3cmd("Sync to S3 and delete removed with caching", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--exclude', 'crappy-file-name/*', '--exclude', 'testsuite/cachetest/.s3cmdcache', '--cache-file', 'testsuite/cachetest/.s3cmdcache', '--delete-removed'],
-          must_find = "delete: '%s/xyz/cachetest/content/testfile'" % pbucket(1),
-          must_not_find = "dictionary changed size during iteration",
-          retcode = EX_PARTIAL)
-
-## ====== Remove cache directory and file
-test_rmdir("Remove cache dir", "testsuite/cachetest")
-
-if have_encoding:
-    ## ====== Sync UTF-8 / GBK / ... to S3
-    test_s3cmd(u"Sync %s to S3" % encoding, ['sync', 'testsuite/encodings/' + encoding, '%s/xyz/encodings/' % pbucket(1), '--exclude', 'demo/*', '--no-encrypt' ],
-        must_find = [ u"'testsuite/encodings/%(encoding)s/%(pattern)s' -> '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s'" % { 'encoding' : encoding, 'pattern' : enc_pattern , 'pbucket' : pbucket(1)} ])
-
-
-## ====== List bucket content
-test_s3cmd("List bucket content", ['ls', '%s/xyz/' % pbucket(1) ],
-    must_find_re = [ u"DIR +%s/xyz/binary/$" % pbucket(1) , u"DIR +%s/xyz/etc/$" % pbucket(1) ],
-    must_not_find = [ u"random-crap.md5", u"/demo" ])
-
-
-## ====== List bucket recursive
-must_find = [ u"%s/xyz/binary/random-crap.md5" % pbucket(1) ]
-if have_encoding:
-    must_find.append(u"%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s" % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
-
-test_s3cmd("List bucket recursive", ['ls', '--recursive', pbucket(1)],
-    must_find = must_find,
-    must_not_find = [ "logo.png" ])
-
-## ====== FIXME
-test_s3cmd("Recursive put", ['put', '--recursive', 'testsuite/etc', '%s/xyz/' % pbucket(1) ])
-
-
-## ====== Clean up local destination dir
-test_flushdir("Clean testsuite-out/", "testsuite-out")
-
-## ====== Put from stdin
-f = open('testsuite/single-file/single-file.txt', 'r')
-test_s3cmd("Put from stdin", ['put', '-', '%s/single-file/single-file.txt' % pbucket(1)],
-           must_find = ["'<stdin>' -> '%s/single-file/single-file.txt'" % pbucket(1)],
-           stdin = f)
-f.close()
-
-## ====== Multipart put
-os.system('mkdir -p testsuite-out')
-os.system('dd if=/dev/urandom of=testsuite-out/urandom.bin bs=1M count=16 > /dev/null 2>&1')
-test_s3cmd("Put multipart", ['put', '--multipart-chunk-size-mb=5', 'testsuite-out/urandom.bin', '%s/urandom.bin' % pbucket(1)],
-           must_not_find = ['abortmp'])
-
-## ====== Multipart put from stdin
-f = open('testsuite-out/urandom.bin', 'r')
-test_s3cmd("Multipart large put from stdin", ['put', '--multipart-chunk-size-mb=5', '-', '%s/urandom2.bin' % pbucket(1)],
-           must_find = ['%s/urandom2.bin' % pbucket(1)],
-           must_not_find = ['abortmp'],
-           stdin = f)
-f.close()
-
-## ====== Clean up local destination dir
-test_flushdir("Clean testsuite-out/", "testsuite-out")
-
-## ====== Moving things without trailing '/'
-os.system('dd if=/dev/urandom of=testsuite-out/urandom1.bin bs=1k count=1 > /dev/null 2>&1')
-os.system('dd if=/dev/urandom of=testsuite-out/urandom2.bin bs=1k count=1 > /dev/null 2>&1')
-test_s3cmd("Put multiple files", ['put', 'testsuite-out/urandom1.bin', 'testsuite-out/urandom2.bin', '%s/' % pbucket(1)],
-           must_find = ["%s/urandom1.bin" % pbucket(1), "%s/urandom2.bin" % pbucket(1)])
-
-test_s3cmd("Move without '/'", ['mv', '%s/urandom1.bin' % pbucket(1), '%s/urandom2.bin' % pbucket(1), '%s/dir' % pbucket(1)],
-           retcode = 64,
-           must_find = ['Destination must be a directory'])
-
-test_s3cmd("Move recursive w/a '/'",
-           ['-r', 'mv', '%s/dir1' % pbucket(1), '%s/dir2' % pbucket(1)],
-           retcode = 64,
-           must_find = ['Destination must be a directory'])
-
-## ====== Moving multiple files into directory with trailing '/'
-must_find = ["'%s/urandom1.bin' -> '%s/dir/urandom1.bin'" % (pbucket(1),pbucket(1)), "'%s/urandom2.bin' -> '%s/dir/urandom2.bin'" % (pbucket(1),pbucket(1))]
-must_not_find = ["'%s/urandom1.bin' -> '%s/dir'" % (pbucket(1),pbucket(1)), "'%s/urandom2.bin' -> '%s/dir'" % (pbucket(1),pbucket(1))]
-test_s3cmd("Move multiple files",
-           ['mv', '%s/urandom1.bin' % pbucket(1), '%s/urandom2.bin' % pbucket(1), '%s/dir/' % pbucket(1)],
-           must_find = must_find,
-           must_not_find = must_not_find)
-
-## ====== Clean up local destination dir
-test_flushdir("Clean testsuite-out/", "testsuite-out")
-
-## ====== Sync from S3
-must_find = [ "'%s/xyz/binary/random-crap.md5' -> 'testsuite-out/xyz/binary/random-crap.md5'" % pbucket(1) ]
-if have_encoding:
-    must_find.append(u"'%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s' -> 'testsuite-out/xyz/encodings/%(encoding)s/%(pattern)s' " % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
-test_s3cmd("Sync from S3", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
-    must_find = must_find)
-
-## ====== Remove 'demo' directory
-test_rmdir("Remove 'dir-test/'", "testsuite-out/xyz/dir-test/")
-
-
-## ====== Create dir with name of a file
-test_mkdir("Create file-dir dir", "testsuite-out/xyz/dir-test/file-dir")
-
-
-## ====== Skip dst dirs
-test_s3cmd("Skip over dir", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
-           must_find = "ERROR: Download of 'xyz/dir-test/file-dir' failed (Reason: testsuite-out/xyz/dir-test/file-dir is a directory)",
-           retcode = EX_PARTIAL)
-
-
-## ====== Clean up local destination dir
-test_flushdir("Clean testsuite-out/", "testsuite-out")
-
-
-## ====== Put public, guess MIME
-test_s3cmd("Put public, guess MIME", ['put', '--guess-mime-type', '--acl-public', 'testsuite/etc/logo.png', '%s/xyz/etc/logo.png' % pbucket(1)],
-    must_find = [ "-> '%s/xyz/etc/logo.png'" % pbucket(1) ])
-
-
-## ====== Retrieve from URL
-# Minio: disabled
-#if have_curl:
-#   test_curl_HEAD("Retrieve from URL", 'http://%s.%s/xyz/etc/logo.png' % (bucket(1), cfg.host_base),
-#                   must_find_re = ['Content-Length: 22059'])
-
-## ====== Change ACL to Private
-# Minio: disabled
-#test_s3cmd("Change ACL to Private", ['setacl', '--acl-private', '%s/xyz/etc/l*.png' % pbucket(1)],
-#    must_find = [ "logo.png: ACL set to Private" ])
-
-
-## ====== Verify Private ACL
-# Minio: disabled
-#if have_curl:
-#    test_curl_HEAD("Verify Private ACL", 'http://%s.%s/xyz/etc/logo.png' % (bucket(1), cfg.host_base),
-#                   must_find_re = [ '403 Forbidden' ])
-
-
-## ====== Change ACL to Public
-# Minio: disabled
-#test_s3cmd("Change ACL to Public", ['setacl', '--acl-public', '--recursive', '%s/xyz/etc/' % pbucket(1) , '-v'],
-#    must_find = [ "logo.png: ACL set to Public" ])
-
-
-## ====== Verify Public ACL
-# Minio: disabled
-#if have_curl:
-#    test_curl_HEAD("Verify Public ACL", 'http://%s.%s/xyz/etc/logo.png' % (bucket(1), cfg.host_base),
-#                   must_find_re = [ '200 OK',
-#                                    'Content-Length: 22059'])
-
-
-## ====== Sync more to S3
-# Modified for Minio (exclude crappy dir)
-test_s3cmd("Sync more to S3", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--exclude', 'crappy-file-name/*' ],
-           must_find = [ "'testsuite/demo/some-file.xml' -> '%s/xyz/demo/some-file.xml' " % pbucket(1) ],
-           must_not_find = [ "'testsuite/etc/linked.png' -> '%s/xyz/etc/linked.png'" % pbucket(1) ],
-           retcode = EX_PARTIAL)
-
-
-## ====== Don't check MD5 sum on Sync
-test_copy("Change file cksum1.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum1.txt")
-test_copy("Change file cksum33.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum33.txt")
-# Modified for Minio (exclude crappy dir)
-test_s3cmd("Don't check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--no-check-md5', '--exclude', 'crappy-file-name/*'],
-           must_find = [ "cksum33.txt" ],
-           must_not_find = [ "cksum1.txt" ],
-           retcode = EX_PARTIAL)
-
-
-## ====== Check MD5 sum on Sync
-# Modified for Minio (exclude crappy dir)
-test_s3cmd("Check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--check-md5', '--exclude', 'crappy-file-name/*'],
-           must_find = [ "cksum1.txt" ],
-           retcode = EX_PARTIAL)
-
-
-## ====== Rename within S3
-test_s3cmd("Rename within S3", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
-    must_find = [ "move: '%s/xyz/etc/logo.png' -> '%s/xyz/etc2/Logo.PNG'" % (pbucket(1), pbucket(1))])
-
-
-## ====== Rename (NoSuchKey)
-test_s3cmd("Rename (NoSuchKey)", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
-    retcode = EX_NOTFOUND,
-    must_find_re = [ 'Key not found' ],
-    must_not_find = [ "move: '%s/xyz/etc/logo.png' -> '%s/xyz/etc2/Logo.PNG'" % (pbucket(1), pbucket(1)) ])
-
-## ====== Sync more from S3 (invalid src)
-test_s3cmd("Sync more from S3 (invalid src)", ['sync', '--delete-removed', '%s/xyz/DOESNOTEXIST' % pbucket(1), 'testsuite-out'],
-    must_not_find = [ "delete: 'testsuite-out/logo.png'" ])
-
-## ====== Sync more from S3
-test_s3cmd("Sync more from S3", ['sync', '--delete-removed', '%s/xyz' % pbucket(1), 'testsuite-out'],
-    must_find = [ "'%s/xyz/etc2/Logo.PNG' -> 'testsuite-out/xyz/etc2/Logo.PNG'" % pbucket(1),
-                  "'%s/xyz/demo/some-file.xml' -> 'testsuite-out/xyz/demo/some-file.xml'" % pbucket(1) ],
-    must_not_find_re = [ "not-deleted.*etc/logo.png", "delete: 'testsuite-out/logo.png'" ])
-
-
-## ====== Make dst dir for get
-test_rmdir("Remove dst dir for get", "testsuite-out")
-
-
-## ====== Get multiple files
-test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
-    retcode = EX_USAGE,
-    must_find = [ 'Destination must be a directory or stdout when downloading multiple sources.' ])
-
-## ====== put/get non-ASCII filenames
-test_s3cmd("Put unicode filenames", ['put', u'testsuite/encodings/UTF-8/ŪņЇЌœđЗ/Žůžo',  u'%s/xyz/encodings/UTF-8/ŪņЇЌœđЗ/Žůžo' % pbucket(1)],
-           retcode = 0,
-           must_find = [ '->' ])
-
-
-## ====== Make dst dir for get
-test_mkdir("Make dst dir for get", "testsuite-out")
-
-
-## ====== put/get non-ASCII filenames
-test_s3cmd("Get unicode filenames", ['get', u'%s/xyz/encodings/UTF-8/ŪņЇЌœđЗ/Žůžo' % pbucket(1), 'testsuite-out'],
-           retcode = 0,
-           must_find = [ '->' ])
-
-
-## ====== Get multiple files
-test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
-    must_find = [ u"-> 'testsuite-out/Logo.PNG'",
-                  u"-> 'testsuite-out/AtomicClockRadio.ttf'" ])
-
-## ====== Upload files differing in capitalisation
-test_s3cmd("blah.txt / Blah.txt", ['put', '-r', 'testsuite/blahBlah', pbucket(1)],
-    must_find = [ '%s/blahBlah/Blah.txt' % pbucket(1), '%s/blahBlah/blah.txt' % pbucket(1)])
-
-## ====== Copy between buckets
-test_s3cmd("Copy between buckets", ['cp', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc2/logo.png' % pbucket(3)],
-    must_find = [ "remote copy: '%s/xyz/etc2/Logo.PNG' -> '%s/xyz/etc2/logo.png'" % (pbucket(1), pbucket(3)) ])
-
-## ====== Recursive copy
-test_s3cmd("Recursive copy, set ACL", ['cp', '-r', '--acl-public', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--exclude', 'demo/dir?/*.txt', '--exclude', 'non-printables*'],
-    must_find = [ "remote copy: '%s/xyz/etc2/Logo.PNG' -> '%s/copy/etc2/Logo.PNG'" % (pbucket(1), pbucket(2)),
-                  "remote copy: '%s/xyz/blahBlah/Blah.txt' -> '%s/copy/blahBlah/Blah.txt'" % (pbucket(1), pbucket(2)),
-                  "remote copy: '%s/xyz/blahBlah/blah.txt' -> '%s/copy/blahBlah/blah.txt'" % (pbucket(1), pbucket(2)) ],
-    must_not_find = [ "demo/dir1/file1-1.txt" ])
-
-## ====== Verify ACL and MIME type
-# Minio: disable acl check, not supported by minio
-test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-    must_find_re = [ "MIME type:.*image/png" ])
-
-## ====== modify MIME type
-# Minio: disable acl check, not supported by minio
-# Minio: modifying mime type alone not allowed as copy of same file for them
-#test_s3cmd("Modify MIME type", ['modify', '--mime-type=binary/octet-stream', '%s/copy/etc2/Logo.PNG' % pbucket(2) ])
-
-#test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-#    must_find_re = [ "MIME type:.*binary/octet-stream" ])
-
-# Minio: disable acl check, not supported by minio
-#test_s3cmd("Modify MIME type back", ['modify', '--mime-type=image/png', '%s/copy/etc2/Logo.PNG' % pbucket(2) ])
-
-# Minio: disable acl check, not supported by minio
-#test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-#    must_find_re = [ "MIME type:.*image/png" ])
-
-#test_s3cmd("Add cache-control header", ['modify', '--add-header=cache-control: max-age=3600, public', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-#    must_find_re = [ "modify: .*" ])
-
-#if have_curl:
-#    test_curl_HEAD("HEAD check Cache-Control present", 'http://%s.%s/copy/etc2/Logo.PNG' % (bucket(2), cfg.host_base),
-#                   must_find_re = [ "Cache-Control: max-age=3600" ])
-
-#test_s3cmd("Remove cache-control header", ['modify', '--remove-header=cache-control', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-#           must_find_re = [ "modify: .*" ])
-
-#if have_curl:
-#    test_curl_HEAD("HEAD check Cache-Control not present", 'http://%s.%s/copy/etc2/Logo.PNG' % (bucket(2), cfg.host_base),
-#                   must_not_find_re = [ "Cache-Control: max-age=3600" ])
-
-## ====== sign
-test_s3cmd("sign string", ['sign', 's3cmd'], must_find_re = ["Signature:"])
-test_s3cmd("signurl time", ['signurl', '%s/copy/etc2/Logo.PNG' % pbucket(2), str(int(time.time()) + 60)], must_find_re = ["http://"])
-test_s3cmd("signurl time offset", ['signurl', '%s/copy/etc2/Logo.PNG' % pbucket(2), '+60'], must_find_re = ["https?://"])
-test_s3cmd("signurl content disposition and type", ['signurl', '%s/copy/etc2/Logo.PNG' % pbucket(2), '+60', '--content-disposition=inline; filename=video.mp4', '--content-type=video/mp4'], must_find_re = [ 'response-content-disposition', 'response-content-type' ] )
-
-## ====== Rename within S3
-test_s3cmd("Rename within S3", ['mv', '%s/copy/etc2/Logo.PNG' % pbucket(2), '%s/copy/etc/logo.png' % pbucket(2)],
-    must_find = [ "move: '%s/copy/etc2/Logo.PNG' -> '%s/copy/etc/logo.png'" % (pbucket(2), pbucket(2))])
-
-## ====== Sync between buckets
-test_s3cmd("Sync remote2remote", ['sync', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--delete-removed', '--exclude', 'non-printables*'],
-    must_find = [ "remote copy: '%s/xyz/demo/dir1/file1-1.txt' -> '%s/copy/demo/dir1/file1-1.txt'" % (pbucket(1), pbucket(2)),
-                  "remote copy: 'etc/logo.png' -> 'etc2/Logo.PNG'",
-                  "delete: '%s/copy/etc/logo.png'" % pbucket(2) ],
-    must_not_find = [ "blah.txt" ])
-
-## ====== Don't Put symbolic link
-test_s3cmd("Don't put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),  '--exclude', 'crappy-file-name/*'],
-           retcode = EX_USAGE,
-           must_find = ["WARNING: Skipping over symbolic link: testsuite/etc/linked1.png"],
-           must_not_find_re = ["^(?!WARNING: Skipping).*linked1.png"])
-
-## ====== Put symbolic link
-test_s3cmd("Put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),'--follow-symlinks' ,  '--exclude', 'crappy-file-name/*'],
-           must_find = [ "'testsuite/etc/linked1.png' -> '%s/xyz/linked1.png'" % pbucket(1)])
-
-## ====== Sync symbolic links
-test_s3cmd("Sync symbolic links", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--follow-symlinks',  '--exclude', 'crappy-file-name/*' ],
-    must_find = ["remote copy: 'etc2/Logo.PNG' -> 'etc/linked.png'"],
-           # Don't want to recursively copy linked directories!
-           must_not_find_re = ["etc/more/linked-dir/more/give-me-more.txt",
-                               "etc/brokenlink.png"],
-           retcode = EX_PARTIAL)
-
-## ====== Multi source move
-test_s3cmd("Multi-source move", ['mv', '-r', '%s/copy/blahBlah/Blah.txt' % pbucket(2), '%s/copy/etc/' % pbucket(2), '%s/moved/' % pbucket(2)],
-    must_find = [ "move: '%s/copy/blahBlah/Blah.txt' -> '%s/moved/Blah.txt'" % (pbucket(2), pbucket(2)),
-                  "move: '%s/copy/etc/AtomicClockRadio.ttf' -> '%s/moved/AtomicClockRadio.ttf'" % (pbucket(2), pbucket(2)),
-                  "move: '%s/copy/etc/TypeRa.ttf' -> '%s/moved/TypeRa.ttf'" % (pbucket(2), pbucket(2)) ],
-    must_not_find = [ "blah.txt" ])
-
-## ====== Verify move
-test_s3cmd("Verify move", ['ls', '-r', pbucket(2)],
-    must_find = [ "%s/moved/Blah.txt" % pbucket(2),
-                  "%s/moved/AtomicClockRadio.ttf" % pbucket(2),
-                  "%s/moved/TypeRa.ttf" % pbucket(2),
-                  "%s/copy/blahBlah/blah.txt" % pbucket(2) ],
-    must_not_find = [ "%s/copy/blahBlah/Blah.txt" % pbucket(2),
-                      "%s/copy/etc/AtomicClockRadio.ttf" % pbucket(2),
-                      "%s/copy/etc/TypeRa.ttf" % pbucket(2) ])
-
-## ====== List all
-test_s3cmd("List all", ['la'],
-           must_find = [ "%s/urandom.bin" % pbucket(1)])
-
-## ====== Simple delete
-test_s3cmd("Simple delete", ['del', '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
-    must_find = [ "delete: '%s/xyz/etc2/Logo.PNG'" % pbucket(1) ])
-
-## ====== Simple delete with rm
-test_s3cmd("Simple delete with rm", ['rm', '%s/xyz/test_rm/TypeRa.ttf' % pbucket(1)],
-    must_find = [ "delete: '%s/xyz/test_rm/TypeRa.ttf'" % pbucket(1) ])
-
-## ====== Create expiration rule with days and prefix
-# Minio: disabled
-#test_s3cmd("Create expiration rule with days and prefix", ['expire', pbucket(1), '--expiry-days=365', '--expiry-prefix=log/'],
-#    must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
-
-## ====== Create expiration rule with date and prefix
-# Minio: disabled
-#test_s3cmd("Create expiration rule with date and prefix", ['expire', pbucket(1), '--expiry-date=2012-12-31T00:00:00.000Z', '--expiry-prefix=log/'],
-#    must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
-
-## ====== Create expiration rule with days only
-# Minio: disabled
-#test_s3cmd("Create expiration rule with days only", ['expire', pbucket(1), '--expiry-days=365'],
-#    must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
-
-## ====== Create expiration rule with date only
-# Minio: disabled
-#test_s3cmd("Create expiration rule with date only", ['expire', pbucket(1), '--expiry-date=2012-12-31T00:00:00.000Z'],
-#    must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
-
-## ====== Get current expiration setting
-# Minio: disabled
-#test_s3cmd("Get current expiration setting", ['info', pbucket(1)],
-#    must_find = [ "Expiration Rule: all objects in this bucket will expire in '2012-12-31T00:00:00.000Z'"])
-
-## ====== Delete expiration rule
-# Minio: disabled
-#test_s3cmd("Delete expiration rule", ['expire', pbucket(1)],
-#    must_find = [ "Bucket '%s/': expiration configuration is deleted." % pbucket(1)])
-
-## ====== set Requester Pays flag
-# Minio: disabled
-#test_s3cmd("Set requester pays", ['payer', '--requester-pays', pbucket(2)])
-
-## ====== get Requester Pays flag
-# Minio: disabled
-#test_s3cmd("Get requester pays flag", ['info', pbucket(2)],
-#    must_find = [ "Payer:     Requester"])
-
-## ====== ls using Requester Pays flag
-# Minio: disabled
-#test_s3cmd("ls using requester pays flag", ['ls', '--requester-pays', pbucket(2)])
-
-## ====== clear Requester Pays flag
-# Minio: disabled
-#test_s3cmd("Clear requester pays", ['payer', pbucket(2)])
-
-## ====== get Requester Pays flag
-# Minio: disabled
-#test_s3cmd("Get requester pays flag", ['info', pbucket(2)],
-#    must_find = [ "Payer:     BucketOwner"])
-
-## ====== Recursive delete maximum exceeed
-test_s3cmd("Recursive delete maximum exceeded", ['del', '--recursive', '--max-delete=1', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)],
-    must_not_find = [ "delete: '%s/xyz/etc/TypeRa.ttf'" % pbucket(1) ])
-
-## ====== Recursive delete
-test_s3cmd("Recursive delete", ['del', '--recursive', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)],
-    must_find = [ "delete: '%s/xyz/etc/TypeRa.ttf'" % pbucket(1) ],
-    must_find_re = [ "delete: '.*/etc/logo.png'" ],
-    must_not_find = [ "AtomicClockRadio.ttf" ])
-
-## ====== Recursive delete with rm
-test_s3cmd("Recursive delete with rm", ['rm', '--recursive', '--exclude', 'Atomic*', '%s/xyz/test_rm' % pbucket(1)],
-    must_find = [ "delete: '%s/xyz/test_rm/more/give-me-more.txt'" % pbucket(1) ],
-    must_find_re = [ "delete: '.*/test_rm/logo.png'" ],
-    must_not_find = [ "AtomicClockRadio.ttf" ])
-
-## ====== Recursive delete all
-test_s3cmd("Recursive delete all", ['del', '--recursive', '--force', pbucket(1)],
-    must_find_re = [ "delete: '.*binary/random-crap'" ])
-
-## ====== Remove empty bucket
-test_s3cmd("Remove empty bucket", ['rb', pbucket(1)],
-    must_find = [ "Bucket '%s/' removed" % pbucket(1) ])
-
-## ====== Remove remaining buckets
-test_s3cmd("Remove remaining buckets", ['rb', '--recursive', pbucket(2), pbucket(3)],
-    must_find = [ "Bucket '%s/' removed" % pbucket(2),
-              "Bucket '%s/' removed" % pbucket(3) ])
-
-# vim:et:ts=4:sts=4:ai
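
The deleted run-tests-minio.py (like run-tests.py below) is built around a test() helper that runs a command and checks its output against must_find / must_not_find patterns. A bare-bones sketch of that idea; the real harness additionally tracks return codes, skips and pass/fail counters:

import re
import subprocess

def check_output(cmd_args, must_find=(), must_not_find=()):
    out = subprocess.run(cmd_args, capture_output=True, text=True).stdout
    for pattern in must_find:
        if not re.search(re.escape(pattern), out, re.MULTILINE):
            return "FAIL: pattern not found: %s" % pattern
    for pattern in must_not_find:
        if re.search(re.escape(pattern), out, re.MULTILINE):
            return "FAIL: pattern found: %s" % pattern
    return "OK"

print(check_output(["echo", "hello"], must_find=["hello"]))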
diff --git a/run-tests.docker.README.md b/run-tests.docker.README.md
deleted file mode 100644
index 988e11d..0000000
--- a/run-tests.docker.README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Running test cases with Docker
-## (The  markdown formatting in this file is best viewed on Github)
-
-### tl;dr
-
-* Place a valid .s3cfg file in the root project directory.
-* `docker build -t s3cmd-tests --build-arg pyVersion=3.6 -f run-tests.dockerfile .`  
-Note the trailing period and substitute your desired Python version as needed.
-* `docker run --rm s3cmd-tests`
-
-### More Details
-
-The included run-tests.dockerfile allows contributors to easily test their changes with Python versions that aren't installed locally.
-
-Docker must, of course, be installed on your system if it is not already. See https://docs.docker.com/install/ for instructions.
-
-To begin, build the Dockerfile into an image for the Python version you wish to test against.  The build must be repeated whenever your source changes, but the Python image itself will be cached.  To build:
-
-* Place a valid .s3cfg file in the root project directory.  While .s3cfg has been added to the .gitignore to avoid sending your credentials to public repositories, you should still make sure you remove it when your testing is complete.
-
-* Run `docker build -t s3cmd-tests -f run-tests.dockerfile .` (the trailing period is required)
-
-  This will:
-
-  * Download the latest Python Docker image
-  * Add a testuser group and account
-  * Copy the .s3cfg into the user's home directory (/home/testuser)
-  * Copy the entire project folder into /home/testuser/src/s3cmd
-  * Install s3cmd dependencies (as root)
-
-The main purpose of this Dockerfile is to allow you to run with multiple Python versions.  To see the Docker Python images available, visit [Docker Hub](https://hub.docker.com/_/python).  Most of the Linux variants should be usable, but the "alpine" variants will result in the smallest downloads and images.  For example:
-
-`docker build -t s3cmd-tests --build-arg pyVersion=3.8.1-alpine3.11 -f run-tests.dockerfile .`
-
-After successfully building the image, you can run it with `docker run --rm s3cmd-tests`.  This will execute the run-tests.py script in the Docker container with your .s3cfg credentials.
-
-Normal `run-tests.py` options may appended.  For example:
-
-`docker run --rm s3cmd-tests --bucket-prefix mytests`
-
-Additional notes:
-
-* If you would like to enter a shell in the container, use `docker run -i -t --rm --entrypoint sh s3cmd-tests`.
-  * `bash` may be specified if you are using a Python image that supports it (not Alpine).
-* If it has been a few days since your last usage, you should check for updates to the upstream Python docker image using `docker pull python` or `docker pull python:3.7` (substituting your desired version)
-* Rebuilding does not over-write a previous image, but instead creates a new image and "untags" the previous one.  Use `docker images` to show all the images on your system, and `docker image prune` to cleanup unused, untagged images.  Please use this command carefully if you have other Docker images on your system.
-* When testing is completed, remove unused Python images with `docker rmi python:3.7`, substituting the tag/version you wish to remove. `docker images` will list the images on your system.
diff --git a/run-tests.dockerfile b/run-tests.dockerfile
deleted file mode 100644
index 2e19203..0000000
--- a/run-tests.dockerfile
+++ /dev/null
@@ -1,24 +0,0 @@
-ARG pyVersion=latest
-FROM python:${pyVersion}
-ARG pyVersion
-RUN addgroup testuser \
-  && adduser \
-     --home /home/testuser \
-     --ingroup testuser \
-     --disabled-password \
-     --gecos "" \
-     testuser
-
-USER testuser
-RUN mkdir /home/testuser/src
-WORKDIR /home/testuser/src
-COPY --chown=testuser ./ s3cmd
-COPY --chown=testuser .s3cfg /home/testuser/
-USER root
-WORKDIR /home/testuser/src/s3cmd
-RUN pip install .
-USER testuser
-
-ENTRYPOINT ["python","run-tests.py"]
-
-RUN echo Built with Python version $(python --version)
diff --git a/run-tests.py b/run-tests.py
deleted file mode 100755
index 2bcf560..0000000
--- a/run-tests.py
+++ /dev/null
@@ -1,814 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-## Amazon S3cmd - testsuite
-## Author: Michal Ludvig <michal@logix.cz>
-##         http://www.logix.cz/michal
-## License: GPL Version 2
-## Copyright: TGRMN Software and contributors
-
-from __future__ import absolute_import, print_function
-
-import sys
-import os
-import re
-import time
-from subprocess import Popen, PIPE, STDOUT
-import locale
-import getpass
-import S3.Exceptions
-import S3.Config
-from S3.ExitCodes import *
-
-try:
-    unicode
-except NameError:
-    # python 3 support
-    # In python 3, unicode -> str, and str -> bytes
-    unicode = str
-
-count_pass = 0
-count_fail = 0
-count_skip = 0
-
-test_counter = 0
-run_tests = []
-exclude_tests = []
-
-verbose = False
-
-encoding = locale.getpreferredencoding()
-if not encoding:
-    print("Guessing current system encoding failed. Consider setting $LANG variable.")
-    sys.exit(1)
-else:
-    print("System encoding: " + encoding)
-
-try:
-    unicode
-except NameError:
-    # python 3 support
-    # In python 3, unicode -> str, and str -> bytes
-    unicode = str
-
-def unicodise(string, encoding = "utf-8", errors = "replace"):
-    """
-    Convert 'string' to Unicode or raise an exception.
-    Config can't use toolbox from Utils that is itself using Config
-    """
-    if type(string) == unicode:
-        return string
-
-    try:
-        return unicode(string, encoding, errors)
-    except UnicodeDecodeError:
-        raise UnicodeDecodeError("Conversion to unicode failed: %r" % string)
-
-# https://stackoverflow.com/questions/377017/test-if-executable-exists-in-python/377028#377028
-def which(program):
-    def is_exe(fpath):
-        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
-
-    fpath, fname = os.path.split(program)
-    if fpath:
-        if is_exe(program):
-            return program
-    else:
-        for path in os.environ["PATH"].split(os.pathsep):
-            path = path.strip('"')
-            exe_file = os.path.join(path, program)
-            if is_exe(exe_file):
-                return exe_file
-
-    return None
-
-if which('curl') is not None:
-    have_curl = True
-else:
-    have_curl = False
-
-config_file = None
-if os.getenv("HOME"):
-    config_file = os.path.join(unicodise(os.getenv("HOME"), encoding),
-                               ".s3cfg")
-elif os.name == "nt" and os.getenv("USERPROFILE"):
-    config_file = os.path.join(
-        unicodise(os.getenv("USERPROFILE"), encoding),
-        os.getenv("APPDATA") and unicodise(os.getenv("APPDATA"), encoding)
-        or 'Application Data',
-        "s3cmd.ini")
-
-
-## Unpack testsuite/ directory
-if not os.path.isdir('testsuite') and os.path.isfile('testsuite.tar.gz'):
-    os.system("tar -xz -f testsuite.tar.gz")
-if not os.path.isdir('testsuite'):
-    print("Something went wrong while unpacking testsuite.tar.gz")
-    sys.exit(1)
-
-os.system("tar -xf testsuite/checksum.tar -C testsuite")
-if not os.path.isfile('testsuite/checksum/cksum33.txt'):
-    print("Something went wrong while unpacking testsuite/checkum.tar")
-    sys.exit(1)
-
-## Fix up permissions for permission-denied tests
-os.chmod("testsuite/permission-tests/permission-denied-dir", 0o444)
-os.chmod("testsuite/permission-tests/permission-denied.txt", 0o000)
-
-## Patterns for Unicode tests
-patterns = {}
-patterns['UTF-8'] = u"ŪņЇЌœđЗ/☺ unicode € rocks ™"
-patterns['GBK'] = u"12月31日/1-特色條目"
-
-have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
-if not have_encoding and os.path.isfile('testsuite/encodings/%s.tar.gz' % encoding):
-    os.system("tar xvz -C testsuite/encodings -f testsuite/encodings/%s.tar.gz" % encoding)
-    have_encoding = os.path.isdir('testsuite/encodings/' + encoding)
-
-if have_encoding:
-    #enc_base_remote = "%s/xyz/%s/" % (pbucket(1), encoding)
-    enc_pattern = patterns[encoding]
-else:
-    print(encoding + " specific files not found.")
-
-def unicodise(string):
-    if type(string) == unicode:
-        return string
-
-    return unicode(string, "UTF-8", "replace")
-
-def deunicodise(string):
-    if type(string) != unicode:
-        return string
-
-    return string.encode("UTF-8", "replace")
-
-if not os.path.isdir('testsuite/crappy-file-name'):
-    os.system("tar xvz -C testsuite -f testsuite/crappy-file-name.tar.gz")
-    # TODO: also unpack if the tarball is newer than the directory timestamp
-    #       for instance when a new version was pulled from SVN.
-
-def test(label, cmd_args = [], retcode = 0, must_find = [], must_not_find = [], must_find_re = [], must_not_find_re = [], stdin = None):
-    def command_output():
-        print("----")
-        print(" ".join([" " in arg and "'%s'" % arg or arg for arg in cmd_args]))
-        print("----")
-        print(stdout)
-        print("----")
-
-    def failure(message = ""):
-        global count_fail
-        if message:
-            message = u"  (%r)" % message
-        print(u"\x1b[31;1mFAIL%s\x1b[0m" % (message))
-        count_fail += 1
-        command_output()
-        #return 1
-        sys.exit(1)
-    def success(message = ""):
-        global count_pass
-        if message:
-            message = "  (%r)" % message
-        print("\x1b[32;1mOK\x1b[0m%s" % (message))
-        count_pass += 1
-        if verbose:
-            command_output()
-        return 0
-    def skip(message = ""):
-        global count_skip
-        if message:
-            message = "  (%r)" % message
-        print("\x1b[33;1mSKIP\x1b[0m%s" % (message))
-        count_skip += 1
-        return 0
-    def compile_list(_list, regexps = False):
-        if regexps == False:
-            _list = [re.escape(item) for item in _list]
-
-        return [re.compile(item, re.MULTILINE) for item in _list]
-
-    global test_counter
-    test_counter += 1
-    print(("%3d  %s " % (test_counter, label)).ljust(30, "."), end=' ')
-    sys.stdout.flush()
-
-    if run_tests.count(test_counter) == 0 or exclude_tests.count(test_counter) > 0:
-        return skip()
-
-    if not cmd_args:
-        return skip()
-
-    p = Popen(cmd_args, stdin = stdin, stdout = PIPE, stderr = STDOUT, universal_newlines = True, close_fds = True)
-    stdout, stderr = p.communicate()
-    if type(retcode) not in [list, tuple]: retcode = [retcode]
-    if p.returncode not in retcode:
-        return failure("retcode: %d, expected one of: %s" % (p.returncode, retcode))
-
-    if type(must_find) not in [ list, tuple ]: must_find = [must_find]
-    if type(must_find_re) not in [ list, tuple ]: must_find_re = [must_find_re]
-    if type(must_not_find) not in [ list, tuple ]: must_not_find = [must_not_find]
-    if type(must_not_find_re) not in [ list, tuple ]: must_not_find_re = [must_not_find_re]
-
-    find_list = []
-    find_list.extend(compile_list(must_find))
-    find_list.extend(compile_list(must_find_re, regexps = True))
-    find_list_patterns = []
-    find_list_patterns.extend(must_find)
-    find_list_patterns.extend(must_find_re)
-
-    not_find_list = []
-    not_find_list.extend(compile_list(must_not_find))
-    not_find_list.extend(compile_list(must_not_find_re, regexps = True))
-    not_find_list_patterns = []
-    not_find_list_patterns.extend(must_not_find)
-    not_find_list_patterns.extend(must_not_find_re)
-
-    for index in range(len(find_list)):
-        stdout = unicodise(stdout)
-        match = find_list[index].search(stdout)
-        if not match:
-            return failure("pattern not found: %s" % find_list_patterns[index])
-    for index in range(len(not_find_list)):
-        match = not_find_list[index].search(stdout)
-        if match:
-            return failure("pattern found: %s (match: %s)" % (not_find_list_patterns[index], match.group(0)))
-
-    return success()
-
-def test_s3cmd(label, cmd_args = [], **kwargs):
-    if not cmd_args[0].endswith("s3cmd"):
-        cmd_args.insert(0, "python")
-        cmd_args.insert(1, "s3cmd")
-        if config_file:
-            cmd_args.insert(2, "-c")
-            cmd_args.insert(3, config_file)
-
-    return test(label, cmd_args, **kwargs)
-
-def test_mkdir(label, dir_name):
-    if os.name in ("posix", "nt"):
-        cmd = ['mkdir', '-p']
-    else:
-        print("Unknown platform: %s" % os.name)
-        sys.exit(1)
-    cmd.append(dir_name)
-    return test(label, cmd)
-
-def test_rmdir(label, dir_name):
-    if os.path.isdir(dir_name):
-        if os.name == "posix":
-            cmd = ['rm', '-rf']
-        elif os.name == "nt":
-            cmd = ['rmdir', '/s/q']
-        else:
-            print("Unknown platform: %s" % os.name)
-            sys.exit(1)
-        cmd.append(dir_name)
-        return test(label, cmd)
-    else:
-        return test(label, [])
-
-def test_flushdir(label, dir_name):
-    test_rmdir(label + "(rm)", dir_name)
-    return test_mkdir(label + "(mk)", dir_name)
-
-def test_copy(label, src_file, dst_file):
-    if os.name == "posix":
-        cmd = ['cp', '-f']
-    elif os.name == "nt":
-        cmd = ['copy']
-    else:
-        print("Unknown platform: %s" % os.name)
-        sys.exit(1)
-    cmd.append(src_file)
-    cmd.append(dst_file)
-    return test(label, cmd)
-
-def test_curl_HEAD(label, src_file, **kwargs):
-    cmd = ['curl', '--silent', '--head', '--include', '--location']
-    cmd.append(src_file)
-    return test(label, cmd, **kwargs)
-
-bucket_prefix = u"%s-" % getpass.getuser().lower()
-
-argv = sys.argv[1:]
-while argv:
-    arg = argv.pop(0)
-    if arg.startswith('--bucket-prefix='):
-        print("Usage: '--bucket-prefix PREFIX', not '--bucket-prefix=PREFIX'")
-        sys.exit(0)
-    if arg in ("-h", "--help"):
-        print("%s A B K..O -N" % sys.argv[0])
-        print("Run tests number A, B and K through to O, except for N")
-        sys.exit(0)
-
-    if arg in ("-c", "--config"):
-        config_file = argv.pop(0)
-        continue
-    if arg in ("-l", "--list"):
-        exclude_tests = range(0, 999)
-        break
-    if arg in ("-v", "--verbose"):
-        verbose = True
-        continue
-    if arg in ("-p", "--bucket-prefix"):
-        try:
-            bucket_prefix = argv.pop(0)
-        except IndexError:
-            print("Bucket prefix option must explicitly supply a bucket name prefix")
-            sys.exit(0)
-        continue
-    if ".." in arg:
-        range_idx = arg.find("..")
-        range_start = arg[:range_idx] or 0
-        range_end = arg[range_idx+2:] or 999
-        run_tests.extend(range(int(range_start), int(range_end) + 1))
-    elif arg.startswith("-"):
-        exclude_tests.append(int(arg[1:]))
-    else:
-        run_tests.append(int(arg))
-
-print("Using bucket prefix: '%s'" % bucket_prefix)
-
-cfg = S3.Config.Config(config_file)
-
-if not run_tests:
-    run_tests = range(0, 999)
-
-# helper functions for generating bucket names
-def bucket(tail):
-        '''Test bucket name'''
-        label = 'autotest'
-        if str(tail) == '3':
-                label = 'autotest'
-        return '%ss3cmd-%s-%s' % (bucket_prefix, label, tail)
-
-def pbucket(tail):
-        '''Like bucket(), but prepends "s3://" for you'''
-        return 's3://' + bucket(tail)
-
-## ====== Remove test buckets
-test_s3cmd("Remove test buckets", ['rb', '-r', '--force', pbucket(1), pbucket(2), pbucket(3)])
-
-## ====== verify they were removed
-test_s3cmd("Verify no test buckets", ['ls'],
-           must_not_find = [pbucket(1), pbucket(2), pbucket(3)])
-
-
-## ====== Create one bucket (EU)
-test_s3cmd("Create one bucket (EU)", ['mb', '--bucket-location=EU', pbucket(1)],
-    must_find = "Bucket '%s/' created" % pbucket(1))
-
-
-
-## ====== Create multiple buckets
-test_s3cmd("Create multiple buckets", ['mb', pbucket(2), pbucket(3)],
-    must_find = [ "Bucket '%s/' created" % pbucket(2), "Bucket '%s/' created" % pbucket(3)])
-
-
-## ====== Invalid bucket name
-test_s3cmd("Invalid bucket name", ["mb", "--bucket-location=EU", pbucket('EU')],
-    retcode = EX_USAGE,
-    must_find = "ERROR: Parameter problem: Bucket name '%s' contains disallowed character" % bucket('EU'),
-    must_not_find_re = "Bucket.*created")
-
-
-## ====== Buckets list
-test_s3cmd("Buckets list", ["ls"],
-    must_find = [ pbucket(1), pbucket(2), pbucket(3) ], must_not_find_re = pbucket('EU'))
-
-## ====== Directory for cache
-test_flushdir("Create cache dir", "testsuite/cachetest")
-
-## ====== Sync to S3
-test_s3cmd("Sync to S3", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--exclude', 'testsuite/cachetest/.s3cmdcache', '--cache-file', 'testsuite/cachetest/.s3cmdcache'],
-           must_find = ["ERROR: Upload of 'testsuite/permission-tests/permission-denied.txt' is not possible (Reason: Permission denied)",
-                        "WARNING: 32 non-printable characters replaced in: crappy-file-name/non-printables",
-           ],
-           must_not_find_re = ["demo/", "^(?!WARNING: Skipping).*\.png$", "permission-denied-dir"],
-           retcode = EX_PARTIAL)
-
-## ====== Create new file and sync with caching enabled
-test_mkdir("Create cache dir", "testsuite/cachetest/content")
-with open("testsuite/cachetest/content/testfile", "w"):
-    pass
-
-test_s3cmd("Sync to S3 with caching", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--exclude', 'cachetest/.s3cmdcache', '--cache-file', 'testsuite/cachetest/.s3cmdcache' ],
-          must_find = "upload: 'testsuite/cachetest/content/testfile' -> '%s/xyz/cachetest/content/testfile'" % pbucket(1),
-          must_not_find = "upload 'testsuite/cachetest/.s3cmdcache'",
-          retcode = EX_PARTIAL)
-
-## ====== Remove content and retry cached sync with --delete-removed
-test_rmdir("Remove local file", "testsuite/cachetest/content")
-
-test_s3cmd("Sync to S3 and delete removed with caching", ['sync', 'testsuite/', pbucket(1) + '/xyz/', '--exclude', 'demo/*', '--exclude', '*.png', '--no-encrypt', '--exclude-from', 'testsuite/exclude.encodings', '--exclude', 'testsuite/cachetest/.s3cmdcache', '--cache-file', 'testsuite/cachetest/.s3cmdcache', '--delete-removed'],
-          must_find = "delete: '%s/xyz/cachetest/content/testfile'" % pbucket(1),
-          must_not_find = "dictionary changed size during iteration",
-          retcode = EX_PARTIAL)
-
-## ====== Remove cache directory and file
-test_rmdir("Remove cache dir", "testsuite/cachetest")
-
-if have_encoding:
-    ## ====== Sync UTF-8 / GBK / ... to S3
-    test_s3cmd(u"Sync %s to S3" % encoding, ['sync', 'testsuite/encodings/' + encoding, '%s/xyz/encodings/' % pbucket(1), '--exclude', 'demo/*', '--no-encrypt' ],
-        must_find = [ u"'testsuite/encodings/%(encoding)s/%(pattern)s' -> '%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s'" % { 'encoding' : encoding, 'pattern' : enc_pattern , 'pbucket' : pbucket(1)} ])
-
-
-## ====== List bucket content
-test_s3cmd("List bucket content", ['ls', '%s/xyz/' % pbucket(1) ],
-    must_find_re = [ u"DIR +%s/xyz/binary/$" % pbucket(1) , u"DIR +%s/xyz/etc/$" % pbucket(1) ],
-    must_not_find = [ u"random-crap.md5", u"/demo" ])
-
-
-## ====== List bucket recursive
-must_find = [ u"%s/xyz/binary/random-crap.md5" % pbucket(1) ]
-if have_encoding:
-    must_find.append(u"%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s" % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
-
-test_s3cmd("List bucket recursive", ['ls', '--recursive', pbucket(1)],
-    must_find = must_find,
-    must_not_find = [ "logo.png" ])
-
-## ====== FIXME
-test_s3cmd("Recursive put", ['put', '--recursive', 'testsuite/etc', '%s/xyz/' % pbucket(1) ])
-
-
-## ====== Clean up local destination dir
-test_flushdir("Clean testsuite-out/", "testsuite-out")
-
-## ====== Put from stdin
-f = open('testsuite/single-file/single-file.txt', 'r')
-test_s3cmd("Put from stdin", ['put', '-', '%s/single-file/single-file.txt' % pbucket(1)],
-           must_find = ["'<stdin>' -> '%s/single-file/single-file.txt'" % pbucket(1)],
-           stdin = f)
-f.close()
-
-## ====== Multipart put
-os.system('mkdir -p testsuite-out')
-os.system('dd if=/dev/urandom of=testsuite-out/urandom.bin bs=1M count=16 > /dev/null 2>&1')
-test_s3cmd("Put multipart", ['put', '--multipart-chunk-size-mb=5', 'testsuite-out/urandom.bin', '%s/urandom.bin' % pbucket(1)],
-           must_not_find = ['abortmp'])
-
-## ====== Multipart put from stdin
-f = open('testsuite-out/urandom.bin', 'r')
-test_s3cmd("Multipart large put from stdin", ['put', '--multipart-chunk-size-mb=5', '-', '%s/urandom2.bin' % pbucket(1)],
-           must_find = ['%s/urandom2.bin' % pbucket(1)],
-           must_not_find = ['abortmp'],
-           stdin = f)
-f.close()
-
-## ====== Clean up local destination dir
-test_flushdir("Clean testsuite-out/", "testsuite-out")
-
-## ====== Moving things without trailing '/'
-os.system('dd if=/dev/urandom of=testsuite-out/urandom1.bin bs=1k count=1 > /dev/null 2>&1')
-os.system('dd if=/dev/urandom of=testsuite-out/urandom2.bin bs=1k count=1 > /dev/null 2>&1')
-test_s3cmd("Put multiple files", ['put', 'testsuite-out/urandom1.bin', 'testsuite-out/urandom2.bin', '%s/' % pbucket(1)],
-           must_find = ["%s/urandom1.bin" % pbucket(1), "%s/urandom2.bin" % pbucket(1)])
-
-test_s3cmd("Move without '/'", ['mv', '%s/urandom1.bin' % pbucket(1), '%s/urandom2.bin' % pbucket(1), '%s/dir' % pbucket(1)],
-           retcode = 64,
-           must_find = ['Destination must be a directory'])
-
-test_s3cmd("Move recursive w/a '/'",
-           ['-r', 'mv', '%s/dir1' % pbucket(1), '%s/dir2' % pbucket(1)],
-           retcode = 64,
-           must_find = ['Destination must be a directory'])
-
-## ====== Moving multiple files into directory with trailing '/'
-must_find = ["'%s/urandom1.bin' -> '%s/dir/urandom1.bin'" % (pbucket(1),pbucket(1)), "'%s/urandom2.bin' -> '%s/dir/urandom2.bin'" % (pbucket(1),pbucket(1))]
-must_not_find = ["'%s/urandom1.bin' -> '%s/dir'" % (pbucket(1),pbucket(1)), "'%s/urandom2.bin' -> '%s/dir'" % (pbucket(1),pbucket(1))]
-test_s3cmd("Move multiple files",
-           ['mv', '%s/urandom1.bin' % pbucket(1), '%s/urandom2.bin' % pbucket(1), '%s/dir/' % pbucket(1)],
-           must_find = must_find,
-           must_not_find = must_not_find)
-
-## ====== Clean up local destination dir
-test_flushdir("Clean testsuite-out/", "testsuite-out")
-
-## ====== Sync from S3
-must_find = [ "'%s/xyz/binary/random-crap.md5' -> 'testsuite-out/xyz/binary/random-crap.md5'" % pbucket(1) ]
-if have_encoding:
-    must_find.append(u"'%(pbucket)s/xyz/encodings/%(encoding)s/%(pattern)s' -> 'testsuite-out/xyz/encodings/%(encoding)s/%(pattern)s' " % { 'encoding' : encoding, 'pattern' : enc_pattern, 'pbucket' : pbucket(1) })
-test_s3cmd("Sync from S3", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
-    must_find = must_find)
-
-## ====== Remove 'demo' directory
-test_rmdir("Remove 'dir-test/'", "testsuite-out/xyz/dir-test/")
-
-
-## ====== Create dir with name of a file
-test_mkdir("Create file-dir dir", "testsuite-out/xyz/dir-test/file-dir")
-
-
-## ====== Skip dst dirs
-test_s3cmd("Skip over dir", ['sync', '%s/xyz' % pbucket(1), 'testsuite-out'],
-           must_find = "ERROR: Download of 'xyz/dir-test/file-dir' failed (Reason: testsuite-out/xyz/dir-test/file-dir is a directory)",
-           retcode = EX_PARTIAL)
-
-
-## ====== Clean up local destination dir
-test_flushdir("Clean testsuite-out/", "testsuite-out")
-
-
-## ====== Put public, guess MIME
-test_s3cmd("Put public, guess MIME", ['put', '--guess-mime-type', '--acl-public', 'testsuite/etc/logo.png', '%s/xyz/etc/logo.png' % pbucket(1)],
-    must_find = [ "-> '%s/xyz/etc/logo.png'" % pbucket(1) ])
-
-
-## ====== Retrieve from URL
-if have_curl:
-    test_curl_HEAD("Retrieve from URL", 'http://%s.%s/xyz/etc/logo.png' % (bucket(1), cfg.host_base),
-                   must_find_re = ['Content-Length: 22059'])
-
-## ====== Change ACL to Private
-test_s3cmd("Change ACL to Private", ['setacl', '--acl-private', '%s/xyz/etc/l*.png' % pbucket(1)],
-    must_find = [ "logo.png: ACL set to Private" ])
-
-
-## ====== Verify Private ACL
-if have_curl:
-    test_curl_HEAD("Verify Private ACL", 'http://%s.%s/xyz/etc/logo.png' % (bucket(1), cfg.host_base),
-                   must_find_re = [ '403 Forbidden' ])
-
-
-## ====== Change ACL to Public
-test_s3cmd("Change ACL to Public", ['setacl', '--acl-public', '--recursive', '%s/xyz/etc/' % pbucket(1) , '-v'],
-    must_find = [ "logo.png: ACL set to Public" ])
-
-
-## ====== Verify Public ACL
-if have_curl:
-    test_curl_HEAD("Verify Public ACL", 'http://%s.%s/xyz/etc/logo.png' % (bucket(1), cfg.host_base),
-                   must_find_re = [ '200 OK',
-                                    'Content-Length: 22059'])
-
-
-## ====== Sync more to S3
-test_s3cmd("Sync more to S3", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt' ],
-           must_find = [ "'testsuite/demo/some-file.xml' -> '%s/xyz/demo/some-file.xml' " % pbucket(1) ],
-           must_not_find = [ "'testsuite/etc/linked.png' -> '%s/xyz/etc/linked.png'" % pbucket(1) ],
-           retcode = EX_PARTIAL)
-
-
-## ====== Don't check MD5 sum on Sync
-test_copy("Change file cksum1.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum1.txt")
-test_copy("Change file cksum33.txt", "testsuite/checksum/cksum2.txt", "testsuite/checksum/cksum33.txt")
-test_s3cmd("Don't check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--no-check-md5'],
-           must_find = [ "cksum33.txt" ],
-           must_not_find = [ "cksum1.txt" ],
-           retcode = EX_PARTIAL)
-
-
-## ====== Check MD5 sum on Sync
-test_s3cmd("Check MD5", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--check-md5'],
-           must_find = [ "cksum1.txt" ],
-           retcode = EX_PARTIAL)
-
-
-## ====== Rename within S3
-test_s3cmd("Rename within S3", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
-    must_find = [ "move: '%s/xyz/etc/logo.png' -> '%s/xyz/etc2/Logo.PNG'" % (pbucket(1), pbucket(1))])
-
-
-## ====== Rename (NoSuchKey)
-test_s3cmd("Rename (NoSuchKey)", ['mv', '%s/xyz/etc/logo.png' % pbucket(1), '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
-    retcode = EX_NOTFOUND,
-    must_find_re = [ 'Key not found' ],
-    must_not_find = [ "move: '%s/xyz/etc/logo.png' -> '%s/xyz/etc2/Logo.PNG'" % (pbucket(1), pbucket(1)) ])
-
-## ====== Sync more from S3 (invalid src)
-test_s3cmd("Sync more from S3 (invalid src)", ['sync', '--delete-removed', '%s/xyz/DOESNOTEXIST' % pbucket(1), 'testsuite-out'],
-    must_not_find = [ "delete: 'testsuite-out/logo.png'" ])
-
-## ====== Sync more from S3
-test_s3cmd("Sync more from S3", ['sync', '--delete-removed', '%s/xyz' % pbucket(1), 'testsuite-out'],
-    must_find = [ "'%s/xyz/etc2/Logo.PNG' -> 'testsuite-out/xyz/etc2/Logo.PNG'" % pbucket(1),
-                  "'%s/xyz/demo/some-file.xml' -> 'testsuite-out/xyz/demo/some-file.xml'" % pbucket(1) ],
-    must_not_find_re = [ "not-deleted.*etc/logo.png", "delete: 'testsuite-out/logo.png'" ])
-
-
-## ====== Make dst dir for get
-test_rmdir("Remove dst dir for get", "testsuite-out")
-
-
-## ====== Get multiple files
-test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
-    retcode = EX_USAGE,
-    must_find = [ 'Destination must be a directory or stdout when downloading multiple sources.' ])
-
-## ====== put/get non-ASCII filenames
-test_s3cmd("Put unicode filenames", ['put', u'testsuite/encodings/UTF-8/ŪņЇЌœđЗ/Žůžo',  u'%s/xyz/encodings/UTF-8/ŪņЇЌœđЗ/Žůžo' % pbucket(1)],
-           retcode = 0,
-           must_find = [ '->' ])
-
-
-## ====== Make dst dir for get
-test_mkdir("Make dst dir for get", "testsuite-out")
-
-
-## ====== put/get non-ASCII filenames
-test_s3cmd("Get unicode filenames", ['get', u'%s/xyz/encodings/UTF-8/ŪņЇЌœđЗ/Žůžo' % pbucket(1), 'testsuite-out'],
-           retcode = 0,
-           must_find = [ '->' ])
-
-
-## ====== Get multiple files
-test_s3cmd("Get multiple files", ['get', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc/AtomicClockRadio.ttf' % pbucket(1), 'testsuite-out'],
-    must_find = [ u"-> 'testsuite-out/Logo.PNG'",
-                  u"-> 'testsuite-out/AtomicClockRadio.ttf'" ])
-
-## ====== Upload files differing in capitalisation
-test_s3cmd("blah.txt / Blah.txt", ['put', '-r', 'testsuite/blahBlah', pbucket(1)],
-    must_find = [ '%s/blahBlah/Blah.txt' % pbucket(1), '%s/blahBlah/blah.txt' % pbucket(1)])
-
-## ====== Copy between buckets
-test_s3cmd("Copy between buckets", ['cp', '%s/xyz/etc2/Logo.PNG' % pbucket(1), '%s/xyz/etc2/logo.png' % pbucket(3)],
-    must_find = [ "remote copy: '%s/xyz/etc2/Logo.PNG' -> '%s/xyz/etc2/logo.png'" % (pbucket(1), pbucket(3)) ])
-
-## ====== Recursive copy
-test_s3cmd("Recursive copy, set ACL", ['cp', '-r', '--acl-public', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--exclude', 'demo/dir?/*.txt', '--exclude', 'non-printables*'],
-    must_find = [ "remote copy: '%s/xyz/etc2/Logo.PNG' -> '%s/copy/etc2/Logo.PNG'" % (pbucket(1), pbucket(2)),
-                  "remote copy: '%s/xyz/blahBlah/Blah.txt' -> '%s/copy/blahBlah/Blah.txt'" % (pbucket(1), pbucket(2)),
-                  "remote copy: '%s/xyz/blahBlah/blah.txt' -> '%s/copy/blahBlah/blah.txt'" % (pbucket(1), pbucket(2)) ],
-    must_not_find = [ "demo/dir1/file1-1.txt" ])
-
-## ====== Verify ACL and MIME type
-test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-    must_find_re = [ "MIME type:.*image/png",
-                     "ACL:.*\*anon\*: READ",
-                     "URL:.*https?://%s.%s/copy/etc2/Logo.PNG" % (bucket(2), cfg.host_base) ])
-
-## ====== modify MIME type
-test_s3cmd("Modify MIME type", ['modify', '--mime-type=binary/octet-stream', '%s/copy/etc2/Logo.PNG' % pbucket(2) ])
-
-test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-    must_find_re = [ "MIME type:.*binary/octet-stream",
-                     "ACL:.*\*anon\*: READ",
-                     "URL:.*https?://%s.%s/copy/etc2/Logo.PNG" % (bucket(2), cfg.host_base) ])
-
-test_s3cmd("Modify MIME type back", ['modify', '--mime-type=image/png', '%s/copy/etc2/Logo.PNG' % pbucket(2) ])
-
-test_s3cmd("Verify ACL and MIME type", ['info', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-    must_find_re = [ "MIME type:.*image/png",
-                     "ACL:.*\*anon\*: READ",
-                     "URL:.*https?://%s.%s/copy/etc2/Logo.PNG" % (bucket(2), cfg.host_base) ])
-
-test_s3cmd("Add cache-control header", ['modify', '--add-header=cache-control: max-age=3600, public', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-    must_find_re = [ "modify: .*" ])
-
-if have_curl:
-    test_curl_HEAD("HEAD check Cache-Control present", 'http://%s.%s/copy/etc2/Logo.PNG' % (bucket(2), cfg.host_base),
-                   must_find_re = [ "Cache-Control: max-age=3600" ])
-
-test_s3cmd("Remove cache-control header", ['modify', '--remove-header=cache-control', '%s/copy/etc2/Logo.PNG' % pbucket(2) ],
-           must_find_re = [ "modify: .*" ])
-
-if have_curl:
-    test_curl_HEAD("HEAD check Cache-Control not present", 'http://%s.%s/copy/etc2/Logo.PNG' % (bucket(2), cfg.host_base),
-                   must_not_find_re = [ "Cache-Control: max-age=3600" ])
-
-## ====== sign
-test_s3cmd("sign string", ['sign', 's3cmd'], must_find_re = ["Signature:"])
-test_s3cmd("signurl time", ['signurl', '%s/copy/etc2/Logo.PNG' % pbucket(2), str(int(time.time()) + 60)], must_find_re = ["http://"])
-test_s3cmd("signurl time offset", ['signurl', '%s/copy/etc2/Logo.PNG' % pbucket(2), '+60'], must_find_re = ["https?://"])
-test_s3cmd("signurl content disposition and type", ['signurl', '%s/copy/etc2/Logo.PNG' % pbucket(2), '+60', '--content-disposition=inline; filename=video.mp4', '--content-type=video/mp4'], must_find_re = [ 'response-content-disposition', 'response-content-type' ] )
-
-## ====== Rename within S3
-test_s3cmd("Rename within S3", ['mv', '%s/copy/etc2/Logo.PNG' % pbucket(2), '%s/copy/etc/logo.png' % pbucket(2)],
-    must_find = [ "move: '%s/copy/etc2/Logo.PNG' -> '%s/copy/etc/logo.png'" % (pbucket(2), pbucket(2))])
-
-## ====== Sync between buckets
-test_s3cmd("Sync remote2remote", ['sync', '%s/xyz/' % pbucket(1), '%s/copy/' % pbucket(2), '--delete-removed', '--exclude', 'non-printables*'],
-    must_find = [ "remote copy: '%s/xyz/demo/dir1/file1-1.txt' -> '%s/copy/demo/dir1/file1-1.txt'" % (pbucket(1), pbucket(2)),
-                  "remote copy: 'etc/logo.png' -> 'etc2/Logo.PNG'",
-                  "delete: '%s/copy/etc/logo.png'" % pbucket(2) ],
-    must_not_find = [ "blah.txt" ])
-
-## ====== Exclude directory
-test_s3cmd("Exclude directory", ['put', '-r', 'testsuite/demo/', pbucket(1) + '/xyz/demo/', '--exclude', 'dir1/', '-d'],
-           must_find = ["'testsuite/demo/dir2/file2-1.bin' -> '%s/xyz/demo/dir2/file2-1.bin'" % pbucket(1),
-                        "DEBUG: EXCLUDE: 'testsuite/demo/dir1/'"],  # whole directory is excluded
-           must_not_find = ["'testsuite/demo/dir1/file1-1.txt' -> '%s/xyz/demo/dir1/file1-1.txt'" % pbucket(1),
-                            "DEBUG: EXCLUDE: 'dir1/file1-1.txt'"  # file is not synced, but also single file is not excluded
-                           ])
-
-## ====== Don't Put symbolic link
-test_s3cmd("Don't put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),],
-           retcode = EX_USAGE,
-           must_find = ["WARNING: Skipping over symbolic link: testsuite/etc/linked1.png"],
-           must_not_find_re = ["^(?!WARNING: Skipping).*linked1.png"])
-
-## ====== Put symbolic link
-test_s3cmd("Put symbolic links", ['put', 'testsuite/etc/linked1.png', 's3://%s/xyz/' % bucket(1),'--follow-symlinks' ],
-           must_find = [ "'testsuite/etc/linked1.png' -> '%s/xyz/linked1.png'" % pbucket(1)])
-
-## ====== Sync symbolic links
-test_s3cmd("Sync symbolic links", ['sync', 'testsuite/', 's3://%s/xyz/' % bucket(1), '--no-encrypt', '--follow-symlinks' ],
-    must_find = ["remote copy: 'etc2/Logo.PNG' -> 'etc/linked.png'"],
-           # Don't want to recursively copy linked directories!
-           must_not_find_re = ["etc/more/linked-dir/more/give-me-more.txt",
-                               "etc/brokenlink.png"],
-           retcode = EX_PARTIAL)
-
-## ====== Multi source move
-test_s3cmd("Multi-source move", ['mv', '-r', '%s/copy/blahBlah/Blah.txt' % pbucket(2), '%s/copy/etc/' % pbucket(2), '%s/moved/' % pbucket(2)],
-    must_find = [ "move: '%s/copy/blahBlah/Blah.txt' -> '%s/moved/Blah.txt'" % (pbucket(2), pbucket(2)),
-                  "move: '%s/copy/etc/AtomicClockRadio.ttf' -> '%s/moved/AtomicClockRadio.ttf'" % (pbucket(2), pbucket(2)),
-                  "move: '%s/copy/etc/TypeRa.ttf' -> '%s/moved/TypeRa.ttf'" % (pbucket(2), pbucket(2)) ],
-    must_not_find = [ "blah.txt" ])
-
-## ====== Verify move
-test_s3cmd("Verify move", ['ls', '-r', pbucket(2)],
-    must_find = [ "%s/moved/Blah.txt" % pbucket(2),
-                  "%s/moved/AtomicClockRadio.ttf" % pbucket(2),
-                  "%s/moved/TypeRa.ttf" % pbucket(2),
-                  "%s/copy/blahBlah/blah.txt" % pbucket(2) ],
-    must_not_find = [ "%s/copy/blahBlah/Blah.txt" % pbucket(2),
-                      "%s/copy/etc/AtomicClockRadio.ttf" % pbucket(2),
-                      "%s/copy/etc/TypeRa.ttf" % pbucket(2) ])
-
-## ====== List all
-test_s3cmd("List all", ['la'],
-           must_find = [ "%s/urandom.bin" % pbucket(1)])
-
-## ====== Simple delete
-test_s3cmd("Simple delete", ['del', '%s/xyz/etc2/Logo.PNG' % pbucket(1)],
-    must_find = [ "delete: '%s/xyz/etc2/Logo.PNG'" % pbucket(1) ])
-
-## ====== Simple delete with rm
-test_s3cmd("Simple delete with rm", ['rm', '%s/xyz/test_rm/TypeRa.ttf' % pbucket(1)],
-    must_find = [ "delete: '%s/xyz/test_rm/TypeRa.ttf'" % pbucket(1) ])
-
-## ====== Create expiration rule with days and prefix
-test_s3cmd("Create expiration rule with days and prefix", ['expire', pbucket(1), '--expiry-days=365', '--expiry-prefix=log/'],
-    must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
-
-## ====== Create expiration rule with date and prefix
-test_s3cmd("Create expiration rule with date and prefix", ['expire', pbucket(1), '--expiry-date=2020-12-31T00:00:00.000Z', '--expiry-prefix=log/'],
-    must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
-
-## ====== Create expiration rule with days only
-test_s3cmd("Create expiration rule with days only", ['expire', pbucket(1), '--expiry-days=365'],
-    must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
-
-## ====== Create expiration rule with date only
-test_s3cmd("Create expiration rule with date only", ['expire', pbucket(1), '--expiry-date=2020-12-31T00:00:00.000Z'],
-    must_find = [ "Bucket '%s/': expiration configuration is set." % pbucket(1)])
-
-## ====== Get current expiration setting
-test_s3cmd("Get current expiration setting", ['info', pbucket(1)],
-    must_find = [ "Expiration Rule: all objects in this bucket will expire in '2020-12-31T00:00:00.000Z'"])
-
-## ====== Delete expiration rule
-test_s3cmd("Delete expiration rule", ['expire', pbucket(1)],
-    must_find = [ "Bucket '%s/': expiration configuration is deleted." % pbucket(1)])
-
-## ====== set Requester Pays flag
-test_s3cmd("Set requester pays", ['payer', '--requester-pays', pbucket(2)])
-
-## ====== get Requester Pays flag
-test_s3cmd("Get requester pays flag", ['info', pbucket(2)],
-    must_find = [ "Payer:     Requester"])
-
-## ====== ls using Requester Pays flag
-test_s3cmd("ls using requester pays flag", ['ls', '--requester-pays', pbucket(2)])
-
-## ====== clear Requester Pays flag
-test_s3cmd("Clear requester pays", ['payer', pbucket(2)])
-
-## ====== get Requester Pays flag
-test_s3cmd("Get requester pays flag", ['info', pbucket(2)],
-    must_find = [ "Payer:     BucketOwner"])
-
-## ====== Recursive delete maximum exceeded
-test_s3cmd("Recursive delete maximum exceeded", ['del', '--recursive', '--max-delete=1', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)],
-    must_not_find = [ "delete: '%s/xyz/etc/TypeRa.ttf'" % pbucket(1) ])
-
-## ====== Recursive delete
-test_s3cmd("Recursive delete", ['del', '--recursive', '--exclude', 'Atomic*', '%s/xyz/etc' % pbucket(1)],
-    must_find = [ "delete: '%s/xyz/etc/TypeRa.ttf'" % pbucket(1) ],
-    must_find_re = [ "delete: '.*/etc/logo.png'" ],
-    must_not_find = [ "AtomicClockRadio.ttf" ])
-
-## ====== Recursive delete with rm
-test_s3cmd("Recursive delete with rm", ['rm', '--recursive', '--exclude', 'Atomic*', '%s/xyz/test_rm' % pbucket(1)],
-    must_find = [ "delete: '%s/xyz/test_rm/more/give-me-more.txt'" % pbucket(1) ],
-    must_find_re = [ "delete: '.*/test_rm/logo.png'" ],
-    must_not_find = [ "AtomicClockRadio.ttf" ])
-
-## ====== Recursive delete all
-test_s3cmd("Recursive delete all", ['del', '--recursive', '--force', pbucket(1)],
-    must_find_re = [ "delete: '.*binary/random-crap'" ])
-
-## ====== Remove empty bucket
-test_s3cmd("Remove empty bucket", ['rb', pbucket(1)],
-    must_find = [ "Bucket '%s/' removed" % pbucket(1) ])
-
-## ====== Remove remaining buckets
-test_s3cmd("Remove remaining buckets", ['rb', '--recursive', pbucket(2), pbucket(3)],
-    must_find = [ "Bucket '%s/' removed" % pbucket(2),
-              "Bucket '%s/' removed" % pbucket(3) ])
-
-# vim:et:ts=4:sts=4:ai
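
For orientation, the removed `run-tests.py` above defines a small harness (`test()`, `test_s3cmd()`, `pbucket()`) in which each check asserts on an exit code and on output patterns.  A hypothetical check expressed with those helpers might look like this (the label and patterns are illustrative and are not part of the suite):

```python
# Hypothetical check using the helpers defined in run-tests.py above:
# list bucket 1 and require that its URI appears with no ERROR lines.
test_s3cmd("List bucket (example)", ['ls', pbucket(1)],
           retcode = 0,
           must_find = [ pbucket(1) ],
           must_not_find_re = [ "^ERROR:" ])
```

Individual tests can also be selected by number when invoking the script, e.g. `run-tests.py 1..10 -5` runs tests 1 through 10 except test 5, per the argument parsing shown above.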
diff --git a/s3cmd b/s3cmd
old mode 100755
new mode 100644
index d59a4e4..8168059
--- a/s3cmd
+++ b/s3cmd
@@ -211,9 +211,11 @@ def subcmd_bucket_list(s3, uri, limit):
         # %(size)5s%(coeff)1s
         format_size = u"%5d%1s"
         dir_str = u"DIR".rjust(6)
+        dirobj_str = u"DIROBJ".rjust(6)
     else:
         format_size = u"%12d%s"
         dir_str = u"DIR".rjust(12)
+        dirobj_str = u"DIROBJ".rjust(12)
     if cfg.long_listing:
         format_string = u"%(timestamp)16s %(size)s  %(md5)-35s  %(storageclass)-11s  %(uri)s"
     elif cfg.list_md5:
@@ -232,24 +234,29 @@ def subcmd_bucket_list(s3, uri, limit):
     for object in response["list"]:
         md5 = object.get('ETag', '').strip('"\'')
         storageclass = object.get('StorageClass','')
+        object_key = object['Key']
 
         if cfg.list_md5:
             if '-' in md5: # need to get md5 from the object
-                object_uri = uri.compose_uri(bucket, object["Key"])
+                object_uri = uri.compose_uri(bucket, object_key)
                 info_response = s3.object_info(S3Uri(object_uri))
                 try:
                     md5 = info_response['s3cmd-attrs']['md5']
                 except KeyError:
                     pass
 
-        size_and_coeff = formatSize(object["Size"],
-                                    Config().human_readable_sizes)
+        if object_key[-1] == '/':
+            size_str = dirobj_str
+        else:
+            size_and_coeff = formatSize(object["Size"], Config().human_readable_sizes)
+            size_str = format_size % size_and_coeff
+
         output(format_string % {
             "timestamp": formatDateTime(object["LastModified"]),
-            "size" : format_size % size_and_coeff,
+            "size" : size_str,
             "md5" : md5,
             "storageclass" : storageclass,
-            "uri": uri.compose_uri(bucket, object["Key"]),
+            "uri": uri.compose_uri(bucket, object_key),
             })
 
     if response["truncated"]:
@@ -287,7 +294,7 @@ def cmd_website_info(args):
                 output(u"Index document:   %s" % response['index_document'])
                 output(u"Error document:   %s" % response['error_document'])
             else:
-                output(u"Bucket %s: Unable to receive website configuration." % (uri.uri()))
+                output(u"Bucket %s: No website configuration found." % (uri.uri()))
         except S3Error as e:
             if e.info["Code"] in S3.codes:
                 error(S3.codes[e.info["Code"]] % uri.bucket())
@@ -396,7 +403,8 @@ def cmd_object_put(args):
     if len(args) == 0:
         raise ParameterError("Nothing to upload. Expecting a local file or directory.")
 
-    local_list, single_file_local, exclude_list, total_size_local = fetch_local_list(args, is_src = True)
+    local_list, single_file_local, exclude_list, total_size_local = fetch_local_list(
+        args, is_src=True, with_dirs=cfg.keep_dirs)
 
     local_count = len(local_list)
 
@@ -563,7 +571,8 @@ def cmd_object_get(args):
                 local_filename = destination_base + key
                 if os.path.sep != "/":
                     local_filename = os.path.sep.join(local_filename.split("/"))
-                remote_list[key]['local_filename'] = local_filename
+                remote_obj = remote_list[key]
+                remote_obj['local_filename'] = local_filename
 
     if cfg.dry_run:
         for key in exclude_list:
@@ -582,8 +591,14 @@ def cmd_object_get(args):
         uri = S3Uri(item['object_uri_str'])
         ## Encode / Decode destination with "replace" to make sure it's compatible with current encoding
         destination = unicodise_safe(item['local_filename'])
+        destination_bytes = deunicodise(destination)
+        last_modified_ts = item['timestamp']
+
         seq_label = "[%d of %d]" % (seq, remote_count)
 
+        is_dir_obj = item['is_dir']
+
+        response = None
         start_position = 0
 
         if destination == "-":
@@ -591,26 +606,47 @@ def cmd_object_get(args):
             dst_stream = io.open(sys.__stdout__.fileno(), mode='wb', closefd=False)
             dst_stream.stream_name = u'<stdout>'
             file_exists = True
+        elif is_dir_obj:
+            ## Folder
+            try:
+                file_exists = os.path.exists(destination_bytes)
+                if not file_exists:
+                    info(u"Creating directory: %s" % destination)
+                    os.makedirs(destination_bytes)
+            except IOError as e:
+                # If dir was created at the same time by a race condition, it is ok.
+                if e.errno != errno.EEXIST:
+                    error(u"Creation of directory '%s' failed (Reason: %s)"
+                        % (destination, e.strerror))
+                    if cfg.stop_on_error:
+                        error(u"Exiting now because of --stop-on-error")
+                        raise
+                    ret = EX_PARTIAL
+                    continue
+            if file_exists and not cfg.force:
+                # Directory already exists and we don't want to update metadata
+                continue
+            dst_stream = None
         else:
             ## File
             try:
-                file_exists = os.path.exists(deunicodise(destination))
+                file_exists = os.path.exists(destination_bytes)
                 try:
-                    dst_stream = io.open(deunicodise(destination), mode='ab')
+                    dst_stream = io.open(destination_bytes, mode='ab')
                     dst_stream.stream_name = destination
                 except IOError as e:
                     if e.errno != errno.ENOENT:
                         raise
-                    basename = destination[:destination.rindex(os.path.sep)]
-                    info(u"Creating directory: %s" % basename)
-                    os.makedirs(deunicodise(basename))
-                    dst_stream = io.open(deunicodise(destination), mode='ab')
+                    dst_dir_bytes = os.path.dirname(destination)
+                    info(u"Creating directory: %s" % unicodise(dst_dir_bytes))
+                    os.makedirs(dst_dir_bytes)
+                    dst_stream = io.open(destination_bytes, mode='ab')
                     dst_stream.stream_name = destination
 
                 if file_exists:
                     force = False
                     skip = False
-                    if Config().get_continue:
+                    if cfg.get_continue:
                         start_position = dst_stream.tell()
                         item_size = item['size']
                         if start_position == item_size:
@@ -619,9 +655,9 @@ def cmd_object_get(args):
                             info(u"Download forced for '%s' as source is "
                                  "smaller than local file" % destination)
                             force = True
-                    elif Config().force:
+                    elif cfg.force:
                         force = True
-                    elif Config().skip_existing:
+                    elif cfg.skip_existing:
                         skip = True
                     else:
                         dst_stream.close()
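
The new directory-object branch above follows the common pattern of creating a directory while tolerating a concurrent creator (EEXIST is not treated as an error).  A standalone sketch of that pattern, separate from s3cmd's code; on Python 3 alone, `os.makedirs(path, exist_ok=True)` covers the same common case:

```python
import errno
import os

def ensure_directory(path):
    """Create `path` (and any missing parents); a concurrent creator is not an error."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise unless the directory already exists (e.g. created by a racing process).
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
```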
@@ -651,16 +687,20 @@ def cmd_object_get(args):
                 continue
 
         try:
-            try:
-                response = s3.object_get(uri, dst_stream, destination, start_position = start_position, extra_label = seq_label)
-            finally:
-                dst_stream.close()
+            # Retrieve the file content
+            if dst_stream:
+                try:
+                    response = s3.object_get(uri, dst_stream, destination,
+                                             start_position=start_position,
+                                             extra_label=seq_label)
+                finally:
+                    dst_stream.close()
         except S3DownloadError as e:
             error(u"Download of '%s' failed (Reason: %s)" % (destination, e))
             # Delete, only if file didn't exist before!
             if not file_exists:
                 debug(u"object_get failed for '%s', deleting..." % (destination,))
-                os.unlink(deunicodise(destination))
+                os.unlink(destination_bytes)
             if cfg.stop_on_error:
                 error(u"Exiting now because of --stop-on-error")
                 raise
@@ -670,17 +710,36 @@ def cmd_object_get(args):
             error(u"Download of '%s' failed (Reason: %s)" % (destination, e))
             if not file_exists: # Delete, only if file didn't exist before!
                 debug(u"object_get failed for '%s', deleting..." % (destination,))
-                os.unlink(deunicodise(destination))
+                os.unlink(destination_bytes)
             raise
 
-        if "x-amz-meta-s3tools-gpgenc" in response["headers"]:
-            gpg_decrypt(destination, response["headers"]["x-amz-meta-s3tools-gpgenc"])
-            response["size"] = os.stat(deunicodise(destination))[6]
-        if "last-modified" in response["headers"] and destination != "-":
-            last_modified = time.mktime(time.strptime(response["headers"]["last-modified"], "%a, %d %b %Y %H:%M:%S GMT"))
-            os.utime(deunicodise(destination), (last_modified, last_modified))
-            debug("set mtime to %s" % last_modified)
-        if not Config().progress_meter and destination != "-":
+        """
+        # TODO Enable once we add restoring s3cmd-attrs in get command
+        if is_dir_obj and cfg.preserve_attrs:
+            # Retrieve directory info to restore s3cmd-attrs metadata
+            try:
+                response = s3.object_info(uri)
+            except S3Error as exc:
+                error(u"Retrieving directory metadata for '%s' failed (Reason: %s)"
+                      % (destination, exc))
+                if cfg.stop_on_error:
+                    error(u"Exiting now because of --stop-on-error")
+                    raise
+                ret = EX_PARTIAL
+                continue
+        """
+
+        if response:
+            if "x-amz-meta-s3tools-gpgenc" in response["headers"]:
+                gpg_decrypt(destination, response["headers"]["x-amz-meta-s3tools-gpgenc"])
+                response["size"] = os.stat(destination_bytes)[6]
+            if "last-modified" in response["headers"]:
+                last_modified_ts = time.mktime(time.strptime(response["headers"]["last-modified"], "%a, %d %b %Y %H:%M:%S GMT"))
+
+        if last_modified_ts and destination != "-":
+            os.utime(destination_bytes, (last_modified_ts, last_modified_ts))
+            debug("set mtime to %s" % last_modified_ts)
+        if not Config().progress_meter and destination != "-" and not is_dir_obj:
             speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
             output(u"download: '%s' -> '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s)" %
                 (uri, destination, response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1]))
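
The timestamp handling above parses the HTTP Last-Modified header with the strptime pattern shown and applies it via os.utime.  A self-contained sketch of that conversion (note that, as in the hunk, time.mktime interprets the parsed value in local time):

```python
import os
import time

def apply_last_modified(path, header_value):
    """Parse a Last-Modified value such as 'Sat, 19 Nov 2022 10:00:00 GMT'
    and set it as both atime and mtime on `path`."""
    ts = time.mktime(time.strptime(header_value, "%a, %d %b %Y %H:%M:%S GMT"))
    os.utime(path, (ts, ts))
    return ts
```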
@@ -1023,6 +1082,8 @@ def cmd_info(args):
                                               or 'none'))
                 output(u"   Payer:     %s" % (info['requester-pays']
                                               or 'none'))
+                output(u"   Versioning:%s" % (info['versioning']
+                                              or 'none'))
                 expiration = s3.expiration_info(uri, cfg.bucket_location)
                 if expiration and expiration['prefix'] is not None:
                     expiration_desc = "Expiration Rule: "
@@ -1115,8 +1176,9 @@ def cmd_sync_remote2remote(args):
         if source_arg.endswith('/'):
             destbase_with_source_list.add(destination_base)
         else:
-            destbase_with_source_list.add(os.path.join(destination_base,
-                                                  os.path.basename(source_arg)))
+            destbase_with_source_list.add(s3path.join(
+                destination_base, s3path.basename(source_arg)
+            ))
 
     stats_info = StatsInfo()
 
@@ -1262,10 +1324,17 @@ def cmd_sync_remote2local(args):
         if cfg.max_delete > 0 and len(local_list) > cfg.max_delete:
             warning(u"delete: maximum requested number of deletes would be exceeded, none performed.")
             return total_size
-        for key in local_list:
-            os.unlink(deunicodise(local_list[key]['full_name']))
-            output(u"delete: '%s'" % local_list[key]['full_name'])
-            total_size += local_list[key].get(u'size', 0)
+
+        # Reverse used to delete children before parent folders
+        for key in reversed(local_list):
+            item = local_list[key]
+            full_path = item['full_name']
+            if item.get('is_dir', True):
+                os.rmdir(deunicodise(full_path))
+            else:
+                os.unlink(deunicodise(full_path))
+            output(u"delete: '%s'" % full_path)
+            total_size += item.get(u'size', 0)
         return len(local_list), total_size
 
     destination_base = args[-1]
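
The reversed iteration above removes children before their parent directories, which is what lets os.rmdir succeed on directories emptied earlier in the same pass.  A minimal sketch of that ordering, assuming the path list is sorted so that parents precede children (as the local file list above is):

```python
import os

def remove_synced_entries(paths):
    """Remove files first, then their (now empty) parent directories,
    by walking a parent-before-child sorted list in reverse."""
    for path in reversed(paths):
        if os.path.isdir(path):
            os.rmdir(path)   # succeeds only once the directory is empty
        else:
            os.unlink(path)
```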
@@ -1298,24 +1367,22 @@ def cmd_sync_remote2local(args):
         else:
             destbase_with_source_list.add(os.path.join(destination_base,
                                                       os.path.basename(source_arg)))
-    local_list, single_file_local, dst_exclude_list, local_total_size = fetch_local_list(destbase_with_source_list, is_src = False, recursive = True)
+    # with_dirs is True, as we always want to compare source with the actual full local content
+    local_list, single_file_local, dst_exclude_list, local_total_size = fetch_local_list(
+        destbase_with_source_list, is_src=False, recursive=True, with_dirs=True
+    )
 
     local_count = len(local_list)
     remote_count = len(remote_list)
     orig_remote_count = remote_count
 
-    info(u"Found %d remote files, %d local files" % (remote_count, local_count))
+    info(u"Found %d remote file objects, %d local files and directories" % (remote_count, local_count))
 
     remote_list, local_list, update_list, copy_pairs = compare_filelists(remote_list, local_list, src_remote = True, dst_remote = False)
 
-    local_count = len(local_list)
-    remote_count = len(remote_list)
-    update_count = len(update_list)
-    copy_pairs_count = len(copy_pairs)
-
-    info(u"Summary: %d remote files to download, %d local files to delete, %d local files to hardlink" % (remote_count + update_count, local_count, copy_pairs_count))
+    dir_cache = {}
 
-    def _set_local_filename(remote_list, destination_base, source_args):
+    def _set_local_filename(remote_list, destination_base, source_args, dir_cache):
         if len(remote_list) == 0:
             return
 
@@ -1334,14 +1401,53 @@ def cmd_sync_remote2local(args):
 
         if destination_base[-1] != os.path.sep:
             destination_base += os.path.sep
+
         for key in remote_list:
             local_filename = destination_base + key
             if os.path.sep != "/":
                 local_filename = os.path.sep.join(local_filename.split("/"))
-            remote_list[key]['local_filename'] = local_filename
 
-    _set_local_filename(remote_list, destination_base, source_args)
-    _set_local_filename(update_list, destination_base, source_args)
+            item = remote_list[key]
+            item['local_filename'] = local_filename
+
+            # Create parent folders if needed
+            # Extract key dirname
+            key_dir_path = key.rsplit('/', 1)[0]
+            dst_dir = None
+            if key_dir_path not in dir_cache:
+                if cfg.dry_run:
+                    mkdir_ret = True
+                else:
+                    dst_dir = unicodise(os.path.dirname(deunicodise(local_filename)))
+                    mkdir_ret = Utils.mkdir_with_parents(dst_dir)
+                # Also add to cache, all the parent dirs
+                path = key_dir_path
+                while path and path not in dir_cache:
+                    dir_cache[path] = mkdir_ret
+                    last_slash_idx = path.rfind('/')
+                    if last_slash_idx in [-1, 0]:
+                        break
+                    path = path[:last_slash_idx]
+            if dir_cache[key_dir_path] == False:
+                if not dst_dir:
+                    dst_dir = unicodise(os.path.dirname(deunicodise(local_filename)))
+                if cfg.stop_on_error:
+                    error(u"Exiting now because of --stop-on-error")
+                    raise OSError("Download of '%s' failed (Reason: %s destination directory is not writable)" % (key, dst_dir))
+                error(u"Download of '%s' failed (Reason: %s destination directory is not writable)" % (key, dst_dir))
+                item['mark_failed'] = True
+                ret = EX_PARTIAL
+                continue
+
+    _set_local_filename(remote_list, destination_base, source_args, dir_cache)
+    _set_local_filename(update_list, destination_base, source_args, dir_cache)
+
+    local_count = len(local_list)
+    remote_count = len(remote_list)
+    update_count = len(update_list)
+    copy_pairs_count = len(copy_pairs)
+
+    info(u"Summary: %d remote files to download, %d local files to delete, %d local files to hardlink" % (remote_count + update_count, local_count, copy_pairs_count))
 
     if cfg.dry_run:
         keys = filedicts_to_keys(src_exclude_list, dst_exclude_list)
@@ -1372,7 +1478,7 @@ def cmd_sync_remote2local(args):
     else:
         deleted_count, deleted_size = (0, 0)
 
-    def _download(remote_list, seq, total, total_size, dir_cache):
+    def _download(remote_list, seq, total, total_size):
         original_umask = os.umask(0)
         os.umask(original_umask)
         file_list = remote_list.keys()
@@ -1383,29 +1489,28 @@ def cmd_sync_remote2local(args):
             item = remote_list[file]
             uri = S3Uri(item['object_uri_str'])
             dst_file = item['local_filename']
-            is_empty_directory = dst_file.endswith('/')
+            last_modified_ts = item['timestamp']
+            is_dir = item['is_dir']
             seq_label = "[%d of %d]" % (seq, total)
 
-            dst_dir = unicodise(os.path.dirname(deunicodise(dst_file)))
-            if not dst_dir in dir_cache:
-                dir_cache[dst_dir] = Utils.mkdir_with_parents(dst_dir)
-            if dir_cache[dst_dir] == False:
-                if cfg.stop_on_error:
-                    error(u"Exiting now because of --stop-on-error")
-                    raise OSError("Download of '%s' failed (Reason: %s destination directory is not writable)" % (file, dst_dir))
-                error(u"Download of '%s' failed (Reason: %s destination directory is not writable)" % (file, dst_dir))
-                ret = EX_PARTIAL
+            if item.get('mark_failed', False):
+                # Item is skipped because there was previously an issue with
+                # its destination directory.
                 continue
 
+            response = None
+            dst_files_b = deunicodise(dst_file)
             try:
                 chkptfname_b = ''
-                if not is_empty_directory: # ignore empty directory at S3:
+                # ignore empty directory at S3:
+                if not is_dir:
                     debug(u"dst_file=%s" % dst_file)
                     # create temporary files (of type .s3cmd.XXXX.tmp) in the same directory
                     # for downloading and then rename once downloaded
                     # unicode provided to mkstemp argument
-                    chkptfd, chkptfname_b = tempfile.mkstemp(u".tmp", u".s3cmd.",
-                                                           os.path.dirname(dst_file))
+                    chkptfd, chkptfname_b = tempfile.mkstemp(
+                        u".tmp", u".s3cmd.", os.path.dirname(dst_file)
+                    )
                     with io.open(chkptfd, mode='wb') as dst_stream:
                         dst_stream.stream_name = unicodise(chkptfname_b)
                         debug(u"created chkptfname=%s" % dst_stream.stream_name)
@@ -1415,20 +1520,24 @@ def cmd_sync_remote2local(args):
                     if os.name == "nt":
                         # Windows is really a bad OS. Rename can't overwrite an existing file
                         try:
-                            os.unlink(deunicodise(dst_file))
+                            os.unlink(dst_files_b)
                         except OSError:
                             pass
-                    os.rename(chkptfname_b, deunicodise(dst_file))
-                    debug(u"renamed chkptfname=%s to dst_file=%s" % (dst_stream.stream_name, dst_file))
+                    os.rename(chkptfname_b, dst_files_b)
+                    debug(u"renamed chkptfname=%s to dst_file=%s"
+                          % (dst_stream.stream_name, dst_file))
             except OSError as exc:
                 allow_partial = True
 
                 if exc.errno == errno.EISDIR:
-                    error(u"Download of '%s' failed (Reason: %s is a directory)" % (file, dst_file))
+                    error(u"Download of '%s' failed (Reason: %s is a directory)"
+                          % (file, dst_file))
                 elif os.name != "nt" and exc.errno == errno.ETXTBSY:
-                    error(u"Download of '%s' failed (Reason: %s is currently open for execute, cannot be overwritten)" % (file, dst_file))
+                    error(u"Download of '%s' failed (Reason: %s is currently open for execute, cannot be overwritten)"
+                          % (file, dst_file))
                 elif exc.errno == errno.EPERM or exc.errno == errno.EACCES:
-                    error(u"Download of '%s' failed (Reason: %s permission denied)" % (file, dst_file))
+                    error(u"Download of '%s' failed (Reason: %s permission denied)"
+                          % (file, dst_file))
                 elif exc.errno == errno.EBUSY:
                     error(u"Download of '%s' failed (Reason: %s is busy)" % (file, dst_file))
                 elif exc.errno == errno.EFBIG:
@@ -1489,46 +1598,72 @@ def cmd_sync_remote2local(args):
 
             try:
                 # set permissions on destination file
-                if not is_empty_directory: # a normal file
+                if not is_dir: # a normal file
                     mode = 0o777 - original_umask
                 else:
                     # an empty directory, make them readable/executable
                     mode = 0o775
                 debug(u"mode=%s" % oct(mode))
-                os.chmod(deunicodise(dst_file), mode)
+                os.chmod(dst_files_b, mode)
             except:
                 raise
 
-            # because we don't upload empty directories,
-            # we can continue the loop here, we won't be setting stat info.
-            # if we do start to upload empty directories, we'll have to reconsider this.
-            if is_empty_directory:
-                continue
+            # We can't get metadata for directories from an object_get, so we have to
+            # request them explicitly
+            if is_dir and cfg.preserve_attrs:
+                try:
+                    response = s3.object_info(uri)
+                except S3Error as exc:
+                    error(u"Retrieving directory metadata for '%s' failed (Reason: %s)"
+                        % (dst_file, exc))
+                    if cfg.stop_on_error:
+                        error(u"Exiting now because of --stop-on-error")
+                        raise
+                    ret = EX_PARTIAL
+                    continue
 
             try:
-                if 's3cmd-attrs' in response and cfg.preserve_attrs:
+                if response and 's3cmd-attrs' in response and cfg.preserve_attrs:
                     attrs = response['s3cmd-attrs']
-                    if 'mode' in attrs:
-                        os.chmod(deunicodise(dst_file), int(attrs['mode']))
-                    if 'mtime' in attrs or 'atime' in attrs:
-                        mtime = ('mtime' in attrs) and int(attrs['mtime']) or int(time.time())
-                        atime = ('atime' in attrs) and int(attrs['atime']) or int(time.time())
-                        os.utime(deunicodise(dst_file), (atime, mtime))
-                    if 'uid' in attrs and 'gid' in attrs:
-                        uid = int(attrs['uid'])
-                        gid = int(attrs['gid'])
-                        os.lchown(deunicodise(dst_file),uid,gid)
-                elif 'last-modified' in response['headers']:
-                    last_modified = time.mktime(time.strptime(response["headers"]["last-modified"], "%a, %d %b %Y %H:%M:%S GMT"))
-                    os.utime(deunicodise(dst_file), (last_modified, last_modified))
-                    debug("set mtime to %s" % last_modified)
+                    attr_mode = attrs.get('mode')
+                    attr_mtime = attrs.get('mtime')
+                    attr_atime = attrs.get('atime')
+                    attr_uid = attrs.get('uid')
+                    attr_gid = attrs.get('gid')
+                    if attr_mode is not None:
+                        os.chmod(dst_files_b, int(attr_mode))
+                    if attr_mtime is not None or attr_atime is not None:
+                        default_time = int(time.time())
+                        mtime = attr_mtime is not None and int(attr_mtime) or default_time
+                        atime = attr_atime is not None and int(attr_atime) or default_time
+                        os.utime(dst_files_b, (atime, mtime))
+                    if attr_uid is not None and attr_gid is not None:
+                        uid = int(attr_uid)
+                        gid = int(attr_gid)
+                        try:
+                            os.lchown(dst_files_b, uid, gid)
+                        except Exception as exc:
+                            exc.failed_step = 'lchown'
+                            raise
+                else:
+                    if response and 'last-modified' in response['headers']:
+                        last_modified_ts = time.mktime(time.strptime(
+                            response["headers"]["last-modified"],
+                            "%a, %d %b %Y %H:%M:%S GMT"
+                        ))
+                    if last_modified_ts:
+                        os.utime(dst_files_b, (last_modified_ts, last_modified_ts))
+                        debug("set mtime to %s" % last_modified_ts)
             except OSError as e:
                 ret = EX_PARTIAL
                 if e.errno == errno.EEXIST:
-                    warning(u"%s exists - not overwriting" % dst_file)
+                    warning(u"'%s' exists - not overwriting" % dst_file)
                     continue
                 if e.errno in (errno.EPERM, errno.EACCES):
-                    warning(u"%s not writable: %s" % (dst_file, e.strerror))
+                    if getattr(e, 'failed_step', None) == 'lchown':
+                        warning(u"Can't set owner/group: '%s' (%s)" % (dst_file, e.strerror))
+                    else:
+                        warning(u"Attrs not writable: '%s' (%s)" % (dst_file, e.strerror))
                     if cfg.stop_on_error:
                         raise e
                     continue
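
With --preserve in effect, the stored "s3cmd-attrs" metadata is replayed onto the downloaded file, and the lchown step tags the exception it raises so the shared OSError handler can report an ownership failure separately from other attribute failures. A rough standalone sketch of that logic (the helper name and example values are illustrative only):

import os
import time

def apply_s3cmd_attrs(dst_file, attrs):
    # 'attrs' is the decoded s3cmd-attrs dictionary; every key is optional.
    mode = attrs.get("mode")
    if mode is not None:
        os.chmod(dst_file, int(mode))
    mtime, atime = attrs.get("mtime"), attrs.get("atime")
    if mtime is not None or atime is not None:
        now = int(time.time())
        os.utime(dst_file, (int(atime) if atime is not None else now,
                            int(mtime) if mtime is not None else now))
    uid, gid = attrs.get("uid"), attrs.get("gid")
    if uid is not None and gid is not None:
        try:
            os.lchown(dst_file, int(uid), int(gid))  # POSIX only
        except OSError as exc:
            # Tag the exception so the caller can tell an ownership
            # failure apart from a chmod/utime failure.
            exc.failed_step = "lchown"
            raise

Tagging the exception instead of handling it locally keeps the existing EPERM/EACCES handling in one place while still letting the warning say whether it was the owner/group or the other attributes that could not be set.
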
@@ -1542,18 +1677,16 @@ def cmd_sync_remote2local(args):
                 if cfg.stop_on_error:
                     raise OSError(e)
                 continue
-            finally:
-                try:
-                    os.remove(chkptfname_b)
-                except Exception:
-                    pass
 
-            speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
-            if not Config().progress_meter:
-                output(u"download: '%s' -> '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
-                    (uri, dst_file, response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1],
-                    seq_label))
-            total_size += response["size"]
+            if is_dir:
+                output(u"mkdir: '%s' -> '%s' %s" % (uri, dst_file, seq_label))
+            else:
+                speed_fmt = formatSize(response["speed"], human_readable = True, floating_point = True)
+                if not Config().progress_meter:
+                    output(u"download: '%s' -> '%s' (%d bytes in %0.1f seconds, %0.2f %sB/s) %s" %
+                        (uri, dst_file, response["size"], response["elapsed"], speed_fmt[0], speed_fmt[1],
+                        seq_label))
+                total_size += response["size"]
             if Config().delete_after_fetch:
                 s3.object_delete(uri)
                 output(u"File '%s' removed after syncing" % (uri))
@@ -1562,16 +1695,22 @@ def cmd_sync_remote2local(args):
     size_transferred = 0
     total_elapsed = 0.0
     timestamp_start = time.time()
-    dir_cache = {}
     seq = 0
-    ret, seq, size_transferred = _download(remote_list, seq, remote_count + update_count, size_transferred, dir_cache)
-    status, seq, size_transferred = _download(update_list, seq, remote_count + update_count, size_transferred, dir_cache)
+    ret, seq, size_transferred = _download(remote_list, seq, remote_count + update_count, size_transferred)
+    remote_list = None
+
+    status, seq, size_transferred = _download(update_list, seq, remote_count + update_count, size_transferred)
     if ret == EX_OK:
         ret = status
+    update_list = None
 
+    _set_local_filename(copy_pairs, destination_base, source_args, dir_cache)
     n_copies, size_copies, failed_copy_list = local_copy(copy_pairs, destination_base)
-    _set_local_filename(failed_copy_list, destination_base, source_args)
-    status, seq, size_transferred = _download(failed_copy_list, seq, len(failed_copy_list) + remote_count + update_count, size_transferred, dir_cache)
+    copy_pairs = None
+    dir_cache = None
+
+    # Download files that failed during local_copy
+    status, seq, size_transferred = _download(failed_copy_list, seq, len(failed_copy_list) + remote_count + update_count, size_transferred)
     if ret == EX_OK:
         ret = status
 
@@ -1608,19 +1747,23 @@ def local_copy(copy_pairs, destination_base):
     # For instance all empty files would become hardlinked together!
     saved_bytes = 0
     failed_copy_list = FileDict()
-    for (src_obj, dst1, relative_file, md5) in copy_pairs:
-        src_file = os.path.join(destination_base, dst1)
-        dst_file = os.path.join(destination_base, relative_file)
-        dst_dir = os.path.dirname(deunicodise(dst_file))
+
+    if destination_base[-1] != os.path.sep:
+        destination_base += os.path.sep
+
+    for relative_file, src_obj in copy_pairs.items():
+        src_file = destination_base + src_obj['copy_src']
+        if os.path.sep != "/":
+            src_file = os.path.sep.join(src_file.split("/"))
+
+        dst_file = src_obj['local_filename']
         try:
-            if not os.path.isdir(deunicodise(dst_dir)):
-                debug("MKDIR %s" % dst_dir)
-                os.makedirs(deunicodise(dst_dir))
             debug(u"Copying %s to %s" % (src_file, dst_file))
             shutil.copy2(deunicodise(src_file), deunicodise(dst_file))
             saved_bytes += src_obj.get(u'size', 0)
         except (IOError, OSError) as e:
-            warning(u'Unable to copy or hardlink files %s -> %s (Reason: %s)' % (src_file, dst_file, e))
+            warning(u'Unable to copy or hardlink files %s -> %s (Reason: %s)'
+                    % (src_file, dst_file, e))
             failed_copy_list[relative_file] = src_obj
     return len(copy_pairs), saved_bytes, failed_copy_list
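
After this rework copy_pairs is no longer a list of tuples but a FileDict keyed by the destination-relative filename, with each entry pointing at the already-transferred file that has identical content; the split("/")/os.path.sep.join above merely rewrites the key's forward slashes to the native separator on Windows. Roughly, an entry looks like this (paths and checksum are made-up examples; the field names are the ones used above):

copy_pairs = {
    u"photos/duplicate.jpg": {
        u"copy_src": u"photos/original.jpg",          # earlier file with the same checksum
        u"md5": u"0123456789abcdef0123456789abcdef",  # hypothetical MD5
        u"size": 48213,
        # filled in by _set_local_filename() before local_copy() runs:
        u"local_filename": u"/backup/photos/duplicate.jpg",
    },
}
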
 
@@ -1631,32 +1774,36 @@ def remote_copy(s3, copy_pairs, destination_base, uploaded_objects_list=None,
     failed_copy_list = FileDict()
     seq = 0
     src_count = len(copy_pairs)
-    for (src_obj, dst1, dst2, src_md5) in copy_pairs:
+    for relative_file, src_obj in copy_pairs.items():
+        copy_src_file = src_obj['copy_src']
+        src_md5 = src_obj['md5']
+
         seq += 1
-        debug(u"Remote Copying from %s to %s" % (dst1, dst2))
-        dst1_uri = S3Uri(destination_base + dst1)
-        dst2_uri = S3Uri(destination_base + dst2)
+        debug(u"Remote Copying from %s to %s" % (copy_src_file, relative_file))
+        src_uri = S3Uri(destination_base + copy_src_file)
+        dst_uri = S3Uri(destination_base + relative_file)
         src_obj_size = src_obj.get(u'size', 0)
         seq_label = "[%d of %d]" % (seq, src_count)
         extra_headers = copy(cfg.extra_headers)
         if metadata_update:
             # source is a real local file with its own personal metadata
-            attr_header = _build_attr_header(src_obj, dst2, src_md5)
+            attr_header = _build_attr_header(src_obj, relative_file, src_md5)
             debug(u"attr_header: %s" % attr_header)
             extra_headers.update(attr_header)
             extra_headers['content-type'] = \
                 s3.content_type(filename=src_obj['full_name'])
         try:
-            s3.object_copy(dst1_uri, dst2_uri, extra_headers,
+            s3.object_copy(src_uri, dst_uri, extra_headers,
                            src_size=src_obj_size,
                            extra_label=seq_label)
-            output(u"remote copy: '%s' -> '%s'  %s" % (dst1, dst2, seq_label))
+            output(u"remote copy: '%s' -> '%s'  %s"
+                   % (copy_src_file, relative_file, seq_label))
             saved_bytes += src_obj_size
             if uploaded_objects_list is not None:
-                uploaded_objects_list.append(dst2)
+                uploaded_objects_list.append(relative_file)
         except Exception:
-            warning(u"Unable to remote copy files '%s' -> '%s'" % (dst1_uri, dst2_uri))
-            failed_copy_list[dst2] = src_obj
+            warning(u"Unable to remote copy files '%s' -> '%s'" % (src_uri, dst_uri))
+            failed_copy_list[relative_file] = src_obj
     return (len(copy_pairs), saved_bytes, failed_copy_list)
 
 def _build_attr_header(src_obj, src_relative_name, md5=None):
@@ -1803,7 +1950,9 @@ def cmd_sync_local2remote(args):
 
         stats_info = StatsInfo()
 
-        local_list, single_file_local, src_exclude_list, local_total_size = fetch_local_list(args[:-1], is_src = True, recursive = True)
+        local_list, single_file_local, src_exclude_list, local_total_size = fetch_local_list(
+            args[:-1], is_src=True, recursive=True, with_dirs=cfg.keep_dirs
+        )
 
         # - The source path is either like "/myPath/my_src_folder" and
         # the user want to upload this single folder and optionally only delete
@@ -1817,8 +1966,9 @@ def cmd_sync_local2remote(args):
         for source_arg in source_args:
             if not source_arg.endswith('/') and os.path.basename(source_arg) != '.' \
                and not single_file_local:
-                destbase_with_source_list.add(os.path.join(destination_base,
-                                                    os.path.basename(source_arg)))
+                destbase_with_source_list.add(s3path.join(
+                    destination_base, os.path.basename(source_arg)
+                ))
             else:
                 destbase_with_source_list.add(destination_base)
 
@@ -1858,8 +2008,8 @@ def cmd_sync_local2remote(args):
                 output(u"upload: '%s' -> '%s'" % (local_list[key]['full_name'], local_list[key]['remote_uri']))
             for key in update_list:
                 output(u"upload: '%s' -> '%s'" % (update_list[key]['full_name'], update_list[key]['remote_uri']))
-            for (src_obj, dst1, dst2, md5) in copy_pairs:
-                output(u"remote copy: '%s' -> '%s'" % (dst1, dst2))
+            for relative_file, item in copy_pairs.items():
+                output(u"remote copy: '%s' -> '%s'" % (item['copy_src'], relative_file))
             if cfg.delete_removed:
                 for key in remote_list:
                     output(u"delete: '%s'" % remote_list[key]['object_uri_str'])
@@ -2047,6 +2197,24 @@ def cmd_setacl(args):
         update_acl(s3, uri, seq_label)
     return EX_OK
 
+def cmd_setversioning(args):
+    cfg = Config()
+    s3 = S3(cfg)
+    bucket_uri = S3Uri(args[0])
+    if bucket_uri.object():
+        raise ParameterError("Only bucket name is required for [setversioning] command")
+    status = args[1]
+    if status not in ["enable", "disable"]:
+        raise ParameterError("Must be 'enable' or 'disable'. Got: %s" % status)
+
+    enabled = True if status == "enable" else False
+    response = s3.set_versioning(bucket_uri, enabled)
+
+    debug(u"response - %s" % response['status'])
+    if response['status'] == 200:
+        output(u"%s: Versioning status updated" % bucket_uri)
+    return EX_OK
+
 def cmd_setpolicy(args):
     cfg = Config()
     s3 = S3(cfg)
@@ -2226,10 +2394,12 @@ def cmd_multipart(args):
     return EX_OK
 
 def cmd_abort_multipart(args):
-    '''{"cmd":"abortmp",   "label":"abort a multipart upload", "param":"s3://BUCKET Id", "func":cmd_abort_multipart, "argc":2},'''
+    '''{"cmd":"abortmp",   "label":"abort a multipart upload", "param":"s3://BUCKET/OBJECT Id", "func":cmd_abort_multipart, "argc":2},'''
     cfg = Config()
     s3 = S3(cfg)
     uri = S3Uri(args[0])
+    if not uri.object():
+        raise ParameterError(u"Expecting S3 URI with a filename: %s" % uri.uri())
     id = args[1]
     response = s3.abort_multipart(uri, id)
     debug(u"response - %s" % response['status'])
@@ -2281,7 +2451,7 @@ def cmd_accesslog(args):
 def cmd_sign(args):
     string_to_sign = args.pop()
     debug(u"string-to-sign: %r" % string_to_sign)
-    signature = Crypto.sign_string_v2(encode_to_s3(string_to_sign))
+    signature = sign_string_v2(encode_to_s3(string_to_sign))
     output(u"Signature: %s" % decode_from_s3(signature))
     return EX_OK
 
@@ -2291,7 +2461,7 @@ def cmd_signurl(args):
     if url_to_sign.type != 's3':
         raise ParameterError("Must be S3Uri. Got: %s" % url_to_sign)
     debug("url to sign: %r" % url_to_sign)
-    signed_url = Crypto.sign_url_v2(url_to_sign, expiry)
+    signed_url = sign_url_v2(url_to_sign, expiry)
     output(signed_url)
     return EX_OK
 
@@ -2527,9 +2697,9 @@ def run_configure(config_file, args):
                         ret_enc = gpg_encrypt(filename)
                         ret_dec = gpg_decrypt(ret_enc[1], ret_enc[2], False)
                         hash = [
-                            Utils.hash_file_md5(filename),
-                            Utils.hash_file_md5(ret_enc[1]),
-                            Utils.hash_file_md5(ret_dec[1]),
+                            hash_file_md5(filename),
+                            hash_file_md5(ret_enc[1]),
+                            hash_file_md5(ret_dec[1]),
                         ]
                         os.unlink(deunicodise(filename))
                         os.unlink(deunicodise(ret_enc[1]))
@@ -2649,6 +2819,7 @@ def get_commands_list():
     {"cmd":"modify", "label":"Modify object metadata", "param":"s3://BUCKET1/OBJECT", "func":cmd_modify, "argc":1},
     {"cmd":"mv", "label":"Move object", "param":"s3://BUCKET1/OBJECT1 s3://BUCKET2[/OBJECT2]", "func":cmd_mv, "argc":2},
     {"cmd":"setacl", "label":"Modify Access control list for Bucket or Files", "param":"s3://BUCKET[/OBJECT]", "func":cmd_setacl, "argc":1},
+    {"cmd":"setversioning", "label":"Modify Bucket Versioning", "param":"s3://BUCKET enable|disable", "func":cmd_setversioning, "argc":2},
 
     {"cmd":"setpolicy", "label":"Modify Bucket Policy", "param":"FILE s3://BUCKET", "func":cmd_setpolicy, "argc":2},
     {"cmd":"delpolicy", "label":"Delete Bucket Policy", "param":"s3://BUCKET", "func":cmd_delpolicy, "argc":1},
@@ -2688,7 +2859,7 @@ def get_commands_list():
     {"cmd":"cfcreate", "label":"Create CloudFront distribution point", "param":"s3://BUCKET", "func":CfCmd.create, "argc":1},
     {"cmd":"cfdelete", "label":"Delete CloudFront distribution point", "param":"cf://DIST_ID", "func":CfCmd.delete, "argc":1},
     {"cmd":"cfmodify", "label":"Change CloudFront distribution point parameters", "param":"cf://DIST_ID", "func":CfCmd.modify, "argc":1},
-    #{"cmd":"cfinval", "label":"Invalidate CloudFront objects", "param":"s3://BUCKET/OBJECT [s3://BUCKET/OBJECT ...]", "func":CfCmd.invalidate, "argc":1},
+    {"cmd":"cfinval", "label":"Invalidate CloudFront objects", "param":"s3://BUCKET/OBJECT [s3://BUCKET/OBJECT ...]", "func":CfCmd.invalidate, "argc":1},
     {"cmd":"cfinvalinfo", "label":"Display CloudFront invalidation request(s) status", "param":"cf://DIST_ID[/INVAL_ID]", "func":CfCmd.invalinfo, "argc":1},
     ]
 
@@ -2848,6 +3019,7 @@ def main():
     optparser.add_option(      "--delete-after-fetch", dest="delete_after_fetch", action="store_true", help="Delete remote objects after fetching to local file (only for [get] and [sync] commands).")
     optparser.add_option("-p", "--preserve", dest="preserve_attrs", action="store_true", help="Preserve filesystem attributes (mode, ownership, timestamps). Default for [sync] command.")
     optparser.add_option(      "--no-preserve", dest="preserve_attrs", action="store_false", help="Don't store FS attributes")
+    optparser.add_option(      "--keep-dirs", dest="keep_dirs", action="store_true", help="Preserve all local directories as remote objects including empty directories. Experimental feature.")
     optparser.add_option(      "--exclude", dest="exclude", action="append", metavar="GLOB", help="Filenames and paths matching GLOB will be excluded from sync")
     optparser.add_option(      "--exclude-from", dest="exclude_from", action="append", metavar="FILE", help="Read --exclude GLOBs from FILE")
     optparser.add_option(      "--rexclude", dest="rexclude", action="append", metavar="REGEXP", help="Filenames and paths matching REGEXP (regular expression) will be excluded from sync")
@@ -2899,6 +3071,8 @@ def main():
     optparser.add_option(      "--expiry-days", dest="expiry_days", action="store", help="Indicates the number of days after object creation the expiration rule takes effect. (only for [expire] command)")
     optparser.add_option(      "--expiry-prefix", dest="expiry_prefix", action="store", help="Identifying one or more objects with the prefix to which the expiration rule applies. (only for [expire] command)")
 
+    optparser.add_option(      "--skip-destination-validation", dest="skip_destination_validation", action="store_true", help="Skips validation of Amazon SQS, Amazon SNS, and AWS Lambda destinations when applying notification configuration. (only for [setnotification] command)")
+
     optparser.add_option(      "--progress", dest="progress_meter", action="store_true", help="Display progress meter (default on TTY).")
     optparser.add_option(      "--no-progress", dest="progress_meter", action="store_false", help="Don't display progress meter (default on non-TTY).")
     optparser.add_option(      "--stats", dest="stats", action="store_true", help="Give some file-transfer stats.")
@@ -2932,6 +3106,7 @@ def main():
     optparser.add_option(      "--requester-pays", dest="requester_pays", action="store_true", help="Set the REQUESTER PAYS flag for operations")
     optparser.add_option("-l", "--long-listing", dest="long_listing", action="store_true", help="Produce long listing [ls]")
     optparser.add_option(      "--stop-on-error", dest="stop_on_error", action="store_true", help="stop if error in transfer")
+    optparser.add_option(      "--max-retries", dest="max_retries", action="store", help="Maximum number of times to retry a failed request before giving up. Default is 5", metavar="NUM")
     optparser.add_option(      "--content-disposition", dest="content_disposition", action="store", help="Provide a Content-Disposition for signed URLs, e.g., \"inline; filename=myvideo.mp4\"")
     optparser.add_option(      "--content-type", dest="content_type", action="store", help="Provide a Content-Type for signed URLs, e.g., \"video/mp4\"")
 
@@ -3267,9 +3442,9 @@ if __name__ == '__main__':
         from S3.FileDict import FileDict
         from S3.S3Uri import S3Uri
         from S3 import Utils
-        from S3 import Crypto
         from S3.BaseUtils import (formatDateTime, getPrettyFromXml,
-                                  encode_to_s3, decode_from_s3)
+                                  encode_to_s3, decode_from_s3, s3path)
+        from S3.Crypto import hash_file_md5, sign_string_v2, sign_url_v2
         from S3.Utils import (formatSize, unicodise_safe, unicodise_s,
                               unicodise, deunicodise, replace_nonprintables)
         from S3.Progress import Progress, StatsInfo
@@ -3319,7 +3494,7 @@ if __name__ == '__main__':
         sys.exit(EX_ACCESSDENIED)
 
     except ConnectionRefusedError as e:
-        error(e)
+        error("Could not connect to server: %s" % e)
         sys.exit(EX_CONNECTIONREFUSED)
         # typically encountered error is:
         # ERROR: [Errno 111] Connection refused
@@ -3334,9 +3509,9 @@ if __name__ == '__main__':
         sys.exit(EX_IOERR)
 
     except IOError as e:
-        if e.errno == errno.ECONNREFUSED:
+        if e.errno in (errno.ECONNREFUSED, errno.EHOSTUNREACH):
             # Python2 does not have ConnectionRefusedError
-            error(e)
+            error("Could not connect to server: %s" % e)
             sys.exit(EX_CONNECTIONREFUSED)
 
         if e.errno == errno.EPIPE:
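
The widened handler above covers Python 2, which has no ConnectionRefusedError and reports the same failure as an IOError carrying ECONNREFUSED (and now also EHOSTUNREACH). A compact way to express that check, shown here purely as an illustration:

import errno

try:
    _ConnRefused = ConnectionRefusedError   # Python 3
except NameError:
    _ConnRefused = None                     # Python 2

def is_connect_failure(exc):
    if _ConnRefused is not None and isinstance(exc, _ConnRefused):
        return True
    # On Python 2 (and for EHOSTUNREACH on both versions) fall back to errno.
    return (isinstance(exc, (IOError, OSError)) and
            exc.errno in (errno.ECONNREFUSED, errno.EHOSTUNREACH))
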
diff --git a/s3cmd.egg-info/PKG-INFO b/s3cmd.egg-info/PKG-INFO
new file mode 100644
index 0000000..77bac57
--- /dev/null
+++ b/s3cmd.egg-info/PKG-INFO
@@ -0,0 +1,54 @@
+Metadata-Version: 2.1
+Name: s3cmd
+Version: 2.3.0.dev0
+Summary: Command line tool for managing Amazon S3 and CloudFront services
+Home-page: http://s3tools.org
+Author: Michal Ludvig
+Author-email: michal@logix.cz
+Maintainer: github.com/fviard, github.com/matteobar
+Maintainer-email: s3tools-bugs@lists.sourceforge.net
+License: GNU GPL v2+
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Environment :: MacOS X
+Classifier: Environment :: Win32 (MS Windows)
+Classifier: Intended Audience :: End Users/Desktop
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
+Classifier: Natural Language :: English
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: System :: Archiving
+Classifier: Topic :: Utilities
+License-File: LICENSE
+
+
+
+S3cmd lets you copy files from/to Amazon S3
+(Simple Storage Service) using a simple to use
+command line client. Supports rsync-like backup,
+GPG encryption, and more. Also supports management
+of Amazon's CloudFront content delivery network.
+
+
+Authors:
+--------
+    Florent Viard <florent@sodria.com>
+
+    Michal Ludvig  <michal@logix.cz>
+
+    Matt Domsch (github.com/mdomsch)
diff --git a/s3cmd.egg-info/SOURCES.txt b/s3cmd.egg-info/SOURCES.txt
new file mode 100644
index 0000000..cd18072
--- /dev/null
+++ b/s3cmd.egg-info/SOURCES.txt
@@ -0,0 +1,37 @@
+INSTALL.md
+LICENSE
+MANIFEST.in
+NEWS
+README.md
+s3cmd
+s3cmd.1
+setup.cfg
+setup.py
+S3/ACL.py
+S3/AccessLog.py
+S3/BaseUtils.py
+S3/BidirMap.py
+S3/CloudFront.py
+S3/Config.py
+S3/ConnMan.py
+S3/Crypto.py
+S3/Custom_httplib27.py
+S3/Custom_httplib3x.py
+S3/Exceptions.py
+S3/ExitCodes.py
+S3/FileDict.py
+S3/FileLists.py
+S3/HashCache.py
+S3/MultiPart.py
+S3/PkgInfo.py
+S3/Progress.py
+S3/S3.py
+S3/S3Uri.py
+S3/SortedDict.py
+S3/Utils.py
+S3/__init__.py
+s3cmd.egg-info/PKG-INFO
+s3cmd.egg-info/SOURCES.txt
+s3cmd.egg-info/dependency_links.txt
+s3cmd.egg-info/requires.txt
+s3cmd.egg-info/top_level.txt
\ No newline at end of file
diff --git a/s3cmd.egg-info/dependency_links.txt b/s3cmd.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/s3cmd.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/s3cmd.egg-info/requires.txt b/s3cmd.egg-info/requires.txt
new file mode 100644
index 0000000..ffde045
--- /dev/null
+++ b/s3cmd.egg-info/requires.txt
@@ -0,0 +1,2 @@
+python-dateutil
+python-magic
diff --git a/s3cmd.egg-info/top_level.txt b/s3cmd.egg-info/top_level.txt
new file mode 100644
index 0000000..878cb3c
--- /dev/null
+++ b/s3cmd.egg-info/top_level.txt
@@ -0,0 +1 @@
+S3
diff --git a/s3cmd.spec.in b/s3cmd.spec.in
deleted file mode 100644
index 8a40178..0000000
--- a/s3cmd.spec.in
+++ /dev/null
@@ -1,174 +0,0 @@
-%{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-
-%global commit ##COMMIT##
-%global shortcommit ##SHORTCOMMIT##
-
-Name:           s3cmd
-Version:        ##VERSION##
-Release:        1%{dist}
-Summary:        Tool for accessing Amazon Simple Storage Service
-
-Group:          Applications/Internet
-License:        GPLv2
-URL:            http://s3tools.com
-# git clone https://github.com/s3tools/s3cmd
-# python setup.py sdist
-Source0:        https://github.com/s3tools/s3cmd/archive/%{commit}/%{name}-%{version}-%{shortcommit}.tar.gz
-BuildRoot:      %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-BuildArch:      noarch
-
-%if %{!?fedora:16}%{?fedora} < 16 || %{!?rhel:7}%{?rhel} < 7
-BuildRequires:  python-devel
-%else
-BuildRequires:  python2-devel
-%endif
-%if %{!?fedora:8}%{?fedora} < 8 || %{!?rhel:6}%{?rhel} < 6
-# This is in standard library since 2.5
-BuildRequires:  python-elementtree
-Requires:       python-elementtree
-%endif
-BuildRequires:  python-dateutil
-BuildRequires:  python-setuptools
-Requires:       python-dateutil
-Requires:       python-magic
-
-%description
-S3cmd lets you copy files from/to Amazon S3
-(Simple Storage Service) using a simple to use
-command line client.
-
-
-%prep
-%setup -q -n s3cmd-%{commit}
-
-%build
-
-
-%install
-rm -rf $RPM_BUILD_ROOT
-S3CMD_PACKAGING=Yes python setup.py install --prefix=%{_prefix} --root=$RPM_BUILD_ROOT
-install -d $RPM_BUILD_ROOT%{_mandir}/man1
-install -m 644 s3cmd.1 $RPM_BUILD_ROOT%{_mandir}/man1
-
-
-%clean
-rm -rf $RPM_BUILD_ROOT
-
-
-%files
-%defattr(-,root,root,-)
-%{_bindir}/s3cmd
-%{_mandir}/man1/s3cmd.1*
-%{python_sitelib}/S3
-%if 0%{?fedora} >= 9 || 0%{?rhel} >= 6
-%{python_sitelib}/s3cmd*.egg-info
-%endif
-%doc NEWS README.md LICENSE
-
-
-%changelog
-* Thu Feb  5 2015 Matt Domsch <mdomsch@fedoraproject.org> - 1.5.1.2-5
-- add Requires: python-magic
-
-* Wed Feb  4 2015 Matt Domsch <mdomsch@fedoraproject.org> - 1.5.1.2-4
-- upstream 1.5.1.2, mostly bug fixes
-- add dependency on python-setuptools
-
- Mon Jan 12 2015 Matt Domsch <mdomsch@fedoraproject.org> - 1.5.0-1
-- upstream 1.5.0 final
-
-* Tue Jul  1 2014 Matt Domsch <mdomsch@fedoraproject.org> - 1.5.0-0.6.rc1
-- upstream 1.5.0-rc1
-
-* Sun Mar 23 2014 Matt Domsch <mdomsch@fedoraproject.org> - 1.5.0-0.4.git
-- upstream 1.5.0-beta1 plus even newer upstream fixes
-
-* Sun Feb 02 2014 Matt Domsch <mdomsch@fedoraproject.org> - 1.5.0-0.3.git
-- upstream 1.5.0-beta1 plus newer upstream fixes
-
-* Wed May 29 2013 Matt Domsch <mdomsch@fedoraproject.org> - 1.5.0-0.2.gita122d97
-- more upstream bugfixes
-- drop pyxattr dep, that codepath got dropped in this release
-
-* Mon May 20 2013 Matt Domsch <mdomsch@fedoraproject.org> - 1.5.0-0.1.gitb1ae0fbe
-- upstream 1.5.0-alpha3 plus fixes
-- add dep on pyxattr for the --xattr option
-
-* Tue Jun 19 2012 Matt Domsch <mdomsch@fedoraproject.org> - 1.1.0-0.4.git11e5755e
-- add local MD5 cache
-
-* Mon Jun 18 2012 Matt Domsch <mdomsch@fedoraproject.org> - 1.1.0-0.3.git7de0789d
-- parallelize local->remote syncs
-
-* Mon Jun 18 2012 Matt Domsch <mdomsch@fedoraproject.org> - 1.1.0-0.2.gitf881b162
-- add hardlink / duplicate file detection support
-
-* Fri Mar  9 2012 Matt Domsch <mdomsch@fedoraproject.org> - 1.1.0-0.1.git2dfe4a65
-- build from git for mdomsch patches to s3cmd sync
-
-* Thu Feb 23 2012 Dennis Gilmore <dennis@ausil.us> - 1.0.1-1
-- update to 1.0.1 release
-
-* Sat Jan 14 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 1.0.0-4
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild
-
-* Thu May 05 2011 Lubomir Rintel (GoodData) <lubo.rintel@gooddata.com> - 1.0.0-3
-- No hashlib hackery
-
-* Wed Feb 09 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 1.0.0-2
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
-
-* Tue Jan 11 2011 Lubomir Rintel (GoodData) <lubo.rintel@gooddata.com> - 1.0.0-1
-- New upstream release
-
-* Mon Nov 29 2010 Lubomir Rintel (GoodData) <lubo.rintel@gooddata.com> - 0.9.9.91-3
-- Patch for broken f14 httplib
-
-* Thu Jul 22 2010 David Malcolm <dmalcolm@redhat.com> - 0.9.9.91-2.1
-- Rebuilt for https://fedoraproject.org/wiki/Features/Python_2.7/MassRebuild
-
-* Wed Apr 28 2010 Lubomir Rintel (GoodData) <lubo.rintel@gooddata.com> - 0.9.9.91-1.1
-- Do not use sha1 from hashlib
-
-* Sun Feb 21 2010 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.9.91-1
-- New upstream release
-
-* Sun Jul 26 2009 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 0.9.9-2
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild
-
-* Tue Feb 24 2009 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.9-1
-- New upstream release
-
-* Sat Nov 29 2008 Ignacio Vazquez-Abrams <ivazqueznet+rpm@gmail.com> - 0.9.8.4-2
-- Rebuild for Python 2.6
-
-* Tue Nov 11 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.4-1
-- New upstream release, URI encoding patch upstreamed
-
-* Fri Sep 26 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.3-4
-- Try 3/65536
-
-* Fri Sep 26 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.3-3
-- Whoops, forgot to actually apply the patch.
-
-* Fri Sep 26 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.3-2
-- Fix listing of directories with special characters in names
-
-* Thu Jul 31 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.3-1
-- New upstream release: Avoid running out-of-memory in MD5'ing large files.
-
-* Fri Jul 25 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.2-1.1
-- Fix a typo
-
-* Tue Jul 15 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.2-1
-- New upstream
-
-* Fri Jul 04 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.1-3
-- Be satisfied with ET provided by 2.5 python
-
-* Fri Jul 04 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.1-2
-- Added missing python-devel BR, thanks to Marek Mahut
-- Packaged the Python egg file
-
-* Wed Jul 02 2008 Lubomir Rintel (Good Data) <lubo.rintel@gooddata.com> - 0.9.8.1-1
-- Initial packaging attempt
diff --git a/setup.cfg b/setup.cfg
index f09aa43..c3e7bbe 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,4 +1,10 @@
 [sdist]
 formats = gztar,zip
+
 [bdist_wheel]
 universal = 1
+
+[egg_info]
+tag_build = 
+tag_date = 0
+
diff --git a/testsuite.tar.gz b/testsuite.tar.gz
deleted file mode 100644
index b3e76e7..0000000
Binary files a/testsuite.tar.gz and /dev/null differ
diff --git a/upload-to-sf.sh b/upload-to-sf.sh
deleted file mode 100755
index 176f4db..0000000
--- a/upload-to-sf.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-
-VERSION=$(./s3cmd --version | awk '{print $NF}')
-echo -e "Uploading \033[32ms3cmd \033[31m${VERSION}\033[0m ..."
-#rsync -avP dist/s3cmd-${VERSION}.* ludvigm@frs.sourceforge.net:uploads/
-ln -f NEWS README.txt
-rsync -avP dist/s3cmd-${VERSION}.* README.txt ludvigm,s3tools@frs.sourceforge.net:/home/frs/project/s/s3/s3tools/s3cmd/${VERSION}/
