New Upstream Release - python-b2sdk

Ready changes

Summary

Merged new upstream version: 1.21.0 (was: 1.18.0).

Diff

diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
index 4532773..a0e357c 100644
--- a/.github/workflows/cd.yml
+++ b/.github/workflows/cd.yml
@@ -5,7 +5,7 @@ on:
     tags: 'v*'  # push events to matching v*, i.e. v1.0, v20.15.10
 
 env:
-  PYTHON_DEFAULT_VERSION: "3.10"
+  PYTHON_DEFAULT_VERSION: "3.11"
 
 jobs:
   deploy:
@@ -14,11 +14,11 @@ jobs:
       B2_PYPI_PASSWORD: ${{ secrets.B2_PYPI_PASSWORD }}
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Set up Python ${{ env.PYTHON_DEFAULT_VERSION }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_DEFAULT_VERSION }}
       - name: Display Python version
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5fb5d1c..eef293f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,21 +7,21 @@ on:
     branches: [master]
 
 env:
-  PYTHON_DEFAULT_VERSION: "3.10"
+  PYTHON_DEFAULT_VERSION: "3.11"
   SKIP_COVERAGE_PYTHON_VERSION_PREFIX: "pypy"
 
 jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - uses: codespell-project/actions-codespell@2391250ab05295bddd51e36a8c6295edb6343b0e
         with:
           ignore_words_list: datas
       - name: Set up Python ${{ env.PYTHON_DEFAULT_VERSION }}
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_DEFAULT_VERSION }}
           cache: "pip"
@@ -29,20 +29,21 @@ jobs:
         run: python -m pip install --upgrade nox pip setuptools
       - name: Run linters
         run: nox -vs lint
-      #- name: Validate changelog
-      #- if: ${{ ! startsWith(github.ref, 'refs/heads/dependabot/') }}
-      #- uses: zattoo/changelog@v1
-      #- with:
-      #-   token: ${{ github.token }}
+      - name: Validate changelog
+        # Library was designed to be used with pull requests only.
+        if: ${{ github.event_name == 'pull_request' && github.actor != 'dependabot[bot]' }}
+        uses: zattoo/changelog@v1
+        with:
+          token: ${{ github.token }}
   build:
     needs: lint
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Set up Python ${{ env.PYTHON_DEFAULT_VERSION }}
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_DEFAULT_VERSION }}
           cache: "pip"
@@ -57,13 +58,13 @@ jobs:
       B2_TEST_APPLICATION_KEY_ID: ${{ secrets.B2_TEST_APPLICATION_KEY_ID }}
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         if: ${{ env.B2_TEST_APPLICATION_KEY != '' && env.B2_TEST_APPLICATION_KEY_ID != '' }}  # TODO: skip this whole job instead
         with:
           fetch-depth: 0
       - name: Set up Python ${{ env.PYTHON_DEFAULT_VERSION }}
         if: ${{ env.B2_TEST_APPLICATION_KEY != '' && env.B2_TEST_APPLICATION_KEY_ID != '' }}  # TODO: skip this whole job instead
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_DEFAULT_VERSION }}
           cache: "pip"
@@ -83,7 +84,7 @@ jobs:
       fail-fast: false
       matrix:
         os: ["ubuntu-latest", "macos-latest", "windows-latest"]
-        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11.0-beta.1", "pypy-3.7", "pypy-3.8"]
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11.0", "pypy-3.7", "pypy-3.8"]
         exclude:
           - os: "macos-latest"
             python-version: "pypy-3.7"
@@ -94,11 +95,11 @@ jobs:
           - os: "windows-latest"
             python-version: "pypy-3.8"
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
           cache: "pip"
@@ -115,11 +116,11 @@ jobs:
     needs: build
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
         with:
           fetch-depth: 0
       - name: Set up Python ${{ env.PYTHON_DEFAULT_VERSION }}
-        uses: actions/setup-python@v3
+        uses: actions/setup-python@v4
         with:
           python-version: ${{ env.PYTHON_DEFAULT_VERSION }}
           cache: "pip"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 16cdd00..d2d96cb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,67 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [1.21.0] - 2023-04-17
+
+### Added
+* Add support for custom upload timestamp
+
+### Infrastructure
+* Remove dependency from `arrow`
+
+## [1.20.0] - 2023-03-23
+
+### Added
+* Add `use_cache` parameter to `B2Api.list_buckets`
+
+### Changed
+* Connection timeout is now being set explicitly
+
+### Fixed
+* Small files downloaded twice
+
+### Infrastructure
+* Disable changelog verification for dependabot PRs
+
+## [1.19.0] - 2023-01-24
+
+### Added
+* Authorizing a key for a single bucket ensures that this bucket is cached
+* `Bucket.ls` operation supports wildcard matching strings
+* Documentation for `AbstractUploadSource` and its children
+* `InvalidJsonResponse` when the received error is not a proper JSON document
+* Raising `PotentialS3EndpointPassedAsRealm` when a specific misconfiguration is suspected
+* Add `large_file_sha1` support
+* Add support for incremental upload and sync
+* Ability to stream data from an unbound source to B2 (for example stdin)
+
+### Fixed
+* Removed information about replication being in closed beta
+* Don't throw raw `OSError` exceptions when using `DownloadedFile.save_to` to a path that doesn't exist, is a directory or the user doesn't have permissions to write to
+
+### Infrastructure
+* Additional tests for listing files/versions
+* Ensured that changelog validation only happens on pull requests
+* Upgraded GitHub actions checkout to v3, python-setup to v4
+* Additional tests for `IncrementalHexDigester`
+
+## [1.18.0] - 2022-09-20
+
+### Added
+* Logging performance summary of parallel download threads
+* Add `max_download_streams_per_file` parameter to B2Api class and underlying structures
+* Add `is_file_lock_enabled` parameter to `Bucket.update()` and related methods
+
+### Fixed
+* Replace `ReplicationScanResult.source_has_sse_c_enabled` with `source_encryption_mode`
+* Fix `B2Api.get_key()` and `RawSimulator.delete_key()`
+* Fix calling `CopySizeTooBig` exception
+
+### Infrastructure
+* Fix nox's deprecated `session.install()` calls
+* Re-enable changelog validation in CI
+* StatsCollector contains context managers for gathering performance statistics
+
 ## [1.17.3] - 2022-07-15
 
 ### Fixed
@@ -377,7 +438,11 @@ has changed.
 ### Added
 Initial official release of SDK as a separate package (until now it was a part of B2 CLI)
 
-[Unreleased]: https://github.com/Backblaze/b2-sdk-python/compare/v1.17.3...HEAD
+[Unreleased]: https://github.com/Backblaze/b2-sdk-python/compare/v1.21.0...HEAD
+[1.21.0]: https://github.com/Backblaze/b2-sdk-python/compare/v1.20.0...v1.21.0
+[1.20.0]: https://github.com/Backblaze/b2-sdk-python/compare/v1.19.0...v1.20.0
+[1.19.0]: https://github.com/Backblaze/b2-sdk-python/compare/v1.18.0...v1.19.0
+[1.18.0]: https://github.com/Backblaze/b2-sdk-python/compare/v1.17.3...v1.18.0
 [1.17.3]: https://github.com/Backblaze/b2-sdk-python/compare/v1.17.2...v1.17.3
 [1.17.2]: https://github.com/Backblaze/b2-sdk-python/compare/v1.17.1...v1.17.2
 [1.17.1]: https://github.com/Backblaze/b2-sdk-python/compare/v1.17.0...v1.17.1
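
The CHANGELOG.md entries above are the user-facing surface of this upgrade. A minimal sketch of two of them, the wildcard-aware Bucket.ls (1.19.0) and the custom upload timestamp (1.21.0), assuming the standard b2sdk.v2 client; the key values and bucket name are placeholders, and custom upload timestamps must first be enabled on the account by Backblaze support:

    from b2sdk.v2 import B2Api, InMemoryAccountInfo

    api = B2Api(InMemoryAccountInfo())
    api.authorize_account("production", "<application-key-id>", "<application-key>")
    bucket = api.get_bucket_by_name("example-bucket")

    # 1.19.0: wildcard matching in ls() requires recursive=True as well
    for file_version, _folder in bucket.ls("logs/2023-*.txt", recursive=True, with_wildcard=True):
        print(file_version.file_name)

    # 1.21.0: override the object creation date (milliseconds since epoch)
    bucket.upload_bytes(b"hello", "hello.txt", custom_upload_timestamp=1681689600000)
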
diff --git a/b2sdk/__init__.py b/b2sdk/__init__.py
index 45bbe22..c423aed 100644
--- a/b2sdk/__init__.py
+++ b/b2sdk/__init__.py
@@ -24,12 +24,3 @@ logging.getLogger('urllib3.connectionpool').addFilter(UrllibWarningFilter())
 import b2sdk.version
 __version__ = b2sdk.version.VERSION
 assert __version__  # PEP-0396
-
-# https://github.com/crsmithdev/arrow/issues/612 - To get rid of the ArrowParseWarning messages in 0.14.3 onward.
-try:
-    from arrow.factory import ArrowParseWarning
-except ImportError:
-    pass
-else:
-    import warnings
-    warnings.simplefilter("ignore", ArrowParseWarning)
diff --git a/b2sdk/_v3/__init__.py b/b2sdk/_v3/__init__.py
index 26583f5..32c8fa6 100644
--- a/b2sdk/_v3/__init__.py
+++ b/b2sdk/_v3/__init__.py
@@ -61,6 +61,7 @@ from b2sdk.utils import (
     hex_sha1_of_bytes,
     hex_sha1_of_file,
     TempDir,
+    IncrementalHexDigester,
 )
 
 from b2sdk.utils import trace_call
@@ -184,6 +185,7 @@ from b2sdk.sync.report import SyncFileReporter
 from b2sdk.sync.report import SyncReport
 from b2sdk.sync.sync import KeepOrDeleteMode
 from b2sdk.sync.sync import Synchronizer
+from b2sdk.sync.sync import UploadMode
 from b2sdk.sync.encryption_provider import AbstractSyncEncryptionSettingsProvider
 from b2sdk.sync.encryption_provider import BasicSyncEncryptionSettingsProvider
 from b2sdk.sync.encryption_provider import ServerDefaultSyncEncryptionSettingsProvider
@@ -235,5 +237,6 @@ from b2sdk.cache import AuthInfoCache
 from b2sdk.cache import DummyCache
 from b2sdk.cache import InMemoryCache
 from b2sdk.http_constants import SRC_LAST_MODIFIED_MILLIS
+from b2sdk.http_constants import LARGE_FILE_SHA1
 from b2sdk.session import B2Session
 from b2sdk.utils.thread_pool import ThreadPoolMixin
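
Among the new exports, UploadMode backs the incremental upload support listed in the 1.19.0 changelog. A sketch of requesting it, assuming UploadMode is re-exported through b2sdk.v2 like the surrounding names (credentials and paths are placeholders):

    from b2sdk.v2 import B2Api, InMemoryAccountInfo, UploadMode

    api = B2Api(InMemoryAccountInfo())
    api.authorize_account("production", "<application-key-id>", "<application-key>")
    bucket = api.get_bucket_by_name("example-bucket")

    # Re-upload only the new tail of a file that grows in place (e.g. a log);
    # b2sdk reuses the already-uploaded prefix and falls back to a full upload
    # when the existing remote file cannot be used as a base.
    bucket.upload_local_file(
        local_file="/var/log/app.log",
        file_name="logs/app.log",
        upload_mode=UploadMode.INCREMENTAL,
    )
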
diff --git a/b2sdk/_v3/exception.py b/b2sdk/_v3/exception.py
index b2b13b3..d421456 100644
--- a/b2sdk/_v3/exception.py
+++ b/b2sdk/_v3/exception.py
@@ -26,23 +26,29 @@ from b2sdk.exception import BadFileInfo
 from b2sdk.exception import BadJson
 from b2sdk.exception import BadRequest
 from b2sdk.exception import BadUploadUrl
-from b2sdk.exception import BucketIdNotFound
 from b2sdk.exception import BrokenPipe
+from b2sdk.exception import BucketIdNotFound
 from b2sdk.exception import BucketNotAllowed
-from b2sdk.exception import CapabilityNotAllowed
 from b2sdk.exception import CapExceeded
+from b2sdk.exception import CapabilityNotAllowed
 from b2sdk.exception import ChecksumMismatch
 from b2sdk.exception import ClockSkew
 from b2sdk.exception import Conflict
 from b2sdk.exception import ConnectionReset
 from b2sdk.exception import CopyArgumentsMismatch
 from b2sdk.exception import DestFileNewer
+from b2sdk.exception import DestinationDirectoryDoesntAllowOperation
+from b2sdk.exception import DestinationDirectoryDoesntExist
+from b2sdk.exception import DestinationIsADirectory
+from b2sdk.exception import DestinationParentIsNotADirectory
+from b2sdk.exception import DisablingFileLockNotSupported
 from b2sdk.exception import DuplicateBucketName
 from b2sdk.exception import FileAlreadyHidden
 from b2sdk.exception import FileNameNotAllowed
 from b2sdk.exception import FileNotPresent
 from b2sdk.exception import FileSha1Mismatch
 from b2sdk.exception import InvalidAuthToken
+from b2sdk.exception import InvalidJsonResponse
 from b2sdk.exception import InvalidMetadataDirective
 from b2sdk.exception import InvalidRange
 from b2sdk.exception import InvalidUploadSource
@@ -52,13 +58,18 @@ from b2sdk.exception import MissingPart
 from b2sdk.exception import NonExistentBucket
 from b2sdk.exception import NotAllowedByAppKeyError
 from b2sdk.exception import PartSha1Mismatch
+from b2sdk.exception import PotentialS3EndpointPassedAsRealm
 from b2sdk.exception import RestrictedBucket
+from b2sdk.exception import RestrictedBucketMissing
 from b2sdk.exception import RetentionWriteError
+from b2sdk.exception import SSECKeyError
+from b2sdk.exception import SSECKeyIdMismatchInCopy
 from b2sdk.exception import ServiceError
+from b2sdk.exception import SourceReplicationConflict
 from b2sdk.exception import StorageCapExceeded
 from b2sdk.exception import TooManyRequests
-from b2sdk.exception import TransientErrorMixin
 from b2sdk.exception import TransactionCapExceeded
+from b2sdk.exception import TransientErrorMixin
 from b2sdk.exception import TruncatedOutput
 from b2sdk.exception import Unauthorized
 from b2sdk.exception import UnexpectedCloudBehaviour
@@ -67,8 +78,6 @@ from b2sdk.exception import UnknownHost
 from b2sdk.exception import UnrecognizedBucketType
 from b2sdk.exception import UnsatisfiableRange
 from b2sdk.exception import UnusableFileName
-from b2sdk.exception import SSECKeyIdMismatchInCopy
-from b2sdk.exception import SSECKeyError
 from b2sdk.exception import WrongEncryptionModeForBucketDefault
 from b2sdk.exception import interpret_b2_error
 from b2sdk.sync.exception import IncompleteSync
@@ -100,8 +109,8 @@ __all__ = (
     'BrokenPipe',
     'BucketIdNotFound',
     'BucketNotAllowed',
-    'CapabilityNotAllowed',
     'CapExceeded',
+    'CapabilityNotAllowed',
     'ChecksumMismatch',
     'ClockSkew',
     'Conflict',
@@ -109,6 +118,11 @@ __all__ = (
     'CopyArgumentsMismatch',
     'CorruptAccountInfo',
     'DestFileNewer',
+    'DestinationDirectoryDoesntAllowOperation',
+    'DestinationDirectoryDoesntExist',
+    'DestinationIsADirectory',
+    'DestinationParentIsNotADirectory',
+    'DisablingFileLockNotSupported',
     'DuplicateBucketName',
     'EmptyDirectory',
     'EnvironmentEncodingError',
@@ -119,6 +133,7 @@ __all__ = (
     'IncompleteSync',
     'InvalidArgument',
     'InvalidAuthToken',
+    'InvalidJsonResponse',
     'InvalidMetadataDirective',
     'InvalidRange',
     'InvalidUploadSource',
@@ -130,22 +145,25 @@ __all__ = (
     'NotADirectory',
     'NotAllowedByAppKeyError',
     'PartSha1Mismatch',
+    'PotentialS3EndpointPassedAsRealm',
     'RestrictedBucket',
+    'RestrictedBucketMissing',
     'RetentionWriteError',
     'ServiceError',
+    'SourceReplicationConflict',
     'StorageCapExceeded',
     'TooManyRequests',
     'TransactionCapExceeded',
     'TransientErrorMixin',
     'TruncatedOutput',
+    'UnableToCreateDirectory',
     'Unauthorized',
     'UnexpectedCloudBehaviour',
     'UnknownError',
     'UnknownHost',
     'UnrecognizedBucketType',
-    'UnableToCreateDirectory',
-    'UnsupportedFilename',
     'UnsatisfiableRange',
+    'UnsupportedFilename',
     'UnusableFileName',
     'interpret_b2_error',
     'check_invalid_argument',
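
Two of the newly exported exceptions, InvalidJsonResponse and PotentialS3EndpointPassedAsRealm, target a specific misconfiguration: pointing the client at an S3 endpoint instead of a realm. A hedged sketch of catching them, assuming they are re-exported through b2sdk.v2.exception like the rest of this list (the endpoint and keys are placeholders):

    from b2sdk.v2 import B2Api, InMemoryAccountInfo
    from b2sdk.v2.exception import InvalidJsonResponse, PotentialS3EndpointPassedAsRealm

    api = B2Api(InMemoryAccountInfo())
    try:
        # An S3 endpoint is not a realm; since this release the resulting non-JSON
        # error page is reported with a dedicated exception instead of a raw failure.
        api.authorize_account("https://s3.us-west-000.backblazeb2.com", "<key-id>", "<key>")
    except PotentialS3EndpointPassedAsRealm as exc:
        print("Looks like an S3 endpoint was passed as the realm:", exc)
    except InvalidJsonResponse as exc:
        print("Server response was not the expected JSON error document:", exc)
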
diff --git a/b2sdk/account_info/abstract.py b/b2sdk/account_info/abstract.py
index 5b4fe1f..c3aaf02 100644
--- a/b2sdk/account_info/abstract.py
+++ b/b2sdk/account_info/abstract.py
@@ -8,7 +8,7 @@
 #
 ######################################################################
 from abc import abstractmethod
-from typing import Optional
+from typing import Optional, List, Tuple
 
 from b2sdk.account_info import exception
 from b2sdk.raw_api import ALL_CAPABILITIES
@@ -51,6 +51,15 @@ class AbstractAccountInfo(metaclass=B2TraceMetaAbstract):
         Remove all stored information.
         """
 
+    @abstractmethod
+    def list_bucket_names_ids(self) -> List[Tuple[str, str]]:
+        """
+        List buckets in the cache.
+
+        :return: list of tuples (bucket_name, bucket_id)
+        """
+        pass
+
     @abstractmethod
     @limit_trace_arguments(only=['self'])
     def refresh_entire_bucket_name_cache(self, name_id_iterable):
diff --git a/b2sdk/account_info/in_memory.py b/b2sdk/account_info/in_memory.py
index faf9220..205cc62 100644
--- a/b2sdk/account_info/in_memory.py
+++ b/b2sdk/account_info/in_memory.py
@@ -8,7 +8,7 @@
 #
 ######################################################################
 
-from typing import Optional
+from typing import Optional, List, Tuple
 
 from .exception import MissingAccountData
 from .upload_url_pool import UrlPoolAccountInfo
@@ -88,6 +88,9 @@ class InMemoryAccountInfo(UrlPoolAccountInfo):
                 return name
         return None
 
+    def list_bucket_names_ids(self) -> List[Tuple[str, str]]:
+        return [(name, id_) for name, id_ in self._buckets.items()]
+
     def save_bucket(self, bucket):
         self._buckets[bucket.name] = bucket.id_
 
diff --git a/b2sdk/account_info/sqlite_account_info.py b/b2sdk/account_info/sqlite_account_info.py
index c2e7d45..1207fcf 100644
--- a/b2sdk/account_info/sqlite_account_info.py
+++ b/b2sdk/account_info/sqlite_account_info.py
@@ -16,7 +16,7 @@ import sqlite3
 import stat
 import threading
 
-from typing import List, Optional
+from typing import List, Optional, Tuple
 
 from .exception import CorruptAccountInfo, MissingAccountData
 from .upload_url_pool import UrlPoolAccountInfo
@@ -593,6 +593,11 @@ class SqliteAccountInfo(UrlPoolAccountInfo):
     def get_bucket_name_or_none_from_bucket_id(self, bucket_id: str) -> Optional[str]:
         return self._safe_query('SELECT bucket_name FROM bucket WHERE bucket_id = ?;', (bucket_id,))
 
+    def list_bucket_names_ids(self) -> List[Tuple[str, str]]:
+        with self._get_connection() as conn:
+            cursor = conn.execute('SELECT bucket_name, bucket_id FROM bucket;')
+            return cursor.fetchall()
+
     def _safe_query(self, query, params):
         try:
             with self._get_connection() as conn:
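
list_bucket_names_ids is added to every AccountInfo implementation and, per the abstract class above, is now required of custom subclasses as well. A small sketch of reading the cached pairs after a normal bucket listing, assuming the default wiring where B2Api's cache is backed by the account info object (credentials are placeholders):

    from b2sdk.v2 import B2Api, InMemoryAccountInfo

    info = InMemoryAccountInfo()
    api = B2Api(info)
    api.authorize_account("production", "<application-key-id>", "<application-key>")
    api.list_buckets()  # refreshes the bucket name/id cache held by `info`

    # New in this release: cached (bucket_name, bucket_id) pairs without another API call
    for name, bucket_id in info.list_bucket_names_ids():
        print(name, bucket_id)
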
diff --git a/b2sdk/account_info/stub.py b/b2sdk/account_info/stub.py
index f020659..7cf9827 100644
--- a/b2sdk/account_info/stub.py
+++ b/b2sdk/account_info/stub.py
@@ -8,7 +8,7 @@
 #
 ######################################################################
 
-from typing import Optional
+from typing import Optional, List, Tuple
 import collections
 import threading
 
@@ -76,6 +76,9 @@ class StubAccountInfo(AbstractAccountInfo):
     def get_bucket_name_or_none_from_bucket_id(self, bucket_id: str) -> Optional[str]:
         return None
 
+    def list_bucket_names_ids(self) -> List[Tuple[str, str]]:
+        return list((bucket.bucket_name, bucket.bucket_id) for bucket in self.buckets.values())
+
     def save_bucket(self, bucket):
         self.buckets[bucket.bucket_id] = bucket
 
diff --git a/b2sdk/api.py b/b2sdk/api.py
index 1e3714f..0f84bf8 100644
--- a/b2sdk/api.py
+++ b/b2sdk/api.py
@@ -7,17 +7,19 @@
 # License https://www.backblaze.com/using_b2_code.html
 #
 ######################################################################
-
+import logging
 from typing import Optional, Tuple, List, Generator
+from contextlib import suppress
 
 from .account_info.abstract import AbstractAccountInfo
+from .account_info.exception import MissingAccountData
 from .api_config import B2HttpApiConfig, DEFAULT_HTTP_API_CONFIG
 from .application_key import ApplicationKey, BaseApplicationKey, FullApplicationKey
 from .cache import AbstractCache
 from .bucket import Bucket, BucketFactory
 from .encryption.setting import EncryptionSetting
 from .replication.setting import ReplicationConfiguration
-from .exception import BucketIdNotFound, NonExistentBucket, RestrictedBucket
+from .exception import BucketIdNotFound, NonExistentBucket, RestrictedBucket, RestrictedBucketMissing
 from .file_lock import FileRetentionSetting, LegalHold
 from .file_version import DownloadVersionFactory, FileIdAndName, FileVersion, FileVersionFactory
 from .large_file.services import LargeFileServices
@@ -33,6 +35,8 @@ from .transfer import (
 from .transfer.inbound.downloaded_file import DownloadedFile
 from .utils import B2TraceMeta, b2_url_encode, limit_trace_arguments
 
+logger = logging.getLogger(__name__)
+
 
 def url_for_api(info, api_name):
     """
@@ -63,6 +67,7 @@ class Services:
         max_download_workers: Optional[int] = None,
         save_to_buffer_size: Optional[int] = None,
         check_download_hash: bool = True,
+        max_download_streams_per_file: Optional[int] = None,
     ):
         """
         Initialize Services object using given session.
@@ -73,6 +78,7 @@ class Services:
         :param max_download_workers: maximum number of download threads
         :param save_to_buffer_size: buffer size to use when writing files using DownloadedFile.save_to
         :param check_download_hash: whether to check hash of downloaded files. Can be disabled for files with internal checksums, for example, or to forcefully retrieve objects with corrupted payload or hash value
+        :param max_download_streams_per_file: how many streams to use for parallel downloader
         """
         self.api = api
         self.session = api.session
@@ -81,11 +87,13 @@ class Services:
             services=self, max_workers=max_upload_workers
         )
         self.copy_manager = self.COPY_MANAGER_CLASS(services=self, max_workers=max_copy_workers)
+        assert max_download_streams_per_file is None or max_download_streams_per_file >= 1
         self.download_manager = self.DOWNLOAD_MANAGER_CLASS(
             services=self,
             max_workers=max_download_workers,
             write_buffer_size=save_to_buffer_size,
             check_hash=check_download_hash,
+            max_download_streams_per_file=max_download_streams_per_file,
         )
         self.emerger = Emerger(self)
 
@@ -128,6 +136,7 @@ class B2Api(metaclass=B2TraceMeta):
         max_download_workers: Optional[int] = None,
         save_to_buffer_size: Optional[int] = None,
         check_download_hash: bool = True,
+        max_download_streams_per_file: Optional[int] = None,
     ):
         """
         Initialize the API using the given account info.
@@ -144,6 +153,7 @@ class B2Api(metaclass=B2TraceMeta):
         :param max_download_workers: maximum number of download threads
         :param save_to_buffer_size: buffer size to use when writing files using DownloadedFile.save_to
         :param check_download_hash: whether to check hash of downloaded files. Can be disabled for files with internal checksums, for example, or to forcefully retrieve objects with corrupted payload or hash value
+        :param max_download_streams_per_file: number of streams for parallel download manager
         """
         self.session = self.SESSION_CLASS(
             account_info=account_info, cache=cache, api_config=api_config
@@ -158,6 +168,7 @@ class B2Api(metaclass=B2TraceMeta):
             max_download_workers=max_download_workers,
             save_to_buffer_size=save_to_buffer_size,
             check_download_hash=check_download_hash,
+            max_download_streams_per_file=max_download_streams_per_file,
         )
 
     @property
@@ -193,6 +204,7 @@ class B2Api(metaclass=B2TraceMeta):
         :param str application_key: user's :term:`application key`
         """
         self.session.authorize_account(realm, application_key_id, application_key)
+        self._populate_bucket_cache_from_key()
 
     def get_account_id(self):
         """
@@ -364,7 +376,7 @@ class B2Api(metaclass=B2TraceMeta):
         account_id = self.account_info.get_account_id()
         self.session.delete_bucket(account_id, bucket.id_)
 
-    def list_buckets(self, bucket_name=None, bucket_id=None):
+    def list_buckets(self, bucket_name=None, bucket_id=None, *, use_cache: bool = False):
         """
         Call ``b2_list_buckets`` and return a list of buckets.
 
@@ -376,6 +388,7 @@ class B2Api(metaclass=B2TraceMeta):
 
         :param str bucket_name: the name of the one bucket to return
         :param str bucket_id: the ID of the one bucket to return
+        :param bool use_cache: if ``True`` use cached bucket list if available and not empty
         :rtype: list[b2sdk.v2.Bucket]
         """
         # Give a useful warning if the current application key does not
@@ -387,6 +400,19 @@ class B2Api(metaclass=B2TraceMeta):
         else:
             self.check_bucket_name_restrictions(bucket_name)
 
+        if use_cache:
+            cached_list = self.cache.list_bucket_names_ids()
+            buckets = [
+                self.BUCKET_CLASS(self, cache_b_id, name=cached_b_name)
+                for cached_b_name, cache_b_id in cached_list if (
+                    (bucket_name is None or bucket_name == cached_b_name) and
+                    (bucket_id is None or bucket_id == cache_b_id)
+                )
+            ]
+            if buckets:
+                logger.debug("Using cached bucket list as it is not empty")
+                return buckets
+
         account_id = self.account_info.get_account_id()
 
         response = self.session.list_buckets(
@@ -539,10 +565,13 @@ class B2Api(metaclass=B2TraceMeta):
 
         Raises an exception if profile is not permitted to list keys.
         """
-        return next(
-            self.list_keys(start_application_key_id=key_id),
-            None,
-        )
+        with suppress(StopIteration):
+            key = next(self.list_keys(start_application_key_id=key_id))
+
+            # list_keys() may return some other key if `key_id` does not exist;
+            # thus manually check that we retrieved the right key
+            if key.id_ == key_id:
+                return key
 
     # other
     def get_file_info(self, file_id: str) -> FileVersion:
@@ -584,3 +613,23 @@ class B2Api(metaclass=B2TraceMeta):
         if allowed_bucket_identifier is not None:
             if allowed_bucket_identifier != value:
                 raise RestrictedBucket(allowed_bucket_identifier)
+
+    def _populate_bucket_cache_from_key(self):
+        # If the key is restricted to the bucket, pre-populate the cache with it
+        try:
+            allowed = self.account_info.get_allowed()
+        except MissingAccountData:
+            return
+
+        allowed_bucket_id = allowed.get('bucketId')
+        if allowed_bucket_id is None:
+            return
+
+        allowed_bucket_name = allowed.get('bucketName')
+
+        # If we have bucketId set we still need to check bucketName. If the bucketName is None,
+        # it means that the bucketId belongs to a bucket that was already removed.
+        if allowed_bucket_name is None:
+            raise RestrictedBucketMissing()
+
+        self.cache.save_bucket(self.BUCKET_CLASS(self, allowed_bucket_id, name=allowed_bucket_name))
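
The _populate_bucket_cache_from_key hook above means that authorizing with a key restricted to a single bucket now seeds the cache immediately, which is what makes the new use_cache flag useful before any listing has happened. A sketch of the intended effect, assuming an application key scoped to one bucket (key values are placeholders):

    from b2sdk.v2 import B2Api, InMemoryAccountInfo

    api = B2Api(InMemoryAccountInfo())
    # For a single-bucket key, the allowed bucket's (name, id) pair is saved to the
    # cache during authorization, straight from the key's "allowed" information.
    api.authorize_account("production", "<restricted-key-id>", "<restricted-key>")

    # Served from that pre-populated cache; no extra b2_list_buckets round trip.
    buckets = api.list_buckets(use_cache=True)
    print([(b.name, b.id_) for b in buckets])
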
diff --git a/b2sdk/b2http.py b/b2sdk/b2http.py
index 085a145..5b4a06d 100644
--- a/b2sdk/b2http.py
+++ b/b2sdk/b2http.py
@@ -9,12 +9,15 @@
 ######################################################################
 
 from random import random
+from contextlib import contextmanager
+import datetime
 import io
 import json
+import locale
 import logging
 import socket
+import threading
 
-import arrow
 import requests
 from requests.adapters import HTTPAdapter
 import time
@@ -23,12 +26,14 @@ from typing import Any, Dict, Optional
 
 from .exception import (
     B2Error, B2RequestTimeoutDuringUpload, BadDateFormat, BrokenPipe, B2ConnectionError,
-    B2RequestTimeout, ClockSkew, ConnectionReset, interpret_b2_error, UnknownError, UnknownHost
+    B2RequestTimeout, ClockSkew, ConnectionReset, interpret_b2_error, UnknownError, UnknownHost,
+    InvalidJsonResponse, PotentialS3EndpointPassedAsRealm
 )
 from .api_config import B2HttpApiConfig, DEFAULT_HTTP_API_CONFIG
 from .requests import NotDecompressingResponse
 from .version import USER_AGENT
 
+LOCALE_LOCK = threading.Lock()
 logger = logging.getLogger(__name__)
 
 
@@ -46,6 +51,16 @@ def _print_exception(e, indent=''):
             _print_exception(a, indent + '        ')
 
 
+@contextmanager
+def setlocale(name):
+    with LOCALE_LOCK:
+        saved = locale.setlocale(locale.LC_ALL)
+        try:
+            yield locale.setlocale(locale.LC_ALL, name)
+        finally:
+            locale.setlocale(locale.LC_ALL, saved)
+
+
 class ResponseContextManager:
     """
     A context manager that closes a requests.Response when done.
@@ -58,7 +73,7 @@ class ResponseContextManager:
         return self.response
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.response.close()
+        return None
 
 
 class HttpCallback:
@@ -113,15 +128,16 @@ class ClockSkewHook(HttpCallback):
 
         # Convert the server time to a datetime object
         try:
-            server_time = arrow.get(
-                server_date_str, 'ddd, DD MMM YYYY HH:mm:ss ZZZ'
-            )  # this, unlike datetime.datetime.strptime, always uses English locale
-        except arrow.parser.ParserError:
+            with setlocale("C"):
+                server_time = datetime.datetime.strptime(
+                    server_date_str, '%a, %d %b %Y %H:%M:%S %Z'
+                )
+        except ValueError:
             logger.exception('server returned date in an inappropriate format')
             raise BadDateFormat(server_date_str)
 
         # Get the local time
-        local_time = arrow.utcnow()
+        local_time = datetime.datetime.utcnow()
 
         # Check the difference.
         max_allowed = 10 * 60  # ten minutes, in seconds
@@ -158,6 +174,7 @@ class B2Http:
     is not a part of the interface and is subject to change.
     """
 
+    CONNECTION_TIMEOUT = 3 + 6 + 12 + 24 + 1  # 4 standard tcp retransmissions + 1s latency
     TIMEOUT = 128
     TIMEOUT_FOR_COPY = 1200  # 20 minutes as server-side copy can take time
     TIMEOUT_FOR_UPLOAD = 128
@@ -226,7 +243,7 @@ class B2Http:
                 url,
                 headers=request_headers,
                 data=data,
-                timeout=_timeout or self.TIMEOUT_FOR_UPLOAD,
+                timeout=(self.CONNECTION_TIMEOUT, _timeout or self.TIMEOUT_FOR_UPLOAD),
             )
             self._run_post_request_hooks('POST', url, request_headers, response)
             return response
@@ -312,7 +329,10 @@ class B2Http:
         def do_get():
             self._run_pre_request_hooks('GET', url, request_headers)
             response = self.session.get(
-                url, headers=request_headers, stream=True, timeout=self.TIMEOUT
+                url,
+                headers=request_headers,
+                stream=True,
+                timeout=(self.CONNECTION_TIMEOUT, self.TIMEOUT),
             )
             self._run_post_request_hooks('GET', url, request_headers, response)
             return response
@@ -355,7 +375,10 @@ class B2Http:
         def do_head():
             self._run_pre_request_hooks('HEAD', url, request_headers)
             response = self.session.head(
-                url, headers=request_headers, stream=True, timeout=self.TIMEOUT
+                url,
+                headers=request_headers,
+                stream=True,
+                timeout=(self.CONNECTION_TIMEOUT, self.TIMEOUT),
             )
             self._run_post_request_hooks('HEAD', url, request_headers, response)
             return response
@@ -384,6 +407,7 @@ class B2Http:
 
         :param dict post_params: request parameters
         """
+        response = None
         try:
             response = fcn()
             if response.status_code not in [200, 206]:
@@ -428,6 +452,17 @@ class B2Http:
         except requests.Timeout as e:
             raise B2RequestTimeout(str(e))
 
+        except json.JSONDecodeError:
+            if response is None:
+                raise RuntimeError('Got JSON error without a response.')
+
+            # When the user points to an S3 endpoint, he won't receive the JSON error
+            # he expects. In that case, we can provide at least a hint of "what happened".
+            # s3 url has the form of e.g. https://s3.us-west-000.backblazeb2.com
+            if '://s3.' in response.url:
+                raise PotentialS3EndpointPassedAsRealm(response.content)
+            raise InvalidJsonResponse(response.content)
+
         except Exception as e:
             text = repr(e)
 
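
The timeout changes above switch b2http from a single number to a (connect, read) pair, which requests interprets as separate connect and read timeouts. A standalone illustration outside b2sdk (the URL is just an example):

    import requests

    # A 2-tuple timeout means: fail if the TCP connection is not established within
    # 46 seconds (matching B2Http.CONNECTION_TIMEOUT = 3 + 6 + 12 + 24 + 1), or if
    # no data arrives for 128 seconds once connected (matching B2Http.TIMEOUT).
    response = requests.get("https://example.com/", timeout=(46, 128))
    print(response.status_code)
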
diff --git a/b2sdk/bucket.py b/b2sdk/bucket.py
index 2d4f2a9..1854da7 100644
--- a/b2sdk/bucket.py
+++ b/b2sdk/bucket.py
@@ -8,9 +8,12 @@
 #
 ######################################################################
 
+import fnmatch
 import logging
+import pathlib
 
-from typing import Optional, Tuple
+from contextlib import suppress
+from typing import Dict, Optional, Tuple
 
 from .encryption.setting import EncryptionSetting, EncryptionSettingFactory
 from .encryption.types import EncryptionMode
@@ -34,11 +37,13 @@ from .progress import AbstractProgressListener, DoNothingProgressListener
 from .replication.setting import ReplicationConfiguration, ReplicationConfigurationFactory
 from .transfer.emerge.executor import AUTO_CONTENT_TYPE
 from .transfer.emerge.write_intent import WriteIntent
+from .transfer.emerge.unbound_write_intent import UnboundWriteIntentGenerator
 from .transfer.inbound.downloaded_file import DownloadedFile
 from .transfer.outbound.copy_source import CopySource
-from .transfer.outbound.upload_source import UploadSourceBytes, UploadSourceLocalFile
+from .transfer.outbound.upload_source import UploadSourceBytes, UploadSourceLocalFile, UploadMode
 from .utils import (
     B2TraceMeta,
+    Sha1HexDigest,
     b2_url_encode,
     disable_trace,
     limit_trace_arguments,
@@ -150,6 +155,7 @@ class Bucket(metaclass=B2TraceMeta):
         default_server_side_encryption: Optional[EncryptionSetting] = None,
         default_retention: Optional[BucketRetentionSetting] = None,
         replication: Optional[ReplicationConfiguration] = None,
+        is_file_lock_enabled: Optional[bool] = None,
     ) -> 'Bucket':
         """
         Update various bucket parameters.
@@ -161,7 +167,8 @@ class Bucket(metaclass=B2TraceMeta):
         :param if_revision_is: revision number, update the info **only if** *revision* equals to *if_revision_is*
         :param default_server_side_encryption: default server side encryption settings (``None`` if unknown)
         :param default_retention: bucket default retention setting
-        :param replication: replication rules for the bucket;
+        :param replication: replication rules for the bucket
+        :param bool is_file_lock_enabled: specifies whether bucket should get File Lock-enabled
         """
         account_id = self.api.account_info.get_account_id()
         return self.api.BUCKET_FACTORY_CLASS.from_api_bucket_dict(
@@ -177,6 +184,7 @@ class Bucket(metaclass=B2TraceMeta):
                 default_server_side_encryption=default_server_side_encryption,
                 default_retention=default_retention,
                 replication=replication,
+                is_file_lock_enabled=is_file_lock_enabled,
             )
         )
 
@@ -320,7 +328,8 @@ class Bucket(metaclass=B2TraceMeta):
         folder_to_list: str = '',
         latest_only: bool = True,
         recursive: bool = False,
-        fetch_count: Optional[int] = 10000
+        fetch_count: Optional[int] = 10000,
+        with_wildcard: bool = False,
     ):
         """
         Pretend that folders exist and yields the information about the files in a folder.
@@ -336,21 +345,59 @@ class Bucket(metaclass=B2TraceMeta):
         :param folder_to_list: the name of the folder to list; must not start with "/".
                                Empty string means top-level folder
         :param latest_only: when ``False`` returns info about all versions of a file,
-                              when ``True``, just returns info about the most recent versions
+                            when ``True``, just returns info about the most recent versions
         :param recursive: if ``True``, list folders recursively
         :param fetch_count: how many entries to return or ``None`` to use the default. Acceptable values: 1 - 10000
+        :param with_wildcard: Accepts "*", "?", "[]" and "[!]" in folder_to_list, similarly to what shell does.
+                              As of 1.19.0 it can only be enabled when recursive is also enabled.
+                              Also, in this mode, folder_to_list is considered to be a filename or a pattern.
         :rtype: generator[tuple[b2sdk.v2.FileVersion, str]]
         :returns: generator of (file_version, folder_name) tuples
 
         .. note::
-            In case of `recursive=True`, folder_name is returned only for first file in the folder.
+            In case of `recursive=True`, folder_name is not returned.
         """
+        # Ensure that recursive is enabled when with_wildcard is enabled.
+        if with_wildcard and not recursive:
+            raise ValueError('with_wildcard requires recursive to be turned on as well')
+
         # Every file returned must have a name that starts with the
         # folder name and a "/".
         prefix = folder_to_list
-        if prefix != '' and not prefix.endswith('/'):
+        # In case of wildcards, we don't assume that this is folder that we're searching through.
+        # It could be an exact file, e.g. 'a/b.txt' that we're trying to locate.
+        if prefix != '' and not prefix.endswith('/') and not with_wildcard:
             prefix += '/'
 
+        # If we're running with wildcard-matching, we could get
+        # a different prefix from it.  We search for the first
+        # occurrence of the special characters and fetch
+        # parent path from that place.
+        # Examples:
+        #   'b/c/*.txt' –> 'b/c/'
+        #   '*.txt' –> ''
+        #   'a/*/result.[ct]sv' –> 'a/'
+        if with_wildcard:
+            for wildcard_character in '*?[':
+                try:
+                    starter_index = folder_to_list.index(wildcard_character)
+                except ValueError:
+                    continue
+
+                # +1 to include the starter character.  Using posix path to
+                # ensure consistent behaviour on Windows (e.g. case sensitivity).
+                path = pathlib.PurePosixPath(folder_to_list[:starter_index + 1])
+                parent_path = str(path.parent)
+                # Path considers dot to be the empty path.
+                # There's no shorter path than that.
+                if parent_path == '.':
+                    prefix = ''
+                    break
+                # We could receive paths in different stage, e.g. 'a/*/result.[ct]sv' has two
+                # possible parent paths: 'a/' and 'a/*/', with the first one being the correct one
+                if len(parent_path) < len(prefix):
+                    prefix = parent_path
+
         # Loop until all files in the named directory have been listed.
         # The starting point of the first list_file_names request is the
         # prefix we're looking for.  The prefix ends with '/', which is
@@ -375,7 +422,13 @@ class Bucket(metaclass=B2TraceMeta):
                 if not file_version.file_name.startswith(prefix):
                     # We're past the files we care about
                     return
+                if with_wildcard and not fnmatch.fnmatchcase(
+                    file_version.file_name, folder_to_list
+                ):
+                    # File doesn't match our wildcard rules
+                    continue
                 after_prefix = file_version.file_name[len(prefix):]
+                # In case of wildcards, we don't care about folders at all, and it's recursive by default.
                 if '/' not in after_prefix or recursive:
                     # This is not a folder, so we'll print it out and
                     # continue on.
@@ -440,10 +493,15 @@ class Bucket(metaclass=B2TraceMeta):
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        large_file_sha1: Optional[Sha1HexDigest] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Upload bytes in memory to a B2 file.
 
+        .. note:
+            ``custom_upload_timestamp`` is disabled by default - please talk to customer support to enable it on your account (if you really need it)
+
         :param bytes data_bytes: a byte array to upload
         :param str file_name: a file name to upload bytes to
         :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name
@@ -452,7 +510,9 @@ class Bucket(metaclass=B2TraceMeta):
         :param b2sdk.v2.EncryptionSetting encryption: encryption settings (``None`` if unknown)
         :param b2sdk.v2.FileRetentionSetting file_retention: file retention setting
         :param bool legal_hold: legal hold setting
-        :rtype: generator[b2sdk.v2.FileVersion]
+        :param Sha1HexDigest,None large_file_sha1: SHA-1 hash of the result file or ``None`` if unknown
+        :param int,None custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
+        :rtype: b2sdk.v2.FileVersion
         """
         upload_source = UploadSourceBytes(data_bytes)
         return self.upload(
@@ -464,6 +524,8 @@ class Bucket(metaclass=B2TraceMeta):
             encryption=encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def upload_local_file(
@@ -478,10 +540,15 @@ class Bucket(metaclass=B2TraceMeta):
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        upload_mode: UploadMode = UploadMode.FULL,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Upload a file on local disk to a B2 file.
 
+        .. note:
+            ``custom_upload_timestamp`` is disabled by default - please talk to customer support to enable it on your account (if you really need it)
+
         .. seealso::
 
             :ref:`Synchronizer <sync>`, a *high-performance* utility that synchronizes a local folder with a :term:`bucket`.
@@ -496,11 +563,29 @@ class Bucket(metaclass=B2TraceMeta):
         :param b2sdk.v2.EncryptionSetting encryption: encryption settings (``None`` if unknown)
         :param b2sdk.v2.FileRetentionSetting file_retention: file retention setting
         :param bool legal_hold: legal hold setting
+        :param b2sdk.v2.UploadMode upload_mode: desired upload mode
+        :param int,None custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
         :rtype: b2sdk.v2.FileVersion
         """
         upload_source = UploadSourceLocalFile(local_path=local_file, content_sha1=sha1_sum)
-        return self.upload(
-            upload_source,
+        sources = [upload_source]
+        large_file_sha1 = sha1_sum
+
+        if upload_mode == UploadMode.INCREMENTAL:
+            with suppress(FileNotPresent):
+                existing_file_info = self.get_file_info_by_name(file_name)
+
+                sources = upload_source.get_incremental_sources(
+                    existing_file_info,
+                    self.api.session.account_info.get_absolute_minimum_part_size()
+                )
+
+                if len(sources) > 1 and not large_file_sha1:
+                    # the upload will be incremental, but the SHA1 sum is unknown, calculate it now
+                    large_file_sha1 = upload_source.get_content_sha1()
+
+        return self.concatenate(
+            sources,
             file_name,
             content_type=content_type,
             file_info=file_infos,
@@ -509,6 +594,131 @@ class Bucket(metaclass=B2TraceMeta):
             encryption=encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
+        )
+
+    def upload_unbound_stream(
+        self,
+        read_only_object,
+        file_name: str,
+        content_type: str = None,
+        file_info: Optional[Dict[str, str]] = None,
+        progress_listener: Optional[AbstractProgressListener] = None,
+        recommended_upload_part_size: Optional[int] = None,
+        encryption: Optional[EncryptionSetting] = None,
+        file_retention: Optional[FileRetentionSetting] = None,
+        legal_hold: Optional[LegalHold] = None,
+        min_part_size: Optional[int] = None,
+        max_part_size: Optional[int] = None,
+        large_file_sha1: Optional[Sha1HexDigest] = None,
+        buffers_count: int = 2,
+        buffer_size: Optional[int] = None,
+        read_size: int = 8192,
+        unused_buffer_timeout_seconds: float = 3600.0,
+        custom_upload_timestamp: Optional[int] = None,
+    ):
+        """
+        Upload an unbound file-like read-only object to a B2 file.
+
+        It is assumed that this object is streamed like stdin or socket, and the size is not known up front.
+        It is up to caller to ensure that this object is open and available through the whole streaming process.
+
+        If stdin is to be passed, consider opening it in binary mode, if possible on the platform:
+
+        .. code-block:: python
+
+            with open(sys.stdin.fileno(), mode='rb', buffering=min_part_size, closefd=False) as source:
+                bucket.upload_unbound_stream(source, 'target-file')
+
+        For platforms without file descriptors, one can use the following:
+
+        .. code-block:: python
+
+            bucket.upload_unbound_stream(sys.stdin.buffer, 'target-file')
+
+        but note that buffering in this case depends on the interpreter mode.
+
+        ``min_part_size``, ``recommended_upload_part_size`` and ``max_part_size`` should
+        all be greater than ``account_info.get_absolute_minimum_part_size()``.
+
+        ``buffers_count`` describes a desired number of buffers that are to be used. Minimal amount is two, as we need
+        to determine the method of uploading this stream (if there's only a single buffer we send it as a normal file,
+        if there are at least two – as a large file).
+        Number of buffers determines the amount of memory used by the streaming process and, in turns, describe
+        the amount of data that can be pulled from ``read_only_object`` while also uploading it. Providing multiple
+        buffers also allows for higher parallelization. Default two buffers allow for the process to fill one buffer
+        with data while the other one is being sent to the B2. While only one buffer can be filled with data at once,
+        all others are used to send the data in parallel (limited only by the number of parallel threads).
+        Buffer size can be controlled by ``buffer_size`` parameter. If left unset, it will default to
+        a value of ``recommended_upload_part_size``, whatever it resolves to be.
+        Note that in the current implementation buffers are (almost) directly sent to B2, thus whatever is picked
+        as the ``buffer_size`` will also become the size of the part when uploading a large file in this manner.
+        In rare cases, namely when the whole buffer was sent, but there was an error during sending of last bytes
+        and a retry was issued, another buffer (above the aforementioned limit) will be allocated.
+
+        .. note:
+            ``custom_upload_timestamp`` is disabled by default - please talk to customer support to enable it on your account (if you really need it)
+
+        :param read_only_object: any object containing a ``read`` method accepting size of the read
+        :param file_name: a file name of the new B2 file
+        :param content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name
+        :param file_info: a file info to store with the file or ``None`` to not store anything
+        :param progress_listener: a progress listener object to use, or ``None`` to not report progress
+        :param encryption: encryption settings (``None`` if unknown)
+        :param file_retention: file retention setting
+        :param legal_hold: legal hold setting
+        :param min_part_size: a minimum size of a part
+        :param recommended_upload_part_size: the recommended part size to use for uploading local sources
+                        or ``None`` to determine automatically
+        :param max_part_size: a maximum size of a part
+        :param large_file_sha1: SHA-1 hash of the result file or ``None`` if unknown
+        :param buffers_count: desired number of buffers allocated, cannot be smaller than 2
+        :param buffer_size: size of a single buffer that we pull data to or upload data to B2. If ``None``,
+                        value of ``recommended_upload_part_size`` is used. If that also is ``None``,
+                        it will be determined automatically as "recommended upload size".
+        :param read_size: size of a single read operation performed on the ``read_only_object``
+        :param unused_buffer_timeout_seconds: amount of time that a buffer can be idle before returning error
+        :param int,None custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
+        :rtype: b2sdk.v2.FileVersion
+        """
+        if buffers_count <= 1:
+            raise ValueError('buffers_count has to be at least 2')
+        if read_size <= 0:
+            raise ValueError('read_size has to be a positive integer')
+        if unused_buffer_timeout_seconds <= 0.0:
+            raise ValueError('unused_buffer_timeout_seconds has to be a positive float')
+
+        buffer_size = buffer_size or recommended_upload_part_size
+        if buffer_size is None:
+            planner = self.api.services.emerger.get_emerge_planner()
+            buffer_size = planner.recommended_upload_part_size
+
+        return self._create_file(
+            self.api.services.emerger.emerge_unbound,
+            UnboundWriteIntentGenerator(
+                read_only_object,
+                buffer_size,
+                read_size=read_size,
+                queue_size=buffers_count,
+                queue_timeout_seconds=unused_buffer_timeout_seconds,
+            ).iterator(),
+            file_name,
+            content_type=content_type,
+            file_info=file_info,
+            progress_listener=progress_listener,
+            encryption=encryption,
+            file_retention=file_retention,
+            legal_hold=legal_hold,
+            min_part_size=min_part_size,
+            recommended_upload_part_size=recommended_upload_part_size,
+            max_part_size=max_part_size,
+            # This is a parameter for EmergeExecutor.execute_emerge_plan telling
+            # how many buffers in parallel can be handled at once. We ensure that one buffer
+            # is always downloading data from the stream while others are being uploaded.
+            max_queue_size=buffers_count - 1,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def upload(
@@ -522,6 +732,8 @@ class Bucket(metaclass=B2TraceMeta):
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        large_file_sha1: Optional[Sha1HexDigest] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Upload a file to B2, retrying as needed.
@@ -534,6 +746,9 @@ class Bucket(metaclass=B2TraceMeta):
         must be possible to call it more than once in case the upload
         is retried.
 
+        .. note:
+            ``custom_upload_timestamp`` is disabled by default - please talk to customer support to enable it on your account (if you really need it)
+
         :param b2sdk.v2.AbstractUploadSource upload_source: an object that opens the source of the upload
         :param str file_name: the file name of the new B2 file
         :param str,None content_type: the MIME type, or ``None`` to accept the default based on file extension of the B2 file name
@@ -543,6 +758,8 @@ class Bucket(metaclass=B2TraceMeta):
         :param b2sdk.v2.EncryptionSetting encryption: encryption settings (``None`` if unknown)
         :param b2sdk.v2.FileRetentionSetting file_retention: file retention setting
         :param bool legal_hold: legal hold setting
+        :param Sha1HexDigest,None large_file_sha1: SHA-1 hash of the result file or ``None`` if unknown
+        :param int,None custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
         :rtype: b2sdk.v2.FileVersion
         """
         return self.create_file(
@@ -556,6 +773,8 @@ class Bucket(metaclass=B2TraceMeta):
             encryption=encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def create_file(
@@ -572,6 +791,8 @@ class Bucket(metaclass=B2TraceMeta):
         legal_hold: Optional[LegalHold] = None,
         min_part_size=None,
         max_part_size=None,
+        large_file_sha1=None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Creates a new file in this bucket using an iterable (list, tuple etc) of remote or local sources.
@@ -579,8 +800,11 @@ class Bucket(metaclass=B2TraceMeta):
         Source ranges can overlap and remote sources will be prioritized over local sources (when possible).
         For more information and usage examples please see :ref:`Advanced usage patterns <AdvancedUsagePatterns>`.
 
+        .. note:
+            ``custom_upload_timestamp`` is disabled by default - please talk to customer support to enable it on your account (if you really need it)
+
         :param list[b2sdk.v2.WriteIntent] write_intents: list of write intents (remote or local sources)
-        :param str new_file_name: file name of the new file
+        :param str file_name: file name of the new file
         :param str,None content_type: content_type for the new file, if ``None`` content_type would be
                         automatically determined or it may be copied if it resolves
                         as single part remote source copy
@@ -598,6 +822,8 @@ class Bucket(metaclass=B2TraceMeta):
         :param bool legal_hold: legal hold setting
         :param int min_part_size: lower limit of part size for the transfer planner, in bytes
         :param int max_part_size: upper limit of part size for the transfer planner, in bytes
+        :param Sha1HexDigest,None large_file_sha1: SHA-1 hash of the result file or ``None`` if unknown
+        :param int,None custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
         """
         return self._create_file(
             self.api.services.emerger.emerge,
@@ -613,6 +839,8 @@ class Bucket(metaclass=B2TraceMeta):
             legal_hold=legal_hold,
             min_part_size=min_part_size,
             max_part_size=max_part_size,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def create_file_stream(
@@ -629,6 +857,8 @@ class Bucket(metaclass=B2TraceMeta):
         legal_hold: Optional[LegalHold] = None,
         min_part_size=None,
         max_part_size=None,
+        large_file_sha1=None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Creates a new file in this bucket using a stream of multiple remote or local sources.
@@ -636,9 +866,12 @@ class Bucket(metaclass=B2TraceMeta):
         Source ranges can overlap and remote sources will be prioritized over local sources (when possible).
         For more information and usage examples please see :ref:`Advanced usage patterns <AdvancedUsagePatterns>`.
 
+        .. note:
+            ``custom_upload_timestamp`` is disabled by default - please talk to customer support to enable it on your account (if you really need it)
+
         :param iterator[b2sdk.v2.WriteIntent] write_intents_iterator: iterator of write intents which
                         are sorted ascending by ``destination_offset``
-        :param str new_file_name: file name of the new file
+        :param str file_name: file name of the new file
         :param str,None content_type: content_type for the new file, if ``None`` content_type would be
                         automatically determined or it may be copied if it resolves
                         as single part remote source copy
@@ -657,6 +890,8 @@ class Bucket(metaclass=B2TraceMeta):
         :param bool legal_hold: legal hold setting
         :param int min_part_size: lower limit of part size for the transfer planner, in bytes
         :param int max_part_size: upper limit of part size for the transfer planner, in bytes
+        :param Sha1HexDigest,None large_file_sha1: SHA-1 hash of the result file or ``None`` if unknown
+        :param int,None custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
         """
         return self._create_file(
             self.api.services.emerger.emerge_stream,
@@ -672,6 +907,8 @@ class Bucket(metaclass=B2TraceMeta):
             legal_hold=legal_hold,
             min_part_size=min_part_size,
             max_part_size=max_part_size,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def _create_file(
@@ -689,6 +926,8 @@ class Bucket(metaclass=B2TraceMeta):
         legal_hold: Optional[LegalHold] = None,
         min_part_size=None,
         max_part_size=None,
+        large_file_sha1=None,
+        **kwargs
     ):
         validate_b2_file_name(file_name)
         progress_listener = progress_listener or DoNothingProgressListener()
@@ -707,6 +946,8 @@ class Bucket(metaclass=B2TraceMeta):
             legal_hold=legal_hold,
             min_part_size=min_part_size,
             max_part_size=max_part_size,
+            large_file_sha1=large_file_sha1,
+            **kwargs
         )
 
     def concatenate(
@@ -723,12 +964,17 @@ class Bucket(metaclass=B2TraceMeta):
         legal_hold: Optional[LegalHold] = None,
         min_part_size=None,
         max_part_size=None,
+        large_file_sha1=None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Creates a new file in this bucket by concatenating multiple remote or local sources.
 
+        .. note::
+            ``custom_upload_timestamp`` is disabled by default - please talk to customer support to enable it on your account (if you really need it)
+
         :param list[b2sdk.v2.OutboundTransferSource] outbound_sources: list of outbound sources (remote or local)
-        :param str new_file_name: file name of the new file
+        :param str file_name: file name of the new file
         :param str,None content_type: content_type for the new file, if ``None`` content_type would be
                         automatically determined from file name or it may be copied if it resolves
                         as single part remote source copy
@@ -746,9 +992,11 @@ class Bucket(metaclass=B2TraceMeta):
         :param bool legal_hold: legal hold setting
         :param int min_part_size: lower limit of part size for the transfer planner, in bytes
         :param int max_part_size: upper limit of part size for the transfer planner, in bytes
+        :param Sha1HexDigest,None large_file_sha1: SHA-1 hash of the result file or ``None`` if unknown
+        :param int,None custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
         """
         return self.create_file(
-            WriteIntent.wrap_sources_iterator(outbound_sources),
+            list(WriteIntent.wrap_sources_iterator(outbound_sources)),
             file_name,
             content_type=content_type,
             file_info=file_info,
@@ -760,6 +1008,8 @@ class Bucket(metaclass=B2TraceMeta):
             legal_hold=legal_hold,
             min_part_size=min_part_size,
             max_part_size=max_part_size,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def concatenate_stream(
@@ -774,12 +1024,14 @@ class Bucket(metaclass=B2TraceMeta):
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        large_file_sha1: Optional[Sha1HexDigest] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Creates a new file in this bucket by concatenating stream of multiple remote or local sources.
 
         :param iterator[b2sdk.v2.OutboundTransferSource] outbound_sources_iterator: iterator of outbound sources
-        :param str new_file_name: file name of the new file
+        :param str file_name: file name of the new file
         :param str,None content_type: content_type for the new file, if ``None`` content_type would be
                         automatically determined or it may be copied if it resolves
                         as single part remote source copy
@@ -796,6 +1048,8 @@ class Bucket(metaclass=B2TraceMeta):
         :param b2sdk.v2.EncryptionSetting encryption: encryption setting (``None`` if unknown)
         :param b2sdk.v2.FileRetentionSetting file_retention: file retention setting
         :param bool legal_hold: legal hold setting
+        :param Sha1HexDigest,None large_file_sha1: SHA-1 hash of the result file or ``None`` if unknown
+        :param int,None custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
         """
         return self.create_file_stream(
             WriteIntent.wrap_sources_iterator(outbound_sources_iterator),
@@ -808,6 +1062,8 @@ class Bucket(metaclass=B2TraceMeta):
             encryption=encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def get_download_url(self, filename):
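
The hunks above add large_file_sha1 and custom_upload_timestamp to the multi-source upload methods. A minimal usage sketch, assuming the custom upload timestamp feature is enabled on the account and using placeholder credentials, bucket and file names:

    import time

    from b2sdk.v2 import B2Api, InMemoryAccountInfo, UploadSourceLocalFile

    api = B2Api(InMemoryAccountInfo())
    api.authorize_account("production", "<applicationKeyId>", "<applicationKey>")
    bucket = api.get_bucket_by_name("my-bucket")

    bucket.concatenate(
        [UploadSourceLocalFile("part1.bin"), UploadSourceLocalFile("part2.bin")],
        "joined.bin",
        large_file_sha1=None,  # pass a Sha1HexDigest here if the final SHA-1 is already known
        custom_upload_timestamp=int(time.time() * 1000),  # milliseconds since the epoch
    )
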
diff --git a/b2sdk/cache.py b/b2sdk/cache.py
index d1b71f4..f2d822e 100644
--- a/b2sdk/cache.py
+++ b/b2sdk/cache.py
@@ -9,7 +9,10 @@
 ######################################################################
 
 from abc import ABCMeta, abstractmethod
-from typing import Optional
+from typing import Optional, List, Tuple, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from b2sdk.account_info.abstract import AbstractAccountInfo
 
 
 class AbstractCache(metaclass=ABCMeta):
@@ -28,6 +31,14 @@ class AbstractCache(metaclass=ABCMeta):
     def get_bucket_name_or_none_from_bucket_id(self, bucket_id: str) -> Optional[str]:
         pass
 
+    @abstractmethod
+    def list_bucket_names_ids(self) -> List[Tuple[str, str]]:
+        """
+        List buckets in the cache.
+
+        :return: list of tuples (bucket_name, bucket_id)
+        """
+
     @abstractmethod
     def save_bucket(self, bucket):
         pass
@@ -54,6 +65,9 @@ class DummyCache(AbstractCache):
     def get_bucket_name_or_none_from_allowed(self):
         return None
 
+    def list_bucket_names_ids(self) -> List[Tuple[str, str]]:
+        return []
+
     def save_bucket(self, bucket):
         pass
 
@@ -68,7 +82,7 @@ class InMemoryCache(AbstractCache):
 
     def __init__(self):
         self.name_id_map = {}
-        self.bucket_name = ''
+        self.bucket_name = None
 
     def get_bucket_id_or_none_from_bucket_name(self, name):
         return self.name_id_map.get(name)
@@ -82,6 +96,9 @@ class InMemoryCache(AbstractCache):
     def get_bucket_name_or_none_from_allowed(self):
         return self.bucket_name
 
+    def list_bucket_names_ids(self) -> List[Tuple[str, str]]:
+        return sorted(tuple(item) for item in self.name_id_map.items())
+
     def save_bucket(self, bucket):
         self.name_id_map[bucket.name] = bucket.id_
 
@@ -94,7 +111,7 @@ class AuthInfoCache(AbstractCache):
     A cache that stores data persistently in StoredAccountInfo.
     """
 
-    def __init__(self, info):
+    def __init__(self, info: 'AbstractAccountInfo'):
         self.info = info
 
     def get_bucket_id_or_none_from_bucket_name(self, name):
@@ -106,6 +123,9 @@ class AuthInfoCache(AbstractCache):
     def get_bucket_name_or_none_from_allowed(self):
         return self.info.get_bucket_name_or_none_from_allowed()
 
+    def list_bucket_names_ids(self) -> List[Tuple[str, str]]:
+        return self.info.list_bucket_names_ids()
+
     def save_bucket(self, bucket):
         self.info.save_bucket(bucket)
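
A short sketch of the new list_bucket_names_ids method, exercised here on InMemoryCache; SimpleNamespace stands in for a real Bucket object, since only its name and id_ attributes are read:

    from types import SimpleNamespace

    from b2sdk.cache import InMemoryCache

    cache = InMemoryCache()
    cache.save_bucket(SimpleNamespace(name="photos", id_="bucket-1"))
    cache.save_bucket(SimpleNamespace(name="backups", id_="bucket-2"))

    print(cache.list_bucket_names_ids())
    # [('backups', 'bucket-2'), ('photos', 'bucket-1')] - (name, id) tuples sorted by name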
 
diff --git a/b2sdk/encryption/setting.py b/b2sdk/encryption/setting.py
index de4fdbd..8f12b19 100644
--- a/b2sdk/encryption/setting.py
+++ b/b2sdk/encryption/setting.py
@@ -218,6 +218,9 @@ class EncryptionSetting:
     def __repr__(self):
         return '<%s(%s, %s, %s)>' % (self.__class__.__name__, self.mode, self.algorithm, self.key)
 
+    def is_unknown(self):
+        return self.mode == EncryptionMode.NONE
+
 
 class EncryptionSettingFactory:
     # 2021-03-17: for the bucket the response of the server is:
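
A quick illustration of the new is_unknown helper; per the change above it simply reports whether the setting's mode is EncryptionMode.NONE:

    from b2sdk.v2 import EncryptionAlgorithm, EncryptionMode, EncryptionSetting

    print(EncryptionSetting(mode=EncryptionMode.NONE).is_unknown())  # True
    print(
        EncryptionSetting(
            mode=EncryptionMode.SSE_B2, algorithm=EncryptionAlgorithm.AES256
        ).is_unknown()
    )  # False
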
diff --git a/b2sdk/exception.py b/b2sdk/exception.py
index bc5bc20..a05ca55 100644
--- a/b2sdk/exception.py
+++ b/b2sdk/exception.py
@@ -12,6 +12,7 @@ from abc import ABCMeta
 
 import logging
 import re
+import warnings
 from typing import Any, Dict, Optional
 
 from .utils import camelcase_to_underscore, trace_call
@@ -38,7 +39,7 @@ class B2Error(Exception, metaclass=ABCMeta):
         # If the exception is caused by a b2 server response,
         # the server MAY have included instructions to pause the thread before issuing any more requests
         self.retry_after_seconds = None
-        super(B2Error, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
 
     @property
     def prefix(self):
@@ -81,7 +82,7 @@ class B2SimpleError(B2Error, metaclass=ABCMeta):
     """
 
     def __str__(self):
-        return '%s: %s' % (self.prefix, super(B2SimpleError, self).__str__())
+        return '%s: %s' % (self.prefix, super().__str__())
 
 
 class NotAllowedByAppKeyError(B2SimpleError, metaclass=ABCMeta):
@@ -133,7 +134,7 @@ class CapabilityNotAllowed(NotAllowedByAppKeyError):
 
 class ChecksumMismatch(TransientErrorMixin, B2Error):
     def __init__(self, checksum_type, expected, actual):
-        super(ChecksumMismatch, self).__init__()
+        super().__init__()
         self.checksum_type = checksum_type
         self.expected = expected
         self.actual = actual
@@ -167,7 +168,7 @@ class ClockSkew(B2HttpCallbackPostRequestException):
         """
         :param int clock_skew_seconds: The difference: local_clock - server_clock
         """
-        super(ClockSkew, self).__init__()
+        super().__init__()
         self.clock_skew_seconds = clock_skew_seconds
 
     def __str__(self):
@@ -209,7 +210,7 @@ class B2RequestTimeoutDuringUpload(B2RequestTimeout):
 
 class DestFileNewer(B2Error):
     def __init__(self, dest_path, source_path, dest_prefix, source_prefix):
-        super(DestFileNewer, self).__init__()
+        super().__init__()
         self.dest_path = dest_path
         self.source_path = source_path
         self.dest_prefix = dest_prefix
@@ -239,7 +240,7 @@ class ResourceNotFound(B2SimpleError):
 
 class FileOrBucketNotFound(ResourceNotFound):
     def __init__(self, bucket_name=None, file_id_or_name=None):
-        super(FileOrBucketNotFound, self).__init__()
+        super().__init__()
         self.bucket_name = bucket_name
         self.file_id_or_name = file_id_or_name
 
@@ -290,7 +291,7 @@ class SSECKeyIdMismatchInCopy(InvalidMetadataDirective):
 
 class InvalidRange(B2Error):
     def __init__(self, content_length, range_):
-        super(InvalidRange, self).__init__()
+        super().__init__()
         self.content_length = content_length
         self.range_ = range_
 
@@ -309,7 +310,7 @@ class InvalidUploadSource(B2SimpleError):
 
 class BadRequest(B2Error):
     def __init__(self, message, code):
-        super(BadRequest, self).__init__()
+        super().__init__()
         self.message = message
         self.code = code
 
@@ -325,7 +326,7 @@ class CopySourceTooBig(BadRequest):
 
 class Unauthorized(B2Error):
     def __init__(self, message, code):
-        super(Unauthorized, self).__init__()
+        super().__init__()
         self.message = message
         self.code = code
 
@@ -349,22 +350,29 @@ class InvalidAuthToken(Unauthorized):
     """
 
     def __init__(self, message, code):
-        super(InvalidAuthToken,
-              self).__init__('Invalid authorization token. Server said: ' + message, code)
+        super().__init__('Invalid authorization token. Server said: ' + message, code)
 
 
 class RestrictedBucket(B2Error):
     def __init__(self, bucket_name):
-        super(RestrictedBucket, self).__init__()
+        super().__init__()
         self.bucket_name = bucket_name
 
     def __str__(self):
         return 'Application key is restricted to bucket: %s' % self.bucket_name
 
 
+class RestrictedBucketMissing(RestrictedBucket):
+    def __init__(self):
+        super().__init__('')
+
+    def __str__(self):
+        return 'Application key is restricted to a bucket that doesn\'t exist'
+
+
 class MaxFileSizeExceeded(B2Error):
     def __init__(self, size, max_allowed_size):
-        super(MaxFileSizeExceeded, self).__init__()
+        super().__init__()
         self.size = size
         self.max_allowed_size = max_allowed_size
 
@@ -377,7 +385,7 @@ class MaxFileSizeExceeded(B2Error):
 
 class MaxRetriesExceeded(B2Error):
     def __init__(self, limit, exception_info_list):
-        super(MaxRetriesExceeded, self).__init__()
+        super().__init__()
         self.limit = limit
         self.exception_info_list = exception_info_list
 
@@ -404,7 +412,7 @@ class FileSha1Mismatch(B2SimpleError):
 
 class PartSha1Mismatch(B2Error):
     def __init__(self, key):
-        super(PartSha1Mismatch, self).__init__()
+        super().__init__()
         self.key = key
 
     def __str__(self):
@@ -434,7 +442,7 @@ class TransactionCapExceeded(CapExceeded):
 
 class TooManyRequests(B2Error):
     def __init__(self, retry_after_seconds=None):
-        super(TooManyRequests, self).__init__()
+        super().__init__()
         self.retry_after_seconds = retry_after_seconds
 
     def __str__(self):
@@ -446,7 +454,7 @@ class TooManyRequests(B2Error):
 
 class TruncatedOutput(TransientErrorMixin, B2Error):
     def __init__(self, bytes_read, file_size):
-        super(TruncatedOutput, self).__init__()
+        super().__init__()
         self.bytes_read = bytes_read
         self.file_size = file_size
 
@@ -481,7 +489,7 @@ class UnsatisfiableRange(B2Error):
 
 class UploadTokenUsedConcurrently(B2Error):
     def __init__(self, token):
-        super(UploadTokenUsedConcurrently, self).__init__()
+        super().__init__()
         self.token = token
 
     def __str__(self):
@@ -517,6 +525,57 @@ class CopyArgumentsMismatch(InvalidUserInput):
     pass
 
 
+class DisablingFileLockNotSupported(B2Error):
+    def __str__(self):
+        return "Disabling file lock is not supported"
+
+
+class SourceReplicationConflict(B2Error):
+    def __str__(self):
+        return "Operation not supported for buckets with source replication"
+
+
+class EnablingFileLockOnRestrictedBucket(B2Error):
+    def __str__(self):
+        return "Turning on file lock for a restricted bucket is not allowed"
+
+
+class InvalidJsonResponse(B2SimpleError):
+    UP_TO_BYTES_COUNT = 200
+
+    def __init__(self, content: bytes):
+        self.content = content
+        message = '%s' % self.content[:self.UP_TO_BYTES_COUNT]
+        if len(content) > self.UP_TO_BYTES_COUNT:
+            message += '...'
+
+        super().__init__(message)
+
+
+class PotentialS3EndpointPassedAsRealm(InvalidJsonResponse):
+    pass
+
+
+class DestinationDirectoryError(B2Error):
+    pass
+
+
+class DestinationDirectoryDoesntExist(DestinationDirectoryError):
+    pass
+
+
+class DestinationParentIsNotADirectory(DestinationDirectoryError):
+    pass
+
+
+class DestinationIsADirectory(DestinationDirectoryError):
+    pass
+
+
+class DestinationDirectoryDoesntAllowOperation(DestinationDirectoryError):
+    pass
+
+
 @trace_call(logger)
 def interpret_b2_error(
     status: int,
@@ -555,21 +614,41 @@ def interpret_b2_error(
         return PartSha1Mismatch(post_params.get('fileId'))
     elif status == 400 and code == "bad_bucket_id":
         return BucketIdNotFound(post_params.get('bucketId'))
-    elif status == 400 and code in ('bad_request', 'auth_token_limit', 'source_too_large'):
-        # it's "bad_request" on 2022-03-29, but will become 'auth_token_limit' in 2022-04  # TODO: cleanup after 2022-05-01
+    elif status == 400 and code == "auth_token_limit":
         matcher = UPLOAD_TOKEN_USED_CONCURRENTLY_ERROR_MESSAGE_RE.match(message)
-        if matcher is not None:
-            token = matcher.group('token')
-            return UploadTokenUsedConcurrently(token)
-
-        # it's "bad_request" on 2022-03-29, but will become 'source_too_large' in 2022-04  # TODO: cleanup after 2022-05-01
+        assert matcher is not None, f"unexpected error message: {message}"
+        token = matcher.group('token')
+        return UploadTokenUsedConcurrently(token)
+    elif status == 400 and code == "source_too_large":
         matcher = COPY_SOURCE_TOO_BIG_ERROR_MESSAGE_RE.match(message)
-        if matcher is not None:
-            size = int(matcher.group('size'))
-            return CopySourceTooBig(size)
+        assert matcher is not None, f"unexpected error message: {message}"
+        size = int(matcher.group('size'))
+        return CopySourceTooBig(message, code, size)
+    elif status == 400 and code == 'file_lock_conflict':
+        return DisablingFileLockNotSupported()
+    elif status == 400 and code == 'source_replication_conflict':
+        return SourceReplicationConflict()
+    elif status == 400 and code == 'restricted_bucket_conflict':
+        return EnablingFileLockOnRestrictedBucket()
+    elif status == 400 and code == 'bad_request':
+
+        # it's "bad_request" on 2022-09-14, but will become 'disabling_file_lock_not_allowed'  # TODO: cleanup after 2022-09-22
+        if message == 'fileLockEnabled value of false is not allowed when bucket is already file lock enabled.':
+            return DisablingFileLockNotSupported()
+
+        # it's "bad_request" on 2022-09-14, but will become 'source_replication_conflict'  # TODO: cleanup after 2022-09-22
+        if message == 'Turning on file lock for an existing bucket having source replication configuration is not allowed.':
+            return SourceReplicationConflict()
+
+        # it's "bad_request" on 2022-09-14, but will become 'restricted_bucket_conflict'  # TODO: cleanup after 2022-09-22
+        if message == 'Turning on file lock for a restricted bucket is not allowed.':
+            return EnablingFileLockOnRestrictedBucket()
 
         return BadRequest(message, code)
     elif status == 400:
+        warnings.warn(
+            f"bad request exception with an unknown `code`. message={message}, code={code}"
+        )
         return BadRequest(message, code)
     elif status == 401 and code in ("bad_auth_token", "expired_auth_token"):
         return InvalidAuthToken(message, code)
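
A sketch of handling the new file-lock related errors from application code. The bucket.update(is_file_lock_enabled=...) call is an assumption here, mirroring the parameter added to the session and raw API layers elsewhere in this diff; the exception classes themselves come straight from the hunks above, and bucket is assumed to be obtained as in the earlier sketch:

    from b2sdk.exception import (
        DisablingFileLockNotSupported,
        EnablingFileLockOnRestrictedBucket,
        SourceReplicationConflict,
    )

    try:
        bucket.update(is_file_lock_enabled=True)  # hypothetical high-level call
    except DisablingFileLockNotSupported:
        print("file lock cannot be turned off once it has been enabled")
    except SourceReplicationConflict:
        print("this bucket is a replication source, so file lock cannot be enabled")
    except EnablingFileLockOnRestrictedBucket:
        print("the application key is restricted to this bucket; cannot enable file lock")
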
diff --git a/b2sdk/file_version.py b/b2sdk/file_version.py
index ffdf76e..d9c979a 100644
--- a/b2sdk/file_version.py
+++ b/b2sdk/file_version.py
@@ -8,17 +8,17 @@
 #
 ######################################################################
 
-from typing import Dict, Optional, Union, Tuple, TYPE_CHECKING
+from typing import Any, Dict, Optional, Union, Tuple, TYPE_CHECKING
 import re
 from copy import deepcopy
 
 from .encryption.setting import EncryptionSetting, EncryptionSettingFactory
-from .replication.types import ReplicationStatus
-from .http_constants import FILE_INFO_HEADER_PREFIX_LOWER, SRC_LAST_MODIFIED_MILLIS
 from .file_lock import FileRetentionSetting, LegalHold, NO_RETENTION_FILE_SETTING
+from .http_constants import FILE_INFO_HEADER_PREFIX_LOWER, LARGE_FILE_SHA1, SRC_LAST_MODIFIED_MILLIS
 from .progress import AbstractProgressListener
+from .replication.types import ReplicationStatus
+from .utils import b2_url_decode, Sha1HexDigest
 from .utils.range_ import Range
-from .utils import b2_url_decode
 
 if TYPE_CHECKING:
     from .api import B2Api
@@ -102,7 +102,7 @@ class BaseFileVersion:
             return '%s%s' % (UNVERIFIED_CHECKSUM_PREFIX, content_sha1)
         return content_sha1
 
-    def _clone(self, **new_attributes: Dict[str, object]):
+    def _clone(self, **new_attributes: Any):
         """
         Create new instance based on the old one, overriding attributes with :code:`new_attributes`
         (only applies to arguments passed to __init__)
@@ -197,6 +197,18 @@ class BaseFileVersion:
         assert m, self.id_
         return self._FILE_TYPE[int(m.group(1))]
 
+    def get_content_sha1(self) -> Optional[Sha1HexDigest]:
+        """
+        Get the file's content SHA1 hex digest from the header or, if it's absent,
+        from the file info.  If both are missing, return None.
+        """
+        if self.content_sha1 and self.content_sha1 != "none":
+            return self.content_sha1
+        elif LARGE_FILE_SHA1 in self.file_info:
+            return Sha1HexDigest(self.file_info[LARGE_FILE_SHA1])
+        # content SHA1 unknown
+        return None
+
 
 class FileVersion(BaseFileVersion):
     """
diff --git a/b2sdk/http_constants.py b/b2sdk/http_constants.py
index a9f86b6..6d3abea 100644
--- a/b2sdk/http_constants.py
+++ b/b2sdk/http_constants.py
@@ -17,9 +17,19 @@ FILE_INFO_HEADER_PREFIX_LOWER = FILE_INFO_HEADER_PREFIX.lower()
 # Standard names for file info entries
 SRC_LAST_MODIFIED_MILLIS = 'src_last_modified_millis'
 
+# SHA-1 hash key for large files
+LARGE_FILE_SHA1 = 'large_file_sha1'
+
 # Special X-Bz-Content-Sha1 value to verify checksum at the end
 HEX_DIGITS_AT_END = 'hex_digits_at_end'
 
 # Identifying SSE_C keys
 SSE_C_KEY_ID_FILE_INFO_KEY_NAME = 'sse_c_key_id'
 SSE_C_KEY_ID_HEADER = FILE_INFO_HEADER_PREFIX + SSE_C_KEY_ID_FILE_INFO_KEY_NAME
+
+# Default part sizes
+MEGABYTE = 1000 * 1000
+GIGABYTE = 1000 * MEGABYTE
+DEFAULT_MIN_PART_SIZE = 5 * MEGABYTE
+DEFAULT_RECOMMENDED_UPLOAD_PART_SIZE = 100 * MEGABYTE
+DEFAULT_MAX_PART_SIZE = 5 * GIGABYTE
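
The new part-size constants use decimal (SI) units, so the defaults work out to 5 MB, 100 MB and 5 GB:

    from b2sdk.http_constants import (
        DEFAULT_MAX_PART_SIZE,
        DEFAULT_MIN_PART_SIZE,
        DEFAULT_RECOMMENDED_UPLOAD_PART_SIZE,
    )

    print(DEFAULT_MIN_PART_SIZE)                 # 5000000
    print(DEFAULT_RECOMMENDED_UPLOAD_PART_SIZE)  # 100000000
    print(DEFAULT_MAX_PART_SIZE)                 # 5000000000
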
diff --git a/b2sdk/raw_api.py b/b2sdk/raw_api.py
index ed2c9db..cbfcb82 100644
--- a/b2sdk/raw_api.py
+++ b/b2sdk/raw_api.py
@@ -288,6 +288,7 @@ class AbstractRawApi(metaclass=ABCMeta):
         default_server_side_encryption: Optional[EncryptionSetting] = None,
         default_retention: Optional[BucketRetentionSetting] = None,
         replication: Optional[ReplicationConfiguration] = None,
+        is_file_lock_enabled: Optional[bool] = None,
     ):
         pass
 
@@ -315,6 +316,7 @@ class AbstractRawApi(metaclass=ABCMeta):
         server_side_encryption: Optional[EncryptionSetting],
         file_retention: Optional[FileRetentionSetting],
         legal_hold: Optional[LegalHold],
+        custom_upload_timestamp: Optional[int] = None,
     ) -> dict:
         headers = {
             'Authorization': upload_auth_token,
@@ -337,6 +339,9 @@ class AbstractRawApi(metaclass=ABCMeta):
         if file_retention is not None:
             file_retention.add_to_to_upload_headers(headers)
 
+        if custom_upload_timestamp is not None:
+            headers['X-Bz-Custom-Upload-Timestamp'] = str(custom_upload_timestamp)
+
         return headers
 
     @abstractmethod
@@ -353,6 +358,7 @@ class AbstractRawApi(metaclass=ABCMeta):
         server_side_encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         pass
 
@@ -701,6 +707,7 @@ class B2RawHTTPApi(AbstractRawApi):
         server_side_encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         kwargs = {}
         if server_side_encryption is not None:
@@ -715,6 +722,9 @@ class B2RawHTTPApi(AbstractRawApi):
         if file_retention is not None:
             kwargs['fileRetention'] = file_retention.serialize_to_json_for_request()
 
+        if custom_upload_timestamp is not None:
+            kwargs['custom_upload_timestamp'] = custom_upload_timestamp
+
         return self._post_json(
             api_url,
             'b2_start_large_file',
@@ -740,6 +750,7 @@ class B2RawHTTPApi(AbstractRawApi):
         default_server_side_encryption: Optional[EncryptionSetting] = None,
         default_retention: Optional[BucketRetentionSetting] = None,
         replication: Optional[ReplicationConfiguration] = None,
+        is_file_lock_enabled: Optional[bool] = None,
     ):
         kwargs = {}
         if if_revision_is is not None:
@@ -761,6 +772,8 @@ class B2RawHTTPApi(AbstractRawApi):
             kwargs['defaultRetention'] = default_retention.serialize_to_json_for_request()
         if replication is not None:
             kwargs['replicationConfiguration'] = replication.serialize_to_json_for_request()
+        if is_file_lock_enabled is not None:
+            kwargs['fileLockEnabled'] = is_file_lock_enabled
 
         assert kwargs
 
@@ -877,6 +890,7 @@ class B2RawHTTPApi(AbstractRawApi):
         server_side_encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Upload one, small file to b2.
@@ -903,6 +917,7 @@ class B2RawHTTPApi(AbstractRawApi):
             server_side_encryption=server_side_encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
         return self.b2_http.post_content_return_json(upload_url, headers, data_stream)
 
diff --git a/b2sdk/raw_simulator.py b/b2sdk/raw_simulator.py
index 074808a..c1ad51d 100644
--- a/b2sdk/raw_simulator.py
+++ b/b2sdk/raw_simulator.py
@@ -16,7 +16,7 @@ import re
 import threading
 import time
 
-from contextlib import contextmanager
+from contextlib import contextmanager, suppress
 from typing import Optional
 
 from b2sdk.http_constants import FILE_INFO_HEADER_PREFIX, HEX_DIGITS_AT_END
@@ -33,6 +33,7 @@ from .exception import (
     ChecksumMismatch,
     Conflict,
     CopySourceTooBig,
+    DisablingFileLockNotSupported,
     DuplicateBucketName,
     FileNotPresent,
     FileSha1Mismatch,
@@ -42,6 +43,7 @@ from .exception import (
     NonExistentBucket,
     PartSha1Mismatch,
     SSECKeyError,
+    SourceReplicationConflict,
     Unauthorized,
     UnsatisfiableRange,
 )
@@ -89,6 +91,7 @@ class KeySimulator:
         self.capabilities = capabilities
         self.expiration_timestamp_or_none = expiration_timestamp_or_none
         self.bucket_id_or_none = bucket_id_or_none
+        self.bucket_name_or_none = bucket_name_or_none
         self.name_prefix_or_none = name_prefix_or_none
 
     def as_key(self):
@@ -119,6 +122,7 @@ class KeySimulator:
         """
         return dict(
             bucketId=self.bucket_id_or_none,
+            bucketName=self.bucket_name_or_none,
             capabilities=self.capabilities,
             namePrefix=self.name_prefix_or_none,
         )
@@ -667,7 +671,8 @@ class BucketSimulator:
         return self.file_id_to_file[file_id].as_upload_result(account_auth_token)
 
     def get_file_info_by_name(self, account_auth_token, file_name):
-        for ((name, id), file) in self.file_name_and_id_to_file.items():
+        # Sorting files by name and ID, so lower ID (newer upload) is returned first.
+        for ((name, id), file) in sorted(self.file_name_and_id_to_file.items()):
             if file_name == name:
                 return file.as_download_headers(account_auth_token_or_none=account_auth_token)
         raise FileNotPresent(file_id_or_name=file_name, bucket_name=self.bucket_name)
@@ -922,14 +927,20 @@ class BucketSimulator:
         server_side_encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         file_id = self._next_file_id()
         sse = server_side_encryption or self.default_server_side_encryption
         if sse:  # FIXME: remove this part when RawApi<->Encryption adapters are implemented properly
             file_info = sse.add_key_id_to_file_info(file_info)
+
+        upload_timestamp = next(self.upload_timestamp_counter)
+        if custom_upload_timestamp is not None:
+            upload_timestamp = custom_upload_timestamp
+
         file_sim = self.FILE_SIMULATOR_CLASS(
             self.account_id, self, file_id, 'start', file_name, content_type, 'none',
-            file_info, None, next(self.upload_timestamp_counter), server_side_encryption=sse,
+            file_info, None, upload_timestamp, server_side_encryption=sse,
             file_retention=file_retention, legal_hold=legal_hold,
         )  # yapf: disable
         self.file_id_to_file[file_id] = file_sim
@@ -946,10 +957,23 @@ class BucketSimulator:
         default_server_side_encryption: Optional[EncryptionSetting] = None,
         default_retention: Optional[BucketRetentionSetting] = None,
         replication: Optional[ReplicationConfiguration] = None,
+        is_file_lock_enabled: Optional[bool] = None,
     ):
         if if_revision_is is not None and self.revision != if_revision_is:
             raise Conflict()
 
+        if is_file_lock_enabled is not None:
+            if self.is_file_lock_enabled and not is_file_lock_enabled:
+                raise DisablingFileLockNotSupported()
+
+            if (
+                not self.is_file_lock_enabled and is_file_lock_enabled and self.replication and
+                self.replication.is_source
+            ):
+                raise SourceReplicationConflict()
+
+            self.is_file_lock_enabled = is_file_lock_enabled
+
         if bucket_type is not None:
             self.bucket_type = bucket_type
         if bucket_info is not None:
@@ -962,8 +986,10 @@ class BucketSimulator:
             self.default_server_side_encryption = default_server_side_encryption
         if default_retention:
             self.default_retention = default_retention
+        if replication is not None:
+            self.replication = replication
+
         self.revision += 1
-        self.replication = replication
         return self.bucket_dict(self.api.current_token)
 
     def upload_file(
@@ -979,6 +1005,7 @@ class BucketSimulator:
         server_side_encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         data_bytes = self._simulate_chunked_post(data_stream, content_length)
         assert len(data_bytes) == content_length
@@ -999,6 +1026,10 @@ class BucketSimulator:
         if encryption:  # FIXME: remove this part when RawApi<->Encryption adapters are implemented properly
             file_infos = encryption.add_key_id_to_file_info(file_infos)
 
+        upload_timestamp = next(self.upload_timestamp_counter)
+        if custom_upload_timestamp is not None:
+            upload_timestamp = custom_upload_timestamp
+
         file_sim = self.FILE_SIMULATOR_CLASS(
             self.account_id,
             self,
@@ -1009,7 +1040,7 @@ class BucketSimulator:
             content_sha1,
             file_infos,
             data_bytes,
-            next(self.upload_timestamp_counter),
+            upload_timestamp,
             server_side_encryption=encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
@@ -1288,10 +1319,13 @@ class RawSimulator(AbstractRawApi):
         self.app_key_counter += 1
         application_key_id = 'appKeyId%d' % (index,)
         app_key = 'appKey%d' % (index,)
-        if bucket_id is None:
-            bucket_name_or_none = None
-        else:
-            bucket_name_or_none = self._get_bucket_by_id(bucket_id).bucket_name
+        bucket_name_or_none = None
+        if bucket_id is not None:
+            # It is possible for bucketId to be filled and bucketName to be empty.
+            # It can happen when the bucket was deleted.
+            with suppress(NonExistentBucket):
+                bucket_name_or_none = self._get_bucket_by_id(bucket_id).bucket_name
+
         key_sim = KeySimulator(
             account_id=account_id,
             name=key_name,
@@ -1400,6 +1434,9 @@ class RawSimulator(AbstractRawApi):
                 'application key does not exist: %s' % (application_key_id,),
                 'bad_request',
             )
+        self.all_application_keys = [
+            key for key in self.all_application_keys if key.application_key_id != application_key_id
+        ]
         return key_sim.as_key()
 
     def finish_large_file(self, api_url, account_auth_token, file_id, part_sha1_array):
@@ -1683,6 +1720,7 @@ class RawSimulator(AbstractRawApi):
         server_side_encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         bucket = self._get_bucket_by_id(bucket_id)
         self._assert_account_auth(api_url, account_auth_token, bucket.account_id, 'writeFiles')
@@ -1694,6 +1732,7 @@ class RawSimulator(AbstractRawApi):
             server_side_encryption,
             file_retention,
             legal_hold,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
         self.file_id_to_bucket_id[result['fileId']] = bucket_id
 
@@ -1713,8 +1752,9 @@ class RawSimulator(AbstractRawApi):
         default_server_side_encryption: Optional[EncryptionSetting] = None,
         default_retention: Optional[BucketRetentionSetting] = None,
         replication: Optional[ReplicationConfiguration] = None,
+        is_file_lock_enabled: Optional[bool] = None,
     ):
-        assert bucket_type or bucket_info or cors_rules or lifecycle_rules or default_server_side_encryption or replication
+        assert bucket_type or bucket_info or cors_rules or lifecycle_rules or default_server_side_encryption or replication or is_file_lock_enabled is not None
         bucket = self._get_bucket_by_id(bucket_id)
         self._assert_account_auth(api_url, account_auth_token, bucket.account_id, 'writeBuckets')
         return bucket._update_bucket(
@@ -1726,6 +1766,7 @@ class RawSimulator(AbstractRawApi):
             default_server_side_encryption=default_server_side_encryption,
             default_retention=default_retention,
             replication=replication,
+            is_file_lock_enabled=is_file_lock_enabled,
         )
 
     @classmethod
@@ -1740,6 +1781,7 @@ class RawSimulator(AbstractRawApi):
         server_side_encryption: Optional[EncryptionSetting],
         file_retention: Optional[FileRetentionSetting],
         legal_hold: Optional[LegalHold],
+        custom_upload_timestamp: Optional[int] = None,
     ) -> dict:
 
         # fix to allow calculating headers on unknown key - only for simulation
@@ -1758,6 +1800,7 @@ class RawSimulator(AbstractRawApi):
             server_side_encryption=server_side_encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def upload_file(
@@ -1773,6 +1816,7 @@ class RawSimulator(AbstractRawApi):
         server_side_encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         with ConcurrentUsedAuthTokenGuard(
             self.currently_used_auth_tokens[upload_auth_token], upload_auth_token
@@ -1803,6 +1847,7 @@ class RawSimulator(AbstractRawApi):
                 server_side_encryption=server_side_encryption,
                 file_retention=file_retention,
                 legal_hold=legal_hold,
+                custom_upload_timestamp=custom_upload_timestamp,
             )
 
             response = bucket.upload_file(
@@ -1817,6 +1862,7 @@ class RawSimulator(AbstractRawApi):
                 server_side_encryption,
                 file_retention,
                 legal_hold,
+                custom_upload_timestamp,
             )
             file_id = response['fileId']
             self.file_id_to_bucket_id[file_id] = bucket_id
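
The simulator now prefers an explicit custom_upload_timestamp over its internal counter. The same pattern in isolation (the counter's starting value is an arbitrary choice for this sketch):

    import itertools

    upload_timestamp_counter = itertools.count(5000)

    def pick_upload_timestamp(custom_upload_timestamp=None) -> int:
        upload_timestamp = next(upload_timestamp_counter)
        if custom_upload_timestamp is not None:
            upload_timestamp = custom_upload_timestamp
        return upload_timestamp

    print(pick_upload_timestamp())               # 5000, taken from the counter
    print(pick_upload_timestamp(1234567890123))  # 1234567890123, the caller's value wins
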
diff --git a/b2sdk/replication/monitoring.py b/b2sdk/replication/monitoring.py
index ce1936e..bc0e51e 100644
--- a/b2sdk/replication/monitoring.py
+++ b/b2sdk/replication/monitoring.py
@@ -41,7 +41,7 @@ class ReplicationScanResult(AbstractScanResult):
     # source attrs
     source_replication_status: Optional[ReplicationStatus] = None
     source_has_hide_marker: Optional[bool] = None
-    source_has_sse_c_enabled: Optional[bool] = None
+    source_encryption_mode: Optional[EncryptionMode] = None
     source_has_large_metadata: Optional[bool] = None
     source_has_file_retention: Optional[bool] = None
     source_has_legal_hold: Optional[bool] = None
@@ -69,8 +69,8 @@ class ReplicationScanResult(AbstractScanResult):
                         source_file_version.replication_status,
                     'source_has_hide_marker':
                         not source_file.is_visible(),
-                    'source_has_sse_c_enabled':
-                        source_file_version.server_side_encryption.mode == EncryptionMode.SSE_C,
+                    'source_encryption_mode':
+                        source_file_version.server_side_encryption.mode,
                     'source_has_large_metadata':
                         source_file_version.has_large_header,
                     'source_has_file_retention':
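
Code that previously read the source_has_sse_c_enabled boolean can recover the same information from the new source_encryption_mode field; a migration sketch:

    from b2sdk.v2 import EncryptionMode

    def source_has_sse_c_enabled(result) -> bool:
        # result is a ReplicationScanResult as constructed above
        return result.source_encryption_mode == EncryptionMode.SSE_C
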
diff --git a/b2sdk/scan/folder.py b/b2sdk/scan/folder.py
index aea3a1a..c57f369 100644
--- a/b2sdk/scan/folder.py
+++ b/b2sdk/scan/folder.py
@@ -15,7 +15,7 @@ import re
 import sys
 
 from abc import ABCMeta, abstractmethod
-from typing import Iterator
+from typing import Iterator, Optional
 
 from ..utils import fix_windows_path_limit, get_file_mtime, is_file_readable
 from .exception import EmptyDirectory, EnvironmentEncodingError, NotADirectory, UnableToCreateDirectory, UnsupportedFilename
@@ -52,7 +52,7 @@ class AbstractFolder(metaclass=ABCMeta):
     """
 
     @abstractmethod
-    def all_files(self, reporter: ProgressReport,
+    def all_files(self, reporter: Optional[ProgressReport],
                   policies_manager=DEFAULT_SCAN_MANAGER) -> Iterator[AbstractPath]:
         """
         Return an iterator over all of the files in the folder, in
@@ -124,7 +124,7 @@ class LocalFolder(AbstractFolder):
         """
         return 'local'
 
-    def all_files(self, reporter: ProgressReport,
+    def all_files(self, reporter: Optional[ProgressReport],
                   policies_manager=DEFAULT_SCAN_MANAGER) -> Iterator[LocalPath]:
         """
         Yield all files.
@@ -319,7 +319,7 @@ class B2Folder(AbstractFolder):
 
     def all_files(
         self,
-        reporter: ProgressReport,
+        reporter: Optional[ProgressReport],
         policies_manager: ScanPoliciesManager = DEFAULT_SCAN_MANAGER
     ) -> Iterator[B2Path]:
         """
diff --git a/b2sdk/session.py b/b2sdk/session.py
index 7cb5e06..ec68805 100644
--- a/b2sdk/session.py
+++ b/b2sdk/session.py
@@ -131,7 +131,7 @@ class B2Session:
             realm=realm,
             s3_api_url=response['s3ApiUrl'],
             allowed=allowed,
-            application_key_id=application_key_id
+            application_key_id=application_key_id,
         )
 
     def cancel_large_file(self, file_id):
@@ -296,6 +296,7 @@ class B2Session:
         server_side_encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         return self._wrap_default_token(
             self.raw_api.start_large_file,
@@ -306,6 +307,7 @@ class B2Session:
             server_side_encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def update_bucket(
@@ -320,6 +322,7 @@ class B2Session:
         default_server_side_encryption: Optional[EncryptionSetting] = None,
         default_retention: Optional[BucketRetentionSetting] = None,
         replication: Optional[ReplicationConfiguration] = None,
+        is_file_lock_enabled: Optional[bool] = None,
     ):
         return self._wrap_default_token(
             self.raw_api.update_bucket,
@@ -333,6 +336,7 @@ class B2Session:
             default_server_side_encryption=default_server_side_encryption,
             default_retention=default_retention,
             replication=replication,
+            is_file_lock_enabled=is_file_lock_enabled,
         )
 
     def upload_file(
@@ -347,6 +351,7 @@ class B2Session:
         server_side_encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         return self._wrap_token(
             self.raw_api.upload_file,
@@ -361,6 +366,7 @@ class B2Session:
             server_side_encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def upload_part(
diff --git a/b2sdk/stream/progress.py b/b2sdk/stream/progress.py
index 9b6b8f1..38cb3ab 100644
--- a/b2sdk/stream/progress.py
+++ b/b2sdk/stream/progress.py
@@ -36,6 +36,9 @@ class AbstractStreamWithProgress(StreamWrapper):
         self.bytes_completed += delta
         self.progress_listener.bytes_completed(self.bytes_completed + self.offset)
 
+    def __str__(self):
+        return str(self.stream)
+
 
 class ReadingStreamWithProgress(AbstractStreamWithProgress):
     """
diff --git a/b2sdk/sync/action.py b/b2sdk/sync/action.py
index 4a429f0..3f3eadc 100644
--- a/b2sdk/sync/action.py
+++ b/b2sdk/sync/action.py
@@ -11,10 +11,15 @@
 import logging
 import os
 from abc import ABCMeta, abstractmethod
+from contextlib import suppress
+from typing import List, Optional
 
 from ..bucket import Bucket
+from ..file_version import FileVersion
 from ..http_constants import SRC_LAST_MODIFIED_MILLIS
 from ..scan.path import B2Path
+from ..sync.report import ProgressReport, SyncReport
+from ..transfer.outbound.outbound_source import OutboundTransferSource
 from ..transfer.outbound.upload_source import UploadSourceLocalFile
 from .encryption_provider import AbstractSyncEncryptionSettingsProvider
 from .report import SyncFileReporter
@@ -33,7 +38,7 @@ class AbstractAction(metaclass=ABCMeta):
     UploadFileAction.
     """
 
-    def run(self, bucket, reporter, dry_run=False):
+    def run(self, bucket: Bucket, reporter: ProgressReport, dry_run: bool = False):
         """
         Main action routine.
 
@@ -53,30 +58,26 @@ class AbstractAction(metaclass=ABCMeta):
             raise  # Re-throw so we can identify failed actions
 
     @abstractmethod
-    def get_bytes(self):
+    def get_bytes(self) -> int:
         """
         Return the number of bytes to transfer for this action.
-
-        :rtype: int
         """
 
     @abstractmethod
-    def do_action(self, bucket, reporter):
+    def do_action(self, bucket: Bucket, reporter: ProgressReport) -> None:
         """
         Perform the action, returning only after the action is completed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
 
     @abstractmethod
-    def do_report(self, bucket, reporter):
+    def do_report(self, bucket: Bucket, reporter: ProgressReport) -> None:
         """
         Report the action performed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
 
@@ -88,20 +89,20 @@ class B2UploadAction(AbstractAction):
 
     def __init__(
         self,
-        local_full_path,
-        relative_name,
-        b2_file_name,
-        mod_time_millis,
-        size,
+        local_full_path: str,
+        relative_name: str,
+        b2_file_name: str,
+        mod_time_millis: int,
+        size: int,
         encryption_settings_provider: AbstractSyncEncryptionSettingsProvider,
     ):
         """
-        :param str local_full_path: a local file path
-        :param str relative_name: a relative file name
-        :param str b2_file_name: a name of a new remote file
-        :param int mod_time_millis: file modification time in milliseconds
-        :param int size: a file size
-        :param b2sdk.v2.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
+        :param local_full_path: a local file path
+        :param relative_name: a relative file name
+        :param b2_file_name: a name of a new remote file
+        :param mod_time_millis: file modification time in milliseconds
+        :param size: a file size
+        :param encryption_settings_provider: encryption setting provider
         """
         self.local_full_path = local_full_path
         self.relative_name = relative_name
@@ -109,20 +110,34 @@ class B2UploadAction(AbstractAction):
         self.mod_time_millis = mod_time_millis
         self.size = size
         self.encryption_settings_provider = encryption_settings_provider
+        self.large_file_sha1 = None
+        # TODO: Remove once we drop Python 3.7 support
+        self.cached_upload_source = None
 
-    def get_bytes(self):
+    def get_bytes(self) -> int:
         """
         Return file size.
-
-        :rtype: int
         """
         return self.size
 
-    def do_action(self, bucket, reporter):
+    @property
+    # TODO: Use @functools.cached_property once we drop Python 3.7 support
+    def _upload_source(self) -> UploadSourceLocalFile:
+        """ Upload source if the file was to be uploaded in full """
+        # NOTE: We're caching this to ensure that sha1 is not recalculated.
+        if self.cached_upload_source is None:
+            self.cached_upload_source = UploadSourceLocalFile(self.local_full_path)
+        return self.cached_upload_source
+
+    def get_all_sources(self) -> List[OutboundTransferSource]:
+        """ Get list of sources required to complete this upload """
+        return [self._upload_source]
+
+    def do_action(self, bucket: Bucket, reporter: ProgressReport) -> None:
         """
         Perform the uploading action, returning only after the action is completed.
 
-        :param b2sdk.v2.Bucket bucket: a Bucket object
+        :param bucket: a Bucket object
         :param reporter: a place to report errors
         """
         if reporter:
@@ -136,42 +151,83 @@ class B2UploadAction(AbstractAction):
             file_info=file_info,
             length=self.size,
         )
-        bucket.upload(
-            UploadSourceLocalFile(self.local_full_path),
+
+        sources = self.get_all_sources()
+        large_file_sha1 = None
+
+        if len(sources) > 1:
+            # The upload will be incremental, calculate the large_file_sha1
+            large_file_sha1 = self._upload_source.get_content_sha1()
+
+        bucket.concatenate(
+            sources,
             self.b2_file_name,
-            file_info=file_info,
             progress_listener=progress_listener,
+            file_info=file_info,
             encryption=encryption,
+            large_file_sha1=large_file_sha1,
         )
 
-    def do_report(self, bucket, reporter):
+    def do_report(self, bucket: Bucket, reporter: ProgressReport) -> None:
         """
         Report the uploading action performed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         reporter.print_completion('upload ' + self.relative_name)
 
-    def __str__(self):
+    def __str__(self) -> str:
         return 'b2_upload(%s, %s, %s)' % (
             self.local_full_path, self.b2_file_name, self.mod_time_millis
         )
 
 
+class B2IncrementalUploadAction(B2UploadAction):
+    def __init__(
+        self,
+        local_full_path: str,
+        relative_name: str,
+        b2_file_name: str,
+        mod_time_millis: int,
+        size: int,
+        encryption_settings_provider: AbstractSyncEncryptionSettingsProvider,
+        file_version: Optional[FileVersion] = None,
+        absolute_minimum_part_size: Optional[int] = None,
+    ):
+        """
+        :param local_full_path: a local file path
+        :param relative_name: a relative file name
+        :param b2_file_name: a name of a new remote file
+        :param mod_time_millis: file modification time in milliseconds
+        :param size: a file size
+        :param encryption_settings_provider: encryption setting provider
+        :param file_version: version of file currently on the server
+        :param absolute_minimum_part_size: minimum file part size for large files
+        """
+        super().__init__(
+            local_full_path, relative_name, b2_file_name, mod_time_millis, size,
+            encryption_settings_provider
+        )
+        self.file_version = file_version
+        self.absolute_minimum_part_size = absolute_minimum_part_size
+
+    def get_all_sources(self) -> List[OutboundTransferSource]:
+        return self._upload_source.get_incremental_sources(
+            self.file_version, self.absolute_minimum_part_size
+        )
+
+
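
A standalone sketch of the decision encoded in do_action and B2IncrementalUploadAction above: one source means a plain upload, several sources mean an incremental upload, and only then is the whole-file SHA-1 computed and passed to concatenate. Assumes an authorized bucket object as in the earlier sketches; the path and file name are placeholders:

    from b2sdk.v2 import UploadSourceLocalFile

    upload_source = UploadSourceLocalFile("/tmp/data/big.bin")

    # B2IncrementalUploadAction would instead use
    # upload_source.get_incremental_sources(file_version, absolute_minimum_part_size)
    sources = [upload_source]

    large_file_sha1 = None
    if len(sources) > 1:
        # the server cannot derive the full-file SHA-1 across copied and uploaded
        # ranges, so it is computed locally and stored in the file info
        large_file_sha1 = upload_source.get_content_sha1()

    bucket.concatenate(sources, "big.bin", large_file_sha1=large_file_sha1)
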
 class B2HideAction(AbstractAction):
-    def __init__(self, relative_name, b2_file_name):
+    def __init__(self, relative_name: str, b2_file_name: str):
         """
         :param relative_name: a relative file name
-        :type relative_name: str
         :param b2_file_name: a name of a remote file
-        :type b2_file_name: str
         """
         self.relative_name = relative_name
         self.b2_file_name = b2_file_name
 
-    def get_bytes(self):
+    def get_bytes(self) -> int:
         """
         Return file size.
 
@@ -180,28 +236,27 @@ class B2HideAction(AbstractAction):
         """
         return 0
 
-    def do_action(self, bucket, reporter):
+    def do_action(self, bucket: Bucket, reporter: ProgressReport) -> None:
         """
         Perform the hiding action, returning only after the action is completed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         bucket.hide_file(self.b2_file_name)
 
-    def do_report(self, bucket, reporter):
+    # TODO: This function uses SyncReport.update_transfer, while the ProgressReport interface is enough for the others.
+    def do_report(self, bucket: Bucket, reporter: SyncReport):
         """
         Report the hiding action performed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         reporter.update_transfer(1, 0)
         reporter.print_completion('hide   ' + self.relative_name)
 
-    def __str__(self):
+    def __str__(self) -> str:
         return 'b2_hide(%s)' % (self.b2_file_name,)
 
 
@@ -214,39 +269,37 @@ class B2DownloadAction(AbstractAction):
         encryption_settings_provider: AbstractSyncEncryptionSettingsProvider,
     ):
         """
-        :param b2sdk.v2.B2Path source_path: the file to be downloaded
-        :param str b2_file_name: b2_file_name
-        :param str local_full_path: a local file path
-        :param b2sdk.v2.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
+        :param source_path: the file to be downloaded
+        :param b2_file_name: b2_file_name
+        :param local_full_path: a local file path
+        :param encryption_settings_provider: encryption setting provider
         """
         self.source_path = source_path
         self.b2_file_name = b2_file_name
         self.local_full_path = local_full_path
         self.encryption_settings_provider = encryption_settings_provider
 
-    def get_bytes(self):
+    def get_bytes(self) -> int:
         """
         Return file size.
-
-        :rtype: int
         """
         return self.source_path.size
 
-    def _ensure_directory_existence(self):
+    def _ensure_directory_existence(self) -> None:
+        # TODO: this can fail for multiple reasons (e.g. path is a file, permissions etc).
+        #   We could provide nice exceptions for it.
         parent_dir = os.path.dirname(self.local_full_path)
         if not os.path.isdir(parent_dir):
-            try:
+            with suppress(OSError):
                 os.makedirs(parent_dir)
-            except OSError:
-                pass
         if not os.path.isdir(parent_dir):
             raise Exception('could not create directory %s' % (parent_dir,))
 
-    def do_action(self, bucket, reporter):
+    def do_action(self, bucket: Bucket, reporter: ProgressReport) -> None:
         """
         Perform the downloading action, returning only after the action is completed.
 
-        :param b2sdk.v2.Bucket bucket: a Bucket object
+        :param bucket: a Bucket object
         :param reporter: a place to report errors
         """
         self._ensure_directory_existence()
@@ -272,23 +325,20 @@ class B2DownloadAction(AbstractAction):
         downloaded_file.save_to(download_path)
 
         # Move the file into place
-        try:
+        with suppress(OSError):
             os.unlink(self.local_full_path)
-        except OSError:
-            pass
         os.rename(download_path, self.local_full_path)
 
-    def do_report(self, bucket, reporter):
+    def do_report(self, bucket: Bucket, reporter: ProgressReport) -> None:
         """
         Report the downloading action performed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         reporter.print_completion('dnload ' + self.source_path.relative_path)
 
-    def __str__(self):
+    def __str__(self) -> str:
         return (
             'b2_download(%s, %s, %s, %d)' % (
                 self.b2_file_name, self.source_path.selected_version.id_, self.local_full_path,
@@ -312,12 +362,12 @@ class B2CopyAction(AbstractAction):
         encryption_settings_provider: AbstractSyncEncryptionSettingsProvider,
     ):
         """
-        :param str b2_file_name: a b2_file_name
-        :param b2sdk.v2.B2Path source_path: the file to be copied
-        :param str dest_b2_file_name: a name of a destination remote file
-        :param Bucket source_bucket: bucket to copy from
-        :param Bucket destination_bucket: bucket to copy to
-        :param b2sdk.v2.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
+        :param b2_file_name: a b2_file_name
+        :param source_path: the file to be copied
+        :param dest_b2_file_name: a name of a destination remote file
+        :param source_bucket: bucket to copy from
+        :param destination_bucket: bucket to copy to
+        :param encryption_settings_provider: encryption setting provider
         """
         self.b2_file_name = b2_file_name
         self.source_path = source_path
@@ -326,20 +376,17 @@ class B2CopyAction(AbstractAction):
         self.source_bucket = source_bucket
         self.destination_bucket = destination_bucket
 
-    def get_bytes(self):
+    def get_bytes(self) -> int:
         """
         Return file size.
-
-        :rtype: int
         """
         return self.source_path.size
 
-    def do_action(self, bucket, reporter):
+    def do_action(self, bucket: Bucket, reporter: ProgressReport) -> None:
         """
         Perform the copying action, returning only after the action is completed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         if reporter:
@@ -369,17 +416,16 @@ class B2CopyAction(AbstractAction):
             source_content_type=self.source_path.selected_version.content_type,
         )
 
-    def do_report(self, bucket, reporter):
+    def do_report(self, bucket: Bucket, reporter: ProgressReport) -> None:
         """
         Report the copying action performed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         reporter.print_completion('copy ' + self.source_path.relative_path)
 
-    def __str__(self):
+    def __str__(self) -> str:
         return (
             'b2_copy(%s, %s, %s, %d)' % (
                 self.b2_file_name, self.source_path.selected_version.id_, self.dest_b2_file_name,
@@ -389,93 +435,87 @@ class B2CopyAction(AbstractAction):
 
 
 class B2DeleteAction(AbstractAction):
-    def __init__(self, relative_name, b2_file_name, file_id, note):
+    def __init__(self, relative_name: str, b2_file_name: str, file_id: str, note: str):
         """
-        :param str relative_name: a relative file name
-        :param str b2_file_name: a name of a remote file
-        :param str file_id: a file ID
-        :param str note: a deletion note
+        :param relative_name: a relative file name
+        :param b2_file_name: a name of a remote file
+        :param file_id: a file ID
+        :param note: a deletion note
         """
         self.relative_name = relative_name
         self.b2_file_name = b2_file_name
         self.file_id = file_id
         self.note = note
 
-    def get_bytes(self):
+    def get_bytes(self) -> int:
         """
         Return file size.
 
         :return: always zero
-        :rtype: int
         """
         return 0
 
-    def do_action(self, bucket, reporter):
+    def do_action(self, bucket: Bucket, reporter: ProgressReport):
         """
         Perform the deleting action, returning only after the action is completed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         bucket.api.delete_file_version(self.file_id, self.b2_file_name)
 
-    def do_report(self, bucket, reporter):
+    # TODO: This function uses SyncReport.update_transfer, while the ProgressReport interface is sufficient for the others.
+    def do_report(self, bucket: Bucket, reporter: SyncReport):
         """
         Report the deleting action performed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         reporter.update_transfer(1, 0)
         reporter.print_completion('delete ' + self.relative_name + ' ' + self.note)
 
-    def __str__(self):
+    def __str__(self) -> str:
         return 'b2_delete(%s, %s, %s)' % (self.b2_file_name, self.file_id, self.note)
 
 
 class LocalDeleteAction(AbstractAction):
-    def __init__(self, relative_name, full_path):
+    def __init__(self, relative_name: str, full_path: str):
         """
         :param relative_name: a relative file name
-        :type relative_name: str
         :param full_path: a full local path
-        :type: str
         """
         self.relative_name = relative_name
         self.full_path = full_path
 
-    def get_bytes(self):
+    def get_bytes(self) -> int:
         """
         Return file size.
 
         :return: always zero
-        :rtype: int
         """
         return 0
 
-    def do_action(self, bucket, reporter):
+    def do_action(self, bucket: Bucket, reporter: ProgressReport):
         """
         Perform the deleting of a local file action,
         returning only after the action is completed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         os.unlink(self.full_path)
 
-    def do_report(self, bucket, reporter):
+    # TODO: This function uses SyncReport.update_transfer, while the ProgressReport interface is sufficient for the others.
+    def do_report(self, bucket: Bucket, reporter: SyncReport):
         """
         Report the deleting of a local file action performed.
 
         :param bucket: a Bucket object
-        :type bucket: b2sdk.bucket.Bucket
         :param reporter: a place to report errors
         """
         reporter.update_transfer(1, 0)
         reporter.print_completion('delete ' + self.relative_name)
 
-    def __str__(self):
-        return 'local_delete(%s)' % (self.full_path)
+    def __str__(self) -> str:
+        return 'local_delete(%s)' % (self.full_path,)
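
The action.py hunks above replace a try/except-OSError/pass around os.unlink with contextlib.suppress, which is shorter and behaviorally identical. A minimal, self-contained sketch of the pattern, using hypothetical file-name arguments rather than anything from the SDK:

    import os
    from contextlib import suppress

    def move_into_place(download_path: str, final_path: str) -> None:
        # Remove any previous copy; ignore the error if none exists yet.
        with suppress(OSError):
            os.unlink(final_path)
        # Move the freshly downloaded file to its final location.
        os.rename(download_path, final_path)
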
diff --git a/b2sdk/sync/policy.py b/b2sdk/sync/policy.py
index f9a4e68..dec9797 100644
--- a/b2sdk/sync/policy.py
+++ b/b2sdk/sync/policy.py
@@ -12,13 +12,14 @@ import logging
 
 from abc import ABCMeta, abstractmethod
 from enum import Enum, unique
-from typing import Optional
+from typing import cast, Optional
 
 from ..exception import DestFileNewer
 from ..scan.exception import InvalidArgument
-from ..scan.folder import AbstractFolder
-from ..scan.path import AbstractPath
-from .action import B2CopyAction, B2DeleteAction, B2DownloadAction, B2HideAction, B2UploadAction, LocalDeleteAction
+from ..scan.folder import AbstractFolder, B2Folder
+from ..scan.path import AbstractPath, B2Path
+from ..transfer.outbound.upload_source import UploadMode
+from .action import B2CopyAction, B2DeleteAction, B2DownloadAction, B2HideAction, B2IncrementalUploadAction, B2UploadAction, LocalDeleteAction
 from .encryption_provider import SERVER_DEFAULT_SYNC_ENCRYPTION_SETTINGS_PROVIDER, AbstractSyncEncryptionSettingsProvider
 
 ONE_DAY_IN_MS = 24 * 60 * 60 * 1000
@@ -51,9 +52,9 @@ class AbstractFileSyncPolicy(metaclass=ABCMeta):
 
     def __init__(
         self,
-        source_path: AbstractPath,
+        source_path: Optional[AbstractPath],
         source_folder: AbstractFolder,
-        dest_path: AbstractPath,
+        dest_path: Optional[AbstractPath],
         dest_folder: AbstractFolder,
         now_millis: int,
         keep_days: int,
@@ -62,18 +63,22 @@ class AbstractFileSyncPolicy(metaclass=ABCMeta):
         compare_version_mode: CompareVersionMode = CompareVersionMode.MODTIME,
         encryption_settings_provider:
         AbstractSyncEncryptionSettingsProvider = SERVER_DEFAULT_SYNC_ENCRYPTION_SETTINGS_PROVIDER,
+        upload_mode: UploadMode = UploadMode.FULL,
+        absolute_minimum_part_size: Optional[int] = None,
     ):
         """
-        :param b2sdk.v2.AbstractPath source_path: source file object
-        :param b2sdk.v2.AbstractFolder source_folder: source folder object
-        :param b2sdk.v2.AbstractPath dest_path: destination file object
-        :param b2sdk.v2.AbstractFolder dest_folder: destination folder object
-        :param int now_millis: current time in milliseconds
-        :param int keep_days: days to keep before delete
-        :param b2sdk.v2.NewerFileSyncMode newer_file_mode: setting which determines handling for destination files newer than on the source
-        :param int compare_threshold: when comparing with size or time for sync
-        :param b2sdk.v2.CompareVersionMode compare_version_mode: how to compare source and destination files
-        :param b2sdk.v2.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
+        :param source_path: source file object
+        :param source_folder: source folder object
+        :param dest_path: destination file object
+        :param dest_folder: destination folder object
+        :param now_millis: current time in milliseconds
+        :param keep_days: days to keep before delete
+        :param newer_file_mode: setting which determines handling for destination files newer than on the source
+        :param compare_threshold: when comparing with size or time for sync
+        :param compare_version_mode: how to compare source and destination files
+        :param encryption_settings_provider: encryption setting provider
+        :param upload_mode: file upload mode
+        :param absolute_minimum_part_size: minimum file part size that can be uploaded to the server
         """
         self._source_path = source_path
         self._source_folder = source_folder
@@ -86,8 +91,10 @@ class AbstractFileSyncPolicy(metaclass=ABCMeta):
         self._now_millis = now_millis
         self._transferred = False
         self._encryption_settings_provider = encryption_settings_provider
+        self._upload_mode = upload_mode
+        self._absolute_minimum_part_size = absolute_minimum_part_size
 
-    def _should_transfer(self):
+    def _should_transfer(self) -> bool:
         """
         Decide whether to transfer the file from the source to the destination.
         """
@@ -209,7 +216,9 @@ class AbstractFileSyncPolicy(metaclass=ABCMeta):
         """
         return []
 
-    def _get_source_mod_time(self):
+    def _get_source_mod_time(self) -> int:
+        if self._source_path is None:
+            return 0
         return self._source_path.mod_time
 
     @abstractmethod
@@ -228,7 +237,7 @@ class DownPolicy(AbstractFileSyncPolicy):
 
     def _make_transfer_action(self):
         return B2DownloadAction(
-            self._source_path,
+            cast(B2Path, self._source_path),
             self._source_folder.make_full_path(self._source_path.relative_path),
             self._dest_folder.make_full_path(self._source_path.relative_path),
             self._encryption_settings_provider,
@@ -243,14 +252,27 @@ class UpPolicy(AbstractFileSyncPolicy):
     SOURCE_PREFIX = 'local://'
 
     def _make_transfer_action(self):
-        return B2UploadAction(
-            self._source_folder.make_full_path(self._source_path.relative_path),
-            self._source_path.relative_path,
-            self._dest_folder.make_full_path(self._source_path.relative_path),
-            self._get_source_mod_time(),
-            self._source_path.size,
-            self._encryption_settings_provider,
-        )
+        # Find out whether we want to append new bytes or replace the file completely
+        if self._upload_mode == UploadMode.INCREMENTAL and self._dest_path:
+            return B2IncrementalUploadAction(
+                self._source_folder.make_full_path(self._source_path.relative_path),
+                self._source_path.relative_path,
+                self._dest_folder.make_full_path(self._source_path.relative_path),
+                self._get_source_mod_time(),
+                self._source_path.size,
+                self._encryption_settings_provider,
+                cast(B2Path, self._dest_path).selected_version,
+                self._absolute_minimum_part_size,
+            )
+        else:
+            return B2UploadAction(
+                self._source_folder.make_full_path(self._source_path.relative_path),
+                self._source_path.relative_path,
+                self._dest_folder.make_full_path(self._source_path.relative_path),
+                self._get_source_mod_time(),
+                self._source_path.size,
+                self._encryption_settings_provider,
+            )
 
 
 class UpAndDeletePolicy(UpPolicy):
@@ -324,10 +346,10 @@ class CopyPolicy(AbstractFileSyncPolicy):
 
         return B2CopyAction(
             self._source_folder.make_full_path(self._source_path.relative_path),
-            self._source_path,
+            cast(B2Path, self._source_path),
             self._dest_folder.make_full_path(self._source_path.relative_path),
-            self._source_folder.bucket,
-            self._dest_folder.bucket,
+            cast(B2Folder, self._source_folder).bucket,
+            cast(B2Folder, self._dest_folder).bucket,
             self._encryption_settings_provider,
         )
 
@@ -385,18 +407,18 @@ def make_b2_delete_note(version, index, transferred):
 
 
 def make_b2_delete_actions(
-    source_path: AbstractPath,
-    dest_path: AbstractPath,
+    source_path: Optional[AbstractPath],
+    dest_path: Optional[B2Path],
     dest_folder: AbstractFolder,
     transferred: bool,
 ):
     """
     Create the actions to delete files stored on B2, which are not present locally.
 
-    :param b2sdk.v2.AbstractPath source_path: source file object
-    :param b2sdk.v2.AbstractPath dest_path: destination file object
-    :param b2sdk.v2.AbstractFolder dest_folder: destination folder
-    :param bool transferred: if True, file has been transferred, False otherwise
+    :param source_path: source file object
+    :param dest_path: destination file object
+    :param dest_folder: destination folder
+    :param transferred: if True, file has been transferred, False otherwise
     """
     if dest_path is None:
         # B2 does not really store folders, so there is no need to hide
@@ -414,8 +436,8 @@ def make_b2_delete_actions(
 
 
 def make_b2_keep_days_actions(
-    source_path: AbstractPath,
-    dest_path: AbstractPath,
+    source_path: Optional[AbstractPath],
+    dest_path: Optional[B2Path],
     dest_folder: AbstractFolder,
     transferred: bool,
     keep_days: int,
@@ -428,15 +450,15 @@ def make_b2_keep_days_actions(
     When keepDays is set, all files that were visible any time from
     keepDays ago until now must be kept.  If versions were uploaded 5
     days ago, 15 days ago, and 25 days ago, and the keepDays is 10,
-    only the 25-day old version can be deleted.  The 15 day-old version
+    only the 25-day-old version can be deleted.  The 15-day-old version
     was visible 10 days ago.
 
-    :param b2sdk.v2.AbstractPath source_path: source file object
-    :param b2sdk.v2.AbstractPath dest_path: destination file object
-    :param b2sdk.v2.AbstractFolder dest_folder: destination folder object
-    :param bool transferred: if True, file has been transferred, False otherwise
-    :param int keep_days: how many days to keep a file
-    :param int now_millis: current time in milliseconds
+    :param source_path: source file object
+    :param dest_path: destination file object
+    :param dest_folder: destination folder object
+    :param transferred: if True, file has been transferred, False otherwise
+    :param keep_days: how many days to keep a file
+    :param now_millis: current time in milliseconds
     """
     deleting = False
     if dest_path is None:
@@ -455,7 +477,7 @@ def make_b2_keep_days_actions(
         # assert that age_days is non-decreasing.
         #
         # Note that if there is an out-of-order date that is old enough
-        # to trigger deletions, all of the versions uploaded before that
+        # to trigger deletions, all the versions uploaded before that
         # (the ones after it in the list) will be deleted, even if they
         # aren't over the age threshold.
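
In the UpPolicy hunk above, the policy now chooses an incremental upload action only when UploadMode.INCREMENTAL is requested and a destination file already exists; otherwise it falls back to a full upload. A simplified, self-contained sketch of that decision (the enum values mirror the diff; everything else is illustrative):

    from enum import Enum, auto
    from typing import Optional

    class UploadMode(Enum):
        FULL = auto()
        INCREMENTAL = auto()

    def choose_upload_kind(upload_mode: UploadMode, dest_path: Optional[str]) -> str:
        # Appending only makes sense when the destination already has a version
        # to append to; otherwise the whole file has to be uploaded.
        if upload_mode is UploadMode.INCREMENTAL and dest_path is not None:
            return "incremental"
        return "full"

    assert choose_upload_kind(UploadMode.INCREMENTAL, None) == "full"
    assert choose_upload_kind(UploadMode.INCREMENTAL, "photos/a.jpg") == "incremental"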
 
diff --git a/b2sdk/sync/policy_manager.py b/b2sdk/sync/policy_manager.py
index f8f8e0b..da5a3fa 100644
--- a/b2sdk/sync/policy_manager.py
+++ b/b2sdk/sync/policy_manager.py
@@ -8,10 +8,15 @@
 #
 ######################################################################
 
+from typing import Optional
+
+from ..scan.folder import AbstractFolder
 from ..scan.path import AbstractPath
-from .policy import CopyAndDeletePolicy, CopyAndKeepDaysPolicy, CopyPolicy, \
-    DownAndDeletePolicy, DownAndKeepDaysPolicy, DownPolicy, UpAndDeletePolicy, \
-    UpAndKeepDaysPolicy, UpPolicy
+from ..transfer.outbound.upload_source import UploadMode
+from .policy import AbstractFileSyncPolicy, CompareVersionMode, CopyAndDeletePolicy, \
+    CopyAndKeepDaysPolicy, CopyPolicy, DownAndDeletePolicy, DownAndKeepDaysPolicy, \
+    DownPolicy, NewerFileSyncMode, UpAndDeletePolicy, UpAndKeepDaysPolicy, UpPolicy
+from .encryption_provider import AbstractSyncEncryptionSettingsProvider
 
 
 class SyncPolicyManager:
@@ -25,34 +30,38 @@ class SyncPolicyManager:
 
     def get_policy(
         self,
-        sync_type,
-        source_path: AbstractPath,
-        source_folder,
-        dest_path: AbstractPath,
-        dest_folder,
-        now_millis,
-        delete,
-        keep_days,
-        newer_file_mode,
-        compare_threshold,
-        compare_version_mode,
-        encryption_settings_provider,
-    ):
+        sync_type: str,
+        source_path: Optional[AbstractPath],
+        source_folder: AbstractFolder,
+        dest_path: Optional[AbstractPath],
+        dest_folder: AbstractFolder,
+        now_millis: int,
+        delete: bool,
+        keep_days: int,
+        newer_file_mode: NewerFileSyncMode,
+        compare_threshold: int,
+        compare_version_mode: CompareVersionMode,
+        encryption_settings_provider: AbstractSyncEncryptionSettingsProvider,
+        upload_mode: UploadMode,
+        absolute_minimum_part_size: int,
+    ) -> AbstractFileSyncPolicy:
         """
         Return a policy object.
 
-        :param str sync_type: synchronization type
-        :param b2sdk.v2.AbstractPath source_path: source file
-        :param str source_folder: a source folder path
-        :param b2sdk.v2.AbstractPath dest_path: destination file
-        :param str dest_folder: a destination folder path
-        :param int now_millis: current time in milliseconds
-        :param bool delete: delete policy
-        :param int keep_days: keep for days policy
-        :param b2sdk.v2.NewerFileSyncMode newer_file_mode: setting which determines handling for destination files newer than on the source
-        :param int compare_threshold: difference between file modification time or file size
-        :param b2sdk.v2.CompareVersionMode compare_version_mode: setting which determines how to compare source and destination files
-        :param b2sdk.v2.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: an object which decides which encryption to use (if any)
+        :param sync_type: synchronization type
+        :param source_path: source file
+        :param source_folder: a source folder path
+        :param dest_path: destination file
+        :param dest_folder: a destination folder path
+        :param now_millis: current time in milliseconds
+        :param delete: delete policy
+        :param keep_days: keep for days policy
+        :param newer_file_mode: setting which determines handling for destination files newer than on the source
+        :param compare_threshold: difference between file modification time or file size
+        :param compare_version_mode: setting which determines how to compare source and destination files
+        :param encryption_settings_provider: an object which decides which encryption to use (if any)
+        :param upload_mode: determines how file uploads are handled
+        :param absolute_minimum_part_size: minimum file part size for large files
         :return: a policy object
         """
         policy_class = self.get_policy_class(sync_type, delete, keep_days)
@@ -67,6 +76,8 @@ class SyncPolicyManager:
             compare_threshold,
             compare_version_mode,
             encryption_settings_provider,
+            upload_mode,
+            absolute_minimum_part_size,
         )
 
     def get_policy_class(self, sync_type, delete, keep_days):
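
SyncPolicyManager.get_policy gains full type annotations and simply forwards the two new settings, upload_mode and absolute_minimum_part_size, into the selected policy's constructor. A toy sketch of that pass-through (DummyPolicy and get_policy below are hypothetical stand-ins, not SDK classes):

    class DummyPolicy:
        def __init__(self, upload_mode="full", absolute_minimum_part_size=None):
            self.upload_mode = upload_mode
            self.absolute_minimum_part_size = absolute_minimum_part_size

    def get_policy(policy_class, upload_mode, absolute_minimum_part_size):
        # The manager only selects the class; the new settings are handed
        # straight to the policy, which decides how to use them.
        return policy_class(
            upload_mode=upload_mode,
            absolute_minimum_part_size=absolute_minimum_part_size,
        )

    policy = get_policy(DummyPolicy, "incremental", 5_000_000)
    assert policy.absolute_minimum_part_size == 5_000_000
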
diff --git a/b2sdk/sync/sync.py b/b2sdk/sync/sync.py
index 7381b88..c57c5a7 100644
--- a/b2sdk/sync/sync.py
+++ b/b2sdk/sync/sync.py
@@ -12,13 +12,15 @@ import concurrent.futures as futures
 import logging
 
 from enum import Enum, unique
+from typing import cast, Optional
 
 from ..bounded_queue_executor import BoundedQueueExecutor
 from ..scan.exception import InvalidArgument
-from ..scan.folder import AbstractFolder
+from ..scan.folder import AbstractFolder, LocalFolder, B2Folder
 from ..scan.path import AbstractPath
-from ..scan.policies import DEFAULT_SCAN_MANAGER
+from ..scan.policies import DEFAULT_SCAN_MANAGER, ScanPoliciesManager
 from ..scan.scan import zip_folders
+from ..transfer.outbound.upload_source import UploadMode
 from .encryption_provider import SERVER_DEFAULT_SYNC_ENCRYPTION_SETTINGS_PROVIDER, AbstractSyncEncryptionSettingsProvider
 from .exception import IncompleteSync
 from .policy import CompareVersionMode, NewerFileSyncMode
@@ -87,6 +89,8 @@ class Synchronizer:
         compare_threshold=None,
         keep_days=None,
         sync_policy_manager: SyncPolicyManager = POLICY_MANAGER,
+        upload_mode: UploadMode = UploadMode.FULL,
+        absolute_minimum_part_size: Optional[int] = None,
     ):
         """
         Initialize synchronizer class and validate arguments
@@ -101,6 +105,8 @@ class Synchronizer:
         :param int compare_threshold: should be greater than 0, default is 0
         :param int keep_days: if keep_days_or_delete is `b2sdk.v2.KeepOrDeleteMode.KEEP_BEFORE_DELETE`, then this should be greater than 0
         :param SyncPolicyManager sync_policy_manager: object which decides what to do with each file (upload, download, delete, copy, hide etc)
+        :param b2sdk.v2.UploadMode upload_mode: determines how file uploads are handled
+        :param int absolute_minimum_part_size: minimum file part size for large files
         """
         self.newer_file_mode = newer_file_mode
         self.keep_days_or_delete = keep_days_or_delete
@@ -112,6 +118,8 @@ class Synchronizer:
         self.policies_manager = policies_manager  # actually it should be called scan_policies_manager
         self.sync_policy_manager = sync_policy_manager
         self.max_workers = max_workers
+        self.upload_mode = upload_mode
+        self.absolute_minimum_part_size = absolute_minimum_part_size
         self._validate()
 
     def _validate(self):
@@ -144,10 +152,10 @@ class Synchronizer:
 
     def sync_folders(
         self,
-        source_folder,
-        dest_folder,
-        now_millis,
-        reporter,
+        source_folder: AbstractFolder,
+        dest_folder: AbstractFolder,
+        now_millis: int,
+        reporter: Optional[SyncReport],
         encryption_settings_provider:
         AbstractSyncEncryptionSettingsProvider = SERVER_DEFAULT_SYNC_ENCRYPTION_SETTINGS_PROVIDER,
     ):
@@ -156,11 +164,11 @@ class Synchronizer:
         source is also in the destination.  Deletes any file versions
         in the destination older than history_days.
 
-        :param b2sdk.scan.folder.AbstractFolder source_folder: source folder object
-        :param b2sdk.scan.folder.AbstractFolder dest_folder: destination folder object
-        :param int now_millis: current time in milliseconds
-        :param b2sdk.sync.report.SyncReport,None reporter: progress reporter
-        :param b2sdk.v2.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
+        :param source_folder: source folder object
+        :param dest_folder: destination folder object
+        :param now_millis: current time in milliseconds
+        :param reporter: progress reporter
+        :param encryption_settings_provider: encryption setting provider
         """
         source_type = source_folder.folder_type()
         dest_type = dest_folder.folder_type()
@@ -170,10 +178,10 @@ class Synchronizer:
 
         # For downloads, make sure that the target directory is there.
         if dest_type == 'local' and not self.dry_run:
-            dest_folder.ensure_present()
+            cast(LocalFolder, dest_folder).ensure_present()
 
         if source_type == 'local' and not self.allow_empty_source:
-            source_folder.ensure_non_empty()
+            cast(LocalFolder, source_folder).ensure_non_empty()
 
         # Make an executor to count files and run all of the actions. This is
         # not the same as the executor in the API object which is used for
@@ -195,9 +203,9 @@ class Synchronizer:
         # For bucket-to-bucket sync, the bucket for the API calls should be the destination.
         action_bucket = None
         if dest_type == 'b2':
-            action_bucket = dest_folder.bucket
+            action_bucket = cast(B2Folder, dest_folder).bucket
         elif source_type == 'b2':
-            action_bucket = source_folder.bucket
+            action_bucket = cast(B2Folder, source_folder).bucket
 
         # Schedule each of the actions.
         for action in self._make_folder_sync_actions(
@@ -222,7 +230,7 @@ class Synchronizer:
         dest_folder: AbstractFolder,
         now_millis: int,
         reporter: SyncReport,
-        policies_manager: SyncPolicyManager = DEFAULT_SCAN_MANAGER,
+        policies_manager: ScanPoliciesManager = DEFAULT_SCAN_MANAGER,
         encryption_settings_provider:
         AbstractSyncEncryptionSettingsProvider = SERVER_DEFAULT_SYNC_ENCRYPTION_SETTINGS_PROVIDER,
     ):
@@ -230,12 +238,12 @@ class Synchronizer:
         Yield a sequence of actions that will sync the destination
         folder to the source folder.
 
-        :param b2sdk.v2.AbstractFolder source_folder: source folder object
-        :param b2sdk.v2.AbstractFolder dest_folder: destination folder object
-        :param int now_millis: current time in milliseconds
-        :param b2sdk.v2.SyncReport reporter: reporter object
-        :param b2sdk.v2.ScanPolicyManager policies_manager: object which decides which files to process
-        :param b2sdk.v2.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
+        :param source_folder: source folder object
+        :param dest_folder: destination folder object
+        :param now_millis: current time in milliseconds
+        :param reporter: reporter object
+        :param policies_manager: object which decides which files to process
+        :param encryption_settings_provider: encryption setting provider
         """
         if self.keep_days_or_delete == KeepOrDeleteMode.KEEP_BEFORE_DELETE and dest_folder.folder_type(
         ) == 'local':
@@ -290,8 +298,8 @@ class Synchronizer:
     def _make_file_sync_actions(
         self,
         sync_type: str,
-        source_path: AbstractPath,
-        dest_path: AbstractPath,
+        source_path: Optional[AbstractPath],
+        dest_path: Optional[AbstractPath],
         source_folder: AbstractFolder,
         dest_folder: AbstractFolder,
         now_millis: int,
@@ -301,13 +309,13 @@ class Synchronizer:
         """
         Yields the sequence of actions needed to sync the two files
 
-        :param str sync_type: synchronization type
-        :param b2sdk.v2.AbstractPath source_path: source file object
-        :param b2sdk.v2.AbstractPath dest_path: destination file object
-        :param b2sdk.v2.AbstractFolder source_folder: a source folder object
-        :param b2sdk.v2.AbstractFolder dest_folder: a destination folder object
-        :param int now_millis: current time in milliseconds
-        :param b2sdk.v2.AbstractSyncEncryptionSettingsProvider encryption_settings_provider: encryption setting provider
+        :param sync_type: synchronization type
+        :param source_path: source file object
+        :param dest_path: destination file object
+        :param source_folder: a source folder object
+        :param dest_folder: a destination folder object
+        :param now_millis: current time in milliseconds
+        :param encryption_settings_provider: encryption setting provider
         """
         delete = self.keep_days_or_delete == KeepOrDeleteMode.DELETE
 
@@ -324,5 +332,7 @@ class Synchronizer:
             self.compare_threshold,
             self.compare_version_mode,
             encryption_settings_provider=encryption_settings_provider,
+            upload_mode=self.upload_mode,
+            absolute_minimum_part_size=self.absolute_minimum_part_size,
         )
         return policy.get_all_actions()
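
The sync.py changes narrow AbstractFolder values with typing.cast before calling LocalFolder- or B2Folder-specific members such as ensure_present and bucket. cast has no runtime effect; it only informs the type checker. A minimal sketch of the idea, using simplified classes rather than the SDK's own:

    from typing import cast

    class AbstractFolder:
        def folder_type(self) -> str:
            raise NotImplementedError

    class LocalFolder(AbstractFolder):
        def folder_type(self) -> str:
            return "local"

        def ensure_present(self) -> None:
            pass  # would create the directory if it were missing

    def prepare_destination(dest_folder: AbstractFolder) -> None:
        if dest_folder.folder_type() == "local":
            # cast() is purely for the type checker: this branch is only
            # reached with a LocalFolder, which has ensure_present().
            cast(LocalFolder, dest_folder).ensure_present()

    prepare_destination(LocalFolder())
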
diff --git a/b2sdk/transfer/emerge/emerger.py b/b2sdk/transfer/emerge/emerger.py
index 007120c..35c348c 100644
--- a/b2sdk/transfer/emerge/emerger.py
+++ b/b2sdk/transfer/emerge/emerger.py
@@ -9,13 +9,16 @@
 ######################################################################
 
 import logging
-from typing import Optional
+from typing import Dict, Iterator, Optional, List
 
 from b2sdk.encryption.setting import EncryptionSetting
 from b2sdk.file_lock import FileRetentionSetting, LegalHold
-from b2sdk.utils import B2TraceMetaAbstract
+from b2sdk.http_constants import LARGE_FILE_SHA1
+from b2sdk.progress import AbstractProgressListener
 from b2sdk.transfer.emerge.executor import EmergeExecutor
-from b2sdk.transfer.emerge.planner.planner import EmergePlanner
+from b2sdk.transfer.emerge.planner.planner import EmergePlan, EmergePlanner
+from b2sdk.transfer.emerge.write_intent import WriteIntent
+from b2sdk.utils import B2TraceMetaAbstract, iterator_peek, Sha1HexDigest
 
 logger = logging.getLogger(__name__)
 
@@ -40,123 +43,284 @@ class Emerger(metaclass=B2TraceMetaAbstract):
         self.services = services
         self.emerge_executor = EmergeExecutor(services)
 
-    def emerge(
+    @classmethod
+    def _get_updated_file_info_with_large_file_sha1(
+        cls,
+        file_info: Optional[Dict[str, str]],
+        write_intents: Optional[List[WriteIntent]],
+        emerge_plan: EmergePlan,
+        large_file_sha1: Optional[Sha1HexDigest] = None,
+    ) -> Optional[Dict[str, str]]:
+        if not emerge_plan.is_large_file():
+            # Emerge plan doesn't construct a large file, no point setting the large_file_sha1
+            return file_info
+
+        file_sha1 = large_file_sha1
+        if not file_sha1 and write_intents is not None and len(write_intents) == 1:
+            # large_file_sha1 was not given explicitly, but there's just one write intent, perhaps it has a hash
+            file_sha1 = write_intents[0].get_content_sha1()
+
+        out_file_info = file_info
+        if file_sha1:
+            out_file_info = dict(file_info) if file_info else {}
+            out_file_info[LARGE_FILE_SHA1] = file_sha1
+
+        return out_file_info
+
+    def _emerge(
         self,
+        emerge_function,
         bucket_id,
-        write_intents,
+        write_intents_iterable,
         file_name,
         content_type,
         file_info,
         progress_listener,
         recommended_upload_part_size=None,
         continue_large_file_id=None,
+        max_queue_size=None,
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
-        min_part_size=None,
-        max_part_size=None,
+        min_part_size: Optional[int] = None,
+        max_part_size: Optional[int] = None,
+        large_file_sha1: Optional[Sha1HexDigest] = None,
+        check_first_intent_for_sha1: bool = True,
+        custom_upload_timestamp: Optional[int] = None,
     ):
-        """
-        Create a new file (object in the cloud, really) from an iterable (list, tuple etc) of write intents.
-
-        :param str bucket_id: a bucket ID
-        :param write_intents: write intents to process to create a file
-        :type write_intents: List[b2sdk.v2.WriteIntent]
-        :param str file_name: the file name of the new B2 file
-        :param str,None content_type: the MIME type or ``None`` to determine automatically
-        :param dict,None file_info: a file info to store with the file or ``None`` to not store anything
-        :param b2sdk.v2.AbstractProgressListener progress_listener: a progress listener object to use
-
-        :param int min_part_size: lower limit of part size for the transfer planner, in bytes
-        :param int max_part_size: upper limit of part size for the transfer planner, in bytes
-        """
-        # WARNING: time spent trying to extract common parts of emerge() and emerge_stream()
-        # into a separate method: 20min. You can try it too, but please increment the timer honestly.
-        # Problematic lines are marked with a "<--".
         planner = self.get_emerge_planner(
             min_part_size=min_part_size,
             recommended_upload_part_size=recommended_upload_part_size,
             max_part_size=max_part_size,
         )
-        emerge_plan = planner.get_emerge_plan(write_intents)  # <--
+
+        # Large file SHA1 operation, possibly on intents.
+        large_file_sha1_intents_for_check = None
+        all_write_intents = write_intents_iterable
+        if check_first_intent_for_sha1:
+            write_intents_iterator = iter(all_write_intents)
+            large_file_sha1_intents_for_check, all_write_intents = \
+                iterator_peek(write_intents_iterator, 2)
+
+        emerge_plan = emerge_function(planner, all_write_intents)
+
+        out_file_info = self._get_updated_file_info_with_large_file_sha1(
+            file_info,
+            large_file_sha1_intents_for_check,
+            emerge_plan,
+            large_file_sha1,
+        )
+
         return self.emerge_executor.execute_emerge_plan(
             emerge_plan,
             bucket_id,
             file_name,
             content_type,
+            out_file_info,
+            progress_listener,
+            continue_large_file_id=continue_large_file_id,
+            encryption=encryption,
+            file_retention=file_retention,
+            legal_hold=legal_hold,
+            # Max queue size is only used in case of large files.
+            # Passing anything for small files does nothing.
+            max_queue_size=max_queue_size,
+            custom_upload_timestamp=custom_upload_timestamp,
+        )
+
+    def emerge(
+        self,
+        bucket_id: str,
+        write_intents: List[WriteIntent],
+        file_name: str,
+        content_type: Optional[str],
+        file_info: Optional[Dict[str, str]],
+        progress_listener: AbstractProgressListener,
+        recommended_upload_part_size: Optional[int] = None,
+        continue_large_file_id: Optional[str] = None,
+        encryption: Optional[EncryptionSetting] = None,
+        file_retention: Optional[FileRetentionSetting] = None,
+        legal_hold: Optional[LegalHold] = None,
+        min_part_size: Optional[int] = None,
+        max_part_size: Optional[int] = None,
+        large_file_sha1: Optional[Sha1HexDigest] = None,
+        custom_upload_timestamp: Optional[int] = None,
+    ):
+        """
+        Create a new file (object in the cloud, really) from an iterable (list, tuple etc) of write intents.
+
+        :param bucket_id: a bucket ID
+        :param write_intents: write intents to process to create a file
+        :param file_name: the file name of the new B2 file
+        :param content_type: the MIME type or ``None`` to determine automatically
+        :param file_info: a file info to store with the file or ``None`` to not store anything
+        :param progress_listener: a progress listener object to use
+        :param recommended_upload_part_size: the recommended part size to use for uploading local sources
+                        or ``None`` to determine automatically, but remote sources would be copied with
+                        maximum possible part size
+        :param continue_large_file_id: large file id that should be selected to resume file creation
+                        for multipart upload/copy, if ``None`` in multipart case it would always start a new
+                        large file
+        :param encryption: encryption settings (``None`` if unknown)
+        :param file_retention: file retention setting
+        :param legal_hold: legal hold setting
+        :param min_part_size: lower limit of part size for the transfer planner, in bytes
+        :param max_part_size: upper limit of part size for the transfer planner, in bytes
+        :param large_file_sha1: SHA1 for this file, if ``None`` and there's exactly one intent, it'll be taken from it
+        :param custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
+        """
+        return self._emerge(
+            EmergePlanner.get_emerge_plan,
+            bucket_id,
+            write_intents,
+            file_name,
+            content_type,
             file_info,
             progress_listener,
             continue_large_file_id=continue_large_file_id,
             encryption=encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            recommended_upload_part_size=recommended_upload_part_size,
+            min_part_size=min_part_size,
+            max_part_size=max_part_size,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def emerge_stream(
         self,
-        bucket_id,
-        write_intent_iterator,
-        file_name,
-        content_type,
-        file_info,
-        progress_listener,
-        recommended_upload_part_size=None,
-        continue_large_file_id=None,
-        max_queue_size=DEFAULT_STREAMING_MAX_QUEUE_SIZE,
+        bucket_id: str,
+        write_intent_iterator: Iterator[WriteIntent],
+        file_name: str,
+        content_type: Optional[str],
+        file_info: Optional[Dict[str, str]],
+        progress_listener: AbstractProgressListener,
+        recommended_upload_part_size: Optional[int] = None,
+        continue_large_file_id: Optional[str] = None,
+        max_queue_size: int = DEFAULT_STREAMING_MAX_QUEUE_SIZE,
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
-        min_part_size=None,
-        max_part_size=None,
+        min_part_size: Optional[int] = None,
+        max_part_size: Optional[int] = None,
+        large_file_sha1: Optional[Sha1HexDigest] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Create a new file (object in the cloud, really) from a stream of write intents.
 
-        :param str bucket_id: a bucket ID
+        :param bucket_id: a bucket ID
         :param write_intent_iterator: iterator of :class:`~b2sdk.v2.WriteIntent`
-        :param str file_name: the file name of the new B2 file
-        :param str,None content_type: the MIME type or ``None`` to determine automatically
-        :param dict,None file_info: a file info to store with the file or ``None`` to not store anything
-        :param b2sdk.v2.AbstractProgressListener progress_listener: a progress listener object to use
-        :param int,None recommended_upload_part_size: the recommended part size to use for uploading local sources
+        :param file_name: the file name of the new B2 file
+        :param content_type: the MIME type or ``None`` to determine automatically
+        :param file_info: a file info to store with the file or ``None`` to not store anything
+        :param progress_listener: a progress listener object to use
+        :param recommended_upload_part_size: the recommended part size to use for uploading local sources
                         or ``None`` to determine automatically, but remote sources would be copied with
                         maximum possible part size
-        :param str,None continue_large_file_id: large file id that should be selected to resume file creation
+        :param continue_large_file_id: large file id that should be selected to resume file creation
                         for multipart upload/copy, if ``None`` in multipart case it would always start a new
                         large file
-        :param b2sdk.v2.EncryptionSetting encryption: encryption settings (``None`` if unknown)
-        :param b2sdk.v2.FileRetentionSetting file_retention: file retention setting
-        :param bool legal_hold: legal hold setting
-
-        :param int min_part_size: lower limit of part size for the transfer planner, in bytes
-        :param int max_part_size: upper limit of part size for the transfer planner, in bytes
+        :param max_queue_size: parallelization level
+        :param encryption: encryption settings (``None`` if unknown)
+        :param file_retention: file retention setting
+        :param legal_hold: legal hold setting
 
+        :param min_part_size: lower limit of part size for the transfer planner, in bytes
+        :param max_part_size: upper limit of part size for the transfer planner, in bytes
+        :param large_file_sha1: SHA1 for this file, if ``None`` and there's exactly one intent, it'll be taken from it
+        :param custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
         """
-        planner = self.get_emerge_planner(
-            min_part_size=min_part_size,
+        return self._emerge(
+            EmergePlanner.get_streaming_emerge_plan,
+            bucket_id,
+            write_intent_iterator,
+            file_name,
+            content_type,
+            file_info,
+            progress_listener,
+            continue_large_file_id=continue_large_file_id,
+            max_queue_size=max_queue_size,
+            encryption=encryption,
+            file_retention=file_retention,
+            legal_hold=legal_hold,
             recommended_upload_part_size=recommended_upload_part_size,
+            min_part_size=min_part_size,
             max_part_size=max_part_size,
+            large_file_sha1=large_file_sha1,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
-        emerge_plan = planner.get_streaming_emerge_plan(write_intent_iterator)  # <--
-        return self.emerge_executor.execute_emerge_plan(
-            emerge_plan,
+
+    def emerge_unbound(
+        self,
+        bucket_id: str,
+        write_intent_iterator: Iterator[WriteIntent],
+        file_name: str,
+        content_type: Optional[str],
+        file_info: Optional[Dict[str, str]],
+        progress_listener: AbstractProgressListener,
+        recommended_upload_part_size: Optional[int] = None,
+        continue_large_file_id: Optional[str] = None,
+        max_queue_size: int = 1,
+        encryption: Optional[EncryptionSetting] = None,
+        file_retention: Optional[FileRetentionSetting] = None,
+        legal_hold: Optional[LegalHold] = None,
+        min_part_size: Optional[int] = None,
+        max_part_size: Optional[int] = None,
+        large_file_sha1: Optional[Sha1HexDigest] = None,
+        custom_upload_timestamp: Optional[int] = None,
+    ):
+        """
+        Create a new file (object in the cloud, really) from an unbound stream of write intents.
+
+        :param bucket_id: a bucket ID
+        :param write_intent_iterator: iterator of :class:`~b2sdk.v2.WriteIntent`
+        :param file_name: the file name of the new B2 file
+        :param content_type: the MIME type or ``None`` to determine automatically
+        :param file_info: a file info to store with the file or ``None`` to not store anything
+        :param progress_listener: a progress listener object to use
+        :param recommended_upload_part_size: the recommended part size to use for uploading local sources
+                        or ``None`` to determine automatically, but remote sources would be copied with
+                        maximum possible part size
+        :param continue_large_file_id: large file id that should be selected to resume file creation
+                        for multipart upload/copy, if ``None`` in multipart case it would always start a new
+                        large file
+        :param max_queue_size: parallelization level, should be equal to the number of buffers available in parallel
+        :param encryption: encryption settings (``None`` if unknown)
+        :param file_retention: file retention setting
+        :param legal_hold: legal hold setting
+        :param min_part_size: lower limit of part size for the transfer planner, in bytes
+        :param max_part_size: upper limit of part size for the transfer planner, in bytes
+        :param large_file_sha1: SHA1 for this file, if ``None`` it's left unset
+        :param custom_upload_timestamp: override object creation date, expressed as a number of milliseconds since epoch
+        """
+        return self._emerge(
+            EmergePlanner.get_unbound_emerge_plan,
             bucket_id,
+            write_intent_iterator,
             file_name,
             content_type,
             file_info,
             progress_listener,
             continue_large_file_id=continue_large_file_id,
-            max_queue_size=max_queue_size,  # <--
+            max_queue_size=max_queue_size,
             encryption=encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            recommended_upload_part_size=recommended_upload_part_size,
+            min_part_size=min_part_size,
+            max_part_size=max_part_size,
+            large_file_sha1=large_file_sha1,
+            check_first_intent_for_sha1=False,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
 
     def get_emerge_planner(
         self,
-        recommended_upload_part_size=None,
-        min_part_size=None,
-        max_part_size=None,
+        recommended_upload_part_size: Optional[int] = None,
+        min_part_size: Optional[int] = None,
+        max_part_size: Optional[int] = None,
     ):
         return EmergePlanner.from_account_info(
             self.services.session.account_info,
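
The emerger.py rewrite funnels emerge, emerge_stream and the new emerge_unbound through a common _emerge helper and, for large files, records a large_file_sha1 entry in the file info, taking the hash from the explicit argument or from a single write intent. A self-contained approximation of that file-info update (the key name mirrors the LARGE_FILE_SHA1 constant in the diff; the helper itself is illustrative, not the library function):

    from typing import Dict, List, Optional

    LARGE_FILE_SHA1 = "large_file_sha1"  # key name taken from the diff above

    def file_info_with_sha1(
        file_info: Optional[Dict[str, str]],
        intent_sha1s: List[Optional[str]],
        explicit_sha1: Optional[str],
        is_large_file: bool,
    ) -> Optional[Dict[str, str]]:
        # Small files carry their own SHA1, so only large files need the key.
        if not is_large_file:
            return file_info
        sha1 = explicit_sha1
        if sha1 is None and len(intent_sha1s) == 1:
            # No explicit hash, but a single write intent may know its own.
            sha1 = intent_sha1s[0]
        if sha1 is None:
            return file_info
        updated = dict(file_info) if file_info else {}
        updated[LARGE_FILE_SHA1] = sha1
        return updated

    assert file_info_with_sha1(None, ["da39a3"], None, True) == {"large_file_sha1": "da39a3"}
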
diff --git a/b2sdk/transfer/emerge/exception.py b/b2sdk/transfer/emerge/exception.py
new file mode 100644
index 0000000..5a50aee
--- /dev/null
+++ b/b2sdk/transfer/emerge/exception.py
@@ -0,0 +1,18 @@
+######################################################################
+#
+# File: b2sdk/transfer/emerge/exception.py
+#
+# Copyright 2022 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
+
+from b2sdk.exception import B2SimpleError
+
+
+class UnboundStreamBufferTimeout(B2SimpleError):
+    """
+    Raised when there is no space for a new buffer for a certain amount of time.
+    """
+    pass
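
UnboundStreamBufferTimeout signals that no buffer slot became free in time while feeding an unbound stream. One way such a guard can be built, shown purely as an assumption using a queue-based slot pool (the SDK's actual mechanism is not part of this diff, and the exception below is a simplified stand-in):

    import queue

    class UnboundStreamBufferTimeout(Exception):
        """Simplified stand-in for the exception added in this release."""

    def acquire_buffer_slot(slots: "queue.Queue", timeout_seconds: float):
        # Block until a previously used slot is handed back, or give up.
        try:
            return slots.get(timeout=timeout_seconds)
        except queue.Empty:
            raise UnboundStreamBufferTimeout() from None

    slots = queue.Queue(maxsize=1)
    slots.put(object())
    acquire_buffer_slot(slots, 0.1)      # succeeds: one slot is available
    try:
        acquire_buffer_slot(slots, 0.1)  # times out: the only slot is in use
    except UnboundStreamBufferTimeout:
        pass
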
diff --git a/b2sdk/transfer/emerge/executor.py b/b2sdk/transfer/emerge/executor.py
index 2c5a146..c3e5f02 100644
--- a/b2sdk/transfer/emerge/executor.py
+++ b/b2sdk/transfer/emerge/executor.py
@@ -8,19 +8,23 @@
 #
 ######################################################################
 
+import logging
 import threading
 
 from abc import ABCMeta, abstractmethod
-from typing import Optional
+from typing import Dict, Optional
 
 from b2sdk.encryption.setting import EncryptionSetting
 from b2sdk.exception import MaxFileSizeExceeded
 from b2sdk.file_lock import FileRetentionSetting, LegalHold, NO_RETENTION_FILE_SETTING
+from b2sdk.http_constants import LARGE_FILE_SHA1
 from b2sdk.transfer.outbound.large_file_upload_state import LargeFileUploadState
 from b2sdk.transfer.outbound.upload_source import UploadSourceStream
 
 AUTO_CONTENT_TYPE = 'b2/x-auto'
 
+logger = logging.getLogger(__name__)
+
 
 class EmergeExecutor:
     def __init__(self, services):
@@ -39,6 +43,7 @@ class EmergeExecutor:
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         if emerge_plan.is_large_file():
             execution = LargeFileEmergeExecution(
@@ -53,6 +58,7 @@ class EmergeExecutor:
                 legal_hold=legal_hold,
                 continue_large_file_id=continue_large_file_id,
                 max_queue_size=max_queue_size,
+                custom_upload_timestamp=custom_upload_timestamp,
             )
         else:
             if continue_large_file_id is not None:
@@ -67,6 +73,7 @@ class EmergeExecutor:
                 encryption=encryption,
                 file_retention=file_retention,
                 legal_hold=legal_hold,
+                custom_upload_timestamp=custom_upload_timestamp,
             )
         return execution.execute_plan(emerge_plan)
 
@@ -85,6 +92,7 @@ class BaseEmergeExecution(metaclass=ABCMeta):
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         self.services = services
         self.bucket_id = bucket_id
@@ -95,6 +103,7 @@ class BaseEmergeExecution(metaclass=ABCMeta):
         self.encryption = encryption
         self.file_retention = file_retention
         self.legal_hold = legal_hold
+        self.custom_upload_timestamp = custom_upload_timestamp
 
     @abstractmethod
     def execute_plan(self, emerge_plan):
@@ -128,6 +137,7 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
         legal_hold: Optional[LegalHold] = None,
         continue_large_file_id=None,
         max_queue_size=None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         super(LargeFileEmergeExecution, self).__init__(
             services,
@@ -139,6 +149,7 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
             encryption=encryption,
             file_retention=file_retention,
             legal_hold=legal_hold,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
         self.continue_large_file_id = continue_large_file_id
         self.max_queue_size = max_queue_size
@@ -174,6 +185,7 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
             file_retention=self.file_retention,
             legal_hold=self.legal_hold,
             emerge_parts_dict=emerge_parts_dict,
+            custom_upload_timestamp=self.custom_upload_timestamp,
         )
 
         if unfinished_file is None:
@@ -244,6 +256,7 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
         emerge_parts_dict=None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         if 'listFiles' not in self.services.session.account_info.get_allowed()['capabilities']:
             return None, {}
@@ -275,6 +288,7 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
                 encryption,
                 file_retention,
                 legal_hold,
+                custom_upload_timestamp=custom_upload_timestamp,
             )
         elif emerge_parts_dict is not None:
             unfinished_file, finished_parts = self._match_unfinished_file_if_possible(
@@ -285,6 +299,7 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
                 encryption,
                 file_retention,
                 legal_hold,
+                custom_upload_timestamp=custom_upload_timestamp,
             )
         return unfinished_file, finished_parts
 
@@ -297,6 +312,7 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
         encryption: EncryptionSetting,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         file_retention = file_retention or NO_RETENTION_FILE_SETTING
         assert 'plan_id' in file_info
@@ -325,6 +341,10 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
                 # pass UNKNOWN file_retention here - but raw_api/server won't allow it
                 # and we don't check it here
                 continue
+
+            if custom_upload_timestamp is not None and file_.upload_timestamp != custom_upload_timestamp:
+                continue
+
             finished_parts = {}
             for part in self.services.large_file.list_parts(file_.file_id):
                 emerge_part = emerge_parts_dict.get(part.part_number)
@@ -345,6 +365,17 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
                 best_match_parts_len = finished_parts_len
         return best_match_file, best_match_parts
 
+    @classmethod
+    def _get_file_info_without_large_file_sha1(
+        cls,
+        file_info: Optional[Dict[str, str]],
+    ) -> Optional[Dict[str, str]]:
+        if not file_info or LARGE_FILE_SHA1 not in file_info:
+            return file_info
+        out_file_info = dict(file_info)
+        del out_file_info[LARGE_FILE_SHA1]
+        return out_file_info
+
     def _match_unfinished_file_if_possible(
         self,
         bucket_id,
@@ -354,6 +385,7 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
         encryption: EncryptionSetting,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         """
         Find an unfinished file that may be used to resume a large file upload.  The
@@ -363,30 +395,59 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
         This is only possible if the application key being used allows ``listFiles`` access.
         """
         file_retention = file_retention or NO_RETENTION_FILE_SETTING
+        file_info_without_large_file_sha1 = self._get_file_info_without_large_file_sha1(file_info)
+
+        logger.debug('Checking for matching unfinished large files for %s...', file_name)
         for file_ in self.services.large_file.list_unfinished_large_files(
             bucket_id, prefix=file_name
         ):
             if file_.file_name != file_name:
+                logger.debug('Rejecting %s: file has a different file name', file_.file_id)
                 continue
             if file_.file_info != file_info:
-                continue
+                if (LARGE_FILE_SHA1 in file_.file_info) == (LARGE_FILE_SHA1 in file_info):
+                    logger.debug(
+                        'Rejecting %s: large_file_sha1 is present or missing in both file infos',
+                        file_.file_id
+                    )
+                    continue
+
+                if self._get_file_info_without_large_file_sha1(
+                    file_.file_info
+                ) != file_info_without_large_file_sha1:
+                    # even ignoring large_file_sha1, the file infos are still different
+                    logger.debug(
+                        'Rejecting %s: file info mismatch after dropping `large_file_sha1`',
+                        file_.file_id
+                    )
+                    continue
+
             # FIXME: what if `encryption is None` - match ANY encryption? :)
             if encryption is not None and encryption != file_.encryption:
+                logger.debug('Rejecting %s: encryption mismatch', file_.file_id)
                 continue
 
             if legal_hold is None:
                 if LegalHold.UNSET != file_.legal_hold:
                     # Uploading and not providing legal_hold means that server's response about that file version
                     # will have legal_hold=LegalHold.UNSET
+                    logger.debug('Rejecting %s: legal hold mismatch (not unset)', file_.file_id)
                     continue
             elif legal_hold != file_.legal_hold:
+                logger.debug('Rejecting %s: legal hold mismatch', file_.file_id)
                 continue
 
             if file_retention != file_.file_retention:
                 # if `file_.file_retention` is UNKNOWN then we skip - lib user can still
                 # pass UNKNOWN file_retention here - but raw_api/server won't allow it
                 # and we don't check it here
+                logger.debug('Rejecting %s: retention mismatch', file_.file_id)
+                continue
+
+            if custom_upload_timestamp is not None and file_.upload_timestamp != custom_upload_timestamp:
+                logger.debug('Rejecting %s: custom_upload_timestamp mismatch', file_.file_id)
                 continue
+
             files_match = True
             finished_parts = {}
             for part in self.services.large_file.list_parts(file_.file_id):
@@ -412,10 +473,17 @@ class LargeFileEmergeExecution(BaseEmergeExecution):
 
             # Skip not matching files or unfinished files with no uploaded parts
             if not files_match or not finished_parts:
+                logger.debug('Rejecting %s: No finished parts or part mismatch', file_.file_id)
                 continue
 
             # Return first matched file
+            logger.debug(
+                'Unfinished file %s matches with %i finished parts', file_.file_id,
+                len(finished_parts)
+            )
             return file_, finished_parts
+
+        logger.debug('No matching unfinished files found.')
         return None, {}
 
 
@@ -577,6 +645,7 @@ class UploadFileExecutionStep(BaseExecutionStep):
             encryption=execution.encryption,
             file_retention=execution.file_retention,
             legal_hold=execution.legal_hold,
+            custom_upload_timestamp=execution.custom_upload_timestamp,
         )
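
When resuming an unfinished large file, the executor now tolerates a file-info mismatch that is limited to the large_file_sha1 key, and it additionally requires custom_upload_timestamp to match when one is given. A self-contained sketch of the file-info comparison only (the key name follows the diff; the helper functions are illustrative):

    from typing import Dict, Optional

    LARGE_FILE_SHA1 = "large_file_sha1"  # key name taken from the diff above

    def without_large_file_sha1(file_info: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]:
        if not file_info or LARGE_FILE_SHA1 not in file_info:
            return file_info
        stripped = dict(file_info)
        del stripped[LARGE_FILE_SHA1]
        return stripped

    def infos_match(candidate: Dict[str, str], requested: Dict[str, str]) -> bool:
        # Identical infos always match; otherwise the only allowed difference
        # is that exactly one side carries the large_file_sha1 entry.
        if candidate == requested:
            return True
        if (LARGE_FILE_SHA1 in candidate) == (LARGE_FILE_SHA1 in requested):
            return False
        return without_large_file_sha1(candidate) == without_large_file_sha1(requested)

    assert infos_match({"plan_id": "1", LARGE_FILE_SHA1: "da39a3"}, {"plan_id": "1"})
    assert not infos_match({"plan_id": "2"}, {"plan_id": "1"})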
 
 
diff --git a/b2sdk/transfer/emerge/planner/planner.py b/b2sdk/transfer/emerge/planner/planner.py
index 46f613f..b2804e8 100644
--- a/b2sdk/transfer/emerge/planner/planner.py
+++ b/b2sdk/transfer/emerge/planner/planner.py
@@ -14,7 +14,6 @@ import json
 
 from abc import ABCMeta, abstractmethod
 from collections import deque
-from itertools import chain
 
 from b2sdk.transfer.emerge.planner.part_definition import (
     CopyEmergePartDefinition,
@@ -25,9 +24,12 @@ from b2sdk.transfer.emerge.planner.upload_subpart import (
     LocalSourceUploadSubpart,
     RemoteSourceUploadSubpart,
 )
-
-MEGABYTE = 1000 * 1000
-GIGABYTE = 1000 * MEGABYTE
+from b2sdk.http_constants import (
+    DEFAULT_MIN_PART_SIZE,
+    DEFAULT_MAX_PART_SIZE,
+    DEFAULT_RECOMMENDED_UPLOAD_PART_SIZE,
+)
+from b2sdk.utils import iterator_peek
 
 
 class UploadBuffer:
@@ -82,9 +84,6 @@ class UploadBuffer:
 
 class EmergePlanner:
     """ Creates a list of actions required for advanced creation of an object in the cloud from an iterator of write intent objects """
-    DEFAULT_MIN_PART_SIZE = 5 * MEGABYTE
-    DEFAULT_RECOMMENDED_UPLOAD_PART_SIZE = 100 * MEGABYTE
-    DEFAULT_MAX_PART_SIZE = 5 * GIGABYTE
 
     def __init__(
         self,
@@ -92,9 +91,9 @@ class EmergePlanner:
         recommended_upload_part_size=None,
         max_part_size=None,
     ):
-        self.min_part_size = min_part_size or self.DEFAULT_MIN_PART_SIZE
-        self.recommended_upload_part_size = recommended_upload_part_size or self.DEFAULT_RECOMMENDED_UPLOAD_PART_SIZE
-        self.max_part_size = max_part_size or self.DEFAULT_MAX_PART_SIZE
+        self.min_part_size = min_part_size or DEFAULT_MIN_PART_SIZE
+        self.recommended_upload_part_size = recommended_upload_part_size or DEFAULT_RECOMMENDED_UPLOAD_PART_SIZE
+        self.max_part_size = max_part_size or DEFAULT_MAX_PART_SIZE
         assert self.min_part_size <= self.recommended_upload_part_size <= self.max_part_size
 
     @classmethod
@@ -107,9 +106,9 @@ class EmergePlanner:
     ):
         if recommended_upload_part_size is None:
             recommended_upload_part_size = account_info.get_recommended_part_size()
-        if min_part_size is None and recommended_upload_part_size < cls.DEFAULT_MIN_PART_SIZE:
+        if min_part_size is None and recommended_upload_part_size < DEFAULT_MIN_PART_SIZE:
             min_part_size = recommended_upload_part_size
-        if max_part_size is None and recommended_upload_part_size > cls.DEFAULT_MAX_PART_SIZE:
+        if max_part_size is None and recommended_upload_part_size > DEFAULT_MAX_PART_SIZE:
             max_part_size = recommended_upload_part_size
         kwargs = {
             'min_part_size': min_part_size,
@@ -141,6 +140,31 @@ class EmergePlanner:
     def get_streaming_emerge_plan(self, write_intent_iterator):
         return self._get_emerge_plan(write_intent_iterator, StreamingEmergePlan)
 
+    def get_unbound_emerge_plan(self, write_intent_iterator):
+        """
+        For unbound streams we skip the whole process of bunching different parts together,
+        validating them and splitting by operation type. We can do this, because:
+        1. there will be no copy operations at all;
+        2. we don't want to pull more data than actually needed;
+        3. all the data is ordered;
+        4. we don't want anything else to touch our buffers.
+        Furthermore, we're using StreamingEmergePlan, as it checks whether we have one or more
+        chunks to work with, and picks a proper upload method.
+        """
+        return StreamingEmergePlan(self._get_simple_emerge_parts(write_intent_iterator))
+
+    def _get_simple_emerge_parts(self, write_intent_iterator):
+        # Assumption here is that we need to do no magic. We are receiving
+        # The assumption here is that no special handling is needed. We are receiving
+        # a read-only stream that is not seekable and is used only for uploading.
+        # Moreover, we assume that each write intent we receive is a self-contained
+        # buffer with enough data to satisfy the cloud's minimum part size.
+            yield UploadEmergePartDefinition(
+                write_intent.outbound_source,
+                relative_offset=0,
+                length=write_intent.length,
+            )
+
     def _get_emerge_plan(self, write_intent_iterator, plan_class):
         return plan_class(
             self._get_emerge_parts(
@@ -340,10 +364,10 @@ class EmergePlanner:
         return left_buff, UploadBuffer(left_buff.end_offset)
 
     def _select_intent_fragments(self, write_intent_iterator):
-        """ Select overapping write intent fragments to use.
+        """ Select overlapping write intent fragments to use.
 
         To solve overlapping intents selection, intents can be split to smaller fragments.
-        Those fragments are yieled as soon as decision can be made to use them,
+        Those fragments are yielded as soon as decision can be made to use them,
         so there is possibility that one intent is yielded in multiple fragments. Those
         would be merged again by higher level iterator that produces emerge parts, but
         in principle this merging can happen here. Not merging it is a code design decision
@@ -627,8 +651,10 @@ class EmergePlan(BaseEmergePlan):
 
 class StreamingEmergePlan(BaseEmergePlan):
     def __init__(self, emerge_parts_iterator):
-        emerge_parts, self._is_large_file = self._peek_for_large_file(emerge_parts_iterator)
-        super(StreamingEmergePlan, self).__init__(emerge_parts)
+        emerge_parts_iterator, self._is_large_file = self._peek_for_large_file(
+            emerge_parts_iterator
+        )
+        super().__init__(emerge_parts_iterator)
 
     def is_large_file(self):
         return self._is_large_file
@@ -640,15 +666,12 @@ class StreamingEmergePlan(BaseEmergePlan):
         return None
 
     def _peek_for_large_file(self, emerge_parts_iterator):
-        first_part = next(emerge_parts_iterator, None)
-        if first_part is None:
+        peeked, emerge_parts_iterator = iterator_peek(emerge_parts_iterator, 2)
+
+        if not peeked:
             raise ValueError('Empty emerge parts iterator')
 
-        second_part = next(emerge_parts_iterator, None)
-        if second_part is None:
-            return iter([first_part]), False
-        else:
-            return chain([first_part, second_part], emerge_parts_iterator), True
+        return emerge_parts_iterator, len(peeked) > 1
 
 
 class EmergePart:
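The `_peek_for_large_file` rewrite above relies on the new `iterator_peek` helper (added to `b2sdk/utils/__init__.py` later in this diff): peeking at up to two emerge parts is enough to decide whether the plan describes a multi-part (large) file, without consuming the iterator. A rough restatement of that decision, with plain integers standing in for part definitions:

```python
from b2sdk.utils import iterator_peek


def is_large_file_plan(emerge_parts_iterator):
    # Peek at up to two parts; more than one part means a large (multi-part) upload.
    peeked, emerge_parts_iterator = iterator_peek(emerge_parts_iterator, 2)
    if not peeked:
        raise ValueError('Empty emerge parts iterator')
    return len(peeked) > 1, emerge_parts_iterator


is_large, parts = is_large_file_plan(iter([1, 2, 3]))
assert is_large and list(parts) == [1, 2, 3]

is_large, parts = is_large_file_plan(iter([1]))
assert not is_large and list(parts) == [1]
```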
diff --git a/b2sdk/transfer/emerge/unbound_write_intent.py b/b2sdk/transfer/emerge/unbound_write_intent.py
new file mode 100644
index 0000000..9b0e00a
--- /dev/null
+++ b/b2sdk/transfer/emerge/unbound_write_intent.py
@@ -0,0 +1,213 @@
+######################################################################
+#
+# File: b2sdk/transfer/emerge/unbound_write_intent.py
+#
+# Copyright 2022 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
+
+import hashlib
+import io
+import queue
+from typing import Callable, Iterator, Optional, Union
+
+from b2sdk.transfer.emerge.exception import UnboundStreamBufferTimeout
+from b2sdk.transfer.emerge.write_intent import WriteIntent
+from b2sdk.transfer.outbound.upload_source import AbstractUploadSource
+
+
+class IOWrapper(io.BytesIO):
+    """
+    Wrapper for BytesIO that knows when it has been read in full.
+
+    Note that this stream should go through ``emerge_unbound``, as it's the only
+    one that skips ``_get_emerge_parts`` and pushes buffers to the cloud
+    exactly as they come. This way we can (somewhat) rely on checking whether
+    a read from this wrapper returned no more data.
+
+    It is assumed that this object is owned by a single thread at a time.
+    For that reason, no additional synchronisation is provided.
+    """
+
+    def __init__(
+        self,
+        data: Union[bytes, bytearray],
+        release_function: Callable[[], None],
+    ):
+        """
+        Prepares a new ``io.BytesIO`` structure that will call
+        a ``release_function`` when the buffer is read in full.
+
+        ``release_function`` can be called from another thread.
+        It is called exactly once, when the read returns
+        an empty buffer for the first time.
+
+        :param data: data to be provided as a stream
+        :param release_function: function to be called when all the data was read
+        """
+        super().__init__(data)
+
+        self.already_done = False
+        self.release_function = release_function
+
+    def read(self, size: Optional[int] = None) -> bytes:
+        result = super().read(size)
+
+        is_done = len(result) == 0
+        if is_done and not self.already_done:
+            self.already_done = True
+            self.release_function()
+
+        return result
+
+
+class UnboundSourceBytes(AbstractUploadSource):
+    """
+    Upload source that deals with a chunk of unbound data.
+
+    It ensures that the data it provides doesn't have to be iterated
+    over more than once. To do that, we ensure that both the length
+    and the SHA1 are known. Also, it should be used only with ``emerge_unbound``,
+    as it's the only plan that pushes buffers directly to the cloud.
+    """
+
+    def __init__(
+        self,
+        bytes_data: bytearray,
+        release_function: Callable[[], None],
+    ):
+        """
+        Prepares a new ``UploadSource`` that can be used with ``WriteIntent``.
+
+        Calculates SHA1 and length of the data.
+
+        :param bytes_data: data that should be uploaded; an ``IOWrapper`` is created for this data.
+        :param release_function: function to be called when all the ``bytes_data`` is uploaded.
+        """
+        self.length = len(bytes_data)
+        # Prepare sha1 of the chunk upfront to ensure that nothing iterates over the stream but the upload.
+        self.chunk_sha1 = hashlib.sha1(bytes_data).hexdigest()
+        self.stream = IOWrapper(bytes_data, release_function)
+
+    def get_content_sha1(self):
+        return self.chunk_sha1
+
+    def open(self):
+        return self.stream
+
+    def get_content_length(self):
+        return self.length
+
+
+class UnboundWriteIntentGenerator:
+    """
+    Generator that creates new write intents as data is streamed from an external source.
+
+    It tries to ensure that at most ``queue_size`` buffers with size ``buffer_size_bytes``
+    are allocated at any given moment.
+    """
+
+    def __init__(
+        self,
+        read_only_source,
+        buffer_size_bytes: int,
+        read_size: int,
+        queue_size: int,
+        queue_timeout_seconds: float,
+    ):
+        """
+        Prepares a new intent generator for a given source.
+
+        ``queue_size`` is handled on a best-effort basis. It's possible, in rare cases, that there will be more buffers
+        available at once. With the current implementation that would be the case when the whole buffer was read, but on
+        the very last byte the server stopped responding and a retry was issued.
+
+        :param read_only_source: Python object that has a ``read`` method.
+        :param buffer_size_bytes: Size of a single buffer that we're to download from the source and push to the cloud.
+        :param read_size: Size of a single read to be performed on ``read_only_source``.
+        :param queue_size: Maximum number of buffers that will be created.
+        :param queue_timeout_seconds: The iterator will wait at most this many seconds for an empty buffer
+                                      slot. After that time it's considered an error.
+        """
+        assert queue_size >= 1 and read_size > 0 and buffer_size_bytes > 0 and queue_timeout_seconds > 0.0
+
+        self.read_only_source = read_only_source
+        self.read_size = read_size
+
+        self.buffer_size_bytes = buffer_size_bytes
+        self.buffer_limit_queue = queue.Queue(maxsize=queue_size)
+        self.queue_timeout_seconds = queue_timeout_seconds
+
+        self.buffer = bytearray()
+        self.leftovers_buffer = bytearray()
+
+    def iterator(self) -> Iterator[WriteIntent]:
+        """
+        Creates new ``WriteIntent`` objects as the data is pulled from the ``read_only_source``.
+        """
+        datastream_done = False
+        offset = 0
+
+        while not datastream_done:
+            self._wait_for_free_buffer_slot()
+
+            # With very small buffer sizes and large read sizes we could
+            # end up with more than one buffer read at once. This should happen
+            # only in tests.
+            self._trim_to_leftovers()
+
+            while len(self.buffer) < self.buffer_size_bytes:
+                data = self.read_only_source.read(self.read_size)
+                if len(data) == 0:
+                    datastream_done = True
+                    break
+
+                self.buffer += data
+                self._trim_to_leftovers()
+
+            # If we've just started a new buffer and got an empty read on it,
+            # we have no data to send and the process is finished.
+            if len(self.buffer) == 0:
+                self._release_buffer()
+                break
+
+            source = UnboundSourceBytes(self.buffer, self._release_buffer)
+            intent = WriteIntent(source, destination_offset=offset)
+            yield intent
+
+            offset += len(self.buffer)
+            self._rotate_leftovers()
+
+    def _trim_to_leftovers(self) -> None:
+        if len(self.buffer) <= self.buffer_size_bytes:
+            return
+        remainder = len(self.buffer) - self.buffer_size_bytes
+        buffer_view = memoryview(self.buffer)
+        self.leftovers_buffer += buffer_view[-remainder:]
+        # This conversion has little to no impact on performance.
+        self.buffer = bytearray(buffer_view[:-remainder])
+
+    def _rotate_leftovers(self) -> None:
+        self.buffer = self.leftovers_buffer
+        self.leftovers_buffer = bytearray()
+
+    def _wait_for_free_buffer_slot(self) -> None:
+        # The inserted item is only a placeholder. If we fail to insert it in the given time, it means
+        # that the system is unable to process data quickly enough. By default, this timeout is very
+        # large (measured in minutes, not seconds), so hitting it indicates unusual behaviour.
+        try:
+            self.buffer_limit_queue.put(1, timeout=self.queue_timeout_seconds)
+        except queue.Full:
+            raise UnboundStreamBufferTimeout()
+
+    def _release_buffer(self) -> None:
+        # Pull one element from the queue of waiting elements.
+        # Note that it doesn't matter which element we pull.
+        # Each of them is just a placeholder. Since we know that we've put them there,
+        # there is no need to actually wait. The queue should contain at least one element if we got here.
+        try:
+            self.buffer_limit_queue.get_nowait()
+        except queue.Empty as error:  # pragma: nocover
+            raise RuntimeError('Buffer pulled twice from the queue.') from error
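A rough usage sketch of the new generator, with an in-memory stream standing in for a non-seekable source; the buffer, read and queue sizes below are illustrative only:

```python
import io

from b2sdk.transfer.emerge.unbound_write_intent import UnboundWriteIntentGenerator

source = io.BytesIO(b'x' * 25)  # stand-in for a read-only, non-seekable stream

generator = UnboundWriteIntentGenerator(
    read_only_source=source,
    buffer_size_bytes=10,       # each WriteIntent carries at most this many bytes
    read_size=4,                # how much is pulled from the source per read()
    queue_size=2,               # at most this many buffers alive at once (best effort)
    queue_timeout_seconds=60.0,
)

for intent in generator.iterator():
    print(intent.destination_offset, intent.length)  # 0 10, then 10 10, then 20 5
    # Normally the emerge machinery uploads the buffer, and the empty read at its end
    # releases the queue slot; here we drain it manually so iteration can continue.
    stream = intent.outbound_source.open()
    while stream.read(4096):
        pass
```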
diff --git a/b2sdk/transfer/emerge/write_intent.py b/b2sdk/transfer/emerge/write_intent.py
index f021155..a6e1e2d 100644
--- a/b2sdk/transfer/emerge/write_intent.py
+++ b/b2sdk/transfer/emerge/write_intent.py
@@ -8,6 +8,10 @@
 #
 ######################################################################
 
+from typing import Optional
+
+from b2sdk.utils import Sha1HexDigest
+
 
 class WriteIntent:
     """ Wrapper for outbound source that defines destination offset. """
@@ -63,6 +67,19 @@ class WriteIntent:
         """
         return self.outbound_source.is_upload()
 
+    def get_content_sha1(self) -> Optional[Sha1HexDigest]:
+        """
+        Return a 40-character string containing the hex SHA1 checksum, which can be used as the `large_file_sha1` entry.
+
+        This method is only used if a large file is constructed from a single source. If that source's hash is known,
+        the resulting file's SHA1 checksum will be the same and can be copied.
+
+        If the source's sha1 is unknown and can't be calculated, `None` is returned.
+
+        :rtype: str
+        """
+        return self.outbound_source.get_content_sha1()
+
     @classmethod
     def wrap_sources_iterator(cls, outbound_sources_iterator):
         """ Helper that wraps outbound sources iterator with write intents.
diff --git a/b2sdk/transfer/inbound/download_manager.py b/b2sdk/transfer/inbound/download_manager.py
index 008ce6a..2c8ccfc 100644
--- a/b2sdk/transfer/inbound/download_manager.py
+++ b/b2sdk/transfer/inbound/download_manager.py
@@ -44,7 +44,13 @@ class DownloadManager(TransferManager, ThreadPoolMixin, metaclass=B2TraceMetaAbs
     PARALLEL_DOWNLOADER_CLASS = staticmethod(ParallelDownloader)
     SIMPLE_DOWNLOADER_CLASS = staticmethod(SimpleDownloader)
 
-    def __init__(self, write_buffer_size: Optional[int] = None, check_hash: bool = True, **kwargs):
+    def __init__(
+        self,
+        write_buffer_size: Optional[int] = None,
+        check_hash: bool = True,
+        max_download_streams_per_file: Optional[int] = None,
+        **kwargs
+    ):
         """
         Initialize the DownloadManager using the given services object.
         """
@@ -58,6 +64,7 @@ class DownloadManager(TransferManager, ThreadPoolMixin, metaclass=B2TraceMetaAbs
                 align_factor=write_buffer_size,
                 thread_pool=self._thread_pool,
                 check_hash=check_hash,
+                max_streams=max_download_streams_per_file,
             ),
             self.SIMPLE_DOWNLOADER_CLASS(
                 min_chunk_size=self.MIN_CHUNK_SIZE,
diff --git a/b2sdk/transfer/inbound/downloaded_file.py b/b2sdk/transfer/inbound/downloaded_file.py
index 952743d..171d4ba 100644
--- a/b2sdk/transfer/inbound/downloaded_file.py
+++ b/b2sdk/transfer/inbound/downloaded_file.py
@@ -10,6 +10,7 @@
 
 import io
 import logging
+import pathlib
 from typing import Optional, Tuple, TYPE_CHECKING
 
 from requests.models import Response
@@ -21,6 +22,10 @@ from ...stream.progress import WritingStreamWithProgress
 
 from b2sdk.exception import (
     ChecksumMismatch,
+    DestinationDirectoryDoesntAllowOperation,
+    DestinationDirectoryDoesntExist,
+    DestinationIsADirectory,
+    DestinationParentIsNotADirectory,
     TruncatedOutput,
 )
 from b2sdk.utils import set_file_mtime
@@ -70,7 +75,27 @@ class MtimeUpdatedFile(io.IOBase):
         return self.file.tell()
 
     def __enter__(self):
-        self.file = open(self.path_, self.mode, buffering=self.buffering)
+        try:
+            path = pathlib.Path(self.path_)
+            if not path.parent.exists():
+                raise DestinationDirectoryDoesntExist()
+
+            if not path.parent.is_dir():
+                raise DestinationParentIsNotADirectory()
+
+            # This ensures consistency on *nix and Windows. Windows doesn't seem to raise ``IsADirectoryError`` at all,
+            # so with this we actually can differentiate between permissions errors and target being a directory.
+            if path.exists() and path.is_dir():
+                raise DestinationIsADirectory()
+        except PermissionError as ex:
+            raise DestinationDirectoryDoesntAllowOperation() from ex
+
+        # All remaining problems should be with permissions.
+        try:
+            self.file = open(self.path_, self.mode, buffering=self.buffering)
+        except PermissionError as ex:
+            raise DestinationDirectoryDoesntAllowOperation() from ex
+
         self.write = self.file.write
         self.read = self.file.read
         return self
@@ -79,6 +104,9 @@ class MtimeUpdatedFile(io.IOBase):
         self.file.close()
         set_file_mtime(self.path_, self.mod_time_to_set)
 
+    def __str__(self):
+        return str(self.path_)
+
 
 class DownloadedFile:
     """
diff --git a/b2sdk/transfer/inbound/downloader/parallel.py b/b2sdk/transfer/inbound/downloader/parallel.py
index 33b80bf..b2720d6 100644
--- a/b2sdk/transfer/inbound/downloader/parallel.py
+++ b/b2sdk/transfer/inbound/downloader/parallel.py
@@ -8,20 +8,22 @@
 #
 ######################################################################
 
-from concurrent import futures
-from io import IOBase
-from typing import Optional
 import logging
 import queue
 import threading
+from concurrent import futures
+from io import IOBase
+from time import perf_counter_ns
+from typing import Optional
 
 from requests.models import Response
 
-from .abstract import AbstractDownloader
 from b2sdk.encryption.setting import EncryptionSetting
 from b2sdk.file_version import DownloadVersion
 from b2sdk.session import B2Session
 from b2sdk.utils.range_ import Range
+from .abstract import AbstractDownloader
+from .stats_collector import StatsCollector
 
 logger = logging.getLogger(__name__)
 
@@ -118,7 +120,15 @@ class ParallelDownloader(AbstractDownloader):
         if self._check_hash:
             # we skip hashing if we would not check it - hasher object is actually a EmptyHasher instance
             # but we avoid here reading whole file (except for the first part) from disk again
+            before_hash = perf_counter_ns()
             self._finish_hashing(first_part, file, hasher, download_version.content_length)
+            after_hash = perf_counter_ns()
+            logger.info(
+                'download stats | %s | %s total: %.3f ms',
+                file,
+                'finish_hash',
+                (after_hash - before_hash) / 1000000,
+            )
 
         return bytes_written, hasher.hexdigest()
 
@@ -203,18 +213,31 @@ class WriterThread(threading.Thread):
         self.file = file
         self.queue = queue.Queue(max_queue_depth)
         self.total = 0
+        self.stats_collector = StatsCollector(str(self.file), 'writer', 'seek')
         super(WriterThread, self).__init__()
 
     def run(self):
         file = self.file
         queue_get = self.queue.get
-        while 1:
-            shutdown, offset, data = queue_get()
-            if shutdown:
-                break
-            file.seek(offset)
-            file.write(data)
-            self.total += len(data)
+        stats_collector_read = self.stats_collector.read
+        stats_collector_other = self.stats_collector.other
+        stats_collector_write = self.stats_collector.write
+
+        with self.stats_collector.total:
+            while 1:
+                with stats_collector_read:
+                    shutdown, offset, data = queue_get()
+
+                if shutdown:
+                    break
+
+                with stats_collector_other:
+                    file.seek(offset)
+
+                with stats_collector_write:
+                    file.write(data)
+
+                self.total += len(data)
 
     def __enter__(self):
         self.start()
@@ -223,6 +246,7 @@ class WriterThread(threading.Thread):
     def __exit__(self, exc_type, exc_val, exc_tb):
         self.queue.put((True, None, None))
         self.join()
+        self.stats_collector.report()
 
 
 def download_first_part(
@@ -243,6 +267,19 @@ def download_first_part(
     :param chunk_size: size (in bytes) of read data chunks
     :param encryption: encryption mode, algorithm and key
     """
+    # This function contains a loop that has a heavy impact on performance.
+    # It has not been broken down into several small functions for fear of
+    # the performance overhead of calling a Python function. Advanced performance optimization
+    # techniques are in use here, for example avoiding internal Python getattr calls by
+    # caching bound methods in local variables. Most of this code was written
+    # when Python 2.7 (or maybe even 2.6) had to be supported, so some of those
+    # optimizations could perhaps be removed without affecting performance.
+    #
+    # Due to reports of hard-to-debug performance issues, this code has also been instrumented
+    # with performance measurements. A known issue is GCP VMs, which have more network speed
+    # than storage speed, but end users run into various network and storage issues.
+    # Basic tools to figure out where the time is being spent are a must for long-term
+    # maintainability.
 
     writer_queue_put = writer.queue.put
     hasher_update = hasher.update
@@ -253,42 +290,76 @@ def download_first_part(
 
     bytes_read = 0
     stop = False
-    for data in response.iter_content(chunk_size=chunk_size):
-        if first_offset + bytes_read + len(data) >= last_offset:
-            to_write = data[:last_offset - bytes_read]
-            stop = True
-        else:
-            to_write = data
-        writer_queue_put((False, first_offset + bytes_read, to_write))
-        hasher_update(to_write)
-        bytes_read += len(to_write)
-        if stop:
-            break
-
-    # since we got everything we need from original response, close the socket and free the buffer
-    # to avoid a timeout exception during hashing and other trouble
-    response.close()
-
-    url = response.request.url
-    tries_left = 5 - 1  # this is hardcoded because we are going to replace the entire retry interface soon, so we'll avoid deprecation here and keep it private
-    while tries_left and bytes_read < actual_part_size:
-        cloud_range = starting_cloud_range.subrange(
-            bytes_read, actual_part_size - 1
-        )  # first attempt was for the whole file, but retries are bound correctly
-        logger.debug(
-            'download attempts remaining: %i, bytes read already: %i. Getting range %s now.',
-            tries_left, bytes_read, cloud_range
-        )
-        with session.download_file_from_url(
-            url,
-            cloud_range.as_tuple(),
-            encryption=encryption,
-        ) as response:
-            for to_write in response.iter_content(chunk_size=chunk_size):
+
+    stats_collector = StatsCollector(response.url, f'{first_offset}:{last_offset}', 'hash')
+    stats_collector_read = stats_collector.read
+    stats_collector_other = stats_collector.other
+    stats_collector_write = stats_collector.write
+
+    with stats_collector.total:
+        response_iterator = response.iter_content(chunk_size=chunk_size)
+
+        while True:
+            with stats_collector_read:
+                try:
+                    data = next(response_iterator)
+                except StopIteration:
+                    break
+
+            if first_offset + bytes_read + len(data) >= last_offset:
+                to_write = data[:last_offset - bytes_read]
+                stop = True
+            else:
+                to_write = data
+
+            with stats_collector_write:
                 writer_queue_put((False, first_offset + bytes_read, to_write))
+
+            with stats_collector_other:
                 hasher_update(to_write)
-                bytes_read += len(to_write)
-        tries_left -= 1
+
+            bytes_read += len(to_write)
+            if stop:
+                break
+
+        # since we got everything we need from original response, close the socket and free the buffer
+        # to avoid a timeout exception during hashing and other trouble
+        response.close()
+
+        url = response.request.url
+        tries_left = 5 - 1  # this is hardcoded because we are going to replace the entire retry interface soon, so we'll avoid deprecation here and keep it private
+        while tries_left and bytes_read < actual_part_size:
+            cloud_range = starting_cloud_range.subrange(
+                bytes_read, actual_part_size - 1
+            )  # first attempt was for the whole file, but retries are bound correctly
+            logger.debug(
+                'download attempts remaining: %i, bytes read already: %i. Getting range %s now.',
+                tries_left, bytes_read, cloud_range
+            )
+            with session.download_file_from_url(
+                url,
+                cloud_range.as_tuple(),
+                encryption=encryption,
+            ) as response:
+                response_iterator = response.iter_content(chunk_size=chunk_size)
+
+                while True:
+                    with stats_collector_read:
+                        try:
+                            to_write = next(response_iterator)
+                        except StopIteration:
+                            break
+
+                    with stats_collector_write:
+                        writer_queue_put((False, first_offset + bytes_read, to_write))
+
+                    with stats_collector_other:
+                        hasher_update(to_write)
+
+                    bytes_read += len(to_write)
+            tries_left -= 1
+
+    stats_collector.report()
 
 
 def download_non_first_part(
@@ -321,15 +392,32 @@ def download_non_first_part(
             'download attempts remaining: %i, bytes read already: %i. Getting range %s now.',
             retries_left, bytes_read, cloud_range
         )
-        with session.download_file_from_url(
-            url,
-            cloud_range.as_tuple(),
-            encryption=encryption,
-        ) as response:
-            for to_write in response.iter_content(chunk_size=chunk_size):
-                writer_queue_put((False, start_range + bytes_read, to_write))
-                bytes_read += len(to_write)
-        retries_left -= 1
+        stats_collector = StatsCollector(url, f'{cloud_range.start}:{cloud_range.end}', 'none')
+        stats_collector_read = stats_collector.read
+        stats_collector_write = stats_collector.write
+
+        with stats_collector.total:
+            with session.download_file_from_url(
+                url,
+                cloud_range.as_tuple(),
+                encryption=encryption,
+            ) as response:
+                response_iterator = response.iter_content(chunk_size=chunk_size)
+
+                while True:
+                    with stats_collector_read:
+                        try:
+                            to_write = next(response_iterator)
+                        except StopIteration:
+                            break
+
+                    with stats_collector_write:
+                        writer_queue_put((False, start_range + bytes_read, to_write))
+
+                    bytes_read += len(to_write)
+            retries_left -= 1
+
+        stats_collector.report()
 
 
 class PartToDownload:
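The comment above refers to a classic CPython micro-optimization: binding a frequently used bound method to a local name once, so the hot loop performs a fast local lookup instead of repeated attribute lookups. A tiny standalone illustration of the pattern (not library code):

```python
import queue


def drain(q: queue.Queue, out: list) -> None:
    # Bind the bound methods once, outside the loop.
    queue_get = q.get_nowait
    out_append = out.append
    while True:
        try:
            out_append(queue_get())
        except queue.Empty:
            break


q = queue.Queue()
for chunk in (b'a', b'b', b'c'):
    q.put(chunk)

received = []
drain(q, received)
assert received == [b'a', b'b', b'c']
```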
diff --git a/b2sdk/transfer/inbound/downloader/stats_collector.py b/b2sdk/transfer/inbound/downloader/stats_collector.py
new file mode 100644
index 0000000..6e09fbb
--- /dev/null
+++ b/b2sdk/transfer/inbound/downloader/stats_collector.py
@@ -0,0 +1,89 @@
+######################################################################
+#
+# File: b2sdk/transfer/inbound/downloader/stats_collector.py
+#
+# Copyright 2020 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
+
+import logging
+from dataclasses import (
+    dataclass,
+    field,
+)
+from time import perf_counter_ns
+from typing import (
+    Any,
+    Optional,
+    Type,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class SingleStatsCollector:
+    TO_MS = 1_000_000
+
+    def __init__(self):
+        self.latest_entry: Optional[int] = None
+        self.sum_of_all_entries: int = 0
+        self.started_perf_timer: Optional[int] = None
+
+    def __enter__(self) -> None:
+        self.started_perf_timer = perf_counter_ns()
+
+    def __exit__(self, exc_type: Type, exc_val: Exception, exc_tb: Any) -> None:
+        time_diff = perf_counter_ns() - self.started_perf_timer
+        self.latest_entry = time_diff
+        self.sum_of_all_entries += time_diff
+        self.started_perf_timer = None
+
+    @property
+    def sum_ms(self) -> float:
+        return self.sum_of_all_entries / self.TO_MS
+
+    @property
+    def latest_ms(self) -> float:
+        return self.latest_entry / self.TO_MS
+
+    @property
+    def has_any_entry(self) -> bool:
+        return self.latest_entry is not None
+
+
+@dataclass
+class StatsCollector:
+    name: str  #: file name or object url
+    detail: str  #: description of the thread, ex. "10000000:20000000" or "writer"
+    other_name: str  #: other statistic, typically "seek" or "hash"
+    total: SingleStatsCollector = field(default_factory=SingleStatsCollector)
+    other: SingleStatsCollector = field(default_factory=SingleStatsCollector)
+    write: SingleStatsCollector = field(default_factory=SingleStatsCollector)
+    read: SingleStatsCollector = field(default_factory=SingleStatsCollector)
+
+    def report(self):
+        if self.read.has_any_entry:
+            logger.info('download stats | %s | TTFB: %.3f ms', self, self.read.latest_ms)
+            logger.info(
+                'download stats | %s | read() without TTFB: %.3f ms', self,
+                (self.read.sum_of_all_entries - self.read.latest_entry) / self.read.TO_MS
+            )
+        if self.other.has_any_entry:
+            logger.info(
+                'download stats | %s | %s total: %.3f ms', self, self.other_name, self.other.sum_ms
+            )
+        if self.write.has_any_entry:
+            logger.info('download stats | %s | write() total: %.3f ms', self, self.write.sum_ms)
+        if self.total.has_any_entry:
+            basic_operation_time = self.write.sum_of_all_entries \
+                                   + self.other.sum_of_all_entries \
+                                   + self.read.sum_of_all_entries
+            overhead = self.total.sum_of_all_entries - basic_operation_time
+            logger.info(
+                'download stats | %s | overhead: %.3f ms', self, overhead / self.total.TO_MS
+            )
+
+    def __str__(self):
+        return f'{self.name}[{self.detail}]'
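Both downloader code paths use the same pattern: each `SingleStatsCollector` is a re-enterable context manager that accumulates wall-clock time for one kind of work, and `report()` logs the breakdown plus the residual overhead. A small standalone sketch of that pattern:

```python
import logging

from b2sdk.transfer.inbound.downloader.stats_collector import StatsCollector

logging.basicConfig(level=logging.INFO)

stats = StatsCollector(name='example.bin', detail='0:1024', other_name='hash')

with stats.total:
    for chunk in (b'abc', b'def'):
        with stats.read:    # time spent obtaining data (here: nothing to wait for)
            data = chunk
        with stats.other:   # the "other" bucket, labelled 'hash' above
            digest = hash(data)
        with stats.write:   # time spent handing data to the writer
            pass

stats.report()  # logs TTFB, read/hash/write totals and the remaining overhead
```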
diff --git a/b2sdk/transfer/outbound/copy_source.py b/b2sdk/transfer/outbound/copy_source.py
index e76c17d..1b359ae 100644
--- a/b2sdk/transfer/outbound/copy_source.py
+++ b/b2sdk/transfer/outbound/copy_source.py
@@ -12,6 +12,7 @@ from typing import Optional
 
 from b2sdk.encryption.setting import EncryptionSetting
 from b2sdk.transfer.outbound.outbound_source import OutboundTransferSource
+from b2sdk.http_constants import LARGE_FILE_SHA1
 
 
 class CopySource(OutboundTransferSource):
@@ -80,3 +81,9 @@ class CopySource(OutboundTransferSource):
             source_file_info=self.source_file_info,
             source_content_type=self.source_content_type
         )
+
+    def get_content_sha1(self):
+        if self.offset or self.length:
+            # this is a copy of only a range of the source, can't copy the SHA1
+            return None
+        return self.source_file_info.get(LARGE_FILE_SHA1)
diff --git a/b2sdk/transfer/outbound/outbound_source.py b/b2sdk/transfer/outbound/outbound_source.py
index 8e46885..9faf9de 100644
--- a/b2sdk/transfer/outbound/outbound_source.py
+++ b/b2sdk/transfer/outbound/outbound_source.py
@@ -9,6 +9,9 @@
 ######################################################################
 
 from abc import ABCMeta, abstractmethod
+from typing import Optional
+
+from b2sdk.utils import Sha1HexDigest
 
 
 class OutboundTransferSource(metaclass=ABCMeta):
@@ -26,19 +29,30 @@ class OutboundTransferSource(metaclass=ABCMeta):
     """
 
     @abstractmethod
-    def get_content_length(self):
+    def get_content_length(self) -> int:
         """
-        Return the number of bytes of data in the file.
+        Returns the number of bytes of data in the file.
         """
 
     @abstractmethod
-    def is_upload(self):
-        """ Return if outbound source is an upload source.
-        :rtype bool:
+    def get_content_sha1(self) -> Optional[Sha1HexDigest]:
+        """
+        Return a 40-character string containing the hex SHA1 checksum, which can be used as the `large_file_sha1` entry.
+
+        This method is only used if a large file is constructed from a single source. If that source's hash is known,
+        the resulting file's SHA1 checksum will be the same and can be copied.
+
+        If the source's sha1 is unknown and can't be calculated, `None` is returned.
         """
 
     @abstractmethod
-    def is_copy(self):
-        """ Return if outbound source is a copy source.
-        :rtype bool:
+    def is_upload(self) -> bool:
+        """
+        Returns True if outbound source is an upload source.
+        """
+
+    @abstractmethod
+    def is_copy(self) -> bool:
+        """
+        Returns True if outbound source is a copy source.
         """
diff --git a/b2sdk/transfer/outbound/upload_manager.py b/b2sdk/transfer/outbound/upload_manager.py
index 5341374..9772482 100644
--- a/b2sdk/transfer/outbound/upload_manager.py
+++ b/b2sdk/transfer/outbound/upload_manager.py
@@ -52,6 +52,7 @@ class UploadManager(TransferManager, ThreadPoolMixin):
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         f = self._thread_pool.submit(
             self._upload_small_file,
@@ -64,6 +65,7 @@ class UploadManager(TransferManager, ThreadPoolMixin):
             encryption,
             file_retention,
             legal_hold,
+            custom_upload_timestamp=custom_upload_timestamp,
         )
         return f
 
@@ -181,6 +183,7 @@ class UploadManager(TransferManager, ThreadPoolMixin):
         encryption: Optional[EncryptionSetting] = None,
         file_retention: Optional[FileRetentionSetting] = None,
         legal_hold: Optional[LegalHold] = None,
+        custom_upload_timestamp: Optional[int] = None,
     ):
         content_length = upload_source.get_content_length()
         exception_info_list = []
@@ -211,6 +214,7 @@ class UploadManager(TransferManager, ThreadPoolMixin):
                             server_side_encryption=encryption,  # todo: client side encryption
                             file_retention=file_retention,
                             legal_hold=legal_hold,
+                            custom_upload_timestamp=custom_upload_timestamp,
                         )
                         if content_sha1 == HEX_DIGITS_AT_END:
                             content_sha1 = input_stream.hash
diff --git a/b2sdk/transfer/outbound/upload_source.py b/b2sdk/transfer/outbound/upload_source.py
index 26dd8f7..3e113e0 100644
--- a/b2sdk/transfer/outbound/upload_source.py
+++ b/b2sdk/transfer/outbound/upload_source.py
@@ -10,14 +10,29 @@
 
 import hashlib
 import io
+import logging
 import os
 
 from abc import abstractmethod
+from enum import auto, Enum, unique
+from typing import Callable, List, Optional, Union
 
 from b2sdk.exception import InvalidUploadSource
+from b2sdk.file_version import BaseFileVersion
+from b2sdk.http_constants import DEFAULT_MIN_PART_SIZE
 from b2sdk.stream.range import RangeOfInputStream, wrap_with_range
+from b2sdk.transfer.outbound.copy_source import CopySource
 from b2sdk.transfer.outbound.outbound_source import OutboundTransferSource
-from b2sdk.utils import hex_sha1_of_stream, hex_sha1_of_unlimited_stream
+from b2sdk.utils import hex_sha1_of_unlimited_stream, Sha1HexDigest, IncrementalHexDigester, hex_sha1_of_stream
+
+logger = logging.getLogger(__name__)
+
+
+@unique
+class UploadMode(Enum):
+    """ Mode of file uploads """
+    FULL = auto()  #: always upload the whole file
+    INCREMENTAL = auto()  #: use incremental uploads when possible
 
 
 class AbstractUploadSource(OutboundTransferSource):
@@ -26,35 +41,48 @@ class AbstractUploadSource(OutboundTransferSource):
     """
 
     @abstractmethod
-    def get_content_sha1(self):
+    def get_content_sha1(self) -> Optional[Sha1HexDigest]:
         """
-        Return a 40-character string containing the hex SHA1 checksum of the data in the file.
+        Returns a 40-character string containing the hex SHA1 checksum of the data in the file.
         """
 
     @abstractmethod
-    def open(self):
+    def open(self) -> io.IOBase:
         """
-        Return a binary file-like object from which the
-        data can be read.
-        :return:
+        Returns a binary file-like object from which the data can be read.
         """
 
-    def is_upload(self):
+    def is_upload(self) -> bool:
         return True
 
-    def is_copy(self):
+    def is_copy(self) -> bool:
         return False
 
-    def is_sha1_known(self):
+    def is_sha1_known(self) -> bool:
+        """
+        Returns information whether SHA1 of the source is currently available.
+        Returns whether the SHA1 of the source is already known.
+        Note that a negative result doesn't mean that the SHA1 cannot be obtained;
+        calling ``get_content_sha1`` can still provide a valid digest.
         return False
 
 
 class UploadSourceBytes(AbstractUploadSource):
-    def __init__(self, data_bytes, content_sha1=None):
+    def __init__(
+        self,
+        data_bytes: Union[bytes, bytearray],
+        content_sha1: Optional[Sha1HexDigest] = None,
+    ):
+        """
+        Initialize upload source using given bytes.
+
+        :param data_bytes: Data that is to be uploaded.
+        :param content_sha1: SHA1 hexdigest of the data, or ``None``.
+        """
         self.data_bytes = data_bytes
         self.content_sha1 = content_sha1
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return '<{classname} data={data} id={id}>'.format(
             classname=self.__class__.__name__,
             data=str(self.data_bytes[:20]) +
@@ -62,10 +90,10 @@ class UploadSourceBytes(AbstractUploadSource):
             id=id(self),
         )
 
-    def get_content_length(self):
+    def get_content_length(self) -> int:
         return len(self.data_bytes)
 
-    def get_content_sha1(self):
+    def get_content_sha1(self) -> Optional[Sha1HexDigest]:
         if self.content_sha1 is None:
             self.content_sha1 = hashlib.sha1(self.data_bytes).hexdigest()
         return self.content_sha1
@@ -73,24 +101,33 @@ class UploadSourceBytes(AbstractUploadSource):
     def open(self):
         return io.BytesIO(self.data_bytes)
 
-    def is_sha1_known(self):
+    def is_sha1_known(self) -> bool:
         return self.content_sha1 is not None
 
 
-class UploadSourceLocalFile(AbstractUploadSource):
-    def __init__(self, local_path, content_sha1=None):
+class UploadSourceLocalFileBase(AbstractUploadSource):
+    def __init__(
+        self,
+        local_path: Union[os.PathLike, str],
+        content_sha1: Optional[Sha1HexDigest] = None,
+    ):
+        """
+        Initialize upload source using provided path.
+
+        :param local_path: Any path-like object that points to a file to be uploaded.
+        :param content_sha1: SHA1 hexdigest of the data, or ``None``.
+        """
         self.local_path = local_path
         self.content_length = 0
-        self.check_path_and_get_size()
-
         self.content_sha1 = content_sha1
+        self.check_path_and_get_size()
 
-    def check_path_and_get_size(self):
+    def check_path_and_get_size(self) -> None:
         if not os.path.isfile(self.local_path):
             raise InvalidUploadSource(self.local_path)
         self.content_length = os.path.getsize(self.local_path)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return (
             '<{classname} local_path={local_path} content_length={content_length} '
             'content_sha1={content_sha1} id={id}>'
@@ -102,28 +139,43 @@ class UploadSourceLocalFile(AbstractUploadSource):
             id=id(self),
         )
 
-    def get_content_length(self):
+    def get_content_length(self) -> int:
         return self.content_length
 
-    def get_content_sha1(self):
+    def get_content_sha1(self) -> Optional[Sha1HexDigest]:
         if self.content_sha1 is None:
-            self.content_sha1 = self._hex_sha1_of_file(self.local_path)
+            self.content_sha1 = self._hex_sha1_of_file()
         return self.content_sha1
 
     def open(self):
         return io.open(self.local_path, 'rb')
 
-    def _hex_sha1_of_file(self, local_path):
+    def _hex_sha1_of_file(self) -> Sha1HexDigest:
         with self.open() as f:
             return hex_sha1_of_stream(f, self.content_length)
 
-    def is_sha1_known(self):
+    def is_sha1_known(self) -> bool:
         return self.content_sha1 is not None
 
 
-class UploadSourceLocalFileRange(UploadSourceLocalFile):
-    def __init__(self, local_path, content_sha1=None, offset=0, length=None):
-        super(UploadSourceLocalFileRange, self).__init__(local_path, content_sha1)
+class UploadSourceLocalFileRange(UploadSourceLocalFileBase):
+    def __init__(
+        self,
+        local_path: Union[os.PathLike, str],
+        content_sha1: Optional[Sha1HexDigest] = None,
+        offset: int = 0,
+        length: Optional[int] = None,
+    ):
+        """
+        Initialize upload source using provided path.
+
+        :param local_path: Any path-like object that points to a file to be uploaded.
+        :param content_sha1: SHA1 hexdigest of the data, or ``None``.
+        :param offset: Position in the file where upload should start from.
+        :param length: Amount of data to be uploaded. If ``None``, the length of
+                      the remainder of the file is used.
+        """
+        super().__init__(local_path, content_sha1)
         self.file_size = self.content_length
         self.offset = offset
         if length is None:
@@ -133,7 +185,7 @@ class UploadSourceLocalFileRange(UploadSourceLocalFile):
                 raise ValueError('Range length overflow file size')
             self.content_length = length
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return (
             '<{classname} local_path={local_path} offset={offset} '
             'content_length={content_length} content_sha1={content_sha1} id={id}>'
@@ -151,13 +203,105 @@ class UploadSourceLocalFileRange(UploadSourceLocalFile):
         return wrap_with_range(fp, self.file_size, self.offset, self.content_length)
 
 
+class UploadSourceLocalFile(UploadSourceLocalFileBase):
+    def get_incremental_sources(
+        self,
+        file_version: BaseFileVersion,
+        min_part_size: Optional[int] = None,
+    ) -> List[OutboundTransferSource]:
+        """
+        Split the upload into a copy source and an upload source, constructing an incremental upload.
+
+        This will return a list of outbound sources. If the upload cannot be split, the method will return [self].
+        """
+
+        if not file_version:
+            logger.debug(
+                "Fallback to full upload for %s -- no matching file on server", self.local_path
+            )
+            return [self]
+
+        min_part_size = min_part_size or DEFAULT_MIN_PART_SIZE
+        if file_version.size < min_part_size:
+            # existing file size below minimal large file part size
+            logger.debug(
+                "Fallback to full upload for %s -- remote file is smaller than %i bytes",
+                self.local_path, min_part_size
+            )
+            return [self]
+
+        if self.get_content_length() < file_version.size:
+            logger.debug(
+                "Fallback to full upload for %s -- local file is smaller than remote",
+                self.local_path
+            )
+            return [self]
+
+        content_sha1 = file_version.get_content_sha1()
+
+        if not content_sha1:
+            logger.debug(
+                "Fallback to full upload for %s -- remote file content SHA1 unknown",
+                self.local_path
+            )
+            return [self]
+
+        # We're calculating the hex digest of the first N bytes of the file. However, if the sha1 differs,
+        # we'll need the hash of the whole file anyway, so we can reuse this partial computation.
+        with self.open() as fp:
+            digester = IncrementalHexDigester(fp)
+            hex_digest = digester.update_from_stream(file_version.size)
+            if hex_digest != content_sha1:
+                logger.debug(
+                    "Fallback to full upload for %s -- content in common range differs",
+                    self.local_path,
+                )
+                # Calculate SHA1 of the remainder of the file and set it.
+                self.content_sha1 = digester.update_from_stream()
+                return [self]
+
+        logger.debug("Incremental upload of %s is possible.", self.local_path)
+
+        if file_version.server_side_encryption and file_version.server_side_encryption.is_unknown():
+            source_encryption = None
+        else:
+            source_encryption = file_version.server_side_encryption
+
+        sources = [
+            CopySource(
+                file_version.id_,
+                offset=0,
+                length=file_version.size,
+                encryption=source_encryption,
+                source_file_info=file_version.file_info,
+                source_content_type=file_version.content_type,
+            ),
+            UploadSourceLocalFileRange(self.local_path, offset=file_version.size),
+        ]
+        return sources
+
+
 class UploadSourceStream(AbstractUploadSource):
-    def __init__(self, stream_opener, stream_length=None, stream_sha1=None):
+    def __init__(
+        self,
+        stream_opener: Callable[[], io.IOBase],
+        stream_length: Optional[int] = None,
+        stream_sha1: Optional[Sha1HexDigest] = None,
+    ):
+        """
+        Initialize upload source using arbitrary function.
+
+        :param stream_opener: A function that opens a stream for uploading.
+        :param stream_length: Length of the stream. If ``None``, it will be calculated
+                      from the stream the first time it's required.
+        :param stream_sha1: SHA1 of the stream. If ``None``, it will be calculated from
+                      the stream the first time it's required.
+        """
         self.stream_opener = stream_opener
         self._content_length = stream_length
         self._content_sha1 = stream_sha1
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return (
             '<{classname} stream_opener={stream_opener} content_length={content_length} '
             'content_sha1={content_sha1} id={id}>'
@@ -169,12 +313,12 @@ class UploadSourceStream(AbstractUploadSource):
             id=id(self),
         )
 
-    def get_content_length(self):
+    def get_content_length(self) -> int:
         if self._content_length is None:
             self._set_content_length_and_sha1()
         return self._content_length
 
-    def get_content_sha1(self):
+    def get_content_sha1(self) -> Optional[Sha1HexDigest]:
         if self._content_sha1 is None:
             self._set_content_length_and_sha1()
         return self._content_sha1
@@ -182,17 +326,33 @@ class UploadSourceStream(AbstractUploadSource):
     def open(self):
         return self.stream_opener()
 
-    def _set_content_length_and_sha1(self):
+    def _set_content_length_and_sha1(self) -> None:
         sha1, content_length = hex_sha1_of_unlimited_stream(self.open())
         self._content_length = content_length
         self._content_sha1 = sha1
 
-    def is_sha1_known(self):
+    def is_sha1_known(self) -> bool:
         return self._content_sha1 is not None
 
 
 class UploadSourceStreamRange(UploadSourceStream):
-    def __init__(self, stream_opener, offset, stream_length, stream_sha1=None):
+    def __init__(
+        self,
+        stream_opener: Callable[[], io.IOBase],
+        offset: int = 0,
+        stream_length: Optional[int] = None,
+        stream_sha1: Optional[Sha1HexDigest] = None,
+    ):
+        """
+        Initialize upload source using arbitrary function.
+
+        :param stream_opener: A function that opens a stream for uploading.
+        :param offset: Offset from which stream should be uploaded.
+        :param stream_length: Length of the stream. If ``None``, it will be calculated
+                      from the stream the first time it's required.
+        :param stream_sha1: SHA1 of the stream. If ``None``, it will be calculated from
+                      the stream the first time it's required.
+        """
         super(UploadSourceStreamRange, self).__init__(
             stream_opener,
             stream_length=stream_length,
@@ -200,7 +360,7 @@ class UploadSourceStreamRange(UploadSourceStream):
         )
         self._offset = offset
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return (
             '<{classname} stream_opener={stream_opener} offset={offset} '
             'content_length={content_length} content_sha1={content_sha1} id={id}>'
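Taken together, the new `UploadMode` enum and `UploadSourceLocalFile.get_incremental_sources` are the building blocks of incremental uploads: when a matching remote file version exists and its prefix SHA1 agrees with the local file, the upload is split into a server-side copy plus an upload of the new tail. A hedged sketch of the fallback decision; the temporary file stands in for a real local path, and the remote `file_version` lookup is only hinted at in comments:

```python
import tempfile

from b2sdk.transfer.outbound.upload_source import UploadSourceLocalFile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'0' * (6 * 1000 * 1000))  # a bit over the 5 MB default minimum part size
    local_path = tmp.name

source = UploadSourceLocalFile(local_path)

# Without a matching remote file version the split is impossible: full upload.
assert source.get_incremental_sources(file_version=None) == [source]

# With a matching file_version (for example obtained via bucket.get_file_info_by_name()),
# a successful split would instead return:
#   [CopySource(<bytes already stored remotely>), UploadSourceLocalFileRange(<new tail>)]
```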
diff --git a/b2sdk/utils/__init__.py b/b2sdk/utils/__init__.py
index 5edd3bf..8a3e88e 100644
--- a/b2sdk/utils/__init__.py
+++ b/b2sdk/utils/__init__.py
@@ -2,7 +2,7 @@
 #
 # File: b2sdk/utils/__init__.py
 #
-# Copyright 2019 Backblaze Inc. All Rights Reserved.
+# Copyright 2022 Backblaze Inc. All Rights Reserved.
 #
 # License https://www.backblaze.com/using_b2_code.html
 #
@@ -16,12 +16,20 @@ import re
 import shutil
 import tempfile
 import time
-import concurrent.futures as futures
+from dataclasses import dataclass, field
 from decimal import Decimal
+from itertools import chain
+from typing import Any, Iterator, List, NewType, Optional, Tuple, TypeVar
 from urllib.parse import quote, unquote_plus
 
 from logfury.v1 import DefaultTraceAbstractMeta, DefaultTraceMeta, limit_trace_arguments, disable_trace, trace_call
 
+Sha1HexDigest = NewType('Sha1HexDigest', str)
+T = TypeVar('T')
+# TODO: When we drop Python 3.7 support, this should be replaced
+#   with typing.Protocol that exposes read method.
+ReadOnlyStream = Any
+
 
 def b2_url_encode(s):
     """
@@ -84,19 +92,17 @@ def choose_part_ranges(content_length, minimum_part_size):
     return parts
 
 
-def hex_sha1_of_stream(input_stream, content_length):
+def update_digest_from_stream(digest: T, input_stream: ReadOnlyStream, content_length: int) -> T:
     """
-    Return the 40-character hex SHA1 checksum of the first content_length
-    bytes in the input stream.
+    Update and return `digest` with data read from `input_stream`.
 
-    :param input_stream: stream object, which exposes read() method
+    :param digest: a digest object, which exposes an `update(bytes)` method
+    :param input_stream: stream object, which exposes a `read(int|None)` method
     :param content_length: expected length of the stream
     :type content_length: int
-    :rtype: str
     """
     remaining = content_length
     block_size = 1024 * 1024
-    digest = hashlib.sha1()
     while remaining != 0:
         to_read = min(remaining, block_size)
         data = input_stream.read(to_read)
@@ -106,37 +112,89 @@ def hex_sha1_of_stream(input_stream, content_length):
             )
         digest.update(data)
         remaining -= to_read
-    return digest.hexdigest()
+    return digest
 
 
-def hex_sha1_of_unlimited_stream(input_stream, limit=None):
-    block_size = 1024 * 1024
-    content_length = 0
-    digest = hashlib.sha1()
-    while True:
-        if limit is not None:
-            to_read = min(limit - content_length, block_size)
-        else:
-            to_read = block_size
-        data = input_stream.read(to_read)
-        data_len = len(data)
-        if data_len > 0:
-            digest.update(data)
-            content_length += data_len
-        if data_len < to_read:
-            return digest.hexdigest(), content_length
+def hex_sha1_of_stream(input_stream: ReadOnlyStream, content_length: int) -> Sha1HexDigest:
+    """
+    Return the 40-character hex SHA1 checksum of the first content_length
+    bytes in the input stream.
+
+    :param input_stream: stream object, which exposes read(int|None) method
+    :param content_length: expected length of the stream
+    :type content_length: int
+    :rtype: str
+    """
+    return Sha1HexDigest(
+        update_digest_from_stream(
+            hashlib.sha1(),
+            input_stream,
+            content_length,
+        ).hexdigest()
+    )
 
 
-def hex_sha1_of_file(path_):
+@dataclass
+class IncrementalHexDigester:
+    """
+    Calculates digest of a stream or parts of it.
+    """
+    stream: ReadOnlyStream
+    digest: 'hashlib._Hash' = field(  # noqa (_Hash is a dynamic object)
+        default_factory=hashlib.sha1
+    )
+    read_bytes: int = 0
+    block_size: int = 1024 * 1024
+
+    @property
+    def hex_digest(self) -> Sha1HexDigest:
+        return Sha1HexDigest(self.digest.hexdigest())
+
+    def update_from_stream(
+        self,
+        limit: Optional[int] = None,
+    ) -> Sha1HexDigest:
+        """
+        :param limit: How many new bytes to read from the stream at most. Default ``None`` – read until nothing is left.
+        """
+        offset = 0
+
+        while True:
+            if limit is not None:
+                to_read = min(limit - offset, self.block_size)
+            else:
+                to_read = self.block_size
+            data = self.stream.read(to_read)
+            data_len = len(data)
+            if data_len > 0:
+                self.digest.update(data)
+                self.read_bytes += data_len
+                offset += data_len
+            if data_len < to_read or to_read == 0:
+                break
+
+        return self.hex_digest
+
+
+def hex_sha1_of_unlimited_stream(
+    input_stream: ReadOnlyStream,
+    limit: Optional[int] = None,
+) -> Tuple[Sha1HexDigest, int]:
+    digester = IncrementalHexDigester(input_stream)
+    digester.update_from_stream(limit)
+    return digester.hex_digest, digester.read_bytes
+
+
+def hex_sha1_of_file(path_) -> Sha1HexDigest:
     with open(path_, 'rb') as file:
-        return hex_sha1_of_unlimited_stream(file)
+        return hex_sha1_of_unlimited_stream(file)[0]
 
 
-def hex_sha1_of_bytes(data: bytes) -> str:
+def hex_sha1_of_bytes(data: bytes) -> Sha1HexDigest:
     """
     Return the 40-character hex SHA1 checksum of the data.
     """
-    return hashlib.sha1(data).hexdigest()
+    return Sha1HexDigest(hashlib.sha1(data).hexdigest())
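As a quick orientation for the reworked hashing helpers above, here is a minimal usage sketch. It assumes the names shown in the hunk (`IncrementalHexDigester`, `hex_sha1_of_stream`, `hex_sha1_of_unlimited_stream`) are importable from `b2sdk.utils`, which is how the new tests import `Sha1HexDigest`; the payload is made up.

import hashlib
import io

from b2sdk.utils import (  # assumed import location
    IncrementalHexDigester,
    hex_sha1_of_stream,
    hex_sha1_of_unlimited_stream,
)

payload = b'hello world' * 1024

# Known length up front: hash exactly len(payload) bytes.
print(hex_sha1_of_stream(io.BytesIO(payload), len(payload)))

# Unknown length: hash until EOF and also learn how many bytes were read.
digest, read_bytes = hex_sha1_of_unlimited_stream(io.BytesIO(payload))
print(digest, read_bytes)

# Incremental hashing: hash the first 1024 bytes now and the rest later.
digester = IncrementalHexDigester(io.BytesIO(payload))
digester.update_from_stream(limit=1024)
print(digester.update_from_stream())  # final SHA-1 of the whole stream
assert digester.hex_digest == hashlib.sha1(payload).hexdigest()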
 
 
 def hex_md5_of_bytes(data: bytes) -> str:
@@ -433,6 +491,24 @@ def current_time_millis():
     return int(round(time.time() * 1000))
 
 
+def iterator_peek(iterator: Iterator[T], count: int) -> Tuple[List[T], Iterator[T]]:
+    """
+    Get up to the first `count` elements yielded by `iterator`.
+
+    The function will read `count` elements from `iterator`, or fewer if the end is reached first.  Returns a tuple
+    consisting of a list of retrieved elements and an iterator equivalent to the input iterator.
+    """
+
+    ret = []
+    for _ in range(count):
+        try:
+            ret.append(next(iterator))
+        except StopIteration:
+            break
+
+    return ret, chain(ret, iterator)
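A small sketch of how `iterator_peek` behaves, assuming the same `b2sdk.utils` import location; the values are illustrative only.

from itertools import count

from b2sdk.utils import iterator_peek  # assumed import location

head, it = iterator_peek(iter(range(5)), 3)
print(head)       # [0, 1, 2]
print(list(it))   # [0, 1, 2, 3, 4]; the peeked elements are re-chained, nothing is lost

# Works with infinite iterators as well, because at most two elements are pulled eagerly here.
head, it = iterator_peek(count(), 2)
print(head)       # [0, 1]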
+
+
 assert disable_trace
 assert limit_trace_arguments
 assert trace_call
diff --git a/b2sdk/v1/api.py b/b2sdk/v1/api.py
index 5163170..99855f7 100644
--- a/b2sdk/v1/api.py
+++ b/b2sdk/v1/api.py
@@ -204,3 +204,7 @@ class B2Api(v2.B2Api):
 
     def delete_key(self, application_key_id):
         return super().delete_key_by_id(application_key_id).as_dict()
+
+    def get_key(self, key_id: str) -> Optional[dict]:
+        keys = self.list_keys(start_application_key_id=key_id)['keys']
+        return next((key for key in keys if key['applicationKeyId'] == key_id), None)
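The new v1 `get_key` helper returns the raw key dict or `None`. A hedged sketch of its use (credentials and the key id are placeholders):

from b2sdk.v1 import B2Api, InMemoryAccountInfo

api = B2Api(InMemoryAccountInfo())
api.authorize_account('production', 'yourApplicationKeyId', 'yourApplicationKey')  # placeholder credentials

key = api.get_key('targetApplicationKeyId')  # placeholder key id
if key is None:
    print('no such key')
else:
    # v1 returns plain dicts, matching the rest of the v1 key API.
    print(key['applicationKeyId'], key.get('keyName'))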
diff --git a/b2sdk/v1/bucket.py b/b2sdk/v1/bucket.py
index 66cb10c..3645239 100644
--- a/b2sdk/v1/bucket.py
+++ b/b2sdk/v1/bucket.py
@@ -209,6 +209,7 @@ class Bucket(v2.Bucket):
         if_revision_is: Optional[int] = None,
         default_server_side_encryption: Optional[v2.EncryptionSetting] = None,
         default_retention: Optional[v2.BucketRetentionSetting] = None,
+        is_file_lock_enabled: Optional[bool] = None,
         **kwargs
     ):
         """
@@ -221,6 +222,7 @@ class Bucket(v2.Bucket):
         :param if_revision_is: revision number, update the info **only if** *revision* equals to *if_revision_is*
         :param default_server_side_encryption: default server side encryption settings (``None`` if unknown)
         :param default_retention: bucket default retention setting
+        :param bool is_file_lock_enabled: specifies whether the bucket should have File Lock enabled
         """
         # allow common tests to execute without hitting attributeerror
 
@@ -240,6 +242,7 @@ class Bucket(v2.Bucket):
             if_revision_is=if_revision_is,
             default_server_side_encryption=default_server_side_encryption,
             default_retention=default_retention,
+            is_file_lock_enabled=is_file_lock_enabled,
         )
 
     def ls(
@@ -247,7 +250,8 @@ class Bucket(v2.Bucket):
         folder_to_list: str = '',
         show_versions: bool = False,
         recursive: bool = False,
-        fetch_count: Optional[int] = 10000
+        fetch_count: Optional[int] = 10000,
+        **kwargs
     ):
         """
         Pretend that folders exist and yields the information about the files in a folder.
@@ -270,9 +274,9 @@ class Bucket(v2.Bucket):
         :returns: generator of (file_version, folder_name) tuples
 
         .. note::
-            In case of `recursive=True`, folder_name is returned only for first file in the folder.
+            In case of `recursive=True`, folder_name is not returned.
         """
-        return super().ls(folder_to_list, not show_versions, recursive, fetch_count)
+        return super().ls(folder_to_list, not show_versions, recursive, fetch_count, **kwargs)
 
 
 def download_file_and_return_info_dict(
diff --git a/b2sdk/v2/__init__.py b/b2sdk/v2/__init__.py
index 9d61375..22bc933 100644
--- a/b2sdk/v2/__init__.py
+++ b/b2sdk/v2/__init__.py
@@ -13,6 +13,7 @@ from b2sdk._v3 import parse_folder as parse_sync_folder
 from b2sdk._v3 import AbstractPath as AbstractSyncPath
 from b2sdk._v3 import LocalPath as LocalSyncPath
 
+from .account_info import AbstractAccountInfo
 from .api import B2Api
 from .b2http import B2Http
 from .bucket import Bucket, BucketFactory
diff --git a/b2sdk/v2/account_info.py b/b2sdk/v2/account_info.py
new file mode 100644
index 0000000..247cabd
--- /dev/null
+++ b/b2sdk/v2/account_info.py
@@ -0,0 +1,15 @@
+######################################################################
+#
+# File: b2sdk/v2/account_info.py
+#
+# Copyright 2023 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
+from b2sdk import _v3
+
+
+class AbstractAccountInfo(_v3.AbstractAccountInfo):
+    def list_bucket_names_ids(self):
+        return []  # Removed @abstractmethod decorator
diff --git a/debian/changelog b/debian/changelog
index 9fc5381..2f16e51 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+python-b2sdk (1.21.0-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Wed, 07 Jun 2023 02:22:29 -0000
+
 python-b2sdk (1.17.3-2) unstable; urgency=medium
 
   [ Debian Janitor ]
diff --git a/debian/patches/0001-Don-t-use-setuptools-scm.patch b/debian/patches/0001-Don-t-use-setuptools-scm.patch
index 5961be0..27c2bc3 100644
--- a/debian/patches/0001-Don-t-use-setuptools-scm.patch
+++ b/debian/patches/0001-Don-t-use-setuptools-scm.patch
@@ -4,10 +4,10 @@ Subject: Don't use setuptools-scm
 
 Remove dependency on setuptools-scm from setup.py.
 ---
-Index: python-b2sdk/setup.py
+Index: python-b2sdk.git/setup.py
 ===================================================================
---- python-b2sdk.orig/setup.py
-+++ python-b2sdk/setup.py
+--- python-b2sdk.git.orig/setup.py
++++ python-b2sdk.git/setup.py
 @@ -109,8 +109,6 @@ setup(
      # for example:
      # $ pip install -e .[dev,test]
diff --git a/debian/patches/0002-Hardcoded-b2sdk-version.patch b/debian/patches/0002-Hardcoded-b2sdk-version.patch
index 8f52b18..92c6a12 100644
--- a/debian/patches/0002-Hardcoded-b2sdk-version.patch
+++ b/debian/patches/0002-Hardcoded-b2sdk-version.patch
@@ -3,10 +3,10 @@ Date: Wed, 10 Feb 2021 19:12:36 -0300
 Subject: Hardcoded b2sdk version
 
 ---
-Index: python-b2sdk/b2sdk/version.py
+Index: python-b2sdk.git/b2sdk/version.py
 ===================================================================
---- python-b2sdk.orig/b2sdk/version.py
-+++ python-b2sdk/b2sdk/version.py
+--- python-b2sdk.git.orig/b2sdk/version.py
++++ python-b2sdk.git/b2sdk/version.py
 @@ -10,12 +10,7 @@
  
  import sys
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst
index d63ddb3..e2e7b80 100644
--- a/doc/source/advanced.rst
+++ b/doc/source/advanced.rst
@@ -45,7 +45,7 @@ Streaming interface
 
 Some object creation methods start writing data before reading the whole input (iterator). This can be used to write objects that do not have fully known contents without writing them first locally, so that they could be copied. Such usage pattern can be relevant to small devices which stream data to B2 from an external NAS, where caching large files such as media files or virtual machine images is not an option.
 
-Please see :ref:`advanced method support table <advanced_methods_support_table>` to see where streaming interface is supported. 
+Please see :ref:`advanced method support table <advanced_methods_support_table>` to see where streaming interface is supported.
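One such method visible in this release's tests is `Bucket.upload_unbound_stream`. A minimal sketch, assuming v2 naming and placeholder credentials and bucket name:

import io

from b2sdk.v2 import B2Api, InMemoryAccountInfo

api = B2Api(InMemoryAccountInfo())
api.authorize_account('production', 'yourApplicationKeyId', 'yourApplicationKey')  # placeholder credentials
bucket = api.get_bucket_by_name('my-bucket')  # placeholder bucket name

# The stream is consumed as the upload proceeds; its total length does not
# need to be known in advance, so any readable object can be used here.
stream = io.BytesIO(b'data produced on the fly ' * 1024)
bucket.upload_unbound_stream(stream, 'streamed-object')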
 
 Continuation
 ============
@@ -184,7 +184,7 @@ Change the middle of the remote file
 For more information see :meth:`b2sdk.v2.Bucket.create_file`.
 
 
-Synthetize a file from local and remote parts
+Synthesize a file from local and remote parts
 =============================================
 
 This is useful for expert usage patterns such as:
@@ -321,7 +321,7 @@ To support automatic continuation, some advanced methods create a plan before st
 If that is not available, ``large_file_id`` can be extracted via callback during the operation start. It can then be passed into the subsequent call to continue the same task, though the responsibility for passing the exact same input is then on the user of the function. Please see :ref:`advanced method support table <advanced_methods_support_table>` to see where automatic continuation is supported. ``large_file_id`` can also be passed if automatic continuation is available in order to avoid issues where multiple upload sessions match the transfer.
 
 
-Continuation of create/concantenate
+Continuation of create/concatenate
 ===================================
 
 :meth:`b2sdk.v2.Bucket.create_file` supports automatic continuation or manual continuation. :meth:`b2sdk.v2.Bucket.create_file_stream` supports only manual continuation for local-only inputs. The situation looks the same for :meth:`b2sdk.v2.Bucket.concatenate` and :meth:`b2sdk.v2.Bucket.concatenate_stream` (streamed version supports only manual continuation of local sources). Also :meth:`b2sdk.v2.Bucket.upload` and :meth:`b2sdk.v2.Bucket.copy` support both automatic and manual continuation.
@@ -376,3 +376,14 @@ No continuation
 
 
 Note, that this only forces start of a new large file - it is still possible to continue the process with either auto or manual modes.
+
+
+****************************
+SHA-1 hashes for large files
+****************************
+
+Depending on the number and size of sources and the size of the result file, the SDK may decide to use the large file API to create a file on the server.  In such cases the file's SHA-1 won't be stored on the server in the ``X-Bz-Content-Sha1`` header, but it may optionally be stored with the file in the ``large_file_sha1`` entry in the ``file_info``, as per the `B2 integration checklist <https://www.backblaze.com/b2/docs/integration_checklist.html>`_.
+
+In basic scenarios, large files uploaded to the server will have a ``large_file_sha1`` element added automatically to their ``file_info``.  However, when concatenating multiple sources, it may be impossible for the SDK to figure out the SHA-1 automatically.  In such cases, the SHA-1 can be provided using the ``large_file_sha1`` parameter to :meth:`b2sdk.v2.Bucket.create_file`, :meth:`b2sdk.v2.Bucket.concatenate` and their stream equivalents.  If the parameter is skipped or ``None``, the result file may not have the ``large_file_sha1`` value set.
+
+Note that the provided SHA-1 value is not verified.
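A hedged sketch of supplying the checksum explicitly when concatenating remote and local sources (credentials, the bucket name, the copied file id and the local path are placeholders; ``large_file_sha1`` is the parameter described above):

from b2sdk.v2 import B2Api, CopySource, InMemoryAccountInfo, UploadSourceLocalFile

api = B2Api(InMemoryAccountInfo())
api.authorize_account('production', 'yourApplicationKeyId', 'yourApplicationKey')  # placeholder credentials
bucket = api.get_bucket_by_name('my-bucket')  # placeholder bucket name

# The SDK cannot derive the whole file's SHA-1 from a mix of remote and local
# sources, so it is passed explicitly and ends up in file_info['large_file_sha1'].
sources = [
    CopySource('4_z123_remote_file_id', length=5 * 1024 * 1024),  # placeholder file id and length
    UploadSourceLocalFile('local-tail.bin'),                      # placeholder local path
]
bucket.concatenate(
    sources,
    'combined-object',
    large_file_sha1='0123456789abcdef0123456789abcdef01234567',  # placeholder 40-char hex digest
)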
diff --git a/doc/source/api_types.rst b/doc/source/api_types.rst
index bc37fe4..1e30588 100644
--- a/doc/source/api_types.rst
+++ b/doc/source/api_types.rst
@@ -65,9 +65,6 @@ This should be used in 99% of use cases, it's enough to implement anything from
 
 Those modules will generally not change in a backwards-incompatible way between non-major versions. Please see :ref:`interface version compatibility <interface_version_compatibility>` chapter for notes on what changes must be expected.
 
-.. note::
-  Replication is currently in a Closed Beta state, where not all B2 accounts have access to the feature. The interface of the beta server API might change and the interface of **b2sdk** around replication may change as well. For the avoidance of doubt, until this message is removed, replication-related functionality of **b2sdk** should be considered as internal interface.
-
 .. hint::
   If the current version of **b2sdk** is ``4.5.6`` and you only use the *public* interface,
   put this in your ``requirements.txt`` to be safe::
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
index 36bdb8e..17d52fb 100644
--- a/doc/source/glossary.rst
+++ b/doc/source/glossary.rst
@@ -5,7 +5,7 @@ Glossary
 .. glossary::
 
     absoluteMinimumPartSize
-      The smallest large file part size, as indicated during authorization process by the server (in 2019 it used to be ``5MB``, but the server can set it dynamincally)
+      The smallest large file part size, as indicated during authorization process by the server (in 2019 it used to be ``5MB``, but the server can set it dynamically)
 
     account ID
       An identifier of the B2 account (not login). Looks like this: ``4ba5845d7aaf``.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index eb3e28a..60413e5 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -26,7 +26,7 @@ Why use b2sdk?
 When building an application which uses B2 cloud, it is possible to implement an independent B2 API client, but using **b2sdk** allows for:
 
 - reuse of code that is already written, with hundreds of unit tests
-- use of **Syncronizer**, a high-performance, parallel rsync-like utility
+- use of **Synchronizer**, a high-performance, parallel rsync-like utility
 - developer-friendly library :ref:`api version policy <semantic_versioning>` which guards your program against incompatible changes
 - `B2 integration checklist <https://www.backblaze.com/b2/docs/integration_checklist.html>`_ is passed automatically
 - **raw_simulator** makes it easy to mock the B2 cloud for unit testing purposes
diff --git a/doc/source/server_side_encryption.rst b/doc/source/server_side_encryption.rst
index 925e2d6..2d8fc1f 100644
--- a/doc/source/server_side_encryption.rst
+++ b/doc/source/server_side_encryption.rst
@@ -27,7 +27,7 @@ source and for destination). Sync, however, accepts an `EncryptionSettingsProvid
 ******************************
 High security: use unique keys
 ******************************
-B2 cloud does not promote or discourage either reusing encryption keys or using unique keys for `SEE-C`.
+B2 cloud does not promote or discourage either reusing encryption keys or using unique keys for `SSE-C`.
 In applications requiring enhanced security, using unique key per file is a good strategy. **b2sdk** follows a convention,
 that makes managing such keys easier: `EncryptionSetting` holds a key identifier, aside from the key itself. This key
 identifier is saved in the metadata of all files uploaded, created or copied via **b2sdk** methods using `SSE-C`,
diff --git a/noxfile.py b/noxfile.py
index 6fac81a..7fd5d27 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -59,13 +59,13 @@ def install_myself(session, extras=None):
     if extras:
         arg += '[%s]' % ','.join(extras)
 
-    session.install('-e', arg)
+    session.run('pip', 'install', '-e', arg)
 
 
 @nox.session(name='format', python=PYTHON_DEFAULT_VERSION)
 def format_(session):
     """Format the code."""
-    session.install(*REQUIREMENTS_FORMAT)
+    session.run('pip', 'install', *REQUIREMENTS_FORMAT)
     # TODO: incremental mode for yapf
     session.run('yapf', '--in-place', '--parallel', '--recursive', *PY_PATHS)
     # TODO: uncomment if we want to use isort and docformatter
@@ -84,7 +84,7 @@ def format_(session):
 def lint(session):
     """Run linters."""
     install_myself(session)
-    session.install(*REQUIREMENTS_LINT)
+    session.run('pip', 'install', *REQUIREMENTS_LINT)
     session.run('yapf', '--diff', '--parallel', '--recursive', *PY_PATHS)
     # TODO: uncomment if we want to use isort and docformatter
     # session.run('isort', '--check', *PY_PATHS)
@@ -115,7 +115,7 @@ def lint(session):
 def unit(session):
     """Run unit tests."""
     install_myself(session)
-    session.install(*REQUIREMENTS_TEST)
+    session.run('pip', 'install', *REQUIREMENTS_TEST)
     args = ['--doctest-modules', '-p', 'pyfakefs', '-n', 'auto']
     if not SKIP_COVERAGE:
         args += ['--cov=b2sdk', '--cov-branch', '--cov-report=xml']
@@ -135,7 +135,7 @@ def unit(session):
 def integration(session):
     """Run integration tests."""
     install_myself(session)
-    session.install(*REQUIREMENTS_TEST)
+    session.run('pip', 'install', *REQUIREMENTS_TEST)
     session.run('pytest', '-s', *session.posargs, 'test/integration')
 
 
@@ -143,7 +143,7 @@ def integration(session):
 def cleanup_old_buckets(session):
     """Remove buckets from previous test runs."""
     install_myself(session)
-    session.install(*REQUIREMENTS_TEST)
+    session.run('pip', 'install', *REQUIREMENTS_TEST)
     session.run('python', '-m', 'test.integration.cleanup_buckets')
 
 
@@ -161,7 +161,7 @@ def test(session):
 @nox.session
 def cover(session):
     """Perform coverage analysis."""
-    session.install('coverage')
+    session.run('pip', 'install', 'coverage')
     session.run('coverage', 'report', '--fail-under=75', '--show-missing', '--skip-covered')
     session.run('coverage', 'erase')
 
@@ -170,7 +170,7 @@ def cover(session):
 def build(session):
     """Build the distribution."""
     # TODO: consider using wheel as well
-    session.install(*REQUIREMENTS_BUILD)
+    session.run('pip', 'install', *REQUIREMENTS_BUILD)
     session.run('python', 'setup.py', 'check', '--metadata', '--strict')
     session.run('rm', '-rf', 'build', 'dist', 'b2sdk.egg-info', external=True)
     session.run('python', 'setup.py', 'sdist', *session.posargs)
diff --git a/requirements.txt b/requirements.txt
index c977673..0f36428 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
-arrow>=1.0.2,<2.0.0
 importlib-metadata>=3.3.0; python_version < '3.8'
 logfury>=1.0.1,<2.0.0
 requests>=2.9.1,<3.0.0
diff --git a/test/integration/base.py b/test/integration/base.py
index 2d95b18..ff62456 100644
--- a/test/integration/base.py
+++ b/test/integration/base.py
@@ -8,16 +8,13 @@
 #
 ######################################################################
 
-from typing import Optional
 import http.client
 import os
-import random
-import string
 
 import pytest
 
 from b2sdk.v2 import current_time_millis
-
+from b2sdk.v2.exception import DuplicateBucketName
 from .bucket_cleaner import BucketCleaner
 from .helpers import GENERAL_BUCKET_NAME_PREFIX, BUCKET_NAME_LENGTH, BUCKET_CREATED_AT_MILLIS, bucket_name_part, authorize
 
@@ -63,8 +60,32 @@ class IntegrationTestBase:
             written += line_len
 
     def create_bucket(self):
-        return self.b2_api.create_bucket(
-            self.generate_bucket_name(),
-            'allPublic',
-            bucket_info={BUCKET_CREATED_AT_MILLIS: str(current_time_millis())}
-        )
+        bucket_name = self.generate_bucket_name()
+        try:
+            return self.b2_api.create_bucket(
+                bucket_name,
+                'allPublic',
+                bucket_info={BUCKET_CREATED_AT_MILLIS: str(current_time_millis())}
+            )
+        except DuplicateBucketName:
+            self._duplicated_bucket_name_debug_info(bucket_name)
+            raise
+
+    def _duplicated_bucket_name_debug_info(self, bucket_name: str) -> None:
+        # Trying to obtain as much information as possible about this bucket.
+        print(' DUPLICATED BUCKET DEBUG START '.center(60, '='))
+        bucket = self.b2_api.get_bucket_by_name(bucket_name)
+
+        print('Bucket metadata:')
+        bucket_dict = bucket.as_dict()
+        for info_key, info in bucket_dict.items():
+            print('\t%s: "%s"' % (info_key, info))
+
+        print('All files (and their versions) inside the bucket:')
+        ls_generator = bucket.ls(recursive=True, latest_only=False)
+        for file_version, _directory in ls_generator:
+            # as_dict() is bound to have more info than we can use,
+            # but maybe some of it will cast some light on the issue.
+            print('\t%s (%s)' % (file_version.file_name, file_version.as_dict()))
+
+        print(' DUPLICATED BUCKET DEBUG END '.center(60, '='))
diff --git a/test/integration/test_download.py b/test/integration/test_download.py
index d7f851e..a093d24 100644
--- a/test/integration/test_download.py
+++ b/test/integration/test_download.py
@@ -12,10 +12,11 @@ import gzip
 import io
 import pathlib
 from pprint import pprint
-from typing import Optional
+from typing import Optional, Tuple
 from unittest import mock
 
 from b2sdk.v2 import *
+from b2sdk.utils import Sha1HexDigest
 
 from .fixtures import *  # pyflakes: disable
 from .helpers import authorize
@@ -49,14 +50,17 @@ class TestDownload(IntegrationTestBase):
                         bucket.download_file_by_name('a_single_zero').save(io_)
                 assert exc_info.value.args == ('no strategy suitable for download was found!',)
 
-                f = self._file_helper(bucket)
+                f, sha1 = self._file_helper(bucket)
                 if zero._type() != 'large':
                     # if we are here, that's not the production server!
                     assert f.download_version.content_sha1_verified  # large files don't have sha1, lets not check
 
-    def _file_helper(
-        self, bucket, sha1_sum=None, bytes_to_write: Optional[int] = None
-    ) -> DownloadVersion:
+                file_info = f.download_version.file_info
+                assert LARGE_FILE_SHA1 in file_info
+                assert file_info[LARGE_FILE_SHA1] == sha1
+
+    def _file_helper(self, bucket, sha1_sum=None,
+                     bytes_to_write: Optional[int] = None) -> Tuple[DownloadVersion, Sha1HexDigest]:
         bytes_to_write = bytes_to_write or int(self.info.get_absolute_minimum_part_size()) * 2 + 1
         with TempDir() as temp_dir:
             temp_dir = pathlib.Path(temp_dir)
@@ -72,17 +76,19 @@ class TestDownload(IntegrationTestBase):
 
             f = bucket.download_file_by_name('small_file')
             f.save_to(target_small_file)
-            assert hex_sha1_of_file(source_small_file) == hex_sha1_of_file(target_small_file)
-        return f
+
+            source_sha1 = hex_sha1_of_file(source_small_file)
+            assert source_sha1 == hex_sha1_of_file(target_small_file)
+        return f, source_sha1
 
     def test_small(self):
         bucket = self.create_bucket()
-        f = self._file_helper(bucket, bytes_to_write=1)
+        f, _ = self._file_helper(bucket, bytes_to_write=1)
         assert f.download_version.content_sha1_verified
 
     def test_small_unverified(self):
         bucket = self.create_bucket()
-        f = self._file_helper(bucket, sha1_sum='do_not_verify', bytes_to_write=1)
+        f, _ = self._file_helper(bucket, sha1_sum='do_not_verify', bytes_to_write=1)
         if f.download_version.content_sha1_verified:
             pprint(f.download_version._get_args_for_clone())
             assert not f.download_version.content_sha1_verified
diff --git a/test/integration/test_raw_api.py b/test/integration/test_raw_api.py
index a6fc565..f57cd7a 100644
--- a/test/integration/test_raw_api.py
+++ b/test/integration/test_raw_api.py
@@ -19,6 +19,7 @@ import pytest
 
 from b2sdk.b2http import B2Http
 from b2sdk.encryption.setting import EncryptionAlgorithm, EncryptionMode, EncryptionSetting
+from b2sdk.exception import DisablingFileLockNotSupported
 from b2sdk.replication.setting import ReplicationConfiguration, ReplicationRule
 from b2sdk.replication.types import ReplicationStatus
 from b2sdk.file_lock import BucketRetentionSetting, NO_RETENTION_FILE_SETTING, RetentionMode, RetentionPeriod
@@ -359,6 +360,7 @@ def raw_api_test_helper(raw_api, should_cleanup_old_buckets):
         {'color': 'blue'},
         io.BytesIO(file_contents),
         server_side_encryption=sse_b2_aes,
+        custom_upload_timestamp=12345,
     )
 
     file_id = file_dict['fileId']
@@ -519,9 +521,24 @@ def raw_api_test_helper(raw_api, should_cleanup_old_buckets):
         default_retention=BucketRetentionSetting(
             mode=RetentionMode.GOVERNANCE, period=RetentionPeriod(days=1)
         ),
+        is_file_lock_enabled=True,
     )
     assert first_bucket_revision < updated_bucket['revision']
 
+    # NOTE: this update_bucket call is only here to be able to find out the error code returned by
+    # the server if an attempt is made to disable file lock.  It has to be done here since the CLI
+    # by design does not allow disabling file lock at all (i.e. there is no --fileLockEnabled=false
+    # option or anything equivalent to that).
+    with pytest.raises(DisablingFileLockNotSupported):
+        raw_api.update_bucket(
+            api_url,
+            account_auth_token,
+            account_id,
+            bucket_id,
+            'allPrivate',
+            is_file_lock_enabled=False,
+        )
+
     # Clean up this test.
     _clean_and_delete_bucket(raw_api, api_url, account_auth_token, account_id, bucket_id)
 
diff --git a/test/integration/test_upload.py b/test/integration/test_upload.py
new file mode 100644
index 0000000..34b88b8
--- /dev/null
+++ b/test/integration/test_upload.py
@@ -0,0 +1,40 @@
+######################################################################
+#
+# File: test/integration/test_upload.py
+#
+# Copyright 2021 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
+
+import io
+from typing import Optional
+
+from .fixtures import b2_auth_data  # noqa
+from .base import IntegrationTestBase
+
+
+class TestUnboundStreamUpload(IntegrationTestBase):
+    def assert_data_uploaded_via_stream(self, data: bytes, part_size: Optional[int] = None):
+        bucket = self.create_bucket()
+        stream = io.BytesIO(data)
+        file_name = 'unbound_stream'
+
+        bucket.upload_unbound_stream(stream, file_name, recommended_upload_part_size=part_size)
+
+        downloaded_data = io.BytesIO()
+        bucket.download_file_by_name(file_name).save(downloaded_data)
+
+        assert downloaded_data.getvalue() == data
+
+    def test_streamed_small_buffer(self):
+        # 20kb
+        data = b'a small data content' * 1024
+        self.assert_data_uploaded_via_stream(data)
+
+    def test_streamed_large_buffer_small_part_size(self):
+        # 10mb
+        data = b'a large data content' * 512 * 1024
+        # 5mb, the smallest allowed part size
+        self.assert_data_uploaded_via_stream(data, part_size=5 * 1024 * 1024)
diff --git a/test/unit/account_info/test_account_info.py b/test/unit/account_info/test_account_info.py
index 62df7ed..d8e31bd 100644
--- a/test/unit/account_info/test_account_info.py
+++ b/test/unit/account_info/test_account_info.py
@@ -7,7 +7,6 @@
 # License https://www.backblaze.com/using_b2_code.html
 #
 ######################################################################
-
 from abc import ABCMeta, abstractmethod
 import json
 import unittest.mock as mock
@@ -17,8 +16,6 @@ import shutil
 import stat
 import tempfile
 
-import pytest
-
 from apiver_deps import (
     ALL_CAPABILITIES,
     AbstractAccountInfo,
@@ -238,9 +235,11 @@ class AccountInfoBase(metaclass=ABCMeta):
             ).get_bucket_id_or_none_from_bucket_name('my-bucket')
             assert 'my-bucket' == self._make_info(
             ).get_bucket_name_or_none_from_bucket_id('bucket-0')
+        assert ('my-bucket', 'bucket-0') in account_info.list_bucket_names_ids()
         account_info.remove_bucket_name('my-bucket')
         assert account_info.get_bucket_id_or_none_from_bucket_name('my-bucket') is None
         assert account_info.get_bucket_name_or_none_from_bucket_id('bucket-0') is None
+        assert ('my-bucket', 'bucket-0') not in account_info.list_bucket_names_ids()
         if self.PERSISTENCE:
             assert self._make_info().get_bucket_id_or_none_from_bucket_name('my-bucket') is None
             assert self._make_info().get_bucket_name_or_none_from_bucket_id('bucket-0') is None
diff --git a/test/unit/api/test_api.py b/test/unit/api/test_api.py
index 56a6e63..2c8fd33 100644
--- a/test/unit/api/test_api.py
+++ b/test/unit/api/test_api.py
@@ -19,7 +19,7 @@ import apiver_deps
 from apiver_deps import B2Api
 from apiver_deps import B2HttpApiConfig
 from apiver_deps import B2Http
-from apiver_deps import DummyCache
+from apiver_deps import DummyCache, InMemoryCache
 from apiver_deps import EncryptionAlgorithm
 from apiver_deps import EncryptionMode
 from apiver_deps import EncryptionSetting
@@ -43,7 +43,7 @@ class TestApi:
     @pytest.fixture(autouse=True)
     def setUp(self):
         self.account_info = InMemoryAccountInfo()
-        self.cache = DummyCache()
+        self.cache = InMemoryCache()
         self.api = B2Api(
             self.account_info, self.cache, api_config=B2HttpApiConfig(_raw_api_class=RawSimulator)
         )
@@ -147,6 +147,23 @@ class TestApi:
         self.api.create_bucket('bucket2', 'allPrivate')
         assert [b.name for b in self.api.list_buckets(bucket_name='bucket1')] == ['bucket1']
 
+    @pytest.mark.apiver(from_ver=3)
+    def test_list_buckets_from_cache(self):
+        bucket = type("bucket", (), {"name": "bucket", "id_": "ID-0"})
+        self._authorize_account()
+        self.cache.set_bucket_name_cache([bucket])
+
+        def list_buckets(*args, **kwargs):
+            buckets = self.api.list_buckets(*args, **kwargs)
+            return [(b.name, b.id_) for b in buckets]
+
+        assert list_buckets(use_cache=True) == [('bucket', 'ID-0')]
+        assert list_buckets(bucket_name="bucket", use_cache=True) == [('bucket', 'ID-0')]
+        assert list_buckets(bucket_name="bucket2", use_cache=True) == []
+        assert list_buckets(bucket_id="ID-0", use_cache=True) == [('bucket', 'ID-0')]
+        assert list_buckets(bucket_id="ID-2", use_cache=True) == []
+        assert self.api.list_buckets() == []
+
     def test_buckets_with_encryption(self):
         self._authorize_account()
         sse_b2_aes = EncryptionSetting(
@@ -480,3 +497,22 @@ class TestApi:
             'appKeyId9',
         ]
         assert isinstance(keys[0], ApplicationKey)
+
+    def test_get_key(self):
+        self._authorize_account()
+        key = self.api.create_key(['readFiles'], 'testkey')
+
+        if apiver_deps.V <= 1:
+            key_id = key['applicationKeyId']
+        else:
+            key_id = key.id_
+
+        assert self.api.get_key(key_id) is not None
+
+        if apiver_deps.V <= 1:
+            self.api.delete_key(key_id)
+        else:
+            self.api.delete_key(key)
+
+        assert self.api.get_key(key_id) is None
+        assert self.api.get_key('non-existent') is None
diff --git a/test/unit/b2http/test_b2http.py b/test/unit/b2http/test_b2http.py
index 8a9b8e5..057ec99 100644
--- a/test/unit/b2http/test_b2http.py
+++ b/test/unit/b2http/test_b2http.py
@@ -11,15 +11,17 @@
 import datetime
 import requests
 import socket
+import locale
 
 from ..test_base import TestBase
 
 import apiver_deps
-from apiver_deps_exception import BadDateFormat, BadJson, BrokenPipe, B2ConnectionError, ClockSkew, ConnectionReset, ServiceError, UnknownError, UnknownHost, TooManyRequests
+from apiver_deps_exception import BadDateFormat, BadJson, BrokenPipe, B2ConnectionError, ClockSkew, ConnectionReset, ServiceError, UnknownError, UnknownHost, TooManyRequests, InvalidJsonResponse, PotentialS3EndpointPassedAsRealm
 from apiver_deps import USER_AGENT
 from apiver_deps import B2Http
 from apiver_deps import B2HttpApiConfig
 from apiver_deps import ClockSkewHook
+from b2sdk.b2http import setlocale
 
 from unittest.mock import call, MagicMock, patch
 
@@ -98,6 +100,27 @@ class TestTranslateErrors(TestBase):
         with self.assertRaises(TooManyRequests):
             B2Http._translate_errors(lambda: response)
 
+    def test_invalid_json(self):
+        response = MagicMock()
+        response.status_code = 400
+        response.content = b'{' * 500
+        response.url = 'https://example.com'
+
+        with self.assertRaises(InvalidJsonResponse) as error:
+            B2Http._translate_errors(lambda: response)
+
+        content_length = min(len(response.content), len(error.exception.content))
+        self.assertEqual(response.content[:content_length], error.exception.content[:content_length])
+
+    def test_potential_s3_endpoint_passed_as_realm(self):
+        response = MagicMock()
+        response.status_code = 400
+        response.content = b'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
+        response.url = 'https://s3.us-west-000.backblazeb2.com'
+
+        with self.assertRaises(PotentialS3EndpointPassedAsRealm):
+            B2Http._translate_errors(lambda: response)
+
 
 class TestTranslateAndRetry(TestBase):
     def setUp(self):
@@ -245,9 +268,12 @@ class TestB2Http(TestBase):
         with self.b2_http.get_content(self.URL, self.HEADERS) as r:
             self.assertIs(self.response, r)
         self.session.get.assert_called_with(
-            self.URL, headers=self.EXPECTED_HEADERS, stream=True, timeout=B2Http.TIMEOUT
+            self.URL,
+            headers=self.EXPECTED_HEADERS,
+            stream=True,
+            timeout=(B2Http.CONNECTION_TIMEOUT, B2Http.TIMEOUT),
         )
-        self.response.close.assert_called_with()
+        self.response.close.assert_not_called()  # prevent premature close() on requests.Response
 
     def test_head_content(self):
         self.session.head.return_value = self.response
@@ -270,6 +296,23 @@ class TestB2HttpUserAgentAppend(TestB2Http):
     }
 
 
+class TestSetLocaleContextManager(TestBase):
+    def test_set_locale_context_manager(self):
+        test_locale = locale.normalize(
+            'C.utf8'
+        )  # C.UTF-8 on Ubuntu 18.04 Bionic, C.utf8 on Ubuntu 22.04 Jammy
+        other_locale = 'C'
+
+        saved = locale.setlocale(locale.LC_ALL)
+        if saved == test_locale:
+            test_locale, other_locale = other_locale, test_locale
+
+        locale.setlocale(locale.LC_ALL, other_locale)
+        with setlocale(test_locale):
+            assert locale.setlocale(category=locale.LC_ALL) == test_locale
+        locale.setlocale(locale.LC_ALL, saved)
+
+
 class TestClockSkewHook(TestBase):
     def test_bad_format(self):
         response = MagicMock()
diff --git a/test/unit/bucket/test_bucket.py b/test/unit/bucket/test_bucket.py
index 6d56e33..80677c6 100644
--- a/test/unit/bucket/test_bucket.py
+++ b/test/unit/bucket/test_bucket.py
@@ -7,10 +7,12 @@
 # License https://www.backblaze.com/using_b2_code.html
 #
 ######################################################################
+import contextlib
 import io
 from contextlib import suppress
 from io import BytesIO
 import os
+import pathlib
 import platform
 import unittest.mock as mock
 
@@ -23,15 +25,23 @@ from apiver_deps_exception import (
     AlreadyFailed,
     B2Error,
     B2RequestTimeoutDuringUpload,
+    BadRequest,
     BucketIdNotFound,
+    DestinationDirectoryDoesntAllowOperation,
+    DestinationDirectoryDoesntExist,
+    DestinationIsADirectory,
+    DestinationParentIsNotADirectory,
+    DisablingFileLockNotSupported,
+    FileSha1Mismatch,
     InvalidAuthToken,
     InvalidMetadataDirective,
     InvalidRange,
     InvalidUploadSource,
     MaxRetriesExceeded,
-    UnsatisfiableRange,
-    FileSha1Mismatch,
+    RestrictedBucketMissing,
     SSECKeyError,
+    SourceReplicationConflict,
+    UnsatisfiableRange,
 )
 if apiver_deps.V <= 1:
     from apiver_deps import DownloadDestBytes, PreSeekedDownloadDest
@@ -54,12 +64,15 @@ from apiver_deps import ParallelDownloader
 from apiver_deps import Range
 from apiver_deps import SimpleDownloader
 from apiver_deps import UploadSourceBytes
+from apiver_deps import DummyCache, InMemoryCache
 from apiver_deps import hex_sha1_of_bytes, TempDir
 from apiver_deps import EncryptionAlgorithm, EncryptionSetting, EncryptionMode, EncryptionKey, SSE_NONE, SSE_B2_AES
 from apiver_deps import CopySource, UploadSourceLocalFile, WriteIntent
 from apiver_deps import BucketRetentionSetting, FileRetentionSetting, LegalHold, RetentionMode, RetentionPeriod, \
     NO_RETENTION_FILE_SETTING
 from apiver_deps import ReplicationConfiguration, ReplicationRule
+from apiver_deps import LARGE_FILE_SHA1
+from apiver_deps import UploadMode
 
 pytestmark = [pytest.mark.apiver(from_ver=1)]
 
@@ -197,10 +210,13 @@ def bucket_ls(bucket, *args, show_versions=False, **kwargs):
 
 class TestCaseWithBucket(TestBase):
     RAW_SIMULATOR_CLASS = RawSimulator
+    CACHE_CLASS = DummyCache
 
     def get_api(self):
         return B2Api(
-            self.account_info, api_config=B2HttpApiConfig(_raw_api_class=self.RAW_SIMULATOR_CLASS)
+            self.account_info,
+            cache=self.CACHE_CLASS(),
+            api_config=B2HttpApiConfig(_raw_api_class=self.RAW_SIMULATOR_CLASS),
         )
 
     def setUp(self):
@@ -212,7 +228,7 @@ class TestCaseWithBucket(TestBase):
         self.api.authorize_account('production', self.account_id, self.master_key)
         self.api_url = self.account_info.get_api_url()
         self.account_auth_token = self.account_info.get_account_auth_token()
-        self.bucket = self.api.create_bucket('my-bucket', 'allPublic')
+        self.bucket = self.api.create_bucket(self.bucket_name, 'allPublic')
         self.bucket_id = self.bucket.id_
 
     def bucket_ls(self, *args, show_versions=False, **kwargs):
@@ -248,6 +264,14 @@ class TestCaseWithBucket(TestBase):
         contents = self._download_file(file_name)
         self.assertEqual(expected_contents, contents)
 
+    def _check_large_file_sha1(self, file_name, expected_sha1):
+        file_info = self.bucket.get_file_info_by_name(file_name).file_info
+        if expected_sha1:
+            assert LARGE_FILE_SHA1 in file_info
+            assert file_info[LARGE_FILE_SHA1] == expected_sha1
+        else:
+            assert LARGE_FILE_SHA1 not in file_info
+
     def _download_file(self, file_name):
         with FileSimulator.dont_check_encryption():
             if apiver_deps.V <= 1:
@@ -519,6 +543,202 @@ class TestLs(TestCaseWithBucket):
         expected = [('hello.txt', 15, 'upload', None)]
         self.assertBucketContents(expected, '', show_versions=True)
 
+    def test_non_recursive_returns_folder_names(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'a')
+        self.bucket.upload_bytes(data, 'b/1/test-1.txt')
+        self.bucket.upload_bytes(data, 'b/2/test-2.txt')
+        self.bucket.upload_bytes(data, 'b/3/test-3.txt')
+        self.bucket.upload_bytes(data, 'b/3/test-4.txt')
+        # Since inside `b` there are 3 directories, we get three results,
+        # with a first file for each of them.
+        expected = [
+            ('b/1/test-1.txt', len(data), 'upload', 'b/1/'),
+            ('b/2/test-2.txt', len(data), 'upload', 'b/2/'),
+            ('b/3/test-3.txt', len(data), 'upload', 'b/3/'),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info, folder) in self.bucket_ls('b/')
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_recursive_returns_no_folder_names(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'a')
+        self.bucket.upload_bytes(data, 'b/1/test-1.txt')
+        self.bucket.upload_bytes(data, 'b/2/test-2.txt')
+        self.bucket.upload_bytes(data, 'b/3/test-3.txt')
+        self.bucket.upload_bytes(data, 'b/3/test-4.txt')
+        expected = [
+            ('b/1/test-1.txt', len(data), 'upload', None),
+            ('b/2/test-2.txt', len(data), 'upload', None),
+            ('b/3/test-3.txt', len(data), 'upload', None),
+            ('b/3/test-4.txt', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info, folder) in self.bucket_ls('b/', recursive=True)
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_wildcard_matching(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'a')
+        self.bucket.upload_bytes(data, 'b/1/test-1.txt')
+        self.bucket.upload_bytes(data, 'b/2/test-2.csv')
+        self.bucket.upload_bytes(data, 'b/2/test-3.txt')
+        self.bucket.upload_bytes(data, 'b/3/test-4.jpg')
+        self.bucket.upload_bytes(data, 'b/3/test-4.txt')
+        self.bucket.upload_bytes(data, 'b/3/test-5.txt')
+        expected = [
+            ('b/1/test-1.txt', len(data), 'upload', None),
+            ('b/2/test-3.txt', len(data), 'upload', None),
+            ('b/3/test-4.txt', len(data), 'upload', None),
+            ('b/3/test-5.txt', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info, folder) in self.bucket_ls('b/*.txt', recursive=True, with_wildcard=True)
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_wildcard_matching_including_root(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'b/1/test.txt')
+        self.bucket.upload_bytes(data, 'b/2/test.txt')
+        self.bucket.upload_bytes(data, 'b/3/test.txt')
+        self.bucket.upload_bytes(data, 'test.txt')
+        expected = [
+            ('b/1/test.txt', len(data), 'upload', None),
+            ('b/2/test.txt', len(data), 'upload', None),
+            ('b/3/test.txt', len(data), 'upload', None),
+            ('test.txt', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info, folder) in self.bucket_ls('*.txt', recursive=True, with_wildcard=True)
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_wildcard_matching_directory(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'a')
+        self.bucket.upload_bytes(data, 'b/2/test.txt')
+        self.bucket.upload_bytes(data, 'b/3/test.jpg')
+        self.bucket.upload_bytes(data, 'b/3/test.txt')
+        self.bucket.upload_bytes(data, 'c/4/test.txt')
+        expected = [
+            ('b/2/test.txt', len(data), 'upload', None),
+            ('b/3/test.txt', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info,
+                 folder) in self.bucket_ls('b/*/test.txt', recursive=True, with_wildcard=True)
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_single_character_matching(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'a')
+        self.bucket.upload_bytes(data, 'b/2/test.csv')
+        self.bucket.upload_bytes(data, 'b/2/test.txt')
+        self.bucket.upload_bytes(data, 'b/2/test.tsv')
+        expected = [
+            ('b/2/test.csv', len(data), 'upload', None),
+            ('b/2/test.tsv', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info,
+                 folder) in self.bucket_ls('b/2/test.?sv', recursive=True, with_wildcard=True)
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_sequence_matching(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'a')
+        self.bucket.upload_bytes(data, 'b/2/test.csv')
+        self.bucket.upload_bytes(data, 'b/2/test.ksv')
+        self.bucket.upload_bytes(data, 'b/2/test.tsv')
+        expected = [
+            ('b/2/test.csv', len(data), 'upload', None),
+            ('b/2/test.tsv', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info,
+                 folder) in self.bucket_ls('b/2/test.[tc]sv', recursive=True, with_wildcard=True)
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_negative_sequence_matching(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'a')
+        self.bucket.upload_bytes(data, 'b/2/test.csv')
+        self.bucket.upload_bytes(data, 'b/2/test.ksv')
+        self.bucket.upload_bytes(data, 'b/2/test.tsv')
+        expected = [
+            ('b/2/test.tsv', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info,
+                 folder) in self.bucket_ls('b/2/test.[!ck]sv', recursive=True, with_wildcard=True)
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_matching_wildcard_named_file(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'a/*.txt')
+        self.bucket.upload_bytes(data, 'a/1.txt')
+        self.bucket.upload_bytes(data, 'a/2.txt')
+        expected = [
+            ('a/*.txt', len(data), 'upload', None),
+            ('a/1.txt', len(data), 'upload', None),
+            ('a/2.txt', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info, folder) in self.bucket_ls('a/*.txt', recursive=True, with_wildcard=True)
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_matching_single_question_mark_named_file(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'b/?.txt')
+        self.bucket.upload_bytes(data, 'b/a.txt')
+        self.bucket.upload_bytes(data, 'b/b.txt')
+        expected = [
+            ('b/?.txt', len(data), 'upload', None),
+            ('b/a.txt', len(data), 'upload', None),
+            ('b/b.txt', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info, folder) in self.bucket_ls('b/?.txt', recursive=True, with_wildcard=True)
+        ]
+        self.assertEqual(expected, actual)
+
+    def test_wildcard_requires_recursive(self):
+        with pytest.raises(ValueError):
+            # Since ls is a generator, we need to actually fetch something from it.
+            next(self.bucket_ls('*.txt', recursive=False, with_wildcard=True))
+
+    def test_matching_exact_filename(self):
+        data = b'hello world'
+        self.bucket.upload_bytes(data, 'b/a.txt')
+        self.bucket.upload_bytes(data, 'b/b.txt')
+        expected = [
+            ('b/a.txt', len(data), 'upload', None),
+        ]
+        actual = [
+            (info.file_name, info.size, info.action, folder)
+            for (info, folder) in self.bucket_ls('b/a.txt', recursive=True, with_wildcard=True)
+        ]
+        self.assertEqual(expected, actual)
+
 
 class TestGetFreshState(TestCaseWithBucket):
     def test_ok(self):
@@ -699,6 +919,7 @@ class TestCopyFile(TestCaseWithBucket):
         else:
             self.bucket.copy(file_id, 'hello_new.txt', offset=3, length=7)
         self._check_file_contents('hello_new.txt', b'lo worl')
+        self._check_large_file_sha1('hello_new.txt', None)
         expected = [('hello.txt', 11, 'upload', None), ('hello_new.txt', 7, 'upload', None)]
         self.assertBucketContents(expected, '', show_versions=True)
 
@@ -1063,6 +1284,54 @@ class TestUpdate(TestCaseWithBucket):
         not_updated_bucket = self.api.get_bucket_by_name(self.bucket.name)
         self.assertEqual([{'life': 'is life'}], not_updated_bucket.lifecycle_rules)
 
+    def test_is_file_lock_enabled(self):
+        assert not self.bucket.is_file_lock_enabled
+
+        # set is_file_lock_enabled to False when it's already false
+        self.bucket.update(is_file_lock_enabled=False)
+        updated_bucket = self.api.get_bucket_by_name(self.bucket.name)
+        assert not updated_bucket.is_file_lock_enabled
+
+        # sunny day scenario
+        self.bucket.update(is_file_lock_enabled=True)
+        updated_bucket = self.api.get_bucket_by_name(self.bucket.name)
+        assert updated_bucket.is_file_lock_enabled
+        assert self.simulator.bucket_name_to_bucket[self.bucket.name].is_file_lock_enabled
+
+        # attempt to clear is_file_lock_enabled
+        with pytest.raises(DisablingFileLockNotSupported):
+            self.bucket.update(is_file_lock_enabled=False)
+        updated_bucket = self.api.get_bucket_by_name(self.bucket.name)
+        assert updated_bucket.is_file_lock_enabled
+
+        # attempt to set is_file_lock_enabled when it's already set
+        self.bucket.update(is_file_lock_enabled=True)
+        updated_bucket = self.api.get_bucket_by_name(self.bucket.name)
+        assert updated_bucket.is_file_lock_enabled
+
+    @pytest.mark.apiver(from_ver=2)
+    def test_is_file_lock_enabled_source_replication(self):
+        assert not self.bucket.is_file_lock_enabled
+
+        # attempt to set is_file_lock_enabled with source replication enabled
+        self.bucket.update(replication=REPLICATION)
+        with pytest.raises(SourceReplicationConflict):
+            self.bucket.update(is_file_lock_enabled=True)
+        updated_bucket = self.bucket.update(replication=REPLICATION)
+        assert not updated_bucket.is_file_lock_enabled
+
+        # sunny day scenario
+        self.bucket.update(
+            replication=ReplicationConfiguration(
+                rules=[],
+                source_to_destination_key_mapping={},
+            )
+        )
+        self.bucket.update(is_file_lock_enabled=True)
+        updated_bucket = self.api.get_bucket_by_name(self.bucket.name)
+        assert updated_bucket.is_file_lock_enabled
+        assert self.simulator.bucket_name_to_bucket[self.bucket.name].is_file_lock_enabled
+
 
 class TestUpload(TestCaseWithBucket):
     def test_upload_bytes(self):
@@ -1070,6 +1339,7 @@ class TestUpload(TestCaseWithBucket):
         file_info = self.bucket.upload_bytes(data, 'file1')
         self.assertTrue(isinstance(file_info, VFileVersionInfo))
         self._check_file_contents('file1', data)
+        self._check_large_file_sha1('file1', None)
         self.assertEqual(file_info.server_side_encryption, SSE_NONE)
 
     def test_upload_bytes_file_retention(self):
@@ -1079,6 +1349,7 @@ class TestUpload(TestCaseWithBucket):
             data, 'file1', file_retention=retention, legal_hold=LegalHold.ON
         )
         self._check_file_contents('file1', data)
+        self._check_large_file_sha1('file1', None)
         self.assertEqual(retention, file_info.file_retention)
         self.assertEqual(LegalHold.ON, file_info.legal_hold)
 
@@ -1144,11 +1415,71 @@ class TestUpload(TestCaseWithBucket):
             write_file(path, data)
             file_info = self.bucket.upload_local_file(path, 'file1')
             self._check_file_contents('file1', data)
+            self._check_large_file_sha1('file1', None)
             self.assertTrue(isinstance(file_info, VFileVersionInfo))
             self.assertEqual(file_info.server_side_encryption, SSE_NONE)
             print(file_info.as_dict())
             self.assertEqual(file_info.as_dict()['serverSideEncryption'], {'mode': 'none'})
 
+    @pytest.mark.apiver(from_ver=2)
+    def test_upload_local_file_incremental(self):
+        with TempDir() as d:
+            path = os.path.join(d, 'file1')
+
+            small_data = b'Hello world!'
+            big_data = self._make_data(self.simulator.MIN_PART_SIZE * 3)
+            DATA = [
+                big_data,
+                big_data + small_data,
+                big_data + small_data + big_data,
+                small_data,
+                small_data + small_data,
+                small_data.upper() + small_data,
+            ]
+
+            last_data = None
+            for data in DATA:
+                # figure out if this particular upload should be incremental
+                should_be_incremental = (
+                    last_data and data.startswith(last_data) and
+                    len(last_data) >= self.simulator.MIN_PART_SIZE
+                )
+
+                # if it's incremental, then there should be two sources concatenated, otherwise one
+                expected_source_count = 2 if should_be_incremental else 1
+
+                # is the result file expected to be a large file
+                expected_large_file = \
+                    should_be_incremental or \
+                    len(data) > self.simulator.MIN_PART_SIZE
+
+                expected_parts_sizes = \
+                    [len(last_data), len(data) - len(last_data)] \
+                        if should_be_incremental else [len(data)]
+
+                write_file(path, data)
+                with mock.patch.object(
+                    self.bucket, 'concatenate', wraps=self.bucket.concatenate
+                ) as mocked_concatenate:
+                    self.bucket.upload_local_file(path, 'file1', upload_mode=UploadMode.INCREMENTAL)
+                    mocked_concatenate.assert_called_once()
+                    call = mocked_concatenate.mock_calls[0]
+                    # TODO: use .args[0] instead of [1][0] when we drop Python 3.7
+                    assert len(call[1][0]) == expected_source_count
+                    # Ensuring that the part sizes make sense.
+                    parts_sizes = [entry.get_content_length() for entry in call[1][0]]
+                    assert parts_sizes == expected_parts_sizes
+                    if should_be_incremental:
+                        # Ensuring that the first part is a copy.
+                        # Order of indices: pick arguments, pick first argument, first element of the first argument.
+                        self.assertIsInstance(call[1][0][0], CopySource)
+
+                self._check_file_contents('file1', data)
+                if expected_large_file:
+                    self._check_large_file_sha1('file1', hex_sha1_of_bytes(data))
+
+                last_data = data
+
     @pytest.mark.skipif(platform.system() == 'Windows', reason='no os.mkfifo() on Windows')
     def test_upload_fifo(self):
         with TempDir() as d:
@@ -1204,6 +1535,7 @@ class TestUpload(TestCaseWithBucket):
         progress_listener = StubProgressListener()
         self.bucket.upload_bytes(data, 'file1', progress_listener=progress_listener)
         self._check_file_contents('file1', data)
+        self._check_large_file_sha1('file1', hex_sha1_of_bytes(data))
         self.assertTrue(progress_listener.is_valid())
 
     def test_upload_local_large_file(self):
@@ -1213,6 +1545,7 @@ class TestUpload(TestCaseWithBucket):
             write_file(path, data)
             self.bucket.upload_local_file(path, 'file1')
             self._check_file_contents('file1', data)
+            self._check_large_file_sha1('file1', hex_sha1_of_bytes(data))
 
     def test_upload_local_large_file_over_10k_parts(self):
         pytest.skip('this test is really slow and impedes development')  # TODO: fix it
@@ -1222,6 +1555,7 @@ class TestUpload(TestCaseWithBucket):
             write_file(path, data)
             self.bucket.upload_local_file(path, 'file1')
             self._check_file_contents('file1', data)
+            self._check_large_file_sha1('file1', hex_sha1_of_bytes(data))
 
     def test_create_file_over_10k_parts(self):
         data = b'hello world' * 20000
@@ -1344,6 +1678,20 @@ class TestUpload(TestCaseWithBucket):
         self._check_file_contents('path/to/file1', data)
         self.assertTrue(progress_listener.is_valid())
 
+    def test_upload_stream(self):
+        data = self._make_data(self.simulator.MIN_PART_SIZE * 3)
+        self.bucket.upload_unbound_stream(io.BytesIO(data), 'file1')
+        self._check_file_contents('file1', data)
+
+    def test_upload_stream_from_file(self):
+        with TempDir() as d:
+            path = os.path.join(d, 'file1')
+            data = self._make_data(self.simulator.MIN_PART_SIZE * 3)
+            write_file(path, data)
+            with open(path, 'rb') as f:
+                self.bucket.upload_unbound_stream(f, 'file1')
+            self._check_file_contents('file1', data)
+
     def _start_large_file(self, file_name, file_info=None):
         if file_info is None:
             file_info = {}
@@ -1445,6 +1793,50 @@ class TestCreateFileStream(TestConcatenate):
         )
 
 
+class TestCustomTimestamp(TestCaseWithBucket):
+    def test_custom_timestamp(self):
+        data = b'hello world'
+
+        # upload
+        self.bucket.upload_bytes(data, 'file0', custom_upload_timestamp=0)
+
+        with TempDir() as d:
+            path = os.path.join(d, 'file1')
+            write_file(path, data)
+            self.bucket.upload_local_file(path, 'file1', custom_upload_timestamp=1)
+
+        upload_source = UploadSourceBytes(data)
+        self.bucket.upload(upload_source, 'file2', custom_upload_timestamp=2)
+        self.bucket.upload_unbound_stream(io.BytesIO(data), 'file3', custom_upload_timestamp=3)
+
+        # concatenate
+        self.bucket.concatenate([upload_source], 'file4', custom_upload_timestamp=4)
+        self.bucket.concatenate_stream([upload_source], 'file5', custom_upload_timestamp=5)
+
+        # create_file
+        self.bucket.create_file(
+            [WriteIntent(upload_source, destination_offset=0)], 'file6', custom_upload_timestamp=6
+        )
+        self.bucket.create_file_stream(
+            [WriteIntent(upload_source, destination_offset=0)], 'file7', custom_upload_timestamp=7
+        )
+
+        def ls(bucket):
+            return [(info.file_name, info.upload_timestamp) for (info, folder) in bucket_ls(bucket)]
+
+        expected = [
+            ('file0', 0),
+            ('file1', 1),
+            ('file2', 2),
+            ('file3', 3),
+            ('file4', 4),
+            ('file5', 5),
+            ('file6', 6),
+            ('file7', 7),
+        ]
+        self.assertEqual(ls(self.bucket), expected)
+
+
 class DownloadTestsBase:
     DATA = NotImplemented
 
@@ -2122,3 +2514,167 @@ class DecodeTests(DecodeTestsBase, TestCaseWithBucket):
     def test_file_info_4(self):
         download_version = self.bucket.get_file_info_by_name('test.txt%253Ffoo%253Dbar')
         assert download_version.file_name == 'test.txt%253Ffoo%253Dbar'
+
+
+class TestAuthorizeForBucket(TestCaseWithBucket):
+    CACHE_CLASS = InMemoryCache
+
+    @pytest.mark.apiver(from_ver=2)
+    def test_authorize_for_bucket_ensures_cache(self):
+        key = create_key(
+            self.api,
+            key_name='singlebucket',
+            capabilities=[
+                'listBuckets',
+            ],
+            bucket_id=self.bucket_id,
+        )
+
+        self.api.authorize_account('production', key.id_, key.application_key)
+
+        # Check whether the bucket fetching performs an API call.
+        with mock.patch.object(self.api, 'list_buckets') as mock_list_buckets:
+            self.api.get_bucket_by_id(self.bucket_id)
+            mock_list_buckets.assert_not_called()
+
+            self.api.get_bucket_by_name(self.bucket_name)
+            mock_list_buckets.assert_not_called()
+
+    @pytest.mark.apiver(from_ver=2)
+    def test_authorize_for_non_existing_bucket(self):
+        key = create_key(
+            self.api,
+            key_name='singlebucket',
+            capabilities=[
+                'listBuckets',
+            ],
+            bucket_id=self.bucket_id + 'x',
+        )
+
+        with self.assertRaises(RestrictedBucketMissing):
+            self.api.authorize_account('production', key.id_, key.application_key)
+
+
+class TestDownloadLocalDirectoryIssues(TestCaseWithBucket):
+    def setUp(self):
+        super().setUp()
+        self.file_version = self.bucket.upload_bytes(b'test-data', 'file1')
+        self.bytes_io = io.BytesIO()
+        self.progress_listener = StubProgressListener()
+
+    @pytest.mark.apiver(from_ver=2)
+    def test_download_file_to_unknown_directory(self):
+        with TempDir() as temp_dir:
+            target_file = pathlib.Path(temp_dir) / 'non-existing-directory' / 'some-file'
+            with self.assertRaises(DestinationDirectoryDoesntExist):
+                self.bucket.download_file_by_name(self.file_version.file_name).save_to(target_file)
+
+    @pytest.mark.apiver(from_ver=2)
+    def test_download_file_targeting_directory(self):
+        with TempDir() as temp_dir:
+            target_file = pathlib.Path(temp_dir) / 'existing-directory'
+            os.makedirs(target_file, exist_ok=True)
+
+            with self.assertRaises(DestinationIsADirectory):
+                self.bucket.download_file_by_name(self.file_version.file_name).save_to(target_file)
+
+    @pytest.mark.apiver(from_ver=2)
+    def test_download_file_targeting_directory_is_a_file(self):
+        with TempDir() as temp_dir:
+            some_file = pathlib.Path(temp_dir) / 'existing-file'
+            some_file.write_bytes(b'i-am-a-file')
+            target_file = some_file / 'save-target'
+
+            with self.assertRaises(DestinationParentIsNotADirectory):
+                self.bucket.download_file_by_name(self.file_version.file_name).save_to(target_file)
+
+    @pytest.mark.apiver(from_ver=2)
+    @pytest.mark.skipif(
+        platform.system() == 'Windows',
+        reason='os.chmod on Windows only affects read-only flag for files',
+    )
+    def test_download_file_no_access_to_directory(self):
+        chain = contextlib.ExitStack()
+        temp_dir = chain.enter_context(TempDir())
+
+        with chain:
+            target_directory = pathlib.Path(temp_dir) / 'impossible-directory'
+
+            os.makedirs(target_directory, exist_ok=True)
+            # Don't allow any operation on this directory. Used explicitly, as the documentation
+            # states that on some platforms passing mode to `makedirs` may be ignored.
+            os.chmod(target_directory, mode=0)
+
+            # Ensuring that whenever we exit this context, our directory will be removable.
+            chain.push(lambda *args, **kwargs: os.chmod(target_directory, mode=0o777))
+
+            target_file = target_directory / 'target_file'
+            with self.assertRaises(DestinationDirectoryDoesntAllowOperation):
+                self.bucket.download_file_by_name(self.file_version.file_name).save_to(target_file)
+
+
+# Listing where every other response returns no entries and pointer to the next file
+class EmptyListBucketSimulator(BucketSimulator):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # Whenever we receive a list request, if it's the first time
+        # for this particular ``start_file_name``, we'll return
+        # an empty response pointing to the same file.
+        self.last_queried_file = None
+
+    def _should_return_empty(self, file_name: str) -> bool:
+        # Note that every other request is empty – the logic is as follows:
+        #   1st request – unknown start name – empty response
+        #   2nd request – known start name – normal response with a proper next filename
+        #   3rd request – unknown start name (as it's the next filename from the previous request) – empty response
+        #   4th request – known start name
+    # etc. This works especially well when the limit on the number of files fetched per request is set to 1.
+        should_return_empty = self.last_queried_file != file_name
+        self.last_queried_file = file_name
+        return should_return_empty
+
+    def list_file_versions(
+        self,
+        account_auth_token,
+        start_file_name=None,
+        start_file_id=None,
+        max_file_count=None,  # noqa
+        prefix=None,
+    ):
+        if self._should_return_empty(start_file_name):
+            return dict(files=[], nextFileName=start_file_name, nextFileId=start_file_id)
+        return super().list_file_versions(
+            account_auth_token,
+            start_file_name,
+            start_file_id,
+            1,  # Forcing only a single file per response.
+            prefix,
+        )
+
+    def list_file_names(
+        self,
+        account_auth_token,
+        start_file_name=None,
+        max_file_count=None,  # noqa
+        prefix=None,
+    ):
+        if self._should_return_empty(start_file_name):
+            return dict(files=[], nextFileName=start_file_name)
+        return super().list_file_names(
+            account_auth_token,
+            start_file_name,
+            1,  # Forcing only a single file per response.
+            prefix,
+        )
+
+
+class EmptyListSimulator(RawSimulator):
+    BUCKET_SIMULATOR_CLASS = EmptyListBucketSimulator
+
+
+class TestEmptyListVersions(TestListVersions):
+    RAW_SIMULATOR_CLASS = EmptyListSimulator
+
+
+class TestEmptyLs(TestLs):
+    RAW_SIMULATOR_CLASS = EmptyListSimulator
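
The new bucket tests above exercise unbound stream uploads and custom upload timestamps through the public Bucket API. A minimal usage sketch, assuming an already-authorized account and an existing bucket; the credentials and bucket name are placeholders:

    import io

    from b2sdk.v2 import B2Api, InMemoryAccountInfo

    api = B2Api(InMemoryAccountInfo())
    api.authorize_account('production', 'APP_KEY_ID', 'APP_KEY')  # placeholder credentials
    bucket = api.get_bucket_by_name('example-bucket')  # illustrative bucket name

    # Upload from a stream of unknown length; the SDK buffers the stream and
    # picks between a small upload and a large-file upload as data arrives.
    bucket.upload_unbound_stream(io.BytesIO(b'hello world'), 'file1')

    # Explicitly set the upload timestamp of the new file version (milliseconds since the epoch).
    bucket.upload_bytes(b'hello world', 'file2', custom_upload_timestamp=0)
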
diff --git a/test/unit/conftest.py b/test/unit/conftest.py
index fe94258..32d68cc 100644
--- a/test/unit/conftest.py
+++ b/test/unit/conftest.py
@@ -144,3 +144,9 @@ def pytest_runtest_setup(item):
 def apiver(request):
     """Get apiver as a v-prefixed string, e.g. "v2"."""
     return request.config.getoption('--api')
+
+
+@pytest.fixture(scope='session')
+def apiver_int(apiver):
+    """Get apiver as an int, e.g. `2`."""
+    return int(apiver[1:])
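
A hypothetical test using the new fixture, for illustration only (the test name is made up):

    import pytest

    def test_needs_v2_behaviour(apiver_int):
        if apiver_int < 2:
            pytest.skip('behaviour only defined for apiver v2 and later')
        assert apiver_int >= 2
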
diff --git a/test/unit/internal/test_emerge_planner.py b/test/unit/internal/test_emerge_planner.py
index 4296dc0..7d087b9 100644
--- a/test/unit/internal/test_emerge_planner.py
+++ b/test/unit/internal/test_emerge_planner.py
@@ -8,11 +8,11 @@
 #
 ######################################################################
 
-from b2sdk.transfer.emerge.planner.planner import (
-    EmergePlanner,
+from b2sdk.http_constants import (
     GIGABYTE,
     MEGABYTE,
 )
+from b2sdk.transfer.emerge.planner.planner import EmergePlanner
 from b2sdk.transfer.emerge.planner.part_definition import (
     CopyEmergePartDefinition,
     UploadEmergePartDefinition,
diff --git a/test/unit/internal/test_unbound_write_intent.py b/test/unit/internal/test_unbound_write_intent.py
new file mode 100644
index 0000000..5a4ab29
--- /dev/null
+++ b/test/unit/internal/test_unbound_write_intent.py
@@ -0,0 +1,154 @@
+######################################################################
+#
+# File: test/unit/internal/test_unbound_write_intent.py
+#
+# Copyright 2022 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
+
+import io
+import string
+from typing import Optional
+from unittest.mock import MagicMock
+
+from b2sdk.transfer.emerge.unbound_write_intent import (
+    IOWrapper,
+    UnboundSourceBytes,
+    UnboundStreamBufferTimeout,
+    UnboundWriteIntentGenerator,
+)
+from b2sdk.transfer.emerge.write_intent import WriteIntent
+from b2sdk.utils import hex_sha1_of_bytes
+
+from .test_base import TestBase
+
+
+class TestIOWrapper(TestBase):
+    def setUp(self) -> None:
+        self.data = b'test-data'
+        self.mock_fun = MagicMock()
+        self.wrapper = IOWrapper(self.data, release_function=self.mock_fun)
+
+    def test_function_called_only_after_empty_read(self):
+        self.mock_fun.assert_not_called()
+
+        self.wrapper.read(1)
+        self.mock_fun.assert_not_called()
+
+        self.wrapper.read(len(self.data) - 1)
+        self.mock_fun.assert_not_called()
+
+        self.wrapper.seek(0)
+        self.mock_fun.assert_not_called()
+
+        self.wrapper.read(len(self.data))
+        self.mock_fun.assert_not_called()
+
+        self.wrapper.seek(0)
+        self.mock_fun.assert_not_called()
+
+        for _ in range(len(self.data)):
+            self.wrapper.read(1)
+            self.mock_fun.assert_not_called()
+
+        self.assertEqual(0, len(self.wrapper.read(1)))
+        self.mock_fun.assert_called_once()
+
+    def test_function_called_exactly_once(self):
+        self.wrapper.read(len(self.data))
+        self.wrapper.read(1)
+        self.mock_fun.assert_called_once()
+
+        self.wrapper.seek(0)
+        self.wrapper.read(len(self.data))
+        self.wrapper.read(1)
+        self.mock_fun.assert_called_once()
+
+
+class TestUnboundSourceBytes(TestBase):
+    def test_data_has_length_and_sha1_calculated_without_touching_the_stream(self):
+        data = bytearray(b'test-data')
+        mock_fun = MagicMock()
+        source = UnboundSourceBytes(data, mock_fun)
+
+        self.assertEqual(len(data), source.get_content_length())
+        self.assertEqual(hex_sha1_of_bytes(data), source.get_content_sha1())
+        mock_fun.assert_not_called()
+
+
+class TestUnboundWriteIntentGenerator(TestBase):
+    def setUp(self) -> None:
+        self.data = b'test-data'
+        self.kwargs = dict(
+            # From the perspective of the UnboundWriteIntentGenerator itself, the queue size
+            # can be any positive integer. Bucket requires it to be at least two, so that
+            # it can determine the upload method.
+            queue_size=1,
+            queue_timeout_seconds=0.1,
+        )
+
+    def _get_iterator(self, buffer_and_read_size: int = 1, data: Optional[bytes] = None):
+        data = data or self.data
+        generator = UnboundWriteIntentGenerator(
+            io.BytesIO(data),
+            buffer_size_bytes=buffer_and_read_size,
+            read_size=buffer_and_read_size,
+            **self.kwargs
+        )
+        return generator.iterator()
+
+    def _read_write_intent(self, write_intent: WriteIntent, full_read_size: int = 1) -> bytes:
+        buffer_stream = write_intent.outbound_source.open()  # noqa
+        read_data = buffer_stream.read(full_read_size)
+        empty_data = buffer_stream.read(full_read_size)
+        self.assertEqual(0, len(empty_data))
+        return read_data
+
+    def test_timeout_called_when_waiting_too_long_for_empty_buffer_slot(self):
+        # First buffer is delivered without issues.
+        iterator = self._get_iterator()
+        next(iterator)
+        with self.assertRaises(UnboundStreamBufferTimeout):
+            # Since we didn't read the first one, the second one is blocked.
+            next(iterator)
+
+    def test_all_data_iterated_over(self):
+        # This also tests the empty-last-buffer case.
+        data_loaded = []
+
+        for write_intent in self._get_iterator():
+            read_data = self._read_write_intent(write_intent, 1)
+            self.assertEqual(
+                self.data[write_intent.destination_offset].to_bytes(1, 'big'),
+                read_data,
+            )
+            data_loaded.append((read_data, write_intent.destination_offset))
+
+        expected_data_loaded = [
+            (byte.to_bytes(1, 'big'), idx) for idx, byte in enumerate(self.data)
+        ]
+        self.assertCountEqual(expected_data_loaded, data_loaded)
+
+    def test_larger_buffer_size(self):
+        # This also tests the non-empty-last-buffer case.
+        read_size = 4
+        # Build a buffer of N reads of size read_size and one more byte.
+        data = b''.join(string.printable[:read_size].encode('ascii') for _ in range(2)) + b'1'
+
+        for write_intent in self._get_iterator(read_size, data):
+            read_data = self._read_write_intent(write_intent, full_read_size=read_size)
+            offset = write_intent.destination_offset
+            expected_data = data[offset:offset + read_size]
+            self.assertEqual(expected_data, read_data)
+
+    def test_single_buffer_delivered(self):
+        read_size = len(self.data) + 1
+        iterator = self._get_iterator(read_size)
+
+        write_intent = next(iterator)
+        self._read_write_intent(write_intent, full_read_size=read_size)
+
+        with self.assertRaises(StopIteration):
+            next(iterator)
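
For orientation, a minimal sketch of the generator these tests exercise; the sizes are illustrative, and each buffer must be read to exhaustion before the next write intent is produced:

    import io

    from b2sdk.transfer.emerge.unbound_write_intent import UnboundWriteIntentGenerator

    generator = UnboundWriteIntentGenerator(
        io.BytesIO(b'example-stream-data'),
        buffer_size_bytes=4,
        read_size=4,
        queue_size=2,
        queue_timeout_seconds=1.0,
    )

    for write_intent in generator.iterator():
        buffer = write_intent.outbound_source.open()
        chunk = buffer.read(4)
        # An empty read releases the buffer slot, letting the generator
        # prepare the next write intent from the stream.
        buffer.read(4)
        print(write_intent.destination_offset, chunk)
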
diff --git a/test/unit/replication/test_monitoring.py b/test/unit/replication/test_monitoring.py
index 5d936d0..161c9b9 100644
--- a/test/unit/replication/test_monitoring.py
+++ b/test/unit/replication/test_monitoring.py
@@ -21,7 +21,7 @@ RETENTION_GOVERNANCE = FileRetentionSetting(RetentionMode.GOVERNANCE, retain_unt
 DEFAULT_REPLICATION_RESULT = dict(
     source_replication_status=None,
     source_has_hide_marker=False,
-    source_has_sse_c_enabled=False,
+    source_encryption_mode=EncryptionMode.NONE,
     source_has_large_metadata=False,
     source_has_file_retention=False,
     source_has_legal_hold=False,
@@ -60,7 +60,8 @@ def test_iter_pairs(source_bucket, destination_bucket, test_file, monitor):
 def test_scan_source(source_bucket, test_file, monitor):
     # upload various types of files to source and get a report
     files = [
-        source_bucket.upload_local_file(test_file, 'folder/test-1.txt'),
+        source_bucket.upload_local_file(test_file, 'folder/test-1-1.txt'),
+        source_bucket.upload_local_file(test_file, 'folder/test-1-2.txt'),
         source_bucket.upload_local_file(test_file, 'folder/test-2.txt', encryption=SSE_B2_AES),
         source_bucket.upload_local_file(test_file,
                                         'not-in-folder.txt'),  # monitor should ignore this
@@ -95,14 +96,21 @@ def test_scan_source(source_bucket, test_file, monitor):
     assert report.counter_by_status[ReplicationScanResult(
         **{
             **DEFAULT_REPLICATION_RESULT,
-            'source_has_sse_c_enabled': True,
+            'source_encryption_mode': EncryptionMode.SSE_B2,
+        }
+    )] == 1
+
+    assert report.counter_by_status[ReplicationScanResult(
+        **{
+            **DEFAULT_REPLICATION_RESULT,
+            'source_encryption_mode': EncryptionMode.SSE_C,
         }
     )] == 2
 
     assert report.counter_by_status[ReplicationScanResult(
         **{
             **DEFAULT_REPLICATION_RESULT,
-            'source_has_sse_c_enabled': True,
+            'source_encryption_mode': EncryptionMode.SSE_C,
             'source_has_file_retention': True,
         }
     )] == 1
@@ -117,7 +125,7 @@ def test_scan_source(source_bucket, test_file, monitor):
     assert report.counter_by_status[ReplicationScanResult(
         **{
             **DEFAULT_REPLICATION_RESULT,
-            'source_has_sse_c_enabled': True,
+            'source_encryption_mode': EncryptionMode.SSE_C,
             'source_has_large_metadata': True,
         }
     )] == 1
@@ -187,7 +195,7 @@ def test_scan_source_and_destination(
             **DEFAULT_REPLICATION_RESULT,
             'source_replication_status': None,
             'source_has_hide_marker': None,
-            'source_has_sse_c_enabled': None,
+            'source_encryption_mode': None,
             'source_has_large_metadata': None,
             'source_has_file_retention': None,
             'source_has_legal_hold': None,
diff --git a/test/unit/sync/fixtures.py b/test/unit/sync/fixtures.py
index 67f2fa3..9dd0974 100644
--- a/test/unit/sync/fixtures.py
+++ b/test/unit/sync/fixtures.py
@@ -10,7 +10,8 @@
 
 import pytest
 
-from apiver_deps import DEFAULT_SCAN_MANAGER, POLICY_MANAGER, CompareVersionMode, KeepOrDeleteMode, NewerFileSyncMode, Synchronizer
+import apiver_deps
+from apiver_deps import DEFAULT_SCAN_MANAGER, POLICY_MANAGER, CompareVersionMode, KeepOrDeleteMode, NewerFileSyncMode, Synchronizer, UploadMode
 
 
 @pytest.fixture(scope='session')
@@ -25,7 +26,19 @@ def synchronizer_factory():
         compare_version_mode=CompareVersionMode.MODTIME,
         compare_threshold=None,
         sync_policy_manager=POLICY_MANAGER,
+        upload_mode=UploadMode.FULL,
+        absolute_minimum_part_size=None,
     ):
+        kwargs = {}
+        if apiver_deps.V < 2:
+            assert upload_mode == UploadMode.FULL, "upload_mode not supported in apiver < 2"
+            assert absolute_minimum_part_size is None, "absolute_minimum_part_size not supported in apiver < 2"
+        else:
+            kwargs = dict(
+                upload_mode=upload_mode,
+                absolute_minimum_part_size=absolute_minimum_part_size,
+            )
+
         return Synchronizer(
             1,
             policies_manager=policies_manager,
@@ -37,6 +50,7 @@ def synchronizer_factory():
             compare_version_mode=compare_version_mode,
             compare_threshold=compare_threshold,
             sync_policy_manager=sync_policy_manager,
+            **kwargs
         )
 
     return get_synchronizer
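
With apiver v2 and later the factory above forwards the new arguments straight to the Synchronizer; a minimal construction sketch, with an illustrative part size:

    from b2sdk.v2 import Synchronizer, UploadMode

    synchronizer = Synchronizer(
        1,  # max workers
        upload_mode=UploadMode.INCREMENTAL,
        absolute_minimum_part_size=5 * 1024 * 1024,
    )
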
diff --git a/test/unit/sync/test_sync.py b/test/unit/sync/test_sync.py
index 4035176..1ff6a96 100644
--- a/test/unit/sync/test_sync.py
+++ b/test/unit/sync/test_sync.py
@@ -7,14 +7,16 @@
 # License https://www.backblaze.com/using_b2_code.html
 #
 ######################################################################
+import io
 from collections import defaultdict
+from contextlib import ExitStack
 from unittest import mock
 from enum import Enum
 from functools import partial
 
-from apiver_deps import UpPolicy, B2DownloadAction, AbstractSyncEncryptionSettingsProvider, UploadSourceLocalFile, SyncPolicyManager
+from apiver_deps import UpPolicy, B2DownloadAction, AbstractSyncEncryptionSettingsProvider, UploadSourceLocalFileRange, UploadSourceLocalFile, SyncPolicyManager, CopySource
 from apiver_deps_exception import DestFileNewer, InvalidArgument
-from apiver_deps import KeepOrDeleteMode, NewerFileSyncMode, CompareVersionMode
+from apiver_deps import KeepOrDeleteMode, NewerFileSyncMode, CompareVersionMode, FileVersion, IncrementalHexDigester
 import pytest
 from ..fixtures.folder import *
 from .fixtures import *
@@ -475,7 +477,7 @@ class TestSynchronizer:
             ('b2', 'b2', ['b2_copy(folder/a.txt, id_a_200, folder/a.txt, 200)']),
         ],
     )
-    def test_never(self, synchronizer, src_type, dst_type, expected):
+    def test_newer(self, synchronizer, src_type, dst_type, expected):
         src = self.folder_factory(src_type, ('a.txt', [200]))
         dst = self.folder_factory(dst_type, ('a.txt', [100]))
         self.assert_folder_sync_actions(synchronizer, src, dst, expected)
@@ -771,12 +773,13 @@ class TestSynchronizer:
                 pass
 
         assert bucket.mock_calls == [
-            mock.call.upload(
+            mock.call.concatenate(
                 mock.ANY,
                 'folder/directory/a.txt',
                 file_info={'src_last_modified_millis': '100'},
                 progress_listener=mock.ANY,
-                encryption=encryption
+                encryption=encryption,
+                large_file_sha1=None,
             )
         ]
 
@@ -870,6 +873,88 @@ class TestSynchronizer:
         expected = ['b2_upload(/dir/a.txt, folder/a.txt, 200)']
         self.assert_folder_sync_actions(synchronizer, src, dst, expected)
 
+    # FIXME: rewrite this test to not use mock.call checks when all of Synchronizer's tests are rewritten to test_bucket
+    # style - i.e. with simulated api and fake files returned from methods.
+    @pytest.mark.apiver(from_ver=2)
+    @pytest.mark.parametrize(
+        "local_size,remote_size,local_sha1,local_partial_sha1,remote_sha1,should_be_incremental",
+        [
+            (2000, 1000, "ff" * 20, "aa" * 20, "aa" * 20, True),  # incremental upload possible
+            (2000, 999, "ff" * 20, "aa" * 20, "aa" * 20, False),  # uploaded part too small
+            (2000, 1000, "ff" * 20, "aa" * 20, None, False),  # remote sha unknown
+            (2000, 1000, "ff" * 20, "aa" * 20, "bb" * 20, False),  # remote sha mismatch
+            (2000, 3000, "ff" * 20, "aa" * 20, "bb" * 20, False),  # remote file bigger
+        ]
+    )
+    def test_incremental_upload(
+        self, synchronizer_factory, local_size, remote_size, local_sha1, local_partial_sha1,
+        remote_sha1, should_be_incremental
+    ):
+
+        synchronizer = synchronizer_factory(
+            upload_mode=UploadMode.INCREMENTAL, absolute_minimum_part_size=1000
+        )
+
+        src = self.folder_factory('local', ('a.txt', [200], local_size))
+        dst = self.folder_factory('b2', ('a.txt', [100], remote_size))
+
+        upload_action = next(
+            iter(self._make_folder_sync_actions(synchronizer, src, dst, TODAY, self.reporter))
+        )
+
+        bucket = mock.MagicMock()
+
+        def update_from_stream(self, limit=None):
+            if limit is None:
+                return local_sha1
+            elif limit == remote_size:
+                return local_partial_sha1
+            else:
+                assert False
+
+        def check_path_and_get_size(self):
+            self.content_length = local_size
+
+        with ExitStack() as stack:
+            patches = [
+                mock.patch.object(
+                    UploadSourceLocalFile, 'open', mock.mock_open(read_data='test-data')
+                ),
+                mock.patch.object(IncrementalHexDigester, 'update_from_stream', update_from_stream),
+                mock.patch.object(
+                    UploadSourceLocalFile, 'check_path_and_get_size', check_path_and_get_size
+                ),
+                mock.patch.object(
+                    UploadSourceLocalFile, '_hex_sha1_of_file', return_value=local_sha1
+                ),
+                mock.patch.object(
+                    UploadSourceLocalFileRange, 'check_path_and_get_size', check_path_and_get_size
+                ),
+                mock.patch.object(FileVersion, 'get_content_sha1', return_value=remote_sha1),
+            ]
+            for patch in patches:
+                stack.enter_context(patch)
+
+            upload_action.do_action(bucket, self.reporter)
+
+        assert bucket.mock_calls == [
+            mock.call.concatenate(
+                mock.ANY,
+                'folder/a.txt',
+                file_info=mock.ANY,
+                progress_listener=mock.ANY,
+                encryption=None,
+                large_file_sha1=local_sha1 if should_be_incremental else None,
+            )
+        ]
+        # In Python 3.7 unittest.mock.call doesn't have `args` properly defined. Instead we have to take 1st index.
+        # TODO: use .args[0] instead of [1] when we drop Python 3.7
+        num_sources = len(bucket.mock_calls[0][1][0])
+        assert num_sources == (2 if should_be_incremental else 1), bucket.mock_calls[0]
+        if should_be_incremental:
+            # Order of indices: call index, pick arguments, pick first argument, first element of the first argument.
+            assert isinstance(bucket.mock_calls[0][1][0][0], CopySource)
+
 
 class TstEncryptionSettingsProvider(AbstractSyncEncryptionSettingsProvider):
     def __init__(self, source_encryption_setting, destination_encryption_setting):
diff --git a/test/unit/test_cache.py b/test/unit/test_cache.py
new file mode 100644
index 0000000..84d41c7
--- /dev/null
+++ b/test/unit/test_cache.py
@@ -0,0 +1,89 @@
+######################################################################
+#
+# File: test/unit/test_cache.py
+#
+# Copyright 2023 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
+
+from dataclasses import dataclass
+
+import pytest
+
+from pytest_lazyfixture import lazy_fixture
+
+from apiver_deps import DummyCache, InMemoryCache, AuthInfoCache, InMemoryAccountInfo
+
+
+@pytest.fixture
+def dummy_cache():
+    return DummyCache()
+
+
+@pytest.fixture
+def in_memory_cache():
+    return InMemoryCache()
+
+
+@pytest.fixture
+def auth_info_cache():
+    return AuthInfoCache(InMemoryAccountInfo())
+
+
+@pytest.fixture(
+    scope="class", params=[lazy_fixture('in_memory_cache'),
+                           lazy_fixture('auth_info_cache')]
+)
+def cache(request):
+    return request.param
+
+
+@dataclass
+class DummyBucket:
+    name: str
+    id_: str
+
+
+@pytest.fixture
+def buckets():
+    class InfBuckets(list):
+        def __getitem__(self, item: int):
+            self.extend(DummyBucket(f'bucket{i}', f'ID-{i}') for i in range(len(self), item + 1))
+            return super().__getitem__(item)
+
+    return InfBuckets()
+
+
+class TestCache:
+    def test_save_bucket(self, cache, buckets):
+        cache.save_bucket(buckets[0])
+
+    def test_get_bucket_id_or_none_from_bucket_name(self, cache, buckets):
+        assert cache.get_bucket_id_or_none_from_bucket_name('bucket0') is None
+        cache.save_bucket(buckets[0])
+        assert cache.get_bucket_id_or_none_from_bucket_name('bucket0') == 'ID-0'
+
+    def test_get_bucket_name_or_none_from_bucket_id(self, cache, buckets):
+        assert cache.get_bucket_name_or_none_from_bucket_id('ID-0') is None
+        cache.save_bucket(buckets[0])
+        assert cache.get_bucket_name_or_none_from_bucket_id('ID-0') == 'bucket0'
+
+    @pytest.mark.apiver(from_ver=3)
+    def test_list_bucket_names_ids(self, cache, buckets):
+        assert cache.list_bucket_names_ids() == []
+        for i in range(2):
+            cache.save_bucket(buckets[i])
+        assert cache.list_bucket_names_ids() == [('bucket0', 'ID-0'), ('bucket1', 'ID-1')]
+
+    def test_set_bucket_name_cache(self, cache, buckets):
+        cache.set_bucket_name_cache([buckets[i] for i in range(2, 4)])
+
+        assert cache.get_bucket_id_or_none_from_bucket_name('bucket1') is None
+        assert cache.get_bucket_id_or_none_from_bucket_name('bucket2') == 'ID-2'
+
+        cache.set_bucket_name_cache([buckets[1]])
+
+        assert cache.get_bucket_id_or_none_from_bucket_name('bucket1') == 'ID-1'
+        assert cache.get_bucket_id_or_none_from_bucket_name('bucket2') is None
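
The cache classes share the small interface exercised above; a quick sketch against AuthInfoCache, using an illustrative bucket name:

    from b2sdk.v2 import AuthInfoCache, InMemoryAccountInfo

    cache = AuthInfoCache(InMemoryAccountInfo())
    assert cache.get_bucket_id_or_none_from_bucket_name('example-bucket') is None
    # cache.save_bucket(bucket) is normally called by the API layer once a
    # bucket has been created or listed; after that the lookups resolve.
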
diff --git a/test/unit/utils/__init__.py b/test/unit/utils/__init__.py
new file mode 100644
index 0000000..b36b27d
--- /dev/null
+++ b/test/unit/utils/__init__.py
@@ -0,0 +1,9 @@
+######################################################################
+#
+# File: test/unit/utils/__init__.py
+#
+# Copyright 2022 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
diff --git a/test/unit/utils/test_incremental_hex_digester.py b/test/unit/utils/test_incremental_hex_digester.py
new file mode 100644
index 0000000..3765a17
--- /dev/null
+++ b/test/unit/utils/test_incremental_hex_digester.py
@@ -0,0 +1,77 @@
+######################################################################
+#
+# File: test/unit/utils/test_incremental_hex_digester.py
+#
+# Copyright 2022 Backblaze Inc. All Rights Reserved.
+#
+# License https://www.backblaze.com/using_b2_code.html
+#
+######################################################################
+
+import hashlib
+import io
+
+from b2sdk.utils import (
+    IncrementalHexDigester,
+    Sha1HexDigest,
+)
+from test.unit.test_base import TestBase
+
+
+class TestIncrementalHexDigester(TestBase):
+    BLOCK_SIZE = 4
+
+    def _get_sha1(self, input_data: bytes) -> Sha1HexDigest:
+        return Sha1HexDigest(hashlib.sha1(input_data).hexdigest())
+
+    def _get_digester(self, stream: io.IOBase) -> IncrementalHexDigester:
+        return IncrementalHexDigester(stream, block_size=self.BLOCK_SIZE)
+
+    def test_limited_read(self):
+        limit = self.BLOCK_SIZE * 10
+        input_data = b'1' * limit * 2
+        stream = io.BytesIO(input_data)
+        expected_sha1 = self._get_sha1(input_data[:limit])
+
+        result_sha1 = self._get_digester(stream).update_from_stream(limit)
+
+        self.assertEqual(expected_sha1, result_sha1)
+        self.assertEqual(limit, stream.tell())
+
+    def test_limited_read__stream_smaller_than_block_size(self):
+        limit = self.BLOCK_SIZE * 99
+        input_data = b'1' * (self.BLOCK_SIZE - 1)
+        stream = io.BytesIO(input_data)
+        expected_sha1 = self._get_sha1(input_data)
+
+        result_sha1 = self._get_digester(stream).update_from_stream(limit)
+
+        self.assertEqual(expected_sha1, result_sha1)
+        self.assertEqual(len(input_data), stream.tell())
+
+    def test_unlimited_read(self):
+        input_data = b'1' * self.BLOCK_SIZE * 10
+        stream = io.BytesIO(input_data)
+        expected_sha1 = self._get_sha1(input_data)
+
+        result_sha1 = self._get_digester(stream).update_from_stream()
+
+        self.assertEqual(expected_sha1, result_sha1)
+        self.assertEqual(len(input_data), stream.tell())
+
+    def test_limited_and_unlimited_read(self):
+        blocks_count = 5
+        limit = self.BLOCK_SIZE * 5
+        input_data = b'1' * limit * blocks_count
+        stream = io.BytesIO(input_data)
+
+        digester = self._get_digester(stream)
+
+        for idx in range(blocks_count - 1):
+            expected_sha1_part = self._get_sha1(input_data[:limit * (idx + 1)])
+            result_sha1_part = digester.update_from_stream(limit)
+            self.assertEqual(expected_sha1_part, result_sha1_part)
+
+        expected_sha1_whole = self._get_sha1(input_data)
+        result_sha1_whole = digester.update_from_stream()
+        self.assertEqual(expected_sha1_whole, result_sha1_whole)
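
IncrementalHexDigester keeps a running SHA1 over a stream, so a prefix digest and the digest of the whole stream come from a single pass; a small sketch mirroring the tests above, with an illustrative block size:

    import hashlib
    import io

    from b2sdk.utils import IncrementalHexDigester

    stream = io.BytesIO(b'0123456789abcdef')
    digester = IncrementalHexDigester(stream, block_size=4)

    first_half = digester.update_from_stream(8)  # digest of the first 8 bytes
    whole = digester.update_from_stream()        # digest of everything read so far

    assert first_half == hashlib.sha1(b'01234567').hexdigest()
    assert whole == hashlib.sha1(b'0123456789abcdef').hexdigest()
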
diff --git a/test/unit/v_all/test_api.py b/test/unit/v_all/test_api.py
index 43ecee1..6e3dcfe 100644
--- a/test/unit/v_all/test_api.py
+++ b/test/unit/v_all/test_api.py
@@ -22,6 +22,86 @@ from apiver_deps_exception import BucketIdNotFound
 from ..test_base import TestBase
 
 
+class DummyA:
+    def __init__(self, *args, **kwargs):
+        pass
+
+
+class DummyB:
+    def __init__(self, *args, **kwargs):
+        pass
+
+
+class TestServices:
+    @pytest.mark.apiver(from_ver=2)
+    @pytest.mark.parametrize(
+        ('kwargs', '_raw_api_class'),
+        [
+            [
+                {
+                    'max_upload_workers': 1,
+                    'max_copy_workers': 2,
+                    'max_download_workers': 3,
+                    'save_to_buffer_size': 4,
+                    'check_download_hash': False,
+                    'max_download_streams_per_file': 5,
+                },
+                DummyA,
+            ],
+            [
+                {
+                    'max_upload_workers': 2,
+                    'max_copy_workers': 3,
+                    'max_download_workers': 4,
+                    'save_to_buffer_size': 5,
+                    'check_download_hash': True,
+                    'max_download_streams_per_file': 6,
+                },
+                DummyB,
+            ],
+        ],
+    )  # yapf: disable
+    def test_api_initialization(self, kwargs, _raw_api_class):
+        self.account_info = InMemoryAccountInfo()
+        self.cache = InMemoryCache()
+
+        api_config = B2HttpApiConfig(_raw_api_class=_raw_api_class)
+
+        self.api = B2Api(
+            self.account_info,
+            self.cache,
+            api_config=api_config,
+
+            **kwargs
+        )  # yapf: disable
+
+        assert self.api.account_info is self.account_info
+        assert self.api.api_config is api_config
+        assert self.api.cache is self.cache
+
+        assert self.api.session.account_info is self.account_info
+        assert self.api.session.cache is self.cache
+        assert isinstance(self.api.session.raw_api, _raw_api_class)
+
+        assert isinstance(self.api.file_version_factory, B2Api.FILE_VERSION_FACTORY_CLASS)
+        assert isinstance(
+            self.api.download_version_factory,
+            B2Api.DOWNLOAD_VERSION_FACTORY_CLASS,
+        )
+
+        services = self.api.services
+        assert isinstance(services, B2Api.SERVICES_CLASS)
+
+        # max copy/upload/download workers could only be verified with mocking
+
+        download_manager = services.download_manager
+        assert isinstance(download_manager, services.DOWNLOAD_MANAGER_CLASS)
+
+        assert download_manager.write_buffer_size == kwargs['save_to_buffer_size']
+        assert download_manager.check_hash == kwargs['check_download_hash']
+        assert download_manager.strategies[0].max_streams == kwargs['max_download_streams_per_file']
+
+
 class TestApi(TestBase):
     def setUp(self):
         self.account_info = InMemoryAccountInfo()

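
The new TestServices cases show the transfer tuning options accepted directly by the B2Api constructor in apiver v2 and later; a construction sketch with illustrative values:

    from b2sdk.v2 import B2Api, InMemoryAccountInfo, InMemoryCache

    api = B2Api(
        InMemoryAccountInfo(),
        InMemoryCache(),
        max_upload_workers=10,
        max_copy_workers=10,
        max_download_workers=10,
        save_to_buffer_size=1024 * 1024,
        check_download_hash=True,
        max_download_streams_per_file=4,
    )
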