New Upstream Release - zarr

Ready changes

Summary

Merged new upstream version: 2.14.2+ds (was: 2.13.6+ds).

Diff

diff --git a/.github/workflows/minimal.yml b/.github/workflows/minimal.yml
index 2cde38e..4de5aca 100644
--- a/.github/workflows/minimal.yml
+++ b/.github/workflows/minimal.yml
@@ -24,6 +24,7 @@ jobs:
       shell: "bash -l {0}"
       env:
         ZARR_V3_EXPERIMENTAL_API: 1
+        ZARR_V3_SHARDING: 1
       run: |
         conda activate minimal
         python -m pip install .
@@ -32,6 +33,7 @@ jobs:
       shell: "bash -l {0}"
       env:
         ZARR_V3_EXPERIMENTAL_API: 1
+        ZARR_V3_SHARDING: 1
       run: |
         conda activate minimal
         rm -rf fixture/
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 872ce52..cee2ca7 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -70,6 +70,7 @@ jobs:
         ZARR_TEST_MONGO: 1
         ZARR_TEST_REDIS: 1
         ZARR_V3_EXPERIMENTAL_API: 1
+        ZARR_V3_SHARDING: 1
       run: |
         conda activate zarr-env
         mkdir ~/blob_emulator
diff --git a/.github/workflows/windows-testing.yml b/.github/workflows/windows-testing.yml
index ea1d0f6..2f8922b 100644
--- a/.github/workflows/windows-testing.yml
+++ b/.github/workflows/windows-testing.yml
@@ -52,6 +52,7 @@ jobs:
         env:
           ZARR_TEST_ABS: 1
           ZARR_V3_EXPERIMENTAL_API: 1
+          ZARR_V3_SHARDING: 1
       - name: Conda info
         shell: bash -l {0}
         run: conda info
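
All three workflow changes add the same switch: ZARR_V3_SHARDING=1 opts the test runs into the experimental sharding support introduced in this release. zarr reads the variable once at import time (see zarr/_storage/v3_storage_transformers.py further down in this diff); a minimal sketch of that gate, mirroring the logic added there:

    import os

    # any value other than "0"/"false" (case-insensitive) enables the feature
    v3_sharding_available = os.environ.get('ZARR_V3_SHARDING', '0').lower() not in ['0', 'false']

    if not v3_sharding_available:
        raise NotImplementedError(
            "Using V3 sharding is experimental and not yet finalized! "
            "To enable support, set: ZARR_V3_SHARDING=1"
        )
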
diff --git a/debian/changelog b/debian/changelog
index e35b0b1..e2c2db7 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+zarr (2.14.2+ds-1) UNRELEASED; urgency=low
+
+  * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Mon, 13 Mar 2023 18:09:27 -0000
+
 zarr (2.13.6+ds-1) unstable; urgency=medium
 
   * New upstream release.
diff --git a/docs/_static/custom.css b/docs/_static/custom.css
index a0e3929..487addf 100644
--- a/docs/_static/custom.css
+++ b/docs/_static/custom.css
@@ -1,36 +1,123 @@
-/* override text color */
-.wy-menu-vertical a {
-    color: #000000;
+@import url('https://fonts.googleapis.com/css2?family=Lato:ital,wght@0,400;0,700;0,900;1,400;1,700;1,900&family=Open+Sans:ital,wght@0,400;0,600;1,400;1,600&display=swap');
+
+.navbar-brand img {
+   height: 75px;
+}
+.navbar-brand {
+   height: 75px;
+}
+
+body {
+  font-family: 'Open Sans', sans-serif;
+}
+
+pre, code {
+  font-size: 100%;
+  line-height: 155%;
+}
+
+/* Style the active version button.
+
+- dev: orange
+- stable: green
+- old, PR: red
+
+Colors from:
+
+Wong, B. Points of view: Color blindness.
+Nat Methods 8, 441 (2011). https://doi.org/10.1038/nmeth.1618
+*/
+
+/* If the active version has the name "dev", style it orange */
+#version_switcher_button[data-active-version-name*="dev"] {
+  background-color: #E69F00;
+  border-color: #E69F00;
+  color:#000000;
+}
+
+/* green for `stable` */
+#version_switcher_button[data-active-version-name*="stable"] {
+  background-color: #009E73;
+  border-color: #009E73;
+}
+
+/* red for `old` */
+#version_switcher_button:not([data-active-version-name*="stable"], [data-active-version-name*="dev"], [data-active-version-name=""]) {
+  background-color: #980F0F;
+  border-color: #980F0F;
 }
 
-/* Sidebar background color */
-.wy-nav-side, div.wy-side-nav-search {
-  background-color: rgb(198, 197, 213, 0); /* full alpha */
+/* Main page overview cards */
+
+.sd-card {
+  background: #fff;
+  border-radius: 0;
+  padding: 30px 10px 20px 10px;
+  margin: 10px 0px;
+}
+
+.sd-card .sd-card-header {
+  text-align: center;
+}
+
+.sd-card .sd-card-header .sd-card-text {
+  margin: 0px;
+}
+
+.sd-card .sd-card-img-top {
+  height: 52px;
+  width: 52px;
+  margin-left: auto;
+  margin-right: auto;
+}
+
+.sd-card .sd-card-header {
+  border: none;
+  background-color: white;
+  color: #150458 !important;
+  font-size: var(--pst-font-size-h5);
+  font-weight: bold;
+  padding: 2.5rem 0rem 0.5rem 0rem;
+}
+
+.sd-card .sd-card-footer {
+  border: none;
+  background-color: white;
+}
+
+.sd-card .sd-card-footer .sd-card-text {
+  max-width: 220px;
+  margin-left: auto;
+  margin-right: auto;
+}
+
+/* Dark theme tweaking */
+html[data-theme=dark] .sd-card img[src*='.svg'] {
+    filter: invert(0.82) brightness(0.8) contrast(1.2);
 }
 
-/* Sidebar link click color */
-.wy-menu-vertical .toctree-l1 > a:active {
-  background-color: rgb(198, 197, 213);
-  color: rgb(0, 0, 0);
+/* Main index page overview cards */
+html[data-theme=dark] .sd-card {
+  background-color:var(--pst-color-background);
 }
 
-/* Link color is darker to make hovering more clear */
-.wy-menu-vertical .toctree-l1 > a:hover {
-  background-color: rgb(198, 197, 213);
-  color: rgb(0, 0, 0);
+html[data-theme=dark] .sd-shadow-sm {
+    box-shadow: 0 .1rem 1rem rgba(250, 250, 250, .6) !important
 }
 
-.wy-menu-vertical li.current > a:hover, .wy-menu-vertical li.current > a:active {
-  color: #404040;
-  background-color: #F5F5F5;
+html[data-theme=dark] .sd-card .sd-card-header {
+  background-color:var(--pst-color-background);
+  color: #150458 !important;
 }
 
-/* On hover over logo */
-.wy-side-nav-search > a:hover, .wy-side-nav-search .wy-dropdown > a:hover {
-  background: inherit;
+html[data-theme=dark] .sd-card .sd-card-footer {
+  background-color:var(--pst-color-background);
 }
 
-/* Border around search box */
-.wy-side-nav-search input[type="text"] {
-  border: 0px;
+html[data-theme=dark] h1 {
+  color: var(--pst-color-primary);
 }
+
+html[data-theme=dark] h3 {
+  color: #0a6774;
+}
\ No newline at end of file
diff --git a/docs/_static/custom.js b/docs/_static/custom.js
new file mode 100644
index 0000000..dcb584e
--- /dev/null
+++ b/docs/_static/custom.js
@@ -0,0 +1,18 @@
+// handle redirects
+(() => {
+    let anchorMap = {
+        "installation": "installation.html",
+        "getting-started": "getting_started.html#getting-started",
+        "highlights": "getting_started.html#highlights",
+        "contributing": "contributing.html",
+        "projects-using-zarr": "getting_started.html#projects-using-zarr",
+        "acknowledgments": "acknowledgments.html",
+        "contents": "getting_started.html#contents",
+        "indices-and-tables": "api.html#indices-and-tables"
+    }
+
+    let hash = window.location.hash.substring(1);
+    if (hash && hash in anchorMap) {
+            window.location.replace(anchorMap[hash]);
+    }
+})();
diff --git a/docs/_static/index_api.svg b/docs/_static/index_api.svg
new file mode 100644
index 0000000..69f7ba1
--- /dev/null
+++ b/docs/_static/index_api.svg
@@ -0,0 +1,97 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="99.058548mm"
+   height="89.967583mm"
+   viewBox="0 0 99.058554 89.967582"
+   version="1.1"
+   id="svg1040"
+   inkscape:version="0.92.4 (f8dce91, 2019-08-02)"
+   sodipodi:docname="index_api.svg">
+  <defs
+     id="defs1034" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="0.35"
+     inkscape:cx="533.74914"
+     inkscape:cy="10.90433"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:window-width="930"
+     inkscape:window-height="472"
+     inkscape:window-x="2349"
+     inkscape:window-y="267"
+     inkscape:window-maximized="0" />
+  <metadata
+     id="metadata1037">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(195.19933,-1.0492759)">
+    <g
+       id="g1008"
+       transform="matrix(1.094977,0,0,1.094977,-521.5523,-198.34055)">
+      <path
+         inkscape:connector-curvature="0"
+         id="path899"
+         d="M 324.96812,187.09499 H 303.0455 v 72.1639 h 22.67969"
+         style="fill:none;stroke:#459DB9;stroke-width:10;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+      <path
+         inkscape:connector-curvature="0"
+         id="path899-3"
+         d="m 361.58921,187.09499 h 21.92262 v 72.1639 h -22.67969"
+         style="fill:none;stroke:#459DB9;stroke-width:10;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+      <g
+         transform="translate(415.87139,46.162126)"
+         id="g944">
+        <circle
+           style="fill:#459DB9;fill-opacity:1;stroke:#459DB9;stroke-width:4.53704548;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+           id="path918"
+           cx="-84.40152"
+           cy="189.84375"
+           r="2.2293637" />
+        <circle
+           style="fill:#459DB9;fill-opacity:1;stroke:#459DB9;stroke-width:4.53704548;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+           id="path918-5"
+           cx="-72.949402"
+           cy="189.84375"
+           r="2.2293637" />
+        <circle
+           style="fill:#459DB9;fill-opacity:1;stroke:#459DB9;stroke-width:4.53704548;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+           id="path918-6"
+           cx="-61.497284"
+           cy="189.84375"
+           r="2.2293637" />
+      </g>
+    </g>
+  </g>
+</svg>
diff --git a/docs/_static/index_contribute.svg b/docs/_static/index_contribute.svg
new file mode 100644
index 0000000..de3d902
--- /dev/null
+++ b/docs/_static/index_contribute.svg
@@ -0,0 +1,76 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="89.624855mm"
+   height="89.96759mm"
+   viewBox="0 0 89.62486 89.96759"
+   version="1.1"
+   id="svg1040"
+   inkscape:version="0.92.4 (f8dce91, 2019-08-02)"
+   sodipodi:docname="index_contribute.svg">
+  <defs
+     id="defs1034" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="0.35"
+     inkscape:cx="683.11893"
+     inkscape:cy="-59.078181"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:window-width="930"
+     inkscape:window-height="472"
+     inkscape:window-x="2349"
+     inkscape:window-y="267"
+     inkscape:window-maximized="0" />
+  <metadata
+     id="metadata1037">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(234.72009,17.466935)">
+    <g
+       id="g875"
+       transform="matrix(0.99300176,0,0,0.99300176,-133.24106,-172.58804)">
+      <path
+         sodipodi:nodetypes="ccc"
+         inkscape:connector-curvature="0"
+         id="path869"
+         d="m -97.139881,161.26069 47.247024,40.25446 -47.247024,40.25446"
+         style="fill:none;stroke:#459DB9;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+      <path
+         inkscape:connector-curvature="0"
+         id="path871"
+         d="m -49.514879,241.81547 h 32.505951"
+         style="fill:none;stroke:#459DB9;stroke-width:10;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" />
+    </g>
+  </g>
+</svg>
diff --git a/docs/_static/index_getting_started.svg b/docs/_static/index_getting_started.svg
new file mode 100644
index 0000000..2d36622
--- /dev/null
+++ b/docs/_static/index_getting_started.svg
@@ -0,0 +1,66 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="101.09389mm"
+   height="89.96759mm"
+   viewBox="0 0 101.09389 89.96759"
+   version="1.1"
+   id="svg1040"
+   inkscape:version="0.92.4 (f8dce91, 2019-08-02)"
+   sodipodi:docname="index_getting_started.svg">
+  <defs
+     id="defs1034" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="0.35"
+     inkscape:cx="-93.242129"
+     inkscape:cy="-189.9825"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:window-width="1875"
+     inkscape:window-height="1056"
+     inkscape:window-x="1965"
+     inkscape:window-y="0"
+     inkscape:window-maximized="1" />
+  <metadata
+     id="metadata1037">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(2.9219487,-8.5995374)">
+    <path
+       style="fill:#459DB9;fill-opacity:1;stroke-width:0.20233451"
+       d="M 37.270955,98.335591 C 33.358064,97.07991 31.237736,92.52319 32.964256,89.08022 c 0.18139,-0.361738 4.757999,-5.096629 10.17021,-10.521968 l 9.84041,-9.864254 -4.03738,-4.041175 -4.037391,-4.041172 -4.96415,4.916665 c -3.61569,3.581096 -5.238959,5.04997 -5.975818,5.407377 l -1.011682,0.490718 H 17.267525 1.5866055 L 0.65034544,70.96512 C -2.2506745,69.535833 -3.5952145,66.18561 -2.5925745,62.884631 c 0.53525,-1.762217 1.61699004,-3.050074 3.22528014,-3.839847 l 1.15623996,-0.56778 13.2591094,-0.05613 13.259111,-0.05613 11.5262,-11.527539 11.526199,-11.527528 H 40.622647 c -12.145542,0 -12.189222,-0.0046 -13.752801,-1.445851 -2.229871,-2.055423 -2.162799,-5.970551 0.135998,-7.938238 1.475193,-1.262712 1.111351,-1.238469 18.588522,-1.238469 12.899229,0 16.035311,0.05193 16.692589,0.276494 0.641832,0.219264 2.590731,2.051402 9.416301,8.852134 l 8.606941,8.575638 h 6.848168 c 4.837422,0 7.092281,0.07311 7.679571,0.249094 0.48064,0.144008 1.22985,0.634863 1.77578,1.163429 2.383085,2.307333 1.968685,6.539886 -0.804989,8.221882 -0.571871,0.346781 -1.38284,0.687226 -1.80217,0.756523 -0.41933,0.06928 -4.2741,0.127016 -8.56615,0.128238 -6.56998,0.0016 -7.977492,-0.04901 -8.902732,-0.321921 -0.975569,-0.287742 -1.400468,-0.622236 -3.783999,-2.978832 l -2.685021,-2.654679 -5.05411,5.051071 -5.0541,5.051081 3.926292,3.947202 c 2.365399,2.378001 4.114289,4.309171 4.399158,4.857713 0.39266,0.75606 0.47311,1.219412 0.474321,2.731516 0.003,3.083647 0.620779,2.331942 -13.598011,16.531349 -10.273768,10.259761 -12.679778,12.563171 -13.500979,12.92519 -1.267042,0.55857 -3.156169,0.681342 -4.390271,0.285321 z m 40.130741,-65.45839 c -2.212909,-0.579748 -3.782711,-1.498393 -5.51275,-3.226063 -2.522111,-2.518633 -3.633121,-5.181304 -3.633121,-8.707194 0,-3.530699 1.11238,-6.197124 3.631161,-8.704043 4.866751,-4.8438383 12.324781,-4.8550953 17.211791,-0.026 3.908758,3.862461 4.818578,9.377999 2.372188,14.380771 -0.846209,1.730481 -3.39493,4.326384 -5.143839,5.239072 -2.69708,1.407492 -6.042829,1.798628 -8.92543,1.043434 z"
+       id="path1000"
+       inkscape:connector-curvature="0" />
+  </g>
+</svg>
diff --git a/docs/_static/index_user_guide.svg b/docs/_static/index_user_guide.svg
new file mode 100644
index 0000000..bd17053
--- /dev/null
+++ b/docs/_static/index_user_guide.svg
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="123.72241mm"
+   height="89.96759mm"
+   viewBox="0 0 123.72242 89.96759"
+   version="1.1"
+   id="svg1040"
+   inkscape:version="0.92.4 (f8dce91, 2019-08-02)"
+   sodipodi:docname="index_userguide.svg">
+  <defs
+     id="defs1034" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="0.35"
+     inkscape:cx="332.26618"
+     inkscape:cy="83.744004"
+     inkscape:document-units="mm"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:window-width="930"
+     inkscape:window-height="472"
+     inkscape:window-x="2349"
+     inkscape:window-y="267"
+     inkscape:window-maximized="0" />
+  <metadata
+     id="metadata1037">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(141.8903,-20.32143)">
+    <path
+       style="fill:#459DB9;fill-opacity:1;stroke-width:0.20483544"
+       d="m -139.53374,110.1657 c -0.80428,-0.24884 -1.71513,-1.11296 -2.07107,-1.96486 -0.23905,-0.57214 -0.28453,-6.28104 -0.28453,-35.720988 0,-38.274546 -0.079,-35.840728 1.19849,-36.91568 0.58869,-0.495345 4.63766,-2.187548 8.47998,-3.544073 l 1.58749,-0.560453 v -3.309822 c 0,-3.025538 0.0396,-3.388179 0.46086,-4.222122 0.68808,-1.362003 1.38671,-1.714455 4.60319,-2.322195 4.12797,-0.779966 5.13304,-0.912766 8.81544,-1.16476 11.80964,-0.808168 22.80911,2.509277 30.965439,9.3392 1.750401,1.465747 3.840861,3.5635 5.0903,5.108065 l 0.659122,0.814805 0.659109,-0.814805 c 1.249431,-1.544565 3.33988,-3.642318 5.09029,-5.108065 8.156331,-6.829923 19.155791,-10.147368 30.965441,-9.3392 3.682389,0.251994 4.68748,0.384794 8.81544,1.16476 3.21647,0.60774 3.91511,0.960192 4.60318,2.322195 0.4213,0.833943 0.46087,1.196584 0.46087,4.222122 v 3.309822 l 1.58748,0.560453 c 4.10165,1.448077 7.98852,3.072753 8.5259,3.563743 1.22643,1.120567 1.15258,-1.245868 1.15258,36.927177 0,34.567591 -0.005,35.083151 -0.40663,35.903991 -0.22365,0.45804 -0.73729,1.05665 -1.14143,1.33024 -1.22281,0.82783 -2.17721,0.70485 -5.86813,-0.7561 -9.19595,-3.63998 -18.956011,-6.38443 -26.791332,-7.53353 -3.02827,-0.44412 -9.26189,-0.61543 -11.77821,-0.3237 -5.19357,0.60212 -8.736108,2.05527 -11.700039,4.79936 -0.684501,0.63371 -1.466141,1.23646 -1.736979,1.33942 -0.63859,0.2428 -4.236521,0.2428 -4.875112,0 -0.27083,-0.10296 -1.05247,-0.70571 -1.73696,-1.33942 -2.96395,-2.74409 -6.50648,-4.19724 -11.700058,-4.79936 -2.516312,-0.29173 -8.749941,-0.12042 -11.778201,0.3237 -7.78194,1.14127 -17.39965,3.83907 -26.73341,7.49883 -3.38325,1.32658 -4.15525,1.50926 -5.11851,1.21125 z m 4.2107,-5.34052 c 5.86759,-2.29858 14.40398,-4.922695 20.2018,-6.210065 6.31584,-1.402418 8.5236,-1.646248 14.91592,-1.647338 4.68699,-7.94e-4 6.013661,0.0632 7.257809,0.3497 0.837332,0.19286 1.561052,0.312028 1.60828,0.264819 0.147111,-0.147119 -1.803289,-1.307431 -4.154879,-2.471801 -8.12511,-4.023029 -18.27311,-4.986568 -29.0861,-2.761718 -1.09536,0.22538 -2.32708,0.40827 -2.73715,0.406418 -1.12787,-0.005 -2.3054,-0.76382 -2.84516,-1.8332 l -0.46086,-0.913098 V 62.99179 35.97471 l -0.56331,0.138329 c -0.30981,0.07608 -1.89985,0.665075 -3.5334,1.308881 -2.27551,0.896801 -2.96414,1.252878 -2.94452,1.522563 0.014,0.193604 0.0372,15.284513 0.0512,33.535345 0.014,18.250839 0.0538,33.183322 0.0884,33.183322 0.0346,0 1.02543,-0.3771 2.20198,-0.83801 z m 113.006991,-32.697216 -0.0518,-33.535203 -3.17495,-1.272156 c -1.74623,-0.699685 -3.33627,-1.278755 -3.53341,-1.286819 -0.33966,-0.01389 -0.35847,1.401778 -0.35847,26.980216 v 26.994863 l -0.46087,0.913112 c -0.53976,1.06939 -1.71729,1.828088 -2.84515,1.833189 -0.41008,0.0021 -1.6418,-0.181031 -2.73716,-0.406421 -11.888201,-2.446089 -22.84337,-1.046438 -31.491022,4.02332 -1.68175,0.985941 -2.216748,1.467501 -1.36534,1.228942 1.575181,-0.441362 4.990592,-0.73864 8.524862,-0.742011 5.954408,-0.005 11.43046,0.791951 19.10874,2.78333 3.9516,1.024874 12.1555,3.687454 15.6699,5.085704 1.23926,0.49306 2.36869,0.90517 2.50985,0.9158 0.20489,0.0155 0.2462,-6.745894 0.20483,-33.515866 z m -59.76135,-2.233777 V 40.065438 l -0.95972,-1.357442 c -1.380522,-1.952627 -5.376262,-5.847994 -7.64336,-7.45136 -3.778692,-2.672401 -9.063392,-4.943324 -13.672511,-5.875304 -3.19731,-0.646503 -5.23069,-0.833103 -9.05886,-0.831312 -4.37716,0.0021 -7.70223,0.349169 -11.83461,1.235469 l -1.07538,0.230645 v 31.242342 c 0,26.565778 0.0426,31.226011 0.28429,31.133261 0.15637,-0.06 1.42379,-0.297169 2.81648,-0.527026 12.37657,-2.042634 
23.21658,-0.346861 32.521639,5.087596 2.10018,1.226558 5.20202,3.618878 6.880942,5.30692 0.788609,0.792909 1.502978,1.446609 1.587468,1.452679 0.0845,0.006 0.153622,-13.411893 0.153622,-29.817719 z m 5.80221,28.3766 c 6.21476,-6.141601 15.08488,-10.061509 25.025529,-11.05933 4.262419,-0.427849 11.579921,-0.0054 16.017661,0.924912 0.75932,0.15916 1.45259,0.244888 1.54058,0.190498 0.088,-0.05434 0.16003,-14.060382 0.16003,-31.124436 V 26.176883 l -0.52136,-0.198219 c -0.66893,-0.254325 -4.77649,-0.95482 -7.159981,-1.221048 -2.41372,-0.269605 -8.559851,-0.266589 -10.759229,0.0052 -6.458111,0.798299 -12.584091,3.083792 -17.405651,6.49374 -2.267091,1.603366 -6.262831,5.498733 -7.64336,7.45136 l -0.959721,1.357438 v 29.828747 c 0,16.405812 0.0532,29.828746 0.11802,29.828746 0.065,0 0.77928,-0.65347 1.587482,-1.452149 z"
+       id="path845"
+       inkscape:connector-curvature="0"
+       sodipodi:nodetypes="csscccscsssscsssssscscsccsccsccscsscccccccscccccccccsccscscscccscccsccssccsscccscccccsccccsccscsccsscc" />
+  </g>
+</svg>
diff --git a/docs/acknowledgments.rst b/docs/acknowledgments.rst
new file mode 100644
index 0000000..36cd1f5
--- /dev/null
+++ b/docs/acknowledgments.rst
@@ -0,0 +1,76 @@
+Acknowledgments
+===============
+
+The following people have contributed to the development of Zarr by contributing code,
+documentation, code reviews, comments and/or ideas:
+
+* :user:`Alistair Miles <alimanfoo>`
+* :user:`Altay Sansal <tasansal>`
+* :user:`Anderson Banihirwe <andersy005>`
+* :user:`Andrew Fulton <andrewfulton9>`
+* :user:`Andrew Thomas <amcnicho>`
+* :user:`Anthony Scopatz <scopatz>`
+* :user:`Attila Bergou <abergou>`
+* :user:`BGCMHou <BGCMHou>`
+* :user:`Ben Jeffery <benjeffery>`
+* :user:`Ben Williams <benjaminhwilliams>`
+* :user:`Boaz Mohar <boazmohar>`
+* :user:`Charles Noyes <CSNoyes>`
+* :user:`Chris Barnes <clbarnes>`
+* :user:`David Baddeley <David-Baddeley>`
+* :user:`Davis Bennett <d-v-b>`
+* :user:`Dimitri Papadopoulos Orfanos <DimitriPapadopoulos>`
+* :user:`Eduardo Gonzalez <eddienko>`
+* :user:`Elliott Sales de Andrade <QuLogic>`
+* :user:`Eric Prestat <ericpre>`
+* :user:`Eric Younkin <ericgyounkin>`
+* :user:`Francesc Alted <FrancescAlted>`
+* :user:`Greggory Lee <grlee77>`
+* :user:`Gregory R. Lee <grlee77>`
+* :user:`Ian Hunt-Isaak <ianhi>`
+* :user:`James Bourbeau <jrbourbeau>`
+* :user:`Jan Funke <funkey>`
+* :user:`Jerome Kelleher <jeromekelleher>`
+* :user:`Joe Hamman <jhamman>`
+* :user:`Joe Jevnik <llllllllll>`
+* :user:`John Kirkham <jakirkham>`
+* :user:`Josh Moore <joshmoore>`
+* :user:`Juan Nunez-Iglesias <jni>`
+* :user:`Justin Swaney <jmswaney>`
+* :user:`Mads R. B. Kristensen <madsbk>`
+* :user:`Mamy Ratsimbazafy <mratsim>`
+* :user:`Martin Durant <martindurant>`
+* :user:`Matthew Rocklin <mrocklin>`
+* :user:`Matthias Bussonnier <Carreau>`
+* :user:`Mattia Almansi <malmans2>`
+* :user:`Noah D Brenowitz <nbren12>`
+* :user:`Oren Watson <orenwatson>`
+* :user:`Pavithra Eswaramoorthy <pavithraes>`
+* :user:`Poruri Sai Rahul <rahulporuri>`
+* :user:`Prakhar Goel <newt0311>`
+* :user:`Raphael Dussin <raphaeldussin>`
+* :user:`Ray Bell <raybellwaves>`
+* :user:`Richard Scott <RichardScottOZ>`
+* :user:`Richard Shaw <jrs65>`
+* :user:`Ryan Abernathey <rabernat>`
+* :user:`Ryan Williams <ryan-williams>`
+* :user:`Saransh Chopra <Saransh-cpp>`
+* :user:`Sebastian Grill <yetyetanotherusername>`
+* :user:`Shikhar Goenka <shikharsg>`
+* :user:`Shivank Chaudhary <Alt-Shivam>`
+* :user:`Stephan Hoyer <shoyer>`
+* :user:`Stephan Saalfeld <axtimwalde>`
+* :user:`Tarik Onalan <onalant>`
+* :user:`Tim Crone <tjcrone>`
+* :user:`Tobias Kölling <d70-t>`
+* :user:`Tom Augspurger <TomAugspurger>`
+* :user:`Tom White <tomwhite>`
+* :user:`Tommy Tran <potter420>`
+* :user:`Trevor Manz <manzt>`
+* :user:`Vincent Schut <vincentschut>`
+* :user:`Vyas Ramasubramani <vyasr>`
+* :user:`Zain Patel <mzjp2>`
+* :user:`gsakkis`
+* :user:`hailiangzhang <hailiangzhang>`
+* :user:`pmav99 <pmav99>`
+* :user:`sbalmer <sbalmer>`
\ No newline at end of file
diff --git a/docs/api.rst b/docs/api.rst
index 8162ada..2b6e7ea 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -13,3 +13,10 @@ API reference
     api/codecs
     api/attrs
     api/sync
+
+Indices and tables
+------------------
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/conf.py b/docs/conf.py
index 2639f76..413d648 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -45,6 +45,7 @@ extensions = [
     'numpydoc',
     'sphinx_issues',
     "sphinx_copybutton",
+    "sphinx_design"
 ]
 
 numpydoc_show_class_members = False
@@ -124,12 +125,26 @@ todo_include_todos = False
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'sphinx_rtd_theme'
+html_theme = 'pydata_sphinx_theme'
+
+html_favicon = '_static/logo1.png'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-html_theme_options = {'logo_only': True}
+html_theme_options = {
+  "github_url": "https://github.com/zarr-developers/zarr-python",
+  "twitter_url": "https://twitter.com/zarr_dev",
+  "icon_links": [
+    {
+        "name": "Zarr Dev",
+        "url": "https://zarr.dev/",
+        "icon": "_static/logo1.png",
+        "type": "local"
+    },
+  ],
+  "collapse_navigation": True
+}
 
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
@@ -160,6 +175,9 @@ def setup(app):
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
 html_static_path = ['_static']
+html_js_files = [
+    'custom.js',
+]
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
@@ -246,7 +264,7 @@ latex_elements = {
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (main_doc, 'zarr.tex', 'zarr Documentation',
+    (main_doc, 'zarr.tex', 'Zarr-Python',
      author, 'manual'),
 ]
 
@@ -276,7 +294,7 @@ latex_documents = [
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    (main_doc, 'zarr', 'zarr Documentation',
+    (main_doc, 'zarr', 'Zarr-Python',
      [author], 1)
 ]
 
@@ -290,7 +308,7 @@ man_pages = [
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (main_doc, 'zarr', 'zarr Documentation',
+    (main_doc, 'zarr', 'Zarr-Python',
      author, 'zarr', 'One line description of project.',
      'Miscellaneous'),
 ]
diff --git a/docs/getting_started.rst b/docs/getting_started.rst
new file mode 100644
index 0000000..77d4532
--- /dev/null
+++ b/docs/getting_started.rst
@@ -0,0 +1,46 @@
+Getting Started
+===============
+
+Zarr is a format for the storage of chunked, compressed, N-dimensional arrays
+inspired by `HDF5 <https://www.hdfgroup.org/HDF5/>`_, `h5py
+<https://www.h5py.org/>`_ and `bcolz <https://bcolz.readthedocs.io/>`_.
+
+The project is fiscally sponsored by `NumFOCUS <https://numfocus.org/>`_, a US
+501(c)(3) public charity, and development is supported by the
+`MRC Centre for Genomics and Global Health <https://www.cggh.org>`_
+and the `Chan Zuckerberg Initiative <https://chanzuckerberg.com/>`_.
+
+These documents describe the Zarr Python implementation. More information
+about the Zarr format can be found on the `main website <https://zarr.dev>`_.
+
+Highlights
+----------
+
+* Create N-dimensional arrays with any NumPy dtype.
+* Chunk arrays along any dimension.
+* Compress and/or filter chunks using any NumCodecs_ codec.
+* Store arrays in memory, on disk, inside a Zip file, on S3, ...
+* Read an array concurrently from multiple threads or processes.
+* Write to an array concurrently from multiple threads or processes.
+* Organize arrays into hierarchies via groups.
+
+Contributing
+------------
+
+Feedback and bug reports are very welcome; please get in touch via
+the `GitHub issue tracker <https://github.com/zarr-developers/zarr-python/issues>`_. See
+:doc:`contributing` for further information about contributing to Zarr.
+
+Projects using Zarr
+-------------------
+
+If you are using Zarr, we would `love to hear about it
+<https://github.com/zarr-developers/community/issues/19>`_.
+
+.. toctree::
+    :caption: Getting Started
+    :hidden:
+
+    installation
+
+.. _NumCodecs: https://numcodecs.readthedocs.io/
diff --git a/docs/index.rst b/docs/index.rst
index dd6abc1..50060d1 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,142 +1,104 @@
-.. zarr documentation main file, created by
-   sphinx-quickstart on Mon May  2 21:40:09 2016.
-
-Zarr
-====
-
-Zarr is a format for the storage of chunked, compressed, N-dimensional arrays
-inspired by `HDF5 <https://www.hdfgroup.org/HDF5/>`_, `h5py
-<https://www.h5py.org/>`_ and `bcolz <https://bcolz.readthedocs.io/>`_.
-
-The project is fiscally sponsored by `NumFOCUS <https://numfocus.org/>`_, a US
-501(c)(3) public charity, and development is supported by the
-`MRC Centre for Genomics and Global Health <https://www.cggh.org>`_
-and the `Chan Zuckerberg Initiative <https://chanzuckerberg.com/>`_.
-
-These documents describe the Zarr Python implementation. More information
-about the Zarr format can be found on the `main website <https://zarr.dev>`_.
-
-Highlights
-----------
-
- * Create N-dimensional arrays with any NumPy dtype.
- * Chunk arrays along any dimension.
- * Compress and/or filter chunks using any NumCodecs_ codec.
- * Store arrays in memory, on disk, inside a Zip file, on S3, ...
- * Read an array concurrently from multiple threads or processes.
- * Write to an array concurrently from multiple threads or processes.
- * Organize arrays into hierarchies via groups.
-
-Contributing
-------------
-
-Feedback and bug reports are very welcome, please get in touch via
-the `GitHub issue tracker <https://github.com/zarr-developers/zarr-python/issues>`_. See
-:doc:`contributing` for further information about contributing to Zarr.
-
-Projects using Zarr
--------------------
-
-If you are using Zarr, we would `love to hear about it
-<https://github.com/zarr-developers/community/issues/19>`_.
-
-Acknowledgments
----------------
-
-The following people have contributed to the development of Zarr by contributing code,
-documentation, code reviews, comments and/or ideas:
-
-:user:`Alistair Miles <alimanfoo>`
-:user:`Altay Sansal <tasansal>`
-:user:`Anderson Banihirwe <andersy005>`
-:user:`Andrew Fulton <andrewfulton9>`
-:user:`Andrew Thomas <amcnicho>`
-:user:`Anthony Scopatz <scopatz>`
-:user:`Attila Bergou <abergou>`
-:user:`BGCMHou <BGCMHou>`
-:user:`Ben Jeffery <benjeffery>`
-:user:`Ben Williams <benjaminhwilliams>`
-:user:`Boaz Mohar <boazmohar>`
-:user:`Charles Noyes <CSNoyes>`
-:user:`Chris Barnes <clbarnes>`
-:user:`David Baddeley <David-Baddeley>`
-:user:`Davis Bennett <d-v-b>`
-:user:`Dimitri Papadopoulos Orfanos <DimitriPapadopoulos>`
-:user:`Eduardo Gonzalez <eddienko>`
-:user:`Elliott Sales de Andrade <QuLogic>`
-:user:`Eric Prestat <ericpre>`
-:user:`Eric Younkin <ericgyounkin>`
-:user:`Francesc Alted <FrancescAlted>`
-:user:`Greggory Lee <grlee77>`
-:user:`Gregory R. Lee <grlee77>`
-:user:`Ian Hunt-Isaak <ianhi>`
-:user:`James Bourbeau <jrbourbeau>`
-:user:`Jan Funke <funkey>`
-:user:`Jerome Kelleher <jeromekelleher>`
-:user:`Joe Hamman <jhamman>`
-:user:`Joe Jevnik <llllllllll>`
-:user:`John Kirkham <jakirkham>`
-:user:`Josh Moore <joshmoore>`
-:user:`Juan Nunez-Iglesias <jni>`
-:user:`Justin Swaney <jmswaney>`
-:user:`Mads R. B. Kristensen <madsbk>`
-:user:`Mamy Ratsimbazafy <mratsim>`
-:user:`Martin Durant <martindurant>`
-:user:`Matthew Rocklin <mrocklin>`
-:user:`Matthias Bussonnier <Carreau>`
-:user:`Mattia Almansi <malmans2>`
-:user:`Noah D Brenowitz <nbren12>`
-:user:`Oren Watson <orenwatson>`
-:user:`Pavithra Eswaramoorthy <pavithraes>`
-:user:`Poruri Sai Rahul <rahulporuri>`
-:user:`Prakhar Goel <newt0311>`
-:user:`Raphael Dussin <raphaeldussin>`
-:user:`Ray Bell <raybellwaves>`
-:user:`Richard Scott <RichardScottOZ>`
-:user:`Richard Shaw <jrs65>`
-:user:`Ryan Abernathey <rabernat>`
-:user:`Ryan Williams <ryan-williams>`
-:user:`Saransh Chopra <Saransh-cpp>`
-:user:`Sebastian Grill <yetyetanotherusername>`
-:user:`Shikhar Goenka <shikharsg>`
-:user:`Shivank Chaudhary <Alt-Shivam>`
-:user:`Stephan Hoyer <shoyer>`
-:user:`Stephan Saalfeld <axtimwalde>`
-:user:`Tarik Onalan <onalant>`
-:user:`Tim Crone <tjcrone>`
-:user:`Tobias Kölling <d70-t>`
-:user:`Tom Augspurger <TomAugspurger>`
-:user:`Tom White <tomwhite>`
-:user:`Tommy Tran <potter420>`
-:user:`Trevor Manz <manzt>`
-:user:`Vincent Schut <vincentschut>`
-:user:`Vyas Ramasubramani <vyasr>`
-:user:`Zain Patel <mzjp2>`
-:user:`gsakkis`
-:user:`hailiangzhang <hailiangzhang>`
-:user:`pmav99 <pmav99>`
-:user:`sbalmer <sbalmer>`
-
-Contents
---------
+.. _zarr_docs_mainpage:
+
+***********
+Zarr-Python
+***********
 
 .. toctree::
-    :maxdepth: 2
+    :maxdepth: 1
+    :hidden:
 
-    installation
+    getting_started
     tutorial
     api
     spec
-    contributing
     release
     license
-    View homepage <https://zarr.dev/>
+    acknowledgments
+    contributing
+
+**Version**: |version|
+
+**Download documentation**: `Zipped HTML <https://zarr.readthedocs.io/_/downloads/en/stable/htmlzip/>`_
+   
+**Useful links**:
+`Installation <installation.html>`_ |
+`Source Repository <https://github.com/zarr-developers/zarr-python>`_ |
+`Issue Tracker <https://github.com/zarr-developers/zarr-python/issues>`_ |
+`Gitter <https://gitter.im/zarr-developers/community>`_
+
+Zarr is a file storage format for chunked, compressed, N-dimensional arrays based on an open-source specification.
+
+.. grid:: 2
+
+    .. grid-item-card::
+        :img-top: _static/index_getting_started.svg
+
+        Getting Started
+        ^^^^^^^^^^^^^^^
+
+        New to Zarr? Check out the getting started guide. It contains an
+        introduction to Zarr's main concepts and links to additional tutorials.
+
+        +++
+
+        .. button-ref:: getting_started
+            :expand:
+            :color: dark
+            :click-parent:
+
+            To the getting started guide
+
+    .. grid-item-card::
+        :img-top: _static/index_user_guide.svg
+
+        Tutorial
+        ^^^^^^^^
+
+        The tutorial provides working examples of Zarr classes and functions.
+
+        +++
+
+        .. button-ref:: tutorial
+            :expand:
+            :color: dark
+            :click-parent:
+
+            To the Tutorial
+
+    .. grid-item-card::
+        :img-top: _static/index_api.svg
+
+        API Reference
+        ^^^^^^^^^^^^^
+
+        The reference guide contains a detailed description of the functions,
+        modules, and objects included in Zarr. The reference describes how the
+        methods work and which parameters can be used. It assumes that you have an
+        understanding of the key concepts.
+
+        +++
+
+        .. button-ref:: api
+            :expand:
+            :color: dark
+            :click-parent:
+
+            To the API reference guide
+
+    .. grid-item-card::
+        :img-top: _static/index_contribute.svg
+
+        Contributor's Guide
+        ^^^^^^^^^^^^^^^^^^^
+
+        Want to contribute to Zarr? We welcome contributions in the form of bug reports, bug fixes, documentation, enhancement proposals and more. The contributing guidelines will guide you through the process of improving Zarr.
 
-Indices and tables
-------------------
+        +++
 
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
+        .. button-ref:: contributing
+            :expand:
+            :color: dark
+            :click-parent:
 
-.. _NumCodecs: https://numcodecs.readthedocs.io/
+            To the contributor's guide
\ No newline at end of file
diff --git a/docs/release.rst b/docs/release.rst
index 817bdc4..a6c3210 100644
--- a/docs/release.rst
+++ b/docs/release.rst
@@ -10,10 +10,71 @@ Release notes
 
     Unreleased
     ----------
+
 ..
     # .. warning::
     #    Pre-release! Use :command:`pip install --pre zarr` to evaluate this release.
 
+.. _release_2.14.2:
+
+2.14.2
+------
+
+Bug fixes
+~~~~~~~~~
+
+* Ensure ``zarr.group`` uses writeable mode, fixing :issue:`1304`.
+  By :user:`Brandur Thorgrimsson <swordcat>` :issue:`1354`.
+
+.. _release_2.14.1:
+
+2.14.1
+------
+
+Documentation
+~~~~~~~~~~~~~
+
+* Fix API links.
+  By :user:`Josh Moore <joshmoore>` :issue:`1346`.
+
+* Fix unit tests which prevented the conda-forge release.
+  By :user:`Josh Moore <joshmoore>` :issue:`1348`.
+
+.. _release_2.14.0:
+
+2.14.0
+------
+
+Major changes
+~~~~~~~~~~~~~
+
+* Improve Zarr V3 support, adding partial store read/write and storage transformers.
+  Add new features from the `v3 spec <https://zarr-specs.readthedocs.io/en/latest/core/v3.0.html>`_:
+    * storage transformers
+    * `get_partial_values` and `set_partial_values`
+    * efficient `get_partial_values` implementation for `FSStoreV3`
+    * sharding storage transformer
+  By :user:`Jonathan Striebel <jstriebel>`; :issue:`1096`, :issue:`1111`.
+
+* N5 now supports Blosc.
+  Remove warnings emitted when using N5Store or N5FSStore with a blosc-compressed array.
+  By :user:`Davis Bennett <d-v-b>`; :issue:`1331`.
+
+Bug fixes
+~~~~~~~~~
+
+* Allow reading UTF-8 encoded JSON files.
+  By :user:`Nathan Zimmerberg <nhz2>` :issue:`1308`.
+
+* Ensure contiguous data is given to ``FSStore``, only copying if needed.
+  By :user:`Mads R. B. Kristensen <madsbk>` :issue:`1285`.
+
+* NestedDirectoryStore.listdir now returns chunk keys with the correct '/' dimension_separator.
+  By :user:`Brett Graham <braingram>` :issue:`1334`.
+
+* ``N5Store``/``N5FSStore`` now return dtypes in a form readable by zarr stores.
+  By :user:`Marwan Zouinkhi <mzouink>` :issue:`1339`.
+
 .. _release_2.13.6:
 
 2.13.6
@@ -44,7 +105,10 @@ Bug fixes
 Appreciation
 ~~~~~~~~~~~~~
 
-Special thanks to Outreachy participants for contributing to most of the maintenance PRs. Please read the blog post summarising the contribution phase and welcoming new Outreachy interns: https://zarr.dev/blog/welcoming-outreachy-2022-interns/
+Special thanks to Outreachy participants for contributing to most of the
+maintenance PRs. Please read the blog post summarising the contribution phase
+and welcoming new Outreachy interns:
+https://zarr.dev/blog/welcoming-outreachy-2022-interns/
 
 
 Enhancements
diff --git a/pyproject.toml b/pyproject.toml
index 1592b98..3277e9d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -87,7 +87,6 @@ addopts = [
 ]
 filterwarnings = [
     "error:::zarr.*",
-    "ignore:Not all N5 implementations support blosc compression.*:RuntimeWarning",
     "ignore:PY_SSIZE_T_CLEAN will be required.*:DeprecationWarning",
     "ignore:The loop argument is deprecated since Python 3.8.*:DeprecationWarning",
 ]
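
Removing the "Not all N5 implementations support blosc compression" warning filter pairs with the 2.14.0 change above: N5 stores no longer emit that RuntimeWarning, so the test suite no longer needs to ignore it. A hedged sketch of the now-silent pattern (the path and array parameters are illustrative, not taken from this diff):

    import zarr
    from zarr.n5 import N5Store
    from numcodecs import Blosc

    store = N5Store('example.n5')  # hypothetical path
    z = zarr.zeros((100, 100), chunks=(10, 10), store=store, compressor=Blosc())
    z[:] = 42  # earlier releases warned here that N5 implementations may not support Blosc
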
diff --git a/requirements_dev_numpy.txt b/requirements_dev_numpy.txt
index 7d373a2..e094d4f 100644
--- a/requirements_dev_numpy.txt
+++ b/requirements_dev_numpy.txt
@@ -1,4 +1,4 @@
 # Break this out into a separate file to allow testing against
 # different versions of numpy. This file should pin to the latest
 # numpy version.
-numpy==1.24.1
+numpy==1.24.2
diff --git a/requirements_dev_optional.txt b/requirements_dev_optional.txt
index 5d7dc33..07ca6d7 100644
--- a/requirements_dev_optional.txt
+++ b/requirements_dev_optional.txt
@@ -8,7 +8,7 @@ ipywidgets==8.0.4
 # don't let pyup change pinning for azure-storage-blob, need to pin to older
 # version to get compatibility with azure storage emulator on appveyor (FIXME)
 azure-storage-blob==12.14.1 # pyup: ignore
-redis==4.4.2
+redis==4.5.1
 types-redis
 types-setuptools
 pymongo==4.3.3
@@ -17,7 +17,7 @@ coverage
 pytest-cov==4.0.0
 pytest-doctestplus==0.12.1
 pytest-timeout==2.1.0
-h5py==3.7.0
-fsspec==2022.11.0
-s3fs==2022.11.0
+h5py==3.8.0
+fsspec==2023.1.0
+s3fs==2023.1.0
 moto[server]>=4.0.8
diff --git a/requirements_rtfd.txt b/requirements_rtfd.txt
index 553384e..5d7fec3 100644
--- a/requirements_rtfd.txt
+++ b/requirements_rtfd.txt
@@ -2,9 +2,11 @@ asciitree
 setuptools
 setuptools_scm
 sphinx
+sphinx_design
 sphinx-issues
 sphinx-copybutton
 sphinx-rtd-theme
+pydata-sphinx-theme
 numpydoc
 numpy!=1.21.0
 msgpack-python==0.5.6
diff --git a/zarr/_storage/store.py b/zarr/_storage/store.py
index 9e265cf..4d813b8 100644
--- a/zarr/_storage/store.py
+++ b/zarr/_storage/store.py
@@ -1,8 +1,10 @@
 import abc
 import os
+from collections import defaultdict
 from collections.abc import MutableMapping
+from copy import copy
 from string import ascii_letters, digits
-from typing import Any, List, Mapping, Optional, Union
+from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
 
 from zarr.meta import Metadata2, Metadata3
 from zarr.util import normalize_storage_path
@@ -254,6 +256,82 @@ class StoreV3(BaseStore):
     def __getitem__(self, key):
         """Get a value."""
 
+    @abc.abstractmethod
+    def rmdir(self, path=None):
+        """Remove a data path and all its subkeys and related metadata.
+        Expects a path without the data or meta root prefix."""
+
+    @property
+    def supports_efficient_get_partial_values(self):
+        return False
+
+    def get_partial_values(
+        self,
+        key_ranges: Sequence[Tuple[str, Tuple[int, Optional[int]]]]
+    ) -> List[Union[bytes, memoryview, bytearray]]:
+        """Get multiple partial values.
+        key_ranges can be an iterable of key, range pairs,
+        where a range specifies two integers range_start and range_length
+        as a tuple, (range_start, range_length).
+        range_length may be None to indicate to read until the end.
+        range_start may be negative to start reading range_start bytes
+        from the end of the file.
+        A key may occur multiple times with different ranges.
+        Inserts None for missing keys into the returned list."""
+        results: List[Union[bytes, memoryview, bytearray]] = (
+            [None] * len(key_ranges)  # type: ignore[list-item]
+        )
+        indexed_ranges_by_key: Dict[str, List[Tuple[int, Tuple[int, Optional[int]]]]] = (
+            defaultdict(list)
+        )
+        for i, (key, range_) in enumerate(key_ranges):
+            indexed_ranges_by_key[key].append((i, range_))
+        for key, indexed_ranges in indexed_ranges_by_key.items():
+            try:
+                value = self[key]
+            except KeyError:  # pragma: no cover
+                continue
+            for i, (range_from, range_length) in indexed_ranges:
+                if range_length is None:
+                    results[i] = value[range_from:]
+                else:
+                    results[i] = value[range_from:range_from + range_length]
+        return results
+
+    def supports_efficient_set_partial_values(self):
+        return False
+
+    def set_partial_values(self, key_start_values):
+        """Set multiple partial values.
+        key_start_values can be an iterable of key, start and value triplets
+        as tuples, (key, start, value), where start defines the offset in bytes.
+        A key may occur multiple times with different starts and non-overlapping values.
+        Also, start may only be beyond the current value if other values fill the gap.
+        start may be negative to start writing start bytes from the current
+        end of the file, ending the file with the new value."""
+        unique_keys = set(next(zip(*key_start_values)))
+        values = {}
+        for key in unique_keys:
+            old_value = self.get(key)
+            values[key] = None if old_value is None else bytearray(old_value)
+        for key, start, value in key_start_values:
+            if values[key] is None:
+                assert start == 0
+                values[key] = value
+            else:
+                if start > len(values[key]):  # pragma: no cover
+                    raise ValueError(
+                        f"Cannot set value at start {start}, "
+                        + f"since it is beyond the data at key {key}, "
+                        + f"having length {len(values[key])}."
+                    )
+                if start < 0:
+                    values[key][start:] = value
+                else:
+                    values[key][start:start + len(value)] = value
+        for key, value in values.items():
+            self[key] = value
+
     def clear(self):
         """Remove all items from store."""
         self.erase_prefix("/")
@@ -303,6 +381,151 @@ class StoreV3(BaseStore):
         )
 
 
+class StorageTransformer(MutableMapping, abc.ABC):
+    """Base class for storage transformers. The methods simply pass on the data as-is
+    and should be overridden by sub-classes."""
+
+    _store_version = 3
+    _metadata_class = Metadata3
+
+    def __init__(self, _type) -> None:
+        if _type not in self.valid_types:  # pragma: no cover
+            raise ValueError(
+                f"Storage transformer cannot be initialized with type {_type}, "
+                + f"must be one of {list(self.valid_types)}."
+            )
+        self.type = _type
+        self._inner_store = None
+
+    def _copy_for_array(self, array, inner_store):
+        transformer_copy = copy(self)
+        transformer_copy._inner_store = inner_store
+        return transformer_copy
+
+    @abc.abstractproperty
+    def extension_uri(self):
+        pass  # pragma: no cover
+
+    @abc.abstractproperty
+    def valid_types(self):
+        pass  # pragma: no cover
+
+    def get_config(self):
+        """Return a dictionary holding configuration parameters for this
+        storage transformer. All values must be compatible with JSON encoding."""
+        # Override in sub-class if special encoding of config values is needed.
+        # By default, assume all non-private members are configuration
+        # parameters except for type.
+        return {
+            k: v for k, v in self.__dict__.items()
+            if not k.startswith('_') and k != "type"
+        }
+
+    @classmethod
+    def from_config(cls, _type, config):
+        """Instantiate storage transformer from a configuration object."""
+        # override in sub-class if special decoding of config values is needed
+
+        # by default, assume constructor accepts configuration parameters as
+        # keyword arguments without any special decoding
+        return cls(_type, **config)
+
+    @property
+    def inner_store(self) -> Union["StorageTransformer", StoreV3]:
+        assert self._inner_store is not None, (
+            "inner_store is not initialized, first get a copy via _copy_for_array."
+        )
+        return self._inner_store
+
+    # The following implementations are usually fine to keep as-is:
+
+    def __eq__(self, other):
+        return (
+            type(self) == type(other) and
+            self._inner_store == other._inner_store and
+            self.get_config() == other.get_config()
+        )
+
+    def erase(self, key):
+        self.__delitem__(key)
+
+    def list(self):
+        return list(self.keys())
+
+    def list_dir(self, prefix):
+        return StoreV3.list_dir(self, prefix)
+
+    def is_readable(self):
+        return self.inner_store.is_readable()
+
+    def is_writeable(self):
+        return self.inner_store.is_writeable()
+
+    def is_listable(self):
+        return self.inner_store.is_listable()
+
+    def is_erasable(self):
+        return self.inner_store.is_erasable()
+
+    def clear(self):
+        return self.inner_store.clear()
+
+    def __enter__(self):
+        return self.inner_store.__enter__()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        return self.inner_store.__exit__(exc_type, exc_value, traceback)
+
+    def close(self) -> None:
+        return self.inner_store.close()
+
+    # The following implementations might need to be re-implemented
+    # by subclasses implementing storage transformers:
+
+    def rename(self, src_path: str, dst_path: str) -> None:
+        return self.inner_store.rename(src_path, dst_path)
+
+    def list_prefix(self, prefix):
+        return self.inner_store.list_prefix(prefix)
+
+    def erase_prefix(self, prefix):
+        return self.inner_store.erase_prefix(prefix)
+
+    def rmdir(self, path=None):
+        return self.inner_store.rmdir(path)
+
+    def __contains__(self, key):
+        return self.inner_store.__contains__(key)
+
+    def __setitem__(self, key, value):
+        return self.inner_store.__setitem__(key, value)
+
+    def __getitem__(self, key):
+        return self.inner_store.__getitem__(key)
+
+    def __delitem__(self, key):
+        return self.inner_store.__delitem__(key)
+
+    def __iter__(self):
+        return self.inner_store.__iter__()
+
+    def __len__(self):
+        return self.inner_store.__len__()
+
+    @property
+    def supports_efficient_get_partial_values(self):
+        return self.inner_store.supports_efficient_get_partial_values
+
+    def get_partial_values(self, key_ranges):
+        return self.inner_store.get_partial_values(key_ranges)
+
+    def supports_efficient_set_partial_values(self):
+        return self.inner_store.supports_efficient_set_partial_values()
+
+    def set_partial_values(self, key_start_values):
+        return self.inner_store.set_partial_values(key_start_values)
+
+
 # allow MutableMapping for backwards compatibility
 StoreLike = Union[BaseStore, MutableMapping]
 
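The new get_partial_values/set_partial_values defaults above work against any StoreV3 by reading or rewriting whole values; stores advertising supports_efficient_get_partial_values can override them. A minimal sketch of the calling convention, assuming ZARR_V3_EXPERIMENTAL_API=1 is set and using KVStoreV3 purely for illustration:

    from zarr._storage.v3 import KVStoreV3

    store = KVStoreV3(dict())
    store['data/root/arr/c0'] = b'0123456789'

    # (key, (range_start, range_length)); range_length=None reads to the end,
    # a negative range_start counts back from the end of the value
    print(store.get_partial_values([
        ('data/root/arr/c0', (2, 3)),      # b'234'
        ('data/root/arr/c0', (-4, None)),  # b'6789'
        ('missing', (0, 1)),               # None is returned for missing keys
    ]))

    # (key, start, value) triplets; writes splice into the existing value
    store.set_partial_values([('data/root/arr/c0', 4, b'XY')])
    print(store['data/root/arr/c0'])  # b'0123XY6789' (stored as a bytearray after the splice)
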
diff --git a/zarr/_storage/v3.py b/zarr/_storage/v3.py
index a0a1870..5f8964f 100644
--- a/zarr/_storage/v3.py
+++ b/zarr/_storage/v3.py
@@ -182,6 +182,35 @@ class FSStoreV3(FSStore, StoreV3):
             if self.fs.isdir(store_path):
                 self.fs.rm(store_path, recursive=True)
 
+    @property
+    def supports_efficient_get_partial_values(self):
+        return True
+
+    def get_partial_values(self, key_ranges):
+        """Get multiple partial values.
+        key_ranges can be an iterable of key, range pairs,
+        where a range specifies two integers range_start and range_length
+        as a tuple, (range_start, range_length).
+        range_length may be None to indicate to read until the end.
+        range_start may be negative to start reading range_start bytes
+        from the end of the file.
+        A key may occur multiple times with different ranges.
+        Inserts None for missing keys into the returned list."""
+        results = []
+        for key, (range_start, range_length) in key_ranges:
+            key = self._normalize_key(key)
+            path = self.dir_path(key)
+            try:
+                if range_start is None or range_length is None:
+                    end = None
+                else:
+                    end = range_start + range_length
+                result = self.fs.cat_file(path, start=range_start, end=end)
+            except self.map.missing_exceptions:
+                result = None
+            results.append(result)
+        return results
+
 
 class MemoryStoreV3(MemoryStore, StoreV3):
 
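FSStoreV3 replaces the generic fallback with one ranged request per (key, range) pair via fsspec, so only the requested bytes are fetched. The underlying fsspec call it issues looks like this (the filesystem and path are illustrative):

    import fsspec

    fs = fsspec.filesystem('file')
    # cat_file returns just the byte range [start, end); FSStoreV3 computes
    # end = range_start + range_length, or None to read to the end of the file
    data = fs.cat_file('/tmp/example/data/root/arr/c0/0', start=10, end=20)
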
diff --git a/zarr/_storage/v3_storage_transformers.py b/zarr/_storage/v3_storage_transformers.py
new file mode 100644
index 0000000..3675d42
--- /dev/null
+++ b/zarr/_storage/v3_storage_transformers.py
@@ -0,0 +1,383 @@
+import functools
+import itertools
+import os
+from typing import NamedTuple, Tuple, Optional, Union, Iterator
+
+from numcodecs.compat import ensure_bytes
+import numpy as np
+
+from zarr._storage.store import StorageTransformer, StoreV3, _rmdir_from_keys_v3
+from zarr.util import normalize_storage_path
+
+
+MAX_UINT_64 = 2 ** 64 - 1
+
+
+v3_sharding_available = os.environ.get('ZARR_V3_SHARDING', '0').lower() not in ['0', 'false']
+
+
+def assert_zarr_v3_sharding_available():
+    if not v3_sharding_available:
+        raise NotImplementedError(
+            "Using V3 sharding is experimental and not yet finalized! To enable support, set:\n"
+            "ZARR_V3_SHARDING=1"
+        )  # pragma: no cover
+
+
+class _ShardIndex(NamedTuple):
+    store: "ShardingStorageTransformer"
+    # dtype uint64, shape (chunks_per_shard_0, chunks_per_shard_1, ..., 2)
+    offsets_and_lengths: np.ndarray
+
+    def __localize_chunk__(self, chunk: Tuple[int, ...]) -> Tuple[int, ...]:
+        return tuple(
+            chunk_i % shard_i
+            for chunk_i, shard_i in zip(chunk, self.store.chunks_per_shard)
+        )
+
+    def is_all_empty(self) -> bool:
+        return np.array_equiv(self.offsets_and_lengths, MAX_UINT_64)
+
+    def get_chunk_slice(self, chunk: Tuple[int, ...]) -> Optional[slice]:
+        localized_chunk = self.__localize_chunk__(chunk)
+        chunk_start, chunk_len = self.offsets_and_lengths[localized_chunk]
+        if (chunk_start, chunk_len) == (MAX_UINT_64, MAX_UINT_64):
+            return None
+        else:
+            return slice(int(chunk_start), int(chunk_start + chunk_len))
+
+    def set_chunk_slice(
+        self, chunk: Tuple[int, ...], chunk_slice: Optional[slice]
+    ) -> None:
+        localized_chunk = self.__localize_chunk__(chunk)
+        if chunk_slice is None:
+            self.offsets_and_lengths[localized_chunk] = (MAX_UINT_64, MAX_UINT_64)
+        else:
+            self.offsets_and_lengths[localized_chunk] = (
+                chunk_slice.start,
+                chunk_slice.stop - chunk_slice.start,
+            )
+
+    def to_bytes(self) -> bytes:
+        return self.offsets_and_lengths.tobytes(order="C")
+
+    @classmethod
+    def from_bytes(
+        cls, buffer: Union[bytes, bytearray], store: "ShardingStorageTransformer"
+    ) -> "_ShardIndex":
+        try:
+            return cls(
+                store=store,
+                offsets_and_lengths=np.frombuffer(bytearray(buffer), dtype="<u8").reshape(
+                    *store.chunks_per_shard, 2, order="C"
+                ),
+            )
+        except ValueError as e:  # pragma: no cover
+            raise RuntimeError from e
+
+    @classmethod
+    def create_empty(cls, store: "ShardingStorageTransformer"):
+        # reserving 2*64bit per chunk for offset and length:
+        return cls.from_bytes(
+            MAX_UINT_64.to_bytes(8, byteorder="little")
+            * (2 * store._num_chunks_per_shard),
+            store=store,
+        )
+
+
+class ShardingStorageTransformer(StorageTransformer):  # lgtm[py/missing-equals]
+    """Implements sharding as a storage transformer, as described in the spec:
+    https://zarr-specs.readthedocs.io/en/latest/extensions/storage-transformers/sharding/v1.0.html
+    https://purl.org/zarr/spec/storage_transformers/sharding/1.0
+    """
+
+    extension_uri = "https://purl.org/zarr/spec/storage_transformers/sharding/1.0"
+    valid_types = ["indexed"]
+
+    def __init__(self, _type, chunks_per_shard) -> None:
+        assert_zarr_v3_sharding_available()
+        super().__init__(_type)
+        if isinstance(chunks_per_shard, int):
+            chunks_per_shard = (chunks_per_shard, )
+        else:
+            chunks_per_shard = tuple(int(i) for i in chunks_per_shard)
+            if chunks_per_shard == ():
+                chunks_per_shard = (1, )
+        self.chunks_per_shard = chunks_per_shard
+        self._num_chunks_per_shard = functools.reduce(
+            lambda x, y: x * y, chunks_per_shard, 1
+        )
+        self._dimension_separator = None
+        self._data_key_prefix = None
+
+    def _copy_for_array(self, array, inner_store):
+        transformer_copy = super()._copy_for_array(array, inner_store)
+        transformer_copy._dimension_separator = array._dimension_separator
+        transformer_copy._data_key_prefix = array._data_key_prefix
+        if len(array._shape) > len(self.chunks_per_shard):
+            # The array shape might be longer when initialized with subdtypes.
+            # Subdtype dimensions come last, so chunks_per_shard is padded
+            # with ones, effectively disabling sharding on those trailing dimensions.
+            transformer_copy.chunks_per_shard += (
+                (1, ) * (len(array._shape) - len(self.chunks_per_shard))
+            )
+        return transformer_copy
+
+    @property
+    def dimension_separator(self) -> str:
+        assert self._dimension_separator is not None, (
+            "dimension_separator is not initialized, first get a copy via _copy_for_array."
+        )
+        return self._dimension_separator
+
+    def _is_data_key(self, key: str) -> bool:
+        assert self._data_key_prefix is not None, (
+            "data_key_prefix is not initialized, first get a copy via _copy_for_array."
+        )
+        return key.startswith(self._data_key_prefix)
+
+    def _key_to_shard(self, chunk_key: str) -> Tuple[str, Tuple[int, ...]]:
+        prefix, _, chunk_string = chunk_key.rpartition("c")
+        chunk_subkeys = tuple(
+            map(int, chunk_string.split(self.dimension_separator))
+        ) if chunk_string else (0, )
+        shard_key_tuple = (
+            subkey // shard_i
+            for subkey, shard_i in zip(chunk_subkeys, self.chunks_per_shard)
+        )
+        shard_key = (
+            prefix + "c" + self.dimension_separator.join(map(str, shard_key_tuple))
+        )
+        return shard_key, chunk_subkeys
+
+    def _get_index_from_store(self, shard_key: str) -> _ShardIndex:
+        # At the end of each shard 2*64bit per chunk for offset and length define the index:
+        index_bytes = self.inner_store.get_partial_values(
+            [(shard_key, (-16 * self._num_chunks_per_shard, None))]
+        )[0]
+        if index_bytes is None:
+            raise KeyError(shard_key)
+        return _ShardIndex.from_bytes(
+            index_bytes,
+            self,
+        )
+
+    def _get_index_from_buffer(self, buffer: Union[bytes, bytearray]) -> _ShardIndex:
+        # At the end of each shard 2*64bit per chunk for offset and length define the index:
+        return _ShardIndex.from_bytes(buffer[-16 * self._num_chunks_per_shard:], self)
+
+    def _get_chunks_in_shard(self, shard_key: str) -> Iterator[Tuple[int, ...]]:
+        _, _, chunk_string = shard_key.rpartition("c")
+        shard_key_tuple = tuple(
+            map(int, chunk_string.split(self.dimension_separator))
+        ) if chunk_string else (0, )
+        for chunk_offset in itertools.product(
+            *(range(i) for i in self.chunks_per_shard)
+        ):
+            yield tuple(
+                shard_key_i * shards_i + offset_i
+                for shard_key_i, offset_i, shards_i in zip(
+                    shard_key_tuple, chunk_offset, self.chunks_per_shard
+                )
+            )
+
+    def __getitem__(self, key):
+        if self._is_data_key(key):
+            if self.supports_efficient_get_partial_values:
+                # Use the partial implementation, which fetches the index separately
+                value = self.get_partial_values([(key, (0, None))])[0]
+                if value is None:
+                    raise KeyError(key)
+                else:
+                    return value
+            shard_key, chunk_subkey = self._key_to_shard(key)
+            try:
+                full_shard_value = self.inner_store[shard_key]
+            except KeyError:
+                raise KeyError(key)
+            index = self._get_index_from_buffer(full_shard_value)
+            chunk_slice = index.get_chunk_slice(chunk_subkey)
+            if chunk_slice is not None:
+                return full_shard_value[chunk_slice]
+            else:
+                raise KeyError(key)
+        else:
+            return self.inner_store.__getitem__(key)
+
+    def __setitem__(self, key, value):
+        value = ensure_bytes(value)
+        if self._is_data_key(key):
+            shard_key, chunk_subkey = self._key_to_shard(key)
+            chunks_to_read = set(self._get_chunks_in_shard(shard_key))
+            chunks_to_read.remove(chunk_subkey)
+            new_content = {chunk_subkey: value}
+            try:
+                if self.supports_efficient_get_partial_values:
+                    index = self._get_index_from_store(shard_key)
+                    full_shard_value = None
+                else:
+                    full_shard_value = self.inner_store[shard_key]
+                    index = self._get_index_from_buffer(full_shard_value)
+            except KeyError:
+                index = _ShardIndex.create_empty(self)
+            else:
+                chunk_slices = [
+                    (chunk_to_read, index.get_chunk_slice(chunk_to_read))
+                    for chunk_to_read in chunks_to_read
+                ]
+                valid_chunk_slices = [
+                    (chunk_to_read, chunk_slice)
+                    for chunk_to_read, chunk_slice in chunk_slices
+                    if chunk_slice is not None
+                ]
+                # use get_partial_values if fewer than half of the available chunks must be read:
+                # (This can be changed when set_partial_values can be used efficiently.)
+                use_partial_get = (
+                    self.supports_efficient_get_partial_values
+                    and len(valid_chunk_slices) < len(chunk_slices) / 2
+                )
+
+                if use_partial_get:
+                    chunk_values = self.inner_store.get_partial_values(
+                        [
+                            (
+                                shard_key,
+                                (
+                                    chunk_slice.start,
+                                    chunk_slice.stop - chunk_slice.start,
+                                ),
+                            )
+                            for _, chunk_slice in valid_chunk_slices
+                        ]
+                    )
+                    for chunk_value, (chunk_to_read, _) in zip(
+                        chunk_values, valid_chunk_slices
+                    ):
+                        new_content[chunk_to_read] = chunk_value
+                else:
+                    if full_shard_value is None:
+                        full_shard_value = self.inner_store[shard_key]
+                    for chunk_to_read, chunk_slice in valid_chunk_slices:
+                        if chunk_slice is not None:
+                            new_content[chunk_to_read] = full_shard_value[chunk_slice]
+
+            shard_content = b""
+            for chunk_subkey, chunk_content in new_content.items():
+                chunk_slice = slice(
+                    len(shard_content), len(shard_content) + len(chunk_content)
+                )
+                index.set_chunk_slice(chunk_subkey, chunk_slice)
+                shard_content += chunk_content
+            # Appending the index at the end of the shard:
+            shard_content += index.to_bytes()
+            self.inner_store[shard_key] = shard_content
+        else:  # pragma: no cover
+            self.inner_store[key] = value
+
+    def __delitem__(self, key):
+        if self._is_data_key(key):
+            shard_key, chunk_subkey = self._key_to_shard(key)
+            try:
+                index = self._get_index_from_store(shard_key)
+            except KeyError:
+                raise KeyError(key)
+
+            index.set_chunk_slice(chunk_subkey, None)
+
+            if index.is_all_empty():
+                del self.inner_store[shard_key]
+            else:
+                index_bytes = index.to_bytes()
+                self.inner_store.set_partial_values([(shard_key, -len(index_bytes), index_bytes)])
+        else:  # pragma: no cover
+            del self.inner_store[key]
+
+    def _shard_key_to_original_keys(self, key: str) -> Iterator[str]:
+        if self._is_data_key(key):
+            index = self._get_index_from_store(key)
+            prefix, _, _ = key.rpartition("c")
+            for chunk_tuple in self._get_chunks_in_shard(key):
+                if index.get_chunk_slice(chunk_tuple) is not None:
+                    yield prefix + "c" + self.dimension_separator.join(
+                        map(str, chunk_tuple)
+                    )
+        else:
+            yield key
+
+    def __iter__(self) -> Iterator[str]:
+        for key in self.inner_store:
+            yield from self._shard_key_to_original_keys(key)
+
+    def __len__(self):
+        return sum(1 for _ in self.keys())
+
+    def get_partial_values(self, key_ranges):
+        if self.supports_efficient_get_partial_values:
+            transformed_key_ranges = []
+            cached_indices = {}
+            none_indices = []
+            for i, (key, range_) in enumerate(key_ranges):
+                if self._is_data_key(key):
+                    shard_key, chunk_subkey = self._key_to_shard(key)
+                    try:
+                        index = cached_indices[shard_key]
+                    except KeyError:
+                        try:
+                            index = self._get_index_from_store(shard_key)
+                        except KeyError:
+                            none_indices.append(i)
+                            continue
+                        cached_indices[shard_key] = index
+                    chunk_slice = index.get_chunk_slice(chunk_subkey)
+                    if chunk_slice is None:
+                        none_indices.append(i)
+                        continue
+                    range_start, range_length = range_
+                    if range_length is None:
+                        range_length = chunk_slice.stop - chunk_slice.start
+                    transformed_key_ranges.append(
+                        (shard_key, (range_start + chunk_slice.start, range_length))
+                    )
+                else:  # pragma: no cover
+                    transformed_key_ranges.append((key, range_))
+            values = self.inner_store.get_partial_values(transformed_key_ranges)
+            for i in none_indices:
+                values.insert(i, None)
+            return values
+        else:
+            return StoreV3.get_partial_values(self, key_ranges)
+
+    def supports_efficient_set_partial_values(self):
+        return False
+
+    def set_partial_values(self, key_start_values):
+        # This does not yet implement efficient set_partial_values
+        StoreV3.set_partial_values(self, key_start_values)
+
+    def rename(self, src_path: str, dst_path: str) -> None:
+        StoreV3.rename(self, src_path, dst_path)  # type: ignore[arg-type]
+
+    def list_prefix(self, prefix):
+        return StoreV3.list_prefix(self, prefix)
+
+    def erase_prefix(self, prefix):
+        if self._is_data_key(prefix):
+            StoreV3.erase_prefix(self, prefix)
+        else:
+            self.inner_store.erase_prefix(prefix)
+
+    def rmdir(self, path=None):
+        path = normalize_storage_path(path)
+        _rmdir_from_keys_v3(self, path)  # type: ignore
+
+    def __contains__(self, key):
+        if self._is_data_key(key):
+            shard_key, chunk_subkeys = self._key_to_shard(key)
+            try:
+                index = self._get_index_from_store(shard_key)
+            except KeyError:
+                return False
+            chunk_slice = index.get_chunk_slice(chunk_subkeys)
+            return chunk_slice is not None
+        else:
+            return self._inner_store.__contains__(key)
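
To make the shard layout concrete: a shard value is the concatenation of its chunk payloads followed by an index of little-endian uint64 (offset, length) pairs, one per chunk slot, with 2**64 - 1 marking empty slots. A standalone numpy sketch (not zarr API) of building and parsing such a shard:

    import numpy as np

    MAX_UINT_64 = 2 ** 64 - 1
    chunks_per_shard = (2, 2)          # 4 chunk slots per shard
    num_chunks = 4

    # Build a shard holding a single 8-byte chunk at position (0, 0).
    payload = b"\x01" * 8
    index = np.full(chunks_per_shard + (2,), MAX_UINT_64, dtype="<u8")
    index[0, 0] = (0, len(payload))    # offset 0, length 8
    shard = payload + index.tobytes(order="C")

    # Parse it back, mirroring _get_index_from_buffer above.
    parsed = np.frombuffer(shard[-16 * num_chunks:], dtype="<u8")
    parsed = parsed.reshape(*chunks_per_shard, 2)
    offset, length = parsed[0, 0]
    assert shard[offset:offset + length] == payload
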
diff --git a/zarr/core.py b/zarr/core.py
index e5b2045..b9db6cb 100644
--- a/zarr/core.py
+++ b/zarr/core.py
@@ -51,7 +51,8 @@ from zarr.util import (
     normalize_shape,
     normalize_storage_path,
     PartialReadBuffer,
-    ensure_ndarray_like
+    UncompressedPartialReadBufferV3,
+    ensure_ndarray_like,
 )
 
 
@@ -189,6 +190,7 @@ class Array:
 
         self._store = store
         self._chunk_store = chunk_store
+        self._transformed_chunk_store = None
         self._path = normalize_storage_path(path)
         if self._path:
             self._key_prefix = self._path + '/'
@@ -292,6 +294,16 @@ class Array:
                 filters = [get_codec(config) for config in filters]
             self._filters = filters
 
+            if self._version == 3:
+                storage_transformers = meta.get('storage_transformers', [])
+                if storage_transformers:
+                    transformed_store = self._chunk_store or self._store
+                    for storage_transformer in storage_transformers[::-1]:
+                        transformed_store = storage_transformer._copy_for_array(
+                            self, transformed_store
+                        )
+                    self._transformed_chunk_store = transformed_store
+
     def _refresh_metadata(self):
         if not self._cache_metadata:
             self._load_metadata()
@@ -371,10 +383,12 @@ class Array:
     @property
     def chunk_store(self):
         """A MutableMapping providing the underlying storage for array chunks."""
-        if self._chunk_store is None:
-            return self._store
-        else:
+        if self._transformed_chunk_store is not None:
+            return self._transformed_chunk_store
+        elif self._chunk_store is not None:
             return self._chunk_store
+        else:
+            return self._store
 
     @property
     def shape(self):
@@ -1258,8 +1272,12 @@ class Array:
             check_array_shape('out', out, out_shape)
 
         # iterate over chunks
-        if not hasattr(self.chunk_store, "getitems") or \
-           any(map(lambda x: x == 0, self.shape)):
+        if (
+            not hasattr(self.chunk_store, "getitems") and not (
+                hasattr(self.chunk_store, "get_partial_values") and
+                self.chunk_store.supports_efficient_get_partial_values
+            )
+        ) or any(map(lambda x: x == 0, self.shape)):
             # sequentially get one key at a time from storage
             for chunk_coords, chunk_selection, out_selection in indexer:
 
@@ -1800,7 +1818,7 @@ class Array:
             check_array_shape('value', value, sel_shape)
 
         # iterate over chunks in range
-        if not hasattr(self.store, "setitems") or self._synchronizer is not None \
+        if not hasattr(self.chunk_store, "setitems") or self._synchronizer is not None \
            or any(map(lambda x: x == 0, self.shape)):
             # iterative approach
             for chunk_coords, chunk_selection, out_selection in indexer:
@@ -1885,6 +1903,8 @@ class Array:
                         cdata = cdata.read_full()
                     self._compressor.decode(cdata, dest)
                 else:
+                    if isinstance(cdata, UncompressedPartialReadBufferV3):
+                        cdata = cdata.read_full()
                     chunk = ensure_ndarray_like(cdata).view(self._dtype)
                     chunk = chunk.reshape(self._chunks, order=self._order)
                     np.copyto(dest, chunk)
@@ -1906,13 +1926,21 @@ class Array:
                         else dim
                         for i, dim in enumerate(self.chunks)
                     ]
-                    cdata.read_part(start, nitems)
-                    chunk_partial = self._decode_chunk(
-                        cdata.buff,
-                        start=start,
-                        nitems=nitems,
-                        expected_shape=expected_shape,
-                    )
+                    if isinstance(cdata, UncompressedPartialReadBufferV3):
+                        chunk_partial = self._decode_chunk(
+                            cdata.read_part(start, nitems),
+                            start=start,
+                            nitems=nitems,
+                            expected_shape=expected_shape,
+                        )
+                    else:
+                        cdata.read_part(start, nitems)
+                        chunk_partial = self._decode_chunk(
+                            cdata.buff,
+                            start=start,
+                            nitems=nitems,
+                            expected_shape=expected_shape,
+                        )
                     tmp[partial_out_selection] = chunk_partial
                 out[out_selection] = tmp[chunk_selection]
                 return
@@ -2007,9 +2035,29 @@ class Array:
                 for ckey in ckeys
                 if ckey in self.chunk_store
             }
+        elif (
+            self._partial_decompress
+            and not self._compressor
+            and not fields
+            and self.dtype != object
+            and hasattr(self.chunk_store, "get_partial_values")
+            and self.chunk_store.supports_efficient_get_partial_values
+        ):
+            partial_read_decode = True
+            cdatas = {
+                ckey: UncompressedPartialReadBufferV3(
+                    ckey, self.chunk_store, itemsize=self.itemsize
+                )
+                for ckey in ckeys
+                if ckey in self.chunk_store
+            }
         else:
             partial_read_decode = False
-            cdatas = self.chunk_store.getitems(ckeys, on_error="omit")
+            if not hasattr(self.chunk_store, "getitems"):
+                values = self.chunk_store.get_partial_values([(ckey, (0, None)) for ckey in ckeys])
+                cdatas = {key: value for key, value in zip(ckeys, values) if value is not None}
+            else:
+                cdatas = self.chunk_store.getitems(ckeys, on_error="omit")
         for ckey, chunk_select, out_select in zip(ckeys, lchunk_selection, lout_selection):
             if ckey in cdatas:
                 self._process_chunk(
@@ -2229,7 +2277,10 @@ class Array:
             cdata = chunk
 
         # ensure in-memory data is immutable and easy to compare
-        if isinstance(self.chunk_store, KVStore):
+        if (
+            isinstance(self.chunk_store, KVStore)
+            or isinstance(self._chunk_store, KVStore)
+        ):
             cdata = ensure_bytes(cdata)
 
         return cdata
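
Two details of the core.py changes above are easy to miss: Array.chunk_store now prefers the transformer-wrapped store over an explicit chunk_store, which in turn is preferred over the main store; and the metadata's transformer list is applied in reverse, so the first listed transformer ends up outermost. A toy sketch of the wrapping order:

    # Illustrative only: how storage_transformers = [t1, t2] from the array
    # metadata are layered around the base store by the loop above.
    transformers = ["t1", "t2"]
    store = "base_store"
    for t in reversed(transformers):
        store = f"{t}({store})"
    assert store == "t1(t2(base_store))"
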
diff --git a/zarr/creation.py b/zarr/creation.py
index cc191e3..a6fa8e4 100644
--- a/zarr/creation.py
+++ b/zarr/creation.py
@@ -22,7 +22,7 @@ def create(shape, chunks=True, dtype=None, compressor='default',
            overwrite=False, path=None, chunk_store=None, filters=None,
            cache_metadata=True, cache_attrs=True, read_only=False,
            object_codec=None, dimension_separator=None, write_empty_chunks=True,
-           *, zarr_version=None, meta_array=None, **kwargs):
+           *, zarr_version=None, meta_array=None, storage_transformers=(), **kwargs):
     """Create an array.
 
     Parameters
@@ -85,6 +85,14 @@ def create(shape, chunks=True, dtype=None, compressor='default',
 
         .. versionadded:: 2.11
 
+    storage_transformers : sequence of StorageTransformers, optional
+        Setting storage transformers changes the storage structure and behaviour
+        of data coming from the underlying store. The transformers are applied in the
+        order of the given sequence. Supplying an empty sequence is the same as omitting
+        the argument or setting it to None. May only be set when using zarr_version 3.
+
+        .. versionadded:: 2.13
+
     zarr_version : {None, 2, 3}, optional
         The zarr protocol version of the created array. If None, it will be
         inferred from ``store`` or ``chunk_store`` if they are provided,
@@ -170,7 +178,7 @@ def create(shape, chunks=True, dtype=None, compressor='default',
     init_array(store, shape=shape, chunks=chunks, dtype=dtype, compressor=compressor,
                fill_value=fill_value, order=order, overwrite=overwrite, path=path,
                chunk_store=chunk_store, filters=filters, object_codec=object_codec,
-               dimension_separator=dimension_separator)
+               dimension_separator=dimension_separator, storage_transformers=storage_transformers)
 
     # instantiate array
     z = Array(store, path=path, chunk_store=chunk_store, synchronizer=synchronizer,
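
A hedged end-to-end sketch of the new storage_transformers argument combined with the sharding transformer introduced above (requires the experimental ZARR_V3_EXPERIMENTAL_API=1 and ZARR_V3_SHARDING=1 environment variables):

    import zarr
    from zarr._storage.v3_storage_transformers import ShardingStorageTransformer

    sharding = ShardingStorageTransformer("indexed", chunks_per_shard=(2, 2))
    z = zarr.create(
        shape=(20, 20),
        chunks=(5, 5),
        zarr_version=3,
        storage_transformers=[sharding],
    )
    z[:] = 42   # chunks are written grouped into shards of 2x2 chunks
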
diff --git a/zarr/hierarchy.py b/zarr/hierarchy.py
index 0dae921..18e7ac7 100644
--- a/zarr/hierarchy.py
+++ b/zarr/hierarchy.py
@@ -1336,7 +1336,7 @@ def group(store=None, overwrite=False, chunk_store=None,
     """
 
     # handle polymorphic store arg
-    store = _normalize_store_arg(store, zarr_version=zarr_version)
+    store = _normalize_store_arg(store, zarr_version=zarr_version, mode='w')
     if zarr_version is None:
         zarr_version = getattr(store, '_store_version', DEFAULT_ZARR_VERSION)
 
diff --git a/zarr/meta.py b/zarr/meta.py
index 77c55b9..59c56ab 100644
--- a/zarr/meta.py
+++ b/zarr/meta.py
@@ -9,7 +9,11 @@ from numcodecs.abc import Codec
 from zarr.errors import MetadataError
 from zarr.util import json_dumps, json_loads
 
-from typing import cast, Union, Any, List, Mapping as MappingType, Optional
+from typing import cast, Union, Any, List, Mapping as MappingType, Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:  # pragma: no cover
+    from zarr._storage.store import StorageTransformer
+
 
 ZARR_FORMAT = 2
 ZARR_FORMAT_v3 = 3
@@ -88,7 +92,7 @@ class Metadata2:
     ZARR_FORMAT = ZARR_FORMAT
 
     @classmethod
-    def parse_metadata(cls, s: Union[MappingType, str]) -> MappingType[str, Any]:
+    def parse_metadata(cls, s: Union[MappingType, bytes, str]) -> MappingType[str, Any]:
 
         # Here we allow that a store may return an already-parsed metadata object,
         # or a string of JSON that we will parse here. We allow for an already-parsed
@@ -106,7 +110,7 @@ class Metadata2:
         return meta
 
     @classmethod
-    def decode_array_metadata(cls, s: Union[MappingType, str]) -> MappingType[str, Any]:
+    def decode_array_metadata(cls, s: Union[MappingType, bytes, str]) -> MappingType[str, Any]:
         meta = cls.parse_metadata(s)
 
         # check metadata format
@@ -194,7 +198,7 @@ class Metadata2:
         return np.dtype(d)
 
     @classmethod
-    def decode_group_metadata(cls, s: Union[MappingType, str]) -> MappingType[str, Any]:
+    def decode_group_metadata(cls, s: Union[MappingType, bytes, str]) -> MappingType[str, Any]:
         meta = cls.parse_metadata(s)
 
         # check metadata format version
@@ -347,7 +351,7 @@ class Metadata3(Metadata2):
             return get_extended_dtype_info(np.dtype(d))
 
     @classmethod
-    def decode_group_metadata(cls, s: Union[MappingType, str]) -> MappingType[str, Any]:
+    def decode_group_metadata(cls, s: Union[MappingType, bytes, str]) -> MappingType[str, Any]:
         meta = cls.parse_metadata(s)
         # 1 / 0
         # # check metadata format version
@@ -386,7 +390,7 @@ class Metadata3(Metadata2):
 
     @classmethod
     def decode_hierarchy_metadata(
-        cls, s: Union[MappingType, str]
+        cls, s: Union[MappingType, bytes, str]
     ) -> MappingType[str, Any]:
         meta = cls.parse_metadata(s)
         # check metadata format
@@ -460,7 +464,38 @@ class Metadata3(Metadata2):
         return codec
 
     @classmethod
-    def decode_array_metadata(cls, s: Union[MappingType, str]) -> MappingType[str, Any]:
+    def _encode_storage_transformer_metadata(
+        cls,
+        storage_transformer: "StorageTransformer"
+    ) -> Optional[Mapping]:
+        return {
+            "extension": storage_transformer.extension_uri,
+            "type": storage_transformer.type,
+            "configuration": storage_transformer.get_config(),
+        }
+
+    @classmethod
+    def _decode_storage_transformer_metadata(cls, meta: Mapping) -> "StorageTransformer":
+        from zarr.tests.test_storage_v3 import DummyStorageTransfomer
+        from zarr._storage.v3_storage_transformers import ShardingStorageTransformer
+
+        # This might be changed to a proper registry in the future
+        KNOWN_STORAGE_TRANSFORMERS = [DummyStorageTransfomer, ShardingStorageTransformer]
+
+        conf = meta.get('configuration', {})
+        extension_uri = meta['extension']
+        transformer_type = meta['type']
+
+        for StorageTransformerCls in KNOWN_STORAGE_TRANSFORMERS:
+            if StorageTransformerCls.extension_uri == extension_uri:
+                break
+        else:  # pragma: no cover
+            raise NotImplementedError
+
+        return StorageTransformerCls.from_config(transformer_type, conf)
+
+    @classmethod
+    def decode_array_metadata(cls, s: Union[MappingType, bytes, str]) -> MappingType[str, Any]:
         meta = cls.parse_metadata(s)
 
         # extract array metadata fields
@@ -476,6 +511,10 @@ class Metadata3(Metadata2):
             # TODO: remove dimension_separator?
 
             compressor = cls._decode_codec_metadata(meta.get("compressor", None))
+            storage_transformers = meta.get("storage_transformers", ())
+            storage_transformers = [
+                cls._decode_storage_transformer_metadata(i) for i in storage_transformers
+            ]
             extensions = meta.get("extensions", [])
             meta = dict(
                 shape=tuple(meta["shape"]),
@@ -493,6 +532,8 @@ class Metadata3(Metadata2):
             # compressor field should be absent when there is no compression
             if compressor:
                 meta['compressor'] = compressor
+            if storage_transformers:
+                meta['storage_transformers'] = storage_transformers
 
         except Exception as e:
             raise MetadataError("error decoding metadata: %s" % e)
@@ -514,6 +555,10 @@ class Metadata3(Metadata2):
             object_codec = None
 
         compressor = cls._encode_codec_metadata(meta.get("compressor", None))
+        storage_transformers = meta.get("storage_transformers", ())
+        storage_transformers = [
+            cls._encode_storage_transformer_metadata(i) for i in storage_transformers
+        ]
         extensions = meta.get("extensions", [])
         meta = dict(
             shape=meta["shape"] + sdshape,
@@ -532,6 +577,8 @@ class Metadata3(Metadata2):
             meta["compressor"] = compressor
         if dimension_separator:
             meta["dimension_separator"] = dimension_separator
+        if storage_transformers:
+            meta["storage_transformers"] = storage_transformers
         return json_dumps(meta)
 
 
diff --git a/zarr/n5.py b/zarr/n5.py
index 978cade..1eb6ef2 100644
--- a/zarr/n5.py
+++ b/zarr/n5.py
@@ -689,6 +689,7 @@ def array_metadata_to_zarr(array_metadata: Dict[str, Any],
     array_metadata['order'] = 'C'
     array_metadata['filters'] = []
     array_metadata['dimension_separator'] = '.'
+    array_metadata['dtype'] = np.dtype(array_metadata['dtype']).str
 
     compressor_config = array_metadata['compressor']
     compressor_config = compressor_config_to_zarr(compressor_config)
@@ -735,12 +736,6 @@ def compressor_config_to_n5(compressor_config: Optional[Dict[str, Any]]) -> Dict
 
     elif codec_id == 'blosc':
 
-        warnings.warn(
-            "Not all N5 implementations support blosc compression (yet). You "
-            "might not be able to open the dataset with another N5 library.",
-            RuntimeWarning
-        )
-
         n5_config['cname'] = _compressor_config['cname']
         n5_config['clevel'] = _compressor_config['clevel']
         n5_config['shuffle'] = _compressor_config['shuffle']
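
The added dtype line normalizes N5 metadata to the explicit, endianness-qualified dtype string zarr writes to .zarray elsewhere. A quick illustration of what np.dtype(...).str produces:

    import numpy as np

    assert np.dtype("uint8").str == "|u1"
    # on a little-endian platform:
    assert np.dtype("float64").str == "<f8"
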
diff --git a/zarr/storage.py b/zarr/storage.py
index a2a8919..fae9530 100644
--- a/zarr/storage.py
+++ b/zarr/storage.py
@@ -55,8 +55,8 @@ from zarr.meta import encode_array_metadata, encode_group_metadata
 from zarr.util import (buffer_size, json_loads, nolock, normalize_chunks,
                        normalize_dimension_separator,
                        normalize_dtype, normalize_fill_value, normalize_order,
-                       normalize_shape, normalize_storage_path, retry_call
-                       )
+                       normalize_shape, normalize_storage_path, retry_call,
+                       ensure_contiguous_ndarray_or_bytes)
 
 from zarr._storage.absstore import ABSStore  # noqa: F401
 from zarr._storage.store import (_get_hierarchy_metadata,  # noqa: F401
@@ -311,6 +311,7 @@ def init_array(
     filters=None,
     object_codec=None,
     dimension_separator=None,
+    storage_transformers=(),
 ):
     """Initialize an array store with the given configuration. Note that this is a low-level
     function and there should be no need to call this directly from user code.
@@ -438,7 +439,8 @@ def init_array(
                          order=order, overwrite=overwrite, path=path,
                          chunk_store=chunk_store, filters=filters,
                          object_codec=object_codec,
-                         dimension_separator=dimension_separator)
+                         dimension_separator=dimension_separator,
+                         storage_transformers=storage_transformers)
 
 
 def _init_array_metadata(
@@ -455,6 +457,7 @@ def _init_array_metadata(
     filters=None,
     object_codec=None,
     dimension_separator=None,
+    storage_transformers=(),
 ):
 
     store_version = getattr(store, '_store_version', 2)
@@ -576,6 +579,7 @@ def _init_array_metadata(
     if store_version < 3:
         meta.update(dict(chunks=chunks, dtype=dtype, order=order,
                          filters=filters_config))
+        assert not storage_transformers
     else:
         if dimension_separator is None:
             dimension_separator = "/"
@@ -589,7 +593,8 @@ def _init_array_metadata(
                                  separator=dimension_separator),
                  chunk_memory_layout=order,
                  data_type=dtype,
-                 attributes=attributes)
+                 attributes=attributes,
+                 storage_transformers=storage_transformers)
         )
 
     key = _prefix_to_array_key(store, _path_to_prefix(path))
@@ -1199,7 +1204,9 @@ class DirectoryStore(Store):
                         for file_name in file_names:
                             file_path = os.path.join(dir_path, file_name)
                             rel_path = file_path.split(root_path + os.path.sep)[1]
-                            new_children.append(rel_path.replace(os.path.sep, '.'))
+                            new_children.append(rel_path.replace(
+                                os.path.sep,
+                                self._dimension_separator or '.'))
                 else:
                     new_children.append(entry)
             return sorted(new_children)
@@ -1390,13 +1397,19 @@ class FSStore(Store):
     def setitems(self, values):
         if self.mode == 'r':
             raise ReadOnlyError()
-        values = {self._normalize_key(key): val for key, val in values.items()}
+
+        # Normalize keys and make sure the values are bytes
+        values = {
+            self._normalize_key(key): ensure_contiguous_ndarray_or_bytes(val)
+            for key, val in values.items()
+        }
         self.map.setitems(values)
 
     def __setitem__(self, key, value):
         if self.mode == 'r':
             raise ReadOnlyError()
         key = self._normalize_key(key)
+        value = ensure_contiguous_ndarray_or_bytes(value)
         path = self.dir_path(key)
         try:
             if self.fs.isdir(path):
diff --git a/zarr/tests/test_attrs.py b/zarr/tests/test_attrs.py
index e4baf18..a329f46 100644
--- a/zarr/tests/test_attrs.py
+++ b/zarr/tests/test_attrs.py
@@ -1,12 +1,15 @@
 import json
 
+import pathlib
 import pytest
 
+import zarr
 from zarr._storage.store import meta_root
 from zarr.attrs import Attributes
-from zarr.storage import KVStore
+from zarr.storage import KVStore, DirectoryStore
 from zarr._storage.v3 import KVStoreV3
 from zarr.tests.util import CountingDict, CountingDictV3
+from zarr.hierarchy import group
 
 
 @pytest.fixture(params=[2, 3])
@@ -42,11 +45,27 @@ class TestAttributes():
         a['baz'] = 42
         assert attrs_key in store
         assert isinstance(store[attrs_key], bytes)
-        d = json.loads(str(store[attrs_key], 'ascii'))
+        d = json.loads(str(store[attrs_key], 'utf-8'))
         if zarr_version == 3:
             d = d['attributes']
         assert dict(foo='bar', baz=42) == d
 
+    def test_utf8_encoding(self, zarr_version):
+
+        project_root = pathlib.Path(zarr.__file__).resolve().parent.parent
+        fixdir = project_root / "fixture" / "utf8attrs"
+        if not fixdir.exists():  # pragma: no cover
+            # store the data - should be one-time operation
+            fixdir.mkdir()
+            with (fixdir / ".zattrs").open("w", encoding="utf-8") as f:
+                f.write('{"foo": "た"}')
+            with (fixdir / ".zgroup").open("w", encoding="utf-8") as f:
+                f.write("""{\n    "zarr_format": 2\n}""")
+
+        # fixture data
+        fixture = group(store=DirectoryStore('fixture'))
+        assert fixture['utf8attrs'].attrs.asdict() == dict(foo='た')
+
     def test_get_set_del_contains(self, zarr_version):
 
         store = _init_store(zarr_version)
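
Together with the UTF-8 change to json_loads in zarr/util.py further below, this lets non-ASCII attribute values round-trip. A minimal sketch:

    import zarr

    g = zarr.group()
    g.attrs["foo"] = "た"
    assert g.attrs["foo"] == "た"
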
diff --git a/zarr/tests/test_core.py b/zarr/tests/test_core.py
index e32026e..ba89db3 100644
--- a/zarr/tests/test_core.py
+++ b/zarr/tests/test_core.py
@@ -17,9 +17,11 @@ from numcodecs.tests.common import greetings
 from numpy.testing import assert_array_almost_equal, assert_array_equal
 from pkg_resources import parse_version
 
+import zarr
 from zarr._storage.store import (
     v3_api_available,
 )
+from .._storage.v3_storage_transformers import ShardingStorageTransformer, v3_sharding_available
 from zarr.core import Array
 from zarr.errors import ArrayNotFoundError, ContainsGroupError
 from zarr.meta import json_loads
@@ -49,9 +51,11 @@ from zarr._storage.v3 import (
     KVStoreV3,
     LMDBStoreV3,
     LRUStoreCacheV3,
+    RmdirV3,
     SQLiteStoreV3,
     StoreV3,
 )
+from zarr.tests.test_storage_v3 import DummyStorageTransfomer
 from zarr.util import buffer_size
 from zarr.tests.util import abs_container, skip_test_env_var, have_fsspec, mktemp
 
@@ -827,7 +831,6 @@ class TestArray(unittest.TestCase):
         attrs_cache = z.attrs.cache
         a = np.random.randint(0, 1000, 1000)
         z[:] = a
-
         # round trip through pickle
         dump = pickle.dumps(z)
         # some stores cannot be opened twice at the same time, need to close
@@ -2019,9 +2022,7 @@ class TestArrayWithN5Store(TestArrayWithDirectoryStore):
             a1[:] = 1
             assert np.all(a1[:] == 1)
 
-        compressors_warn = [
-            Blosc()
-        ]
+        compressors_warn = []
         if LZMA:
             compressors_warn.append(LZMA(2))  # Try lzma.FORMAT_ALONE, which N5 doesn't support.
         for compressor in compressors_warn:
@@ -2033,13 +2034,12 @@ class TestArrayWithN5Store(TestArrayWithDirectoryStore):
             assert np.all(a2[:] == 1)
 
     def expected(self):
-        return [
-           '4e9cf910000506455f82a70938a272a3fce932e5',
-           'f9d4cbf1402901f63dea7acf764d2546e4b6aa38',
-           '1d8199f5f7b70d61aa0d29cc375212c3df07d50a',
-           '874880f91aa6736825584509144afe6b06b0c05c',
-           'e2258fedc74752196a8c8383db49e27193c995e2',
-           ]
+        return ['8811a77d54caaa1901d5cc4452d946ae433c8d90',
+                'd880b007d9779db5f2cdbe13274eb1cbac4a425a',
+                'd80eb66d5521744f051e816ab368d8ccfc2e3edf',
+                '568f9f837e4b682a3819cb122988e2eebeb6572b',
+                '4fdf4475d786d6694110db5619acd30c80dfc372'
+                ]
 
 
 @pytest.mark.skipif(have_fsspec is False, reason="needs fsspec")
@@ -3098,7 +3098,7 @@ class TestArrayWithSQLiteStoreV3(TestArrayWithPathV3, TestArrayWithSQLiteStore):
 # Note: this custom mapping doesn't actually have all methods in the
 #       v3 spec (e.g. erase), but they aren't needed here.
 
-class CustomMappingV3(StoreV3):
+class CustomMappingV3(RmdirV3, StoreV3):
 
     def __init__(self):
         self.inner = KVStoreV3(dict())
@@ -3296,6 +3296,60 @@ class TestArrayWithFSStoreV3PartialRead(TestArrayWithPathV3, TestArrayWithFSStor
         ]
 
 
+@pytest.mark.skipif(have_fsspec is False, reason="needs fsspec")
+@pytest.mark.skipif(not v3_api_available, reason="V3 is disabled")
+@pytest.mark.skipif(not v3_sharding_available, reason="sharding is disabled")
+class TestArrayWithFSStoreV3PartialReadUncompressedSharded(
+    TestArrayWithPathV3, TestArrayWithFSStorePartialRead
+):
+
+    @staticmethod
+    def create_array(array_path='arr1', read_only=False, **kwargs):
+        path = mkdtemp()
+        atexit.register(shutil.rmtree, path)
+        store = FSStoreV3(path)
+        cache_metadata = kwargs.pop("cache_metadata", True)
+        cache_attrs = kwargs.pop("cache_attrs", True)
+        write_empty_chunks = kwargs.pop('write_empty_chunks', True)
+        kwargs.setdefault('compressor', None)
+        num_dims = 1 if isinstance(kwargs["shape"], int) else len(kwargs["shape"])
+        sharding_transformer = ShardingStorageTransformer(
+            "indexed", chunks_per_shard=(2, ) * num_dims
+        )
+        init_array(store, path=array_path, storage_transformers=[sharding_transformer], **kwargs)
+        return Array(
+            store,
+            path=array_path,
+            read_only=read_only,
+            cache_metadata=cache_metadata,
+            cache_attrs=cache_attrs,
+            partial_decompress=True,
+            write_empty_chunks=write_empty_chunks,
+        )
+
+    def test_nbytes_stored(self):
+        z = self.create_array(shape=1000, chunks=100)
+        expect_nbytes_stored = sum(buffer_size(v) for k, v in z._store.items() if k != 'zarr.json')
+        assert expect_nbytes_stored == z.nbytes_stored
+        z[:] = 42
+        expect_nbytes_stored = sum(buffer_size(v) for k, v in z._store.items() if k != 'zarr.json')
+        assert expect_nbytes_stored == z.nbytes_stored
+
+    def test_supports_efficient_get_set_partial_values(self):
+        z = self.create_array(shape=100, chunks=10)
+        assert z.chunk_store.supports_efficient_get_partial_values
+        assert not z.chunk_store.supports_efficient_set_partial_values()
+
+    def expected(self):
+        return [
+            "90109fc2a4e17efbcb447003ea1c08828b91f71e",
+            "2b73519f7260dba3ddce0d2b70041888856fec6b",
+            "bca5798be2ed71d444f3045b05432d937682b7dd",
+            "9ff1084501e28520e577662a6e3073f1116c76a2",
+            "882a97cad42417f90f111d0cb916a21579650467",
+        ]
+
+
 @pytest.mark.skipif(have_fsspec is False, reason="needs fsspec")
 @pytest.mark.skipif(not v3_api_available, reason="V3 is disabled")
 class TestArrayWithFSStoreV3Nested(TestArrayWithPathV3, TestArrayWithFSStoreNested):
@@ -3359,6 +3413,93 @@ class TestArrayWithFSStoreV3NestedPartialRead(TestArrayWithPathV3,
         ]
 
 
+@pytest.mark.skipif(not v3_api_available, reason="V3 is disabled")
+class TestArrayWithStorageTransformersV3(TestArrayWithChunkStoreV3):
+
+    @staticmethod
+    def create_array(array_path='arr1', read_only=False, **kwargs):
+        store = KVStoreV3(dict())
+        # separate chunk store
+        chunk_store = KVStoreV3(dict())
+        cache_metadata = kwargs.pop('cache_metadata', True)
+        cache_attrs = kwargs.pop('cache_attrs', True)
+        write_empty_chunks = kwargs.pop('write_empty_chunks', True)
+        dummy_storage_transformer = DummyStorageTransfomer(
+            "dummy_type", test_value=DummyStorageTransfomer.TEST_CONSTANT
+        )
+        init_array(store, path=array_path, chunk_store=chunk_store,
+                   storage_transformers=[dummy_storage_transformer], **kwargs)
+        return Array(store, path=array_path, read_only=read_only,
+                     chunk_store=chunk_store, cache_metadata=cache_metadata,
+                     cache_attrs=cache_attrs, write_empty_chunks=write_empty_chunks)
+
+    def expected(self):
+        return [
+            "3fb9a4f8233b09ad02067b6b7fc9fd5caa405c7d",
+            "89c8eb364beb84919fc9153d2c1ed2696274ec18",
+            "73307055c3aec095dd1232c38d793ef82a06bd97",
+            "6152c09255a5efa43b1a115546e35affa00c138c",
+            "2f8802fc391f67f713302e84fad4fd8f1366d6c2",
+        ]
+
+
+@pytest.mark.skipif(not v3_api_available, reason="V3 is disabled")
+@pytest.mark.skipif(not v3_sharding_available, reason="sharding is disabled")
+class TestArrayWithShardingStorageTransformerV3(TestArrayWithPathV3):
+
+    @staticmethod
+    def create_array(array_path='arr1', read_only=False, **kwargs):
+        store = KVStoreV3(dict())
+        cache_metadata = kwargs.pop('cache_metadata', True)
+        cache_attrs = kwargs.pop('cache_attrs', True)
+        write_empty_chunks = kwargs.pop('write_empty_chunks', True)
+        kwargs.setdefault('compressor', None)
+        num_dims = 1 if isinstance(kwargs["shape"], int) else len(kwargs["shape"])
+        sharding_transformer = ShardingStorageTransformer(
+            "indexed", chunks_per_shard=(2, ) * num_dims
+        )
+        init_array(store, path=array_path, storage_transformers=[sharding_transformer], **kwargs)
+        return Array(store, path=array_path, read_only=read_only,
+                     cache_metadata=cache_metadata,
+                     cache_attrs=cache_attrs, write_empty_chunks=write_empty_chunks)
+
+    def test_nbytes_stored(self):
+        z = self.create_array(shape=1000, chunks=100)
+        expect_nbytes_stored = sum(buffer_size(v) for k, v in z._store.items() if k != 'zarr.json')
+        assert expect_nbytes_stored == z.nbytes_stored
+        z[:] = 42
+        expect_nbytes_stored = sum(buffer_size(v) for k, v in z._store.items() if k != 'zarr.json')
+        assert expect_nbytes_stored == z.nbytes_stored
+
+        # mess with store
+        z.store[data_root + z._key_prefix + 'foo'] = list(range(10))
+        assert -1 == z.nbytes_stored
+
+    def test_keys_inner_store(self):
+        z = self.create_array(shape=1000, chunks=100)
+        assert z.chunk_store.keys() == z._store.keys()
+        meta_keys = set(z.store.keys())
+        z[:] = 42
+        assert len(z.chunk_store.keys() - meta_keys) == 10
+        # inner store should have half the data keys,
+        # since chunks_per_shard is 2:
+        assert len(z._store.keys() - meta_keys) == 5
+
+    def test_supports_efficient_get_set_partial_values(self):
+        z = self.create_array(shape=100, chunks=10)
+        assert not z.chunk_store.supports_efficient_get_partial_values
+        assert not z.chunk_store.supports_efficient_set_partial_values()
+
+    def expected(self):
+        return [
+            '90109fc2a4e17efbcb447003ea1c08828b91f71e',
+            '2b73519f7260dba3ddce0d2b70041888856fec6b',
+            'bca5798be2ed71d444f3045b05432d937682b7dd',
+            '9ff1084501e28520e577662a6e3073f1116c76a2',
+            '882a97cad42417f90f111d0cb916a21579650467',
+        ]
+
+
 @pytest.mark.skipif(not v3_api_available, reason="V3 is disabled")
 def test_array_mismatched_store_versions():
     store_v3 = KVStoreV3(dict())
@@ -3377,3 +3518,27 @@ def test_array_mismatched_store_versions():
         Array(store_v3, path='dataset', read_only=False, chunk_store=chunk_store_v2)
     with pytest.raises(ValueError):
         Array(store_v2, path='dataset', read_only=False, chunk_store=chunk_store_v3)
+
+
+@pytest.mark.skipif(have_fsspec is False, reason="needs fsspec")
+def test_issue_1279(tmpdir):
+    """See <https://github.com/zarr-developers/zarr-python/issues/1279>"""
+
+    data = np.arange(25).reshape((5, 5))
+    ds = zarr.create(
+        shape=data.shape,
+        chunks=(5, 5),
+        dtype=data.dtype,
+        compressor=(None),
+        store=FSStore(url=str(tmpdir), mode="a"),
+        order="F",
+    )
+
+    ds[:] = data
+
+    ds_reopened = zarr.open_array(
+        store=FSStore(url=str(tmpdir), mode="r")
+    )
+
+    written_data = ds_reopened[:]
+    assert_array_equal(data, written_data)
diff --git a/zarr/tests/test_creation.py b/zarr/tests/test_creation.py
index 4c9c292..b791bc3 100644
--- a/zarr/tests/test_creation.py
+++ b/zarr/tests/test_creation.py
@@ -19,8 +19,10 @@ from zarr.storage import DirectoryStore, KVStore
 from zarr._storage.store import v3_api_available
 from zarr._storage.v3 import DirectoryStoreV3, KVStoreV3
 from zarr.sync import ThreadSynchronizer
+from zarr.tests.test_storage_v3 import DummyStorageTransfomer
 from zarr.tests.util import mktemp, have_fsspec
 
+
 _VERSIONS = ((None, 2, 3) if v3_api_available else (None, 2))
 _VERSIONS2 = ((2, 3) if v3_api_available else (2, ))
 
@@ -747,3 +749,16 @@ def test_create_read_only(zarr_version, at_root):
 def test_json_dumps_chunks_numpy_dtype():
     z = zeros((10,), chunks=(np.int64(2),))
     assert np.all(z[...] == 0)
+
+
+@pytest.mark.skipif(not v3_api_available, reason="V3 is disabled")
+@pytest.mark.parametrize('at_root', [False, True])
+def test_create_with_storage_transformers(at_root):
+    kwargs = _init_creation_kwargs(zarr_version=3, at_root=at_root)
+    transformer = DummyStorageTransfomer(
+        "dummy_type",
+        test_value=DummyStorageTransfomer.TEST_CONSTANT
+    )
+    z = create(1000000000, chunks=True, storage_transformers=[transformer], **kwargs)
+    assert isinstance(z.chunk_store, DummyStorageTransfomer)
+    assert z.chunk_store.test_value == DummyStorageTransfomer.TEST_CONSTANT
diff --git a/zarr/tests/test_hierarchy.py b/zarr/tests/test_hierarchy.py
index 7d87b6d..d083345 100644
--- a/zarr/tests/test_hierarchy.py
+++ b/zarr/tests/test_hierarchy.py
@@ -1591,6 +1591,17 @@ def test_group(zarr_version):
     assert store is g.store
 
 
+@pytest.mark.skipif(have_fsspec is False, reason='needs fsspec')
+@pytest.mark.parametrize('zarr_version', _VERSIONS)
+def test_group_writeable_mode(zarr_version, tmp_path):
+    # Regression test for https://github.com/zarr-developers/zarr-python/issues/1353
+    import fsspec
+
+    store = fsspec.get_mapper(str(tmp_path))
+    zg = group(store=store)
+    assert zg.store.map == store
+
+
 @pytest.mark.parametrize('zarr_version', _VERSIONS)
 def test_open_group(zarr_version):
     # test the open_group() convenience function
diff --git a/zarr/tests/test_n5.py b/zarr/tests/test_n5.py
index a1a0a83..8f6d97d 100644
--- a/zarr/tests/test_n5.py
+++ b/zarr/tests/test_n5.py
@@ -1,10 +1,15 @@
-
 import pytest
 
-from zarr.n5 import N5ChunkWrapper
+from zarr.n5 import N5ChunkWrapper, N5FSStore
+from zarr.creation import create
+from zarr.storage import atexit_rmtree
 from numcodecs import GZip
 import numpy as np
 from typing import Tuple
+import json
+import atexit
+
+from zarr.tests.util import have_fsspec
 
 
 def test_make_n5_chunk_wrapper():
@@ -35,3 +40,15 @@ def test_partial_chunk_decode(chunk_shape: Tuple[int, ...]):
     chunk[subslices] = 1
     subchunk = np.ascontiguousarray(chunk[subslices])
     assert np.array_equal(codec_wrapped.decode(codec_wrapped.encode(subchunk)), chunk)
+
+
+@pytest.mark.skipif(have_fsspec is False, reason="needs fsspec")
+def test_dtype_decode():
+    path = 'data/array.n5'
+    atexit_rmtree(path)
+    atexit.register(atexit_rmtree, path)
+    n5_store = N5FSStore(path)
+    create(100, store=n5_store)
+    dtype_n5 = json.loads(n5_store[".zarray"])["dtype"]
+    dtype_zarr = json.loads(create(100).store[".zarray"])["dtype"]
+    assert dtype_n5 == dtype_zarr
diff --git a/zarr/tests/test_storage.py b/zarr/tests/test_storage.py
index 7c23735..0b21dfb 100644
--- a/zarr/tests/test_storage.py
+++ b/zarr/tests/test_storage.py
@@ -1442,6 +1442,13 @@ class TestNestedDirectoryStore(TestDirectoryStore):
         store[self.root + '42'] = b'zzz'
         assert b'zzz' == store[self.root + '42']
 
+    def test_listdir(self):
+        store = self.create_store()
+        z = zarr.zeros((10, 10), chunks=(5, 5), store=store)
+        z[:] = 1  # write to all chunks
+        for k in store.listdir():
+            assert store.get(k) is not None
+
 
 class TestNestedDirectoryStoreNone:
 
diff --git a/zarr/tests/test_storage_v3.py b/zarr/tests/test_storage_v3.py
index 4f62151..cc031f0 100644
--- a/zarr/tests/test_storage_v3.py
+++ b/zarr/tests/test_storage_v3.py
@@ -1,6 +1,7 @@
 import array
 import atexit
 import copy
+import inspect
 import os
 import tempfile
 
@@ -8,7 +9,9 @@ import numpy as np
 import pytest
 
 import zarr
-from zarr._storage.store import _get_hierarchy_metadata, v3_api_available
+from zarr._storage.store import _get_hierarchy_metadata, v3_api_available, StorageTransformer
+from zarr._storage.v3_storage_transformers import ShardingStorageTransformer, v3_sharding_available
+from zarr.core import Array
 from zarr.meta import _default_entry_point_metadata_v3
 from zarr.storage import (atexit_rmglob, atexit_rmtree, data_root,
                           default_compressor, getsize, init_array, meta_root,
@@ -88,6 +91,18 @@ class InvalidDummyStore():
         """keys"""
 
 
+class DummyStorageTransfomer(StorageTransformer):
+    TEST_CONSTANT = "test1234"
+
+    extension_uri = "https://purl.org/zarr/spec/storage_transformers/dummy/1.0"
+    valid_types = ["dummy_type"]
+
+    def __init__(self, _type, test_value) -> None:
+        super().__init__(_type)
+        assert test_value == self.TEST_CONSTANT
+        self.test_value = test_value
+
+
 def test_ensure_store_v3():
     class InvalidStore:
         pass
@@ -190,8 +205,11 @@ class StoreV3Tests(_StoreTests):
 
         store = self.create_store()
         path = 'arr1'
+        transformer = DummyStorageTransfomer(
+            "dummy_type", test_value=DummyStorageTransfomer.TEST_CONSTANT
+        )
         init_array(store, path=path, shape=1000, chunks=100,
-                   dimension_separator=pass_dim_sep)
+                   dimension_separator=pass_dim_sep, storage_transformers=[transformer])
 
         # check metadata
         mkey = meta_root + path + '.array.json'
@@ -204,6 +222,9 @@ class StoreV3Tests(_StoreTests):
         assert meta['fill_value'] is None
         # Missing MUST be assumed to be "/"
         assert meta['chunk_grid']['separator'] is want_dim_sep
+        assert len(meta["storage_transformers"]) == 1
+        assert isinstance(meta["storage_transformers"][0], DummyStorageTransfomer)
+        assert meta["storage_transformers"][0].test_value == DummyStorageTransfomer.TEST_CONSTANT
         store.close()
 
     def test_list_prefix(self):
@@ -235,6 +256,67 @@ class StoreV3Tests(_StoreTests):
             with pytest.raises(NotImplementedError):
                 store.rename('a', 'b')
 
+    def test_get_partial_values(self):
+        store = self.create_store()
+        assert store.supports_efficient_get_partial_values in [True, False]
+        store[data_root + 'foo'] = b'abcdefg'
+        store[data_root + 'baz'] = b'z'
+        assert [b'a'] == store.get_partial_values(
+            [
+                (data_root + 'foo', (0, 1))
+            ]
+        )
+        assert [
+            b'd', b'b', b'z', b'abc', b'defg', b'defg', b'g', b'ef'
+        ] == store.get_partial_values(
+            [
+                (data_root + 'foo', (3, 1)),
+                (data_root + 'foo', (1, 1)),
+                (data_root + 'baz', (0, 1)),
+                (data_root + 'foo', (0, 3)),
+                (data_root + 'foo', (3, 4)),
+                (data_root + 'foo', (3, None)),
+                (data_root + 'foo', (-1, None)),
+                (data_root + 'foo', (-3, 2)),
+            ]
+        )
+
+    def test_set_partial_values(self):
+        store = self.create_store()
+        store.supports_efficient_set_partial_values()
+        store[data_root + 'foo'] = b'abcdefg'
+        store.set_partial_values(
+            [
+                (data_root + 'foo', 0, b'hey')
+            ]
+        )
+        assert store[data_root + 'foo'] == b'heydefg'
+
+        store.set_partial_values(
+            [
+                (data_root + 'baz', 0, b'z')
+            ]
+        )
+        assert store[data_root + 'baz'] == b'z'
+        store.set_partial_values(
+            [
+                (data_root + 'foo', 1, b'oo'),
+                (data_root + 'baz', 1, b'zzz'),
+                (data_root + 'baz', 4, b'aaaa'),
+                (data_root + 'foo', 6, b'done'),
+            ]
+        )
+        assert store[data_root + 'foo'] == b'hoodefdone'
+        assert store[data_root + 'baz'] == b'zzzzaaaa'
+        store.set_partial_values(
+            [
+                (data_root + 'foo', -2, b'NE'),
+                (data_root + 'baz', -5, b'q'),
+            ]
+        )
+        assert store[data_root + 'foo'] == b'hoodefdoNE'
+        assert store[data_root + 'baz'] == b'zzzq'
+
 
 class TestMappingStoreV3(StoreV3Tests):
 
@@ -443,6 +525,43 @@ class TestRedisStoreV3(StoreV3Tests):
         return store
 
 
+@pytest.mark.skipif(not v3_sharding_available, reason="sharding is disabled")
+class TestStorageTransformerV3(TestMappingStoreV3):
+
+    def create_store(self, **kwargs):
+        inner_store = super().create_store(**kwargs)
+        dummy_transformer = DummyStorageTransfomer(
+            "dummy_type", test_value=DummyStorageTransfomer.TEST_CONSTANT
+        )
+        sharding_transformer = ShardingStorageTransformer(
+            "indexed", chunks_per_shard=2,
+        )
+        path = 'bla'
+        init_array(inner_store, path=path, shape=1000, chunks=100,
+                   dimension_separator=".",
+                   storage_transformers=[dummy_transformer, sharding_transformer])
+        store = Array(store=inner_store, path=path).chunk_store
+        store.erase_prefix("data/root/bla/")
+        store.clear()
+        return store
+
+    def test_method_forwarding(self):
+        store = self.create_store()
+        inner_store = store.inner_store.inner_store
+        assert store.list() == inner_store.list()
+        assert store.list_dir(data_root) == inner_store.list_dir(data_root)
+
+        assert store.is_readable()
+        assert store.is_writeable()
+        assert store.is_listable()
+        inner_store._readable = False
+        inner_store._writeable = False
+        inner_store._listable = False
+        assert not store.is_readable()
+        assert not store.is_writeable()
+        assert not store.is_listable()
+
+
 class TestLRUStoreCacheV3(_TestLRUStoreCache, StoreV3Tests):
 
     CountingClass = CountingDictV3
@@ -535,3 +654,19 @@ def test_top_level_imports():
             assert hasattr(zarr, store_name)  # pragma: no cover
         else:
             assert not hasattr(zarr, store_name)  # pragma: no cover
+
+
+def _get_public_and_dunder_methods(some_class):
+    return set(
+        name for name, _ in inspect.getmembers(some_class, predicate=inspect.isfunction)
+        if not name.startswith("_") or name.startswith("__")
+    )
+
+
+def test_storage_transformer_interface():
+    store_v3_methods = _get_public_and_dunder_methods(StoreV3)
+    store_v3_methods.discard("__init__")
+    storage_transformer_methods = _get_public_and_dunder_methods(StorageTransformer)
+    storage_transformer_methods.discard("__init__")
+    storage_transformer_methods.discard("get_config")
+    assert storage_transformer_methods == store_v3_methods
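The final comparison enforces that StorageTransformer mirrors the complete public and dunder method surface of StoreV3, modulo __init__ on both sides and the transformer-only get_config. A toy version of the same inspect-based parity check, using hypothetical classes:

    import inspect

    class Base:
        def read(self): ...
        def _private(self): ...           # filtered out: single underscore
        def __contains__(self, key): ...  # kept: dunder

    class Mirror:
        def read(self): ...
        def get_config(self): ...         # extra method, discarded below
        def __contains__(self, key): ...

    def public_and_dunder(cls):
        return {
            name for name, _ in inspect.getmembers(cls, predicate=inspect.isfunction)
            if not name.startswith("_") or name.startswith("__")
        }

    methods = public_and_dunder(Mirror)
    methods.discard("get_config")
    assert methods == public_and_dunder(Base)  # {'read', '__contains__'}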
diff --git a/zarr/util.py b/zarr/util.py
index 9fcdac9..be5f174 100644
--- a/zarr/util.py
+++ b/zarr/util.py
@@ -5,17 +5,22 @@ import numbers
 from textwrap import TextWrapper
 import mmap
 import time
+from typing import Any, Callable, Dict, Optional, Tuple, Union
 
 import numpy as np
 from asciitree import BoxStyle, LeftAligned
 from asciitree.traversal import Traversal
 from collections.abc import Iterable
-from numcodecs.compat import ensure_text, ensure_ndarray_like
+from numcodecs.compat import (
+    ensure_text,
+    ensure_ndarray_like,
+    ensure_bytes,
+    ensure_contiguous_ndarray_like
+)
+from numcodecs.ndarray_like import NDArrayLike
 from numcodecs.registry import codec_registry
 from numcodecs.blosc import cbuffer_sizes, cbuffer_metainfo
 
-from typing import Any, Callable, Dict, Optional, Tuple, Union
-
 
 def flatten(arg: Iterable) -> Iterable:
     for element in arg:
@@ -51,9 +56,9 @@ def json_dumps(o: Any) -> bytes:
                       separators=(',', ': '), cls=NumberEncoder).encode('ascii')
 
 
-def json_loads(s: str) -> Dict[str, Any]:
+def json_loads(s: Union[bytes, str]) -> Dict[str, Any]:
     """Read JSON in a consistent way."""
-    return json.loads(ensure_text(s, 'ascii'))
+    return json.loads(ensure_text(s, 'utf-8'))
 
 
 def normalize_shape(shape) -> Tuple[int]:
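Widening json_loads matters because V3 metadata documents reach it as raw bytes from a store, and JSON interchange is UTF-8 per RFC 8259, so an ASCII-only decode would reject metadata containing unescaped non-ASCII text. A quick illustration with the standard library (plain json, not zarr code):

    import json

    doc = json.dumps({"name": "température"}, ensure_ascii=False).encode("utf-8")
    # doc.decode("ascii") would raise UnicodeDecodeError on the multibyte
    # character; decoding as utf-8 round-trips it.
    assert json.loads(doc.decode("utf-8")) == {"name": "température"}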
@@ -639,6 +644,25 @@ class PartialReadBuffer:
         return self.chunk_store[self.store_key]
 
 
+class UncompressedPartialReadBufferV3:
+    def __init__(self, store_key, chunk_store, itemsize):
+        assert chunk_store.supports_efficient_get_partial_values
+        self.chunk_store = chunk_store
+        self.store_key = store_key
+        self.itemsize = itemsize
+
+    def prepare_chunk(self):
+        pass
+
+    def read_part(self, start, nitems):
+        return self.chunk_store.get_partial_values(
+            [(self.store_key, (start * self.itemsize, nitems * self.itemsize))]
+        )[0]
+
+    def read_full(self):
+        return self.chunk_store[self.store_key]
+
+
 def retry_call(callabl: Callable,
                args=None,
                kwargs=None,
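UncompressedPartialReadBufferV3 only makes sense for uncompressed chunks, where item i sits at byte offset i * itemsize, so read_part can translate an item range directly into a single byte-range request. A rough sketch of that arithmetic against a dict-backed store (DictStore is hypothetical, standing in for a StoreV3 with efficient partial reads):

    import numpy as np

    class DictStore(dict):
        supports_efficient_get_partial_values = True

        def get_partial_values(self, key_ranges):
            # Return the requested byte slices, one result per request.
            return [self[key][start:start + length]
                    for key, (start, length) in key_ranges]

    chunk = np.arange(10, dtype="<i4")                     # itemsize == 4
    store = DictStore({"data/root/a/c0": chunk.tobytes()})

    # read_part(start=2, nitems=3) becomes one request for bytes [8, 20):
    raw = store.get_partial_values([("data/root/a/c0", (2 * 4, 3 * 4))])[0]
    assert np.frombuffer(raw, dtype="<i4").tolist() == [2, 3, 4]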
@@ -696,3 +720,28 @@ def all_equal(value: Any, array: Any):
             # using == raises warnings from numpy deprecated pattern, but
             # using np.equal() raises type errors for structured dtypes...
             return np.all(value == array)
+
+
+def ensure_contiguous_ndarray_or_bytes(buf) -> Union[NDArrayLike, bytes]:
+    """Convenience function to coerce `buf` to ndarray-like array or bytes.
+
+    First check if `buf` can be zero-copy converted to a contiguous array.
+    If not, `buf` will be copied to a newly allocated `bytes` object.
+
+    Parameters
+    ----------
+    buf : ndarray-like, array-like, or bytes-like
+        A numpy-array-like object such as numpy.ndarray, cupy.ndarray, or
+        any object exporting a buffer interface.
+
+    Returns
+    -------
+    arr : NDArrayLike or bytes
+        An ndarray-like or bytes object.
+    """
+
+    try:
+        return ensure_contiguous_ndarray_like(buf)
+    except TypeError:
+        # An error is raised if `buf` couldn't be zero-copy converted
+        return ensure_bytes(buf)
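A short usage sketch of the new helper (assumes zarr 2.14.2 and numpy are installed): a contiguous array comes back through the zero-copy branch and shares memory with the input, while inputs whose ndarray conversion raises TypeError fall back to a fresh bytes copy.

    import numpy as np
    from zarr.util import ensure_contiguous_ndarray_or_bytes

    a = np.arange(8, dtype="u1")
    out = ensure_contiguous_ndarray_or_bytes(a)
    assert np.shares_memory(a, out)  # zero-copy branch: no bytes allocated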
