New Upstream Release - golang-github-u-root-uio

Ready changes

Summary

Merged new upstream version: 0.0~git20230305.3e8cd9d (was: 0.0~git20221213.c353755).

Diff

diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
new file mode 100644
index 0000000..0735439
--- /dev/null
+++ b/.github/workflows/golangci-lint.yml
@@ -0,0 +1,28 @@
+name: golangci-lint
+on:
+  push:
+    tags:
+      - v*
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+permissions:
+  contents: read
+  # Optional: allow read access to pull request. Use with `only-new-issues` option.
+  pull-requests: read
+jobs:
+  golangci:
+    name: lint
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: '1.20'
+      - uses: actions/checkout@v3
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v3
+        with:
+          version: latest
+          only-new-issues: true
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..58fb28b
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,64 @@
+linters:
+  enable:
+    - containedctx
+    - gocritic
+    - godot
+    - nilerr
+    - revive
+    - thelper
+    - unconvert
+
+issues:
+  include:
+    - EXC0012
+    - EXC0013
+    - EXC0014
+    - EXC0015
+linters-settings:
+  revive:
+    # Maximum number of open files at the same time.
+    # See https://github.com/mgechev/revive#command-line-flags
+    # Defaults to unlimited.
+    max-open-files: 2048
+    # When set to false, ignores files with "GENERATED" header, similar to golint.
+    # See https://github.com/mgechev/revive#available-rules for details.
+    # Default: false
+    ignore-generated-header: true
+    # Sets the default severity.
+    # See https://github.com/mgechev/revive#configuration
+    # Default: warning
+    severity: error
+    # Default: false
+    # Sets the default failure confidence.
+    # This means that linting errors with less than 0.8 confidence will be ignored.
+    # Default: 0.8
+    confidence: 0.8
+    rules:
+      - name: blank-imports
+      - name: context-as-argument
+        arguments:
+          - allowTypesBefore: "*testing.T,*github.com/user/repo/testing.Harness"
+      - name: context-keys-type
+      - name: error-return
+      - name: error-strings
+      - name: error-naming
+      - name: exported
+        arguments:
+          - "checkPrivateReceivers"
+          - "sayRepetitiveInsteadOfStutters"
+      - name: if-return
+      - name: increment-decrement
+      - name: var-naming
+      - name: var-declaration
+      - name: package-comments
+      - name: range
+      - name: receiver-naming
+      - name: time-naming
+      - name: unexported-return
+      - name: indent-error-flow
+      - name: errorf
+
+      - name: early-return
+      - name: file-header
+        arguments:
+          - "Copyright 20[1-2][0-9](-20[1-2][0-9])? the u-root Authors. All rights reserved Use of this source code is governed by a BSD-style license that can be found in the LICENSE file."
diff --git a/.revive.toml b/.revive.toml
new file mode 100644
index 0000000..c403b4d
--- /dev/null
+++ b/.revive.toml
@@ -0,0 +1,28 @@
+ignoreGeneratedHeader = false
+severity = "warning"
+confidence = 0.8
+errorCode = 0
+warningCode = 0
+
+[rule.blank-imports]
+[rule.context-as-argument]
+[rule.context-keys-type]
+[rule.dot-imports]
+[rule.error-return]
+[rule.error-strings]
+[rule.error-naming]
+[rule.exported]
+[rule.if-return]
+[rule.increment-decrement]
+[rule.var-naming]
+[rule.var-declaration]
+[rule.package-comments]
+[rule.range]
+[rule.receiver-naming]
+[rule.time-naming]
+[rule.unexported-return]
+[rule.indent-error-flow]
+[rule.errorf]
+[rule.early-return]
+[rule.file-header]
+  arguments=["Copyright 20[1-2][0-9](-20[1-2][0-9])? the u-root Authors. All rights reserved Use of this source code is governed by a BSD-style license that can be found in the LICENSE file."]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..84e0ed9
--- /dev/null
+++ b/README.md
@@ -0,0 +1,7 @@
+# uio
+
+[![CircleCI](https://circleci.com/gh/u-root/uio.svg?style=svg)](https://circleci.com/gh/u-root/uio)
+[![Go Report Card](https://goreportcard.com/badge/github.com/u-root/uio)](https://goreportcard.com/report/github.com/u-root/uio)
+[![GoDoc](https://godoc.org/github.com/u-root/uio?status.svg)](https://godoc.org/github.com/u-root/uio)
+[![Slack](https://slack.osfw.dev/badge.svg)](https://slack.osfw.dev)
+[![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://github.com/u-root/uio/blob/main/LICENSE)
diff --git a/cp/cmp/cmp.go b/cp/cmp/cmp.go
index 3215749..5ac382c 100644
--- a/cp/cmp/cmp.go
+++ b/cp/cmp/cmp.go
@@ -18,7 +18,7 @@ import (
 	"github.com/u-root/uio/uio"
 )
 
-// isEqualFile compare two files by checksum
+// isEqualFile compares two files by checksum.
 func isEqualFile(fpath1, fpath2 string) error {
 	file1, err := os.Open(fpath1)
 	if err != nil {
@@ -56,7 +56,7 @@ func stat(o cp.Options, path string) (os.FileInfo, error) {
 	return os.Stat(path)
 }
 
-// IsEqualTree compare the content in the file trees in src and dst paths
+// IsEqualTree compares the content in the file trees in src and dst paths.
 func IsEqualTree(o cp.Options, src, dst string) error {
 	srcInfo, err := stat(o, src)
 	if err != nil {
diff --git a/cp/cp_test.go b/cp/cp_test.go
index c5208ea..711fa49 100644
--- a/cp/cp_test.go
+++ b/cp/cp_test.go
@@ -2,59 +2,136 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-package cp_test
+package cp
 
 import (
-	"io/ioutil"
+	"errors"
+	"fmt"
+	"io/fs"
 	"os"
 	"path/filepath"
+	"strings"
 	"testing"
 
-	"github.com/u-root/uio/cp"
-	"github.com/u-root/uio/cp/cmp"
+	"golang.org/x/sys/unix"
 )
 
-func copyAndTest(t *testing.T, o cp.Options, src, dst string) {
-	if err := o.Copy(src, dst); err != nil {
-		t.Fatalf("Copy(%q -> %q) = %v, want %v", src, dst, err, nil)
-	}
-	if err := cmp.IsEqualTree(o, src, dst); err != nil {
-		t.Fatalf("Expected %q and %q to be same, got %v", src, dst, err)
-	}
-}
+var (
+	testdata = []byte("This is a test string")
+)
 
-func TestSimpleCopy(t *testing.T) {
-	tmpDir, err := ioutil.TempDir("", "u-root-pkg-cp-")
-	if err != nil {
-		t.Fatal(err)
+func TestCopySimple(t *testing.T) {
+	var err error
+	tmpdirDst := t.TempDir()
+	tmpdirSrc := t.TempDir()
+
+	srcfiles := make([]*os.File, 2)
+	dstfiles := make([]*os.File, 2)
+	for iterator := range srcfiles {
+		srcfiles[iterator], err = os.CreateTemp(tmpdirSrc, "file-to-copy"+fmt.Sprintf("%d", iterator))
+		if err != nil {
+			t.Errorf("failed to create temp file: %q", err)
+		}
+		if _, err = srcfiles[iterator].Write(testdata); err != nil {
+			t.Errorf("failed to write testdata to file")
+		}
+		dstfiles[iterator], err = os.CreateTemp(tmpdirDst, "file-to-copy"+fmt.Sprintf("%d", iterator))
+		if err != nil {
+			t.Errorf("failed to create temp file: %q", err)
+		}
 	}
-	defer os.RemoveAll(tmpDir)
 
-	// Copy a directory.
-	origd := filepath.Join(tmpDir, "directory")
-	if err := os.Mkdir(origd, 0744); err != nil {
-		t.Fatal(err)
+	sl := filepath.Join(tmpdirDst, "test-symlink")
+	if err := os.Symlink(srcfiles[1].Name(), sl); err != nil {
+		t.Errorf("creating symlink failed")
 	}
 
-	copyAndTest(t, cp.Default, origd, filepath.Join(tmpDir, "directory-copied"))
-	copyAndTest(t, cp.NoFollowSymlinks, origd, filepath.Join(tmpDir, "directory-copied-2"))
+	for _, tt := range []struct {
+		name    string
+		srcfile string
+		dstfile string
+		opt     Options
+		wantErr error
+	}{
+		{
+			name:    "Success",
+			srcfile: srcfiles[0].Name(),
+			dstfile: dstfiles[0].Name(),
+			opt:     Default,
+		},
+		{
+			name:    "SrcDstDirectoriesSuccess",
+			srcfile: tmpdirSrc,
+			dstfile: tmpdirDst,
+		},
+		{
+			name:    "SrcNotExist",
+			srcfile: "file-does-not-exist",
+			dstfile: dstfiles[0].Name(),
+			wantErr: fs.ErrNotExist,
+		},
+		{
+			name:    "DstIsDirectory",
+			srcfile: srcfiles[0].Name(),
+			dstfile: tmpdirDst,
+			wantErr: unix.EISDIR,
+		},
+		{
+			name:    "CopySymlink",
+			srcfile: sl,
+			dstfile: dstfiles[1].Name(),
+			opt: Options{
+				NoFollowSymlinks: false,
+			},
+		},
+		{
+			name:    "CopySymlinkFollow",
+			srcfile: sl,
+			dstfile: filepath.Join(tmpdirDst, "followed-symlink"),
+			opt: Options{
+				NoFollowSymlinks: true,
+			},
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			err := Copy(tt.srcfile, tt.dstfile)
+			if !errors.Is(err, tt.wantErr) {
+				t.Errorf("Test %q failed. Want: %q, Got: %q", tt.name, tt.wantErr, err)
+			}
+		})
+		// After every test with NoFollowSymlink we have to delete the created symlink.
+		if strings.Contains(tt.dstfile, "symlink") {
+			os.Remove(tt.dstfile)
+		}
 
-	// Copy a file.
-	origf := filepath.Join(tmpDir, "normal-file")
-	if err := ioutil.WriteFile(origf, []byte("F is for fire that burns down the whole town"), 0766); err != nil {
-		t.Fatal(err)
-	}
+		t.Run(tt.name, func(t *testing.T) {
+			if err := tt.opt.Copy(tt.srcfile, tt.dstfile); !errors.Is(err, tt.wantErr) {
+				t.Errorf("%q failed. Want: %q, Got: %q", tt.name, tt.wantErr, err)
+			}
+		})
+		// After every test with NoFollowSymlink we have to delete the created symlink.
+		if strings.Contains(tt.dstfile, "symlink") {
+			os.Remove(tt.dstfile)
+		}
 
-	copyAndTest(t, cp.Default, origf, filepath.Join(tmpDir, "normal-file-copied"))
-	copyAndTest(t, cp.NoFollowSymlinks, origf, filepath.Join(tmpDir, "normal-file-copied-2"))
+		t.Run(tt.name, func(t *testing.T) {
+			if err := tt.opt.CopyTree(tt.srcfile, tt.dstfile); !errors.Is(err, tt.wantErr) {
+				t.Errorf("Test %q failed. Want: %q, Got: %q", tt.name, tt.wantErr, err)
+			}
+		})
+		// After every test with NoFollowSymlink we have to delete the created symlink.
+		if strings.Contains(tt.dstfile, "symlink") {
+			os.Remove(tt.dstfile)
+		}
 
-	// Copy a symlink.
-	origs := filepath.Join(tmpDir, "foobar")
-	// foobar -> normal-file
-	if err := os.Symlink(origf, origs); err != nil {
-		t.Fatal(err)
+		t.Run(tt.name, func(t *testing.T) {
+			if err := CopyTree(tt.srcfile, tt.dstfile); !errors.Is(err, tt.wantErr) {
+				t.Errorf("Test %q failed. Want: %q, Got: %q", tt.name, tt.wantErr, err)
+			}
+		})
+		// After every test with NoFollowSymlink we have to delete the created symlink.
+		if strings.Contains(tt.dstfile, "symlink") {
+			os.Remove(tt.dstfile)
+		}
 	}
-
-	copyAndTest(t, cp.Default, origf, filepath.Join(tmpDir, "foobar-copied"))
-	copyAndTest(t, cp.NoFollowSymlinks, origf, filepath.Join(tmpDir, "foobar-copied-just-symlink"))
 }
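
For context, a minimal usage sketch of the cp API that the rewritten tests exercise; the /tmp paths are placeholders, while Copy, Options, and CopyTree are the package's real entry points:

```go
package main

import (
	"log"

	"github.com/u-root/uio/cp"
)

func main() {
	// Copy a single file with the default options (symlinks are followed).
	if err := cp.Copy("/tmp/src-file", "/tmp/dst-file"); err != nil {
		log.Fatal(err)
	}

	// Copy a whole tree, preserving symlinks instead of following them.
	opts := cp.Options{NoFollowSymlinks: true}
	if err := opts.CopyTree("/tmp/src-dir", "/tmp/dst-dir"); err != nil {
		log.Fatal(err)
	}
}
```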
diff --git a/debian/changelog b/debian/changelog
index dd1bf53..a7fb61c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+golang-github-u-root-uio (0.0~git20230305.3e8cd9d-1) UNRELEASED; urgency=low
+
+  * New upstream snapshot.
+  * New upstream snapshot.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Mon, 03 Apr 2023 03:03:35 -0000
+
 golang-github-u-root-uio (0.0~git20220204.dac05f7-2) unstable; urgency=medium
 
   * Source only upload for testing migration
diff --git a/go.mod b/go.mod
index 117914c..ef2f482 100644
--- a/go.mod
+++ b/go.mod
@@ -1,5 +1,9 @@
 module github.com/u-root/uio
 
-go 1.15
+go 1.16
 
-require golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea
+require (
+	github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531
+	github.com/pierrec/lz4/v4 v4.1.14
+	golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664
+)
diff --git a/go.sum b/go.sum
index 750c835..2a8038e 100644
--- a/go.sum
+++ b/go.sum
@@ -1,2 +1,6 @@
-golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea h1:+WiDlPBBaO+h9vPNZi8uJ3k4BkKQB7Iow3aqwHVA5hI=
-golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531 h1:3HNVAxEgGca1i23Ai/8DeCmibx02jBvTHAT11INaVfU=
+github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
+github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE=
+github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664 h1:wEZYwx+kK+KlZ0hpvP2Ls1Xr4+RWnlzGFwPP0aiDjIU=
+golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/rand/random.go b/rand/random.go
index 90e0a98..e189199 100644
--- a/rand/random.go
+++ b/rand/random.go
@@ -43,7 +43,7 @@ type contextReader interface {
 // ctxReader takes a contextReader and turns it into a ContextReader.
 type ctxReader struct {
 	contextReader
-	ctx context.Context
+	ctx context.Context //nolint:containedctx
 }
 
 func (cr ctxReader) Read(b []byte) (int, error) {
diff --git a/rand/random_linux.go b/rand/random_linux.go
index 2f059ca..42931cc 100644
--- a/rand/random_linux.go
+++ b/rand/random_linux.go
@@ -49,7 +49,7 @@ func (r *getrandomReader) ReadContext(ctx context.Context, b []byte) (int, error
 		// initialized.
 		n, err := unix.Getrandom(b, unix.GRND_NONBLOCK)
 		if err == nil {
-			return n, err
+			return n, nil
 		}
 		select {
 		case <-ctx.Done():
diff --git a/rand/random_urandom.go b/rand/random_urandom.go
index ab527c4..cd6e263 100644
--- a/rand/random_urandom.go
+++ b/rand/random_urandom.go
@@ -45,7 +45,7 @@ func (r *urandomReader) ReadContext(ctx context.Context, b []byte) (int, error)
 	for {
 		n, err := unix.Read(r.fd, b)
 		if err == nil {
-			return n, err
+			return n, nil
 		}
 		select {
 		case <-ctx.Done():
diff --git a/ubinary/big_endian.go b/ubinary/big_endian.go
deleted file mode 100644
index 8a1f944..0000000
--- a/ubinary/big_endian.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2018 the u-root Authors. All rights reserved
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build mips mips64 ppc64 s390x
-
-package ubinary
-
-import (
-	"encoding/binary"
-)
-
-// NativeEndian is $GOARCH's implementation of byte order.
-var NativeEndian = binary.BigEndian
diff --git a/ubinary/doc.go b/ubinary/doc.go
deleted file mode 100644
index 5d5c5b7..0000000
--- a/ubinary/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2018 the u-root Authors. All rights reserved
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ubinary provides a native endian binary.ByteOrder.
-package ubinary
diff --git a/ubinary/little_endian.go b/ubinary/little_endian.go
deleted file mode 100644
index 317bb91..0000000
--- a/ubinary/little_endian.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2018 the u-root Authors. All rights reserved
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build 386 amd64 arm arm64 mipsle mips64le ppc64le riscv riscv64
-
-package ubinary
-
-import (
-	"encoding/binary"
-)
-
-// NativeEndian is $GOARCH's implementation of byte order.
-var NativeEndian = binary.LittleEndian
diff --git a/ubinary/ubinary.go b/ubinary/ubinary.go
new file mode 100644
index 0000000..cc89007
--- /dev/null
+++ b/ubinary/ubinary.go
@@ -0,0 +1,16 @@
+// Copyright 2018 the u-root Authors. All rights reserved
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ubinary provides a native endian binary.ByteOrder.
+//
+// Deprecated: use github.com/josharian/native instead.
+package ubinary
+
+import "github.com/josharian/native"
+
+// NativeEndian is $GOARCH's implementation of byte order.
+//
+// Deprecated: use github.com/josharian/native.Endian. This package
+// now just forwards to that one.
+var NativeEndian = native.Endian
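
Since ubinary now merely forwards to github.com/josharian/native, old and new call sites are interchangeable. A minimal migration sketch:

```go
package main

import (
	"fmt"

	"github.com/josharian/native"
	"github.com/u-root/uio/ubinary"
)

func main() {
	buf := make([]byte, 2)
	// Deprecated spelling; still works through the forwarding variable.
	ubinary.NativeEndian.PutUint16(buf, 0xBEEF)
	// Preferred spelling going forward.
	fmt.Println(native.Endian.Uint16(buf) == 0xBEEF) // true
}
```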
diff --git a/uio/archivereader.go b/uio/archivereader.go
new file mode 100644
index 0000000..4a3a9fc
--- /dev/null
+++ b/uio/archivereader.go
@@ -0,0 +1,85 @@
+// Copyright 2021 the u-root Authors. All rights reserved
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uio
+
+import (
+	"bytes"
+	"errors"
+	"io"
+
+	"github.com/pierrec/lz4/v4"
+)
+
+const (
+	// defaultArchivePreReadSizeBytes is the number of bytes pre-read from
+	// an io.Reader so they can be matched against known archive headers.
+	defaultArchivePreReadSizeBytes = 1024
+)
+
+var ErrPreReadError = errors.New("pre-read nothing")
+
+// ArchiveReader reads from an io.Reader, decompressing the source
+// bytes when applicable.
+//
+// It allows probing for multiple archive formats, while still being
+// able to read from the beginning, by pre-reading a small number of bytes.
+//
+// Always use NewArchiveReader to initialize.
+type ArchiveReader struct {
+	// src is where we read source bytes.
+	src io.Reader
+	// buf stores pre-read bytes from original io.Reader. Archive format
+	// detection will be done against it.
+	buf []byte
+
+	// preReadSizeBytes is how many bytes we pre-read for magic number
+	// matching for each archive type. This should be greater than or
+	// equal to the largest header frame size of each supported archive
+	// format.
+	preReadSizeBytes int
+}
+
+func NewArchiveReader(r io.Reader) (ArchiveReader, error) {
+	ar := ArchiveReader{
+		src: r,
+		// Randomly chosen, should be enough for most types:
+		//
+		// e.g. gzip with 10 byte header, lz4 with a header size
+		// between 7 and 19 bytes.
+		preReadSizeBytes: defaultArchivePreReadSizeBytes,
+	}
+	pbuf := make([]byte, ar.preReadSizeBytes)
+
+	nr, err := io.ReadFull(r, pbuf)
+	// In case the image is smaller than the pre-read block size (1 KiB for now).
+	// Possible? Probably not when compression is needed!
+	ar.buf = pbuf[:nr]
+	if err == io.EOF {
+		// If we could not pre-read anything, we can't determine if
+		// it is a compressed file.
+		ar.src = io.MultiReader(bytes.NewReader(pbuf[:nr]), r)
+		return ar, ErrPreReadError
+	}
+
+	// Try each supported compression type, return upon first match.
+
+	// Try lz4.
+	// magic number error will be thrown if source is not a lz4 archive.
+	// e.g. "lz4: bad magic number".
+	if ok, err := lz4.ValidFrameHeader(ar.buf); err == nil && ok {
+		ar.src = lz4.NewReader(io.MultiReader(bytes.NewReader(ar.buf), r))
+		return ar, nil
+	}
+
+	// Try other archive types here, gzip, xz, etc when needed.
+
+	// Last resort, read as is.
+	ar.src = io.MultiReader(bytes.NewReader(ar.buf), r)
+	return ar, nil
+}
+
+func (ar ArchiveReader) Read(p []byte) (n int, err error) {
+	return ar.src.Read(p)
+}
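
A usage sketch for the new reader, not part of the diff: wrap any io.Reader and read transparently, whether or not the payload is lz4-compressed.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/pierrec/lz4/v4"
	"github.com/u-root/uio/uio"
)

func main() {
	// Produce an lz4-compressed payload.
	var compressed bytes.Buffer
	w := lz4.NewWriter(&compressed)
	if _, err := w.Write([]byte("hello, archive")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// NewArchiveReader pre-reads the header, detects the lz4 frame and
	// decompresses on the fly; plain input would be passed through as-is.
	ar, err := uio.NewArchiveReader(&compressed)
	if err != nil {
		log.Fatal(err)
	}
	out, err := io.ReadAll(ar)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out) // hello, archive
}
```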
diff --git a/uio/archivereader_test.go b/uio/archivereader_test.go
new file mode 100644
index 0000000..52e70ae
--- /dev/null
+++ b/uio/archivereader_test.go
@@ -0,0 +1,255 @@
+// Copyright 2021 the u-root Authors. All rights reserved
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uio
+
+import (
+	"bytes"
+	"io"
+	"math/rand"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/pierrec/lz4/v4"
+)
+
+const choices = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+func TestArchiveReaderRegular(t *testing.T) {
+	dataStr := strings.Repeat("This is an important data!@#$%^^&&*&**(()())", 1000)
+
+	ar, err := NewArchiveReader(bytes.NewReader([]byte(dataStr)))
+	if err != nil {
+		t.Fatalf("newArchiveReader(bytes.NewReader(%v)) returned error: %v", []byte(dataStr), err)
+	}
+
+	buf := new(strings.Builder)
+	if _, err := io.Copy(buf, ar); err != nil {
+		t.Errorf("io.Copy(%v, %v) returned error: %v, want nil.", buf, ar, err)
+	}
+	if buf.String() != dataStr {
+		t.Errorf("got %s, want %s", buf.String(), dataStr)
+	}
+}
+
+func TestArchiveReaderPreReadShort(t *testing.T) {
+	dataStr := "short data"
+	ar, err := NewArchiveReader(bytes.NewReader([]byte(dataStr)))
+	if err != nil {
+		t.Errorf("newArchiveReader(bytes.NewReader([]byte(%s))) returned err: %v, want nil", dataStr, err)
+	}
+	got, err := io.ReadAll(ar)
+	if err != nil {
+		t.Errorf("got error reading archive reader: %v, want nil", err)
+	}
+	if string(got) != dataStr {
+		t.Errorf("got %s, want %s", string(got), dataStr)
+	}
+	// Pre-read nothing.
+	dataStr = ""
+	ar, err = NewArchiveReader(bytes.NewReader([]byte(dataStr)))
+	if err != ErrPreReadError {
+		t.Errorf("newArchiveReader(bytes.NewReader([]byte(%s))) returned err: %v, want %v", dataStr, err, ErrPreReadError)
+	}
+	got, err = io.ReadAll(ar)
+	if err != nil {
+		t.Errorf("got error reading archive reader: %v, want nil", err)
+	}
+	if string(got) != dataStr {
+		t.Errorf("got %s, want %s", string(got), dataStr)
+	}
+}
+
+// randomString generates random string of fixed length in a fast and simple way.
+func randomString(l int) string {
+	rand.Seed(time.Now().UnixNano())
+	r := make([]byte, l)
+	for i := 0; i < l; i++ {
+		r[i] = choices[rand.Intn(len(choices))]
+	}
+	return string(r)
+}
+
+func checkArchiveReaderLZ4(t *testing.T, tt archiveReaderLZ4Case) {
+	t.Helper()
+
+	srcR := bytes.NewReader([]byte(tt.dataStr))
+
+	srcBuf := new(bytes.Buffer)
+	lz4w := tt.setup(srcBuf)
+
+	n, err := io.Copy(lz4w, srcR)
+	if err != nil {
+		t.Fatalf("io.Copy(%v, %v) returned error: %v, want nil", lz4w, srcR, err)
+	}
+	if n != int64(len([]byte(tt.dataStr))) {
+		t.Fatalf("got %d bytes compressed, want %d", n, len([]byte(tt.dataStr)))
+	}
+	if err = lz4w.Close(); err != nil {
+		t.Fatalf("Failed to close lz4 writer: %v", err)
+	}
+
+	// Test ArchiveReader reading it.
+	ar, err := NewArchiveReader(bytes.NewReader(srcBuf.Bytes()))
+	if err != nil {
+		t.Fatalf("newArchiveReader(bytes.NewReader(%v)) returned error: %v", srcBuf.Bytes(), err)
+	}
+	buf := new(strings.Builder)
+	if _, err := io.Copy(buf, ar); err != nil {
+		t.Errorf("io.Copy(%v, %v) returned error: %v, want nil.", buf, ar, err)
+	}
+	if buf.String() != tt.dataStr {
+		t.Errorf("got %s, want %s", buf.String(), tt.dataStr)
+	}
+}
+
+type archiveReaderLZ4Case struct {
+	name    string
+	setup   func(w io.Writer) *lz4.Writer
+	dataStr string
+}
+
+func TestArchiveReaderLZ4(t *testing.T) {
+	for _, tt := range []archiveReaderLZ4Case{
+		{
+			name:    "non-legacy regular",
+			setup:   lz4.NewWriter,
+			dataStr: randomString(1024),
+		},
+		{
+			name:    "non-legacy larger data",
+			setup:   lz4.NewWriter,
+			dataStr: randomString(5 * 1024),
+		},
+		{
+			name:    "non-legacy short data", // Likely not realistic for most cases in the real world.
+			setup:   lz4.NewWriter,
+			dataStr: randomString(100), // Smaller than pre-read size, 1024 bytes.
+		},
+		{
+			name: "legacy regular",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.LegacyOption(true)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(1024),
+		},
+		{
+			name: "legacy larger data",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.LegacyOption(true)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(5 * 1024),
+		},
+		{
+			name: "legacy small data",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.LegacyOption(true)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(100), // Smaller than pre-read size, 1024 bytes.
+		},
+		{
+			name: "legacy small data",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.LegacyOption(true)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(100), // Smaller than pre-read size, 1024 bytes.
+		},
+		{
+			name: "regular larger data with fast compression",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.CompressionLevelOption(lz4.Fast)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(5 * 1024),
+		},
+		{
+			name: "legacy larger data with fast compression",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.LegacyOption(true), lz4.CompressionLevelOption(lz4.Fast)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(5 * 1024),
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			checkArchiveReaderLZ4(t, tt)
+		})
+	}
+}
+
+func TestArchiveReaderLZ4SlowCompressed(t *testing.T) {
+	for _, tt := range []archiveReaderLZ4Case{
+		{
+			name: "regular larger data with medium compression",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.CompressionLevelOption(lz4.Level5)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(5 * 1024),
+		},
+		{
+			name: "regular larger data with slow compression",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.CompressionLevelOption(lz4.Level9)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(5 * 1024),
+		},
+		{
+			name: "legacy larger data with medium compression",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.LegacyOption(true), lz4.CompressionLevelOption(lz4.Level5)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(5 * 1024),
+		},
+		{
+			name: "legacy larger data with slow compression",
+			setup: func(w io.Writer) *lz4.Writer {
+				lz4w := lz4.NewWriter(w)
+				if err := lz4w.Apply(lz4.LegacyOption(true), lz4.CompressionLevelOption(lz4.Level9)); err != nil {
+					t.Fatal(err)
+				}
+				return lz4w
+			},
+			dataStr: randomString(5 * 1024),
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			checkArchiveReaderLZ4(t, tt)
+		})
+	}
+}
diff --git a/uio/buffer.go b/uio/buffer.go
index 506e741..158a4d1 100644
--- a/uio/buffer.go
+++ b/uio/buffer.go
@@ -6,9 +6,10 @@ package uio
 
 import (
 	"encoding/binary"
+	"errors"
 	"fmt"
 
-	"github.com/u-root/uio/ubinary"
+	"github.com/josharian/native"
 )
 
 // Marshaler is the interface implemented by an object that can marshal itself
@@ -93,11 +94,15 @@ func (b *Buffer) WriteN(n int) []byte {
 	return b.data[len(b.data)-n:]
 }
 
+// ErrBufferTooShort is returned when a caller wants to read more bytes than
+// are available in the buffer.
+var ErrBufferTooShort = errors.New("buffer too short")
+
 // ReadN consumes n bytes from the Buffer. It returns nil, false if there
 // aren't enough bytes left.
 func (b *Buffer) ReadN(n int) ([]byte, error) {
 	if !b.Has(n) {
-		return nil, fmt.Errorf("buffer too short at position %d: have %d bytes, want %d bytes", b.byteCount, b.Len(), n)
+		return nil, fmt.Errorf("%w at position %d: have %d bytes, want %d bytes", ErrBufferTooShort, b.byteCount, b.Len(), n)
 	}
 	rval := b.data[:n]
 	b.data = b.data[n:]
@@ -129,12 +134,12 @@ func (b *Buffer) Cap() int {
 //
 // Use:
 //
-//   func (s *something) Unmarshal(l *Lexer) {
-//     s.Foo = l.Read8()
-//     s.Bar = l.Read8()
-//     s.Baz = l.Read16()
-//     return l.Error()
-//   }
+//	func (s *something) Unmarshal(l *Lexer) {
+//	  s.Foo = l.Read8()
+//	  s.Bar = l.Read8()
+//	  s.Baz = l.Read16()
+//	  return l.Error()
+//	}
 type Lexer struct {
 	*Buffer
 
@@ -173,11 +178,14 @@ func NewBigEndianBuffer(b []byte) *Lexer {
 func NewNativeEndianBuffer(b []byte) *Lexer {
 	return &Lexer{
 		Buffer: NewBuffer(b),
-		order:  ubinary.NativeEndian,
+		order:  native.Endian,
 	}
 }
 
-func (l *Lexer) setError(err error) {
+// SetError sets the error if no error has previously been set.
+//
+// The error can later be retrieved with the Error or FinError methods.
+func (l *Lexer) SetError(err error) {
 	if l.err == nil {
 		l.err = err
 	}
@@ -189,7 +197,7 @@ func (l *Lexer) setError(err error) {
 func (l *Lexer) Consume(n int) []byte {
 	v, err := l.Buffer.ReadN(n)
 	if err != nil {
-		l.setError(err)
+		l.SetError(err)
 		return nil
 	}
 	return v
@@ -204,6 +212,9 @@ func (l *Lexer) Error() error {
 	return l.err
 }
 
+// ErrUnreadBytes is returned when there is more data left to read in the buffer.
+var ErrUnreadBytes = errors.New("buffer contains unread bytes")
+
 // FinError returns an error if an error occurred or if there is more data left
 // to read in the buffer.
 func (l *Lexer) FinError() error {
@@ -211,7 +222,7 @@ func (l *Lexer) FinError() error {
 		return l.err
 	}
 	if l.Buffer.Len() > 0 {
-		return fmt.Errorf("buffer contains more bytes than it should")
+		return ErrUnreadBytes
 	}
 	return nil
 }
@@ -224,7 +235,7 @@ func (l *Lexer) Read8() uint8 {
 	if v == nil {
 		return 0
 	}
-	return uint8(v[0])
+	return v[0]
 }
 
 // Read16 reads a 16-bit value from the Buffer.
@@ -303,7 +314,7 @@ func (l *Lexer) Read(p []byte) (int, error) {
 //
 // If an error occurred, Error() will return a non-nil error.
 func (l *Lexer) ReadData(data interface{}) {
-	l.setError(binary.Read(l, l.order, data))
+	l.SetError(binary.Read(l, l.order, data))
 }
 
 // WriteData writes a binary representation of data to the buffer.
@@ -312,14 +323,14 @@ func (l *Lexer) ReadData(data interface{}) {
 //
 // If an error occurred, Error() will return a non-nil error.
 func (l *Lexer) WriteData(data interface{}) {
-	l.setError(binary.Write(l, l.order, data))
+	l.SetError(binary.Write(l, l.order, data))
 }
 
 // Write8 writes a byte to the Buffer.
 //
 // If an error occurred, Error() will return a non-nil error.
 func (l *Lexer) Write8(v uint8) {
-	l.append(1)[0] = byte(v)
+	l.append(1)[0] = v
 }
 
 // Write16 writes a 16-bit value to the Buffer.
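
With the wrapped sentinel in place, callers can classify short reads using errors.Is instead of matching error strings. A minimal sketch:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/u-root/uio/uio"
)

func main() {
	// Two bytes available, but we try to consume four; the Lexer
	// records the failure internally.
	l := uio.NewBigEndianBuffer([]byte{0x01, 0x02})
	_ = l.Consume(4)

	if errors.Is(l.Error(), uio.ErrBufferTooShort) {
		fmt.Println("short buffer detected")
	}
}
```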
diff --git a/uio/null.go b/uio/null.go
index 64156f4..7f3caeb 100644
--- a/uio/null.go
+++ b/uio/null.go
@@ -40,7 +40,7 @@ var blackHolePool = sync.Pool{
 
 func (devNull) ReadFrom(r io.Reader) (n int64, err error) {
 	bufp := blackHolePool.Get().(*[]byte)
-	readSize := 0
+	var readSize int
 	for {
 		readSize, err = r.Read(*bufp)
 		n += int64(readSize)
diff --git a/uio/progress.go b/uio/progress.go
index 80b9d3c..e2b595e 100644
--- a/uio/progress.go
+++ b/uio/progress.go
@@ -26,17 +26,17 @@ type ProgressReadCloser struct {
 func (rc *ProgressReadCloser) Read(p []byte) (n int, err error) {
 	defer func() {
 		numSymbols := (rc.counter%rc.Interval + n) / rc.Interval
-		rc.W.Write([]byte(strings.Repeat(rc.Symbol, numSymbols)))
+		_, _ = rc.W.Write([]byte(strings.Repeat(rc.Symbol, numSymbols)))
 		rc.counter += n
 		rc.written = (rc.written || numSymbols > 0)
 		if err == io.EOF && rc.written {
-			rc.W.Write([]byte("\n"))
+			_, _ = rc.W.Write([]byte("\n"))
 		}
 	}()
 	return rc.RC.Read(p)
 }
 
-// Close implements io.Closer for ProgressReader.
+// Close implements io.Closer for ProgressReadCloser.
 func (rc *ProgressReadCloser) Close() error {
 	return rc.RC.Close()
 }
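
For reference, a sketch of typical use, mirroring the test below: one Symbol is written to W per Interval bytes read.

```go
package main

import (
	"io"
	"os"
	"strings"

	"github.com/u-root/uio/uio"
)

func main() {
	prc := &uio.ProgressReadCloser{
		RC:       io.NopCloser(strings.NewReader("01234567890123456789")),
		Symbol:   "#",
		Interval: 5,         // one symbol per 5 bytes read
		W:        os.Stdout, // where the progress symbols go
	}
	// Prints "####" while reading, then a trailing newline at EOF.
	if _, err := io.ReadAll(prc); err != nil {
		panic(err)
	}
	_ = prc.Close()
}
```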
diff --git a/uio/progress_test.go b/uio/progress_test.go
index 868149a..2c161bb 100644
--- a/uio/progress_test.go
+++ b/uio/progress_test.go
@@ -6,12 +6,12 @@ package uio
 
 import (
 	"bytes"
-	"io/ioutil"
+	"io"
 	"testing"
 )
 
 func TestProgressReadCloser(t *testing.T) {
-	input := ioutil.NopCloser(bytes.NewBufferString("01234567890123456789"))
+	input := io.NopCloser(bytes.NewBufferString("01234567890123456789"))
 	stdout := &bytes.Buffer{}
 	prc := ProgressReadCloser{
 		RC:       input,
@@ -22,20 +22,20 @@ func TestProgressReadCloser(t *testing.T) {
 
 	// Read one byte at a time.
 	output := make([]byte, 1)
-	prc.Read(output)
-	prc.Read(output)
-	prc.Read(output)
+	_, _ = prc.Read(output)
+	_, _ = prc.Read(output)
+	_, _ = prc.Read(output)
 	if len(stdout.Bytes()) != 0 {
 		t.Errorf("found %q, but expected no bytes to be written", stdout)
 	}
-	prc.Read(output)
+	_, _ = prc.Read(output)
 	if stdout.String() != "#" {
 		t.Errorf("found %q, expected %q to be written", stdout.String(), "#")
 	}
 
 	// Read 9 bytes all at once.
 	output = make([]byte, 9)
-	prc.Read(output)
+	_, _ = prc.Read(output)
 	if stdout.String() != "###" {
 		t.Errorf("found %q, expected %q to be written", stdout.String(), "###")
 	}
@@ -44,7 +44,7 @@ func TestProgressReadCloser(t *testing.T) {
 	}
 
 	// Read until EOF
-	output, err := ioutil.ReadAll(&prc)
+	output, err := io.ReadAll(&prc)
 	if err != nil {
 		t.Errorf("got %v, expected nil error", err)
 	}
diff --git a/uio/reader.go b/uio/reader.go
index a32d665..0ca839a 100644
--- a/uio/reader.go
+++ b/uio/reader.go
@@ -7,8 +7,8 @@ package uio
 import (
 	"bytes"
 	"io"
-	"io/ioutil"
 	"math"
+	"os"
 	"reflect"
 )
 
@@ -26,7 +26,7 @@ func ReadAll(r io.ReaderAt) ([]byte, error) {
 	if imra, ok := r.(inMemReaderAt); ok {
 		return imra.Bytes(), nil
 	}
-	return ioutil.ReadAll(Reader(r))
+	return io.ReadAll(Reader(r))
 }
 
 // Reader generates a Reader from a ReaderAt.
@@ -46,3 +46,22 @@ func ReaderAtEqual(r1, r2 io.ReaderAt) bool {
 	}
 	return bytes.Equal(c, d) && reflect.DeepEqual(r1err, r2err)
 }
+
+// ReadIntoFile reads everything from the io.Reader into the file at the given path.
+//
+// If the file at the given path does not exist, a new file will be created.
+// If the file already exists and is not empty, it will be truncated first.
+func ReadIntoFile(r io.Reader, p string) error {
+	f, err := os.OpenFile(p, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	_, err = io.Copy(f, r)
+	if err != nil {
+		return err
+	}
+
+	return f.Close()
+}
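
A usage sketch for the new helper; the destination path is a placeholder.

```go
package main

import (
	"log"
	"strings"

	"github.com/u-root/uio/uio"
)

func main() {
	r := strings.NewReader("payload to persist")
	// Creates the file if missing, truncates any existing content,
	// then copies everything from r into it.
	if err := uio.ReadIntoFile(r, "/tmp/uio-demo"); err != nil {
		log.Fatal(err)
	}
}
```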
diff --git a/uio/reader_test.go b/uio/reader_test.go
new file mode 100644
index 0000000..c6ebfc7
--- /dev/null
+++ b/uio/reader_test.go
@@ -0,0 +1,64 @@
+// Copyright 2021 the u-root Authors. All rights reserved
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uio
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+)
+
+func readAndCheck(t *testing.T, want, tmpfileP string) {
+	t.Helper()
+	r := strings.NewReader(want)
+	if err := ReadIntoFile(r, tmpfileP); err != nil {
+		t.Errorf("ReadIntoFile(%v, %s) = %v, want no error", r, tmpfileP, err)
+	}
+
+	got, err := os.ReadFile(tmpfileP)
+	if err != nil {
+		t.Fatalf("os.ReadFile(%s) = %v, want no error", tmpfileP, err)
+	}
+	if want != string(got) {
+		t.Errorf("got: %v, want %s", string(got), want)
+	}
+}
+
+func TestReadIntoFile(t *testing.T) {
+	want := "I am the wanted"
+
+	dir := t.TempDir()
+
+	// Write to a file that already exists.
+	p := filepath.Join(dir, "uio-out")
+	// Expect net effect to be creating a new empty file: "uio-out".
+	f, err := os.OpenFile(p, os.O_RDONLY|os.O_CREATE|os.O_TRUNC, 0o755)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := f.Close(); err != nil {
+		t.Fatal(err)
+	}
+	readAndCheck(t, want, f.Name())
+
+	// Write to a file that does not exist.
+	p = filepath.Join(dir, "uio-out-not-existing")
+	readAndCheck(t, want, p)
+
+	// Write to an existing file that has pre-existing content.
+	p = filepath.Join(dir, "uio-out-prexist-content")
+	f, err = os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := f.Write([]byte("temporary file's content")); err != nil {
+		t.Fatal(err)
+	}
+	if err := f.Close(); err != nil {
+		t.Fatal(err)
+	}
+	readAndCheck(t, want, p)
+}
diff --git a/uio/uiotest/uiotest.go b/uio/uiotest/uiotest.go
index 303f35c..5e670be 100644
--- a/uio/uiotest/uiotest.go
+++ b/uio/uiotest/uiotest.go
@@ -21,6 +21,7 @@ func NowLog() string {
 
 // TestLineWriter is an io.Writer that logs full lines of serial to tb.
 func TestLineWriter(tb testing.TB, prefix string) io.WriteCloser {
+	tb.Helper()
 	if len(prefix) > 0 {
 		return uio.FullLineWriter(&testLinePrefixWriter{tb: tb, prefix: prefix})
 	}
diff --git a/ulog/log.go b/ulog/log.go
index b8ec3a8..fbf00f7 100644
--- a/ulog/log.go
+++ b/ulog/log.go
@@ -9,10 +9,7 @@
 // testing.TB.Logf. To use the test logger import "ulog/ulogtest".
 package ulog
 
-import (
-	"log"
-	"os"
-)
+import "log"
 
 // Logger is a log receptacle.
 //
@@ -22,8 +19,8 @@ type Logger interface {
 	Print(v ...interface{})
 }
 
-// Log is a Logger that prints to stderr, like the default log package.
-var Log Logger = log.New(os.Stderr, "", log.LstdFlags)
+// Log is a Logger that prints to the log package's default logger.
+var Log Logger = log.Default()
 
 type emptyLogger struct{}
 
diff --git a/ulog/log_test.go b/ulog/log_test.go
new file mode 100644
index 0000000..b6462d6
--- /dev/null
+++ b/ulog/log_test.go
@@ -0,0 +1,25 @@
+// Copyright 2023 the u-root Authors. All rights reserved
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ulog
+
+import (
+	"log"
+	"strings"
+	"testing"
+)
+
+func TestDefault(t *testing.T) {
+	var b strings.Builder
+	log.SetPrefix("[foobar] ")
+	log.SetOutput(&b)
+	log.SetFlags(0)
+
+	Log.Printf("Some output")
+
+	want := "[foobar] Some output\n"
+	if got := b.String(); got != want {
+		t.Errorf("log is %q, want %q", got, want)
+	}
+}
