Codebase list golang-github-vbatts-go-mtree / 1cf0881
New upstream version 0.4.2 Dmitry Smirnov 5 years ago
106 changed file(s) with 7871 addition(s) and 0 deletion(s).
0 *~
1 .cli.test
2 .lint
3 .test
4 .vet
5 gomtree
0 language: go
1 go:
2 - 1.x
3 - 1.8.x
4 - 1.7.x
5 - 1.6.3
6
7 sudo: false
8
9 before_install:
10 - git config --global url."https://".insteadOf git://
11 - go get -u github.com/golang/lint/golint
12 - go get -u github.com/Masterminds/glide
13 - mkdir -p $GOPATH/src/github.com/vbatts && ln -sf $(pwd) $GOPATH/src/github.com/vbatts/go-mtree
14
15 install: true
16
17 script:
18 - make validation
19 - make validation.tags
20 - make build.arches
0 // Available variables which can be used inside of strings.
1 // ${workspaceRoot}: the root folder of the workspace
2 // ${file}: the current opened file
3 // ${fileBasename}: the current opened file's basename
4 // ${fileDirname}: the current opened file's dirname
5 // ${fileExtname}: the current opened file's extension
6 // ${cwd}: the current working directory of the spawned process
7
8 {
9 // See https://go.microsoft.com/fwlink/?LinkId=733558
10 // for the documentation about the tasks.json format
11 "version": "2.0.0",
12 "tasks": [
13 {
14 "taskName": "build",
15 "type": "shell",
16 "command": "time go build .",
17 "problemMatcher": [
18 "$go"
19 ],
20 "group": {
21 "kind": "build",
22 "isDefault": true
23 },
24 "presentation": {
25 "echo": true,
26 "reveal": "silent",
27 "focus": true,
28 "panel": "shared"
29 }
30 },
31 {
32 "taskName": "build.arches",
33 "type": "shell",
34 "command": "make build.arches",
35 "problemMatcher": [
36 "$go"
37 ],
38 "group": "build",
39 "presentation": {
40 "echo": true,
41 "reveal": "always",
42 "focus": true,
43 "panel": "shared"
44 }
45 },
46 {
47 "taskName": "test",
48 "command": "time go test -v .",
49 "type": "shell",
50 "group": {
51 "kind": "test",
52 "isDefault": true
53 },
54 "problemMatcher": [
55 "$go"
56 ],
57 "presentation": {
58 "echo": true,
59 "reveal": "always",
60 "focus": true,
61 "panel": "shared"
62 }
63 }
64 ]
65 }
0 Copyright (c) 2016 Vincent Batts, Raleigh, NC, USA
1
2 All rights reserved.
3
4 Redistribution and use in source and binary forms, with or without
5 modification, are permitted provided that the following conditions are met:
6
7 1. Redistributions of source code must retain the above copyright notice, this
8 list of conditions and the following disclaimer.
9
10 2. Redistributions in binary form must reproduce the above copyright notice,
11 this list of conditions and the following disclaimer in the documentation
12 and/or other materials provided with the distribution.
13
14 3. Neither the name of the copyright holder nor the names of its contributors
15 may be used to endorse or promote products derived from this software without
16 specific prior written permission.
17
18 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
19 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0
1 BUILD := gomtree
2 BUILDPATH := github.com/vbatts/go-mtree/cmd/gomtree
3 CWD := $(shell pwd)
4 SOURCE_FILES := $(shell find . -type f -name "*.go")
5 CLEAN_FILES := *~
6 TAGS :=
7 ARCHES := linux,386 linux,amd64 linux,arm linux,arm64 openbsd,amd64 windows,amd64 darwin,amd64
8
9 default: build validation
10
11 .PHONY: validation
12 validation: .test .lint .vet .cli.test
13
14 .PHONY: validation.tags
15 validation.tags: .test.tags .vet.tags .cli.test
16
17 .PHONY: test
18 test: .test
19
20 CLEAN_FILES += .test .test.tags
21
22 .test: $(SOURCE_FILES)
23 go test -v $$(glide novendor) && touch $@
24
25 .test.tags: $(SOURCE_FILES)
26 set -e ; for tag in $(TAGS) ; do go test -tags $$tag -v $$(glide novendor) ; done && touch $@
27
28 .PHONY: lint
29 lint: .lint
30
31 CLEAN_FILES += .lint
32
33 .lint: $(SOURCE_FILES)
34 set -e ; for dir in $$(glide novendor) ; do golint -set_exit_status $$dir ; done && touch $@
35
36 .PHONY: vet
37 vet: .vet .vet.tags
38
39 CLEAN_FILES += .vet .vet.tags
40
41 .vet: $(SOURCE_FILES)
42 go vet $$(glide novendor) && touch $@
43
44 .vet.tags: $(SOURCE_FILES)
45 set -e ; for tag in $(TAGS) ; do go vet -tags $$tag -v $$(glide novendor) ; done && touch $@
46
47 .PHONY: cli.test
48 cli.test: .cli.test
49
50 CLEAN_FILES += .cli.test .cli.test.tags
51
52 .cli.test: $(BUILD) $(wildcard ./test/cli/*.sh)
53 @go run ./test/cli.go ./test/cli/*.sh && touch $@
54
55 .cli.test.tags: $(BUILD) $(wildcard ./test/cli/*.sh)
56 @set -e ; for tag in $(TAGS) ; do go run -tags $$tag ./test/cli.go ./test/cli/*.sh ; done && touch $@
57
58 .PHONY: build
59 build: $(BUILD)
60
61 $(BUILD): $(SOURCE_FILES)
62 go build -o $(BUILD) $(BUILDPATH)
63
64 ./bin:
65 mkdir -p $@
66
67 CLEAN_FILES += bin
68
69 build.arches: ./bin
70 @set -e ;\
71 for pair in $(ARCHES); do \
72 p=$$(echo $$pair | cut -d , -f 1);\
73 a=$$(echo $$pair | cut -d , -f 2);\
74 echo "Building $$p/$$a ...";\
75 GOOS=$$p GOARCH=$$a go build -o ./bin/gomtree.$$p.$$a $(BUILDPATH) ;\
76 done
77
78 clean:
79 rm -rf $(BUILD) $(CLEAN_FILES)
80
0 # go-mtree
1
2 [![Go Report Card](https://goreportcard.com/badge/github.com/vbatts/go-mtree)](https://goreportcard.com/report/github.com/vbatts/go-mtree)
3
4 `mtree` is a filesystem hierarchy validation tool and format.
5 This project is a library and simple CLI tool for [mtree(8)][mtree(8)] support.
6
7 While the traditional `mtree` CLI utility is primarily available on the BSDs (FreeBSD,
8 OpenBSD, etc.), even broader support for the `mtree` specification format is
9 provided by libarchive ([libarchive-formats(5)][libarchive-formats(5)]).
10
11 There is also an [mtree port for Linux][archiecobbs/mtree-port], though it is
12 not widely packaged for Linux distributions.
13
14
15 ## Format
16
17 The format of the hierarchy specification is consistent with the `# mtree v2.0`
18 format. Both the BSD `mtree` and libarchive ought to be interoperable with it,
19 with only one definite caveat. On Linux, extended attributes (`xattr`) on
20 files are often a critical aspect of the file, holding ACLs, capabilities, etc.
21 While FreeBSD filesystems do support `extattr`, this feature has not made its
22 way into their `mtree`.
23
24 This implementation of mtree supports a few non-upstream keywords, such as
25 `xattr` and `tar_time`. If you include these keywords, the FreeBSD `mtree`
26 will fail, as they are unknown to that implementation.
27
28 To have `go-mtree` produce specifications that will be
29 strictly compatible with the BSD `mtree`, use the `-bsd-keywords` flag when
30 creating a manifest. This ensures that only the keywords supported by the
31 BSD `mtree` are emitted.
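
For example, a strictly BSD-compatible manifest might be created like this (the keyword added with `-K` is only illustrative):

```bash
# Non-upstream keywords are reported on stderr and skipped, so the result
# stays readable by the BSD mtree.
gomtree -c -bsd-keywords -K sha256digest -p . > /tmp/root.mtree
```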
32
33
34 ### Typical form
35
36 With the standard keywords, plus say `sha256digest`, the hierarchy
37 specification looks like:
38
39 ```mtree
40 # .
41 /set type=file nlink=1 mode=0664 uid=1000 gid=100
42 . size=4096 type=dir mode=0755 nlink=6 time=1459370393.273231538
43 LICENSE size=1502 mode=0644 time=1458851690.0 sha256digest=ef4e53d83096be56dc38dbf9bc8ba9e3068bec1ec37c179033d1e8f99a1c2a95
44 README.md size=2820 mode=0644 time=1459370256.316148361 sha256digest=d9b955134d99f84b17c0a711ce507515cc93cd7080a9dcd50400e3d993d876ac
45
46 [...]
47 ```
48
49 The first entry is the directory we are presently in, followed by the files it contains.
50 Along with each path are the keywords and the values unique to that path. Any common
51 keywords and values are established in the `/set` command; `LICENSE` above, for example, inherits `type=file`, `nlink=1`, `uid=1000`, and `gid=100` from the preceding `/set` line.
52
53
54 ### Extended attributes form
55
56 ```mtree
57 # .
58 /set type=file nlink=1 mode=0664 uid=1000 gid=1000
59 . size=4096 type=dir mode=0775 nlink=6 time=1459370191.11179595 xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA==
60 LICENSE size=1502 time=1458851690.583562292 xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA==
61 README.md size=2366 mode=0644 time=1459369604.0 xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA==
62
63 [...]
64 ```
65
66 Note the keywords prefixed with `xattr.`, followed by the extended attribute's
67 namespace and key. This layout works for Linux extended
68 attributes as well as FreeBSD extended attributes.
69
70 Since extended attributes are an unordered hashmap, this approach allows for
71 checking each `<namespace>.<key>` individually.
72
73 The value is the [base64 encoding][base64] of the particular extended
74 attribute's value. Since the values themselves could be raw bytes, this
75 approach avoids issues with encoding.
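
As a standalone illustration (not part of the `go-mtree` API), the SELinux label in the example above can be recovered with Go's standard base64 decoder, assuming the standard padded alphabet:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Value of xattr.security.selinux from the example manifest above.
	const val = "dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA=="

	raw, err := base64.StdEncoding.DecodeString(val)
	if err != nil {
		panic(err)
	}
	// Prints the label (note the trailing NUL byte):
	// "unconfined_u:object_r:user_home_t:s0\x00"
	fmt.Printf("%q\n", raw)
}
```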
76
77 ### Tar form
78
79 ```mtree
80 # .
81 /set type=file mode=0664 uid=1000 gid=1000
82 . type=dir mode=0775 tar_time=1468430408.000000000
83
84 # samedir
85 samedir type=dir mode=0775 tar_time=1468000972.000000000
86 file2 size=0 tar_time=1467999782.000000000
87 file1 size=0 tar_time=1467999781.000000000
88
89 [...]
90 ```
91
92 While `go-mtree` serves mainly as a library for upstream `mtree` support,
93 `go-mtree` is also compatible with [tar archives][tar] (which is not an upstream feature).
94 This means that we can now create and validate a manifest by specifying a tar file.
95 More interestingly, this also means that we can create a manifest from an archive, and then
96 validate this manifest against a filesystem hierarchy that's on disk, and vice versa.
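
For example, one such round trip might look like the following (the archive name and unpacked directory are placeholders):

```bash
# Create a manifest from an archive ...
gomtree -c -K sha256digest -T rootfs.tar > /tmp/tar.mtree

# ... then validate an unpacked copy of that archive against it.
gomtree -p ./rootfs -f /tmp/tar.mtree
```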
97
98 Notice that when creating a validation manifest from a tar file, the default behavior
99 is to evaluate time with the `tar_time` keyword. In the
100 "filesystem hierarchy" format of mtree, `time` is evaluated with
101 nanosecond precision. However, GNU tar truncates a file's modification time
102 to 1-second precision. That is, if a file's full modification time is
103 123456789.123456789, the "tar time" equivalent would be 123456789.000000000.
104 This way, if you validate a manifest created using a tar file against an
105 actual root directory, there will be no complaints from `go-mtree` so long as the
106 1-second precision time of a file in the root directory is the same.
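
As a rough sketch of that truncation (mirroring how the comparison code in this repository rewrites `time` values into `tar_time` form):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A full-precision modification time ...
	mtime := time.Unix(123456789, 123456789)

	// ... and its 1-second-precision "tar time" equivalent.
	fmt.Printf("time=%d.%09d\n", mtime.Unix(), mtime.Nanosecond()) // time=123456789.123456789
	fmt.Printf("tar_time=%d.000000000\n", mtime.Unix())            // tar_time=123456789.000000000
}
```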
107
108
109 ## Usage
110
111 To use the Go programming language library, see [the docs][godoc].
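
A minimal sketch of library use (mirroring the `ExampleCheck` test in this repository) walks a directory and then validates it against the resulting hierarchy:

```go
package main

import (
	"fmt"

	"github.com/vbatts/go-mtree"
)

func main() {
	// Walk the current directory with the default keywords plus sha1.
	dh, err := mtree.Walk(".", nil, append(mtree.DefaultKeywords, "sha1"), nil)
	if err != nil {
		panic(err)
	}

	// Check the same directory against the hierarchy we just produced.
	res, err := mtree.Check(".", dh, nil, nil)
	if err != nil {
		panic(err)
	}
	for _, delta := range res {
		fmt.Println(delta) // each delta describes a failed path or keyword
	}
}
```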
112
113 To use the command line tool, first [build it](#building), then use it as follows.
114
115
116 ### Create a manifest
117
118 This will also include the sha512 digest of the files.
119
120 ```bash
121 gomtree -c -K sha512digest -p . > /tmp/root.mtree
122 ```
123
124 With a tar file:
125
126 ```bash
127 gomtree -c -K sha512digest -T sometarfile.tar > /tmp/tar.mtree
128 ```
129
130 ### Validate a manifest
131
132 ```bash
133 gomtree -p . -f /tmp/root.mtree
134 ```
135
136 With a tar file:
137
138 ```bash
139 gomtree -T sometarfile.tar -f /tmp/root.mtree
140 ```
141
142 ### See the supported keywords
143
144 ```bash
145 gomtree -list-keywords
146 Available keywords:
147 uname
148 sha1
149 sha1digest
150 sha256digest
151 xattrs (not upstream)
152 link (default)
153 nlink (default)
154 md5digest
155 rmd160digest
156 mode (default)
157 cksum
158 md5
159 rmd160
160 type (default)
161 time (default)
162 uid (default)
163 gid (default)
164 sha256
165 sha384
166 sha512
167 xattr (not upstream)
168 tar_time (not upstream)
169 size (default)
170 ripemd160digest
171 sha384digest
172 sha512digest
173 ```
174
175
176 ## Building
177
178 Either:
179
180 ```bash
181 go get github.com/vbatts/go-mtree/cmd/gomtree
182 ```
183
184 or
185
186 ```bash
187 git clone git://github.com/vbatts/go-mtree.git $GOPATH/src/github.com/vbatts/go-mtree
188 cd $GOPATH/src/github.com/vbatts/go-mtree
189 go build ./cmd/gomtree
190 ```
191
192 ## Testing
193
194 On Linux:
195 ```bash
196 cd $GOPATH/src/github.com/vbatts/go-mtree
197 make
198 ```
199
200 On FreeBSD:
201 ```bash
202 cd $GOPATH/src/github.com/vbatts/go-mtree
203 gmake
204 ```
205
206
207 [mtree(8)]: https://www.freebsd.org/cgi/man.cgi?mtree(8)
208 [libarchive-formats(5)]: https://www.freebsd.org/cgi/man.cgi?query=libarchive-formats&sektion=5&n=1
209 [archiecobbs/mtree-port]: https://github.com/archiecobbs/mtree-port
210 [godoc]: https://godoc.org/github.com/vbatts/go-mtree
211 [tar]: http://man7.org/linux/man-pages/man1/tar.1.html
212 [base64]: https://tools.ietf.org/html/rfc4648
0 package mtree
1
2 // Check a root directory path against the DirectoryHierarchy, regarding only
3 // the available keywords from the list and each entry in the hierarchy.
4 // If keywords is nil, then all keywords present in the DirectoryHierarchy are checked.
5 //
6 // This is equivalent to creating a new DirectoryHierarchy with Walk(root, nil,
7 // keywords, fs) and then doing a Compare(dh, newDh, keywords).
8 func Check(root string, dh *DirectoryHierarchy, keywords []Keyword, fs FsEval) ([]InodeDelta, error) {
9 if keywords == nil {
10 keywords = dh.UsedKeywords()
11 }
12
13 newDh, err := Walk(root, nil, keywords, fs)
14 if err != nil {
15 return nil, err
16 }
17
18 return Compare(dh, newDh, keywords)
19 }
20
21 // TarCheck is the tar equivalent of checking a file hierarchy spec against a
22 // tar stream to determine if files have been changed. This is precisely
23 // equivalent to Compare(dh, tarDH, keywords).
24 func TarCheck(tarDH, dh *DirectoryHierarchy, keywords []Keyword) ([]InodeDelta, error) {
25 if keywords == nil {
26 return Compare(dh, tarDH, dh.UsedKeywords())
27 }
28 return Compare(dh, tarDH, keywords)
29 }
0 package mtree
1
2 import (
3 "bytes"
4 "io/ioutil"
5 "os"
6 "path/filepath"
7 "testing"
8 "time"
9 )
10
11 // simple walk of current directory, and immediately check it.
12 // may not be parallelizable.
13 func TestCheck(t *testing.T) {
14 dh, err := Walk(".", nil, append(DefaultKeywords, []Keyword{"sha1", "xattr"}...), nil)
15 if err != nil {
16 t.Fatal(err)
17 }
18
19 res, err := Check(".", dh, nil, nil)
20 if err != nil {
21 t.Fatal(err)
22 }
23
24 if len(res) > 0 {
25 t.Errorf("%#v", res)
26 }
27 }
28
29 // make a directory, walk it, check it, modify the timestamp and ensure it fails.
30 // only check again for size and sha1, and ignore time, and ensure it passes
31 func TestCheckKeywords(t *testing.T) {
32 content := []byte("I know half of you half as well as I ought to")
33 dir, err := ioutil.TempDir("", "test-check-keywords")
34 if err != nil {
35 t.Fatal(err)
36 }
37 defer os.RemoveAll(dir) // clean up
38
39 tmpfn := filepath.Join(dir, "tmpfile")
40 if err := ioutil.WriteFile(tmpfn, content, 0666); err != nil {
41 t.Fatal(err)
42 }
43
44 // Walk this tempdir
45 dh, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
46 if err != nil {
47 t.Fatal(err)
48 }
49
50 // Check for sanity. This ought to pass.
51 res, err := Check(dir, dh, nil, nil)
52 if err != nil {
53 t.Fatal(err)
54 }
55 if len(res) > 0 {
56 t.Errorf("%#v", res)
57 }
58
59 // Touch a file, so the mtime changes.
60 newtime := time.Date(2006, time.February, 1, 3, 4, 5, 0, time.UTC)
61 if err := os.Chtimes(tmpfn, newtime, newtime); err != nil {
62 t.Fatal(err)
63 }
64
65 // Check again. This ought to fail.
66 res, err = Check(dir, dh, nil, nil)
67 if err != nil {
68 t.Fatal(err)
69 }
70 if len(res) != 1 {
71 t.Fatal("expected to get 1 delta on changed mtimes, but did not")
72 }
73 if res[0].Type() != Modified {
74 t.Errorf("expected to get modified delta on changed mtimes, but did not")
75 }
76
77 // Check again, but only sha1 and mode. This ought to pass.
78 res, err = Check(dir, dh, []Keyword{"sha1", "mode"}, nil)
79 if err != nil {
80 t.Fatal(err)
81 }
82 if len(res) > 0 {
83 t.Errorf("%#v", res)
84 }
85 }
86
87 func ExampleCheck() {
88 dh, err := Walk(".", nil, append(DefaultKeywords, "sha1"), nil)
89 if err != nil {
90 // handle error ...
91 }
92
93 res, err := Check(".", dh, nil, nil)
94 if err != nil {
95 // handle error ...
96 }
97 if len(res) > 0 {
98 // handle failed validity ...
99 }
100 }
101
102 // Tests default action for evaluating a symlink, which is just to compare the
103 // link itself, not to follow it
104 func TestDefaultBrokenLink(t *testing.T) {
105 dh, err := Walk("./testdata/dirwithbrokenlink", nil, append(DefaultKeywords, "sha1"), nil)
106 if err != nil {
107 t.Fatal(err)
108 }
109 res, err := Check("./testdata/dirwithbrokenlink", dh, nil, nil)
110 if err != nil {
111 t.Fatal(err)
112 }
113 if len(res) > 0 {
114 for _, delta := range res {
115 t.Error(delta)
116 }
117 }
118 }
119
120 // https://github.com/vbatts/go-mtree/issues/8
121 func TestTimeComparison(t *testing.T) {
122 dir, err := ioutil.TempDir("", "test-time.")
123 if err != nil {
124 t.Fatal(err)
125 }
126 defer os.RemoveAll(dir)
127
128 // This is the format of time from FreeBSD
129 spec := `
130 /set type=file time=5.000000000
131 . type=dir
132 file time=5.000000000
133 ..
134 `
135
136 fh, err := os.Create(filepath.Join(dir, "file"))
137 if err != nil {
138 t.Fatal(err)
139 }
140 // This is the time we're checking for: a round integer of epoch seconds
141 epoch := time.Unix(5, 0)
142 if err := os.Chtimes(fh.Name(), epoch, epoch); err != nil {
143 t.Fatal(err)
144 }
145 if err := os.Chtimes(dir, epoch, epoch); err != nil {
146 t.Fatal(err)
147 }
148 if err := fh.Close(); err != nil {
149 t.Error(err)
150 }
151
152 dh, err := ParseSpec(bytes.NewBufferString(spec))
153 if err != nil {
154 t.Fatal(err)
155 }
156
157 res, err := Check(dir, dh, nil, nil)
158 if err != nil {
159 t.Error(err)
160 }
161 if len(res) > 0 {
162 t.Fatal(res)
163 }
164 }
165
166 func TestTarTime(t *testing.T) {
167 dir, err := ioutil.TempDir("", "test-tar-time.")
168 if err != nil {
169 t.Fatal(err)
170 }
171 defer os.RemoveAll(dir)
172
173 // This is the format of time from FreeBSD
174 spec := `
175 /set type=file time=5.454353132
176 . type=dir time=5.123456789
177 file time=5.911134111
178 ..
179 `
180
181 fh, err := os.Create(filepath.Join(dir, "file"))
182 if err != nil {
183 t.Fatal(err)
184 }
185 // This is the time we're checking for: a round integer of epoch seconds
186 epoch := time.Unix(5, 0)
187 if err := os.Chtimes(fh.Name(), epoch, epoch); err != nil {
188 t.Fatal(err)
189 }
190 if err := os.Chtimes(dir, epoch, epoch); err != nil {
191 t.Fatal(err)
192 }
193 if err := fh.Close(); err != nil {
194 t.Error(err)
195 }
196
197 dh, err := ParseSpec(bytes.NewBufferString(spec))
198 if err != nil {
199 t.Fatal(err)
200 }
201
202 keywords := dh.UsedKeywords()
203
204 // make sure "time" keyword works
205 _, err = Check(dir, dh, keywords, nil)
206 if err != nil {
207 t.Error(err)
208 }
209
210 // make sure tar_time wins
211 res, err := Check(dir, dh, append(keywords, "tar_time"), nil)
212 if err != nil {
213 t.Error(err)
214 }
215 if len(res) > 0 {
216 t.Fatal(res)
217 }
218 }
219
220 func TestIgnoreComments(t *testing.T) {
221 dir, err := ioutil.TempDir("", "test-comments.")
222 if err != nil {
223 t.Fatal(err)
224 }
225 defer os.RemoveAll(dir)
226
227 // This is the format of time from FreeBSD
228 spec := `
229 /set type=file time=5.000000000
230 . type=dir
231 file1 time=5.000000000
232 ..
233 `
234
235 fh, err := os.Create(filepath.Join(dir, "file1"))
236 if err != nil {
237 t.Fatal(err)
238 }
239 // This is the time we're checking for: a round integer of epoch seconds
240 epoch := time.Unix(5, 0)
241 if err := os.Chtimes(fh.Name(), epoch, epoch); err != nil {
242 t.Fatal(err)
243 }
244 if err := os.Chtimes(dir, epoch, epoch); err != nil {
245 t.Fatal(err)
246 }
247 if err := fh.Close(); err != nil {
248 t.Error(err)
249 }
250
251 dh, err := ParseSpec(bytes.NewBufferString(spec))
252 if err != nil {
253 t.Fatal(err)
254 }
255
256 res, err := Check(dir, dh, nil, nil)
257 if err != nil {
258 t.Error(err)
259 }
260
261 if len(res) > 0 {
262 t.Fatal(res)
263 }
264
265 // now change the spec to a comment that looks like an actual Entry but has
266 // whitespace in front of it
267 spec = `
268 /set type=file time=5.000000000
269 . type=dir
270 file1 time=5.000000000
271 #file2 time=5.000000000
272 ..
273 `
274 dh, err = ParseSpec(bytes.NewBufferString(spec))
275
276 res, err = Check(dir, dh, nil, nil)
277 if err != nil {
278 t.Error(err)
279 }
280
281 if len(res) > 0 {
282 t.Fatal(res)
283 }
284 }
285
286 func TestCheckNeedsEncoding(t *testing.T) {
287 dir, err := ioutil.TempDir("", "test-needs-encoding")
288 if err != nil {
289 t.Fatal(err)
290 }
291 defer os.RemoveAll(dir)
292
293 fh, err := os.Create(filepath.Join(dir, "file[ "))
294 if err != nil {
295 t.Fatal(err)
296 }
297 if err := fh.Close(); err != nil {
298 t.Error(err)
299 }
300 fh, err = os.Create(filepath.Join(dir, " , should work"))
301 if err != nil {
302 t.Fatal(err)
303 }
304 if err := fh.Close(); err != nil {
305 t.Error(err)
306 }
307
308 dh, err := Walk(dir, nil, DefaultKeywords, nil)
309 if err != nil {
310 t.Fatal(err)
311 }
312 res, err := Check(dir, dh, nil, nil)
313 if err != nil {
314 t.Fatal(err)
315 }
316 if len(res) > 0 {
317 t.Fatal(res)
318 }
319 }
0 package mtree
1
2 import (
3 "bufio"
4 "io"
5 )
6
7 const posixPolynomial uint32 = 0x04C11DB7
8
9 // cksum is an implementation of the POSIX CRC algorithm
10 func cksum(r io.Reader) (uint32, int, error) {
11 in := bufio.NewReader(r)
12 count := 0
13 var sum uint32
14 f := func(b byte) {
15 for i := 7; i >= 0; i-- {
16 msb := sum & (1 << 31)
17 sum = sum << 1
18 if msb != 0 {
19 sum = sum ^ posixPolynomial
20 }
21 }
22 sum ^= uint32(b)
23 }
24
25 for done := false; !done; {
26 switch b, err := in.ReadByte(); err {
27 case io.EOF:
28 done = true
29 case nil:
30 f(b)
31 count++
32 default:
33 return ^sum, count, err
34 }
35 }
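// Per the POSIX cksum algorithm: mix in the byte count (least-significant
// octet first), shift in four zero octets (the x^32 factor of the CRC), and
// return the one's complement of the accumulated remainder.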
36 for m := count; ; {
37 f(byte(m) & 0xff)
38 m = m >> 8
39 if m == 0 {
40 break
41 }
42 }
43 f(0)
44 f(0)
45 f(0)
46 f(0)
47 return ^sum, count, nil
48 }
0 package mtree
1
2 import (
3 "os"
4 "testing"
5 )
6
7 var (
8 checkFile = "./testdata/source.mtree"
9 checkSum uint32 = 1048442895
10 checkSize = 9110
11 )
12
13 // testing that the cksum function matches that of cksum(1) utility (silly POSIX crc32)
14 func TestCksum(t *testing.T) {
15 fh, err := os.Open(checkFile)
16 if err != nil {
17 t.Fatal(err)
18 }
19 defer fh.Close()
20 sum, i, err := cksum(fh)
21 if err != nil {
22 t.Fatal(err)
23 }
24 if i != checkSize {
25 t.Errorf("%q: expected size %d, got %d", checkFile, checkSize, i)
26 }
27 if sum != checkSum {
28 t.Errorf("%q: expected sum %d, got %d", checkFile, checkSum, sum)
29 }
30 }
0 package main
1
2 import (
3 "bytes"
4 "encoding/json"
5 "flag"
6 "fmt"
7 "io"
8 "io/ioutil"
9 "os"
10 "strings"
11
12 "github.com/Sirupsen/logrus"
13 "github.com/vbatts/go-mtree"
14 )
15
16 var (
17 // Flags common with mtree(8)
18 flCreate = flag.Bool("c", false, "create a directory hierarchy spec")
19 flFile = flag.String("f", "", "directory hierarchy spec to validate")
20 flPath = flag.String("p", "", "root path that the hierarchy spec is relative to")
21 flAddKeywords = flag.String("K", "", "Add the specified (delimited by comma or space) keywords to the current set of keywords")
22 flUseKeywords = flag.String("k", "", "Use the specified (delimited by comma or space) keywords as the current set of keywords")
23 flDirectoryOnly = flag.Bool("d", false, "Ignore everything except directory type files")
24 flUpdateAttributes = flag.Bool("u", false, "Modify the owner, group, permissions and xattrs of files, symbolic links and devices, to match the provided specification. This is not compatible with '-T'.")
25
26 // Flags unique to gomtree
27 flListKeywords = flag.Bool("list-keywords", false, "List the keywords available")
28 flResultFormat = flag.String("result-format", "bsd", "output the validation results using the given format (bsd, json, path)")
29 flTar = flag.String("T", "", "use tar archive to create or validate a directory hierarchy spec (\"-\" indicates stdin)")
30 flBsdKeywords = flag.Bool("bsd-keywords", false, "only operate on keywords that are supported by upstream mtree(8)")
31 flListUsedKeywords = flag.Bool("list-used", false, "list all the keywords found in a validation manifest")
32 flDebug = flag.Bool("debug", false, "output debug info to STDERR")
33 flVersion = flag.Bool("version", false, "display the version of this tool")
34 )
35
36 func main() {
37 // so that defers cleanly exec
38 if err := app(); err != nil {
39 logrus.Fatal(err)
40 }
41 }
42
43 func app() error {
44 flag.Parse()
45
46 if *flDebug {
47 os.Setenv("DEBUG", "1")
48 logrus.SetLevel(logrus.DebugLevel)
49 }
50
51 if *flVersion {
52 fmt.Printf("%s :: %s\n", mtree.AppName, mtree.Version)
53 return nil
54 }
55
56 // -list-keywords
57 if *flListKeywords {
58 fmt.Println("Available keywords:")
59 for k := range mtree.KeywordFuncs {
60 fmt.Print(" ")
61 fmt.Print(k)
62 if mtree.Keyword(k).Default() {
63 fmt.Print(" (default)")
64 }
65 if !mtree.Keyword(k).Bsd() {
66 fmt.Print(" (not upstream)")
67 }
68 fmt.Print("\n")
69 }
70 return nil
71 }
72
73 // --result-format
74 formatFunc, ok := formats[*flResultFormat]
75 if !ok {
76 return fmt.Errorf("invalid output format: %s", *flResultFormat)
77 }
78
79 var (
80 err error
81 tmpKeywords []mtree.Keyword
82 currentKeywords []mtree.Keyword
83 )
84
85 // -k <keywords>
86 if *flUseKeywords != "" {
87 tmpKeywords = splitKeywordsArg(*flUseKeywords)
88 if !mtree.InKeywordSlice("type", tmpKeywords) {
89 tmpKeywords = append([]mtree.Keyword{"type"}, tmpKeywords...)
90 }
91 } else {
92 if *flTar != "" {
93 tmpKeywords = mtree.DefaultTarKeywords[:]
94 } else {
95 tmpKeywords = mtree.DefaultKeywords[:]
96 }
97 }
98
99 // -K <keywords>
100 if *flAddKeywords != "" {
101 for _, kw := range splitKeywordsArg(*flAddKeywords) {
102 if !mtree.InKeywordSlice(kw, tmpKeywords) {
103 tmpKeywords = append(tmpKeywords, kw)
104 }
105 }
106 }
107
108 // -bsd-keywords
109 if *flBsdKeywords {
110 for _, k := range tmpKeywords {
111 if mtree.Keyword(k).Bsd() {
112 currentKeywords = append(currentKeywords, k)
113 } else {
114 fmt.Fprintf(os.Stderr, "INFO: ignoring %q as it is not an upstream keyword\n", k)
115 }
116 }
117 } else {
118 currentKeywords = tmpKeywords
119 }
120
121 // Check mutual exclusivity of keywords.
122 // TODO(cyphar): Abstract this inside keywords.go.
123 if mtree.InKeywordSlice("tar_time", currentKeywords) && mtree.InKeywordSlice("time", currentKeywords) {
124 return fmt.Errorf("tar_time and time are mutually exclusive keywords")
125 }
126
127 // If we're doing a comparison, we always are comparing between a spec and
128 // state DH. If specDh is nil, we are generating a new one.
129 var (
130 specDh *mtree.DirectoryHierarchy
131 stateDh *mtree.DirectoryHierarchy
132 specKeywords []mtree.Keyword
133 )
134
135 // -f <file>
136 if *flFile != "" && !*flCreate {
137 // load the hierarchy, if we're not creating a new spec
138 fh, err := os.Open(*flFile)
139 if err != nil {
140 return err
141 }
142 specDh, err = mtree.ParseSpec(fh)
143 fh.Close()
144 if err != nil {
145 return err
146 }
147
148 // We can't check against more fields than in the specKeywords list, so
149 // currentKeywords can only have a subset of specKeywords.
150 specKeywords = specDh.UsedKeywords()
151 }
152
153 // -list-used
154 if *flListUsedKeywords {
155 if specDh == nil {
156 return fmt.Errorf("no specification provided. please provide a validation manifest")
157 }
158
159 if *flResultFormat == "json" {
160 // if they're asking for json, give it to them
161 data := map[string][]mtree.Keyword{*flFile: specKeywords}
162 buf, err := json.MarshalIndent(data, "", " ")
163 if err != nil {
164 return err
165 }
166 fmt.Println(string(buf))
167 } else {
168 fmt.Printf("Keywords used in [%s]:\n", *flFile)
169 for _, kw := range specKeywords {
170 fmt.Printf(" %s", kw)
171 if _, ok := mtree.KeywordFuncs[kw]; !ok {
172 fmt.Print(" (unsupported)")
173 }
174 fmt.Printf("\n")
175 }
176 }
177 return nil
178 }
179
180 if specKeywords != nil {
181 // If we didn't actually change the set of keywords, we can just use specKeywords.
182 if *flUseKeywords == "" && *flAddKeywords == "" {
183 currentKeywords = specKeywords
184 }
185
186 for _, keyword := range currentKeywords {
187 // As always, time is a special case.
188 // TODO: Fix that.
189 if (keyword == "time" && mtree.InKeywordSlice("tar_time", specKeywords)) || (keyword == "tar_time" && mtree.InKeywordSlice("time", specKeywords)) {
190 continue
191 }
192 }
193 }
194
195 // -p and -T are mutually exclusive
196 if *flPath != "" && *flTar != "" {
197 return fmt.Errorf("options -T and -p are mutually exclusive")
198 }
199
200 // -p <path>
201 var rootPath = "."
202 if *flPath != "" {
203 rootPath = *flPath
204 }
205
206 excludes := []mtree.ExcludeFunc{}
207 // -d
208 if *flDirectoryOnly {
209 excludes = append(excludes, mtree.ExcludeNonDirectories)
210 }
211
212 // -u
213 // Failing early here. Processing is done below.
214 if *flUpdateAttributes && *flTar != "" {
215 return fmt.Errorf("ERROR: -u can not be used with -T")
216 }
217
218 // -T <tar file>
219 if *flTar != "" {
220 var input io.Reader
221 if *flTar == "-" {
222 input = os.Stdin
223 } else {
224 fh, err := os.Open(*flTar)
225 if err != nil {
226 return err
227 }
228 defer fh.Close()
229 input = fh
230 }
231 ts := mtree.NewTarStreamer(input, excludes, currentKeywords)
232
233 if _, err := io.Copy(ioutil.Discard, ts); err != nil && err != io.EOF {
234 return err
235 }
236 if err := ts.Close(); err != nil {
237 return err
238 }
239 var err error
240 stateDh, err = ts.Hierarchy()
241 if err != nil {
242 return err
243 }
244 } else {
245 // with a root directory
246 stateDh, err = mtree.Walk(rootPath, excludes, currentKeywords, nil)
247 if err != nil {
248 return err
249 }
250 }
251
252 // -u
253 if *flUpdateAttributes && stateDh != nil {
254 // -u
255 // this comes before the next case, intentionally.
256 result, err := mtree.Update(rootPath, specDh, mtree.DefaultUpdateKeywords, nil)
257 if err != nil {
258 return err
259 }
260 if len(result) > 0 {
261 fmt.Printf("%#v\n", result)
262 }
263
264 var res []mtree.InodeDelta
265 // only check the keywords that we just updated
266 res, err = mtree.Check(rootPath, specDh, mtree.DefaultUpdateKeywords, nil)
267 if err != nil {
268 return err
269 }
270 if res != nil {
271 out := formatFunc(res)
272 if _, err := os.Stdout.Write([]byte(out)); err != nil {
273 return err
274 }
275
276 // TODO: This should be a flag. Allowing files to be added and
277 // removed and still returning "it's all good" is simply
278 // unsafe IMO.
279 for _, diff := range res {
280 if diff.Type() == mtree.Modified {
281 return fmt.Errorf("manifest validation failed")
282 }
283 }
284 }
285
286 return nil
287 }
288
289 // -c
290 if *flCreate {
291 fh := os.Stdout
292 if *flFile != "" {
293 fh, err = os.Create(*flFile)
294 if err != nil {
295 return err
296 }
297 }
298
299 // output stateDh
300 stateDh.WriteTo(fh)
301 return nil
302 }
303
304 // no spec manifest has been provided yet, so look for it on stdin
305 if specDh == nil {
306 // load the hierarchy
307 specDh, err = mtree.ParseSpec(os.Stdin)
308 if err != nil {
309 return err
310 }
311
312 // We can't check against more fields than in the specKeywords list, so
313 // currentKeywords can only have a subset of specKeywords.
314 specKeywords = specDh.UsedKeywords()
315 }
316
317 // This is a validation.
318 if specDh != nil && stateDh != nil {
319 var res []mtree.InodeDelta
320 res, err = mtree.Compare(specDh, stateDh, currentKeywords)
321 if err != nil {
322 return err
323 }
324 if res != nil {
325 if isTarSpec(specDh) || *flTar != "" {
326 res = filterMissingKeywords(res)
327 }
328
329 out := formatFunc(res)
330 if _, err := os.Stdout.Write([]byte(out)); err != nil {
331 return err
332 }
333
334 // TODO: This should be a flag. Allowing files to be added and
335 // removed and still returning "it's all good" is simply
336 // unsafe IMO.
337 for _, diff := range res {
338 if diff.Type() == mtree.Modified {
339 return fmt.Errorf("manifest validation failed")
340 }
341 }
342 }
343 } else {
344 return fmt.Errorf("neither validating nor creating a manifest; please provide additional arguments")
345 }
346 return nil
347 }
348
349 var formats = map[string]func([]mtree.InodeDelta) string{
350 // Outputs the errors in the BSD format.
351 "bsd": func(d []mtree.InodeDelta) string {
352 var buffer bytes.Buffer
353 for _, delta := range d {
354 fmt.Fprintln(&buffer, delta)
355 }
356 return buffer.String()
357 },
358
359 // Outputs the full result struct in JSON.
360 "json": func(d []mtree.InodeDelta) string {
361 var buffer bytes.Buffer
362 if err := json.NewEncoder(&buffer).Encode(d); err != nil {
363 panic(err)
364 }
365 return buffer.String()
366 },
367
368 // Outputs only the paths which failed to validate.
369 "path": func(d []mtree.InodeDelta) string {
370 var buffer bytes.Buffer
371 for _, delta := range d {
372 if delta.Type() == mtree.Modified {
373 fmt.Fprintln(&buffer, delta.Path())
374 }
375 }
376 return buffer.String()
377 },
378 }
379
380 // isDirEntry returns whether an mtree.Entry describes a directory.
381 func isDirEntry(e mtree.Entry) bool {
382 for _, kw := range e.Keywords {
383 kv := mtree.KeyVal(kw)
384 if kv.Keyword() == "type" {
385 return kv.Value() == "dir"
386 }
387 }
388 // Shouldn't be reached.
389 return false
390 }
391
392 // filterMissingKeywords is a fairly annoying hack to get around the fact that
393 // tar archive manifest generation has certain unsolvable problems regarding
394 // certain keywords. For example, the size=... keyword cannot be implemented
395 // for directories in a tar archive (which causes Missing errors for that
396 // keyword).
397 //
398 // This function just removes all instances of Missing errors for keywords.
399 // This makes certain assumptions about the type of issues tar archives have.
400 // Only call this on tar archive manifest comparisons.
401 func filterMissingKeywords(diffs []mtree.InodeDelta) []mtree.InodeDelta {
402 newDiffs := []mtree.InodeDelta{}
403 loop:
404 for _, diff := range diffs {
405 if diff.Type() == mtree.Modified {
406 // We only apply this filtering to directories.
407 // NOTE: This will probably break if someone drops the size keyword.
408 if isDirEntry(*diff.Old()) || isDirEntry(*diff.New()) {
409 // If this applies to '.' then we just filter everything
411 // (meaning we remove this entry). This is because not all tar
412 // archives include a '.' entry, which makes checking this
413 // impractical.
413 if diff.Path() == "." {
414 continue
415 }
416
417 // Only filter out the size keyword.
418 // NOTE: This currently takes advantage of the fact the
419 // diff.Diff() returns the actual slice backing diff.keys.
420 keys := diff.Diff()
421 for idx, k := range keys {
422 // Delete the key if it's "size". Unfortunately in Go you
423 // can't delete from a slice without reassigning it. So we
424 // just overwrite it with the last value.
425 if k.Name() == "size" {
426 if len(keys) < 2 {
427 continue loop
428 }
429 keys[idx] = keys[len(keys)-1]
430 }
431 }
432 }
433 }
434
435 // If we got here, append to the new set.
436 newDiffs = append(newDiffs, diff)
437 }
438 return newDiffs
439 }
440
441 // isTarSpec returns whether the spec provided came from the tar generator.
442 // This takes advantage of an unsolvable problem in tar generation.
443 func isTarSpec(spec *mtree.DirectoryHierarchy) bool {
444 // Find a directory and check whether it's missing size=...
445 // NOTE: This will definitely break if someone drops the size=... keyword.
446 for _, e := range spec.Entries {
447 if !isDirEntry(e) {
448 continue
449 }
450
451 for _, kw := range e.Keywords {
452 kv := mtree.KeyVal(kw)
453 if kv.Keyword() == "size" {
454 return false
455 }
456 }
457 return true
458 }
459
460 // Should never be reached.
461 return false
462 }
463
464 func splitKeywordsArg(str string) []mtree.Keyword {
465 keywords := []mtree.Keyword{}
466 for _, kw := range strings.Fields(strings.Replace(str, ",", " ", -1)) {
467 keywords = append(keywords, mtree.KeywordSynonym(kw))
468 }
469 return keywords
470 }
0 package mtree
1
2 import (
3 "encoding/json"
4 "fmt"
5 "strconv"
6 )
7
8 // XXX: Do we need a Difference interface to make it so people can do var x
9 // Difference = <something>? The main problem is that keys and inodes need to
10 // have different interfaces, so it's just a pain.
11
12 // DifferenceType represents the type of a discrepancy encountered for
13 // an object. This is also used to represent discrepancies between keys
14 // for objects.
15 type DifferenceType string
16
17 const (
18 // Missing represents a discrepancy where the object is present in
19 // the @old manifest but is not present in the @new manifest.
20 Missing DifferenceType = "missing"
21
22 // Extra represents a discrepancy where the object is not present in
23 // the @old manifest but is present in the @new manifest.
24 Extra DifferenceType = "extra"
25
26 // Modified represents a discrepancy where the object is present in
27 // both the @old and @new manifests, but one or more of the keys
28 // have different values (or have not been set in one of the
29 // manifests).
30 Modified DifferenceType = "modified"
31
32 // ErrorDifference represents an attempted update to the values of
33 // a keyword that failed
34 ErrorDifference DifferenceType = "errored"
35 )
36
37 // These functions return *type from the parameter. It's just shorthand, to
38 // ensure that we don't accidentally expose pointers to the caller that are
39 // internal data.
40 func ePtr(e Entry) *Entry { return &e }
41 func sPtr(s string) *string { return &s }
42
43 // InodeDelta represents a discrepancy in a filesystem object between two
44 // DirectoryHierarchy manifests. Discrepancies are caused by entries only
45 // present in one manifest [Missing, Extra], keys only present in one of the
46 // manifests [Modified] or a difference between the keys of the same object in
47 // both manifests [Modified].
48 type InodeDelta struct {
49 diff DifferenceType
50 path string
51 new Entry
52 old Entry
53 keys []KeyDelta
54 }
55
56 // Type returns the type of discrepancy encountered when comparing this inode
57 // between the two DirectoryHierarchy manifests.
58 func (i InodeDelta) Type() DifferenceType {
59 return i.diff
60 }
61
62 // Path returns the path to the inode (relative to the root of the
63 // DirectoryHierarchy manifests).
64 func (i InodeDelta) Path() string {
65 return i.path
66 }
67
68 // Diff returns the set of key discrepancies between the two manifests for the
69 // specific inode. If the DifferenceType of the inode is not Modified, then
70 // Diff returns nil.
71 func (i InodeDelta) Diff() []KeyDelta {
72 return i.keys
73 }
74
75 // Old returns the value of the inode Entry in the "old" DirectoryHierarchy (as
76 // determined by the ordering of parameters to Compare).
77 func (i InodeDelta) Old() *Entry {
78 if i.diff == Modified || i.diff == Missing {
79 return ePtr(i.old)
80 }
81 return nil
82 }
83
84 // New returns the value of the inode Entry in the "new" DirectoryHierarchy (as
85 // determined by the ordering of parameters to Compare).
86 func (i InodeDelta) New() *Entry {
87 if i.diff == Modified || i.diff == Extra {
88 return ePtr(i.new)
89 }
90 return nil
91 }
92
93 // MarshalJSON creates a JSON-encoded version of InodeDelta.
94 func (i InodeDelta) MarshalJSON() ([]byte, error) {
95 return json.Marshal(struct {
96 Type DifferenceType `json:"type"`
97 Path string `json:"path"`
98 Keys []KeyDelta `json:"keys"`
99 }{
100 Type: i.diff,
101 Path: i.path,
102 Keys: i.keys,
103 })
104 }
105
106 // String returns a "pretty" formatting for InodeDelta.
107 func (i InodeDelta) String() string {
108 switch i.diff {
109 case Modified:
110 // Output the first failure.
111 f := i.keys[0]
112 return fmt.Sprintf("%q: keyword %q: expected %s; got %s", i.path, f.name, f.old, f.new)
113 case Extra:
114 return fmt.Sprintf("%q: unexpected path", i.path)
115 case Missing:
116 return fmt.Sprintf("%q: missing path", i.path)
117 default:
118 panic("programming error")
119 }
120 }
121
122 // KeyDelta represents a discrepancy in a key for a particular filesystem
123 // object between two DirectoryHierarchy manifests. Discrepancies are caused by
124 // keys only present in one manifest [Missing, Extra] or a difference between
125 // the keys of the same object in both manifests [Modified]. A set of these is
126 // returned with InodeDelta.Diff().
127 type KeyDelta struct {
128 diff DifferenceType
129 name Keyword
130 old string
131 new string
132 err error // used for update delta results
133 }
134
135 // Type returns the type of discrepancy encountered when comparing this key
136 // between the two DirectoryHierarchy manifests' relevant inode entry.
137 func (k KeyDelta) Type() DifferenceType {
138 return k.diff
139 }
140
141 // Name returns the name (the key) of the KeyDeltaVal entry in the
142 // DirectoryHierarchy.
143 func (k KeyDelta) Name() Keyword {
144 return k.name
145 }
146
147 // Old returns the value of the KeyDeltaVal entry in the "old" DirectoryHierarchy
148 // (as determined by the ordering of parameters to Compare). Returns nil if
149 // there was no entry in the "old" DirectoryHierarchy.
150 func (k KeyDelta) Old() *string {
151 if k.diff == Modified || k.diff == Missing {
152 return sPtr(k.old)
153 }
154 return nil
155 }
156
157 // New returns the value of the KeyDeltaVal entry in the "new" DirectoryHierarchy
158 // (as determined by the ordering of parameters to Compare). Returns nil if
159 // there was no entry in the "new" DirectoryHierarchy.
160 func (k KeyDelta) New() *string {
161 if k.diff == Modified || k.diff == Extra {
162 return sPtr(k.new)
163 }
164 return nil
165 }
166
167 // MarshalJSON creates a JSON-encoded version of KeyDelta.
168 func (k KeyDelta) MarshalJSON() ([]byte, error) {
169 return json.Marshal(struct {
170 Type DifferenceType `json:"type"`
171 Name Keyword `json:"name"`
172 Old string `json:"old"`
173 New string `json:"new"`
174 }{
175 Type: k.diff,
176 Name: k.name,
177 Old: k.old,
178 New: k.new,
179 })
180 }
181
182 // Like Compare, but for single inode entries only. Used to compute the
183 // cached version of inode.keys.
184 func compareEntry(oldEntry, newEntry Entry) ([]KeyDelta, error) {
185 // Represents the new and old states for an entry's keys.
186 type stateT struct {
187 Old *KeyVal
188 New *KeyVal
189 }
190
191 diffs := map[Keyword]*stateT{}
192 oldKeys := oldEntry.AllKeys()
193 newKeys := newEntry.AllKeys()
194
195 // Fill the map with the old keys first.
196 for _, kv := range oldKeys {
197 key := kv.Keyword()
198 // only add this diff if the new keys have this keyword
199 if key != "tar_time" && key != "time" && key.Prefix() != "xattr" && len(HasKeyword(newKeys, key)) == 0 {
200 continue
201 }
202
203 // Cannot take &kv because it's the iterator.
204 copy := new(KeyVal)
205 *copy = kv
206
207 _, ok := diffs[key]
208 if !ok {
209 diffs[key] = new(stateT)
210 }
211 diffs[key].Old = copy
212 }
213
214 // Then fill the new keys.
215 for _, kv := range newKeys {
216 key := kv.Keyword()
217 // only add this diff if the old keys have this keyword
218 if key != "tar_time" && key != "time" && key.Prefix() != "xattr" && len(HasKeyword(oldKeys, key)) == 0 {
219 continue
220 }
221
222 // Cannot take &kv because it's the iterator.
223 copy := new(KeyVal)
224 *copy = kv
225
226 _, ok := diffs[key]
227 if !ok {
228 diffs[key] = new(stateT)
229 }
230 diffs[key].New = copy
231 }
232
233 // We need a full list of the keys so we can deal with different keyvalue
234 // orderings.
235 var kws []Keyword
236 for kw := range diffs {
237 kws = append(kws, kw)
238 }
239
240 // If both tar_time and time were specified in the set of keys, we have to
241 // mess with the diffs. This is an unfortunate side-effect of tar archives.
242 // TODO(cyphar): This really should be abstracted inside keywords.go
243 if InKeywordSlice("tar_time", kws) && InKeywordSlice("time", kws) {
244 // Delete "time".
245 timeStateT := diffs["time"]
246 delete(diffs, "time")
247
248 // Make a new tar_time.
249 if diffs["tar_time"].Old == nil {
250 time, err := strconv.ParseFloat(timeStateT.Old.Value(), 64)
251 if err != nil {
252 return nil, fmt.Errorf("failed to parse old time: %s", err)
253 }
254
255 newTime := new(KeyVal)
256 *newTime = KeyVal(fmt.Sprintf("tar_time=%d.000000000", int64(time)))
257
258 diffs["tar_time"].Old = newTime
259 } else if diffs["tar_time"].New == nil {
260 time, err := strconv.ParseFloat(timeStateT.New.Value(), 64)
261 if err != nil {
262 return nil, fmt.Errorf("failed to parse new time: %s", err)
263 }
264
265 newTime := new(KeyVal)
266 *newTime = KeyVal(fmt.Sprintf("tar_time=%d.000000000", int64(time)))
267
268 diffs["tar_time"].New = newTime
269 } else {
270 return nil, fmt.Errorf("time and tar_time set in the same manifest")
271 }
272 }
273
274 // Are there any differences?
275 var results []KeyDelta
276 for name, diff := range diffs {
277 // Invalid
278 if diff.Old == nil && diff.New == nil {
279 return nil, fmt.Errorf("invalid state: both old and new are nil: key=%s", name)
280 }
281
282 switch {
283 // Missing
284 case diff.New == nil:
285 results = append(results, KeyDelta{
286 diff: Missing,
287 name: name,
288 old: diff.Old.Value(),
289 })
290
291 // Extra
292 case diff.Old == nil:
293 results = append(results, KeyDelta{
294 diff: Extra,
295 name: name,
296 new: diff.New.Value(),
297 })
298
299 // Modified
300 default:
301 if !diff.Old.Equal(*diff.New) {
302 results = append(results, KeyDelta{
303 diff: Modified,
304 name: name,
305 old: diff.Old.Value(),
306 new: diff.New.Value(),
307 })
308 }
309 }
310 }
311
312 return results, nil
313 }
314
315 // Compare compares two directory hierarchy manifests, and returns the
316 // list of discrepancies between the two. All of the entries in the
317 // manifest are considered, with differences being generated for
318 // RelativeType and FullType entries. Differences in structure (such as
319 // the way /set and /unset are written) are not considered to be
320 // discrepancies. The list of differences are all filesystem objects.
321 //
322 // keys controls which keys will be compared, but if keys is nil then all
323 // possible keys will be compared between the two manifests (allowing for
324 // missing entries and the like). A missing or extra key is treated as a
325 // Modified type.
326 //
327 // NB: The order of the parameters matters (old, new) because Extra and
328 // Missing are considered as different discrepancy types.
329 func Compare(oldDh, newDh *DirectoryHierarchy, keys []Keyword) ([]InodeDelta, error) {
330 // Represents the new and old states for an entry.
331 type stateT struct {
332 Old *Entry
333 New *Entry
334 }
335
336 // To deal with different orderings of the entries, use a path-keyed
337 // map to make sure we don't start comparing unrelated entries.
338 diffs := map[string]*stateT{}
339
340 // First, iterate over the old hierarchy.
341 for _, e := range oldDh.Entries {
342 if e.Type == RelativeType || e.Type == FullType {
343 path, err := e.Path()
344 if err != nil {
345 return nil, err
346 }
347
348 // Cannot take &e because it's the iterator.
349 copy := new(Entry)
350 *copy = e
351
352 _, ok := diffs[path]
353 if !ok {
354 diffs[path] = &stateT{}
355 }
356 diffs[path].Old = copy
357 }
358 }
359
360 // Then, iterate over the new hierarchy.
361 for _, e := range newDh.Entries {
362 if e.Type == RelativeType || e.Type == FullType {
363 path, err := e.Path()
364 if err != nil {
365 return nil, err
366 }
367
368 // Cannot take &e because it's the iterator.
369 copy := new(Entry)
370 *copy = e
371
372 _, ok := diffs[path]
373 if !ok {
374 diffs[path] = &stateT{}
375 }
376 diffs[path].New = copy
377 }
378 }
379
380 // Now we compute the diff.
381 var results []InodeDelta
382 for path, diff := range diffs {
383 // Invalid
384 if diff.Old == nil && diff.New == nil {
385 return nil, fmt.Errorf("invalid state: both old and new are nil: path=%s", path)
386 }
387
388 switch {
389 // Missing
390 case diff.New == nil:
391 results = append(results, InodeDelta{
392 diff: Missing,
393 path: path,
394 old: *diff.Old,
395 })
396
397 // Extra
398 case diff.Old == nil:
399 results = append(results, InodeDelta{
400 diff: Extra,
401 path: path,
402 new: *diff.New,
403 })
404
405 // Modified
406 default:
407 changed, err := compareEntry(*diff.Old, *diff.New)
408 if err != nil {
409 return nil, fmt.Errorf("comparison failed %s: %s", path, err)
410 }
411
412 // Now remove "changed" entries that don't match the keys.
413 if keys != nil {
414 var filterChanged []KeyDelta
415 for _, keyDiff := range changed {
416 if InKeywordSlice(keyDiff.name.Prefix(), keys) {
417 filterChanged = append(filterChanged, keyDiff)
418 }
419 }
420 changed = filterChanged
421 }
422
423 // Check if there were any actual changes.
424 if len(changed) > 0 {
425 results = append(results, InodeDelta{
426 diff: Modified,
427 path: path,
428 old: *diff.Old,
429 new: *diff.New,
430 keys: changed,
431 })
432 }
433 }
434 }
435
436 return results, nil
437 }
0 package mtree
1
2 import (
3 "archive/tar"
4 "bytes"
5 "encoding/json"
6 "io"
7 "io/ioutil"
8 "os"
9 "path/filepath"
10 "testing"
11 "time"
12 )
13
14 // simple walk of current directory, and immediately check it.
15 // may not be parallelizable.
16 func TestCompare(t *testing.T) {
17 old, err := Walk(".", nil, append(DefaultKeywords, "sha1"), nil)
18 if err != nil {
19 t.Fatal(err)
20 }
21
22 new, err := Walk(".", nil, append(DefaultKeywords, "sha1"), nil)
23 if err != nil {
24 t.Fatal(err)
25 }
26
27 diffs, err := Compare(old, new, nil)
28 if err != nil {
29 t.Fatal(err)
30 }
31
32 if len(diffs) > 0 {
33 t.Errorf("%#v", diffs)
34 }
35 }
36
37 func TestCompareModified(t *testing.T) {
38 dir, err := ioutil.TempDir("", "test-compare-modified")
39 if err != nil {
40 t.Fatal(err)
41 }
42 defer os.RemoveAll(dir)
43
44 // Create a bunch of objects.
45 tmpfile := filepath.Join(dir, "tmpfile")
46 if err := ioutil.WriteFile(tmpfile, []byte("some content here"), 0666); err != nil {
47 t.Fatal(err)
48 }
49
50 tmpdir := filepath.Join(dir, "testdir")
51 if err := os.Mkdir(tmpdir, 0755); err != nil {
52 t.Fatal(err)
53 }
54
55 tmpsubfile := filepath.Join(tmpdir, "anotherfile")
56 if err := ioutil.WriteFile(tmpsubfile, []byte("some different content"), 0666); err != nil {
57 t.Fatal(err)
58 }
59
60 // Walk the current state.
61 old, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
62 if err != nil {
63 t.Fatal(err)
64 }
65
66 // Overwrite the content in one of the files.
67 if err := ioutil.WriteFile(tmpsubfile, []byte("modified content"), 0666); err != nil {
68 t.Fatal(err)
69 }
70
71 // Walk the new state.
72 new, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
73 if err != nil {
74 t.Fatal(err)
75 }
76
77 // Compare.
78 diffs, err := Compare(old, new, nil)
79 if err != nil {
80 t.Fatal(err)
81 }
82
83 // 1 object
84 if len(diffs) != 1 {
85 t.Errorf("expected the diff length to be 1, got %d", len(diffs))
86 for i, diff := range diffs {
87 t.Logf("diff[%d] = %#v", i, diff)
88 }
89 }
90
91 // These cannot fail.
92 tmpfile, _ = filepath.Rel(dir, tmpfile)
93 tmpdir, _ = filepath.Rel(dir, tmpdir)
94 tmpsubfile, _ = filepath.Rel(dir, tmpsubfile)
95
96 for _, diff := range diffs {
97 switch diff.Path() {
98 case tmpsubfile:
99 if diff.Type() != Modified {
100 t.Errorf("unexpected diff type for %s: %s", diff.Path(), diff.Type())
101 }
102
103 if diff.Diff() == nil {
104 t.Errorf("expect to not get nil for .Diff()")
105 }
106
107 old := diff.Old()
108 new := diff.New()
109 if old == nil || new == nil {
110 t.Errorf("expected to get (!nil, !nil) for (.Old, .New), got (%#v, %#v)", old, new)
111 }
112 default:
113 t.Errorf("unexpected diff found: %#v", diff)
114 }
115 }
116 }
117
118 func TestCompareMissing(t *testing.T) {
119 dir, err := ioutil.TempDir("", "test-compare-missing")
120 if err != nil {
121 t.Fatal(err)
122 }
123 defer os.RemoveAll(dir)
124
125 // Create a bunch of objects.
126 tmpfile := filepath.Join(dir, "tmpfile")
127 if err := ioutil.WriteFile(tmpfile, []byte("some content here"), 0666); err != nil {
128 t.Fatal(err)
129 }
130
131 tmpdir := filepath.Join(dir, "testdir")
132 if err := os.Mkdir(tmpdir, 0755); err != nil {
133 t.Fatal(err)
134 }
135
136 tmpsubfile := filepath.Join(tmpdir, "anotherfile")
137 if err := ioutil.WriteFile(tmpsubfile, []byte("some different content"), 0666); err != nil {
138 t.Fatal(err)
139 }
140
141 // Walk the current state.
142 old, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
143 if err != nil {
144 t.Fatal(err)
145 }
146
147 // Delete the objects.
148 if err := os.RemoveAll(tmpfile); err != nil {
149 t.Fatal(err)
150 }
151
152 if err := os.RemoveAll(tmpsubfile); err != nil {
153 t.Fatal(err)
154 }
155
156 if err := os.RemoveAll(tmpdir); err != nil {
157 t.Fatal(err)
158 }
159
160 // Walk the new state.
161 new, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
162 if err != nil {
163 t.Fatal(err)
164 }
165
166 // Compare.
167 diffs, err := Compare(old, new, nil)
168 if err != nil {
169 t.Fatal(err)
170 }
171
172 // 3 objects + the changes to '.'
173 if len(diffs) != 4 {
174 t.Errorf("expected the diff length to be 4, got %d", len(diffs))
175 for i, diff := range diffs {
176 t.Logf("diff[%d] = %#v", i, diff)
177 }
178 }
179
180 // These cannot fail.
181 tmpfile, _ = filepath.Rel(dir, tmpfile)
182 tmpdir, _ = filepath.Rel(dir, tmpdir)
183 tmpsubfile, _ = filepath.Rel(dir, tmpsubfile)
184
185 for _, diff := range diffs {
186 switch diff.Path() {
187 case ".":
188 // ignore these changes
189 case tmpfile, tmpdir, tmpsubfile:
190 if diff.Type() != Missing {
191 t.Errorf("unexpected diff type for %s: %s", diff.Path(), diff.Type())
192 }
193
194 if diff.Diff() != nil {
195 t.Errorf("expect to get nil for .Diff(), got %#v", diff.Diff())
196 }
197
198 old := diff.Old()
199 new := diff.New()
200 if old == nil || new != nil {
201 t.Errorf("expected to get (!nil, nil) for (.Old, .New), got (%#v, %#v)", old, new)
202 }
203 default:
204 t.Errorf("unexpected diff found: %#v", diff)
205 }
206 }
207 }
208
209 func TestCompareExtra(t *testing.T) {
210 dir, err := ioutil.TempDir("", "test-compare-extra")
211 if err != nil {
212 t.Fatal(err)
213 }
214 defer os.RemoveAll(dir)
215
216 // Walk the current state.
217 old, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
218 if err != nil {
219 t.Fatal(err)
220 }
221
222 // Create a bunch of objects.
223 tmpfile := filepath.Join(dir, "tmpfile")
224 if err := ioutil.WriteFile(tmpfile, []byte("some content here"), 0666); err != nil {
225 t.Fatal(err)
226 }
227
228 tmpdir := filepath.Join(dir, "testdir")
229 if err := os.Mkdir(tmpdir, 0755); err != nil {
230 t.Fatal(err)
231 }
232
233 tmpsubfile := filepath.Join(tmpdir, "anotherfile")
234 if err := ioutil.WriteFile(tmpsubfile, []byte("some different content"), 0666); err != nil {
235 t.Fatal(err)
236 }
237
238 // Walk the new state.
239 new, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
240 if err != nil {
241 t.Fatal(err)
242 }
243
244 // Compare.
245 diffs, err := Compare(old, new, nil)
246 if err != nil {
247 t.Fatal(err)
248 }
249
250 // 3 objects + the changes to '.'
251 if len(diffs) != 4 {
252 t.Errorf("expected the diff length to be 4, got %d", len(diffs))
253 for i, diff := range diffs {
254 t.Logf("diff[%d] = %#v", i, diff)
255 }
256 }
257
258 // These cannot fail.
259 tmpfile, _ = filepath.Rel(dir, tmpfile)
260 tmpdir, _ = filepath.Rel(dir, tmpdir)
261 tmpsubfile, _ = filepath.Rel(dir, tmpsubfile)
262
263 for _, diff := range diffs {
264 switch diff.Path() {
265 case ".":
266 // ignore these changes
267 case tmpfile, tmpdir, tmpsubfile:
268 if diff.Type() != Extra {
269 t.Errorf("unexpected diff type for %s: %s", diff.Path(), diff.Type())
270 }
271
272 if diff.Diff() != nil {
273 t.Errorf("expect to get nil for .Diff(), got %#v", diff.Diff())
274 }
275
276 old := diff.Old()
277 new := diff.New()
278 if old != nil || new == nil {
279 t.Errorf("expected to get (!nil, nil) for (.Old, .New), got (%#v, %#v)", old, new)
280 }
281 default:
282 t.Errorf("unexpected diff found: %#v", diff)
283 }
284 }
285 }
286
287 func TestCompareKeys(t *testing.T) {
288 dir, err := ioutil.TempDir("", "test-compare-keys")
289 if err != nil {
290 t.Fatal(err)
291 }
292 defer os.RemoveAll(dir)
293
294 // Create a bunch of objects.
295 tmpfile := filepath.Join(dir, "tmpfile")
296 if err := ioutil.WriteFile(tmpfile, []byte("some content here"), 0666); err != nil {
297 t.Fatal(err)
298 }
299
300 tmpdir := filepath.Join(dir, "testdir")
301 if err := os.Mkdir(tmpdir, 0755); err != nil {
302 t.Fatal(err)
303 }
304
305 tmpsubfile := filepath.Join(tmpdir, "anotherfile")
306 if err := ioutil.WriteFile(tmpsubfile, []byte("aaa"), 0666); err != nil {
307 t.Fatal(err)
308 }
309
310 // Walk the current state.
311 old, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
312 if err != nil {
313 t.Fatal(err)
314 }
315
316 // Overwrite the content in one of the files, but without changing the size.
317 if err := ioutil.WriteFile(tmpsubfile, []byte("bbb"), 0666); err != nil {
318 t.Fatal(err)
319 }
320
321 // Walk the new state.
322 new, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
323 if err != nil {
324 t.Fatal(err)
325 }
326
327 // Compare.
328 diffs, err := Compare(old, new, []Keyword{"size"})
329 if err != nil {
330 t.Fatal(err)
331 }
332
333 // 0 objects
334 if len(diffs) != 0 {
335 t.Errorf("expected the diff length to be 0, got %d", len(diffs))
336 for i, diff := range diffs {
337 t.Logf("diff[%d] = %#v", i, diff)
338 }
339 }
340 }
341
342 func TestTarCompare(t *testing.T) {
343 dir, err := ioutil.TempDir("", "test-compare-tar")
344 if err != nil {
345 t.Fatal(err)
346 }
347 defer os.RemoveAll(dir)
348
349 // Create a bunch of objects.
350 tmpfile := filepath.Join(dir, "tmpfile")
351 if err := ioutil.WriteFile(tmpfile, []byte("some content"), 0644); err != nil {
352 t.Fatal(err)
353 }
354
355 tmpdir := filepath.Join(dir, "testdir")
356 if err := os.Mkdir(tmpdir, 0755); err != nil {
357 t.Fatal(err)
358 }
359
360 tmpsubfile := filepath.Join(tmpdir, "anotherfile")
361 if err := ioutil.WriteFile(tmpsubfile, []byte("aaa"), 0644); err != nil {
362 t.Fatal(err)
363 }
364
365 // Create a tar-like archive.
366 compareFiles := []fakeFile{
367 {"./", "", 0700, tar.TypeDir, 100, 0, nil},
368 {"tmpfile", "some content", 0644, tar.TypeReg, 100, 0, nil},
369 {"testdir/", "", 0755, tar.TypeDir, 100, 0, nil},
370 {"testdir/anotherfile", "aaa", 0644, tar.TypeReg, 100, 0, nil},
371 }
372
373 for _, file := range compareFiles {
374 path := filepath.Join(dir, file.Name)
375
376 // Change the time to something known with nanosec != 0.
377 chtime := time.Unix(file.Sec, 987654321)
378 if err := os.Chtimes(path, chtime, chtime); err != nil {
379 t.Fatal(err)
380 }
381 }
382
383 // Walk the current state.
384 old, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
385 if err != nil {
386 t.Fatal(err)
387 }
388
389 ts, err := makeTarStream(compareFiles)
390 if err != nil {
391 t.Fatal(err)
392 }
393
394 str := NewTarStreamer(bytes.NewBuffer(ts), nil, append(DefaultTarKeywords, "sha1"))
395 if _, err = io.Copy(ioutil.Discard, str); err != nil && err != io.EOF {
396 t.Fatal(err)
397 }
398 if err = str.Close(); err != nil {
399 t.Fatal(err)
400 }
401
402 new, err := str.Hierarchy()
403 if err != nil {
404 t.Fatal(err)
405 }
406
407 // Compare.
408 diffs, err := Compare(old, new, append(DefaultTarKeywords, "sha1"))
409 if err != nil {
410 t.Fatal(err)
411 }
412
413 // 0 objects
414 if len(diffs) != 0 {
415 actualFailure := false
416 for i, delta := range diffs {
417 // XXX: Tar generation is slightly broken, so we need to ignore some bugs.
418 if delta.Path() == "." && delta.Type() == Modified {
419 // FIXME: This is a known bug.
420 t.Logf("'.' is different in the tar -- this is a bug in the tar generation")
421
422 // The tar generation bug means that '.' is missing a bunch of keys.
423 allMissing := true
424 for _, keyDelta := range delta.Diff() {
425 if keyDelta.Type() != Missing {
426 allMissing = false
427 }
428 }
429 if !allMissing {
430 t.Errorf("'.' has changed in a way not consistent with known bugs")
431 }
432
433 continue
434 }
435
436 // XXX: Another bug.
437 keys := delta.Diff()
438 if len(keys) == 1 && keys[0].Name() == "size" && keys[0].Type() == Missing {
439 // FIXME: Also a known bug with tar generation dropping size=.
440 t.Logf("'%s' is missing a size= keyword -- a bug in tar generation", delta.Path())
441
442 continue
443 }
444
445 actualFailure = true
446 buf, err := json.MarshalIndent(delta, "", " ")
447 if err == nil {
448 t.Logf("FAILURE: diff[%d] = %s", i, string(buf))
449 } else {
450 t.Logf("FAILURE: diff[%d] = %#v", i, delta)
451 }
452 }
453
454 if actualFailure {
455 t.Errorf("expected the diff length to be 0, got %d", len(diffs))
456 }
457 }
458 }
0 package mtree
1
2 // dhCreator is used when building a DirectoryHierarchy
3 type dhCreator struct {
4 DH *DirectoryHierarchy
5 fs FsEval
6 curSet *Entry
7 curDir *Entry
8 curEnt *Entry
9 }
0 package mtree
1
2 import (
3 "fmt"
4 "path/filepath"
5 "strings"
6
7 "github.com/vbatts/go-mtree/pkg/govis"
8 )
9
10 type byPos []Entry
11
12 func (bp byPos) Len() int { return len(bp) }
13 func (bp byPos) Less(i, j int) bool { return bp[i].Pos < bp[j].Pos }
14 func (bp byPos) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] }
15
16 // Entry is each component of content in the mtree spec file
17 type Entry struct {
18 Parent *Entry // up
19 Children []*Entry // down
20 Prev, Next *Entry // left, right
21 Set *Entry // current `/set` for additional keywords
22 Pos int // order in the spec
23 Raw string // raw line as read from the spec, if applicable
24 Name string // file or directory name
25 Keywords []KeyVal // TODO(vbatts) maybe a keyword typed set of values?
26 Type EntryType
27 }
28
29 // Descend searches through an Entry's children to find the Entry associated with
30 // `filename`. Directories are stored at the end of an Entry's children, so the
31 // traversal is done backwards. Descending to "." (or an empty name) returns the Entry itself.
32 func (e Entry) Descend(filename string) *Entry {
33 if filename == "." || filename == "" {
34 return &e
35 }
36 numChildren := len(e.Children)
37 for i := range e.Children {
38 c := e.Children[numChildren-1-i]
39 if c.Name == filename {
40 return c
41 }
42 }
43 return nil
44 }
45
46 // Find is a wrapper around Descend that takes in a whole string path and tries
47 // to find that Entry
48 func (e Entry) Find(filepath string) *Entry {
49 resultnode := &e
50 for _, path := range strings.Split(filepath, "/") {
51 encoded, err := govis.Vis(path, DefaultVisFlags)
52 if err != nil {
53 return nil
54 }
55 resultnode = resultnode.Descend(encoded)
56 if resultnode == nil {
57 return nil
58 }
59 }
60 return resultnode
61 }
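// A rough usage sketch (the path and the choice of root entry here are
// illustrative): starting from a root "." entry of a parsed hierarchy, Find
// resolves a slash-separated path by Vis-encoding each component and
// Descend-ing into it.
//
//	root := dh.Entries[0] // assumes this entry is the root "." directory
//	if e := root.Find("testdir/anotherfile"); e != nil {
//		p, _ := e.Path()
//		fmt.Println(p, e.AllKeys())
//	}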
62
63 // Ascend gets the parent of an Entry. Serves mainly to maintain readability
64 // when traversing up and down an Entry tree
65 func (e Entry) Ascend() *Entry {
66 return e.Parent
67 }
68
69 // Path provides the full path of the file, regardless of whether the Entry is
70 // of RelativeType or FullType. It will be in Unvis'd form.
71 func (e Entry) Path() (string, error) {
72 decodedName, err := govis.Unvis(e.Name, DefaultVisFlags)
73 if err != nil {
74 return "", err
75 }
76 if e.Parent == nil || e.Type == FullType {
77 return filepath.Clean(decodedName), nil
78 }
79 parentName, err := e.Parent.Path()
80 if err != nil {
81 return "", err
82 }
83 return filepath.Clean(filepath.Join(parentName, decodedName)), nil
84 }
85
86 // String joins a file with its associated keywords. The file name will be the
87 // Vis'd encoded version so that it can be parsed appropriately when Check'd.
88 func (e Entry) String() string {
89 if e.Raw != "" {
90 return e.Raw
91 }
92 if e.Type == BlankType {
93 return ""
94 }
95 if e.Type == DotDotType {
96 return e.Name
97 }
98 if e.Type == SpecialType || e.Type == FullType || inKeyValSlice("type=dir", e.Keywords) {
99 return fmt.Sprintf("%s %s", e.Name, strings.Join(KeyValToString(e.Keywords), " "))
100 }
101 return fmt.Sprintf(" %s %s", e.Name, strings.Join(KeyValToString(e.Keywords), " "))
102 }
103
104 // AllKeys returns the full set of KeyVal for the given entry, based on the
105 // /set keys as well as the entry-local keys. Entry-local keys always take
106 // precedence.
107 func (e Entry) AllKeys() []KeyVal {
108 if e.Set != nil {
109 return MergeKeyValSet(e.Set.Keywords, e.Keywords)
110 }
111 return e.Keywords
112 }
113
114 // IsDir checks whether the type= keyword for this entry indicates a directory
115 func (e Entry) IsDir() bool {
116 for _, kv := range e.AllKeys() {
117 if kv.Keyword().Prefix() == "type" {
118 return kv.Value() == "dir"
119 }
120 }
121 return false
122 }
123
124 // EntryType are the formats of lines in an mtree spec file
125 type EntryType int
126
127 // The types of lines to be found in an mtree spec file
128 const (
129 SignatureType EntryType = iota // first line of the file, like `#mtree v2.0`
130 BlankType // blank lines are ignored
131 CommentType // Lines beginning with `#` are ignored
132 SpecialType // line that has a `/` prefix and issues a "special" command (currently only /set and /unset)
133 RelativeType // if the first white-space delimited word does not have a '/' in it. Options/keywords are applied.
134 DotDotType // .. - A relative path step. keywords/options are ignored
135 FullType // if the first word on the line has a `/` after the first character, it is interpreted as a file pathname with options
136 )
137
138 // String returns the name of the EntryType
139 func (et EntryType) String() string {
140 return typeNames[et]
141 }
142
143 var typeNames = map[EntryType]string{
144 SignatureType: "SignatureType",
145 BlankType: "BlankType",
146 CommentType: "CommentType",
147 SpecialType: "SpecialType",
148 RelativeType: "RelativeType",
149 DotDotType: "DotDotType",
150 FullType: "FullType",
151 }
0 package mtree
1
2 import "os"
3
4 // FsEval is a mock-friendly method of specifying to go-mtree how to carry out
5 // filesystem operations such as opening files and the like. The semantics of
6 // all of these wrappers MUST be identical to the semantics described here.
7 type FsEval interface {
8 // Open must have the same semantics as os.Open.
9 Open(path string) (*os.File, error)
10
11 // Lstat must have the same semantics as os.Lstat.
12 Lstat(path string) (os.FileInfo, error)
13
14 // Readdir must have the same semantics as calling os.Open on the given
15 // path and then returning the result of (*os.File).Readdir(-1).
16 Readdir(path string) ([]os.FileInfo, error)
17
18 // KeywordFunc must return a wrapper around the provided function (in other
19 // words, the returned function must refer to the same keyword).
20 KeywordFunc(fn KeywordFunc) KeywordFunc
21 }
22
23 // DefaultFsEval is the default implementation of FsEval (and is the default
24 // used if a nil interface is passed to any mtree function). It does not modify
25 // or wrap any of the methods (they all just call out to os.*).
26 type DefaultFsEval struct{}
27
28 // Open must have the same semantics as os.Open.
29 func (fs DefaultFsEval) Open(path string) (*os.File, error) {
30 return os.Open(path)
31 }
32
33 // Lstat must have the same semantics as os.Lstat.
34 func (fs DefaultFsEval) Lstat(path string) (os.FileInfo, error) {
35 return os.Lstat(path)
36 }
37
38 // Readdir must have the same semantics as calling os.Open on the given
39 // path and then returning the result of (*os.File).Readdir(-1).
40 func (fs DefaultFsEval) Readdir(path string) ([]os.FileInfo, error) {
41 fh, err := os.Open(path)
42 if err != nil {
43 return nil, err
44 }
45 defer fh.Close()
46 return fh.Readdir(-1)
47 }
48
49 // KeywordFunc must return a wrapper around the provided function (in other
50 // words, the returned function must refer to the same keyword).
51 func (fs DefaultFsEval) KeywordFunc(fn KeywordFunc) KeywordFunc {
52 return fn
53 }
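// A rough sketch of a custom FsEval: wrapping DefaultFsEval to count Lstat
// calls. The countingFsEval type is illustrative only and not part of this
// package; the MockFsEval in the tests below follows the same pattern.
//
//	type countingFsEval struct {
//		DefaultFsEval
//		lstats int
//	}
//
//	func (fs *countingFsEval) Lstat(path string) (os.FileInfo, error) {
//		fs.lstats++
//		return os.Lstat(path)
//	}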
0 package mtree
1
2 import (
3 "encoding/json"
4 "io/ioutil"
5 "os"
6 "path/filepath"
7 "testing"
8 "time"
9 )
10
11 var mockTime = time.Unix(1337888823, 0)
12
13 // Here be some dodgy testing. In particular, we have to mess around with some
14 // of the FsEval functions: we change all of the FileInfos to a different
15 // value.
16
17 type mockFileInfo struct {
18 os.FileInfo
19 }
20
21 func (fi mockFileInfo) Mode() os.FileMode {
22 return os.FileMode(fi.FileInfo.Mode() | 0777)
23 }
24
25 func (fi mockFileInfo) ModTime() time.Time {
26 return mockTime
27 }
28
29 type MockFsEval struct {
30 open, lstat, readdir, keywordFunc int
31 }
32
33 // Open must have the same semantics as os.Open.
34 func (fs *MockFsEval) Open(path string) (*os.File, error) {
35 fs.open++
36 return os.Open(path)
37 }
38
39 // Lstat must have the same semantics as os.Lstat.
40 func (fs *MockFsEval) Lstat(path string) (os.FileInfo, error) {
41 fs.lstat++
42 fi, err := os.Lstat(path)
43 return mockFileInfo{fi}, err
44 }
45
46 // Readdir must have the same semantics as calling os.Open on the given
47 // path and then returning the result of (*os.File).Readdir(-1).
48 func (fs *MockFsEval) Readdir(path string) ([]os.FileInfo, error) {
49 fs.readdir++
50 fh, err := os.Open(path)
51 if err != nil {
52 return nil, err
53 }
54 defer fh.Close()
55
56 fis, err := fh.Readdir(-1)
57 if err != nil {
58 return nil, err
59 }
60 for idx := range fis {
61 fis[idx] = mockFileInfo{fis[idx]}
62 }
63 return fis, nil
64 }
65
66 // KeywordFunc must return a wrapper around the provided function (in other
67 // words, the returned function must refer to the same keyword).
68 func (fs *MockFsEval) KeywordFunc(fn KeywordFunc) KeywordFunc {
69 fs.keywordFunc++
70 return fn
71 }
72
73 func TestCheckFsEval(t *testing.T) {
74 dir, err := ioutil.TempDir("", "test-check-fs-eval")
75 if err != nil {
76 t.Fatal(err)
77 }
78 defer os.RemoveAll(dir) // clean up
79
80 content := []byte("If you hide your ignorance, no one will hit you and you'll never learn.")
81 tmpfn := filepath.Join(dir, "tmpfile")
82 if err := ioutil.WriteFile(tmpfn, content, 0451); err != nil {
83 t.Fatal(err)
84 }
85
86 // Walk this tempdir
87 mock := &MockFsEval{}
88 dh, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), mock)
89 if err != nil {
90 t.Fatal(err)
91 }
92 // Make sure that mock functions have been called.
93 if mock.open == 0 {
94 t.Errorf("mock.Open not called")
95 }
96 if mock.lstat == 0 {
97 t.Errorf("mock.Lstat not called")
98 }
99 if mock.readdir == 0 {
100 t.Errorf("mock.Readdir not called")
101 }
102 if mock.keywordFunc == 0 {
103 t.Errorf("mock.KeywordFunc not called")
104 }
105
106 // Check for sanity. This ought to pass.
107 mock = &MockFsEval{}
108 res, err := Check(dir, dh, nil, mock)
109 if err != nil {
110 t.Fatal(err)
111 }
112 if len(res) > 0 {
113 t.Errorf("%#v", res)
114 }
115 // Make sure that mock functions have been called.
116 if mock.open == 0 {
117 t.Errorf("mock.Open not called")
118 }
119 if mock.lstat == 0 {
120 t.Errorf("mock.Lstat not called")
121 }
122 if mock.readdir == 0 {
123 t.Errorf("mock.Readdir not called")
124 }
125 if mock.keywordFunc == 0 {
126 t.Errorf("mock.KeywordFunc not called")
127 }
128
129 // This should FAIL.
130 res, err = Check(dir, dh, nil, nil)
131 if err != nil {
132 t.Fatal(err)
133 }
134 if len(res) == 0 {
135 t.Errorf("expected Check to fail")
136 }
137
138 // Modify the metadata so you can get the right output.
139 if err := os.Chmod(tmpfn, 0777); err != nil {
140 t.Fatal(err)
141 }
142 if err := os.Chtimes(tmpfn, mockTime, mockTime); err != nil {
143 t.Fatal(err)
144 }
145 if err := os.Chmod(dir, 0777); err != nil {
146 t.Fatal(err)
147 }
148 if err := os.Chtimes(dir, mockTime, mockTime); err != nil {
149 t.Fatal(err)
150 }
151
152 // It should now succeed.
153 res, err = Check(dir, dh, nil, nil)
154 if err != nil {
155 t.Fatal(err)
156 }
157 if len(res) > 0 {
158 buf, err := json.MarshalIndent(res, "", " ")
159 if err != nil {
160 t.Errorf("%#v", res)
161 } else {
162 t.Errorf("%s", buf)
163 }
164 }
165 }
0 hash: 72477bc1ec8c50d432740cbf3940cae19387d596c02f1046c7e989458462e536
1 updated: 2017-10-20T11:31:42.730083725+02:00
2 imports:
3 - name: github.com/Sirupsen/logrus
4 version: f006c2ac4710855cf0f916dd6b77acf6b048dc6e
5 - name: golang.org/x/crypto
6 version: 1351f936d976c60a0a48d728281922cf63eafb8d
7 subpackages:
8 - ripemd160
9 - ssh/terminal
10 - name: golang.org/x/sys
11 version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86
12 subpackages:
13 - unix
14 testImports: []
0 package: github.com/vbatts/go-mtree
1 description: File systems verification utility and library, in likeness of mtree(8)
2 homepage: https://github.com/vbatts/go-mtree
3 license: BSD-3-Clause
4 import:
5 - package: golang.org/x/crypto
6 subpackages:
7 - ripemd160
8 - package: github.com/Sirupsen/logrus
9 version: ^1.0.0
10 - package: golang.org/x/sys
11 version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86
12 subpackages:
13 - unix
0 package mtree
1
2 import (
3 "io"
4 "sort"
5 )
6
7 // DirectoryHierarchy is the mapped structure for an mtree directory hierarchy
8 // spec
9 type DirectoryHierarchy struct {
10 Entries []Entry
11 }
12
13 // WriteTo simplifies the output of the resulting hierarchy spec
14 func (dh DirectoryHierarchy) WriteTo(w io.Writer) (n int64, err error) {
15 sort.Sort(byPos(dh.Entries))
16 var sum int64
17 for _, e := range dh.Entries {
18 str := e.String()
19 i, err := io.WriteString(w, str+"\n")
20 if err != nil {
21 return sum, err
22 }
23 sum += int64(i)
24 }
25 return sum, nil
26 }
27
28 // UsedKeywords collects and returns all the keywords used in a
29 // DirectoryHierarchy
30 func (dh DirectoryHierarchy) UsedKeywords() []Keyword {
31 usedkeywords := []Keyword{}
32 for _, e := range dh.Entries {
33 switch e.Type {
34 case FullType, RelativeType, SpecialType:
35 if e.Type != SpecialType || e.Name == "/set" {
36 kvs := e.Keywords
37 for _, kv := range kvs {
38 kw := KeyVal(kv).Keyword().Prefix()
39 if !InKeywordSlice(kw, usedkeywords) {
40 usedkeywords = append(usedkeywords, KeywordSynonym(string(kw)))
41 }
42 }
43 }
44 }
45 }
46 return usedkeywords
47 }
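// A rough usage sketch (the directory path and error handling are illustrative):
// a DirectoryHierarchy produced by Walk or ParseSpec can be written back out
// and queried for the keywords it actually uses.
//
//	dh, err := Walk("/some/dir", nil, DefaultKeywords, nil)
//	if err == nil {
//		_, _ = dh.WriteTo(os.Stdout)
//		fmt.Println(dh.UsedKeywords())
//	}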
0 package mtree
1
2 import (
3 "strings"
4 "testing"
5 )
6
7 var checklist = []struct {
8 blob string
9 set []Keyword
10 }{
11 {blob: `
12 # machine: bananaboat
13 # tree: .git
14 # date: Wed Nov 16 14:54:17 2016
15
16 # .
17 /set type=file nlink=1 mode=0664 uid=1000 gid=100
18 . size=4096 type=dir mode=0755 nlink=8 time=1479326055.423853146
19 .COMMIT_EDITMSG.un~ size=1006 mode=0644 time=1479325423.450468662 sha1digest=dead0face
20 .TAG_EDITMSG.un~ size=1069 mode=0600 time=1471362316.801317529 sha256digest=dead0face
21 `, set: []Keyword{"size", "mode", "time", "sha256digest"}},
22 {blob: `
23 # user: cyphar
24 # machine: ryuk
25 # tree: xattr
26 # date: Fri Sep 29 21:00:41 2017
27 # keywords: size,type,uid,gid,mode,link,nlink,time,xattr
28
29 # .
30 /set type=file nlink=1 mode=0664 uid=1000 gid=100 xattr.user.kira=SSdsbCB0YWtlIGEgcG90YXRvIGNoaXAuLi4gYW5kIGVhdCBpdCE=
31 . size=8 type=dir mode=0755 time=1506666472.255992830
32 file size=0 mode=0644 time=1506666472.255992830 xattr.user.something=dGVzdA==
33 ..
34 `, set: []Keyword{"size", "type", "uid", "gid", "mode", "nlink", "time", "xattr"}},
35 }
36
37 func TestUsedKeywords(t *testing.T) {
38 for i, item := range checklist {
39 dh, err := ParseSpec(strings.NewReader(item.blob))
40 if err != nil {
41 t.Error(err)
42 }
43 used := dh.UsedKeywords()
44 for _, k := range item.set {
45 if !InKeywordSlice(k, used) {
46 t.Errorf("%d: expected to find %q in %q", i, k, used)
47 }
48 }
49 }
50 }
0 package mtree
1
2 import (
3 "archive/tar"
4 "crypto/md5"
5 "crypto/sha1"
6 "crypto/sha256"
7 "crypto/sha512"
8 "fmt"
9 "hash"
10 "io"
11 "os"
12
13 "github.com/vbatts/go-mtree/pkg/govis"
14 "golang.org/x/crypto/ripemd160"
15 )
16
17 // KeywordFunc is the type of a function called on each file to be included in
18 // a DirectoryHierarchy. It produces the keyword=value output to be included
19 // for the file entry, or an empty result if the keyword does not apply.
20 // io.Reader `r` is the stream of the file payload. While this function takes
21 // an io.Reader, the caller needs to reset it to the beginning for each new
22 // KeywordFunc.
23 type KeywordFunc func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error)
24
25 var (
26 // KeywordFuncs is the map of all keywords (and the functions to produce them)
27 KeywordFuncs = map[Keyword]KeywordFunc{
28 "size": sizeKeywordFunc, // The size, in bytes, of the file
29 "type": typeKeywordFunc, // The type of the file
30 "time": timeKeywordFunc, // The last modification time of the file
31 "link": linkKeywordFunc, // The target of the symbolic link when type=link
32 "uid": uidKeywordFunc, // The file owner as a numeric value
33 "gid": gidKeywordFunc, // The file group as a numeric value
34 "nlink": nlinkKeywordFunc, // The number of hard links the file is expected to have
35 "uname": unameKeywordFunc, // The file owner as a symbolic name
36 "gname": gnameKeywordFunc, // The file group as a symbolic name
37 "mode": modeKeywordFunc, // The current file's permissions as a numeric (octal) or symbolic value
38 "cksum": cksumKeywordFunc, // The checksum of the file using the default algorithm specified by the cksum(1) utility
39 "md5": hasherKeywordFunc("md5digest", md5.New), // The MD5 message digest of the file
40 "md5digest": hasherKeywordFunc("md5digest", md5.New), // A synonym for `md5`
41 "rmd160": hasherKeywordFunc("ripemd160digest", ripemd160.New), // The RIPEMD160 message digest of the file
42 "rmd160digest": hasherKeywordFunc("ripemd160digest", ripemd160.New), // A synonym for `rmd160`
43 "ripemd160digest": hasherKeywordFunc("ripemd160digest", ripemd160.New), // A synonym for `rmd160`
44 "sha1": hasherKeywordFunc("sha1digest", sha1.New), // The SHA1 message digest of the file
45 "sha1digest": hasherKeywordFunc("sha1digest", sha1.New), // A synonym for `sha1`
46 "sha256": hasherKeywordFunc("sha256digest", sha256.New), // The SHA256 message digest of the file
47 "sha256digest": hasherKeywordFunc("sha256digest", sha256.New), // A synonym for `sha256`
48 "sha384": hasherKeywordFunc("sha384digest", sha512.New384), // The SHA384 message digest of the file
49 "sha384digest": hasherKeywordFunc("sha384digest", sha512.New384), // A synonym for `sha384`
50 "sha512": hasherKeywordFunc("sha512digest", sha512.New), // The SHA512 message digest of the file
51 "sha512digest": hasherKeywordFunc("sha512digest", sha512.New), // A synonym for `sha512`
52 "sha512256": hasherKeywordFunc("sha512digest", sha512.New512_256), // The SHA512/256 message digest of the file
53 "sha512256digest": hasherKeywordFunc("sha512digest", sha512.New512_256), // A synonym for `sha512256`
54
55 "flags": flagsKeywordFunc, // NOTE: this is a noop, but here to support the presence of the "flags" keyword.
56
57 // This is not an upstreamed keyword, but is used to vary from "time", as tar
58 // archives do not store nanosecond precision. So comparing on "time" will
59 // only be accurate to the second.
60 "tar_time": tartimeKeywordFunc, // The last modification time of the file, from a tar archive mtime
61
62 // This is not an upstreamed keyword, but a needed attribute for file validation.
63 // The pattern for this keyword key is prefixed by "xattr." followed by the extended attribute "namespace.key".
64 // The keyword value is the base64 encoding of the extended attribute's value.
65 // In this way, the order of the keys does not matter and binary values are handled safely.
66 "xattr": xattrKeywordFunc,
67 "xattrs": xattrKeywordFunc,
68 }
69 )
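// A rough sketch of calling a KeywordFunc directly (the path and error handling
// are illustrative): metadata keywords such as "size" ignore the io.Reader,
// while digest keywords read the file payload from it.
//
//	info, _ := os.Lstat("somefile")
//	kvs, _ := KeywordFuncs["size"]("somefile", info, nil)
//	// kvs would be e.g. []KeyVal{"size=1234"}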
70 var (
71 modeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
72 permissions := info.Mode().Perm()
73 if os.ModeSetuid&info.Mode() > 0 {
74 permissions |= (1 << 11)
75 }
76 if os.ModeSetgid&info.Mode() > 0 {
77 permissions |= (1 << 10)
78 }
79 if os.ModeSticky&info.Mode() > 0 {
80 permissions |= (1 << 9)
81 }
82 return []KeyVal{KeyVal(fmt.Sprintf("mode=%#o", permissions))}, nil
83 }
84 sizeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
85 if sys, ok := info.Sys().(*tar.Header); ok {
86 if sys.Typeflag == tar.TypeSymlink {
87 return []KeyVal{KeyVal(fmt.Sprintf("size=%d", len(sys.Linkname)))}, nil
88 }
89 }
90 return []KeyVal{KeyVal(fmt.Sprintf("size=%d", info.Size()))}, nil
91 }
92 cksumKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
93 if !info.Mode().IsRegular() {
94 return nil, nil
95 }
96 sum, _, err := cksum(r)
97 if err != nil {
98 return nil, err
99 }
100 return []KeyVal{KeyVal(fmt.Sprintf("cksum=%d", sum))}, nil
101 }
102 hasherKeywordFunc = func(name string, newHash func() hash.Hash) KeywordFunc {
103 return func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
104 if !info.Mode().IsRegular() {
105 return nil, nil
106 }
107 h := newHash()
108 if _, err := io.Copy(h, r); err != nil {
109 return nil, err
110 }
111 return []KeyVal{KeyVal(fmt.Sprintf("%s=%x", KeywordSynonym(name), h.Sum(nil)))}, nil
112 }
113 }
114 tartimeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
115 return []KeyVal{KeyVal(fmt.Sprintf("tar_time=%d.%9.9d", info.ModTime().Unix(), 0))}, nil
116 }
117 timeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
118 tSec := info.ModTime().Unix()
119 tNano := info.ModTime().Nanosecond()
120 return []KeyVal{KeyVal(fmt.Sprintf("time=%d.%9.9d", tSec, tNano))}, nil
121 }
122 linkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
123 if sys, ok := info.Sys().(*tar.Header); ok {
124 if sys.Linkname != "" {
125 linkname, err := govis.Vis(sys.Linkname, DefaultVisFlags)
126 if err != nil {
127 return nil, nil
128 }
129 return []KeyVal{KeyVal(fmt.Sprintf("link=%s", linkname))}, nil
130 }
131 return nil, nil
132 }
133
134 if info.Mode()&os.ModeSymlink != 0 {
135 str, err := os.Readlink(path)
136 if err != nil {
137 return nil, nil
138 }
139 linkname, err := govis.Vis(str, DefaultVisFlags)
140 if err != nil {
141 return nil, nil
142 }
143 return []KeyVal{KeyVal(fmt.Sprintf("link=%s", linkname))}, nil
144 }
145 return nil, nil
146 }
147 typeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
148 if info.Mode().IsDir() {
149 return []KeyVal{"type=dir"}, nil
150 }
151 if info.Mode().IsRegular() {
152 return []KeyVal{"type=file"}, nil
153 }
154 if info.Mode()&os.ModeSocket != 0 {
155 return []KeyVal{"type=socket"}, nil
156 }
157 if info.Mode()&os.ModeSymlink != 0 {
158 return []KeyVal{"type=link"}, nil
159 }
160 if info.Mode()&os.ModeNamedPipe != 0 {
161 return []KeyVal{"type=fifo"}, nil
162 }
163 if info.Mode()&os.ModeDevice != 0 {
164 if info.Mode()&os.ModeCharDevice != 0 {
165 return []KeyVal{"type=char"}, nil
166 }
167 return []KeyVal{"type=block"}, nil
168 }
169 return nil, nil
170 }
171 )
0 // +build darwin freebsd netbsd openbsd
1
2 package mtree
3
4 import (
5 "archive/tar"
6 "fmt"
7 "io"
8 "os"
9 "os/user"
10 "syscall"
11 )
12
13 var (
14 flagsKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
15 // ideally this will pull in from here https://www.freebsd.org/cgi/man.cgi?query=chflags&sektion=2
16 return nil, nil
17 }
18
19 unameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
20 if hdr, ok := info.Sys().(*tar.Header); ok {
21 return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", hdr.Uname))}, nil
22 }
23
24 stat := info.Sys().(*syscall.Stat_t)
25 u, err := user.LookupId(fmt.Sprintf("%d", stat.Uid))
26 if err != nil {
27 return nil, err
28 }
29 return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", u.Username))}, nil
30 }
31 gnameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
32 if hdr, ok := info.Sys().(*tar.Header); ok {
33 return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", hdr.Gname))}, nil
34 }
35
36 stat := info.Sys().(*syscall.Stat_t)
37 g, err := lookupGroupID(fmt.Sprintf("%d", stat.Gid))
38 if err != nil {
39 return nil, err
40 }
41 return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", g.Name))}, nil
42 }
43 uidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
44 if hdr, ok := info.Sys().(*tar.Header); ok {
45 return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", hdr.Uid))}, nil
46 }
47 stat := info.Sys().(*syscall.Stat_t)
48 return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", stat.Uid))}, nil
49 }
50 gidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
51 if hdr, ok := info.Sys().(*tar.Header); ok {
52 return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", hdr.Gid))}, nil
53 }
54 if stat, ok := info.Sys().(*syscall.Stat_t); ok {
55 return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", stat.Gid))}, nil
56 }
57 return nil, nil
58 }
59 nlinkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
60 if stat, ok := info.Sys().(*syscall.Stat_t); ok {
61 return []KeyVal{KeyVal(fmt.Sprintf("nlink=%d", stat.Nlink))}, nil
62 }
63 return nil, nil
64 }
65 xattrKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
66 return nil, nil
67 }
68 )
0 // +build linux
1
2 package mtree
3
4 import (
5 "archive/tar"
6 "encoding/base64"
7 "fmt"
8 "io"
9 "os"
10 "os/user"
11 "syscall"
12
13 "github.com/vbatts/go-mtree/pkg/govis"
14 "github.com/vbatts/go-mtree/xattr"
15 )
16
17 var (
18 // this is bsd specific https://www.freebsd.org/cgi/man.cgi?query=chflags&sektion=2
19 flagsKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
20 return nil, nil
21 }
22
23 unameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
24 if hdr, ok := info.Sys().(*tar.Header); ok {
25 return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", hdr.Uname))}, nil
26 }
27
28 stat := info.Sys().(*syscall.Stat_t)
29 u, err := user.LookupId(fmt.Sprintf("%d", stat.Uid))
30 if err != nil {
31 return nil, nil
32 }
33 return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", u.Username))}, nil
34 }
35 gnameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
36 if hdr, ok := info.Sys().(*tar.Header); ok {
37 return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", hdr.Gname))}, nil
38 }
39
40 stat := info.Sys().(*syscall.Stat_t)
41 g, err := lookupGroupID(fmt.Sprintf("%d", stat.Gid))
42 if err != nil {
43 return nil, nil
44 }
45 return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", g.Name))}, nil
46 }
47 uidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
48 if hdr, ok := info.Sys().(*tar.Header); ok {
49 return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", hdr.Uid))}, nil
50 }
51 stat := info.Sys().(*syscall.Stat_t)
52 return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", stat.Uid))}, nil
53 }
54 gidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
55 if hdr, ok := info.Sys().(*tar.Header); ok {
56 return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", hdr.Gid))}, nil
57 }
58 if stat, ok := info.Sys().(*syscall.Stat_t); ok {
59 return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", stat.Gid))}, nil
60 }
61 return nil, nil
62 }
63 nlinkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
64 if stat, ok := info.Sys().(*syscall.Stat_t); ok {
65 return []KeyVal{KeyVal(fmt.Sprintf("nlink=%d", stat.Nlink))}, nil
66 }
67 return nil, nil
68 }
69 xattrKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
70 if hdr, ok := info.Sys().(*tar.Header); ok {
71 if len(hdr.Xattrs) == 0 {
72 return nil, nil
73 }
74 klist := []KeyVal{}
75 for k, v := range hdr.Xattrs {
76 encKey, err := govis.Vis(k, DefaultVisFlags)
77 if err != nil {
78 return nil, nil
79 }
80 klist = append(klist, KeyVal(fmt.Sprintf("xattr.%s=%s", encKey, base64.StdEncoding.EncodeToString([]byte(v)))))
81 }
82 return klist, nil
83 }
84 if !info.Mode().IsRegular() && !info.Mode().IsDir() {
85 return nil, nil
86 }
87
88 xlist, err := xattr.List(path)
89 if err != nil {
90 return nil, nil
91 }
92 klist := make([]KeyVal, len(xlist))
93 for i := range xlist {
94 data, err := xattr.Get(path, xlist[i])
95 if err != nil {
96 return nil, nil
97 }
98 encKey, err := govis.Vis(xlist[i], DefaultVisFlags)
99 if err != nil {
100 return nil, nil
101 }
102 klist[i] = KeyVal(fmt.Sprintf("xattr.%s=%s", encKey, base64.StdEncoding.EncodeToString(data)))
103 }
104 return klist, nil
105 }
106 )
0 // +build !linux,!darwin,!freebsd,!netbsd,!openbsd
1
2 package mtree
3
4 import (
5 "archive/tar"
6 "fmt"
7 "io"
8 "os"
9 )
10
11 var (
12 // this is bsd specific https://www.freebsd.org/cgi/man.cgi?query=chflags&sektion=2
13 flagsKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
14 return nil, nil
15 }
16 unameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
17 if hdr, ok := info.Sys().(*tar.Header); ok {
18 return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", hdr.Uname))}, nil
19 }
20 return nil, nil
21 }
22 gnameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
23 if hdr, ok := info.Sys().(*tar.Header); ok {
24 return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", hdr.Gname))}, nil
25 }
26 return nil, nil
27 }
28 uidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
29 if hdr, ok := info.Sys().(*tar.Header); ok {
30 return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", hdr.Uid))}, nil
31 }
32 return nil, nil
33 }
34 gidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
35 if hdr, ok := info.Sys().(*tar.Header); ok {
36 return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", hdr.Gid))}, nil
37 }
38 return nil, nil
39 }
40 nlinkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
41 return nil, nil
42 }
43 xattrKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
44 return nil, nil
45 }
46 )
0 package mtree
1
2 import (
3 "fmt"
4 "strings"
5
6 "github.com/vbatts/go-mtree/pkg/govis"
7 )
8
9 // DefaultVisFlags is the set of Vis flags used when encoding filenames and
10 // other similar entries.
11 const DefaultVisFlags govis.VisFlag = govis.VisWhite | govis.VisOctal | govis.VisGlob
12
13 // Keyword is the string name of a keyword, with some convenience functions for
14 // determining whether it is a default or bsd standard keyword.
15 // It is the portion of a "keyword=value" pair before the "=".
16 type Keyword string
17
18 // Prefix is the portion of the keyword before a first "." (if present).
19 //
20 // Primarily for the xattr use-case, where the keyword `xattr.security.selinux` would have a Prefix of `xattr`.
21 func (k Keyword) Prefix() Keyword {
22 if strings.Contains(string(k), ".") {
23 return Keyword(strings.SplitN(string(k), ".", 2)[0])
24 }
25 return k
26 }
27
28 // Suffix is the portion of the keyword after a first ".".
29 // This is an optional feature.
30 //
31 // Primarily for the xattr use-case, where the keyword `xattr.security.selinux` would have a Suffix of `security.selinux`.
32 func (k Keyword) Suffix() string {
33 if strings.Contains(string(k), ".") {
34 return strings.SplitN(string(k), ".", 2)[1]
35 }
36 return string(k)
37 }
38
39 // Default returns whether this keyword is in the default set of keywords
40 func (k Keyword) Default() bool {
41 return InKeywordSlice(k, DefaultKeywords)
42 }
43
44 // Bsd returns whether this keyword is in the upstream FreeBSD mtree(8)
45 func (k Keyword) Bsd() bool {
46 return InKeywordSlice(k, BsdKeywords)
47 }
48
49 // Synonym returns the canonical name for this keyword. This provides the
50 // same functionality as KeywordSynonym()
51 func (k Keyword) Synonym() Keyword {
52 return KeywordSynonym(string(k))
53 }
54
55 // InKeywordSlice checks for the presence of `a` in `list`
56 func InKeywordSlice(a Keyword, list []Keyword) bool {
57 for _, b := range list {
58 if b == a {
59 return true
60 }
61 }
62 return false
63 }
64 func inKeyValSlice(a KeyVal, list []KeyVal) bool {
65 for _, b := range list {
66 if b == a {
67 return true
68 }
69 }
70 return false
71 }
72
73 // ToKeywords makes a list of Keyword from a list of string
74 func ToKeywords(list []string) []Keyword {
75 ret := make([]Keyword, len(list))
76 for i := range list {
77 ret[i] = Keyword(list[i])
78 }
79 return ret
80 }
81
82 // FromKeywords makes a list of string from a list of Keyword
83 func FromKeywords(list []Keyword) []string {
84 ret := make([]string, len(list))
85 for i := range list {
86 ret[i] = string(list[i])
87 }
88 return ret
89 }
90
91 // KeyValToString constructs a list of string from the list of KeyVal
92 func KeyValToString(list []KeyVal) []string {
93 ret := make([]string, len(list))
94 for i := range list {
95 ret[i] = string(list[i])
96 }
97 return ret
98 }
99
100 // StringToKeyVals constructs a list of KeyVal from the list of strings, like "keyword=value"
101 func StringToKeyVals(list []string) []KeyVal {
102 ret := make([]KeyVal, len(list))
103 for i := range list {
104 ret[i] = KeyVal(list[i])
105 }
106 return ret
107 }
108
109 // KeyVal is a "keyword=value"
110 type KeyVal string
111
112 // Keyword returns the keyword portion of the "keyword=value" pair
113 func (kv KeyVal) Keyword() Keyword {
114 if !strings.Contains(string(kv), "=") {
115 return Keyword("")
116 }
117 return Keyword(strings.SplitN(strings.TrimSpace(string(kv)), "=", 2)[0])
118 }
119
120 // Value is the data/value portion of "keyword=value"
121 func (kv KeyVal) Value() string {
122 if !strings.Contains(string(kv), "=") {
123 return ""
124 }
125 return strings.SplitN(strings.TrimSpace(string(kv)), "=", 2)[1]
126 }
127
128 // NewValue returns a new KeyVal with the newval
129 func (kv KeyVal) NewValue(newval string) KeyVal {
130 return KeyVal(fmt.Sprintf("%s=%s", kv.Keyword(), newval))
131 }
132
133 // Equal returns whether two KeyVal are equivalent. This takes
134 // care of certain odd cases such as tar_mtime, and should be used over
135 // using == comparisons directly unless you really know what you're
136 // doing.
137 func (kv KeyVal) Equal(b KeyVal) bool {
138 // TODO: Implement handling of tar_mtime.
139 return kv.Keyword() == b.Keyword() && kv.Value() == b.Value()
140 }
141
142 func keywordPrefixes(kvset []Keyword) []Keyword {
143 kvs := []Keyword{}
144 for _, kv := range kvset {
145 kvs = append(kvs, kv.Prefix())
146 }
147 return kvs
148 }
149
150 // keyvalSelector takes an array of KeyVal ("keyword=value") and filters it
151 // down to only the provided set of keywords
152 func keyvalSelector(keyval []KeyVal, keyset []Keyword) []KeyVal {
153 retList := []KeyVal{}
154 for _, kv := range keyval {
155 if InKeywordSlice(kv.Keyword().Prefix(), keywordPrefixes(keyset)) {
156 retList = append(retList, kv)
157 }
158 }
159 return retList
160 }
161
162 func keyValDifference(this, that []KeyVal) []KeyVal {
163 if len(this) == 0 {
164 return that
165 }
166 diff := []KeyVal{}
167 for _, kv := range this {
168 if !inKeyValSlice(kv, that) {
169 diff = append(diff, kv)
170 }
171 }
172 return diff
173 }
174 func keyValCopy(set []KeyVal) []KeyVal {
175 ret := make([]KeyVal, len(set))
176 for i := range set {
177 ret[i] = set[i]
178 }
179 return ret
180 }
181
182 // Has the "keyword" present in the list of KeyVal, and returns the
183 // corresponding KeyVal, else an empty string.
184 func Has(keyvals []KeyVal, keyword string) []KeyVal {
185 return HasKeyword(keyvals, Keyword(keyword))
186 }
187
188 // HasKeyword checks whether the "keyword" is present in the list of KeyVal,
189 // and returns the corresponding KeyVals (empty if there is no match).
190 // This match is done on the Prefix of the keyword only.
191 func HasKeyword(keyvals []KeyVal, keyword Keyword) []KeyVal {
192 kvs := []KeyVal{}
193 for i := range keyvals {
194 if keyvals[i].Keyword().Prefix() == keyword.Prefix() {
195 kvs = append(kvs, keyvals[i])
196 }
197 }
198 return kvs
199 }
200
201 // MergeSet takes the current setKeyVals, and then applies the entryKeyVals
202 // such that the entry's values win. The union is returned.
203 func MergeSet(setKeyVals, entryKeyVals []string) []KeyVal {
204 retList := StringToKeyVals(setKeyVals)
205 eKVs := StringToKeyVals(entryKeyVals)
206 return MergeKeyValSet(retList, eKVs)
207 }
208
209 // MergeKeyValSet does a merge of the two sets of KeyVal, and the KeyVals of
210 // entryKeyVals win when there is a duplicate Keyword.
211 func MergeKeyValSet(setKeyVals, entryKeyVals []KeyVal) []KeyVal {
212 retList := keyValCopy(setKeyVals)
213 seenKeywords := []Keyword{}
214 for i := range retList {
215 word := retList[i].Keyword()
216 for _, kv := range HasKeyword(entryKeyVals, word) {
217 // match on the keyword prefix and suffix here
218 if kv.Keyword() == word {
219 retList[i] = kv
220 }
221 }
222 seenKeywords = append(seenKeywords, word)
223 }
224 for i := range entryKeyVals {
225 if !InKeywordSlice(entryKeyVals[i].Keyword(), seenKeywords) {
226 retList = append(retList, entryKeyVals[i])
227 }
228 }
229 return retList
230 }
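// A rough sketch of the merge semantics (the values are illustrative):
// entry-local KeyVals override the /set KeyVals with the same Keyword, and any
// remaining entry-local KeyVals are appended.
//
//	set := []KeyVal{"uid=0", "gid=0", "mode=0644"}
//	entry := []KeyVal{"uid=1000", "size=5"}
//	merged := MergeKeyValSet(set, entry)
//	// merged: ["uid=1000", "gid=0", "mode=0644", "size=5"]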
231
232 var (
233 // DefaultKeywords has the several default keyword producers (size, type,
234 // uid, gid, mode, link, nlink, time)
235 DefaultKeywords = []Keyword{
236 "size",
237 "type",
238 "uid",
239 "gid",
240 "mode",
241 "link",
242 "nlink",
243 "time",
244 }
245
246 // DefaultTarKeywords has keywords that should be used when creating a manifest from
247 // an archive. Currently, evaluating the number of hardlinks is not yet implemented.
248 DefaultTarKeywords = []Keyword{
249 "size",
250 "type",
251 "uid",
252 "gid",
253 "mode",
254 "link",
255 "tar_time",
256 }
257
258 // BsdKeywords is the set of keywords that is only in the upstream FreeBSD mtree
259 BsdKeywords = []Keyword{
260 "cksum",
261 "flags", // this one is really mostly BSD specific ...
262 "ignore",
263 "gid",
264 "gname",
265 "link",
266 "md5",
267 "md5digest",
268 "mode",
269 "nlink",
270 "nochange",
271 "optional",
272 "ripemd160digest",
273 "rmd160",
274 "rmd160digest",
275 "sha1",
276 "sha1digest",
277 "sha256",
278 "sha256digest",
279 "sha384",
280 "sha384digest",
281 "sha512",
282 "sha512digest",
283 "size",
284 "tags",
285 "time",
286 "type",
287 "uid",
288 "uname",
289 }
290
291 // SetKeywords is the default set of keywords calculated for a `/set` SpecialType
292 SetKeywords = []Keyword{
293 "uid",
294 "gid",
295 }
296 )
297
298 // KeywordSynonym returns the canonical name for keywords that have synonyms,
299 // and just returns the name provided if there is no synonym. In this way it
300 // ought to be safe to wrap any keyword name.
301 func KeywordSynonym(name string) Keyword {
302 var retname string
303 switch name {
304 case "md5":
305 retname = "md5digest"
306 case "rmd160":
307 retname = "ripemd160digest"
308 case "rmd160digest":
309 retname = "ripemd160digest"
310 case "sha1":
311 retname = "sha1digest"
312 case "sha256":
313 retname = "sha256digest"
314 case "sha384":
315 retname = "sha384digest"
316 case "sha512":
317 retname = "sha512digest"
318 case "sha512256":
319 retname = "sha512256digest"
320 case "xattrs":
321 retname = "xattr"
322 default:
323 retname = name
324 }
325 return Keyword(retname)
326 }
0 // +build linux
1
2 package mtree
3
4 import (
5 "fmt"
6 "io/ioutil"
7 "os"
8 "path/filepath"
9 "testing"
10
11 "github.com/vbatts/go-mtree/xattr"
12 )
13
14 func TestXattr(t *testing.T) {
15 testDir, present := os.LookupEnv("MTREE_TESTDIR")
16 if !present {
17 // a bit dirty to create/destroy a directory in cwd,
18 // but often /tmp is mounted tmpfs and doesn't support
19 // xattrs
20 testDir = "."
21 }
22 dir, err := ioutil.TempDir(testDir, "test.xattrs.")
23 if err != nil {
24 t.Fatal(err)
25 }
26 defer os.RemoveAll(dir)
27 fh, err := os.Create(filepath.Join(dir, "file"))
28 if err != nil {
29 t.Fatal(err)
30 }
31 fh.WriteString("howdy")
32 fh.Sync()
33 if _, err := fh.Seek(0, 0); err != nil {
34 t.Fatal(err)
35 }
36
37 if err := os.Symlink("./no/such/path", filepath.Join(dir, "symlink")); err != nil {
38 t.Fatal(err)
39 }
40
41 if err := xattr.Set(dir, "user.test", []byte("directory")); err != nil {
42 t.Skip(fmt.Sprintf("skipping: %q does not support xattrs", dir))
43 }
44 if err := xattr.Set(filepath.Join(dir, "file"), "user.test", []byte("regular file")); err != nil {
45 t.Fatal(err)
46 }
47
48 dirstat, err := os.Lstat(dir)
49 if err != nil {
50 t.Fatal(err)
51 }
52 // Check the directory
53 kvs, err := xattrKeywordFunc(dir, dirstat, nil)
54 if err != nil {
55 t.Error(err)
56 }
57 if len(kvs) == 0 {
58 t.Errorf("expected a keyval; got none")
59 }
60
61 filestat, err := fh.Stat()
62 if err != nil {
63 t.Fatal(err)
64 }
65 // Check the regular file
66 kvs, err = xattrKeywordFunc(filepath.Join(dir, "file"), filestat, fh)
67 if err != nil {
68 t.Error(err)
69 }
70 if len(kvs) == 0 {
71 t.Errorf("expected a keyval; got none")
72 }
73
74 linkstat, err := os.Lstat(filepath.Join(dir, "symlink"))
75 if err != nil {
76 t.Fatal(err)
77 }
78 // Check a broken symlink
79 _, err = xattrKeywordFunc(filepath.Join(dir, "symlink"), linkstat, nil)
80 if err != nil {
81 t.Error(err)
82 }
83 }
0 package mtree
1
2 import (
3 "fmt"
4 "os"
5 "testing"
6 "time"
7 )
8
9 func TestKeyValRoundtrip(t *testing.T) {
10 kv := KeyVal("xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA==")
11 expected := "xattr.security.selinux"
12 got := string(kv.Keyword())
13 if got != expected {
14 t.Errorf("expected %q; got %q", expected, got)
15 }
16
17 expected = "xattr"
18 got = string(kv.Keyword().Prefix())
19 if got != expected {
20 t.Errorf("expected %q; got %q", expected, got)
21 }
22
23 expected = "security.selinux"
24 got = kv.Keyword().Suffix()
25 if got != expected {
26 t.Errorf("expected %q; got %q", expected, got)
27 }
28
29 expected = "dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA=="
30 got = kv.Value()
31 if got != expected {
32 t.Errorf("expected %q; got %q", expected, got)
33 }
34
35 expected = "xattr.security.selinux=farts"
36 got = string(kv.NewValue("farts"))
37 if got != expected {
38 t.Errorf("expected %q; got %q", expected, got)
39 }
40
41 expected = "xattr.security.selinux=farts"
42 kv1 := KeyVal(got)
43 kv2 := kv.NewValue("farts")
44 if !kv2.Equal(kv1) {
45 t.Errorf("expected equality of %q and %q", kv1, kv2)
46 }
47
48 }
49
50 type fakeFileInfo struct {
51 mtime time.Time
52 }
53
54 func (ffi fakeFileInfo) Name() string {
55 // noop
56 return ""
57 }
58
59 func (ffi fakeFileInfo) Size() int64 {
60 // noop
61 return -1
62 }
63
64 func (ffi fakeFileInfo) Mode() os.FileMode {
65 // noop
66 return 0
67 }
68
69 func (ffi fakeFileInfo) ModTime() time.Time {
70 return ffi.mtime
71 }
72
73 func (ffi fakeFileInfo) IsDir() bool {
74 return ffi.Mode().IsDir()
75 }
76
77 func (ffi fakeFileInfo) Sys() interface{} {
78 // noop
79 return nil
80 }
81
82 func TestKeywordsTimeNano(t *testing.T) {
83 // We have to make sure that timeKeywordFunc always returns the correct
84 // formatting with regards to the nanotime.
85
86 for _, test := range []struct {
87 sec, nsec int64
88 }{
89 {1234, 123456789},
90 {5555, 987654321},
91 {1337, 100000000},
92 {8888, 999999999},
93 {144123582122, 1},
94 {857125628319, 0},
95 } {
96 mtime := time.Unix(test.sec, test.nsec)
97 expected := KeyVal(fmt.Sprintf("time=%d.%9.9d", test.sec, test.nsec))
98 got, err := timeKeywordFunc("", fakeFileInfo{
99 mtime: mtime,
100 }, nil)
101 if err != nil {
102 t.Errorf("unexpected error while parsing '%q': %q", mtime, err)
103 }
104 if len(got) != 1 {
105 t.Errorf("expected 1 KeyVal, but got %d", len(got))
106 }
107 if expected != got[0] {
108 t.Errorf("keyword didn't match, expected '%s' got '%s'", expected, got[0])
109 }
110 }
111 }
112
113 func TestKeywordsTimeTar(t *testing.T) {
114 // tartimeKeywordFunc always has nsec = 0.
115
116 for _, test := range []struct {
117 sec, nsec int64
118 }{
119 {1234, 123456789},
120 {5555, 987654321},
121 {1337, 100000000},
122 {8888, 999999999},
123 {144123582122, 1},
124 {857125628319, 0},
125 } {
126 mtime := time.Unix(test.sec, test.nsec)
127 expected := KeyVal(fmt.Sprintf("tar_time=%d.%9.9d", test.sec, 0))
128 got, err := tartimeKeywordFunc("", fakeFileInfo{
129 mtime: mtime,
130 }, nil)
131 if err != nil {
132 t.Errorf("unexpected error while parsing '%q': %q", mtime, err)
133 }
134 if len(got) != 1 {
135 t.Errorf("expected 1 KeyVal, but got %d", len(got))
136 }
137 if expected != got[0] {
138 t.Errorf("keyword didn't match, expected '%s' got '%s'", expected, got[0])
139 }
140 }
141 }
142
143 func TestKeywordSynonym(t *testing.T) {
144 checklist := []struct {
145 give string
146 expect Keyword
147 }{
148 {give: "time", expect: "time"},
149 {give: "md5", expect: "md5digest"},
150 {give: "md5digest", expect: "md5digest"},
151 {give: "rmd160", expect: "ripemd160digest"},
152 {give: "rmd160digest", expect: "ripemd160digest"},
153 {give: "ripemd160digest", expect: "ripemd160digest"},
154 {give: "sha1", expect: "sha1digest"},
155 {give: "sha1digest", expect: "sha1digest"},
156 {give: "sha256", expect: "sha256digest"},
157 {give: "sha256digest", expect: "sha256digest"},
158 {give: "sha384", expect: "sha384digest"},
159 {give: "sha384digest", expect: "sha384digest"},
160 {give: "sha512", expect: "sha512digest"},
161 {give: "sha512digest", expect: "sha512digest"},
162 {give: "xattr", expect: "xattr"},
163 {give: "xattrs", expect: "xattr"},
164 }
165
166 for i, check := range checklist {
167 got := KeywordSynonym(check.give)
168 if got != check.expect {
169 t.Errorf("%d: expected %q; got %q", i, check.expect, got)
170 }
171 }
172 }
0 // +build darwin dragonfly freebsd openbsd linux netbsd solaris
1
2 package mtree
3
4 import (
5 "os"
6 "time"
7
8 "golang.org/x/sys/unix"
9 )
10
11 func lchtimes(name string, atime time.Time, mtime time.Time) error {
12 utimes := []unix.Timespec{
13 unix.NsecToTimespec(atime.UnixNano()),
14 unix.NsecToTimespec(mtime.UnixNano()),
15 }
16 if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes, unix.AT_SYMLINK_NOFOLLOW); e != nil {
17 return &os.PathError{Op: "chtimes", Path: name, Err: e}
18 }
19 return nil
20
21 }
0 // +build windows
1
2 package mtree
3
4 import (
5 "time"
6 )
7
8 func lchtimes(name string, atime time.Time, mtime time.Time) error {
9 return nil
10 }
0 // +build go1.7
1
2 package mtree
3
4 import (
5 "os/user"
6 )
7
8 var lookupGroupID = user.LookupGroupId
0 // +build !go1.7
1
2 package mtree
3
4 import (
5 "bufio"
6 "bytes"
7 "io"
8 "os"
9 "strconv"
10 "strings"
11 )
12
13 const groupFile = "/etc/group"
14
15 var colon = []byte{':'}
16
17 // Group represents a grouping of users.
18 //
19 // On POSIX systems Gid contains a decimal number representing the group ID.
20 type Group struct {
21 Gid string // group ID
22 Name string // group name
23 }
24
25 func lookupGroupID(id string) (*Group, error) {
26 f, err := os.Open(groupFile)
27 if err != nil {
28 return nil, err
29 }
30 defer f.Close()
31 return findGroupID(id, f)
32 }
33
34 func findGroupID(id string, r io.Reader) (*Group, error) {
35 if v, err := readColonFile(r, matchGroupIndexValue(id, 2)); err != nil {
36 return nil, err
37 } else if v != nil {
38 return v.(*Group), nil
39 }
40 return nil, UnknownGroupIDError(id)
41 }
42
43 // lineFunc returns a value, an error, or (nil, nil) to skip the row.
44 type lineFunc func(line []byte) (v interface{}, err error)
45
46 // readColonFile parses r as an /etc/group or /etc/passwd style file, running
47 // fn for each row. readColonFile returns a value, an error, or (nil, nil) if
48 // the end of the file is reached without a match.
49 func readColonFile(r io.Reader, fn lineFunc) (v interface{}, err error) {
50 bs := bufio.NewScanner(r)
51 for bs.Scan() {
52 line := bs.Bytes()
53 // There's no spec for /etc/passwd or /etc/group, but we try to follow
54 // the same rules as the glibc parser, which allows comments and blank
55 // space at the beginning of a line.
56 line = bytes.TrimSpace(line)
57 if len(line) == 0 || line[0] == '#' {
58 continue
59 }
60 v, err = fn(line)
61 if v != nil || err != nil {
62 return
63 }
64 }
65 return nil, bs.Err()
66 }
67
68 func matchGroupIndexValue(value string, idx int) lineFunc {
69 var leadColon string
70 if idx > 0 {
71 leadColon = ":"
72 }
73 substr := []byte(leadColon + value + ":")
74 return func(line []byte) (v interface{}, err error) {
75 if !bytes.Contains(line, substr) || bytes.Count(line, colon) < 3 {
76 return
77 }
78 // wheel:*:0:root
79 parts := strings.SplitN(string(line), ":", 4)
80 if len(parts) < 4 || parts[0] == "" || parts[idx] != value ||
81 // If the file contains +foo and you search for "foo", glibc
82 // returns an "invalid argument" error. Similarly, if you search
83 // for a gid for a row where the group name starts with "+" or "-",
84 // glibc fails to find the record.
85 parts[0][0] == '+' || parts[0][0] == '-' {
86 return
87 }
88 if _, err := strconv.Atoi(parts[2]); err != nil {
89 return nil, nil
90 }
91 return &Group{Name: parts[0], Gid: parts[2]}, nil
92 }
93 }
94
95 // UnknownGroupIDError is returned by LookupGroupId when
96 // a group cannot be found.
97 type UnknownGroupIDError string
98
99 func (e UnknownGroupIDError) Error() string {
100 return "group: unknown groupid " + string(e)
101 }
0 package mtree
1
2 import (
3 "io/ioutil"
4 "os"
5 "testing"
6 )
7
8 var (
9 testFiles = []string{"testdata/source.mtree"}
10 numEntries = map[EntryType]int{
11 FullType: 0,
12 RelativeType: 45,
13 CommentType: 37,
14 SpecialType: 7,
15 DotDotType: 17,
16 BlankType: 34,
17 }
18 expectedLength = int64(7887)
19 )
20
21 func TestParser(t *testing.T) {
22 for _, file := range testFiles {
23 func() {
24 fh, err := os.Open(file)
25 if err != nil {
26 t.Error(err)
27 return
28 }
29 defer fh.Close()
30
31 dh, err := ParseSpec(fh)
32 if err != nil {
33 t.Error(err)
34 }
35 gotNums := countTypes(dh)
36 for typ, num := range numEntries {
37 if gNum, ok := gotNums[typ]; ok {
38 if num != gNum {
39 t.Errorf("for type %s: expected %d, got %d", typ, num, gNum)
40 }
41 }
42 }
43
44 i, err := dh.WriteTo(ioutil.Discard)
45 if err != nil {
46 t.Error(err)
47 }
48 if i != expectedLength {
49 t.Errorf("expected to write %d, but wrote %d", expectedLength, i)
50 }
51
52 }()
53 }
54 }
55
56 func countTypes(dh *DirectoryHierarchy) map[EntryType]int {
57 nT := map[EntryType]int{}
58 for i := range dh.Entries {
59 typ := dh.Entries[i].Type
60 if _, ok := nT[typ]; !ok {
61 nT[typ] = 1
62 } else {
63 nT[typ]++
64 }
65 }
66 return nT
67 }
0 package mtree
1
2 import (
3 "bufio"
4 "io"
5 "path/filepath"
6 "strings"
7 )
8
9 // ParseSpec reads a stream of an mtree specification, and returns the DirectoryHierarchy
10 func ParseSpec(r io.Reader) (*DirectoryHierarchy, error) {
11 s := bufio.NewScanner(r)
12 i := int(0)
13 creator := dhCreator{
14 DH: &DirectoryHierarchy{},
15 }
16 for s.Scan() {
17 str := s.Text()
18 trimmedStr := strings.TrimLeftFunc(str, func(c rune) bool {
19 return c == ' ' || c == '\t'
20 })
21 e := Entry{Pos: i}
22 switch {
23 case strings.HasPrefix(trimmedStr, "#"):
24 e.Raw = str
25 if strings.HasPrefix(trimmedStr, "#mtree") {
26 e.Type = SignatureType
27 } else {
28 e.Type = CommentType
29 // from here, the comment could be "# key: value" metadata
30 // or a relative path hint
31 }
32 case str == "":
33 e.Type = BlankType
34 // nothing else to do here
35 case strings.HasPrefix(str, "/"):
36 e.Type = SpecialType
37 // collapse any escaped newlines
38 for {
39 if strings.HasSuffix(str, `\`) {
40 str = str[:len(str)-1]
41 s.Scan()
42 str += s.Text()
43 } else {
44 break
45 }
46 }
47 // parse the options
48 f := strings.Fields(str)
49 e.Name = f[0]
50 e.Keywords = StringToKeyVals(f[1:])
51 if e.Name == "/set" {
52 creator.curSet = &e
53 } else if e.Name == "/unset" {
54 creator.curSet = nil
55 }
56 case len(strings.Fields(str)) > 0 && strings.Fields(str)[0] == "..":
57 e.Type = DotDotType
58 e.Raw = str
59 if creator.curDir != nil {
60 creator.curDir = creator.curDir.Parent
61 }
62 // nothing else to do here
63 case len(strings.Fields(str)) > 0:
64 // collapse any escaped newlines
65 for {
66 if strings.HasSuffix(str, `\`) {
67 str = str[:len(str)-1]
68 s.Scan()
69 str += s.Text()
70 } else {
71 break
72 }
73 }
74 // parse the options
75 f := strings.Fields(str)
76 e.Name = filepath.Clean(f[0])
77 if strings.Contains(e.Name, "/") {
78 e.Type = FullType
79 } else {
80 e.Type = RelativeType
81 }
82 e.Keywords = StringToKeyVals(f[1:])
83 // TODO: gather keywords if using tar stream
84 e.Parent = creator.curDir
85 for i := range e.Keywords {
86 kv := KeyVal(e.Keywords[i])
87 if kv.Keyword() == "type" {
88 if kv.Value() == "dir" {
89 creator.curDir = &e
90 } else {
91 creator.curEnt = &e
92 }
93 }
94 }
95 e.Set = creator.curSet
96 default:
97 // TODO(vbatts) log a warning?
98 continue
99 }
100 creator.DH.Entries = append(creator.DH.Entries, e)
101 i++
102 }
103 return creator.DH, s.Err()
104 }
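// A rough usage sketch (the spec content is illustrative): ParseSpec accepts any
// io.Reader, so a spec can be parsed from memory just as easily as from a file.
//
//	spec := "/set type=file uid=0 gid=0\n. type=dir mode=0755\nfile size=5 mode=0644\n"
//	dh, err := ParseSpec(strings.NewReader(spec))
//	if err == nil {
//		fmt.Println(len(dh.Entries), dh.UsedKeywords())
//	}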
0
1 Apache License
2 Version 2.0, January 2004
3 http://www.apache.org/licenses/
4
5 TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
7 1. Definitions.
8
9 "License" shall mean the terms and conditions for use, reproduction,
10 and distribution as defined by Sections 1 through 9 of this document.
11
12 "Licensor" shall mean the copyright owner or entity authorized by
13 the copyright owner that is granting the License.
14
15 "Legal Entity" shall mean the union of the acting entity and all
16 other entities that control, are controlled by, or are under common
17 control with that entity. For the purposes of this definition,
18 "control" means (i) the power, direct or indirect, to cause the
19 direction or management of such entity, whether by contract or
20 otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 outstanding shares, or (iii) beneficial ownership of such entity.
22
23 "You" (or "Your") shall mean an individual or Legal Entity
24 exercising permissions granted by this License.
25
26 "Source" form shall mean the preferred form for making modifications,
27 including but not limited to software source code, documentation
28 source, and configuration files.
29
30 "Object" form shall mean any form resulting from mechanical
31 transformation or translation of a Source form, including but
32 not limited to compiled object code, generated documentation,
33 and conversions to other media types.
34
35 "Work" shall mean the work of authorship, whether in Source or
36 Object form, made available under the License, as indicated by a
37 copyright notice that is included in or attached to the work
38 (an example is provided in the Appendix below).
39
40 "Derivative Works" shall mean any work, whether in Source or Object
41 form, that is based on (or derived from) the Work and for which the
42 editorial revisions, annotations, elaborations, or other modifications
43 represent, as a whole, an original work of authorship. For the purposes
44 of this License, Derivative Works shall not include works that remain
45 separable from, or merely link (or bind by name) to the interfaces of,
46 the Work and Derivative Works thereof.
47
48 "Contribution" shall mean any work of authorship, including
49 the original version of the Work and any modifications or additions
50 to that Work or Derivative Works thereof, that is intentionally
51 submitted to Licensor for inclusion in the Work by the copyright owner
52 or by an individual or Legal Entity authorized to submit on behalf of
53 the copyright owner. For the purposes of this definition, "submitted"
54 means any form of electronic, verbal, or written communication sent
55 to the Licensor or its representatives, including but not limited to
56 communication on electronic mailing lists, source code control systems,
57 and issue tracking systems that are managed by, or on behalf of, the
58 Licensor for the purpose of discussing and improving the Work, but
59 excluding communication that is conspicuously marked or otherwise
60 designated in writing by the copyright owner as "Not a Contribution."
61
62 "Contributor" shall mean Licensor and any individual or Legal Entity
63 on behalf of whom a Contribution has been received by Licensor and
64 subsequently incorporated within the Work.
65
66 2. Grant of Copyright License. Subject to the terms and conditions of
67 this License, each Contributor hereby grants to You a perpetual,
68 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 copyright license to reproduce, prepare Derivative Works of,
70 publicly display, publicly perform, sublicense, and distribute the
71 Work and such Derivative Works in Source or Object form.
72
73 3. Grant of Patent License. Subject to the terms and conditions of
74 this License, each Contributor hereby grants to You a perpetual,
75 worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 (except as stated in this section) patent license to make, have made,
77 use, offer to sell, sell, import, and otherwise transfer the Work,
78 where such license applies only to those patent claims licensable
79 by such Contributor that are necessarily infringed by their
80 Contribution(s) alone or by combination of their Contribution(s)
81 with the Work to which such Contribution(s) was submitted. If You
82 institute patent litigation against any entity (including a
83 cross-claim or counterclaim in a lawsuit) alleging that the Work
84 or a Contribution incorporated within the Work constitutes direct
85 or contributory patent infringement, then any patent licenses
86 granted to You under this License for that Work shall terminate
87 as of the date such litigation is filed.
88
89 4. Redistribution. You may reproduce and distribute copies of the
90 Work or Derivative Works thereof in any medium, with or without
91 modifications, and in Source or Object form, provided that You
92 meet the following conditions:
93
94 (a) You must give any other recipients of the Work or
95 Derivative Works a copy of this License; and
96
97 (b) You must cause any modified files to carry prominent notices
98 stating that You changed the files; and
99
100 (c) You must retain, in the Source form of any Derivative Works
101 that You distribute, all copyright, patent, trademark, and
102 attribution notices from the Source form of the Work,
103 excluding those notices that do not pertain to any part of
104 the Derivative Works; and
105
106 (d) If the Work includes a "NOTICE" text file as part of its
107 distribution, then any Derivative Works that You distribute must
108 include a readable copy of the attribution notices contained
109 within such NOTICE file, excluding those notices that do not
110 pertain to any part of the Derivative Works, in at least one
111 of the following places: within a NOTICE text file distributed
112 as part of the Derivative Works; within the Source form or
113 documentation, if provided along with the Derivative Works; or,
114 within a display generated by the Derivative Works, if and
115 wherever such third-party notices normally appear. The contents
116 of the NOTICE file are for informational purposes only and
117 do not modify the License. You may add Your own attribution
118 notices within Derivative Works that You distribute, alongside
119 or as an addendum to the NOTICE text from the Work, provided
120 that such additional attribution notices cannot be construed
121 as modifying the License.
122
123 You may add Your own copyright statement to Your modifications and
124 may provide additional or different license terms and conditions
125 for use, reproduction, or distribution of Your modifications, or
126 for any such Derivative Works as a whole, provided Your use,
127 reproduction, and distribution of the Work otherwise complies with
128 the conditions stated in this License.
129
130 5. Submission of Contributions. Unless You explicitly state otherwise,
131 any Contribution intentionally submitted for inclusion in the Work
132 by You to the Licensor shall be under the terms and conditions of
133 this License, without any additional terms or conditions.
134 Notwithstanding the above, nothing herein shall supersede or modify
135 the terms of any separate license agreement you may have executed
136 with Licensor regarding such Contributions.
137
138 6. Trademarks. This License does not grant permission to use the trade
139 names, trademarks, service marks, or product names of the Licensor,
140 except as required for reasonable and customary use in describing the
141 origin of the Work and reproducing the content of the NOTICE file.
142
143 7. Disclaimer of Warranty. Unless required by applicable law or
144 agreed to in writing, Licensor provides the Work (and each
145 Contributor provides its Contributions) on an "AS IS" BASIS,
146 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 implied, including, without limitation, any warranties or conditions
148 of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 PARTICULAR PURPOSE. You are solely responsible for determining the
150 appropriateness of using or redistributing the Work and assume any
151 risks associated with Your exercise of permissions under this License.
152
153 8. Limitation of Liability. In no event and under no legal theory,
154 whether in tort (including negligence), contract, or otherwise,
155 unless required by applicable law (such as deliberate and grossly
156 negligent acts) or agreed to in writing, shall any Contributor be
157 liable to You for damages, including any direct, indirect, special,
158 incidental, or consequential damages of any character arising as a
159 result of this License or out of the use or inability to use the
160 Work (including but not limited to damages for loss of goodwill,
161 work stoppage, computer failure or malfunction, or any and all
162 other commercial damages or losses), even if such Contributor
163 has been advised of the possibility of such damages.
164
165 9. Accepting Warranty or Additional Liability. While redistributing
166 the Work or Derivative Works thereof, You may choose to offer,
167 and charge a fee for, acceptance of support, warranty, indemnity,
168 or other liability obligations and/or rights consistent with this
169 License. However, in accepting such obligations, You may act only
170 on Your own behalf and on Your sole responsibility, not on behalf
171 of any other Contributor, and only if You agree to indemnify,
172 defend, and hold each Contributor harmless for any liability
173 incurred by, or claims asserted against, such Contributor by reason
174 of your accepting any such warranty or additional liability.
175
176 END OF TERMS AND CONDITIONS
177
178 APPENDIX: How to apply the Apache License to your work.
179
180 To apply the Apache License to your work, attach the following
181 boilerplate notice, with the fields enclosed by brackets "[]"
182 replaced with your own identifying information. (Don't include
183 the brackets!) The text should be enclosed in the appropriate
184 comment syntax for the file format. We also recommend that a
185 file or class name and description of purpose be included on the
186 same "printed page" as the copyright notice for easier
187 identification within third-party archives.
188
189 Copyright [yyyy] [name of copyright owner]
190
191 Licensed under the Apache License, Version 2.0 (the "License");
192 you may not use this file except in compliance with the License.
193 You may obtain a copy of the License at
194
195 http://www.apache.org/licenses/LICENSE-2.0
196
197 Unless required by applicable law or agreed to in writing, software
198 distributed under the License is distributed on an "AS IS" BASIS,
199 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 See the License for the specific language governing permissions and
201 limitations under the License.
0 ## `govis` ##
1
2 `govis` is a BSD-compatible `vis(3)` and `unvis(3)` encoding implementation
3 that is unicode aware and written in Go. None of this code comes from the
4 original BSD code, nor does it come from `go-mtree`'s port of said code,
5 because 80s BSD code is not very nice to read.
6
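A minimal usage sketch; the flag combination below simply mirrors the one
exercised in this package's tests, and the input string and error handling are
illustrative only:

```
package main

import (
	"fmt"

	"github.com/vbatts/go-mtree/pkg/govis"
)

func main() {
	flags := govis.VisWhite | govis.VisOctal | govis.VisGlob

	// Encode a name that contains whitespace, which mtree cannot store verbatim.
	enc, err := govis.Vis("file name\twith whitespace", flags)
	if err != nil {
		panic(err)
	}

	// Decode it again; the round-trip preserves the original bytes.
	dec, err := govis.Unvis(enc, flags)
	if err != nil {
		panic(err)
	}
	fmt.Println(enc, dec == "file name\twith whitespace")
}
```
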
7 ### License ###
8
9 `govis` is licensed under the Apache 2.0 license.
10
11 ```
12 govis: unicode aware vis(3) encoding implementation
13 Copyright (C) 2017 SUSE LLC.
14
15 Licensed under the Apache License, Version 2.0 (the "License");
16 you may not use this file except in compliance with the License.
17 You may obtain a copy of the License at
18
19 http://www.apache.org/licenses/LICENSE-2.0
20
21 Unless required by applicable law or agreed to in writing, software
22 distributed under the License is distributed on an "AS IS" BASIS,
23 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
24 See the License for the specific language governing permissions and
25 limitations under the License.
26 ```
0 /*
1 * govis: unicode aware vis(3) encoding implementation
2 * Copyright (C) 2017 SUSE LLC.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 package govis
18
19 // VisFlag manipulates how the characters are encoded/decoded
20 type VisFlag uint
21
22 // vis() has a variety of flags when deciding what encodings to use. While
23 // mtree only uses one set of flags, implementing them all is necessary in
24 // order to have compatibility with BSD's vis() and unvis() commands.
25 const (
26 VisOctal VisFlag = (1 << iota) // VIS_OCTAL: Use octal \ddd format.
27 VisCStyle // VIS_CSTYLE: Use \[nrft0..] where appropriate.
28 VisSpace // VIS_SP: Also encode space.
29 VisTab // VIS_TAB: Also encode tab.
30 VisNewline // VIS_NL: Also encode newline.
31 VisSafe // VIS_SAFE: Encode unsafe characters.
32 VisNoSlash // VIS_NOSLASH: Inhibit printing '\'.
33 VisHTTPStyle // VIS_HTTPSTYLE: HTTP-style escape %xx.
34 VisGlob // VIS_GLOB: Encode glob(3) magics.
35 visMask VisFlag = (1 << iota) - 1 // Mask of all flags.
36
37 VisWhite VisFlag = (VisSpace | VisTab | VisNewline)
38 )
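// As an illustration, callers combine these bit flags with "|"; for example, a
// whitespace-safe octal encoding that also escapes glob(3) metacharacters:
//
//	flags := VisWhite | VisOctal | VisGlob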
0 /*
1 * govis: unicode aware vis(3) encoding implementation
2 * Copyright (C) 2017 SUSE LLC.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 package govis
18
19 import (
20 "bytes"
21 "crypto/rand"
22 "testing"
23 )
24
25 const DefaultVisFlags = VisWhite | VisOctal | VisGlob
26
27 func TestRandomVisUnvis(t *testing.T) {
28 // Randomly generate N strings.
29 const N = 100
30
31 for i := 0; i < N; i++ {
32 testBytes := make([]byte, 256)
33 if n, err := rand.Read(testBytes); n != cap(testBytes) || err != nil {
34 t.Fatalf("could not read enough bytes: err=%v n=%d", err, n)
35 }
36 test := string(testBytes)
37
38 for flag := VisFlag(0); flag <= visMask; flag++ {
39 // VisNoSlash is frankly just a dumb flag, and it is impossible for us
40 // to actually preserve things in a round-trip.
41 if flag&VisNoSlash == VisNoSlash {
42 continue
43 }
44
45 enc, err := Vis(test, flag)
46 if err != nil {
47 t.Errorf("unexpected error doing vis(%q, %b): %s", test, flag, err)
48 continue
49 }
50 dec, err := Unvis(enc, flag)
51 if err != nil {
52 t.Errorf("unexpected error doing unvis(%q, %b): %s", enc, flag, err)
53 continue
54 }
55 if dec != test {
56 t.Errorf("roundtrip failed: unvis(vis(%q, %b) = %q, %b) = %q", test, flag, enc, flag, dec)
57 }
58 }
59 }
60 }
61
62 func TestRandomVisVisUnvisUnvis(t *testing.T) {
63 // Randomly generate N strings.
64 const N = 100
65
66 for i := 0; i < N; i++ {
67 testBytes := make([]byte, 256)
68 if n, err := rand.Read(testBytes); n != cap(testBytes) || err != nil {
69 t.Fatalf("could not read enough bytes: err=%v n=%d", err, n)
70 }
71 test := string(testBytes)
72
73 for flag := VisFlag(0); flag <= visMask; flag++ {
74 // VisNoSlash is frankly just a dumb flag, and it is impossible for us
75 // to actually preserve things in a round-trip.
76 if flag&VisNoSlash == VisNoSlash {
77 continue
78 }
79
80 enc, err := Vis(test, flag)
81 if err != nil {
82 t.Errorf("unexpected error doing vis(%q, %b): %s", test, flag, err)
83 continue
84 }
85 enc2, err := Vis(enc, flag)
86 if err != nil {
87 t.Errorf("unexpected error doing vis(%q, %b): %s", enc, flag, err)
88 continue
89 }
90 dec, err := Unvis(enc2, flag)
91 if err != nil {
92 t.Errorf("unexpected error doing unvis(%q, %b): %s", enc2, flag, err)
93 continue
94 }
95 dec2, err := Unvis(dec, flag)
96 if err != nil {
97 t.Errorf("unexpected error doing unvis(%q, %b): %s", dec, flag, err)
98 continue
99 }
100 if dec2 != test {
101 t.Errorf("roundtrip failed: unvis(unvis(vis(vis(%q) = %q) = %q) = %q, %b) = %q", test, enc, enc2, dec, flag, dec2)
102 }
103 }
104 }
105 }
106
107 func TestVisUnvis(t *testing.T) {
108 for flag := VisFlag(0); flag <= visMask; flag++ {
109 // VisNoSlash is frankly just a dumb flag, and it is impossible for us
110 // to actually preserve things in a round-trip.
111 if flag&VisNoSlash == VisNoSlash {
112 continue
113 }
114
115 // Round-trip testing.
116 for _, test := range []string{
117 "",
118 "hello world",
119 "THIS\\IS_A_TEST1234",
120 "this.is.a.normal_string",
121 "AC_Ra\u00edz_Certic\u00e1mara_S.A..pem",
122 "NetLock_Arany_=Class_Gold=_F\u0151tan\u00fas\u00edtv\u00e1ny.pem",
123 "T\u00dcB\u0130TAK_UEKAE_K\u00f6k_Sertifika_Hizmet_Sa\u011flay\u0131c\u0131s\u0131_-_S\u00fcr\u00fcm_3.pem",
124 "hello world [ this string needs=enco ding! ]",
125 "even \n more encoding necessary\a\a ",
126 "\024 <-- some more weird characters --> \u4f60\u597d\uff0c\u4e16\u754c",
127 "\\xff\\n double encoding is also great fun \\x",
128 "AC_Ra\\M-C\\M--z_Certic\\M-C\\M-!mara_S.A..pem",
129 "z^i3i$\u00d3\u008anqgh5/t\u00e5<86>\u00b2kzla\\e^lv\u00df\u0093nv\u00df\u00aea|3}\u00d8\u0088\u00d6\u0084",
130 `z^i3i$\M-C\M^S\M-B\M^Jnqgh5/t\M-C\M-%<86>\M-B\M-2kzla\\e^lv\M-C\M^_\M-B\M^Snv\M-C\M^_\M-B\M-.a|3}\M-C\M^X\M-B\M^H\M-C\M^V\M-B\M^D`,
131 "@?e1xs+.R_Kjo]7s8pgRP:*nXCE4{!c",
132 "62_\u00c6\u00c62\u00ae\u00b7m\u00db\u00c3r^\u00bfp\u00c6u'q\u00fbc2\u00f0u\u00b8\u00dd\u00e8v\u00ff\u00b0\u00dc\u00c2\u00f53\u00db-k\u00f2sd4\\p\u00da\u00a6\u00d3\u00eea<\u00e6s{\u00a0p\u00f0\u00ffj\u00e0\u00e8\u00b8\u00b8\u00bc\u00fcb",
133 `62_\M-C\M^F\M-C\M^F2\M-B\M-.\M-B\M-7m\M-C\M^[\M-C\M^Cr^\M-B\M-?p\M-C\M^Fu'q\M-C\M-;c2\M-C\M-0u\M-B\M-8\M-C\M^]\M-C\M-(v\M-C\M-?\M-B\M-0\M-C\M^\\M-C\M^B\M-C\M-53\M-C\M^[-k\M-C\M-2sd4\\p\M-C\M^Z\M-B\M-&\M-C\M^S\M-C\M-.a<\M-C\M-&s{\M-B\240p\M-C\M-0\M-C\M-?j\M-C\240\M-C\M-(\M-B\M-8\M-B\M-8\M-B\M-<\M-C\M-<b`,
134 "\u9003\"9v1)T798|o;fly jnKX\u0489Be=",
135 `\M-i\M^@\M^C"9v1)T798|o;fly jnKX\M-R\M^IBe=`,
136 "'3Ze\u050e|\u02del\u069du-Rpct4+Z5b={@_{b",
137 `'3Ze\M-T\M^N|\M-K\M^^l\M-Z\M^]u-Rpct4+Z5b={@_{b`,
138 "1\u00c6\u00abTcz+Vda?)k1%\\\"P;`po`h",
139 `1%C3%86%C2%ABTcz+Vda%3F)k1%25%5C%22P%3B%60po%60h`,
140 } {
141 enc, err := Vis(test, flag)
142 if err != nil {
143 t.Errorf("unexpected error doing vis(%q, %b): %s", test, flag, err)
144 continue
145 }
146 dec, err := Unvis(enc, flag)
147 if err != nil {
148 t.Errorf("unexpected error doing unvis(%q, %b): %s", enc, flag, err)
149 continue
150 }
151 if dec != test {
152 t.Errorf("roundtrip failed: unvis(vis(%q, %b) = %q, %b) = %q", test, flag, enc, flag, dec)
153 }
154 }
155 }
156 }
157
158 func TestByteStrings(t *testing.T) {
159 // It's important to make sure that we don't mess around with the layout of
160 // bytes when doing a round-trip. Otherwise we risk outputting visually
161 // identical but bit-stream non-identical strings (causing much confusion
162 // when trying to access such files).
163
164 for _, test := range [][]byte{
165 []byte("This is a man in business suit levitating: \U0001f574"),
166 {0x7f, 0x17, 0x01, 0x33},
167 // TODO: Test arbitrary byte streams like the one below. Currently this
168 // fails because Vis() is messing around with it (converting it
169 // to a rune and spacing it out).
170 //{'\xef', '\xae', 'h', '\077', 'k'},
171 } {
172 testString := string(test)
173 enc, err := Vis(testString, DefaultVisFlags)
174 if err != nil {
175 t.Errorf("unexpected error doing vis(%q): %s", test, err)
176 continue
177 }
178 dec, err := Unvis(enc, DefaultVisFlags)
179 if err != nil {
180 t.Errorf("unexpected error doing unvis(%q): %s", enc, err)
181 continue
182 }
183 decBytes := []byte(dec)
184
185 if dec != testString {
186 t.Errorf("roundtrip failed [string comparison]: unvis(vis(%q) = %q) = %q", test, enc, dec)
187 }
188 if !bytes.Equal(decBytes, test) {
189 t.Errorf("roundtrip failed [byte comparison]: unvis(vis(%q) = %q) = %q", test, enc, dec)
190 }
191 }
192
193 }
0 /*
1 * govis: unicode aware vis(3) encoding implementation
2 * Copyright (C) 2017 SUSE LLC.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 package govis
18
19 import (
20 "fmt"
21 "strconv"
22 "unicode"
23 )
24
25 // unvisParser stores the current state of the token parser.
26 type unvisParser struct {
27 tokens []rune
28 idx int
29 flag VisFlag
30 }
31
32 // Next moves the index to the next character.
33 func (p *unvisParser) Next() {
34 p.idx++
35 }
36
37 // Peek gets the current token.
38 func (p *unvisParser) Peek() (rune, error) {
39 if p.idx >= len(p.tokens) {
40 return unicode.ReplacementChar, fmt.Errorf("tried to read past end of token list")
41 }
42 return p.tokens[p.idx], nil
43 }
44
45 // End returns whether all of the tokens have been consumed.
46 func (p *unvisParser) End() bool {
47 return p.idx >= len(p.tokens)
48 }
49
50 func newParser(input string, flag VisFlag) *unvisParser {
51 return &unvisParser{
52 tokens: []rune(input),
53 idx: 0,
54 flag: flag,
55 }
56 }
57
58 // While a recursive descent parser is overkill for parsing simple escape
59 // codes, this is IMO much easier to read than the ugly 80s coroutine code used
60 // by the original unvis(3) parser. Here's the EBNF for an unvis sequence:
61 //
62 // <input> ::= (<rune>)*
63 // <rune> ::= ("\" <escape-sequence>) | ("%" <escape-hex>) | <plain-rune>
64 // <plain-rune> ::= any rune
65 // <escape-sequence> ::= ("x" <escape-hex>) | ("M" <escape-meta>) | ("^" <escape-ctrl) | <escape-cstyle> | <escape-octal>
66 // <escape-meta> ::= ("-" <escape-meta1>) | ("^" <escape-ctrl>)
67 // <escape-meta1> ::= any rune
68 // <escape-ctrl> ::= "?" | any rune
69 // <escape-cstyle> ::= "\" | "n" | "r" | "b" | "a" | "v" | "t" | "f"
70 // <escape-hex> ::= [0-9a-f] [0-9a-f]
71 // <escape-octal> ::= [0-7] ([0-7] ([0-7])?)?
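//
// As a worked example of the grammar above: the input `\M-C` parses as
// "\" <escape-sequence>, then "M" <escape-meta>, then "-" <escape-meta1> with
// the rune 'C', which unvisEscapeMeta turns into the single byte
// 0x80|'C' = 0xC3.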
72
73 func unvisPlainRune(p *unvisParser) ([]byte, error) {
74 ch, err := p.Peek()
75 if err != nil {
76 return nil, fmt.Errorf("plain rune: %s", err)
77 }
78 p.Next()
79
80 // XXX: Maybe we should not be converting to runes and then back to strings
81 // here. Are we sure that the byte-for-byte representation is the
82 // same? If the bytes change, then using these strings for paths will
83 // break...
84
85 str := string(ch)
86 return []byte(str), nil
87 }
88
89 func unvisEscapeCStyle(p *unvisParser) ([]byte, error) {
90 ch, err := p.Peek()
91 if err != nil {
92 return nil, fmt.Errorf("escape cstyle: %s", err)
93 }
94
95 output := ""
96 switch ch {
97 case 'n':
98 output = "\n"
99 case 'r':
100 output = "\r"
101 case 'b':
102 output = "\b"
103 case 'a':
104 output = "\x07"
105 case 'v':
106 output = "\v"
107 case 't':
108 output = "\t"
109 case 'f':
110 output = "\f"
111 case 's':
112 output = " "
113 case 'E':
114 output = "\x1b"
115 case '\n':
116 // Hidden newline.
117 case '$':
118 // Hidden marker.
119 default:
120 // XXX: We should probably allow falling through and return "\" here...
121 return nil, fmt.Errorf("escape cstyle: unknown escape character: %q", ch)
122 }
123
124 p.Next()
125 return []byte(output), nil
126 }
127
128 func unvisEscapeDigits(p *unvisParser, base int, force bool) ([]byte, error) {
129 var code int
130
131 for i := int(0xFF); i > 0; i /= base {
132 ch, err := p.Peek()
133 if err != nil {
134 if !force && i != 0xFF {
135 break
136 }
137 return nil, fmt.Errorf("escape base %d: %s", base, err)
138 }
139
140 digit, err := strconv.ParseInt(string(ch), base, 8)
141 if err != nil {
142 if !force && i != 0xFF {
143 break
144 }
145 return nil, fmt.Errorf("escape base %d: could not parse digit: %s", base, err)
146 }
147
148 code = (code * base) + int(digit)
149 p.Next()
150 }
151
152 if code > unicode.MaxLatin1 {
153 return nil, fmt.Errorf("escape base %d: code %q outside latin-1 encoding", base, code)
154 }
155
156 char := byte(code & 0xFF)
157 return []byte{char}, nil
158 }
159
160 func unvisEscapeCtrl(p *unvisParser, mask byte) ([]byte, error) {
161 ch, err := p.Peek()
162 if err != nil {
163 return nil, fmt.Errorf("escape ctrl: %s", err)
164 }
165 if ch > unicode.MaxLatin1 {
166 return nil, fmt.Errorf("escape ctrl: code %q outside latin-1 encoding", ch)
167 }
168
169 char := byte(ch) & 0x1f
170 if ch == '?' {
171 char = 0x7f
172 }
173
174 p.Next()
175 return []byte{mask | char}, nil
176 }
177
178 func unvisEscapeMeta(p *unvisParser) ([]byte, error) {
179 ch, err := p.Peek()
180 if err != nil {
181 return nil, fmt.Errorf("escape meta: %s", err)
182 }
183
184 mask := byte(0x80)
185
186 switch ch {
187 case '^':
188 // The same as "\^..." except we apply a mask.
189 p.Next()
190 return unvisEscapeCtrl(p, mask)
191
192 case '-':
193 p.Next()
194
195 ch, err := p.Peek()
196 if err != nil {
197 return nil, fmt.Errorf("escape meta1: %s", err)
198 }
199 if ch > unicode.MaxLatin1 {
200 return nil, fmt.Errorf("escape meta1: code %q outside latin-1 encoding", ch)
201 }
202
203 // Add mask to character.
204 p.Next()
205 return []byte{mask | byte(ch)}, nil
206 }
207
208 return nil, fmt.Errorf("escape meta: unknown escape char: %q", ch)
209 }
210
211 func unvisEscapeSequence(p *unvisParser) ([]byte, error) {
212 ch, err := p.Peek()
213 if err != nil {
214 return nil, fmt.Errorf("escape sequence: %s", err)
215 }
216
217 switch ch {
218 case '\\':
219 p.Next()
220 return []byte("\\"), nil
221
222 case '0', '1', '2', '3', '4', '5', '6', '7':
223 return unvisEscapeDigits(p, 8, false)
224
225 case 'x':
226 p.Next()
227 return unvisEscapeDigits(p, 16, true)
228
229 case '^':
230 p.Next()
231 return unvisEscapeCtrl(p, 0x00)
232
233 case 'M':
234 p.Next()
235 return unvisEscapeMeta(p)
236
237 default:
238 return unvisEscapeCStyle(p)
239 }
240 }
241
242 func unvisRune(p *unvisParser) ([]byte, error) {
243 ch, err := p.Peek()
244 if err != nil {
245 return nil, fmt.Errorf("rune: %s", err)
246 }
247
248 switch ch {
249 case '\\':
250 p.Next()
251 return unvisEscapeSequence(p)
252
253 case '%':
254 // % HEX HEX only applies to HTTPStyle encodings.
255 if p.flag&VisHTTPStyle == VisHTTPStyle {
256 p.Next()
257 return unvisEscapeDigits(p, 16, true)
258 }
259 fallthrough
260
261 default:
262 return unvisPlainRune(p)
263 }
264 }
265
266 func unvis(p *unvisParser) (string, error) {
267 var output []byte
268 for !p.End() {
269 ch, err := unvisRune(p)
270 if err != nil {
271 return "", fmt.Errorf("input: %s", err)
272 }
273 output = append(output, ch...)
274 }
275 return string(output), nil
276 }
277
278 // Unvis takes a string formatted with the given Vis flags (though only the
279 // VisHTTPStyle flag is checked) and outputs the un-encoded version of the
280 // encoded string. An error is returned if any escape sequences in the input
281 // string were invalid.
282 func Unvis(input string, flag VisFlag) (string, error) {
283 // TODO: Check all of the VisFlag bits.
284 p := newParser(input, flag)
285 output, err := unvis(p)
286 if err != nil {
287 return "", fmt.Errorf("unvis: %s", err)
288 }
289 if !p.End() {
290 return "", fmt.Errorf("unvis: trailing characters at end of input")
291 }
292 return output, nil
293 }
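// As an illustration of the octal escapes handled above, a call such as
//
//	s, _ := Unvis(`\303\255`, 0)
//
// decodes to the two raw bytes 0xC3 0xAD -- the UTF-8 encoding of 'í'.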
0 /*
1 * govis: unicode aware vis(3) encoding implementation
2 * Copyright (C) 2017 SUSE LLC.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 package govis
18
19 import (
20 "testing"
21 )
22
23 func TestUnvisError(t *testing.T) {
24 for _, test := range []string{
25 // Octal escape codes allow you to specify invalid byte values.
26 "\\777",
27 "\\420\\322\\455",
28 "\\652\\233",
29 } {
30 got, err := Unvis(test, DefaultVisFlags)
31 if err == nil {
32 t.Errorf("expected unvis(%q) to give an error, got %q", test, got)
33 }
34 }
35 }
36
37 func TestUnvisCStyleEscape(t *testing.T) {
38 for _, test := range []struct {
39 input string
40 expected string
41 }{
42 {"", ""},
43 {"\\n\\v\\t\\s", "\n\v\t "},
44 {"\\\\n\\tt", "\\n\tt"},
45 {"\\b", "\b"},
46 {"\\r\\b\\n", "\r\b\n"},
47 {"\\a\\a\\b", "\x07\x07\b"},
48 {"\\f\\s\\E", "\f \x1b"},
49 // Hidden markers. They actually aren't generated by vis(3) but for
50 // some reason, they're supported...
51 {"test\\\ning", "testing"},
52 {"test\\$\\$ing", "testing"},
53 } {
54 got, err := Unvis(test.input, DefaultVisFlags)
55 if err != nil {
56 t.Errorf("unexpected error doing unvis(%q): %q", test.input, err)
57 continue
58 }
59 if got != test.expected {
60 t.Errorf("expected unvis(%q) = %q, got %q", test.input, test.expected, got)
61 }
62 }
63 }
64
65 func TestUnvisMetaEscape(t *testing.T) {
66 for _, test := range []struct {
67 input string
68 expected string
69 }{
70 {"", ""},
71 {"\\M^ ?\\^ ", "\x80?\x00"},
72 {"\\M- ?\\^?", "\xa0?\x7f"},
73 {"\\M-x butterfly\\M^?", "\xf8 butterfly\xff"},
74 {"\\M^X steady-hand \\^& needle", "\x98 steady-hand \x06 needle"},
75 // TODO: Add some more of these tests, but I need to have some
76 // secondary source to verify these outputs properly.
77 } {
78 got, err := Unvis(test.input, DefaultVisFlags)
79 if err != nil {
80 t.Errorf("unexpected error doing unvis(%q): %q", test.input, err)
81 continue
82 }
83 if got != test.expected {
84 t.Errorf("expected unvis(%q) = %q, got %q", test.input, test.expected, got)
85 }
86 }
87 }
88
89 func TestUnvisOctalEscape(t *testing.T) {
90 for _, test := range []struct {
91 input string
92 expected string
93 }{
94 {"", ""},
95 {"\\1", "\001"},
96 {"\\01\\02\\3", "\001\002\003"},
97 {"\\001\\023\\32", "\001\023\032"},
98 {"this is a test\\0k1\\133", "this is a test\000k1\133"},
99 {"\\170YET\\01another test\\1\\\\82", "\170YET\001another test\001\\82"},
100 {"\\177MORE tests\\09a", "\177MORE tests\x009a"},
101 {"\\\\710more\\1215testing", "\\710more\1215testing"},
102 // Make sure that decoding unicode works properly, when it's been encoded as single bytes.
103 {"\\360\\237\\225\\264", "\U0001f574"},
104 {"T\\303\\234B\\304\\260TAK_UEKAE_K\\303\\266k_Sertifika_Hizmet_Sa\\304\\237lay\\304\\261c\\304\\261s\\304\\261_-_S\\303\\274r\\303\\274m_3.pem", "TÜBİTAK_UEKAE_Kök_Sertifika_Hizmet_Sağlayıcısı_-_Sürüm_3.pem"},
105 // Some invalid characters...
106 {"\\377\\2\\225\\264", "\xff\x02\x95\xb4"},
107 } {
108 got, err := Unvis(test.input, DefaultVisFlags)
109 if err != nil {
110 t.Errorf("unexpected error doing unvis(%q): %q", test.input, err)
111 continue
112 }
113 if got != test.expected {
114 t.Errorf("expected unvis(%q) = %q, got %q", test.input, test.expected, got)
115 }
116 }
117 }
118
119 func TestUnvisHexEscape(t *testing.T) {
120 for _, test := range []struct {
121 input string
122 expected string
123 }{
124 {"", ""},
125 {"\\x01", "\x01"},
126 {"\\x01\\x02\\x7a", "\x01\x02\x7a"},
127 {"this is a test\\x13\\x52\\x6f", "this is a test\x13\x52\x6f"},
128 {"\\x170YET\\x01a\\x22nother test\\x11", "\x170YET\x01a\x22nother test\x11"},
129 {"\\\\x007more\\\\x215testing", "\\x007more\\x215testing"},
130 // Make sure that decoding unicode works properly, when it's been encoded as single bytes.
131 {"\\xf0\\x9f\\x95\\xb4", "\U0001f574"},
132 {"T\\xc3\\x9cB\\xc4\\xb0TAK_UEKAE_K\\xc3\\xb6k_Sertifika_Hizmet_Sa\\xc4\\x9flay\\xc4\\xb1c\\xc4\\xb1s\\xc4\\xb1_-_S\\xc3\\xbcr\\xc3\\xbcm_3.pem", "TÜBİTAK_UEKAE_Kök_Sertifika_Hizmet_Sağlayıcısı_-_Sürüm_3.pem"},
133 // Some invalid characters...
134 {"\\xff\\x02\\x95\\xb4", "\xff\x02\x95\xb4"},
135 } {
136 got, err := Unvis(test.input, DefaultVisFlags)
137 if err != nil {
138 t.Errorf("unexpected error doing unvis(%q): %q", test.input, err)
139 continue
140 }
141 if got != test.expected {
142 t.Errorf("expected unvis(%q) = %q, got %q", test.input, test.expected, got)
143 }
144 }
145 }
146
147 func TestUnvisUnicode(t *testing.T) {
148 // Ensure that unicode strings are not messed up by Unvis.
149 for _, test := range []string{
150 "",
151 "this.is.a.normal_string",
152 "AC_Raíz_Certicámara_S.A..pem",
153 "NetLock_Arany_=Class_Gold=_Főtanúsítvány.pem",
154 "TÜBİTAK_UEKAE_Kök_Sertifika_Hizmet_Sağlayıcısı_-_Sürüm_3.pem",
155 } {
156 got, err := Unvis(test, DefaultVisFlags)
157 if err != nil {
158 t.Errorf("unexpected error doing unvis(%q): %s", test, err)
159 continue
160 }
161 if got != test {
162 t.Errorf("expected %q to be unchanged, got %q", test, got)
163 }
164 }
165 }
0 /*
1 * govis: unicode aware vis(3) encoding implementation
2 * Copyright (C) 2017 SUSE LLC.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 package govis
18
19 import (
20 "fmt"
21 "unicode"
22 )
23
24 func isunsafe(ch rune) bool {
25 return ch == '\b' || ch == '\007' || ch == '\r'
26 }
27
28 func isglob(ch rune) bool {
29 return ch == '*' || ch == '?' || ch == '[' || ch == '#'
30 }
31
32 // ishttp is defined by RFC 1808.
33 func ishttp(ch rune) bool {
34 // RFC1808 does not really consider characters outside of ASCII, so just to
35 // be safe always treat characters outside the ASCII character set as "not
36 // HTTP".
37 if ch > unicode.MaxASCII {
38 return false
39 }
40
41 return unicode.IsDigit(ch) || unicode.IsLetter(ch) ||
42 // Safe characters.
43 ch == '$' || ch == '-' || ch == '_' || ch == '.' || ch == '+' ||
44 // Extra characters.
45 ch == '!' || ch == '*' || ch == '\'' || ch == '(' ||
46 ch == ')' || ch == ','
47 }
48
49 func isgraph(ch rune) bool {
50 return unicode.IsGraphic(ch) && !unicode.IsSpace(ch) && ch <= unicode.MaxASCII
51 }
52
53 // vis converts a single *byte* into its encoding. While Go supports the
54 // concept of runes (and thus native utf-8 parsing), we operate on bytes so
55 // that the bit-stream is completely maintained through an Unvis(Vis(...))
56 // round-trip. The downside is that Vis() will never output unicode -- but on
57 // the plus side this is actually a benefit on the encoding side (it will
58 // always work with the simple unvis(3) implementation). It also means that we
59 // don't have to worry about different multi-byte encodings.
60 func vis(b byte, flag VisFlag) (string, error) {
61 // Treat the single-byte character as a rune.
62 ch := rune(b)
63
64 // XXX: This is quite a horrible thing to support.
65 if flag&VisHTTPStyle == VisHTTPStyle {
66 if !ishttp(ch) {
67 return "%" + fmt.Sprintf("%.2X", ch), nil
68 }
69 }
70
71 // Figure out if the character doesn't need to be encoded. Effectively, we
72 // encode most "normal" (graphical) characters as themselves unless we have
73 // been specifically asked not to. Note though that we *ALWAYS* encode
74 // everything outside ASCII.
75 // TODO: Switch this to much more logical code.
76
77 if ch > unicode.MaxASCII {
78 /* ... */
79 } else if flag&VisGlob == VisGlob && isglob(ch) {
80 /* ... */
81 } else if isgraph(ch) ||
82 (flag&VisSpace != VisSpace && ch == ' ') ||
83 (flag&VisTab != VisTab && ch == '\t') ||
84 (flag&VisNewline != VisNewline && ch == '\n') ||
85 (flag&VisSafe != 0 && isunsafe(ch)) {
86
87 encoded := string(ch)
88 if ch == '\\' && flag&VisNoSlash == 0 {
89 encoded += "\\"
90 }
91 return encoded, nil
92 }
93
94 // Try to use C-style escapes first.
95 if flag&VisCStyle == VisCStyle {
96 switch ch {
97 case ' ':
98 return "\\s", nil
99 case '\n':
100 return "\\n", nil
101 case '\r':
102 return "\\r", nil
103 case '\b':
104 return "\\b", nil
105 case '\a':
106 return "\\a", nil
107 case '\v':
108 return "\\v", nil
109 case '\t':
110 return "\\t", nil
111 case '\f':
112 return "\\f", nil
113 case '\x00':
114 // Output octal just to be safe.
115 return "\\000", nil
116 }
117 }
118
119 // For graphical characters we generate octal output (and also if it's
120 // being forced by the caller's flags). Also spaces should always be
121 // encoded as octal.
122 if flag&VisOctal == VisOctal || isgraph(ch) || ch&0x7f == ' ' {
123 // Always output three-character octal just to be safe.
124 return fmt.Sprintf("\\%.3o", ch), nil
125 }
126
127 // Now we have to output meta or ctrl escapes. As far as I can tell, this
128 // is not actually defined by any standard -- so this logic is basically
129 // copied from the original vis(3) implementation. Hopefully nobody
130 // actually relies on this (octal and hex are better).
131
132 encoded := ""
133 if flag&VisNoSlash == 0 {
134 encoded += "\\"
135 }
136
137 // Meta characters have 0x80 set, but are otherwise identical to control
138 // characters.
139 if b&0x80 != 0 {
140 b &= 0x7f
141 encoded += "M"
142 }
143
144 if unicode.IsControl(rune(b)) {
145 encoded += "^"
146 if b == 0x7f {
147 encoded += "?"
148 } else {
149 encoded += fmt.Sprintf("%c", b+'@')
150 }
151 } else {
152 encoded += fmt.Sprintf("-%c", b)
153 }
154
155 return encoded, nil
156 }
157
158 // Vis encodes the provided string to a BSD-compatible encoding using BSD's
159 // vis() flags. However, it will correctly handle multi-byte encoding (which is
160 // not done properly by BSD's vis implementation).
161 func Vis(src string, flag VisFlag) (string, error) {
162 if flag&visMask != flag {
163 return "", fmt.Errorf("vis: flag %q contains unknown or unsupported flags", flag)
164 }
165
166 output := ""
167 for _, ch := range []byte(src) {
168 encodedCh, err := vis(ch, flag)
169 if err != nil {
170 return "", err
171 }
172 output += encodedCh
173 }
174
175 return output, nil
176 }
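// As an illustration of the flag handling above, forcing spaces to be encoded
// yields octal output (matching the behaviour exercised in the tests):
//
//	enc, _ := Vis("a b", VisSpace) // enc == `a\040b`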
0 /*
1 * govis: unicode aware vis(3) encoding implementation
2 * Copyright (C) 2017 SUSE LLC.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 package govis
18
19 import (
20 "testing"
21 )
22
23 func TestVisUnchanged(t *testing.T) {
24 for _, test := range []struct {
25 input string
26 flag VisFlag
27 }{
28 {"", DefaultVisFlags},
29 {"helloworld", DefaultVisFlags},
30 {"THIS_IS_A_TEST1234", DefaultVisFlags},
31 {"SomeEncodingsAreCool", DefaultVisFlags},
32 {"spaces are totally safe", DefaultVisFlags &^ VisSpace},
33 {"tabs\tare\talso\tsafe!!", DefaultVisFlags &^ VisTab},
34 {"just\a\atrustme\r\b\b!!", DefaultVisFlags | VisSafe},
35 } {
36 enc, err := Vis(test.input, test.flag)
37 if err != nil {
38 t.Errorf("unexpected error with %q: %s", test, err)
39 }
40 if enc != test.input {
41 t.Errorf("expected encoding of %q (flag=%q) to be unchanged, got %q", test.input, test.flag, enc)
42 }
43 }
44 }
45
46 func TestVisFlags(t *testing.T) {
47 for _, test := range []struct {
48 input string
49 output string
50 flag VisFlag
51 }{
52 // Default
53 {"AC_Ra\u00edz_Certic\u00e1mara_S.A..pem", "AC_Ra\\M-C\\M--z_Certic\\M-C\\M-!mara_S.A..pem", 0},
54 {"z^i3i$\u00d3\u008anqgh5/t\u00e5<86>\u00b2kzla\\e^lv\u00df\u0093nv\u00df\u00aea|3}\u00d8\u0088\u00d6\u0084", `z^i3i$\M-C\M^S\M-B\M^Jnqgh5/t\M-C\M-%<86>\M-B\M-2kzla\\e^lv\M-C\M^_\M-B\M^Snv\M-C\M^_\M-B\M-.a|3}\M-C\M^X\M-B\M^H\M-C\M^V\M-B\M^D`, 0},
55 {"@?e1xs+.R_Kjo]7s8pgRP:*nXCE4{!c", "@?e1xs+.R_Kjo]7s8pgRP:*nXCE4{!c", 0},
56 {"62_\u00c6\u00c62\u00ae\u00b7m\u00db\u00c3r^\u00bfp\u00c6u'q\u00fbc2\u00f0u\u00b8\u00dd\u00e8v\u00ff\u00b0\u00dc\u00c2\u00f53\u00db-k\u00f2sd4\\p\u00da\u00a6\u00d3\u00eea<\u00e6s{\u00a0p\u00f0\u00ffj\u00e0\u00e8\u00b8\u00b8\u00bc\u00fcb", `62_\M-C\M^F\M-C\M^F2\M-B\M-.\M-B\M-7m\M-C\M^[\M-C\M^Cr^\M-B\M-?p\M-C\M^Fu'q\M-C\M-;c2\M-C\M-0u\M-B\M-8\M-C\M^]\M-C\M-(v\M-C\M-?\M-B\M-0\M-C\M^\\M-C\M^B\M-C\M-53\M-C\M^[-k\M-C\M-2sd4\\p\M-C\M^Z\M-B\M-&\M-C\M^S\M-C\M-.a<\M-C\M-&s{\M-B\240p\M-C\M-0\M-C\M-?j\M-C\240\M-C\M-(\M-B\M-8\M-B\M-8\M-B\M-<\M-C\M-<b`, 0},
57 {"\u9003\"9v1)T798|o;fly jnKX\u0489Be=", `\M-i\M^@\M^C"9v1)T798|o;fly jnKX\M-R\M^IBe=`, 0},
58 // VisOctal
59 {"", "", VisOctal},
60 {"\022", "\\022", VisOctal},
61 {"\n \t", "\\012\\040\t", VisNewline | VisSpace | VisOctal},
62 {"\x12\f\a\n\v\b \U00012312", "\\022\\014\\007\n\\013\\010 \\360\\222\\214\\222", VisOctal},
63 {"AC_Ra\u00edz_Certic\u00e1mara_S.A..pem", "AC_Ra\\303\\255z_Certic\\303\\241mara_S.A..pem", VisOctal},
64 {"z^i3i$\u00d3\u008anqgh5/t\u00e5<86>\u00b2kzla\\e^lv\u00df\u0093nv\u00df\u00aea|3}\u00d8\u0088\u00d6\u0084", `z^i3i$\303\223\302\212nqgh5/t\303\245<86>\302\262kzla\\e^lv\303\237\302\223nv\303\237\302\256a|3}\303\230\302\210\303\226\302\204`, VisOctal},
65 {"62_\u00c6\u00c62\u00ae\u00b7m\u00db\u00c3r^\u00bfp\u00c6u'q\u00fbc2\u00f0u\u00b8\u00dd\u00e8v\u00ff\u00b0\u00dc\u00c2\u00f53\u00db-k\u00f2sd4\\p\u00da\u00a6\u00d3\u00eea<\u00e6s{\u00a0p\u00f0\u00ffj\u00e0\u00e8\u00b8\u00b8\u00bc\u00fcb", `62_\303\206\303\2062\302\256\302\267m\303\233\303\203r^\302\277p\303\206u'q\303\273c2\303\260u\302\270\303\235\303\250v\303\277\302\260\303\234\303\202\303\2653\303\233-k\303\262sd4\\p\303\232\302\246\303\223\303\256a<\303\246s{\302\240p\303\260\303\277j\303\240\303\250\302\270\302\270\302\274\303\274b`, VisOctal},
66 {"\u9003\"9v1)T798|o;fly jnKX\u0489Be=", `\351\200\203"9v1)T798|o;fly jnKX\322\211Be=`, VisOctal},
67 // VisCStyle
68 {"\x00 \f \a \n\v\b \r \t\r", "\\000 \\f \\a \n\\v\\b \\r \t\\r", VisCStyle},
69 {"\t \n\v\b", "\\t \n\\v\\b", VisTab | VisCStyle},
70 {"\n\v\t ", "\n\\v\t\\s\\s\\s", VisSpace | VisCStyle},
71 {"\n \n ", "\\n \\n ", VisNewline | VisCStyle},
72 {"z^i3i$\u00d3\u008anqgh5/t\u00e5<86>\u00b2kzla\\e^lv\u00df\u0093nv\u00df\u00aea|3}\u00d8\u0088\u00d6\u0084", `z^i3i$\M-C\M^S\M-B\M^Jnqgh5/t\M-C\M-%<86>\M-B\M-2kzla\\e^lv\M-C\M^_\M-B\M^Snv\M-C\M^_\M-B\M-.a|3}\M-C\M^X\M-B\M^H\M-C\M^V\M-B\M^D`, VisCStyle},
73 {"62_\u00c6\u00c62\u00ae\u00b7m\u00db\u00c3r^\u00bfp\u00c6u'q\u00fbc2\u00f0u\u00b8\u00dd\u00e8v\u00ff\u00b0\u00dc\u00c2\u00f53\u00db-k\u00f2sd4\\p\u00da\u00a6\u00d3\u00eea<\u00e6s{\u00a0p\u00f0\u00ffj\u00e0\u00e8\u00b8\u00b8\u00bc\u00fcb", `62_\M-C\M^F\M-C\M^F2\M-B\M-.\M-B\M-7m\M-C\M^[\M-C\M^Cr^\M-B\M-?p\M-C\M^Fu'q\M-C\M-;c2\M-C\M-0u\M-B\M-8\M-C\M^]\M-C\M-(v\M-C\M-?\M-B\M-0\M-C\M^\\M-C\M^B\M-C\M-53\M-C\M^[-k\M-C\M-2sd4\\p\M-C\M^Z\M-B\M-&\M-C\M^S\M-C\M-.a<\M-C\M-&s{\M-B\240p\M-C\M-0\M-C\M-?j\M-C\240\M-C\M-(\M-B\M-8\M-B\M-8\M-B\M-<\M-C\M-<b`, VisCStyle},
74 {"\u9003\"9v1)T798|o;fly jnKX\u0489Be=", `\M-i\M^@\M^C"9v1)T798|o;fly\sjnKX\M-R\M^IBe=`, VisCStyle | VisSpace},
75 // VisSpace
76 {" ", `\040\040`, VisSpace},
77 {"\t \t", "\t\\040\t", VisSpace},
78 {"\\040 plenty of characters here ", `\\040\040\040\040plenty\040of\040characters\040here\040\040\040`, VisSpace},
79 {"Js9L\u00cd\u00b2o?4824y'$|P}FIr%mW /KL9$]~", `Js9L\M-C\M^M\M-B\M-2o?4824y'$|P}FIr%mW\040/KL9$]~`, VisWhite},
80 {"1\u00c6\u00abTcz+Vda?)k1%\\\"P;`po`h", `1\M-C\M^F\M-B\M-+Tcz+Vda?)k1%\\"P;` + "`po`" + `h`, VisWhite},
81 {"\u9003\"9v1)T798|o;fly jnKX\u0489Be=", `\M-i\M^@\M^C"9v1)T798|o;fly\040jnKX\M-R\M^IBe=`, VisSpace},
82 // VisTab
83 {"\t \v", "\\^I \\^K", VisTab},
84 {"\t \v", "\\011 \\013", VisTab | VisOctal},
85 // VisNewline
86 {"\t\n \v\r\n", "\t\\^J \\^K\\^M\\^J", VisNewline},
87 {"\t\n \v\r\n", "\t\\012 \\013\\015\\012", VisNewline | VisOctal},
88 // VisSafe
89 // VisHTTPStyle
90 {"\x12\f\a\n\v\b \U00012312", `%12%0C%07%0A%0B%08%20%20%F0%92%8C%92`, VisHTTPStyle},
91 {"1\u00c6\u00abTcz+Vda?)k1%\\\"P;`po`h", `1%C3%86%C2%ABTcz+Vda%3F)k1%25%5C%22P%3B%60po%60h`, VisHTTPStyle},
92 {"62_\u00c6\u00c62\u00ae\u00b7m\u00db\u00c3r^\u00bfp\u00c6u'q\u00fbc2\u00f0u\u00b8\u00dd\u00e8v\u00ff\u00b0\u00dc\u00c2\u00f53\u00db-k\u00f2sd4\\p\u00da\u00a6\u00d3\u00eea<\u00e6s{\u00a0p\u00f0\u00ffj\u00e0\u00e8\u00b8\u00b8\u00bc\u00fcb", `62_%C3%86%C3%862%C2%AE%C2%B7m%C3%9B%C3%83r%5E%C2%BFp%C3%86u'q%C3%BBc2%C3%B0u%C2%B8%C3%9D%C3%A8v%C3%BF%C2%B0%C3%9C%C3%82%C3%B53%C3%9B-k%C3%B2sd4%5Cp%C3%9A%C2%A6%C3%93%C3%AEa%3C%C3%A6s%7B%C2%A0p%C3%B0%C3%BFj%C3%A0%C3%A8%C2%B8%C2%B8%C2%BC%C3%BCb`, VisHTTPStyle},
93 {"'3Ze\u050e|\u02del\u069du-Rpct4+Z5b={@_{b", `'3Ze%D4%8E%7C%CB%9El%DA%9Du-Rpct4+Z5b%3D%7B%40_%7Bb`, VisHTTPStyle},
94 // VisGlob
95 {"cat /proc/**/status | grep '[pid]' ;; # cool code here", `cat /proc/\052\052/status | grep '\133pid]' ;; \043 cool code here`, VisGlob},
96 {"@?e1xs+.R_Kjo]7s8pgRP:*nXCE4{!c", `@\077e1xs+.R_Kjo]7s8pgRP:\052nXCE4{!c`, VisGlob},
97 {"62_\u00c6\u00c62\u00ae\u00b7m\u00db\u00c3r^\u00bfp\u00c6u'q\u00fbc2\u00f0u\u00b8\u00dd\u00e8v\u00ff\u00b0\u00dc\u00c2\u00f53\u00db-k\u00f2sd4\\p\u00da\u00a6\u00d3\u00eea<\u00e6s{\u00a0p\u00f0\u00ffj\u00e0\u00e8\u00b8\u00b8\u00bc\u00fcb", `62_\M-C\M^F\M-C\M^F2\M-B\M-.\M-B\M-7m\M-C\M^[\M-C\M^Cr^\M-B\M-?p\M-C\M^Fu'q\M-C\M-;c2\M-C\M-0u\M-B\M-8\M-C\M^]\M-C\M-(v\M-C\M-?\M-B\M-0\M-C\M^\\M-C\M^B\M-C\M-53\M-C\M^[-k\M-C\M-2sd4\\p\M-C\M^Z\M-B\M-&\M-C\M^S\M-C\M-.a<\M-C\M-&s{\M-B\240p\M-C\M-0\M-C\M-?j\M-C\240\M-C\M-(\M-B\M-8\M-B\M-8\M-B\M-<\M-C\M-<b`, VisGlob},
98 {"62_\u00c6\u00c62\u00ae\u00b7m\u00db\u00c3r^\u00bfp\u00c6u'q\u00fbc2\u00f0u\u00b8\u00dd\u00e8v\u00ff\u00b0\u00dc\u00c2\u00f53\u00db-k\u00f2sd4\\p\u00da\u00a6\u00d3\u00eea<\u00e6s{\u00a0p\u00f0\u00ffj\u00e0\u00e8\u00b8\u00b8\u00bc\u00fcb", `62_\303\206\303\2062\302\256\302\267m\303\233\303\203r^\302\277p\303\206u'q\303\273c2\303\260u\302\270\303\235\303\250v\303\277\302\260\303\234\303\202\303\2653\303\233-k\303\262sd4\\p\303\232\302\246\303\223\303\256a<\303\246s{\302\240p\303\260\303\277j\303\240\303\250\302\270\302\270\302\274\303\274b`, VisGlob | VisOctal},
99 {"'3Ze\u050e|\u02del\u069du-Rpct4+Z5b={@_{b", `'3Ze\M-T\M^N|\M-K\M^^l\M-Z\M^]u-Rpct4+Z5b={@_{b`, VisGlob},
100 {"'3Ze\u050e|\u02del\u069du-Rpct4+Z5b={@_{b", `'3Ze\324\216|\313\236l\332\235u-Rpct4+Z5b={@_{b`, VisGlob | VisOctal},
101 } {
102 enc, err := Vis(test.input, test.flag)
103 if err != nil {
104 t.Errorf("unexpected error with %q: %s", test, err)
105 }
106 if enc != test.output {
107 t.Errorf("expected vis(%q, flag=%b) = %q, got %q", test.input, test.flag, test.output, enc)
108 }
109 }
110 }
111
112 func TestVisChanged(t *testing.T) {
113 for _, test := range []string{
114 "hello world",
115 "THIS\\IS_A_TEST1234",
116 "AC_Ra\u00edz_Certic\u00e1mara_S.A..pem",
117 } {
118 enc, err := Vis(test, DefaultVisFlags)
119 if err != nil {
120 t.Errorf("unexpected error with %q: %s", test, err)
121 }
122 if enc == test {
123 t.Errorf("expected encoding of %q to be changed", test)
124 }
125 }
126 }
0 # How to do releases:
1
2 * Create a changeset that updates `version.go`
3 - this commit will be tagged
4 - add a follow-up commit that restores the version with '-dev' appended
5 * gpg-sign the commit with the incremented version, e.g. 'vX.Y.Z'
6 * Push the tag
7 * Create a "release" from the tag on GitHub
8 - include the binaries from `make build.arches`
9 - write about notable changes and their contributors
10 - list the PRs merged for the release
0 // +build !windows
1
2 package mtree
3
4 import (
5 "os"
6 "syscall"
7 )
8
9 func statIsUID(stat os.FileInfo, uid int) bool {
10 statT := stat.Sys().(*syscall.Stat_t)
11 return statT.Uid == uint32(uid)
12 }
13
14 func statIsGID(stat os.FileInfo, gid int) bool {
15 statT := stat.Sys().(*syscall.Stat_t)
16 return statT.Gid == uint32(gid)
17 }
0 // +build windows
1
2 package mtree
3
4 import "os"
5
6 func statIsUID(stat os.FileInfo, uid int) bool {
7 return false
8 }
9 func statIsGID(stat os.FileInfo, gid int) bool {
10 return false
11 }
0 package mtree
1
2 import (
3 "archive/tar"
4 "fmt"
5 "io"
6 "io/ioutil"
7 "os"
8 "path/filepath"
9 "strings"
10
11 "github.com/Sirupsen/logrus"
12 "github.com/vbatts/go-mtree/pkg/govis"
13 )
14
15 // Streamer creates a file hierarchy out of a tar stream
16 type Streamer interface {
17 io.ReadCloser
18 Hierarchy() (*DirectoryHierarchy, error)
19 }
20
21 var tarDefaultSetKeywords = []KeyVal{
22 "type=file",
23 "flags=none",
24 "mode=0664",
25 }
26
27 // NewTarStreamer streams a tar archive and creates a file hierarchy based off
28 // of the tar metadata headers
29 func NewTarStreamer(r io.Reader, excludes []ExcludeFunc, keywords []Keyword) Streamer {
30 pR, pW := io.Pipe()
31 ts := &tarStream{
32 pipeReader: pR,
33 pipeWriter: pW,
34 creator: dhCreator{DH: &DirectoryHierarchy{}},
35 teeReader: io.TeeReader(r, pW),
36 tarReader: tar.NewReader(pR),
37 keywords: keywords,
38 hardlinks: map[string][]string{},
39 excludes: excludes,
40 }
41
42 go ts.readHeaders()
43 return ts
44 }
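// As a usage sketch (the archive name and keyword selection are hypothetical):
// because the Streamer tees the tar stream through a pipe, the caller is
// expected to drain it before asking for the resulting hierarchy.
//
//	f, _ := os.Open("archive.tar")
//	ts := NewTarStreamer(f, nil, []Keyword{"type", "size", "link"})
//	_, _ = io.Copy(ioutil.Discard, ts)
//	_ = ts.Close()
//	dh, err := ts.Hierarchy()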
45
46 type tarStream struct {
47 root *Entry
48 hardlinks map[string][]string
49 creator dhCreator
50 pipeReader *io.PipeReader
51 pipeWriter *io.PipeWriter
52 teeReader io.Reader
53 tarReader *tar.Reader
54 keywords []Keyword
55 excludes []ExcludeFunc
56 err error
57 }
58
59 func (ts *tarStream) readHeaders() {
60 // remove "time" keyword
61 notimekws := []Keyword{}
62 for _, kw := range ts.keywords {
63 if !InKeywordSlice(kw, notimekws) {
64 if kw == "time" {
65 if !InKeywordSlice("tar_time", ts.keywords) {
66 notimekws = append(notimekws, "tar_time")
67 }
68 } else {
69 notimekws = append(notimekws, kw)
70 }
71 }
72 }
73 ts.keywords = notimekws
74 // We have to start with the directory we're in, and anything beyond these
75 // items is determined at the time a tar is extracted.
76 ts.root = &Entry{
77 Name: ".",
78 Type: RelativeType,
79 Prev: &Entry{
80 Raw: "# .",
81 Type: CommentType,
82 },
83 Set: nil,
84 Keywords: []KeyVal{"type=dir"},
85 }
86 // insert signature and metadata comments first (user, machine, tree, date)
87 for _, e := range signatureEntries("<user specified tar archive>") {
88 e.Pos = len(ts.creator.DH.Entries)
89 ts.creator.DH.Entries = append(ts.creator.DH.Entries, e)
90 }
91 // insert keyword metadata next
92 for _, e := range keywordEntries(ts.keywords) {
93 e.Pos = len(ts.creator.DH.Entries)
94 ts.creator.DH.Entries = append(ts.creator.DH.Entries, e)
95 }
96 hdrloop:
97 for {
98 hdr, err := ts.tarReader.Next()
99 if err != nil {
100 ts.pipeReader.CloseWithError(err)
101 return
102 }
103
104 for _, ex := range ts.excludes {
105 if ex(hdr.Name, hdr.FileInfo()) {
106 continue hdrloop
107 }
108 }
109
110 // Because the content of the file may need to be read by several
111 // KeywordFuncs, it needs to be an io.Seeker as well. So, just reading from
112 // ts.tarReader is not enough.
113 tmpFile, err := ioutil.TempFile("", "ts.payload.")
114 if err != nil {
115 ts.pipeReader.CloseWithError(err)
116 return
117 }
118 // for good measure
119 if err := tmpFile.Chmod(0600); err != nil {
120 tmpFile.Close()
121 os.Remove(tmpFile.Name())
122 ts.pipeReader.CloseWithError(err)
123 return
124 }
125 if _, err := io.Copy(tmpFile, ts.tarReader); err != nil {
126 tmpFile.Close()
127 os.Remove(tmpFile.Name())
128 ts.pipeReader.CloseWithError(err)
129 return
130 }
131 // Alright, it's either file or directory
132 encodedName, err := govis.Vis(filepath.Base(hdr.Name), DefaultVisFlags)
133 if err != nil {
134 tmpFile.Close()
135 os.Remove(tmpFile.Name())
136 ts.pipeReader.CloseWithError(err)
137 return
138 }
139 e := Entry{
140 Name: encodedName,
141 Type: RelativeType,
142 }
143
144 // Keep track of which files are hardlinks so we can resolve them later
145 if hdr.Typeflag == tar.TypeLink {
146 keyFunc := KeywordFuncs["link"]
147 kvs, err := keyFunc(hdr.Name, hdr.FileInfo(), nil)
148 if err != nil {
149 logrus.Warn(err)
150 break // XXX is breaking an okay thing to do here?
151 }
152 linkname, err := govis.Unvis(KeyVal(kvs[0]).Value(), DefaultVisFlags)
153 if err != nil {
154 logrus.Warn(err)
155 break // XXX is breaking an okay thing to do here?
156 }
157 if _, ok := ts.hardlinks[linkname]; !ok {
158 ts.hardlinks[linkname] = []string{hdr.Name}
159 } else {
160 ts.hardlinks[linkname] = append(ts.hardlinks[linkname], hdr.Name)
161 }
162 }
163
164 // now collect keywords on the file
165 for _, keyword := range ts.keywords {
166 if keyFunc, ok := KeywordFuncs[keyword.Prefix()]; ok {
167 // We can't extract directories on to disk, so "size" keyword
168 // is irrelevant for now
169 if hdr.FileInfo().IsDir() && keyword == "size" {
170 continue
171 }
172 kvs, err := keyFunc(hdr.Name, hdr.FileInfo(), tmpFile)
173 if err != nil {
174 ts.setErr(err)
175 }
176 // for good measure, check that we actually get a value for a keyword
177 if len(kvs) > 0 && kvs[0] != "" {
178 e.Keywords = append(e.Keywords, kvs[0])
179 }
180
181 // don't forget to reset the reader
182 if _, err := tmpFile.Seek(0, 0); err != nil {
183 tmpFile.Close()
184 os.Remove(tmpFile.Name())
185 ts.pipeReader.CloseWithError(err)
186 return
187 }
188 }
189 }
190 // collect meta-set keywords for a directory so that we can build the
191 // actual sets in `flatten`
192 if hdr.FileInfo().IsDir() {
193 s := Entry{
194 Name: "meta-set",
195 Type: SpecialType,
196 }
197 for _, setKW := range SetKeywords {
198 if keyFunc, ok := KeywordFuncs[setKW.Prefix()]; ok {
199 kvs, err := keyFunc(hdr.Name, hdr.FileInfo(), tmpFile)
200 if err != nil {
201 ts.setErr(err)
202 }
203 for _, kv := range kvs {
204 if kv != "" {
205 s.Keywords = append(s.Keywords, kv)
206 }
207 }
208 if _, err := tmpFile.Seek(0, 0); err != nil {
209 tmpFile.Close()
210 os.Remove(tmpFile.Name())
211 ts.pipeReader.CloseWithError(err)
212 }
213 }
214 }
215 e.Set = &s
216 }
217 err = populateTree(ts.root, &e, hdr)
218 if err != nil {
219 ts.setErr(err)
220 }
221 tmpFile.Close()
222 os.Remove(tmpFile.Name())
223 }
224 }
225
226 // populateTree creates a pseudo file tree hierarchy using an Entry's Parent and
227 // Children fields. When examining the Entry e to insert in the tree, we
228 // determine if the path to that Entry exists yet. If it does, insert it in the
229 // appropriate position in the tree. If not, create the path up to the
230 // directory that contains the Entry, and then insert the Entry.
231 // root: the "." Entry
232 // e: the Entry we are looking to insert
233 // hdr: the tar header struct associated with e
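// For example, given hdr.Name = "a/b/c.txt", populateTree walks "a" and then
// "b" beneath root, creating placeholder "type=dir" entries for any path
// component not seen before, and finally prepends the "c.txt" Entry to the
// children of "b".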
234 func populateTree(root, e *Entry, hdr *tar.Header) error {
235 if root == nil || e == nil {
236 return fmt.Errorf("cannot populate or insert nil Entries")
237 } else if root.Prev == nil {
238 return fmt.Errorf("root needs to be an Entry associated with a directory")
239 }
240 isDir := hdr.FileInfo().IsDir()
241 wd := filepath.Clean(hdr.Name)
242 if !isDir {
243 // directory up until the actual file
244 wd = filepath.Dir(wd)
245 if wd == "." {
246 root.Children = append([]*Entry{e}, root.Children...)
247 e.Parent = root
248 return nil
249 }
250 }
251 dirNames := strings.Split(wd, "/")
252 parent := root
253 for _, name := range dirNames[:] {
254 encoded, err := govis.Vis(name, DefaultVisFlags)
255 if err != nil {
256 return err
257 }
258 if node := parent.Descend(encoded); node == nil {
259 // Entry for directory doesn't exist in tree relative to root.
260 // We don't know if this directory is an actual tar header (because a
261 // user could have just specified a path to a deep file), so we must
262 // specify this placeholder directory as a "type=dir", and Set=nil.
263 newEntry := Entry{
264 Name: encoded,
265 Type: RelativeType,
266 Parent: parent,
267 Keywords: []KeyVal{"type=dir"}, // temp data
268 Set: nil, // temp data
269 }
270 pathname, err := newEntry.Path()
271 if err != nil {
272 return err
273 }
274 newEntry.Prev = &Entry{
275 Type: CommentType,
276 Raw: "# " + pathname,
277 }
278 parent.Children = append(parent.Children, &newEntry)
279 parent = &newEntry
280 } else {
281 // Entry for directory exists in tree, just keep going
282 parent = node
283 }
284 }
285 if !isDir {
286 parent.Children = append([]*Entry{e}, parent.Children...)
287 e.Parent = parent
288 } else {
289 // fill in the actual data from e
290 parent.Keywords = e.Keywords
291 parent.Set = e.Set
292 }
293 return nil
294 }
295
296 // After constructing a pseudo file hierarchy tree, we want to "flatten" this
297 // tree by putting the Entries into a slice with appropriate positioning.
298 // root: the "head" of the sub-tree to flatten
299 // creator: a dhCreator that helps with the '/set' keyword
300 // keywords: keywords specified by the user that should be evaluated
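// For example, flattening a directory entry that has one file child emits, in
// order: a blank Entry, the directory's comment Entry, a "/set" or "/unset"
// Entry when the keyword state changes, the directory Entry itself, its
// flattened children, the comment Entry again, and a closing ".." Entry.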
301 func flatten(root *Entry, creator *dhCreator, keywords []Keyword) {
302 if root == nil || creator == nil {
303 return
304 }
305 if root.Prev != nil {
306 // root.Prev != nil implies root is a directory
307 creator.DH.Entries = append(creator.DH.Entries,
308 Entry{
309 Type: BlankType,
310 Pos: len(creator.DH.Entries),
311 })
312 root.Prev.Pos = len(creator.DH.Entries)
313 creator.DH.Entries = append(creator.DH.Entries, *root.Prev)
314
315 if root.Set != nil {
316 // Check if we need a new set
317 consolidatedKeys := keyvalSelector(append(tarDefaultSetKeywords, root.Set.Keywords...), keywords)
318 if creator.curSet == nil {
319 creator.curSet = &Entry{
320 Type: SpecialType,
321 Name: "/set",
322 Keywords: consolidatedKeys,
323 Pos: len(creator.DH.Entries),
324 }
325 creator.DH.Entries = append(creator.DH.Entries, *creator.curSet)
326 } else {
327 needNewSet := false
328 for _, k := range root.Set.Keywords {
329 if !inKeyValSlice(k, creator.curSet.Keywords) {
330 needNewSet = true
331 break
332 }
333 }
334 if needNewSet {
335 creator.curSet = &Entry{
336 Name: "/set",
337 Type: SpecialType,
338 Pos: len(creator.DH.Entries),
339 Keywords: consolidatedKeys,
340 }
341 creator.DH.Entries = append(creator.DH.Entries, *creator.curSet)
342 }
343 }
344 } else if creator.curSet != nil {
345 			// Getting here implies that the Entry's set has not been and
346 			// was not supposed to be evaluated; thus, we need to reset curSet
347 creator.DH.Entries = append(creator.DH.Entries, Entry{
348 Name: "/unset",
349 Type: SpecialType,
350 Pos: len(creator.DH.Entries),
351 })
352 creator.curSet = nil
353 }
354 }
355 root.Set = creator.curSet
356 if creator.curSet != nil {
357 root.Keywords = keyValDifference(root.Keywords, creator.curSet.Keywords)
358 }
359 root.Pos = len(creator.DH.Entries)
360 creator.DH.Entries = append(creator.DH.Entries, *root)
361 for _, c := range root.Children {
362 flatten(c, creator, keywords)
363 }
364 if root.Prev != nil {
365 // Show a comment when stepping out
366 root.Prev.Pos = len(creator.DH.Entries)
367 creator.DH.Entries = append(creator.DH.Entries, *root.Prev)
368 dotEntry := Entry{
369 Type: DotDotType,
370 Name: "..",
371 Pos: len(creator.DH.Entries),
372 }
373 creator.DH.Entries = append(creator.DH.Entries, dotEntry)
374 }
375 return
376 }
377
378 // resolveHardlinks goes through an Entry tree, finds the Entries associated
379 // with hardlinks, and fills them in with the actual data from the base file.
380 func resolveHardlinks(root *Entry, hardlinks map[string][]string, countlinks bool) {
381 originals := make(map[string]*Entry)
382 for base, links := range hardlinks {
383 var basefile *Entry
384 if seen, ok := originals[base]; !ok {
385 basefile = root.Find(base)
386 if basefile == nil {
387 logrus.Printf("%s does not exist in this tree\n", base)
388 continue
389 }
390 originals[base] = basefile
391 } else {
392 basefile = seen
393 }
394 for _, link := range links {
395 linkfile := root.Find(link)
396 if linkfile == nil {
397 logrus.Printf("%s does not exist in this tree\n", link)
398 continue
399 }
400 linkfile.Keywords = basefile.Keywords
401 if countlinks {
402 linkfile.Keywords = append(linkfile.Keywords, KeyVal(fmt.Sprintf("nlink=%d", len(links)+1)))
403 }
404 }
405 if countlinks {
406 basefile.Keywords = append(basefile.Keywords, KeyVal(fmt.Sprintf("nlink=%d", len(links)+1)))
407 }
408 }
409 }
410
411 // filter takes in a pointer to an Entry, and returns a slice of Entries that
412 // satisfy the predicate p
413 func filter(root *Entry, p func(*Entry) bool) []Entry {
414 if root != nil {
415 var validEntrys []Entry
416 if len(root.Children) > 0 || root.Prev != nil {
417 for _, c := range root.Children {
418 // filter the sub-directory
419 if c.Prev != nil {
420 validEntrys = append(validEntrys, filter(c, p)...)
421 }
422 if p(c) {
423 if c.Prev == nil {
424 validEntrys = append([]Entry{*c}, validEntrys...)
425 } else {
426 validEntrys = append(validEntrys, *c)
427 }
428 }
429 }
430 return validEntrys
431 }
432 }
433 return nil
434 }
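// NOTE (editorial sketch, not part of the upstream source): a small example of
// the predicate style that filter expects, collecting every Entry in the tree
// that describes a directory.
func filterDirsSketch(root *Entry) []Entry {
	return filter(root, func(e *Entry) bool {
		return e.IsDir()
	})
}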
435
436 func (ts *tarStream) setErr(err error) {
437 ts.err = err
438 }
439
440 func (ts *tarStream) Read(p []byte) (n int, err error) {
441 return ts.teeReader.Read(p)
442 }
443
444 func (ts *tarStream) Close() error {
445 return ts.pipeReader.Close()
446 }
447
448 // Hierarchy returns the DirectoryHierarchy of the archive. It flattens the
449 // Entry tree before returning the DirectoryHierarchy
450 func (ts *tarStream) Hierarchy() (*DirectoryHierarchy, error) {
451 if ts.err != nil && ts.err != io.EOF {
452 return nil, ts.err
453 }
454 if ts.root == nil {
455 return nil, fmt.Errorf("root Entry not found, nothing to flatten")
456 }
457 resolveHardlinks(ts.root, ts.hardlinks, InKeywordSlice(Keyword("nlink"), ts.keywords))
458 flatten(ts.root, &ts.creator, ts.keywords)
459 return ts.creator.DH, nil
460 }
0 package mtree
1
2 import (
3 "archive/tar"
4 "bytes"
5 "io"
6 "io/ioutil"
7 "os"
8 "path/filepath"
9 "syscall"
10 "testing"
11 "time"
12 )
13
14 func ExampleStreamer() {
15 fh, err := os.Open("./testdata/test.tar")
16 if err != nil {
17 // handle error ...
18 }
19 str := NewTarStreamer(fh, nil, nil)
20 if err := extractTar("/tmp/dir", str); err != nil {
21 // handle error ...
22 }
23
24 dh, err := str.Hierarchy()
25 if err != nil {
26 // handle error ...
27 }
28
29 res, err := Check("/tmp/dir/", dh, nil, nil)
30 if err != nil {
31 // handle error ...
32 }
33 if len(res) > 0 {
34 // handle validation issue ...
35 }
36 }
37 func extractTar(root string, tr io.Reader) error {
38 return nil
39 }
40
41 func TestTar(t *testing.T) {
42 /*
43 data, err := makeTarStream()
44 if err != nil {
45 t.Fatal(err)
46 }
47 buf := bytes.NewBuffer(data)
48 str := NewTarStreamer(buf, append(DefaultKeywords, "sha1"))
49 */
50 /*
51 // open empty folder and check size.
52 fh, err := os.Open("./testdata/empty")
53 if err != nil {
54 t.Fatal(err)
55 }
56 log.Println(fh.Stat())
57 fh.Close() */
58 fh, err := os.Open("./testdata/test.tar")
59 if err != nil {
60 t.Fatal(err)
61 }
62 str := NewTarStreamer(fh, nil, append(DefaultKeywords, "sha1"))
63
64 if _, err := io.Copy(ioutil.Discard, str); err != nil && err != io.EOF {
65 t.Fatal(err)
66 }
67 if err := str.Close(); err != nil {
68 t.Fatal(err)
69 }
70 defer fh.Close()
71
72 	// get DirectoryHierarchy struct from walking the tar archive
73 tdh, err := str.Hierarchy()
74 if err != nil {
75 t.Fatal(err)
76 }
77 if tdh == nil {
78 t.Fatal("expected a DirectoryHierarchy struct, but got nil")
79 }
80
81 testDir, present := os.LookupEnv("MTREE_TESTDIR")
82 	if !present {
83 testDir = "."
84 }
85 testPath := filepath.Join(testDir, "test.mtree")
86 fh, err = os.Create(testPath)
87 if err != nil {
88 t.Fatal(err)
89 }
90 defer os.Remove(testPath)
91
92 // put output of tar walk into test.mtree
93 _, err = tdh.WriteTo(fh)
94 if err != nil {
95 t.Fatal(err)
96 }
97 fh.Close()
98
99 // now simulate gomtree -T testdata/test.tar -f testdata/test.mtree
100 fh, err = os.Open(testPath)
101 if err != nil {
102 t.Fatal(err)
103 }
104 defer fh.Close()
105
106 dh, err := ParseSpec(fh)
107 if err != nil {
108 t.Fatal(err)
109 }
110
111 res, err := TarCheck(tdh, dh, append(DefaultKeywords, "sha1"))
112 if err != nil {
113 t.Fatal(err)
114 }
115
116 // print any failures, and then call t.Fatal once all failures/extra/missing
117 // are outputted
118 if len(res) > 0 {
119 for _, delta := range res {
120 t.Error(delta)
121 }
122 t.Fatal("unexpected errors")
123 }
124 }
125
126 // This test checks how gomtree handles archives that were created
127 // with multiple directories, i.e., archives created with something like:
128 // `tar -cvf some.tar dir1 dir2 dir3 dir4/dir5 dir6` ... etc.
129 // The testdata file collection.tar resembles such an archive; the `collection` folder
130 // contains the contents of `collection.tar` extracted.
131 func TestArchiveCreation(t *testing.T) {
132 fh, err := os.Open("./testdata/collection.tar")
133 if err != nil {
134 t.Fatal(err)
135 }
136 str := NewTarStreamer(fh, nil, []Keyword{"sha1"})
137
138 if _, err := io.Copy(ioutil.Discard, str); err != nil && err != io.EOF {
139 t.Fatal(err)
140 }
141 if err := str.Close(); err != nil {
142 t.Fatal(err)
143 }
144 defer fh.Close()
145
146 	// get DirectoryHierarchy struct from walking the tar archive
147 tdh, err := str.Hierarchy()
148 if err != nil {
149 t.Fatal(err)
150 }
151
152 // Test the tar manifest against the actual directory
153 res, err := Check("./testdata/collection", tdh, []Keyword{"sha1"}, nil)
154 if err != nil {
155 t.Fatal(err)
156 }
157
158 if len(res) > 0 {
159 for _, delta := range res {
160 t.Error(delta)
161 }
162 t.Fatal("unexpected errors")
163 }
164
165 // Test the tar manifest against itself
166 res, err = TarCheck(tdh, tdh, []Keyword{"sha1"})
167 if err != nil {
168 t.Fatal(err)
169 }
170 if len(res) > 0 {
171 for _, delta := range res {
172 t.Error(delta)
173 }
174 t.Fatal("unexpected errors")
175 }
176
177 // Validate the directory manifest against the archive
178 dh, err := Walk("./testdata/collection", nil, []Keyword{"sha1"}, nil)
179 if err != nil {
180 t.Fatal(err)
181 }
182 res, err = TarCheck(tdh, dh, []Keyword{"sha1"})
183 if err != nil {
184 t.Fatal(err)
185 }
186 if len(res) > 0 {
187 for _, delta := range res {
188 t.Error(delta)
189 }
190 t.Fatal("unexpected errors")
191 }
192 }
193
194 // Now test a tar file that was created with just the path to a file. In this
195 // test case, the traversal and creation of "placeholder" directories are
196 // evaluated. Also, the fact that this archive contains a single entry, yet the
197 // entry is associated with a file that has parent directories, means that the
198 // "." directory should be the lowest sub-directory under which `file` is contained.
199 func TestTreeTraversal(t *testing.T) {
200 fh, err := os.Open("./testdata/traversal.tar")
201 if err != nil {
202 t.Fatal(err)
203 }
204 str := NewTarStreamer(fh, nil, DefaultTarKeywords)
205
206 if _, err = io.Copy(ioutil.Discard, str); err != nil && err != io.EOF {
207 t.Fatal(err)
208 }
209 if err = str.Close(); err != nil {
210 t.Fatal(err)
211 }
212
213 fh.Close()
214 tdh, err := str.Hierarchy()
215
216 if err != nil {
217 t.Fatal(err)
218 }
219
220 res, err := TarCheck(tdh, tdh, []Keyword{"sha1"})
221 if err != nil {
222 t.Fatal(err)
223 }
224 if len(res) > 0 {
225 for _, delta := range res {
226 t.Error(delta)
227 }
228 t.Fatal("unexpected errors")
229 }
230
231 // top-level "." directory will contain contents of traversal.tar
232 res, err = Check("./testdata/.", tdh, []Keyword{"sha1"}, nil)
233 if err != nil {
234 t.Fatal(err)
235 }
236 if len(res) > 0 {
237 var failed bool
238 for _, delta := range res {
239 // We only care about missing or modified files.
240 // The original test was written using the old check code.
241 if delta.Type() != Extra {
242 failed = true
243 t.Error(delta)
244 }
245 }
246 if failed {
247 t.Fatal("unexpected errors")
248 }
249 }
250
251 	// Now test an archive that requires placeholder directories, i.e., there are
252 // no headers in the archive that are associated with the actual directory name
253 fh, err = os.Open("./testdata/singlefile.tar")
254 if err != nil {
255 t.Fatal(err)
256 }
257 str = NewTarStreamer(fh, nil, DefaultTarKeywords)
258 if _, err = io.Copy(ioutil.Discard, str); err != nil && err != io.EOF {
259 t.Fatal(err)
260 }
261 if err = str.Close(); err != nil {
262 t.Fatal(err)
263 }
264 tdh, err = str.Hierarchy()
265 if err != nil {
266 t.Fatal(err)
267 }
268
269 // Implied top-level "." directory will contain the contents of singlefile.tar
270 res, err = Check("./testdata/.", tdh, []Keyword{"sha1"}, nil)
271 if err != nil {
272 t.Fatal(err)
273 }
274 if len(res) > 0 {
275 var failed bool
276 for _, delta := range res {
277 // We only care about missing or modified files.
278 // The original test was written using the old check code.
279 if delta.Type() != Extra {
280 failed = true
281 t.Error(delta)
282 }
283 }
284 if failed {
285 t.Fatal("unexpected errors")
286 }
287 }
288 }
289
290 func TestHardlinks(t *testing.T) {
291 fh, err := os.Open("./testdata/hardlinks.tar")
292 if err != nil {
293 t.Fatal(err)
294 }
295 str := NewTarStreamer(fh, nil, append(DefaultTarKeywords, "nlink"))
296
297 if _, err = io.Copy(ioutil.Discard, str); err != nil && err != io.EOF {
298 t.Fatal(err)
299 }
300 if err = str.Close(); err != nil {
301 t.Fatal(err)
302 }
303
304 fh.Close()
305 tdh, err := str.Hierarchy()
306
307 if err != nil {
308 t.Fatal(err)
309 }
310 foundnlink := false
311 for _, e := range tdh.Entries {
312 if e.Type == RelativeType {
313 for _, kv := range e.Keywords {
314 if KeyVal(kv).Keyword() == "nlink" {
315 foundnlink = true
316 if KeyVal(kv).Value() != "3" {
317 t.Errorf("expected to have 3 hardlinks for %s", e.Name)
318 }
319 }
320 }
321 }
322 }
323 if !foundnlink {
324 t.Errorf("nlink expected to be evaluated")
325 }
326 }
327
328 type fakeFile struct {
329 Name, Body string
330 Mode int64
331 Type byte
332 Sec, Nsec int64
333 Xattrs map[string]string
334 }
335
336 // minimal tar archive that mimics what is in ./testdata/test.tar
337 var minimalFiles = []fakeFile{
338 {"x/", "", 0755, '5', 0, 0, nil},
339 {"x/files", "howdy\n", 0644, '0', 0, 0, nil},
340 }
341
342 func makeTarStream(ff []fakeFile) ([]byte, error) {
343 buf := new(bytes.Buffer)
344
345 // Create a new tar archive.
346 tw := tar.NewWriter(buf)
347
348 // Add some files to the archive.
349 for _, file := range ff {
350 hdr := &tar.Header{
351 Name: file.Name,
352 Uid: syscall.Getuid(),
353 Gid: syscall.Getgid(),
354 Mode: file.Mode,
355 Typeflag: file.Type,
356 Size: int64(len(file.Body)),
357 ModTime: time.Unix(file.Sec, file.Nsec),
358 AccessTime: time.Unix(file.Sec, file.Nsec),
359 ChangeTime: time.Unix(file.Sec, file.Nsec),
360 Xattrs: file.Xattrs,
361 }
362 if err := tw.WriteHeader(hdr); err != nil {
363 return nil, err
364 }
365 if len(file.Body) > 0 {
366 if _, err := tw.Write([]byte(file.Body)); err != nil {
367 return nil, err
368 }
369 }
370 }
371 // Make sure to check the error on Close.
372 if err := tw.Close(); err != nil {
373 return nil, err
374 }
375 return buf.Bytes(), nil
376 }
377
378 func TestArchiveExcludeNonDirectory(t *testing.T) {
379 fh, err := os.Open("./testdata/collection.tar")
380 if err != nil {
381 t.Fatal(err)
382 }
383 str := NewTarStreamer(fh, []ExcludeFunc{ExcludeNonDirectories}, []Keyword{"type"})
384
385 if _, err := io.Copy(ioutil.Discard, str); err != nil && err != io.EOF {
386 t.Fatal(err)
387 }
388 if err := str.Close(); err != nil {
389 t.Fatal(err)
390 }
391 fh.Close()
392 	// get DirectoryHierarchy struct from walking the tar archive
393 tdh, err := str.Hierarchy()
394 if err != nil {
395 t.Fatal(err)
396 }
397 for i := range tdh.Entries {
398 for _, keyval := range tdh.Entries[i].AllKeys() {
399 if tdh.Entries[i].Type == FullType || tdh.Entries[i].Type == RelativeType {
400 if keyval.Keyword() == "type" && keyval.Value() != "dir" {
401 t.Errorf("expected only directories, but %q is a %q", tdh.Entries[i].Name, keyval.Value())
402 }
403 }
404 }
405 }
406 }
0 #!/bin/bash
1 set -e
2
3 name=$(basename $0)
4 root="$(dirname $(dirname $(dirname $0)))"
5 gomtree=$(readlink -f ${root}/gomtree)
6 t=$(mktemp -t -d go-mtree.XXXXXX)
7
8 echo "[${name}] Running in ${t}"
9 # This test creates manifests from a tar archive and from the file system, and checks each against both the tar and the file system
10 #
11
12 pushd ${root}
13
14 git archive --format=tar HEAD^{tree} . > ${t}/${name}.tar
15
16 prev_umask=$(umask)
17 umask 0 # this is so the tar command can set the modes properly
18 mkdir -p ${t}/extract
19 tar -C ${t}/extract/ -xf ${t}/${name}.tar
20 umask ${prev_umask}
21
22 # create manifest from tar
23 ${gomtree} -K sha256digest -c -T ${t}/${name}.tar > ${t}/${name}.mtree
24
25 # check tar-manifest against the tar
26 ${gomtree} -f ${t}/${name}.mtree -T ${t}/${name}.tar
27
28 # check tar-manifest against the filesystem
29 # git archive makes the uid/gid as 0, so don't check them for this test
30 ${gomtree} -k size,sha256digest,mode,type -f ${t}/${name}.mtree -p ${t}/extract/
31
32 # create a manifest from filesystem
33 ${gomtree} -K sha256digest -c -p ${t}/extract/ > ${t}/${name}.mtree
34
35 # check filesystem-manifest against the filesystem
36 ${gomtree} -f ${t}/${name}.mtree -p ${t}/extract/
37
38 # check filesystem-manifest against the tar
39 # git archive makes the uid/gid as 0, so don't check them for this test
40 ${gomtree} -k size,sha256digest,mode,type -f ${t}/${name}.mtree -T ${t}/${name}.tar
41
42 popd
43 rm -rf ${t}
0 #!/bin/bash
1 set -e
2
3 name=$(basename $0)
4 root="$(dirname $(dirname $(dirname $0)))"
5 gomtree=$(readlink -f ${root}/gomtree)
6 t=$(mktemp -t -d go-mtree.XXXXXX)
7
8 echo "[${name}] Running in ${t}"
9 # This test creates a manifest for a symlink whose target contains spaces, and checks it against the same tree
10 #
11
12 pushd ${root}
13
14 # Create a symlink with spaces in the entries.
15 mkdir ${t}/root
16 ln -s "this is a dummy symlink" ${t}/root/link
17
18 # Create manifest and check it against the same symlink.
19 ${gomtree} -K link,sha256digest -c -p ${t}/root > ${t}/root.mtree
20 ${gomtree} -K link,sha256digest -f ${t}/root.mtree -p ${t}/root
21
22 popd
23 rm -rf ${t}
0 #!/bin/bash
1 set -e
2
3 name=$(basename $0)
4 root="$(dirname $(dirname $(dirname $0)))"
5 gomtree=$(readlink -f ${root}/gomtree)
6 t=$(mktemp -t -d go-mtree.XXXXXX)
7
8 setfattr -n user.has.xattrs -v "true" "${t}" || exit 0
9
10 echo "[${name}] Running in ${t}"
11
12 mkdir "${t}/dir"
13 touch "${t}/dir/file"
14
15 setfattr -n user.mtree.testing -v "apples and=bananas" "${t}/dir/file"
16 $gomtree -c -k "sha256digest,xattrs" -p ${t}/dir > ${t}/${name}.mtree
17
18 setfattr -n user.mtree.testing -v "bananas and lemons" "${t}/dir/file"
19 ! $gomtree -p ${t}/dir -f ${t}/${name}.mtree
20
21 setfattr -x user.mtree.testing "${t}/dir/file"
22 ! $gomtree -p ${t}/dir -f ${t}/${name}.mtree
23
24 setfattr -n user.mtree.testing -v "apples and=bananas" "${t}/dir/file"
25 setfattr -n user.mtree.another -v "another a=b" "${t}/dir/file"
26 ! $gomtree -p ${t}/dir -f ${t}/${name}.mtree
27
28 setfattr -n user.mtree.testing -v "apples and=bananas" "${t}/dir/file"
29 setfattr -x user.mtree.another "${t}/dir/file"
30 $gomtree -p ${t}/dir -f ${t}/${name}.mtree
31
32 rm -fr ${t}
0 #!/bin/bash
1 set -e
2 #set -x
3
4 name=$(basename $0)
5 root="$(dirname $(dirname $(dirname $0)))"
6 gomtree=$(readlink -f ${root}/gomtree)
7 t=$(mktemp -t -d go-mtree.XXXXXX)
8
9 echo "[${name}] Running in ${t}"
10
11 pushd ${root}
12 git archive --format=tar HEAD^{tree} . > ${t}/${name}.tar
13 mkdir -p ${t}/extract
14 tar -C ${t}/extract/ -xf ${t}/${name}.tar
15
16 ## This checks that keyword synonyms are respected
17 ${gomtree} -k sha1digest -c -p ${t}/extract/ > ${t}/${name}.mtree
18 ${gomtree} -k sha1 -f ${t}/${name}.mtree -p ${t}/extract/
19 ${gomtree} -k sha1 -c -p ${t}/extract/ > ${t}/${name}.mtree
20 ${gomtree} -k sha1digest -f ${t}/${name}.mtree -p ${t}/extract/
21
22 popd
23 rm -rf ${t}
0 #!/bin/bash
1 set -e
2
3 name=$(basename $0)
4 root="$(dirname $(dirname $(dirname $0)))"
5 gomtree=$(readlink -f ${root}/gomtree)
6 t=$(mktemp -t -d go-mtree.XXXXXX)
7
8 echo "[${name}] Running in ${t}"
9
10 pushd ${root}
11 mkdir -p ${t}/extract
12 git archive --format=tar HEAD^{tree} . | tar -C ${t}/extract/ -x
13
14 ${gomtree} -k sha1digest -c -p ${t}/extract/ > ${t}/${name}.mtree
15 ${gomtree} -f ${t}/${name}.mtree -k md5digest -p ${t}/extract/
16
17 popd
18 rm -rf ${t}
0 #!/bin/bash
1 set -e
2
3 name=$(basename $0)
4 root="$(dirname $(dirname $(dirname $0)))"
5 gomtree=$(readlink -f ${root}/gomtree)
6 t=$(mktemp -t -d go-mtree.XXXXXX)
7
8 echo "[${name}] Running in ${t}"
9 # This test creates a directories-only manifest from a tar archive, and checks it against the tar and the file system
10 #
11
12 pushd ${root}
13
14 git archive --format=tar HEAD^{tree} . > ${t}/${name}.tar
15
16 prev_umask=$(umask)
17 umask 0 # this is so the tar command can set the modes properly
18 mkdir -p ${t}/extract
19 tar -C ${t}/extract/ -xf ${t}/${name}.tar
20 umask ${prev_umask}
21
22 # create manifest from tar, ignoring non directories
23 ${gomtree} -d -c -k type -T ${t}/${name}.tar > ${t}/${name}.mtree
24
25 # check tar-manifest against the tar
26 ${gomtree} -d -f ${t}/${name}.mtree -T ${t}/${name}.tar
27
28 # check filesystem-manifest against the filesystem
29 ${gomtree} -f ${t}/${name}.mtree -p ${t}/extract/
30
31 popd
32 rm -rf ${t}
0 #!/bin/bash
1 set -e
2
3 name=$(basename $0)
4 root="$(dirname $(dirname $(dirname $0)))"
5 gomtree=$(readlink -f ${root}/gomtree)
6 left=$(mktemp -t -d go-mtree.XXXXXX)
7 right=$(mktemp -t -d go-mtree.XXXXXX)
8
9 echo "[${name}] Running in ${left} and ${right}"
10
11 touch ${left}/one
12 touch ${left}/two
13 cp -a ${left}/one ${right}/
14
15 $gomtree -K "sha256digest" -p ${left} -c > /tmp/left.mtree
16 $gomtree -k "sha256digest" -p ${right} -f /tmp/left.mtree
17 rm -fr ${left} ${right}
0 #!/bin/bash
1 set -e
2
3 name=$(basename $0)
4 root="$(dirname $(dirname $(dirname $0)))"
5 gomtree=$(readlink -f ${root}/gomtree)
6 t=$(mktemp -d /tmp/go-mtree.XXXXXX)
7
8 echo "[${name}] Running in ${t}"
9
10 pushd ${root}
11 mkdir -p ${t}/extract
12 git archive --format=tar HEAD^{tree} . | tar -C ${t}/extract/ -x
13
14 ${gomtree} -K sha256digest -c -p ${t}/extract/ > ${t}/${name}.mtree
15
16 ## This is a use-case for checking a directory while reading the manifest from stdin,
17 ## since the `-f` flag is not provided.
18 cat ${t}/${name}.mtree | ${gomtree} -p ${t}/extract/
19
20 popd
21 rm -rf ${t}
0 #!/bin/bash
1 set -e
2
3 name=$(basename $0)
4 root="$(dirname $(dirname $(dirname $0)))"
5 gomtree=$(readlink -f ${root}/gomtree)
6 t=$(mktemp -t -d go-mtree.XXXXXX)
7
8 echo "[${name}] Running in ${t}"
9
10 pushd ${root}
11
12 # Create some unicode files.
13 mkdir ${t}/root
14 echo "some data" > "${t}/root/$(printf 'this file has \u042a some unicode !!')"
15 echo "more data" > "${t}/root/$(printf 'even more \x07 unicode \ua4ff characters \udead\ubeef\ucafe')"
16 mkdir -p "${t}/root/$(printf '\024 <-- some more weird characters --> \u4f60\u597d\uff0c\u4e16\u754c')"
17 ln -s "$(printf '62_\u00c6\u00c62\u00ae\u00b7m\u00db\u00c3r^\u00bfp\u00c6u"q\u00fbc2\u00f0u\u00b8\u00dd\u00e8v\u00ff\u00b0\u00dc\u00c2\u00f53\u00db')" "${t}/root/$(printf 'k\u00f2sd4\\p\u00da\u00a6\u00d3\u00eea<\u00e6s{\u00a0p\u00f0\u00ffj\u00e0\u00e8\u00b8\u00b8\u00bc\u00fcb')"
18 printf 'some lovely data 62_\u00c6\u00c62\u00ae\u00b7m\u00db\u00c3r' > "${t}/root/$(printf 'T\u00dcB\u0130TAK_UEKAE_K\u00f6k_Sertifika_Hizmet_Sa\u011flay\u0131c\u0131s\u0131_-_S\u00fcr\u00fcm_3.pem')"
19
20 # Create manifest and check it against the same root.
21 ${gomtree} -k uid,gid,size,type,link,nlink,sha256digest -c -p ${t}/root > ${t}/root.mtree
22 ${gomtree} -k uid,gid,size,type,link,nlink,sha256digest -f ${t}/root.mtree -p ${t}/root
23
24 # Modify it and make sure that it successfully figures out what changed.
25 echo "othe data" > "${t}/root/$(printf 'this file has \u042a some unicode !!')"
26 ! ${gomtree} -k uid,gid,size,type,link,nlink,sha256digest -f ${t}/root.mtree -p ${t}/root
27
28 echo "some data" > "${t}/root/$(printf 'this file has \u042a some unicode !!')"
29 ${gomtree} -k uid,gid,size,type,link,nlink,sha256digest -f ${t}/root.mtree -p ${t}/root
30
31 popd
32 rm -rf ${t}
0 package main
1
2 import (
3 "flag"
4 "fmt"
5 "os"
6 "os/exec"
7 )
8
9 func main() {
10 flag.Parse()
11
12 failed := 0
13 for _, arg := range flag.Args() {
14 cmd := exec.Command("bash", arg)
15 if os.Getenv("TMPDIR") != "" {
16 cmd.Env = append(cmd.Env, "TMPDIR="+os.Getenv("TMPDIR"))
17 }
18 cmd.Stderr = os.Stderr
19 cmd.Stdout = os.Stdout
20 if err := cmd.Run(); err != nil {
21 failed++
22 fmt.Fprintf(os.Stderr, "FAILED: %s\n", arg)
23 }
24 }
25 if failed > 0 {
26 fmt.Fprintf(os.Stderr, "%d FAILED tests\n", failed)
27 os.Exit(1)
28 }
29 }
Binary diff not shown
Binary diff not shown
Binary diff not shown
0 # user: vbatts
1 # machine: valse.usersys.redhat.com
2 # tree: /home/vbatts/src/vb/go-mtree
3 # date: Thu Mar 10 16:15:36 2016
4
5 # .
6 /set type=file uid=1000 gid=1000 mode=0664 nlink=1 flags=none
7 . type=dir mode=0775 nlink=5 size=4096 \
8 time=1457644509.817998120
9 .mtree.go.swp \
10 mode=0644 size=12288 time=1457644483.833957552 \
11 sha512digest=7efd38cc957b53e3d5abd25e9df9bde715a5e18dc18abfa8f0c0eaa9bca0fda32faad4972bd4d9b555270fa3b3b92e6a1e97ad1e3bfff14fc933284004f7e8d3
12 .mtree_test.go.swp \
13 mode=0644 size=12288 time=1457644509.813998114 \
14 sha512digest=2aaeb1e5f03a563de86a4c1f722bde40312bbecbe4504d67fb441240c049e6ad11294ef834114a05e620a390ee9f42c1c77b43bf1a4f2d47f128226183968a33
15 mtree.go size=1719 time=1457644447.020900074 \
16 sha512digest=2dee9f50ba167cd90d4f6e8494d4735c6b064c279ea4d26ed2c47552a5c0b82b0bdcadb214e0ca919a1768ce0e321e8d1bb71eacb9c5b4bb5fd4342967055a04
17 mtree.go~ size=1719 time=1457644447.012900062 \
18 sha512digest=2dee9f50ba167cd90d4f6e8494d4735c6b064c279ea4d26ed2c47552a5c0b82b0bdcadb214e0ca919a1768ce0e321e8d1bb71eacb9c5b4bb5fd4342967055a04
19 mtree_test.go \
20 size=581 time=1457644509.817998120 \
21 sha512digest=1b3ab2de2759df6e7d424d05ffd1d45101fd10ae86328abad1657e68044add69d83d6b3271225070f4f4154a7c0c995016077f8c7969890e068cd0a54b23f4b6
22 mtree_test.go~ \
23 size=581 time=1457644509.812998113 \
24 sha512digest=1b3ab2de2759df6e7d424d05ffd1d45101fd10ae86328abad1657e68044add69d83d6b3271225070f4f4154a7c0c995016077f8c7969890e068cd0a54b23f4b6
25
26 # ./.git
27 /set type=file uid=1000 gid=1000 mode=0775 nlink=1 flags=none
28 .git type=dir nlink=7 size=4096 time=1457644470.212936285
29 HEAD mode=0664 size=23 time=1454678171.602940889 \
30 sha512digest=76b199f1bffdc6d21547895431b0f85f3857df400225cbd1ae55e7ded09786a8a44e1cb21f1feff4951cf57c893043a0be696dcb0e4f5758a4d6081a833eb6cf
31 config mode=0664 size=92 time=1454678171.607940896 \
32 sha512digest=961231503987d2154696364e8e4d7b3aaf2d2abe5da55367b615a5d9e1bcda30824c30880bfdb8e9959444040d91cd3ec6fa225e8a36a93cfc182bf263d6e02a
33 description mode=0664 size=73 time=1454678171.602940889 \
34 sha512digest=f7e152f75b124c3e1c5d12f00427729d9eec4e2c1bf70d7606440a6600d003367eb178331e75ab976a61496e79c2c822020849d28ffd170946397b934611b471
35 index mode=0664 size=272 time=1457644470.212936285 \
36 sha512digest=fcecb444e3748cccfc498a63939f489f6456f1cf9e0e8ace21b7cdf8eecfb0e0a3db9bf6d449db2608752e5c0dbae786b4facb46f359555871b7e48d80b9a765
37
38 # ./.git/branches
39 branches type=dir nlink=2 size=4096 time=1454678171.596940880
40 # ./.git/branches
41 ..
42
43
44 # ./.git/hooks
45 hooks type=dir nlink=2 size=4096 time=1454678171.600940886
46 applypatch-msg.sample \
47 size=478 time=1454678171.597940882 \
48 sha512digest=536cce804d84e25813993efdd240537b52d00ce9cdcecf1982f85096d56a521290104c825c00b370b2752201952a9616a3f4e28c5d27a5b4e4842101a2ff9bee
49 commit-msg.sample \
50 size=896 time=1454678171.598940883 \
51 sha512digest=d6bb7fa747f4625adf1877f546565cbe812ca7dd4168f7e9068e6732555d8737eba549546cf5946649e3f38de82d173aaf9c160a4c9f9445655258b4c5f955eb
52 post-update.sample \
53 size=189 time=1454678171.598940883 \
54 sha512digest=473ad124642571656276bf83b9ff63ab1804d3c23a5bdae52391c6f70a894849ac60c10c9d31deff3938922ce83b68b1e60c11592bbf7ea503f4acd39968cefa
55 pre-applypatch.sample \
56 size=424 time=1454678171.598940883 \
57 sha512digest=cb78aa7e9b9c146e5db65d86dd83f04e2b6942a06fab50c704a0fd900683f3b6ad1164e74afe2f267f6da91cdff0b9ab07713e12cefc6f8d741b5df194f4fda6
58 pre-commit.sample \
59 size=1642 time=1454678171.599940885 \
60 sha512digest=4a7ac2ed2ffe50d2a997c3477b5855c3d19423c295a82f7d660be3418217a183b0ce25f94ae2e350f4d93eda3b75a4bd62cf27f0685d8a837919bcc9e7e0962a
61 pre-push.sample \
62 size=1348 time=1454678171.599940885 \
63 sha512digest=37cd20a090494acb16c2255fe93dadeb409f2553afddc409dad5b5e99c9f5612a28eba397c0f3ae771e17edbe0a6c2a3af311154285e6d9c43e4cc4f06ba5b8d
64 pre-rebase.sample \
65 size=4951 time=1454678171.600940886 \
66 sha512digest=de9bdf513f383b3215e467c025ca851a0f2661b91811bfd441486b678e4d45e450ce9185a7c3ac2988bd6154d4983b3e29427623117e73623f2af43cf4b746da
67 prepare-commit-msg.sample \
68 size=1239 time=1454678171.600940886 \
69 sha512digest=fbf15f12136a413dd590e57392e6122714d67456beed50d9ef285e40076a138a7d44291b5642fe5c518dc56c640436caf6225d1a966e06c042b30e6a7d08dcef
70 update.sample \
71 size=3611 time=1454678171.601940888 \
72 sha512digest=f8e20df0e6c970fa809d5c9494cb9d5e32894fe20f23f6730df84f5fa065e52c55cca98d0db1ed17df53c606629c787ae56b6af6faca527f62c817f8cb148c1a
73 # ./.git/hooks
74 ..
75
76
77 # ./.git/info
78 /set type=file uid=1000 gid=1000 mode=0664 nlink=1 flags=none
79 info type=dir mode=0775 nlink=2 size=4096 \
80 time=1454678171.601940888
81 exclude size=240 time=1454678171.601940888 \
82 sha512digest=9828c6ecdf91bf117416e17f4ee9caee2e1e37b6fb00b9ff04035ace17a3089b9d0a25c6baa1046c0e1c62d3da88838e8fca74ea82973d6b975905fde58f3072
83 # ./.git/info
84 ..
85
86
87 # ./.git/objects
88 /set type=file uid=1000 gid=1000 mode=0775 nlink=1 flags=none
89 objects type=dir nlink=7 size=4096 time=1457644470.212936285
90
91 # ./.git/objects/35
92 /set type=file uid=1000 gid=1000 mode=0444 nlink=1 flags=none
93 35 type=dir mode=0775 nlink=2 size=4096 \
94 time=1457644470.212936285
95 51bcca10021bb56f13dd57a84b97cdf7fda0cf \
96 size=344 time=1457644470.212936285 \
97 sha512digest=cc9cbb88b1c060f78a06d65decb59f2e4f7cafe25fc962d549b1826b91f2b27f22d13ec104acdea1dc30fa22c533fa88da9df2785a782dacbf93264741099043
98 # ./.git/objects/35
99 ..
100
101
102 # ./.git/objects/45
103 45 type=dir mode=0775 nlink=2 size=4096 \
104 time=1457644470.212936285
105 15ff8d078d5291b75e99cbaa3e6c5e83ffaa2e \
106 size=363 time=1457644470.212936285 \
107 sha512digest=b4cb2c25df4a6d5a158b4d945d1c1bd26a17b286c472bb9ad5b43624ee09c01b24c817875ac7611992f2b45eea0287cb0d8e4ae257ed054d1e33ae6f481728c8
108 # ./.git/objects/45
109 ..
110
111
112 # ./.git/objects/88
113 88 type=dir mode=0775 nlink=2 size=4096 \
114 time=1454678174.768945501
115 2ba9732d762797f0f338de29bf79a8c0b7a679 \
116 size=30 time=1454678174.768945501 \
117 sha512digest=cc95f98c49e284de74626cc5da73ff54388a65fd796f532ae070c09635784ae2dac04537e1d2b1c73fae7be8ad96c96fab3ddddd943a7abc7b3ee8741fe56de5
118 # ./.git/objects/88
119 ..
120
121
122 # ./.git/objects/info
123 info type=dir mode=0775 nlink=2 size=4096 \
124 time=1454678171.609940899
125 # ./.git/objects/info
126 ..
127
128
129 # ./.git/objects/pack
130 pack type=dir mode=0775 nlink=2 size=4096 \
131 time=1454678171.609940899
132 # ./.git/objects/pack
133 ..
134
135 # ./.git/objects
136 ..
137
138
139 # ./.git/refs
140 /set type=file uid=1000 gid=1000 mode=0775 nlink=1 flags=none
141 refs type=dir nlink=4 size=4096 time=1454678171.595940879
142
143 # ./.git/refs/heads
144 heads type=dir nlink=2 size=4096 time=1454678171.595940879
145 # ./.git/refs/heads
146 ..
147
148
149 # ./.git/refs/tags
150 tags type=dir nlink=2 size=4096 time=1454678171.595940879
151 # ./.git/refs/tags
152 ..
153
154 # ./.git/refs
155 ..
156
157 # ./.git
158 ..
159
160
161 # ./cmd
162 cmd type=dir nlink=3 size=4096 time=1457644245.753596701
163
164 # ./cmd/gomtree
165 /set type=file uid=1000 gid=1000 mode=0664 nlink=1 flags=none
166 gomtree type=dir mode=0775 nlink=2 size=4096 \
167 time=1457644313.182697118
168 .main.go.swp \
169 mode=0644 size=12288 time=1457644334.048728193 \
170 sha512digest=ac1a05940c2033962a7ba6878a25aeeac84df3d41548b15c2b3ae283c8090012f3646821906b0edc4b716cf3a220536c862ea370ce235bffc7b7520f77df8903
171 main.go size=552 time=1457644313.182697118 \
172 sha512digest=519009a7756506e7aac81734b9701ac33aa0c22b1866abdc1fc1550e6c4e7164fa794d60f524116e7cfcdc9456f8637f504359749690f8199008263925350dad
173 main.go~ size=552 time=1457644313.171697102 \
174 sha512digest=519009a7756506e7aac81734b9701ac33aa0c22b1866abdc1fc1550e6c4e7164fa794d60f524116e7cfcdc9456f8637f504359749690f8199008263925350dad
175 # ./cmd/gomtree
176 ..
177
178 # ./cmd
179 ..
180
181
182 # ./testdata
183 testdata type=dir mode=0775 nlink=2 size=4096 \
184 time=1457644536.529039825
185 Downloads.mtree \
186 size=31406 time=1457644391.210813319 \
187 sha512digest=a31c61a8907078a82c1ad542ded518e10e862dcf9f561970a6174652d716c4e1ea745410c6a69d7382b13607e85426a190268955d4ecb657990c3fa456e39b46
188 source.mtree \
189 size=8192 time=1457644536.536039836 \
190 sha512digest=9748642c0ef6784611d155030067ccc5cdad5d51b4dc61b103da08377bd75aa3132756dcdb81c850e6555de5e845f26e22996b8bf1f76d39e71128585fbe6d17
191 # ./testdata
192 ..
193
194 ..
195
Binary diff not shown
Binary diff not shown
0 package mtree
1
2 import (
3 "container/heap"
4 "os"
5 "sort"
6
7 "github.com/Sirupsen/logrus"
8 )
9
10 // DefaultUpdateKeywords is the default set of keywords that can be applied as updates to the files on disk
11 var DefaultUpdateKeywords = []Keyword{
12 "uid",
13 "gid",
14 "mode",
15 "xattr",
16 "link",
17 "time",
18 }
19
20 // Update attempts to set the attributes of the paths under the root directory, given the values of `keywords` in the dh DirectoryHierarchy.
21 func Update(root string, dh *DirectoryHierarchy, keywords []Keyword, fs FsEval) ([]InodeDelta, error) {
22 creator := dhCreator{DH: dh}
23 curDir, err := os.Getwd()
24 if err == nil {
25 defer os.Chdir(curDir)
26 }
27
28 if err := os.Chdir(root); err != nil {
29 return nil, err
30 }
31 sort.Sort(byPos(creator.DH.Entries))
32
33 	// This is for deferring the update of directory mtimes, unwinding them
34 	// most-specific path first
35 h := &pathUpdateHeap{}
36 heap.Init(h)
37
38 results := []InodeDelta{}
39 for i, e := range creator.DH.Entries {
40 switch e.Type {
41 case SpecialType:
42 if e.Name == "/set" {
43 creator.curSet = &creator.DH.Entries[i]
44 } else if e.Name == "/unset" {
45 creator.curSet = nil
46 }
47 logrus.Debugf("%#v", e)
48 continue
49 case RelativeType, FullType:
50 e.Set = creator.curSet
51 pathname, err := e.Path()
52 if err != nil {
53 return nil, err
54 }
55
56 // filter the keywords to update on the file, from the keywords available for this entry:
57 var kvToUpdate []KeyVal
58 kvToUpdate = keyvalSelector(e.AllKeys(), keywords)
59 logrus.Debugf("kvToUpdate(%q): %#v", pathname, kvToUpdate)
60
61 for _, kv := range kvToUpdate {
62 if !InKeywordSlice(kv.Keyword().Prefix(), keywordPrefixes(keywords)) {
63 continue
64 }
65 logrus.Debugf("finding function for %q (%q)", kv.Keyword(), kv.Keyword().Prefix())
66 ukFunc, ok := UpdateKeywordFuncs[kv.Keyword().Prefix()]
67 if !ok {
68 logrus.Debugf("no UpdateKeywordFunc for %s; skipping", kv.Keyword())
69 continue
70 }
71
72 // TODO check for the type=dir of the entry as well
73 if kv.Keyword().Prefix() == "time" && e.IsDir() {
74 heap.Push(h, pathUpdate{
75 Path: pathname,
76 E: e,
77 KV: kv,
78 Func: ukFunc,
79 })
80
81 continue
82 }
83
84 if _, err := ukFunc(pathname, kv); err != nil {
85 results = append(results, InodeDelta{
86 diff: ErrorDifference,
87 path: pathname,
88 old: e,
89 keys: []KeyDelta{
90 {
91 diff: ErrorDifference,
92 name: kv.Keyword(),
93 err: err,
94 },
95 }})
96 }
97 // XXX really would be great to have a Check() or Compare() right here,
98 // to compare each entry as it is encountered, rather than just running
99 // Check() on this path after the whole update is finished.
100 }
101 }
102 }
103
104 for h.Len() > 0 {
105 pu := heap.Pop(h).(pathUpdate)
106 if _, err := pu.Func(pu.Path, pu.KV); err != nil {
107 results = append(results, InodeDelta{
108 diff: ErrorDifference,
109 path: pu.Path,
110 old: pu.E,
111 keys: []KeyDelta{
112 {
113 diff: ErrorDifference,
114 name: pu.KV.Keyword(),
115 err: err,
116 },
117 }})
118 }
119 }
120 return results, nil
121 }
122
123 type pathUpdateHeap []pathUpdate
124
125 func (h pathUpdateHeap) Len() int { return len(h) }
126 func (h pathUpdateHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
127
128 // This may look backwards, but container/heap pops the "least" element as
129 // defined by Less. With Less comparing by greater length, the longest
130 // (most-qualified) path name is popped first, such that "." is the last
131 // entry popped.
132 func (h pathUpdateHeap) Less(i, j int) bool {
133 return len(h[i].Path) > len(h[j].Path)
134 }
135
136 func (h *pathUpdateHeap) Push(x interface{}) {
137 *h = append(*h, x.(pathUpdate))
138 }
139
140 func (h *pathUpdateHeap) Pop() interface{} {
141 old := *h
142 n := len(old)
143 x := old[n-1]
144 *h = old[0 : n-1]
145 return x
146 }
147
148 type pathUpdate struct {
149 Path string
150 E Entry
151 KV KeyVal
152 Func UpdateKeywordFunc
153 }
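// NOTE (editorial sketch, not part of the upstream source): a minimal round
// trip using the exported API: capture a manifest with Walk, then restore the
// recorded attributes with Update. The path "/some/dir" is hypothetical.
func updateRoundTripSketch() error {
	dh, err := Walk("/some/dir", nil, DefaultKeywords, nil)
	if err != nil {
		return err
	}
	// ... later, after files under /some/dir have drifted from the manifest ...
	deltas, err := Update("/some/dir", dh, DefaultUpdateKeywords, nil)
	if err != nil {
		return err
	}
	if len(deltas) > 0 {
		// some keywords could not be restored; inspect the returned InodeDelta values
	}
	return nil
}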
0 package mtree
1
2 import (
3 "encoding/json"
4 "fmt"
5 "io/ioutil"
6 "os"
7 "path/filepath"
8 "testing"
9
10 "github.com/vbatts/go-mtree/xattr"
11 )
12
13 func init() {
14 //logrus.SetLevel(logrus.DebugLevel)
15 }
16
17 func TestXattrUpdate(t *testing.T) {
18 content := []byte("I know half of you half as well as I ought to")
19 	// a bit dirty to create/destroy a directory in cwd, but often /tmp is
20 // mounted tmpfs and doesn't support xattrs
21 dir, err := ioutil.TempDir(".", "test.xattr.restore.")
22 if err != nil {
23 t.Fatal(err)
24 }
25 defer os.RemoveAll(dir) // clean up
26
27 tmpfn := filepath.Join(dir, "tmpfile")
28 if err := ioutil.WriteFile(tmpfn, content, 0666); err != nil {
29 t.Fatal(err)
30 }
31
32 if err := xattr.Set(dir, "user.test", []byte("directory")); err != nil {
33 t.Skip(fmt.Sprintf("skipping: %q does not support xattrs", dir))
34 }
35 if err := xattr.Set(tmpfn, "user.test", []byte("regular file")); err != nil {
36 t.Fatal(err)
37 }
38
39 // Walk this tempdir
40 dh, err := Walk(dir, nil, append(DefaultKeywords, []Keyword{"xattr", "sha1"}...), nil)
41 if err != nil {
42 t.Fatal(err)
43 }
44
45 // Now check that we're sane
46 res, err := Check(dir, dh, nil, nil)
47 if err != nil {
48 t.Fatal(err)
49 }
50 if len(res) != 0 {
51 t.Errorf("expecting no failures, but got %q", res)
52 }
53
54 if err := xattr.Set(tmpfn, "user.test", []byte("let it fly")); err != nil {
55 t.Fatal(err)
56 }
57
58 // Now check that we fail the check
59 res, err = Check(dir, dh, nil, nil)
60 if err != nil {
61 t.Fatal(err)
62 }
63 if len(res) == 0 {
64 t.Error("expected failures (like xattrs), but got none")
65 }
66
67 // restore the xattrs to original
68 res, err = Update(dir, dh, append(DefaultUpdateKeywords, "xattr"), nil)
69 if err != nil {
70 t.Error(err)
71 }
72 if len(res) != 0 {
73 t.Errorf("expecting no failures, but got %q", res)
74 }
75
76 // Now check that we're sane again
77 res, err = Check(dir, dh, nil, nil)
78 if err != nil {
79 t.Fatal(err)
80 }
81 if len(res) != 0 {
82 // pretty this shit up
83 buf, err := json.MarshalIndent(res, "", " ")
84 if err != nil {
85 t.Errorf("expecting no failures, but got %q", res)
86 } else {
87 t.Errorf("expecting no failures, but got %s", string(buf))
88 }
89 }
90
91 	// TODO make a test for xattr here. Likely in user space due to privileges. Even so, this may be prone to error since some tmpfs mounts don't handle xattrs correctly. :-\
92 	// I'd hate to have to t.Skip() a test rather than fail altogether.
93 }
0 // +build go1.7
1
2 package mtree
3
4 import (
5 "container/heap"
6 "encoding/json"
7 "io/ioutil"
8 "os"
9 "os/user"
10 "path/filepath"
11 "strconv"
12 "testing"
13 "time"
14
15 "github.com/Sirupsen/logrus"
16 )
17
18 func init() {
19 logrus.SetLevel(logrus.DebugLevel)
20 }
21
22 func TestUpdate(t *testing.T) {
23 content := []byte("I know half of you half as well as I ought to")
24 dir, err := ioutil.TempDir("", "test-check-keywords")
25 if err != nil {
26 t.Fatal(err)
27 }
28 defer os.RemoveAll(dir) // clean up
29
30 tmpfn := filepath.Join(dir, "tmpfile")
31 if err := ioutil.WriteFile(tmpfn, content, 0666); err != nil {
32 t.Fatal(err)
33 }
34
35 // Walk this tempdir
36 dh, err := Walk(dir, nil, append(DefaultKeywords, "sha1"), nil)
37 if err != nil {
38 t.Fatal(err)
39 }
40
41 // Touch a file, so the mtime changes.
42 now := time.Now()
43 if err := os.Chtimes(tmpfn, now, now); err != nil {
44 t.Fatal(err)
45 }
46 if err := os.Chmod(tmpfn, os.FileMode(0600)); err != nil {
47 t.Fatal(err)
48 }
49
50 // Changing user is a little tough, but the group can be changed by a limited user to any group that the user is a member of. So just choose one that is not the current main group.
51 u, err := user.Current()
52 if err != nil {
53 t.Fatal(err)
54 }
55 ugroups, err := u.GroupIds()
56 if err != nil {
57 t.Fatal(err)
58 }
59 for _, ugroup := range ugroups {
60 if ugroup == u.Gid {
61 continue
62 }
63 gid, err := strconv.Atoi(ugroup)
64 if err != nil {
65 t.Fatal(ugroup)
66 }
67 if err := os.Lchown(tmpfn, -1, gid); err != nil {
68 t.Fatal(err)
69 }
70 }
71
72 // Check for sanity. This ought to have failures
73 res, err := Check(dir, dh, nil, nil)
74 if err != nil {
75 t.Fatal(err)
76 }
77 if len(res) == 0 {
78 t.Error("expected failures (like mtimes), but got none")
79 }
80 //dh.WriteTo(os.Stdout)
81
82 res, err = Update(dir, dh, DefaultUpdateKeywords, nil)
83 if err != nil {
84 t.Error(err)
85 }
86 if len(res) > 0 {
87 // pretty this shit up
88 buf, err := json.MarshalIndent(res, "", " ")
89 if err != nil {
90 t.Errorf("%#v", res)
91 }
92 t.Error(string(buf))
93 }
94
95 // Now check that we're sane again
96 res, err = Check(dir, dh, nil, nil)
97 if err != nil {
98 t.Fatal(err)
99 }
100 // should have no failures now
101 if len(res) > 0 {
102 // pretty this shit up
103 buf, err := json.MarshalIndent(res, "", " ")
104 if err != nil {
105 t.Errorf("%#v", res)
106 } else {
107 t.Error(string(buf))
108 }
109 }
110
111 }
112
113 func TestPathUpdateHeap(t *testing.T) {
114 h := &pathUpdateHeap{
115 pathUpdate{Path: "not/the/longest"},
116 pathUpdate{Path: "almost/the/longest"},
117 pathUpdate{Path: "."},
118 pathUpdate{Path: "short"},
119 }
120 heap.Init(h)
121 v := "this/is/one/is/def/the/longest"
122 heap.Push(h, pathUpdate{Path: v})
123
124 longest := len(v)
125 var p string
126 for h.Len() > 0 {
127 p = heap.Pop(h).(pathUpdate).Path
128 if len(p) > longest {
129 t.Errorf("expected next path to be shorter, but it was not %q is longer than %d", p, longest)
130 }
131 }
132 if p != "." {
133 t.Errorf("expected \".\" to be the last, but got %q", p)
134 }
135 }
0 package mtree
1
2 import (
3 "fmt"
4 "os"
5 "strconv"
6 "strings"
7 "time"
8
9 "github.com/Sirupsen/logrus"
10 "github.com/vbatts/go-mtree/pkg/govis"
11 )
12
13 // UpdateKeywordFunc is the signature for a function that will restore a file's
14 // attributes, where path is the relative path to the file, and kv holds the
15 // keyword and value to be restored.
16 type UpdateKeywordFunc func(path string, kv KeyVal) (os.FileInfo, error)
17
18 // UpdateKeywordFuncs is the registered list of functions to update file attributes.
19 // Keyed by the keyword as it would show up in the manifest
20 var UpdateKeywordFuncs = map[Keyword]UpdateKeywordFunc{
21 "mode": modeUpdateKeywordFunc,
22 "time": timeUpdateKeywordFunc,
23 "tar_time": tartimeUpdateKeywordFunc,
24 "uid": uidUpdateKeywordFunc,
25 "gid": gidUpdateKeywordFunc,
26 "xattr": xattrUpdateKeywordFunc,
27 "link": linkUpdateKeywordFunc,
28 }
29
30 func uidUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
31 uid, err := strconv.Atoi(kv.Value())
32 if err != nil {
33 return nil, err
34 }
35
36 stat, err := os.Lstat(path)
37 if err != nil {
38 return nil, err
39 }
40 if statIsUID(stat, uid) {
41 return stat, nil
42 }
43
44 if err := os.Lchown(path, uid, -1); err != nil {
45 return nil, err
46 }
47 return os.Lstat(path)
48 }
49
50 func gidUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
51 gid, err := strconv.Atoi(kv.Value())
52 if err != nil {
53 return nil, err
54 }
55
56 stat, err := os.Lstat(path)
57 if err != nil {
58 return nil, err
59 }
60 if statIsGID(stat, gid) {
61 return stat, nil
62 }
63
64 if err := os.Lchown(path, -1, gid); err != nil {
65 return nil, err
66 }
67 return os.Lstat(path)
68 }
69
70 func modeUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
71 info, err := os.Lstat(path)
72 if err != nil {
73 return nil, err
74 }
75
76 // don't set mode on symlinks, as it passes through to the backing file
77 if info.Mode()&os.ModeSymlink != 0 {
78 return info, nil
79 }
80 vmode, err := strconv.ParseInt(kv.Value(), 8, 32)
81 if err != nil {
82 return nil, err
83 }
84
85 stat, err := os.Lstat(path)
86 if err != nil {
87 return nil, err
88 }
89 if stat.Mode() == os.FileMode(vmode) {
90 return stat, nil
91 }
92
93 logrus.Debugf("path: %q, kv.Value(): %q, vmode: %o", path, kv.Value(), vmode)
94 if err := os.Chmod(path, os.FileMode(vmode)); err != nil {
95 return nil, err
96 }
97 return os.Lstat(path)
98 }
99
100 // Since tar_time only has second-level precision, when restoring a file's
101 // timestamp from a tar_time, compare the seconds first and only Chtimes if
102 // the seconds value is different.
103 func tartimeUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
104 info, err := os.Lstat(path)
105 if err != nil {
106 return nil, err
107 }
108
109 v := strings.SplitN(kv.Value(), ".", 2)
110 if len(v) != 2 {
111 return nil, fmt.Errorf("expected a number like 1469104727.000000000")
112 }
113 sec, err := strconv.ParseInt(v[0], 10, 64)
114 if err != nil {
115 return nil, fmt.Errorf("expected seconds, but got %q", v[0])
116 }
117
118 // if the seconds are the same, don't do anything, because the file might
119 	// have a nanosecond value, and using tar_time would zero it out.
120 if info.ModTime().Unix() == sec {
121 return info, nil
122 }
123
124 vtime := time.Unix(sec, 0)
125
126 // if times are same then don't modify anything
127 // comparing Unix, since it does not include Nano seconds
128 if info.ModTime().Unix() == vtime.Unix() {
129 return info, nil
130 }
131
132 	// symlinks are strange: os.Chtimes would pass through to the backing file, so use lchtimes
133 if info.Mode()&os.ModeSymlink != 0 {
134 if err := lchtimes(path, vtime, vtime); err != nil {
135 return nil, err
136 }
137 } else if err := os.Chtimes(path, vtime, vtime); err != nil {
138 return nil, err
139 }
140 return os.Lstat(path)
141 }
142
143 // this is nanosecond precision
144 func timeUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
145 info, err := os.Lstat(path)
146 if err != nil {
147 return nil, err
148 }
149
150 v := strings.SplitN(kv.Value(), ".", 2)
151 if len(v) != 2 {
152 return nil, fmt.Errorf("expected a number like 1469104727.871937272")
153 }
154 nsec, err := strconv.ParseInt(v[0]+v[1], 10, 64)
155 if err != nil {
156 return nil, fmt.Errorf("expected nano seconds, but got %q", v[0]+v[1])
157 }
158 logrus.Debugf("arg: %q; nsec: %d", v[0]+v[1], nsec)
159
160 vtime := time.Unix(0, nsec)
161
162 // if times are same then don't modify anything
163 if info.ModTime().Equal(vtime) {
164 return info, nil
165 }
166
167 	// symlinks are strange: os.Chtimes would pass through to the backing file, so use lchtimes
168 if info.Mode()&os.ModeSymlink != 0 {
169 if err := lchtimes(path, vtime, vtime); err != nil {
170 return nil, err
171 }
172 } else if err := os.Chtimes(path, vtime, vtime); err != nil {
173 return nil, err
174 }
175 return os.Lstat(path)
176 }
177
178 func linkUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
179 linkname, err := govis.Unvis(kv.Value(), DefaultVisFlags)
180 if err != nil {
181 return nil, err
182 }
183 got, err := os.Readlink(path)
184 if err != nil {
185 return nil, err
186 }
187 if got == linkname {
188 return os.Lstat(path)
189 }
190
191 logrus.Debugf("linkUpdateKeywordFunc: removing %q to link to %q", path, linkname)
192 if err := os.Remove(path); err != nil {
193 return nil, err
194 }
195 if err := os.Symlink(linkname, path); err != nil {
196 return nil, err
197 }
198
199 return os.Lstat(path)
200 }
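// NOTE (editorial sketch, not part of the upstream source): the shape of a
// custom UpdateKeywordFunc. This no-op restores nothing and simply reports the
// current state of the file; a real implementation would parse kv.Value() and
// apply it to path before re-stat'ing.
func noopUpdateKeywordFuncSketch(path string, kv KeyVal) (os.FileInfo, error) {
	// intentionally a no-op: nothing from kv is applied to path
	return os.Lstat(path)
}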
0 // +build linux
1
2 package mtree
3
4 import (
5 "encoding/base64"
6 "os"
7
8 "github.com/vbatts/go-mtree/xattr"
9 )
10
11 func xattrUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
12 buf, err := base64.StdEncoding.DecodeString(kv.Value())
13 if err != nil {
14 return nil, err
15 }
16 if err := xattr.Set(path, kv.Keyword().Suffix(), buf); err != nil {
17 return nil, err
18 }
19 return os.Lstat(path)
20 }
0 // +build !linux
1
2 package mtree
3
4 import (
5 "os"
6 )
7
8 func xattrUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
9 return os.Lstat(path)
10 }
0 package mtree
1
2 import "fmt"
3
4 const (
5 // AppName is the name ... of this library/application
6 AppName = "gomtree"
7 )
8
9 const (
10 	// VersionMajor is for API-incompatible changes
11 VersionMajor = 0
12 	// VersionMinor is for functionality added in a backwards-compatible manner
13 VersionMinor = 4
14 // VersionPatch is for backwards-compatible bug fixes
15 VersionPatch = 2
16
17 // VersionDev indicates development branch. Releases will be empty string.
18 VersionDev = ""
19 )
20
21 // Version is the specification version that the package types support.
22 var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
0 package mtree
1
2 import (
3 "fmt"
4 "io"
5 "os"
6 "os/user"
7 "path/filepath"
8 "sort"
9 "strings"
10 "time"
11
12 "github.com/vbatts/go-mtree/pkg/govis"
13 )
14
15 // ExcludeFunc is the type of function called on each path walked to determine
16 // whether to be excluded from the assembled DirectoryHierarchy. If the func
17 // returns true, then the path is not included in the spec.
18 type ExcludeFunc func(path string, info os.FileInfo) bool
19
20 // ExcludeNonDirectories is an ExcludeFunc for excluding all paths that are not directories
21 var ExcludeNonDirectories = func(path string, info os.FileInfo) bool {
22 return !info.IsDir()
23 }
24
25 var defaultSetKeyVals = []KeyVal{"type=file", "nlink=1", "flags=none", "mode=0664"}
26
27 // Walk from root directory and assemble the DirectoryHierarchy
28 // * `excludes` provided are used to skip paths
29 // * `keywords` are the set to collect from the walked paths. The recommended default list is DefaultKeywords.
30 // * `fsEval` is the interface to use in evaluating files. If `nil`, then DefaultFsEval is used.
31 func Walk(root string, excludes []ExcludeFunc, keywords []Keyword, fsEval FsEval) (*DirectoryHierarchy, error) {
32 if fsEval == nil {
33 fsEval = DefaultFsEval{}
34 }
35 creator := dhCreator{DH: &DirectoryHierarchy{}, fs: fsEval}
36 // insert signature and metadata comments first (user, machine, tree, date)
37 for _, e := range signatureEntries(root) {
38 e.Pos = len(creator.DH.Entries)
39 creator.DH.Entries = append(creator.DH.Entries, e)
40 }
41 // insert keyword metadata next
42 for _, e := range keywordEntries(keywords) {
43 e.Pos = len(creator.DH.Entries)
44 creator.DH.Entries = append(creator.DH.Entries, e)
45 }
46 // walk the directory and add entries
47 err := startWalk(&creator, root, func(path string, info os.FileInfo, err error) error {
48 if err != nil {
49 return err
50 }
51 for _, ex := range excludes {
52 if ex(path, info) {
53 return nil
54 }
55 }
56
57 entryPathName := filepath.Base(path)
58 if info.IsDir() {
59 creator.DH.Entries = append(creator.DH.Entries, Entry{
60 Type: BlankType,
61 Pos: len(creator.DH.Entries),
62 })
63
64 // Insert a comment of the full path of the directory's name
65 if creator.curDir != nil {
66 dirname, err := creator.curDir.Path()
67 if err != nil {
68 return err
69 }
70 creator.DH.Entries = append(creator.DH.Entries, Entry{
71 Pos: len(creator.DH.Entries),
72 Raw: "# " + filepath.Join(dirname, entryPathName),
73 Type: CommentType,
74 })
75 } else {
76 entryPathName = "."
77 creator.DH.Entries = append(creator.DH.Entries, Entry{
78 Pos: len(creator.DH.Entries),
79 Raw: "# .",
80 Type: CommentType,
81 })
82 }
83
84 // set the initial /set keywords
85 if creator.curSet == nil {
86 e := Entry{
87 Name: "/set",
88 Type: SpecialType,
89 Pos: len(creator.DH.Entries),
90 Keywords: keyvalSelector(defaultSetKeyVals, keywords),
91 }
92 for _, keyword := range SetKeywords {
93 err := func() error {
94 var r io.Reader
95 if info.Mode().IsRegular() {
96 fh, err := creator.fs.Open(path)
97 if err != nil {
98 return err
99 }
100 defer fh.Close()
101 r = fh
102 }
103 keyFunc, ok := KeywordFuncs[keyword.Prefix()]
104 if !ok {
105 return fmt.Errorf("Unknown keyword %q for file %q", keyword.Prefix(), path)
106 }
107 kvs, err := creator.fs.KeywordFunc(keyFunc)(path, info, r)
108 if err != nil {
109 return err
110 }
111 for _, kv := range kvs {
112 if kv != "" {
113 e.Keywords = append(e.Keywords, kv)
114 }
115 }
116 return nil
117 }()
118 if err != nil {
119 return err
120 }
121 }
122 creator.curSet = &e
123 creator.DH.Entries = append(creator.DH.Entries, e)
124 } else if creator.curSet != nil {
125 // check the attributes of the /set keywords and re-set if changed
126 klist := []KeyVal{}
127 for _, keyword := range SetKeywords {
128 err := func() error {
129 var r io.Reader
130 if info.Mode().IsRegular() {
131 fh, err := creator.fs.Open(path)
132 if err != nil {
133 return err
134 }
135 defer fh.Close()
136 r = fh
137 }
138 keyFunc, ok := KeywordFuncs[keyword.Prefix()]
139 if !ok {
140 return fmt.Errorf("Unknown keyword %q for file %q", keyword.Prefix(), path)
141 }
142 kvs, err := creator.fs.KeywordFunc(keyFunc)(path, info, r)
143 if err != nil {
144 return err
145 }
146 for _, kv := range kvs {
147 if kv != "" {
148 klist = append(klist, kv)
149 }
150 }
151 return nil
152 }()
153 if err != nil {
154 return err
155 }
156 }
157
158 needNewSet := false
159 for _, k := range klist {
160 if !inKeyValSlice(k, creator.curSet.Keywords) {
161 needNewSet = true
162 }
163 }
164 if needNewSet {
165 e := Entry{
166 Name: "/set",
167 Type: SpecialType,
168 Pos: len(creator.DH.Entries),
169 Keywords: keyvalSelector(append(defaultSetKeyVals, klist...), keywords),
170 }
171 creator.curSet = &e
172 creator.DH.Entries = append(creator.DH.Entries, e)
173 }
174 }
175 }
176 encodedEntryName, err := govis.Vis(entryPathName, DefaultVisFlags)
177 if err != nil {
178 return err
179 }
180 e := Entry{
181 Name: encodedEntryName,
182 Pos: len(creator.DH.Entries),
183 Type: RelativeType,
184 Set: creator.curSet,
185 Parent: creator.curDir,
186 }
187 for _, keyword := range keywords {
188 err := func() error {
189 var r io.Reader
190 if info.Mode().IsRegular() {
191 fh, err := creator.fs.Open(path)
192 if err != nil {
193 return err
194 }
195 defer fh.Close()
196 r = fh
197 }
198 keyFunc, ok := KeywordFuncs[keyword.Prefix()]
199 if !ok {
200 return fmt.Errorf("Unknown keyword %q for file %q", keyword.Prefix(), path)
201 }
202 kvs, err := creator.fs.KeywordFunc(keyFunc)(path, info, r)
203 if err != nil {
204 return err
205 }
206 for _, kv := range kvs {
207 if kv != "" && !inKeyValSlice(kv, creator.curSet.Keywords) {
208 e.Keywords = append(e.Keywords, kv)
209 }
210 }
211 return nil
212 }()
213 if err != nil {
214 return err
215 }
216 }
217 if info.IsDir() {
218 if creator.curDir != nil {
219 creator.curDir.Next = &e
220 }
221 e.Prev = creator.curDir
222 creator.curDir = &e
223 } else {
224 if creator.curEnt != nil {
225 creator.curEnt.Next = &e
226 }
227 e.Prev = creator.curEnt
228 creator.curEnt = &e
229 }
230 creator.DH.Entries = append(creator.DH.Entries, e)
231 return nil
232 })
233 return creator.DH, err
234 }
235
236 // startWalk walks the file tree rooted at root, calling walkFn for each file or
237 // directory in the tree, including root. All errors that arise visiting files
238 // and directories are filtered by walkFn. The files are walked in lexical
239 // order, which makes the output deterministic but means that for very
240 // large directories Walk can be inefficient.
241 // Walk does not follow symbolic links.
242 func startWalk(c *dhCreator, root string, walkFn filepath.WalkFunc) error {
243 info, err := c.fs.Lstat(root)
244 if err != nil {
245 return walkFn(root, nil, err)
246 }
247 return walk(c, root, info, walkFn)
248 }
249
250 // walk recursively descends path, calling w.
251 func walk(c *dhCreator, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
252 err := walkFn(path, info, nil)
253 if err != nil {
254 if info.IsDir() && err == filepath.SkipDir {
255 return nil
256 }
257 return err
258 }
259
260 if !info.IsDir() {
261 return nil
262 }
263
264 names, err := readOrderedDirNames(c, path)
265 if err != nil {
266 return walkFn(path, info, err)
267 }
268
269 for _, name := range names {
270 filename := filepath.Join(path, name)
271 fileInfo, err := c.fs.Lstat(filename)
272 if err != nil {
273 if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
274 return err
275 }
276 } else {
277 err = walk(c, filename, fileInfo, walkFn)
278 if err != nil {
279 if !fileInfo.IsDir() || err != filepath.SkipDir {
280 return err
281 }
282 }
283 }
284 }
285 c.DH.Entries = append(c.DH.Entries, Entry{
286 Name: "..",
287 Type: DotDotType,
288 Pos: len(c.DH.Entries),
289 })
290 if c.curDir != nil {
291 c.curDir = c.curDir.Parent
292 }
293 return nil
294 }
295
296 // readOrderedDirNames reads the directory and returns a sorted list of all
297 // entries with non-directories first, followed by directories.
298 func readOrderedDirNames(c *dhCreator, dirname string) ([]string, error) {
299 infos, err := c.fs.Readdir(dirname)
300 if err != nil {
301 return nil, err
302 }
303
304 names := []string{}
305 dirnames := []string{}
306 for _, info := range infos {
307 if info.IsDir() {
308 dirnames = append(dirnames, info.Name())
309 continue
310 }
311 names = append(names, info.Name())
312 }
313 sort.Strings(names)
314 sort.Strings(dirnames)
315 return append(names, dirnames...), nil
316 }
317
318 // signatureEntries is a helper that returns a slice of Entries describing the
319 // signature metadata of the host: the date, user, machine, and tree (the tree
320 // is specified by the `root` argument).
321 // These Entries become comments in the mtree specification, so if a particular
322 // piece of metadata cannot be obtained, the corresponding Entry is simply
323 // omitted.
324 func signatureEntries(root string) []Entry {
325 var sigEntries []Entry
326 user, err := user.Current()
327 if err == nil {
328 userEntry := Entry{
329 Type: CommentType,
330 Raw: fmt.Sprintf("#%16s%s", "user: ", user.Username),
331 }
332 sigEntries = append(sigEntries, userEntry)
333 }
334
335 hostname, err := os.Hostname()
336 if err == nil {
337 hostEntry := Entry{
338 Type: CommentType,
339 Raw: fmt.Sprintf("#%16s%s", "machine: ", hostname),
340 }
341 sigEntries = append(sigEntries, hostEntry)
342 }
343
344 if tree := filepath.Clean(root); tree == "." || tree == ".." {
345 root, err := os.Getwd()
346 if err == nil {
347 // use parent directory of current directory
348 if tree == ".." {
349 root = filepath.Dir(root)
350 }
351 treeEntry := Entry{
352 Type: CommentType,
353 Raw: fmt.Sprintf("#%16s%s", "tree: ", filepath.Clean(root)),
354 }
355 sigEntries = append(sigEntries, treeEntry)
356 }
357 } else {
358 treeEntry := Entry{
359 Type: CommentType,
360 Raw: fmt.Sprintf("#%16s%s", "tree: ", filepath.Clean(root)),
361 }
362 sigEntries = append(sigEntries, treeEntry)
363 }
364
365 dateEntry := Entry{
366 Type: CommentType,
367 Raw: fmt.Sprintf("#%16s%s", "date: ", time.Now().Format("Mon Jan 2 15:04:05 2006")),
368 }
369 sigEntries = append(sigEntries, dateEntry)
370
371 return sigEntries
372 }
373
374 // keywordEntries returns a slice of Entries containing a comment that lists
375 // the keywords requested when generating this manifest.
376 func keywordEntries(keywords []Keyword) []Entry {
377 // Convert all of the keywords to zero-value keyvals.
378 return []Entry{
379 {
380 Type: CommentType,
381 Raw: fmt.Sprintf("#%16s%s", "keywords: ", strings.Join(FromKeywords(keywords), ",")),
382 },
383 }
384 }
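385
386 // As an illustration (the values below are hypothetical), the comment Entries
387 // produced by signatureEntries and keywordEntries render in a manifest
388 // roughly as:
389 //
390 //   #          user: vbatts
391 //   #       machine: localhost
392 //   #          tree: /home/user/src/go-mtree
393 //   #          date: Mon Jan 2 15:04:05 2006
394 //   #      keywords: size,mode,uid,gid,link
395 //
396 // The "#%16s%s" format right-aligns each label in a 16-character field so
397 // that the colons line up.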
0 package mtree
1
2 import (
3 "io/ioutil"
4 "os"
5 "testing"
6 )
7
8 func TestWalk(t *testing.T) {
9 dh, err := Walk(".", nil, append(DefaultKeywords, "sha1"), nil)
10 if err != nil {
11 t.Fatal(err)
12 }
13 numEntries := countTypes(dh)
14
15 fh, err := ioutil.TempFile("", "walk.")
16 if err != nil {
17 t.Fatal(err)
18 }
19 defer os.Remove(fh.Name())
20 defer fh.Close()
21
22 if _, err = dh.WriteTo(fh); err != nil {
23 t.Fatal(err)
24 }
25 if _, err := fh.Seek(0, 0); err != nil {
26 t.Fatal(err)
27 }
28 if dh, err = ParseSpec(fh); err != nil {
29 t.Fatal(err)
30 }
31 for k, v := range countTypes(dh) {
32 if numEntries[k] != v {
33 t.Errorf("for type %s: expected %d, got %d", k, numEntries[k], v)
34 }
35 }
36 }
37
38 func TestWalkDirectory(t *testing.T) {
39 dh, err := Walk(".", []ExcludeFunc{ExcludeNonDirectories}, []Keyword{"type"}, nil)
40 if err != nil {
41 t.Fatal(err)
42 }
43
44 for i := range dh.Entries {
45 for _, keyval := range dh.Entries[i].AllKeys() {
46 if dh.Entries[i].Type == FullType || dh.Entries[i].Type == RelativeType {
47 if keyval.Keyword() == "type" && keyval.Value() != "dir" {
48 t.Errorf("expected only directories, but %q is a %q", dh.Entries[i].Name, keyval.Value())
49 }
50 }
51 }
52 }
53 }
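54
55 // ExampleWalk is a minimal usage sketch: walk the current directory with the
56 // default keywords and serialize the resulting hierarchy to a temporary file
57 // (the "walk-example." prefix is arbitrary). Without an Output comment the
58 // example is compiled but not executed.
59 func ExampleWalk() {
60 dh, err := Walk(".", nil, DefaultKeywords, nil)
61 if err != nil {
62 return
63 }
64 fh, err := ioutil.TempFile("", "walk-example.")
65 if err != nil {
66 return
67 }
68 defer os.Remove(fh.Name())
69 defer fh.Close()
70 // WriteTo emits the manifest in mtree(8) spec format.
71 if _, err := dh.WriteTo(fh); err != nil {
72 return
73 }
74 }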
0 // +build linux
1
2 package xattr
3
4 import (
5 "strings"
6 "syscall"
7 )
8
10 // Get returns the value of the extended attribute (xattr) named `name` on file `path`; values larger than the fixed 1024-byte buffer return an error.
10 func Get(path, name string) ([]byte, error) {
11 dest := make([]byte, 1024)
12 i, err := syscall.Getxattr(path, name, dest)
13 if err != nil {
14 return nil, err
15 }
16 return dest[:i], nil
17 }
18
19 // Set sets the extended attribute (xattr) named `name` on file `path` to `value`.
20 func Set(path, name string, value []byte) error {
21 return syscall.Setxattr(path, name, value, 0)
22 }
23
24 // List returns the names of all extended attributes (xattr) set on file `path`; lists longer than the fixed 1024-byte buffer return an error.
25 func List(path string) ([]string, error) {
26 dest := make([]byte, 1024)
27 i, err := syscall.Listxattr(path, dest)
28 if err != nil {
29 return nil, err
30 }
31
32 // If the returned list is empty, return nil instead of []string{""}
33 str := string(dest[:i])
34 if str == "" {
35 return nil, nil
36 }
37
38 return strings.Split(strings.TrimRight(str, nilByte), nilByte), nil
39 }
40
41 const nilByte = "\x00" // attribute names returned by listxattr(2) are NUL-separated and NUL-terminated
0 // +build linux
1
2 package xattr
3
4 import (
5 "bytes"
6 "io/ioutil"
7 "os"
8 "testing"
9 )
10
11 func TestXattr(t *testing.T) {
12 testDir, present := os.LookupEnv("MTREE_TESTDIR")
13 if !present {
14 testDir = "."
15 }
16 fh, err := ioutil.TempFile(testDir, "xattr.")
17 if err != nil {
18 t.Fatal(err)
19 }
20 defer os.Remove(fh.Name())
21 if err := fh.Close(); err != nil {
22 t.Fatal(err)
23 }
24
25 expected := []byte("1234")
26 if err := Set(fh.Name(), "user.testing", expected); err != nil {
27 t.Fatal(fh.Name(), err)
28 }
29 l, err := List(fh.Name())
30 if err != nil {
31 t.Error(fh.Name(), err)
32 }
33 if len(l) == 0 {
34 t.Errorf("%q: expected at least one xattr in the list; got %d", fh.Name(), len(l))
35 }
36 got, err := Get(fh.Name(), "user.testing")
37 if err != nil {
38 t.Fatal(fh.Name(), err)
39 }
40 if !bytes.Equal(got, expected) {
41 t.Errorf("%q: expected %q; got %q", fh.Name(), expected, got)
42 }
43 }
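44
45 // ExampleSet is a minimal sketch of round-tripping a value through Set, List,
46 // and Get. The attribute name "user.example" and its value are illustrative;
47 // unprivileged processes are generally limited to the user.* namespace, and
48 // the backing filesystem must support xattrs. Without an Output comment the
49 // example is compiled but not executed.
50 func ExampleSet() {
51 fh, err := ioutil.TempFile(".", "xattr-example.")
52 if err != nil {
53 return
54 }
55 defer os.Remove(fh.Name())
56 fh.Close()
57 if err := Set(fh.Name(), "user.example", []byte("hello")); err != nil {
58 return
59 }
60 if names, err := List(fh.Name()); err != nil || len(names) == 0 {
61 return
62 }
63 if val, err := Get(fh.Name(), "user.example"); err != nil || !bytes.Equal(val, []byte("hello")) {
64 return
65 }
66 }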
0 // +build !linux
1
2 package xattr
3
4 // Get would return the value of an extended attribute, but xattrs are not
5 // supported on this platform, so it always returns nil, nil.
6 func Get(path, name string) ([]byte, error) {
7 return nil, nil
8 }
9
10 // Set would set an extended attribute, but xattrs are not supported on this
11 // platform, so it always returns nil.
12 func Set(path, name string, value []byte) error {
13 return nil
14 }
15
16 // List would return the names of extended attributes, but xattrs are not
17 // supported on this platform, so it always returns nil, nil.
18 func List(path string) ([]string, error) {
19 return nil, nil
20 }
0 // +build !linux
1
2 package xattr
3
4 import (
5 "bytes"
6 "io/ioutil"
7 "os"
8 "testing"
9 )
10
11 func TestXattrUnsupported(t *testing.T) {
12 fh, err := ioutil.TempFile(".", "xattr.")
13 if err != nil {
14 t.Fatal(err)
15 }
16 defer os.Remove(fh.Name())
17 if err := fh.Close(); err != nil {
18 t.Fatal(err)
19 }
20
21 // Because xattrs are not supported on this platform, Set is a no-op and
22 // Get/List return nothing, so the expected value is empty.
23 write := []byte("1234")
24 expected := []byte("")
25
26 if err := Set(fh.Name(), "user.testing", write); err != nil {
27 t.Fatal(fh.Name(), err)
28 }
29 l, err := List(fh.Name())
30 if err != nil {
31 t.Error(fh.Name(), err)
32 }
33 if len(l) > 0 {
34 t.Errorf("%q: expected an empty list; got %d entries", fh.Name(), len(l))
35 }
36 got, err := Get(fh.Name(), "user.testing")
37 if err != nil {
38 t.Fatal(fh.Name(), err)
39 }
40 if !bytes.Equal(got, expected) {
41 t.Errorf("%q: expected %q; got %q", fh.Name(), expected, got)
42 }
43 }