ADAPT-Chase committed on
Commit
3799b9b
·
verified ·
1 Parent(s): 0e0d680

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. platform/dbops/binaries/go/go/src/archive/tar/common.go +729 -0
  2. platform/dbops/binaries/go/go/src/archive/tar/example_test.go +71 -0
  3. platform/dbops/binaries/go/go/src/archive/tar/format.go +307 -0
  4. platform/dbops/binaries/go/go/src/archive/tar/fuzz_test.go +80 -0
  5. platform/dbops/binaries/go/go/src/archive/tar/reader.go +882 -0
  6. platform/dbops/binaries/go/go/src/archive/tar/reader_test.go +1674 -0
  7. platform/dbops/binaries/go/go/src/archive/tar/stat_actime1.go +20 -0
  8. platform/dbops/binaries/go/go/src/archive/tar/stat_actime2.go +20 -0
  9. platform/dbops/binaries/go/go/src/archive/tar/stat_unix.go +101 -0
  10. platform/dbops/binaries/go/go/src/archive/tar/strconv.go +327 -0
  11. platform/dbops/binaries/go/go/src/archive/tar/strconv_test.go +441 -0
  12. platform/dbops/binaries/go/go/src/archive/tar/tar_test.go +850 -0
  13. platform/dbops/binaries/go/go/src/archive/tar/testdata/file-and-dir.tar +3 -0
  14. platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-incremental.tar +3 -0
  15. platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-long-nul.tar +3 -0
  16. platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-multi-hdrs.tar +3 -0
  17. platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-nil-sparse-data.tar +3 -0
  18. platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-nil-sparse-hole.tar +3 -0
  19. platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-not-utf8.tar +3 -0
  20. platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-sparse-big.tar +3 -0
  21. platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-utf8.tar +3 -0
  22. platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu.tar +3 -0
  23. platform/dbops/binaries/go/go/src/archive/tar/testdata/hardlink.tar +3 -0
  24. platform/dbops/binaries/go/go/src/archive/tar/testdata/hdr-only.tar +3 -0
  25. platform/dbops/binaries/go/go/src/archive/tar/testdata/invalid-go17.tar +3 -0
  26. platform/dbops/binaries/go/go/src/archive/tar/testdata/issue10968.tar +3 -0
  27. platform/dbops/binaries/go/go/src/archive/tar/testdata/issue11169.tar +3 -0
  28. platform/dbops/binaries/go/go/src/archive/tar/testdata/issue12435.tar +3 -0
  29. platform/dbops/binaries/go/go/src/archive/tar/testdata/neg-size.tar +3 -0
  30. platform/dbops/binaries/go/go/src/archive/tar/testdata/nil-uid.tar +3 -0
  31. platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-bad-hdr-file.tar +3 -0
  32. platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-bad-hdr-large.tar.bz2 +3 -0
  33. platform/dbops/binaries/go/go/src/archive/tar/writer.go +698 -0
  34. platform/dbops/binaries/go/go/src/archive/tar/writer_test.go +1401 -0
  35. platform/dbops/binaries/go/go/src/archive/zip/example_test.go +93 -0
  36. platform/dbops/binaries/go/go/src/archive/zip/fuzz_test.go +81 -0
  37. platform/dbops/binaries/go/go/src/archive/zip/reader.go +983 -0
  38. platform/dbops/binaries/go/go/src/archive/zip/reader_test.go +1836 -0
  39. platform/dbops/binaries/go/go/src/archive/zip/register.go +147 -0
  40. platform/dbops/binaries/go/go/src/archive/zip/struct.go +419 -0
  41. platform/dbops/binaries/go/go/src/archive/zip/writer.go +666 -0
  42. platform/dbops/binaries/go/go/src/archive/zip/writer_test.go +673 -0
  43. platform/dbops/binaries/go/go/src/archive/zip/zip_test.go +821 -0
  44. platform/dbops/binaries/go/go/src/runtime/msan0.go +23 -0
  45. platform/dbops/binaries/go/go/src/runtime/msan_amd64.s +89 -0
  46. platform/dbops/binaries/go/go/src/runtime/msan_arm64.s +73 -0
  47. platform/dbops/binaries/go/go/src/runtime/msan_loong64.s +72 -0
  48. platform/dbops/binaries/go/go/src/runtime/msize_allocheaders.go +36 -0
  49. platform/dbops/binaries/go/go/src/runtime/msize_noallocheaders.go +29 -0
  50. platform/dbops/binaries/go/go/src/runtime/mspanset.go +404 -0
platform/dbops/binaries/go/go/src/archive/tar/common.go ADDED
@@ -0,0 +1,729 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2009 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ // Package tar implements access to tar archives.
6
+ //
7
+ // Tape archives (tar) are a file format for storing a sequence of files that
8
+ // can be read and written in a streaming manner.
9
+ // This package aims to cover most variations of the format,
10
+ // including those produced by GNU and BSD tar tools.
11
+ package tar
12
+
13
+ import (
14
+ "errors"
15
+ "fmt"
16
+ "internal/godebug"
17
+ "io/fs"
18
+ "math"
19
+ "path"
20
+ "reflect"
21
+ "strconv"
22
+ "strings"
23
+ "time"
24
+ )
25
+
26
// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit
// architectures. If a large value is encountered when decoding, the result
// stored in Header will be the truncated version.

// tarinsecurepath is the GODEBUG knob named "tarinsecurepath".
// NOTE(review): presumably consulted by Reader.Next when deciding whether to
// return ErrInsecurePath for absolute or ".."-containing names — confirm in
// reader.go (not visible here).
var tarinsecurepath = godebug.New("tarinsecurepath")
31
+
32
// Sentinel errors returned by this package. The exported values are part of
// the public API; the unexported ones belong to sparse-file handling.
var (
	ErrHeader          = errors.New("archive/tar: invalid tar header")
	ErrWriteTooLong    = errors.New("archive/tar: write too long")
	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
	ErrWriteAfterClose = errors.New("archive/tar: write after close")
	ErrInsecurePath    = errors.New("archive/tar: insecure file path")
	errMissData        = errors.New("archive/tar: sparse file references non-existent data")
	errUnrefData       = errors.New("archive/tar: sparse file contains unreferenced data")
	errWriteHole       = errors.New("archive/tar: write non-NUL byte in sparse hole")
)
42
+
43
// headerError accumulates the individual reasons why a Header could not be
// encoded. A value with no non-empty reasons still formats as a generic
// failure message.
type headerError []string

// Error implements the error interface by joining every non-empty reason
// onto a fixed prefix.
func (he headerError) Error() string {
	const prefix = "archive/tar: cannot encode header"
	var reasons []string
	for _, reason := range he {
		if reason != "" {
			reasons = append(reasons, reason)
		}
	}
	if len(reasons) == 0 {
		return prefix
	}
	return prefix + ": " + strings.Join(reasons, "; and ")
}
58
+
59
// Type flags for Header.Typeflag.
const (
	// Type '0' indicates a regular file.
	TypeReg = '0'

	// Deprecated: Use TypeReg instead.
	TypeRegA = '\x00'

	// Types '1' through '6' are header-only entries and never carry a data body.
	TypeLink    = '1' // Hard link
	TypeSymlink = '2' // Symbolic link
	TypeChar    = '3' // Character device node
	TypeBlock   = '4' // Block device node
	TypeDir     = '5' // Directory
	TypeFifo    = '6' // FIFO node

	// Type '7' is reserved by the format.
	TypeCont = '7'

	// Type 'x' marks a PAX extended header whose key-value records apply
	// only to the next file. This package handles these transparently.
	TypeXHeader = 'x'

	// Type 'g' marks a PAX extended header whose key-value records apply
	// to all subsequent files. This package can parse and compose such
	// headers, but does not currently persist the global state across files.
	TypeXGlobalHeader = 'g'

	// Type 'S' indicates a sparse file in the GNU format.
	TypeGNUSparse = 'S'

	// Types 'L' and 'K' are GNU meta files that carry the path or link name
	// for the following entry. This package handles these transparently.
	TypeGNULongName = 'L'
	TypeGNULongLink = 'K'
)
98
+
99
// Keywords for PAX extended header records.
const (
	paxNone = "" // no PAX key is suitable for the field

	paxPath     = "path"
	paxLinkpath = "linkpath"
	paxSize     = "size"
	paxUid      = "uid"
	paxGid      = "gid"
	paxUname    = "uname"
	paxGname    = "gname"
	paxMtime    = "mtime"
	paxAtime    = "atime"
	paxCtime    = "ctime"   // dropped from a later revision of the PAX spec, but was valid
	paxCharset  = "charset" // currently unused
	paxComment  = "comment" // currently unused

	// Namespace prefix for extended-attribute records.
	paxSchilyXattr = "SCHILY.xattr."

	// Keywords for GNU sparse files in a PAX extended header.
	paxGNUSparse          = "GNU.sparse."
	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
	paxGNUSparseOffset    = "GNU.sparse.offset"
	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
	paxGNUSparseMap       = "GNU.sparse.map"
	paxGNUSparseName      = "GNU.sparse.name"
	paxGNUSparseMajor     = "GNU.sparse.major"
	paxGNUSparseMinor     = "GNU.sparse.minor"
	paxGNUSparseSize      = "GNU.sparse.size"
	paxGNUSparseRealSize  = "GNU.sparse.realsize"
)
129
+
130
+ // basicKeys is a set of the PAX keys for which we have built-in support.
131
+ // This does not contain "charset" or "comment", which are both PAX-specific,
132
+ // so adding them as first-class features of Header is unlikely.
133
+ // Users can use the PAXRecords field to set it themselves.
134
+ var basicKeys = map[string]bool{
135
+ paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true,
136
+ paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true,
137
+ }
138
+
139
// A Header represents a single header in a tar archive.
// Some fields may not be populated.
//
// For forward compatibility, users that retrieve a Header from Reader.Next,
// mutate it in some ways, and then pass it back to Writer.WriteHeader
// should do so by creating a new Header and copying the fields
// that they are interested in preserving.
type Header struct {
	// Typeflag is the type of header entry.
	// The zero value is automatically promoted to either TypeReg or TypeDir
	// depending on the presence of a trailing slash in Name.
	Typeflag byte

	Name     string // Name of file entry
	Linkname string // Target name of link (valid for TypeLink or TypeSymlink)

	Size  int64  // Logical file size in bytes
	Mode  int64  // Permission and mode bits
	Uid   int    // User ID of owner
	Gid   int    // Group ID of owner
	Uname string // User name of owner
	Gname string // Group name of owner

	// If the Format is unspecified, then Writer.WriteHeader rounds ModTime
	// to the nearest second and ignores the AccessTime and ChangeTime fields.
	//
	// To use AccessTime or ChangeTime, specify the Format as PAX or GNU.
	// To use sub-second resolution, specify the Format as PAX.
	ModTime    time.Time // Modification time
	AccessTime time.Time // Access time (requires either PAX or GNU support)
	ChangeTime time.Time // Change time (requires either PAX or GNU support)

	Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
	Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)

	// Xattrs stores extended attributes as PAX records under the
	// "SCHILY.xattr." namespace.
	//
	// The following are semantically equivalent:
	//	h.Xattrs[key] = value
	//	h.PAXRecords["SCHILY.xattr."+key] = value
	//
	// When Writer.WriteHeader is called, the contents of Xattrs will take
	// precedence over those in PAXRecords.
	//
	// Deprecated: Use PAXRecords instead.
	Xattrs map[string]string

	// PAXRecords is a map of PAX extended header records.
	//
	// User-defined records should have keys of the following form:
	//	VENDOR.keyword
	// Where VENDOR is some namespace in all uppercase, and keyword may
	// not contain the '=' character (e.g., "GOLANG.pkg.version").
	// The key and value should be non-empty UTF-8 strings.
	//
	// When Writer.WriteHeader is called, PAX records derived from the
	// other fields in Header take precedence over PAXRecords.
	PAXRecords map[string]string

	// Format specifies the format of the tar header.
	//
	// This is set by Reader.Next as a best-effort guess at the format.
	// Since the Reader liberally reads some non-compliant files,
	// it is possible for this to be FormatUnknown.
	//
	// If the format is unspecified when Writer.WriteHeader is called,
	// then it uses the first format (in the order of USTAR, PAX, GNU)
	// capable of encoding this Header (see Format).
	Format Format
}
210
+
211
// sparseEntry describes a single fragment of a file:
// Length bytes beginning at Offset.
type sparseEntry struct{ Offset, Length int64 }

// endOffset reports the offset immediately past the fragment.
func (s sparseEntry) endOffset() int64 {
	return s.Offset + s.Length
}
215
+
216
// A sparse file can be represented as either a sparseDatas or a sparseHoles.
// As long as the total size is known, they are equivalent and one can be
// converted to the other form and back. The various tar formats with sparse
// file support represent sparse files in the sparseDatas form. That is, they
// specify the fragments in the file that have data, and treat everything else
// as having zero bytes. As such, the encoding and decoding logic in this
// package deals with sparseDatas.
//
// However, the external API uses sparseHoles instead of sparseDatas because the
// zero value of sparseHoles logically represents a normal file (i.e., there are
// no holes in it). On the other hand, the zero value of sparseDatas implies
// that the file has no data in it, which is rather odd.
//
// As an example, if the underlying raw file contains the 8-byte data:
//
//	var compactFile = "abcdefgh"
//
// And the sparse map has the following entries:
//
//	var spd sparseDatas = []sparseEntry{
//		{Offset: 2, Length: 5},  // Data fragment for 2..6
//		{Offset: 18, Length: 3}, // Data fragment for 18..20
//	}
//	var sph sparseHoles = []sparseEntry{
//		{Offset: 0, Length: 2},  // Hole fragment for 0..1
//		{Offset: 7, Length: 11}, // Hole fragment for 7..17
//		{Offset: 21, Length: 4}, // Hole fragment for 21..24
//	}
//
// Then the content of the resulting sparse file with a Header.Size of 25 is:
//
//	var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
type (
	sparseDatas []sparseEntry
	sparseHoles []sparseEntry
)
252
+
253
+ // validateSparseEntries reports whether sp is a valid sparse map.
254
+ // It does not matter whether sp represents data fragments or hole fragments.
255
+ func validateSparseEntries(sp []sparseEntry, size int64) bool {
256
+ // Validate all sparse entries. These are the same checks as performed by
257
+ // the BSD tar utility.
258
+ if size < 0 {
259
+ return false
260
+ }
261
+ var pre sparseEntry
262
+ for _, cur := range sp {
263
+ switch {
264
+ case cur.Offset < 0 || cur.Length < 0:
265
+ return false // Negative values are never okay
266
+ case cur.Offset > math.MaxInt64-cur.Length:
267
+ return false // Integer overflow with large length
268
+ case cur.endOffset() > size:
269
+ return false // Region extends beyond the actual size
270
+ case pre.endOffset() > cur.Offset:
271
+ return false // Regions cannot overlap and must be in order
272
+ }
273
+ pre = cur
274
+ }
275
+ return true
276
+ }
277
+
278
+ // alignSparseEntries mutates src and returns dst where each fragment's
279
+ // starting offset is aligned up to the nearest block edge, and each
280
+ // ending offset is aligned down to the nearest block edge.
281
+ //
282
+ // Even though the Go tar Reader and the BSD tar utility can handle entries
283
+ // with arbitrary offsets and lengths, the GNU tar utility can only handle
284
+ // offsets and lengths that are multiples of blockSize.
285
+ func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
286
+ dst := src[:0]
287
+ for _, s := range src {
288
+ pos, end := s.Offset, s.endOffset()
289
+ pos += blockPadding(+pos) // Round-up to nearest blockSize
290
+ if end != size {
291
+ end -= blockPadding(-end) // Round-down to nearest blockSize
292
+ }
293
+ if pos < end {
294
+ dst = append(dst, sparseEntry{Offset: pos, Length: end - pos})
295
+ }
296
+ }
297
+ return dst
298
+ }
299
+
300
+ // invertSparseEntries converts a sparse map from one form to the other.
301
+ // If the input is sparseHoles, then it will output sparseDatas and vice-versa.
302
+ // The input must have been already validated.
303
+ //
304
+ // This function mutates src and returns a normalized map where:
305
+ // - adjacent fragments are coalesced together
306
+ // - only the last fragment may be empty
307
+ // - the endOffset of the last fragment is the total size
308
+ func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry {
309
+ dst := src[:0]
310
+ var pre sparseEntry
311
+ for _, cur := range src {
312
+ if cur.Length == 0 {
313
+ continue // Skip empty fragments
314
+ }
315
+ pre.Length = cur.Offset - pre.Offset
316
+ if pre.Length > 0 {
317
+ dst = append(dst, pre) // Only add non-empty fragments
318
+ }
319
+ pre.Offset = cur.endOffset()
320
+ }
321
+ pre.Length = size - pre.Offset // Possibly the only empty fragment
322
+ return append(dst, pre)
323
+ }
324
+
325
// fileState reports how many bytes remain for the file currently being
// processed: logically (counting sparse holes) and physically (bytes
// actually stored in the tar archive).
//
// Invariant: logicalRemaining >= physicalRemaining.
type fileState interface {
	logicalRemaining() int64
	physicalRemaining() int64
}
333
+
334
// allowedFormats determines which formats can be used.
// The value returned is the logical OR of multiple possible formats.
// If the value is FormatUnknown, then the input Header cannot be encoded
// and an error is returned explaining why.
//
// As a by-product of checking the fields, this function returns paxHdrs, which
// contain all fields that could not be directly encoded.
// A value receiver ensures that this method does not mutate the source Header.
func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) {
	format = FormatUSTAR | FormatPAX | FormatGNU
	paxHdrs = make(map[string]string)

	// The verify* closures below narrow format and fill paxHdrs as a side
	// effect, recording a human-readable reason each time a format is ruled out.
	var whyNoUSTAR, whyNoPAX, whyNoGNU string
	var preferPAX bool // Prefer PAX over USTAR
	verifyString := func(s string, size int, name, paxKey string) {
		// NUL-terminator is optional for path and linkpath.
		// Technically, it is required for uname and gname,
		// but neither GNU nor BSD tar checks for it.
		tooLong := len(s) > size
		allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath
		if hasNUL(s) || (tooLong && !allowLongGNU) {
			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s)
			format.mustNotBe(FormatGNU)
		}
		if !isASCII(s) || tooLong {
			canSplitUSTAR := paxKey == paxPath
			if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok {
				whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s)
				format.mustNotBe(FormatUSTAR)
			}
			if paxKey == paxNone {
				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s)
				format.mustNotBe(FormatPAX)
			} else {
				paxHdrs[paxKey] = s
			}
		}
		// Keep a user-supplied PAX record when it already matches the field.
		if v, ok := h.PAXRecords[paxKey]; ok && v == s {
			paxHdrs[paxKey] = v
		}
	}
	verifyNumeric := func(n int64, size int, name, paxKey string) {
		if !fitsInBase256(size, n) {
			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n)
			format.mustNotBe(FormatGNU)
		}
		if !fitsInOctal(size, n) {
			whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n)
			format.mustNotBe(FormatUSTAR)
			if paxKey == paxNone {
				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n)
				format.mustNotBe(FormatPAX)
			} else {
				paxHdrs[paxKey] = strconv.FormatInt(n, 10)
			}
		}
		// Keep a user-supplied PAX record when it already matches the field.
		if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) {
			paxHdrs[paxKey] = v
		}
	}
	verifyTime := func(ts time.Time, size int, name, paxKey string) {
		if ts.IsZero() {
			return // Always okay
		}
		if !fitsInBase256(size, ts.Unix()) {
			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts)
			format.mustNotBe(FormatGNU)
		}
		isMtime := paxKey == paxMtime
		fitsOctal := fitsInOctal(size, ts.Unix())
		if (isMtime && !fitsOctal) || !isMtime {
			whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts)
			format.mustNotBe(FormatUSTAR)
		}
		needsNano := ts.Nanosecond() != 0
		if !isMtime || !fitsOctal || needsNano {
			preferPAX = true // USTAR may truncate sub-second measurements
			if paxKey == paxNone {
				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts)
				format.mustNotBe(FormatPAX)
			} else {
				paxHdrs[paxKey] = formatPAXTime(ts)
			}
		}
		// Keep a user-supplied PAX record when it already matches the field.
		if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) {
			paxHdrs[paxKey] = v
		}
	}

	// Check basic fields.
	var blk block
	v7 := blk.toV7()
	ustar := blk.toUSTAR()
	gnu := blk.toGNU()
	verifyString(h.Name, len(v7.name()), "Name", paxPath)
	verifyString(h.Linkname, len(v7.linkName()), "Linkname", paxLinkpath)
	verifyString(h.Uname, len(ustar.userName()), "Uname", paxUname)
	verifyString(h.Gname, len(ustar.groupName()), "Gname", paxGname)
	verifyNumeric(h.Mode, len(v7.mode()), "Mode", paxNone)
	verifyNumeric(int64(h.Uid), len(v7.uid()), "Uid", paxUid)
	verifyNumeric(int64(h.Gid), len(v7.gid()), "Gid", paxGid)
	verifyNumeric(h.Size, len(v7.size()), "Size", paxSize)
	verifyNumeric(h.Devmajor, len(ustar.devMajor()), "Devmajor", paxNone)
	verifyNumeric(h.Devminor, len(ustar.devMinor()), "Devminor", paxNone)
	verifyTime(h.ModTime, len(v7.modTime()), "ModTime", paxMtime)
	verifyTime(h.AccessTime, len(gnu.accessTime()), "AccessTime", paxAtime)
	verifyTime(h.ChangeTime, len(gnu.changeTime()), "ChangeTime", paxCtime)

	// Check for header-only types.
	var whyOnlyPAX, whyOnlyGNU string
	switch h.Typeflag {
	case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse:
		// Exclude TypeLink and TypeSymlink, since they may reference directories.
		if strings.HasSuffix(h.Name, "/") {
			return FormatUnknown, nil, headerError{"filename may not have trailing slash"}
		}
	case TypeXHeader, TypeGNULongName, TypeGNULongLink:
		return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"}
	case TypeXGlobalHeader:
		// A global header may only carry Name, Xattrs, PAXRecords, and Format;
		// any other populated field is rejected.
		h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}
		if !reflect.DeepEqual(h, h2) {
			return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"}
		}
		whyOnlyPAX = "only PAX supports TypeXGlobalHeader"
		format.mayOnlyBe(FormatPAX)
	}
	if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 {
		return FormatUnknown, nil, headerError{"negative size on header-only type"}
	}

	// Check PAX records.
	if len(h.Xattrs) > 0 {
		for k, v := range h.Xattrs {
			paxHdrs[paxSchilyXattr+k] = v
		}
		whyOnlyPAX = "only PAX supports Xattrs"
		format.mayOnlyBe(FormatPAX)
	}
	if len(h.PAXRecords) > 0 {
		for k, v := range h.PAXRecords {
			switch _, exists := paxHdrs[k]; {
			case exists:
				continue // Do not overwrite existing records
			case h.Typeflag == TypeXGlobalHeader:
				paxHdrs[k] = v // Copy all records
			case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse):
				paxHdrs[k] = v // Ignore local records that may conflict
			}
		}
		whyOnlyPAX = "only PAX supports PAXRecords"
		format.mayOnlyBe(FormatPAX)
	}
	for k, v := range paxHdrs {
		if !validPAXRecord(k, v) {
			return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)}
		}
	}

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Check sparse files.
		if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse {
			if isHeaderOnlyType(h.Typeflag) {
				return FormatUnknown, nil, headerError{"header-only type cannot be sparse"}
			}
			if !validateSparseEntries(h.SparseHoles, h.Size) {
				return FormatUnknown, nil, headerError{"invalid sparse holes"}
			}
			if h.Typeflag == TypeGNUSparse {
				whyOnlyGNU = "only GNU supports TypeGNUSparse"
				format.mayOnlyBe(FormatGNU)
			} else {
				whyNoGNU = "GNU supports sparse files only with TypeGNUSparse"
				format.mustNotBe(FormatGNU)
			}
			whyNoUSTAR = "USTAR does not support sparse files"
			format.mustNotBe(FormatUSTAR)
		}
	*/

	// Check desired format.
	if wantFormat := h.Format; wantFormat != FormatUnknown {
		if wantFormat.has(FormatPAX) && !preferPAX {
			wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too
		}
		format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted
	}
	if format == FormatUnknown {
		// Report why the explicitly requested format (if any) was ruled out.
		switch h.Format {
		case FormatUSTAR:
			err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU}
		case FormatPAX:
			err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU}
		case FormatGNU:
			err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX}
		default:
			err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU}
		}
	}
	return format, paxHdrs, err
}
536
+
537
+ // FileInfo returns an fs.FileInfo for the Header.
538
+ func (h *Header) FileInfo() fs.FileInfo {
539
+ return headerFileInfo{h}
540
+ }
541
+
542
+ // headerFileInfo implements fs.FileInfo.
543
+ type headerFileInfo struct {
544
+ h *Header
545
+ }
546
+
547
+ func (fi headerFileInfo) Size() int64 { return fi.h.Size }
548
+ func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
549
+ func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
550
+ func (fi headerFileInfo) Sys() any { return fi.h }
551
+
552
+ // Name returns the base name of the file.
553
+ func (fi headerFileInfo) Name() string {
554
+ if fi.IsDir() {
555
+ return path.Base(path.Clean(fi.h.Name))
556
+ }
557
+ return path.Base(fi.h.Name)
558
+ }
559
+
560
+ // Mode returns the permission and mode bits for the headerFileInfo.
561
+ func (fi headerFileInfo) Mode() (mode fs.FileMode) {
562
+ // Set file permission bits.
563
+ mode = fs.FileMode(fi.h.Mode).Perm()
564
+
565
+ // Set setuid, setgid and sticky bits.
566
+ if fi.h.Mode&c_ISUID != 0 {
567
+ mode |= fs.ModeSetuid
568
+ }
569
+ if fi.h.Mode&c_ISGID != 0 {
570
+ mode |= fs.ModeSetgid
571
+ }
572
+ if fi.h.Mode&c_ISVTX != 0 {
573
+ mode |= fs.ModeSticky
574
+ }
575
+
576
+ // Set file mode bits; clear perm, setuid, setgid, and sticky bits.
577
+ switch m := fs.FileMode(fi.h.Mode) &^ 07777; m {
578
+ case c_ISDIR:
579
+ mode |= fs.ModeDir
580
+ case c_ISFIFO:
581
+ mode |= fs.ModeNamedPipe
582
+ case c_ISLNK:
583
+ mode |= fs.ModeSymlink
584
+ case c_ISBLK:
585
+ mode |= fs.ModeDevice
586
+ case c_ISCHR:
587
+ mode |= fs.ModeDevice
588
+ mode |= fs.ModeCharDevice
589
+ case c_ISSOCK:
590
+ mode |= fs.ModeSocket
591
+ }
592
+
593
+ switch fi.h.Typeflag {
594
+ case TypeSymlink:
595
+ mode |= fs.ModeSymlink
596
+ case TypeChar:
597
+ mode |= fs.ModeDevice
598
+ mode |= fs.ModeCharDevice
599
+ case TypeBlock:
600
+ mode |= fs.ModeDevice
601
+ case TypeDir:
602
+ mode |= fs.ModeDir
603
+ case TypeFifo:
604
+ mode |= fs.ModeNamedPipe
605
+ }
606
+
607
+ return mode
608
+ }
609
+
610
// String returns a human-readable summary of the file info,
// formatted by fs.FormatFileInfo.
func (fi headerFileInfo) String() string {
	return fs.FormatFileInfo(fi)
}
613
+
614
// sysStat, if non-nil, populates h from system-dependent fields of fi.
// NOTE(review): presumably installed by the platform-specific stat files
// (e.g. stat_unix.go) at init time — confirm; it remains nil on platforms
// without such support.
var sysStat func(fi fs.FileInfo, h *Header) error
616
+
617
const (
	// Setuid/setgid/sticky mode constants from the USTAR spec:
	// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
	c_ISUID = 04000 // Set uid
	c_ISGID = 02000 // Set gid
	c_ISVTX = 01000 // Save text (sticky bit)

	// Conventional Unix file-type bits; no common tar standard defines these.
	// Header.FileInfo understands them, but FileInfoHeader never produces them.
	c_ISDIR  = 040000  // Directory
	c_ISFIFO = 010000  // FIFO
	c_ISREG  = 0100000 // Regular file
	c_ISLNK  = 0120000 // Symbolic link
	c_ISBLK  = 060000  // Block special file
	c_ISCHR  = 020000  // Character special file
	c_ISSOCK = 0140000 // Socket
)
634
+
635
+ // FileInfoHeader creates a partially-populated [Header] from fi.
636
+ // If fi describes a symlink, FileInfoHeader records link as the link target.
637
+ // If fi describes a directory, a slash is appended to the name.
638
+ //
639
+ // Since fs.FileInfo's Name method only returns the base name of
640
+ // the file it describes, it may be necessary to modify Header.Name
641
+ // to provide the full path name of the file.
642
+ func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) {
643
+ if fi == nil {
644
+ return nil, errors.New("archive/tar: FileInfo is nil")
645
+ }
646
+ fm := fi.Mode()
647
+ h := &Header{
648
+ Name: fi.Name(),
649
+ ModTime: fi.ModTime(),
650
+ Mode: int64(fm.Perm()), // or'd with c_IS* constants later
651
+ }
652
+ switch {
653
+ case fm.IsRegular():
654
+ h.Typeflag = TypeReg
655
+ h.Size = fi.Size()
656
+ case fi.IsDir():
657
+ h.Typeflag = TypeDir
658
+ h.Name += "/"
659
+ case fm&fs.ModeSymlink != 0:
660
+ h.Typeflag = TypeSymlink
661
+ h.Linkname = link
662
+ case fm&fs.ModeDevice != 0:
663
+ if fm&fs.ModeCharDevice != 0 {
664
+ h.Typeflag = TypeChar
665
+ } else {
666
+ h.Typeflag = TypeBlock
667
+ }
668
+ case fm&fs.ModeNamedPipe != 0:
669
+ h.Typeflag = TypeFifo
670
+ case fm&fs.ModeSocket != 0:
671
+ return nil, fmt.Errorf("archive/tar: sockets not supported")
672
+ default:
673
+ return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
674
+ }
675
+ if fm&fs.ModeSetuid != 0 {
676
+ h.Mode |= c_ISUID
677
+ }
678
+ if fm&fs.ModeSetgid != 0 {
679
+ h.Mode |= c_ISGID
680
+ }
681
+ if fm&fs.ModeSticky != 0 {
682
+ h.Mode |= c_ISVTX
683
+ }
684
+ // If possible, populate additional fields from OS-specific
685
+ // FileInfo fields.
686
+ if sys, ok := fi.Sys().(*Header); ok {
687
+ // This FileInfo came from a Header (not the OS). Use the
688
+ // original Header to populate all remaining fields.
689
+ h.Uid = sys.Uid
690
+ h.Gid = sys.Gid
691
+ h.Uname = sys.Uname
692
+ h.Gname = sys.Gname
693
+ h.AccessTime = sys.AccessTime
694
+ h.ChangeTime = sys.ChangeTime
695
+ if sys.Xattrs != nil {
696
+ h.Xattrs = make(map[string]string)
697
+ for k, v := range sys.Xattrs {
698
+ h.Xattrs[k] = v
699
+ }
700
+ }
701
+ if sys.Typeflag == TypeLink {
702
+ // hard link
703
+ h.Typeflag = TypeLink
704
+ h.Size = 0
705
+ h.Linkname = sys.Linkname
706
+ }
707
+ if sys.PAXRecords != nil {
708
+ h.PAXRecords = make(map[string]string)
709
+ for k, v := range sys.PAXRecords {
710
+ h.PAXRecords[k] = v
711
+ }
712
+ }
713
+ }
714
+ if sysStat != nil {
715
+ return h, sysStat(fi, h)
716
+ }
717
+ return h, nil
718
+ }
719
+
720
+ // isHeaderOnlyType checks if the given type flag is of the type that has no
721
+ // data section even if a size is specified.
722
+ func isHeaderOnlyType(flag byte) bool {
723
+ switch flag {
724
+ case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
725
+ return true
726
+ default:
727
+ return false
728
+ }
729
+ }
platform/dbops/binaries/go/go/src/archive/tar/example_test.go ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2013 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar_test
6
+
7
+ import (
8
+ "archive/tar"
9
+ "bytes"
10
+ "fmt"
11
+ "io"
12
+ "log"
13
+ "os"
14
+ )
15
+
16
+ func Example_minimal() {
17
+ // Create and add some files to the archive.
18
+ var buf bytes.Buffer
19
+ tw := tar.NewWriter(&buf)
20
+ var files = []struct {
21
+ Name, Body string
22
+ }{
23
+ {"readme.txt", "This archive contains some text files."},
24
+ {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
25
+ {"todo.txt", "Get animal handling license."},
26
+ }
27
+ for _, file := range files {
28
+ hdr := &tar.Header{
29
+ Name: file.Name,
30
+ Mode: 0600,
31
+ Size: int64(len(file.Body)),
32
+ }
33
+ if err := tw.WriteHeader(hdr); err != nil {
34
+ log.Fatal(err)
35
+ }
36
+ if _, err := tw.Write([]byte(file.Body)); err != nil {
37
+ log.Fatal(err)
38
+ }
39
+ }
40
+ if err := tw.Close(); err != nil {
41
+ log.Fatal(err)
42
+ }
43
+
44
+ // Open and iterate through the files in the archive.
45
+ tr := tar.NewReader(&buf)
46
+ for {
47
+ hdr, err := tr.Next()
48
+ if err == io.EOF {
49
+ break // End of archive
50
+ }
51
+ if err != nil {
52
+ log.Fatal(err)
53
+ }
54
+ fmt.Printf("Contents of %s:\n", hdr.Name)
55
+ if _, err := io.Copy(os.Stdout, tr); err != nil {
56
+ log.Fatal(err)
57
+ }
58
+ fmt.Println()
59
+ }
60
+
61
+ // Output:
62
+ // Contents of readme.txt:
63
+ // This archive contains some text files.
64
+ // Contents of gopher.txt:
65
+ // Gopher names:
66
+ // George
67
+ // Geoffrey
68
+ // Gonzo
69
+ // Contents of todo.txt:
70
+ // Get animal handling license.
71
+ }
platform/dbops/binaries/go/go/src/archive/tar/format.go ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2016 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar
6
+
7
+ import "strings"
8
+
9
+ // Format represents the tar archive format.
10
+ //
11
+ // The original tar format was introduced in Unix V7.
12
+ // Since then, there have been multiple competing formats attempting to
13
+ // standardize or extend the V7 format to overcome its limitations.
14
+ // The most common formats are the USTAR, PAX, and GNU formats,
15
+ // each with their own advantages and limitations.
16
+ //
17
+ // The following table captures the capabilities of each format:
18
+ //
19
+ // | USTAR | PAX | GNU
20
+ // ------------------+--------+-----------+----------
21
+ // Name | 256B | unlimited | unlimited
22
+ // Linkname | 100B | unlimited | unlimited
23
+ // Size | uint33 | unlimited | uint89
24
+ // Mode | uint21 | uint21 | uint57
25
+ // Uid/Gid | uint21 | unlimited | uint57
26
+ // Uname/Gname | 32B | unlimited | 32B
27
+ // ModTime | uint33 | unlimited | int89
28
+ // AccessTime | n/a | unlimited | int89
29
+ // ChangeTime | n/a | unlimited | int89
30
+ // Devmajor/Devminor | uint21 | uint21 | uint57
31
+ // ------------------+--------+-----------+----------
32
+ // string encoding | ASCII | UTF-8 | binary
33
+ // sub-second times | no | yes | no
34
+ // sparse files | no | yes | yes
35
+ //
36
+ // The table's upper portion shows the [Header] fields, where each format reports
37
+ // the maximum number of bytes allowed for each string field and
38
+ // the integer type used to store each numeric field
39
+ // (where timestamps are stored as the number of seconds since the Unix epoch).
40
+ //
41
+ // The table's lower portion shows specialized features of each format,
42
+ // such as supported string encodings, support for sub-second timestamps,
43
+ // or support for sparse files.
44
+ //
45
+ // The Writer currently provides no support for sparse files.
46
+ type Format int
47
+
48
+ // Constants to identify various tar formats.
49
+ const (
50
+ // Deliberately hide the meaning of constants from public API.
51
+ _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc...
52
+
53
+ // FormatUnknown indicates that the format is unknown.
54
+ FormatUnknown
55
+
56
+ // The format of the original Unix V7 tar tool prior to standardization.
57
+ formatV7
58
+
59
+ // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988.
60
+ //
61
+ // While this format is compatible with most tar readers,
62
+ // the format has several limitations making it unsuitable for some usages.
63
+ // Most notably, it cannot support sparse files, files larger than 8GiB,
64
+ // filenames larger than 256 characters, and non-ASCII filenames.
65
+ //
66
+ // Reference:
67
+ // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
68
+ FormatUSTAR
69
+
70
+ // FormatPAX represents the PAX header format defined in POSIX.1-2001.
71
+ //
72
+ // PAX extends USTAR by writing a special file with Typeflag TypeXHeader
73
+ // preceding the original header. This file contains a set of key-value
74
+ // records, which are used to overcome USTAR's shortcomings, in addition to
75
+ // providing the ability to have sub-second resolution for timestamps.
76
+ //
77
+ // Some newer formats add their own extensions to PAX by defining their
78
+ // own keys and assigning certain semantic meaning to the associated values.
79
+ // For example, sparse file support in PAX is implemented using keys
80
+ // defined by the GNU manual (e.g., "GNU.sparse.map").
81
+ //
82
+ // Reference:
83
+ // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
84
+ FormatPAX
85
+
86
+ // FormatGNU represents the GNU header format.
87
+ //
88
+ // The GNU header format is older than the USTAR and PAX standards and
89
+ // is not compatible with them. The GNU format supports
90
+ // arbitrary file sizes, filenames of arbitrary encoding and length,
91
+ // sparse files, and other features.
92
+ //
93
+ // It is recommended that PAX be chosen over GNU unless the target
94
+ // application can only parse GNU formatted archives.
95
+ //
96
+ // Reference:
97
+ // https://www.gnu.org/software/tar/manual/html_node/Standard.html
98
+ FormatGNU
99
+
100
+ // Schily's tar format, which is incompatible with USTAR.
101
+ // This does not cover STAR extensions to the PAX format; these fall under
102
+ // the PAX format.
103
+ formatSTAR
104
+
105
+ formatMax
106
+ )
107
+
108
+ func (f Format) has(f2 Format) bool { return f&f2 != 0 }
109
+ func (f *Format) mayBe(f2 Format) { *f |= f2 }
110
+ func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 }
111
+ func (f *Format) mustNotBe(f2 Format) { *f &^= f2 }
112
+
113
+ var formatNames = map[Format]string{
114
+ formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR",
115
+ }
116
+
117
+ func (f Format) String() string {
118
+ var ss []string
119
+ for f2 := Format(1); f2 < formatMax; f2 <<= 1 {
120
+ if f.has(f2) {
121
+ ss = append(ss, formatNames[f2])
122
+ }
123
+ }
124
+ switch len(ss) {
125
+ case 0:
126
+ return "<unknown>"
127
+ case 1:
128
+ return ss[0]
129
+ default:
130
+ return "(" + strings.Join(ss, " | ") + ")"
131
+ }
132
+ }
133
+
134
+ // Magics used to identify various formats.
135
+ const (
136
+ magicGNU, versionGNU = "ustar ", " \x00"
137
+ magicUSTAR, versionUSTAR = "ustar\x00", "00"
138
+ trailerSTAR = "tar\x00"
139
+ )
140
+
141
+ // Size constants from various tar specifications.
142
+ const (
143
+ blockSize = 512 // Size of each block in a tar stream
144
+ nameSize = 100 // Max length of the name field in USTAR format
145
+ prefixSize = 155 // Max length of the prefix field in USTAR format
146
+
147
+ // Max length of a special file (PAX header, GNU long name or link).
148
+ // This matches the limit used by libarchive.
149
+ maxSpecialFileSize = 1 << 20
150
+ )
151
+
152
+ // blockPadding computes the number of bytes needed to pad offset up to the
153
+ // nearest block edge where 0 <= n < blockSize.
154
+ func blockPadding(offset int64) (n int64) {
155
+ return -offset & (blockSize - 1)
156
+ }
157
+
158
+ var zeroBlock block
159
+
160
+ type block [blockSize]byte
161
+
162
+ // Convert block to any number of formats.
163
+ func (b *block) toV7() *headerV7 { return (*headerV7)(b) }
164
+ func (b *block) toGNU() *headerGNU { return (*headerGNU)(b) }
165
+ func (b *block) toSTAR() *headerSTAR { return (*headerSTAR)(b) }
166
+ func (b *block) toUSTAR() *headerUSTAR { return (*headerUSTAR)(b) }
167
+ func (b *block) toSparse() sparseArray { return sparseArray(b[:]) }
168
+
169
+ // getFormat checks that the block is a valid tar header based on the checksum.
170
+ // It then attempts to guess the specific format based on magic values.
171
+ // If the checksum fails, then FormatUnknown is returned.
172
+ func (b *block) getFormat() Format {
173
+ // Verify checksum.
174
+ var p parser
175
+ value := p.parseOctal(b.toV7().chksum())
176
+ chksum1, chksum2 := b.computeChecksum()
177
+ if p.err != nil || (value != chksum1 && value != chksum2) {
178
+ return FormatUnknown
179
+ }
180
+
181
+ // Guess the magic values.
182
+ magic := string(b.toUSTAR().magic())
183
+ version := string(b.toUSTAR().version())
184
+ trailer := string(b.toSTAR().trailer())
185
+ switch {
186
+ case magic == magicUSTAR && trailer == trailerSTAR:
187
+ return formatSTAR
188
+ case magic == magicUSTAR:
189
+ return FormatUSTAR | FormatPAX
190
+ case magic == magicGNU && version == versionGNU:
191
+ return FormatGNU
192
+ default:
193
+ return formatV7
194
+ }
195
+ }
196
+
197
+ // setFormat writes the magic values necessary for specified format
198
+ // and then updates the checksum accordingly.
199
+ func (b *block) setFormat(format Format) {
200
+ // Set the magic values.
201
+ switch {
202
+ case format.has(formatV7):
203
+ // Do nothing.
204
+ case format.has(FormatGNU):
205
+ copy(b.toGNU().magic(), magicGNU)
206
+ copy(b.toGNU().version(), versionGNU)
207
+ case format.has(formatSTAR):
208
+ copy(b.toSTAR().magic(), magicUSTAR)
209
+ copy(b.toSTAR().version(), versionUSTAR)
210
+ copy(b.toSTAR().trailer(), trailerSTAR)
211
+ case format.has(FormatUSTAR | FormatPAX):
212
+ copy(b.toUSTAR().magic(), magicUSTAR)
213
+ copy(b.toUSTAR().version(), versionUSTAR)
214
+ default:
215
+ panic("invalid format")
216
+ }
217
+
218
+ // Update checksum.
219
+ // This field is special in that it is terminated by a NULL then space.
220
+ var f formatter
221
+ field := b.toV7().chksum()
222
+ chksum, _ := b.computeChecksum() // Possible values are 256..128776
223
+ f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
224
+ field[7] = ' '
225
+ }
226
+
227
+ // computeChecksum computes the checksum for the header block.
228
+ // POSIX specifies a sum of the unsigned byte values, but the Sun tar used
229
+ // signed byte values.
230
+ // We compute and return both.
231
+ func (b *block) computeChecksum() (unsigned, signed int64) {
232
+ for i, c := range b {
233
+ if 148 <= i && i < 156 {
234
+ c = ' ' // Treat the checksum field itself as all spaces.
235
+ }
236
+ unsigned += int64(c)
237
+ signed += int64(int8(c))
238
+ }
239
+ return unsigned, signed
240
+ }
241
+
242
+ // reset clears the block with all zeros.
243
+ func (b *block) reset() {
244
+ *b = block{}
245
+ }
246
+
247
+ type headerV7 [blockSize]byte
248
+
249
+ func (h *headerV7) name() []byte { return h[000:][:100] }
250
+ func (h *headerV7) mode() []byte { return h[100:][:8] }
251
+ func (h *headerV7) uid() []byte { return h[108:][:8] }
252
+ func (h *headerV7) gid() []byte { return h[116:][:8] }
253
+ func (h *headerV7) size() []byte { return h[124:][:12] }
254
+ func (h *headerV7) modTime() []byte { return h[136:][:12] }
255
+ func (h *headerV7) chksum() []byte { return h[148:][:8] }
256
+ func (h *headerV7) typeFlag() []byte { return h[156:][:1] }
257
+ func (h *headerV7) linkName() []byte { return h[157:][:100] }
258
+
259
+ type headerGNU [blockSize]byte
260
+
261
+ func (h *headerGNU) v7() *headerV7 { return (*headerV7)(h) }
262
+ func (h *headerGNU) magic() []byte { return h[257:][:6] }
263
+ func (h *headerGNU) version() []byte { return h[263:][:2] }
264
+ func (h *headerGNU) userName() []byte { return h[265:][:32] }
265
+ func (h *headerGNU) groupName() []byte { return h[297:][:32] }
266
+ func (h *headerGNU) devMajor() []byte { return h[329:][:8] }
267
+ func (h *headerGNU) devMinor() []byte { return h[337:][:8] }
268
+ func (h *headerGNU) accessTime() []byte { return h[345:][:12] }
269
+ func (h *headerGNU) changeTime() []byte { return h[357:][:12] }
270
+ func (h *headerGNU) sparse() sparseArray { return sparseArray(h[386:][:24*4+1]) }
271
+ func (h *headerGNU) realSize() []byte { return h[483:][:12] }
272
+
273
+ type headerSTAR [blockSize]byte
274
+
275
+ func (h *headerSTAR) v7() *headerV7 { return (*headerV7)(h) }
276
+ func (h *headerSTAR) magic() []byte { return h[257:][:6] }
277
+ func (h *headerSTAR) version() []byte { return h[263:][:2] }
278
+ func (h *headerSTAR) userName() []byte { return h[265:][:32] }
279
+ func (h *headerSTAR) groupName() []byte { return h[297:][:32] }
280
+ func (h *headerSTAR) devMajor() []byte { return h[329:][:8] }
281
+ func (h *headerSTAR) devMinor() []byte { return h[337:][:8] }
282
+ func (h *headerSTAR) prefix() []byte { return h[345:][:131] }
283
+ func (h *headerSTAR) accessTime() []byte { return h[476:][:12] }
284
+ func (h *headerSTAR) changeTime() []byte { return h[488:][:12] }
285
+ func (h *headerSTAR) trailer() []byte { return h[508:][:4] }
286
+
287
+ type headerUSTAR [blockSize]byte
288
+
289
+ func (h *headerUSTAR) v7() *headerV7 { return (*headerV7)(h) }
290
+ func (h *headerUSTAR) magic() []byte { return h[257:][:6] }
291
+ func (h *headerUSTAR) version() []byte { return h[263:][:2] }
292
+ func (h *headerUSTAR) userName() []byte { return h[265:][:32] }
293
+ func (h *headerUSTAR) groupName() []byte { return h[297:][:32] }
294
+ func (h *headerUSTAR) devMajor() []byte { return h[329:][:8] }
295
+ func (h *headerUSTAR) devMinor() []byte { return h[337:][:8] }
296
+ func (h *headerUSTAR) prefix() []byte { return h[345:][:155] }
297
+
298
+ type sparseArray []byte
299
+
300
+ func (s sparseArray) entry(i int) sparseElem { return sparseElem(s[i*24:]) }
301
+ func (s sparseArray) isExtended() []byte { return s[24*s.maxEntries():][:1] }
302
+ func (s sparseArray) maxEntries() int { return len(s) / 24 }
303
+
304
+ type sparseElem []byte
305
+
306
+ func (s sparseElem) offset() []byte { return s[00:][:12] }
307
+ func (s sparseElem) length() []byte { return s[12:][:12] }
platform/dbops/binaries/go/go/src/archive/tar/fuzz_test.go ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2021 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar
6
+
7
+ import (
8
+ "bytes"
9
+ "io"
10
+ "testing"
11
+ )
12
+
13
+ func FuzzReader(f *testing.F) {
14
+ b := bytes.NewBuffer(nil)
15
+ w := NewWriter(b)
16
+ inp := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.")
17
+ err := w.WriteHeader(&Header{
18
+ Name: "lorem.txt",
19
+ Mode: 0600,
20
+ Size: int64(len(inp)),
21
+ })
22
+ if err != nil {
23
+ f.Fatalf("failed to create writer: %s", err)
24
+ }
25
+ _, err = w.Write(inp)
26
+ if err != nil {
27
+ f.Fatalf("failed to write file to archive: %s", err)
28
+ }
29
+ if err := w.Close(); err != nil {
30
+ f.Fatalf("failed to write archive: %s", err)
31
+ }
32
+ f.Add(b.Bytes())
33
+
34
+ f.Fuzz(func(t *testing.T, b []byte) {
35
+ r := NewReader(bytes.NewReader(b))
36
+ type file struct {
37
+ header *Header
38
+ content []byte
39
+ }
40
+ files := []file{}
41
+ for {
42
+ hdr, err := r.Next()
43
+ if err == io.EOF {
44
+ break
45
+ }
46
+ if err != nil {
47
+ return
48
+ }
49
+ buf := bytes.NewBuffer(nil)
50
+ if _, err := io.Copy(buf, r); err != nil {
51
+ continue
52
+ }
53
+ files = append(files, file{header: hdr, content: buf.Bytes()})
54
+ }
55
+
56
+ // If we were unable to read anything out of the archive don't
57
+ // bother trying to roundtrip it.
58
+ if len(files) == 0 {
59
+ return
60
+ }
61
+
62
+ out := bytes.NewBuffer(nil)
63
+ w := NewWriter(out)
64
+ for _, f := range files {
65
+ if err := w.WriteHeader(f.header); err != nil {
66
+ t.Fatalf("unable to write previously parsed header: %s", err)
67
+ }
68
+ if _, err := w.Write(f.content); err != nil {
69
+ t.Fatalf("unable to write previously parsed content: %s", err)
70
+ }
71
+ }
72
+ if err := w.Close(); err != nil {
73
+ t.Fatalf("Unable to write archive: %s", err)
74
+ }
75
+
76
+ // TODO: We may want to check if the archive roundtrips. This would require
77
+ // taking into account addition of the two zero trailer blocks that Writer.Close
78
+ // appends.
79
+ })
80
+ }
platform/dbops/binaries/go/go/src/archive/tar/reader.go ADDED
@@ -0,0 +1,882 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2009 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar
6
+
7
+ import (
8
+ "bytes"
9
+ "io"
10
+ "path/filepath"
11
+ "strconv"
12
+ "strings"
13
+ "time"
14
+ )
15
+
16
+ // Reader provides sequential access to the contents of a tar archive.
17
+ // Reader.Next advances to the next file in the archive (including the first),
18
+ // and then Reader can be treated as an io.Reader to access the file's data.
19
+ type Reader struct {
20
+ r io.Reader
21
+ pad int64 // Amount of padding (ignored) after current file entry
22
+ curr fileReader // Reader for current file entry
23
+ blk block // Buffer to use as temporary local storage
24
+
25
+ // err is a persistent error.
26
+ // It is only the responsibility of every exported method of Reader to
27
+ // ensure that this error is sticky.
28
+ err error
29
+ }
30
+
31
+ type fileReader interface {
32
+ io.Reader
33
+ fileState
34
+
35
+ WriteTo(io.Writer) (int64, error)
36
+ }
37
+
38
+ // NewReader creates a new [Reader] reading from r.
39
+ func NewReader(r io.Reader) *Reader {
40
+ return &Reader{r: r, curr: &regFileReader{r, 0}}
41
+ }
42
+
43
+ // Next advances to the next entry in the tar archive.
44
+ // The Header.Size determines how many bytes can be read for the next file.
45
+ // Any remaining data in the current file is automatically discarded.
46
+ // At the end of the archive, Next returns the error io.EOF.
47
+ //
48
+ // If Next encounters a non-local name (as defined by [filepath.IsLocal])
49
+ // and the GODEBUG environment variable contains `tarinsecurepath=0`,
50
+ // Next returns the header with an [ErrInsecurePath] error.
51
+ // A future version of Go may introduce this behavior by default.
52
+ // Programs that want to accept non-local names can ignore
53
+ // the [ErrInsecurePath] error and use the returned header.
54
+ func (tr *Reader) Next() (*Header, error) {
55
+ if tr.err != nil {
56
+ return nil, tr.err
57
+ }
58
+ hdr, err := tr.next()
59
+ tr.err = err
60
+ if err == nil && !filepath.IsLocal(hdr.Name) {
61
+ if tarinsecurepath.Value() == "0" {
62
+ tarinsecurepath.IncNonDefault()
63
+ err = ErrInsecurePath
64
+ }
65
+ }
66
+ return hdr, err
67
+ }
68
+
69
+ func (tr *Reader) next() (*Header, error) {
70
+ var paxHdrs map[string]string
71
+ var gnuLongName, gnuLongLink string
72
+
73
+ // Externally, Next iterates through the tar archive as if it is a series of
74
+ // files. Internally, the tar format often uses fake "files" to add meta
75
+ // data that describes the next file. These meta data "files" should not
76
+ // normally be visible to the outside. As such, this loop iterates through
77
+ // one or more "header files" until it finds a "normal file".
78
+ format := FormatUSTAR | FormatPAX | FormatGNU
79
+ for {
80
+ // Discard the remainder of the file and any padding.
81
+ if err := discard(tr.r, tr.curr.physicalRemaining()); err != nil {
82
+ return nil, err
83
+ }
84
+ if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
85
+ return nil, err
86
+ }
87
+ tr.pad = 0
88
+
89
+ hdr, rawHdr, err := tr.readHeader()
90
+ if err != nil {
91
+ return nil, err
92
+ }
93
+ if err := tr.handleRegularFile(hdr); err != nil {
94
+ return nil, err
95
+ }
96
+ format.mayOnlyBe(hdr.Format)
97
+
98
+ // Check for PAX/GNU special headers and files.
99
+ switch hdr.Typeflag {
100
+ case TypeXHeader, TypeXGlobalHeader:
101
+ format.mayOnlyBe(FormatPAX)
102
+ paxHdrs, err = parsePAX(tr)
103
+ if err != nil {
104
+ return nil, err
105
+ }
106
+ if hdr.Typeflag == TypeXGlobalHeader {
107
+ mergePAX(hdr, paxHdrs)
108
+ return &Header{
109
+ Name: hdr.Name,
110
+ Typeflag: hdr.Typeflag,
111
+ Xattrs: hdr.Xattrs,
112
+ PAXRecords: hdr.PAXRecords,
113
+ Format: format,
114
+ }, nil
115
+ }
116
+ continue // This is a meta header affecting the next header
117
+ case TypeGNULongName, TypeGNULongLink:
118
+ format.mayOnlyBe(FormatGNU)
119
+ realname, err := readSpecialFile(tr)
120
+ if err != nil {
121
+ return nil, err
122
+ }
123
+
124
+ var p parser
125
+ switch hdr.Typeflag {
126
+ case TypeGNULongName:
127
+ gnuLongName = p.parseString(realname)
128
+ case TypeGNULongLink:
129
+ gnuLongLink = p.parseString(realname)
130
+ }
131
+ continue // This is a meta header affecting the next header
132
+ default:
133
+ // The old GNU sparse format is handled here since it is technically
134
+ // just a regular file with additional attributes.
135
+
136
+ if err := mergePAX(hdr, paxHdrs); err != nil {
137
+ return nil, err
138
+ }
139
+ if gnuLongName != "" {
140
+ hdr.Name = gnuLongName
141
+ }
142
+ if gnuLongLink != "" {
143
+ hdr.Linkname = gnuLongLink
144
+ }
145
+ if hdr.Typeflag == TypeRegA {
146
+ if strings.HasSuffix(hdr.Name, "/") {
147
+ hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
148
+ } else {
149
+ hdr.Typeflag = TypeReg
150
+ }
151
+ }
152
+
153
+ // The extended headers may have updated the size.
154
+ // Thus, setup the regFileReader again after merging PAX headers.
155
+ if err := tr.handleRegularFile(hdr); err != nil {
156
+ return nil, err
157
+ }
158
+
159
+ // Sparse formats rely on being able to read from the logical data
160
+ // section; there must be a preceding call to handleRegularFile.
161
+ if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
162
+ return nil, err
163
+ }
164
+
165
+ // Set the final guess at the format.
166
+ if format.has(FormatUSTAR) && format.has(FormatPAX) {
167
+ format.mayOnlyBe(FormatUSTAR)
168
+ }
169
+ hdr.Format = format
170
+ return hdr, nil // This is a file, so stop
171
+ }
172
+ }
173
+ }
174
+
175
+ // handleRegularFile sets up the current file reader and padding such that it
176
+ // can only read the following logical data section. It will properly handle
177
+ // special headers that contain no data section.
178
+ func (tr *Reader) handleRegularFile(hdr *Header) error {
179
+ nb := hdr.Size
180
+ if isHeaderOnlyType(hdr.Typeflag) {
181
+ nb = 0
182
+ }
183
+ if nb < 0 {
184
+ return ErrHeader
185
+ }
186
+
187
+ tr.pad = blockPadding(nb)
188
+ tr.curr = &regFileReader{r: tr.r, nb: nb}
189
+ return nil
190
+ }
191
+
192
+ // handleSparseFile checks if the current file is a sparse format of any type
193
+ // and sets the curr reader appropriately.
194
+ func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
195
+ var spd sparseDatas
196
+ var err error
197
+ if hdr.Typeflag == TypeGNUSparse {
198
+ spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
199
+ } else {
200
+ spd, err = tr.readGNUSparsePAXHeaders(hdr)
201
+ }
202
+
203
+ // If sp is non-nil, then this is a sparse file.
204
+ // Note that it is possible for len(sp) == 0.
205
+ if err == nil && spd != nil {
206
+ if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
207
+ return ErrHeader
208
+ }
209
+ sph := invertSparseEntries(spd, hdr.Size)
210
+ tr.curr = &sparseFileReader{tr.curr, sph, 0}
211
+ }
212
+ return err
213
+ }
214
+
215
+ // readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
216
+ // If they are found, then this function reads the sparse map and returns it.
217
+ // This assumes that 0.0 headers have already been converted to 0.1 headers
218
+ // by the PAX header parsing logic.
219
+ func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
220
+ // Identify the version of GNU headers.
221
+ var is1x0 bool
222
+ major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
223
+ switch {
224
+ case major == "0" && (minor == "0" || minor == "1"):
225
+ is1x0 = false
226
+ case major == "1" && minor == "0":
227
+ is1x0 = true
228
+ case major != "" || minor != "":
229
+ return nil, nil // Unknown GNU sparse PAX version
230
+ case hdr.PAXRecords[paxGNUSparseMap] != "":
231
+ is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
232
+ default:
233
+ return nil, nil // Not a PAX format GNU sparse file.
234
+ }
235
+ hdr.Format.mayOnlyBe(FormatPAX)
236
+
237
+ // Update hdr from GNU sparse PAX headers.
238
+ if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
239
+ hdr.Name = name
240
+ }
241
+ size := hdr.PAXRecords[paxGNUSparseSize]
242
+ if size == "" {
243
+ size = hdr.PAXRecords[paxGNUSparseRealSize]
244
+ }
245
+ if size != "" {
246
+ n, err := strconv.ParseInt(size, 10, 64)
247
+ if err != nil {
248
+ return nil, ErrHeader
249
+ }
250
+ hdr.Size = n
251
+ }
252
+
253
+ // Read the sparse map according to the appropriate format.
254
+ if is1x0 {
255
+ return readGNUSparseMap1x0(tr.curr)
256
+ }
257
+ return readGNUSparseMap0x1(hdr.PAXRecords)
258
+ }
259
+
260
+ // mergePAX merges paxHdrs into hdr for all relevant fields of Header.
261
+ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
262
+ for k, v := range paxHdrs {
263
+ if v == "" {
264
+ continue // Keep the original USTAR value
265
+ }
266
+ var id64 int64
267
+ switch k {
268
+ case paxPath:
269
+ hdr.Name = v
270
+ case paxLinkpath:
271
+ hdr.Linkname = v
272
+ case paxUname:
273
+ hdr.Uname = v
274
+ case paxGname:
275
+ hdr.Gname = v
276
+ case paxUid:
277
+ id64, err = strconv.ParseInt(v, 10, 64)
278
+ hdr.Uid = int(id64) // Integer overflow possible
279
+ case paxGid:
280
+ id64, err = strconv.ParseInt(v, 10, 64)
281
+ hdr.Gid = int(id64) // Integer overflow possible
282
+ case paxAtime:
283
+ hdr.AccessTime, err = parsePAXTime(v)
284
+ case paxMtime:
285
+ hdr.ModTime, err = parsePAXTime(v)
286
+ case paxCtime:
287
+ hdr.ChangeTime, err = parsePAXTime(v)
288
+ case paxSize:
289
+ hdr.Size, err = strconv.ParseInt(v, 10, 64)
290
+ default:
291
+ if strings.HasPrefix(k, paxSchilyXattr) {
292
+ if hdr.Xattrs == nil {
293
+ hdr.Xattrs = make(map[string]string)
294
+ }
295
+ hdr.Xattrs[k[len(paxSchilyXattr):]] = v
296
+ }
297
+ }
298
+ if err != nil {
299
+ return ErrHeader
300
+ }
301
+ }
302
+ hdr.PAXRecords = paxHdrs
303
+ return nil
304
+ }
305
+
306
+ // parsePAX parses PAX headers.
307
+ // If an extended header (type 'x') is invalid, ErrHeader is returned.
308
+ func parsePAX(r io.Reader) (map[string]string, error) {
309
+ buf, err := readSpecialFile(r)
310
+ if err != nil {
311
+ return nil, err
312
+ }
313
+ sbuf := string(buf)
314
+
315
+ // For GNU PAX sparse format 0.0 support.
316
+ // This function transforms the sparse format 0.0 headers into format 0.1
317
+ // headers since 0.0 headers were not PAX compliant.
318
+ var sparseMap []string
319
+
320
+ paxHdrs := make(map[string]string)
321
+ for len(sbuf) > 0 {
322
+ key, value, residual, err := parsePAXRecord(sbuf)
323
+ if err != nil {
324
+ return nil, ErrHeader
325
+ }
326
+ sbuf = residual
327
+
328
+ switch key {
329
+ case paxGNUSparseOffset, paxGNUSparseNumBytes:
330
+ // Validate sparse header order and value.
331
+ if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
332
+ (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
333
+ strings.Contains(value, ",") {
334
+ return nil, ErrHeader
335
+ }
336
+ sparseMap = append(sparseMap, value)
337
+ default:
338
+ paxHdrs[key] = value
339
+ }
340
+ }
341
+ if len(sparseMap) > 0 {
342
+ paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
343
+ }
344
+ return paxHdrs, nil
345
+ }
346
+
347
// readHeader reads the next block header and assumes that the underlying reader
// is already aligned to a block boundary. It returns the raw block of the
// header in case further processing is required.
//
// The err will be set to io.EOF only when one of the following occurs:
// - Exactly 0 bytes are read and EOF is hit.
// - Exactly 1 block of zeros is read and EOF is hit.
// - At least 2 blocks of zeros are read.
func (tr *Reader) readHeader() (*Header, *block, error) {
	// Two blocks of zero bytes marks the end of the archive.
	if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
		return nil, nil, err // EOF is okay here; exactly 0 bytes read
	}
	if bytes.Equal(tr.blk[:], zeroBlock[:]) {
		if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
			return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
		}
		if bytes.Equal(tr.blk[:], zeroBlock[:]) {
			return nil, nil, io.EOF // normal EOF; exactly 2 blocks of zeros read
		}
		return nil, nil, ErrHeader // Zero block and then non-zero block
	}

	// Verify the header matches a known format.
	format := tr.blk.getFormat()
	if format == FormatUnknown {
		return nil, nil, ErrHeader
	}

	var p parser
	hdr := new(Header)

	// Unpack the V7 header.
	// All later formats are supersets of V7, so these fields always exist.
	v7 := tr.blk.toV7()
	hdr.Typeflag = v7.typeFlag()[0]
	hdr.Name = p.parseString(v7.name())
	hdr.Linkname = p.parseString(v7.linkName())
	hdr.Size = p.parseNumeric(v7.size())
	hdr.Mode = p.parseNumeric(v7.mode())
	hdr.Uid = int(p.parseNumeric(v7.uid()))
	hdr.Gid = int(p.parseNumeric(v7.gid()))
	hdr.ModTime = time.Unix(p.parseNumeric(v7.modTime()), 0)

	// Unpack format specific fields.
	if format > formatV7 {
		ustar := tr.blk.toUSTAR()
		hdr.Uname = p.parseString(ustar.userName())
		hdr.Gname = p.parseString(ustar.groupName())
		hdr.Devmajor = p.parseNumeric(ustar.devMajor())
		hdr.Devminor = p.parseNumeric(ustar.devMinor())

		var prefix string
		switch {
		case format.has(FormatUSTAR | FormatPAX):
			hdr.Format = format
			ustar := tr.blk.toUSTAR()
			prefix = p.parseString(ustar.prefix())

			// For Format detection, check if block is properly formatted since
			// the parser is more liberal than what USTAR actually permits.
			notASCII := func(r rune) bool { return r >= 0x80 }
			if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
				hdr.Format = FormatUnknown // Non-ASCII characters in block.
			}
			nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
			if !(nul(v7.size()) && nul(v7.mode()) && nul(v7.uid()) && nul(v7.gid()) &&
				nul(v7.modTime()) && nul(ustar.devMajor()) && nul(ustar.devMinor())) {
				hdr.Format = FormatUnknown // Numeric fields must end in NUL
			}
		case format.has(formatSTAR):
			star := tr.blk.toSTAR()
			prefix = p.parseString(star.prefix())
			hdr.AccessTime = time.Unix(p.parseNumeric(star.accessTime()), 0)
			hdr.ChangeTime = time.Unix(p.parseNumeric(star.changeTime()), 0)
		case format.has(FormatGNU):
			hdr.Format = format
			// A separate parser is used here so that a parse failure in the
			// atime/ctime fields can be detected without poisoning p.err.
			var p2 parser
			gnu := tr.blk.toGNU()
			if b := gnu.accessTime(); b[0] != 0 {
				hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
			}
			if b := gnu.changeTime(); b[0] != 0 {
				hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
			}

			// Prior to Go1.8, the Writer had a bug where it would output
			// an invalid tar file in certain rare situations because the logic
			// incorrectly believed that the old GNU format had a prefix field.
			// This is wrong and leads to an output file that mangles the
			// atime and ctime fields, which are often left unused.
			//
			// In order to continue reading tar files created by former, buggy
			// versions of Go, we skeptically parse the atime and ctime fields.
			// If we are unable to parse them and the prefix field looks like
			// an ASCII string, then we fallback on the pre-Go1.8 behavior
			// of treating these fields as the USTAR prefix field.
			//
			// Note that this will not use the fallback logic for all possible
			// files generated by a pre-Go1.8 toolchain. If the generated file
			// happened to have a prefix field that parses as valid
			// atime and ctime fields (e.g., when they are valid octal strings),
			// then it is impossible to distinguish between a valid GNU file
			// and an invalid pre-Go1.8 file.
			//
			// See https://golang.org/issues/12594
			// See https://golang.org/issues/21005
			if p2.err != nil {
				hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
				ustar := tr.blk.toUSTAR()
				if s := p.parseString(ustar.prefix()); isASCII(s) {
					prefix = s
				}
				hdr.Format = FormatUnknown // Buggy file is not GNU
			}
		}
		if len(prefix) > 0 {
			hdr.Name = prefix + "/" + hdr.Name
		}
	}
	// p.err carries the first numeric/string parse error, if any occurred.
	return hdr, &tr.blk, p.err
}
468
+
469
// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
// The sparse map is stored in the tar header if it's small enough.
// If it's larger than four entries, then one or more extension headers are used
// to store the rest of the sparse map.
//
// The Header.Size does not reflect the size of any extended headers used.
// Thus, this function will read from the raw io.Reader to fetch extra headers.
// This method mutates blk in the process.
func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
	// Make sure that the input format is GNU.
	// Unfortunately, the STAR format also has a sparse header format that uses
	// the same type flag but has a completely different layout.
	if blk.getFormat() != FormatGNU {
		return nil, ErrHeader
	}
	hdr.Format.mayOnlyBe(FormatGNU)

	// The GNU sparse header records the logical (expanded) file size
	// separately from the on-disk size held in the V7 size field.
	var p parser
	hdr.Size = p.parseNumeric(blk.toGNU().realSize())
	if p.err != nil {
		return nil, p.err
	}
	s := blk.toGNU().sparse()
	spd := make(sparseDatas, 0, s.maxEntries())
	for {
		for i := 0; i < s.maxEntries(); i++ {
			// This termination condition is identical to GNU and BSD tar.
			if s.entry(i).offset()[0] == 0x00 {
				break // Don't return, need to process extended headers (even if empty)
			}
			offset := p.parseNumeric(s.entry(i).offset())
			length := p.parseNumeric(s.entry(i).length())
			if p.err != nil {
				return nil, p.err
			}
			spd = append(spd, sparseEntry{Offset: offset, Length: length})
		}

		if s.isExtended()[0] > 0 {
			// There are more entries. Read an extension header and parse its entries.
			// Note that this overwrites blk, as documented above.
			if _, err := mustReadFull(tr.r, blk[:]); err != nil {
				return nil, err
			}
			s = blk.toSparse()
			continue
		}
		return spd, nil // Done
	}
}
518
+
519
// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
// version 1.0. The format of the sparse map consists of a series of
// newline-terminated numeric fields. The first field is the number of entries
// and is always present. Following this are the entries, consisting of two
// fields (offset, length). This function must stop reading at the end
// boundary of the block containing the last newline.
//
// Note that the GNU manual says that numeric values should be encoded in octal
// format. However, the GNU tar utility itself outputs these values in decimal.
// As such, this library treats values as being encoded in decimal.
func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
	var (
		cntNewline int64        // Number of unconsumed newlines currently buffered
		buf        bytes.Buffer // Buffered, not-yet-tokenized map data
		blk        block        // Scratch block; reads stay block-aligned
	)

	// feedTokens copies data in blocks from r into buf until there are
	// at least cnt newlines in buf. It will not read more blocks than needed,
	// which keeps the underlying reader aligned to a block boundary as
	// required by the doc comment above.
	feedTokens := func(n int64) error {
		for cntNewline < n {
			if _, err := mustReadFull(r, blk[:]); err != nil {
				return err
			}
			buf.Write(blk[:])
			for _, c := range blk {
				if c == '\n' {
					cntNewline++
				}
			}
		}
		return nil
	}

	// nextToken gets the next token delimited by a newline. This assumes that
	// at least one newline exists in the buffer.
	nextToken := func() string {
		cntNewline--
		tok, _ := buf.ReadString('\n')
		return strings.TrimRight(tok, "\n")
	}

	// Parse for the number of entries.
	// Use integer overflow resistant math to check this.
	if err := feedTokens(1); err != nil {
		return nil, err
	}
	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
		return nil, ErrHeader
	}

	// Parse for all member entries.
	// numEntries is trusted after this since a potential attacker must have
	// committed resources proportional to what this library used.
	if err := feedTokens(2 * numEntries); err != nil {
		return nil, err
	}
	spd := make(sparseDatas, 0, numEntries)
	for i := int64(0); i < numEntries; i++ {
		offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
		length, err2 := strconv.ParseInt(nextToken(), 10, 64)
		if err1 != nil || err2 != nil {
			return nil, ErrHeader
		}
		spd = append(spd, sparseEntry{Offset: offset, Length: length})
	}
	return spd, nil
}
588
+
589
+ // readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
590
+ // version 0.1. The sparse map is stored in the PAX headers.
591
+ func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
592
+ // Get number of entries.
593
+ // Use integer overflow resistant math to check this.
594
+ numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
595
+ numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
596
+ if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
597
+ return nil, ErrHeader
598
+ }
599
+
600
+ // There should be two numbers in sparseMap for each entry.
601
+ sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
602
+ if len(sparseMap) == 1 && sparseMap[0] == "" {
603
+ sparseMap = sparseMap[:0]
604
+ }
605
+ if int64(len(sparseMap)) != 2*numEntries {
606
+ return nil, ErrHeader
607
+ }
608
+
609
+ // Loop through the entries in the sparse map.
610
+ // numEntries is trusted now.
611
+ spd := make(sparseDatas, 0, numEntries)
612
+ for len(sparseMap) >= 2 {
613
+ offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
614
+ length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
615
+ if err1 != nil || err2 != nil {
616
+ return nil, ErrHeader
617
+ }
618
+ spd = append(spd, sparseEntry{Offset: offset, Length: length})
619
+ sparseMap = sparseMap[2:]
620
+ }
621
+ return spd, nil
622
+ }
623
+
624
+ // Read reads from the current file in the tar archive.
625
+ // It returns (0, io.EOF) when it reaches the end of that file,
626
+ // until [Next] is called to advance to the next file.
627
+ //
628
+ // If the current file is sparse, then the regions marked as a hole
629
+ // are read back as NUL-bytes.
630
+ //
631
+ // Calling Read on special types like [TypeLink], [TypeSymlink], [TypeChar],
632
+ // [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [io.EOF]) regardless of what
633
+ // the [Header.Size] claims.
634
+ func (tr *Reader) Read(b []byte) (int, error) {
635
+ if tr.err != nil {
636
+ return 0, tr.err
637
+ }
638
+ n, err := tr.curr.Read(b)
639
+ if err != nil && err != io.EOF {
640
+ tr.err = err
641
+ }
642
+ return n, err
643
+ }
644
+
645
// writeTo writes the content of the current file to w.
// The bytes written matches the number of remaining bytes in the current file.
//
// If the current file is sparse and w is an io.WriteSeeker,
// then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
// assuming that skipped regions are filled with NULs.
// This always writes the last byte to ensure w is the right size.
//
// TODO(dsnet): Re-export this when adding sparse file support.
// See https://golang.org/issue/22735
func (tr *Reader) writeTo(w io.Writer) (int64, error) {
	if tr.err != nil {
		return 0, tr.err
	}
	n, err := tr.curr.WriteTo(w)
	if err != nil {
		tr.err = err // Sticky: all subsequent calls observe this failure.
	}
	return n, err
}
665
+
666
+ // regFileReader is a fileReader for reading data from a regular file entry.
667
+ type regFileReader struct {
668
+ r io.Reader // Underlying Reader
669
+ nb int64 // Number of remaining bytes to read
670
+ }
671
+
672
+ func (fr *regFileReader) Read(b []byte) (n int, err error) {
673
+ if int64(len(b)) > fr.nb {
674
+ b = b[:fr.nb]
675
+ }
676
+ if len(b) > 0 {
677
+ n, err = fr.r.Read(b)
678
+ fr.nb -= int64(n)
679
+ }
680
+ switch {
681
+ case err == io.EOF && fr.nb > 0:
682
+ return n, io.ErrUnexpectedEOF
683
+ case err == nil && fr.nb == 0:
684
+ return n, io.EOF
685
+ default:
686
+ return n, err
687
+ }
688
+ }
689
+
690
+ func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
691
+ return io.Copy(w, struct{ io.Reader }{fr})
692
+ }
693
+
694
+ // logicalRemaining implements fileState.logicalRemaining.
695
+ func (fr regFileReader) logicalRemaining() int64 {
696
+ return fr.nb
697
+ }
698
+
699
+ // physicalRemaining implements fileState.physicalRemaining.
700
+ func (fr regFileReader) physicalRemaining() int64 {
701
+ return fr.nb
702
+ }
703
+
704
// sparseFileReader is a fileReader for reading data from a sparse file entry.
// The hole list sp is kept non-empty by the methods (the final fragment is
// never popped), so its last entry's endOffset marks the logical file size;
// see logicalRemaining.
type sparseFileReader struct {
	fr  fileReader  // Underlying fileReader
	sp  sparseHoles // Normalized list of sparse holes
	pos int64       // Current position in sparse file
}
710
+
711
// Read services the request from alternating data and hole fragments:
// bytes inside a data fragment come from the underlying dense reader,
// while bytes inside a hole fragment are synthesized as NULs.
func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
	finished := int64(len(b)) >= sr.logicalRemaining()
	if finished {
		b = b[:sr.logicalRemaining()]
	}

	b0 := b
	endPos := sr.pos + int64(len(b))
	for endPos > sr.pos && err == nil {
		var nf int // Bytes read in fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			bf := b[:min(int64(len(b)), holeStart-sr.pos)]
			nf, err = tryReadFull(sr.fr, bf)
		} else { // In a hole fragment
			bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
			nf, err = tryReadFull(zeroReader{}, bf)
		}
		b = b[nf:]
		sr.pos += int64(nf)
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	n = len(b0) - len(b)
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	case finished:
		return n, io.EOF
	default:
		return n, nil
	}
}
750
+
751
// WriteTo copies the sparse file to w. When w genuinely supports seeking,
// holes are skipped with Seek instead of being written as NULs; if the file
// ends in a hole, a single trailing NUL byte is written so that w ends up
// the right size. Otherwise it falls back to a plain copy through Read.
func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
	ws, ok := w.(io.WriteSeeker)
	if ok {
		if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
			ok = false // Not all io.Seeker can really seek
		}
	}
	if !ok {
		// Wrap sr so io.Copy uses Read (NULs for holes), not this method.
		return io.Copy(w, struct{ io.Reader }{sr})
	}

	var writeLastByte bool
	pos0 := sr.pos
	for sr.logicalRemaining() > 0 && !writeLastByte && err == nil {
		var nf int64 // Size of fragment
		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
		if sr.pos < holeStart { // In a data fragment
			nf = holeStart - sr.pos
			nf, err = io.CopyN(ws, sr.fr, nf)
		} else { // In a hole fragment
			nf = holeEnd - sr.pos
			if sr.physicalRemaining() == 0 {
				// Final fragment is a hole: stop one byte short so the
				// explicit write below can fix the final size of w.
				writeLastByte = true
				nf--
			}
			_, err = ws.Seek(nf, io.SeekCurrent)
		}
		sr.pos += nf
		if sr.pos >= holeEnd && len(sr.sp) > 1 {
			sr.sp = sr.sp[1:] // Ensure last fragment always remains
		}
	}

	// If the last fragment is a hole, then seek to 1-byte before EOF, and
	// write a single byte to ensure the file is the right size.
	if writeLastByte && err == nil {
		_, err = ws.Write([]byte{0})
		sr.pos++
	}

	n = sr.pos - pos0
	switch {
	case err == io.EOF:
		return n, errMissData // Less data in dense file than sparse file
	case err != nil:
		return n, err
	case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
		return n, errUnrefData // More data in dense file than sparse file
	default:
		return n, nil
	}
}
803
+
804
// logicalRemaining reports the bytes left in the logical (expanded) view of
// the sparse file: the distance from the current position to the end offset
// of the final sparse fragment, which is never popped by Read/WriteTo.
func (sr sparseFileReader) logicalRemaining() int64 {
	return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
}
807
// physicalRemaining reports the bytes of actual (dense) data still unread
// in the underlying fileReader.
func (sr sparseFileReader) physicalRemaining() int64 {
	return sr.fr.physicalRemaining()
}
810
+
811
// zeroReader is an io.Reader yielding an endless stream of NUL bytes.
// It is used to materialize the hole regions of sparse files.
type zeroReader struct{}

// Read fills b entirely with zeros; it never fails and never reports EOF.
func (zeroReader) Read(b []byte) (int, error) {
	for i := 0; i < len(b); i++ {
		b[i] = 0
	}
	return len(b), nil
}
819
+
820
+ // mustReadFull is like io.ReadFull except it returns
821
+ // io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
822
+ func mustReadFull(r io.Reader, b []byte) (int, error) {
823
+ n, err := tryReadFull(r, b)
824
+ if err == io.EOF {
825
+ err = io.ErrUnexpectedEOF
826
+ }
827
+ return n, err
828
+ }
829
+
830
+ // tryReadFull is like io.ReadFull except it returns
831
+ // io.EOF when it is hit before len(b) bytes are read.
832
+ func tryReadFull(r io.Reader, b []byte) (n int, err error) {
833
+ for len(b) > n && err == nil {
834
+ var nn int
835
+ nn, err = r.Read(b[n:])
836
+ n += nn
837
+ }
838
+ if len(b) == n && err == io.EOF {
839
+ err = nil
840
+ }
841
+ return n, err
842
+ }
843
+
844
// readSpecialFile is like io.ReadAll except it returns
// ErrFieldTooLong if more than maxSpecialFileSize is read.
func readSpecialFile(r io.Reader) ([]byte, error) {
	// Read up to one byte past the limit so an oversized payload is
	// detectable without consuming an unbounded amount of input.
	buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1))
	if len(buf) > maxSpecialFileSize {
		return nil, ErrFieldTooLong
	}
	return buf, err
}
853
+
854
// discard skips n bytes in r, reporting an error if unable to do so.
func discard(r io.Reader, n int64) error {
	// If possible, Seek to the last byte before the end of the data section.
	// Do this because Seek is often lazy about reporting errors; this will mask
	// the fact that the stream may be truncated. We can rely on the
	// io.CopyN done shortly afterwards to trigger any IO errors.
	var seekSkipped int64 // Number of bytes skipped via Seek
	if sr, ok := r.(io.Seeker); ok && n > 1 {
		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
		// io.Seeker, but calling Seek always returns an error and performs
		// no action. Thus, we try an innocent seek to the current position
		// to see if Seek is really supported.
		pos1, err := sr.Seek(0, io.SeekCurrent)
		if pos1 >= 0 && err == nil {
			// Seek seems supported, so perform the real Seek.
			pos2, err := sr.Seek(n-1, io.SeekCurrent)
			if pos2 < 0 || err != nil {
				return err
			}
			seekSkipped = pos2 - pos1
		}
	}

	// Copy (and discard) whatever Seek did not cover — at least the final
	// byte — so that truncation or I/O errors are actually observed.
	copySkipped, err := io.CopyN(io.Discard, r, n-seekSkipped)
	if err == io.EOF && seekSkipped+copySkipped < n {
		err = io.ErrUnexpectedEOF
	}
	return err
}
platform/dbops/binaries/go/go/src/archive/tar/reader_test.go ADDED
@@ -0,0 +1,1674 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2009 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar
6
+
7
+ import (
8
+ "bytes"
9
+ "compress/bzip2"
10
+ "crypto/md5"
11
+ "errors"
12
+ "fmt"
13
+ "io"
14
+ "math"
15
+ "os"
16
+ "path"
17
+ "reflect"
18
+ "strconv"
19
+ "strings"
20
+ "testing"
21
+ "time"
22
+ )
23
+
24
+ func TestReader(t *testing.T) {
25
+ vectors := []struct {
26
+ file string // Test input file
27
+ headers []*Header // Expected output headers
28
+ chksums []string // MD5 checksum of files, leave as nil if not checked
29
+ err error // Expected error to occur
30
+ }{{
31
+ file: "testdata/gnu.tar",
32
+ headers: []*Header{{
33
+ Name: "small.txt",
34
+ Mode: 0640,
35
+ Uid: 73025,
36
+ Gid: 5000,
37
+ Size: 5,
38
+ ModTime: time.Unix(1244428340, 0),
39
+ Typeflag: '0',
40
+ Uname: "dsymonds",
41
+ Gname: "eng",
42
+ Format: FormatGNU,
43
+ }, {
44
+ Name: "small2.txt",
45
+ Mode: 0640,
46
+ Uid: 73025,
47
+ Gid: 5000,
48
+ Size: 11,
49
+ ModTime: time.Unix(1244436044, 0),
50
+ Typeflag: '0',
51
+ Uname: "dsymonds",
52
+ Gname: "eng",
53
+ Format: FormatGNU,
54
+ }},
55
+ chksums: []string{
56
+ "e38b27eaccb4391bdec553a7f3ae6b2f",
57
+ "c65bd2e50a56a2138bf1716f2fd56fe9",
58
+ },
59
+ }, {
60
+ file: "testdata/sparse-formats.tar",
61
+ headers: []*Header{{
62
+ Name: "sparse-gnu",
63
+ Mode: 420,
64
+ Uid: 1000,
65
+ Gid: 1000,
66
+ Size: 200,
67
+ ModTime: time.Unix(1392395740, 0),
68
+ Typeflag: 0x53,
69
+ Linkname: "",
70
+ Uname: "david",
71
+ Gname: "david",
72
+ Devmajor: 0,
73
+ Devminor: 0,
74
+ Format: FormatGNU,
75
+ }, {
76
+ Name: "sparse-posix-0.0",
77
+ Mode: 420,
78
+ Uid: 1000,
79
+ Gid: 1000,
80
+ Size: 200,
81
+ ModTime: time.Unix(1392342187, 0),
82
+ Typeflag: 0x30,
83
+ Linkname: "",
84
+ Uname: "david",
85
+ Gname: "david",
86
+ Devmajor: 0,
87
+ Devminor: 0,
88
+ PAXRecords: map[string]string{
89
+ "GNU.sparse.size": "200",
90
+ "GNU.sparse.numblocks": "95",
91
+ "GNU.sparse.map": "1,1,3,1,5,1,7,1,9,1,11,1,13,1,15,1,17,1,19,1,21,1,23,1,25,1,27,1,29,1,31,1,33,1,35,1,37,1,39,1,41,1,43,1,45,1,47,1,49,1,51,1,53,1,55,1,57,1,59,1,61,1,63,1,65,1,67,1,69,1,71,1,73,1,75,1,77,1,79,1,81,1,83,1,85,1,87,1,89,1,91,1,93,1,95,1,97,1,99,1,101,1,103,1,105,1,107,1,109,1,111,1,113,1,115,1,117,1,119,1,121,1,123,1,125,1,127,1,129,1,131,1,133,1,135,1,137,1,139,1,141,1,143,1,145,1,147,1,149,1,151,1,153,1,155,1,157,1,159,1,161,1,163,1,165,1,167,1,169,1,171,1,173,1,175,1,177,1,179,1,181,1,183,1,185,1,187,1,189,1",
92
+ },
93
+ Format: FormatPAX,
94
+ }, {
95
+ Name: "sparse-posix-0.1",
96
+ Mode: 420,
97
+ Uid: 1000,
98
+ Gid: 1000,
99
+ Size: 200,
100
+ ModTime: time.Unix(1392340456, 0),
101
+ Typeflag: 0x30,
102
+ Linkname: "",
103
+ Uname: "david",
104
+ Gname: "david",
105
+ Devmajor: 0,
106
+ Devminor: 0,
107
+ PAXRecords: map[string]string{
108
+ "GNU.sparse.size": "200",
109
+ "GNU.sparse.numblocks": "95",
110
+ "GNU.sparse.map": "1,1,3,1,5,1,7,1,9,1,11,1,13,1,15,1,17,1,19,1,21,1,23,1,25,1,27,1,29,1,31,1,33,1,35,1,37,1,39,1,41,1,43,1,45,1,47,1,49,1,51,1,53,1,55,1,57,1,59,1,61,1,63,1,65,1,67,1,69,1,71,1,73,1,75,1,77,1,79,1,81,1,83,1,85,1,87,1,89,1,91,1,93,1,95,1,97,1,99,1,101,1,103,1,105,1,107,1,109,1,111,1,113,1,115,1,117,1,119,1,121,1,123,1,125,1,127,1,129,1,131,1,133,1,135,1,137,1,139,1,141,1,143,1,145,1,147,1,149,1,151,1,153,1,155,1,157,1,159,1,161,1,163,1,165,1,167,1,169,1,171,1,173,1,175,1,177,1,179,1,181,1,183,1,185,1,187,1,189,1",
111
+ "GNU.sparse.name": "sparse-posix-0.1",
112
+ },
113
+ Format: FormatPAX,
114
+ }, {
115
+ Name: "sparse-posix-1.0",
116
+ Mode: 420,
117
+ Uid: 1000,
118
+ Gid: 1000,
119
+ Size: 200,
120
+ ModTime: time.Unix(1392337404, 0),
121
+ Typeflag: 0x30,
122
+ Linkname: "",
123
+ Uname: "david",
124
+ Gname: "david",
125
+ Devmajor: 0,
126
+ Devminor: 0,
127
+ PAXRecords: map[string]string{
128
+ "GNU.sparse.major": "1",
129
+ "GNU.sparse.minor": "0",
130
+ "GNU.sparse.realsize": "200",
131
+ "GNU.sparse.name": "sparse-posix-1.0",
132
+ },
133
+ Format: FormatPAX,
134
+ }, {
135
+ Name: "end",
136
+ Mode: 420,
137
+ Uid: 1000,
138
+ Gid: 1000,
139
+ Size: 4,
140
+ ModTime: time.Unix(1392398319, 0),
141
+ Typeflag: 0x30,
142
+ Linkname: "",
143
+ Uname: "david",
144
+ Gname: "david",
145
+ Devmajor: 0,
146
+ Devminor: 0,
147
+ Format: FormatGNU,
148
+ }},
149
+ chksums: []string{
150
+ "6f53234398c2449fe67c1812d993012f",
151
+ "6f53234398c2449fe67c1812d993012f",
152
+ "6f53234398c2449fe67c1812d993012f",
153
+ "6f53234398c2449fe67c1812d993012f",
154
+ "b0061974914468de549a2af8ced10316",
155
+ },
156
+ }, {
157
+ file: "testdata/star.tar",
158
+ headers: []*Header{{
159
+ Name: "small.txt",
160
+ Mode: 0640,
161
+ Uid: 73025,
162
+ Gid: 5000,
163
+ Size: 5,
164
+ ModTime: time.Unix(1244592783, 0),
165
+ Typeflag: '0',
166
+ Uname: "dsymonds",
167
+ Gname: "eng",
168
+ AccessTime: time.Unix(1244592783, 0),
169
+ ChangeTime: time.Unix(1244592783, 0),
170
+ }, {
171
+ Name: "small2.txt",
172
+ Mode: 0640,
173
+ Uid: 73025,
174
+ Gid: 5000,
175
+ Size: 11,
176
+ ModTime: time.Unix(1244592783, 0),
177
+ Typeflag: '0',
178
+ Uname: "dsymonds",
179
+ Gname: "eng",
180
+ AccessTime: time.Unix(1244592783, 0),
181
+ ChangeTime: time.Unix(1244592783, 0),
182
+ }},
183
+ }, {
184
+ file: "testdata/v7.tar",
185
+ headers: []*Header{{
186
+ Name: "small.txt",
187
+ Mode: 0444,
188
+ Uid: 73025,
189
+ Gid: 5000,
190
+ Size: 5,
191
+ ModTime: time.Unix(1244593104, 0),
192
+ Typeflag: '0',
193
+ }, {
194
+ Name: "small2.txt",
195
+ Mode: 0444,
196
+ Uid: 73025,
197
+ Gid: 5000,
198
+ Size: 11,
199
+ ModTime: time.Unix(1244593104, 0),
200
+ Typeflag: '0',
201
+ }},
202
+ }, {
203
+ file: "testdata/pax.tar",
204
+ headers: []*Header{{
205
+ Name: "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
206
+ Mode: 0664,
207
+ Uid: 1000,
208
+ Gid: 1000,
209
+ Uname: "shane",
210
+ Gname: "shane",
211
+ Size: 7,
212
+ ModTime: time.Unix(1350244992, 23960108),
213
+ ChangeTime: time.Unix(1350244992, 23960108),
214
+ AccessTime: time.Unix(1350244992, 23960108),
215
+ Typeflag: TypeReg,
216
+ PAXRecords: map[string]string{
217
+ "path": "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
218
+ "mtime": "1350244992.023960108",
219
+ "atime": "1350244992.023960108",
220
+ "ctime": "1350244992.023960108",
221
+ },
222
+ Format: FormatPAX,
223
+ }, {
224
+ Name: "a/b",
225
+ Mode: 0777,
226
+ Uid: 1000,
227
+ Gid: 1000,
228
+ Uname: "shane",
229
+ Gname: "shane",
230
+ Size: 0,
231
+ ModTime: time.Unix(1350266320, 910238425),
232
+ ChangeTime: time.Unix(1350266320, 910238425),
233
+ AccessTime: time.Unix(1350266320, 910238425),
234
+ Typeflag: TypeSymlink,
235
+ Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
236
+ PAXRecords: map[string]string{
237
+ "linkpath": "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100",
238
+ "mtime": "1350266320.910238425",
239
+ "atime": "1350266320.910238425",
240
+ "ctime": "1350266320.910238425",
241
+ },
242
+ Format: FormatPAX,
243
+ }},
244
+ }, {
245
+ file: "testdata/pax-bad-hdr-file.tar",
246
+ err: ErrHeader,
247
+ }, {
248
+ file: "testdata/pax-bad-hdr-large.tar.bz2",
249
+ err: ErrFieldTooLong,
250
+ }, {
251
+ file: "testdata/pax-bad-mtime-file.tar",
252
+ err: ErrHeader,
253
+ }, {
254
+ file: "testdata/pax-pos-size-file.tar",
255
+ headers: []*Header{{
256
+ Name: "foo",
257
+ Mode: 0640,
258
+ Uid: 319973,
259
+ Gid: 5000,
260
+ Size: 999,
261
+ ModTime: time.Unix(1442282516, 0),
262
+ Typeflag: '0',
263
+ Uname: "joetsai",
264
+ Gname: "eng",
265
+ PAXRecords: map[string]string{
266
+ "size": "000000000000000000000999",
267
+ },
268
+ Format: FormatPAX,
269
+ }},
270
+ chksums: []string{
271
+ "0afb597b283fe61b5d4879669a350556",
272
+ },
273
+ }, {
274
+ file: "testdata/pax-records.tar",
275
+ headers: []*Header{{
276
+ Typeflag: TypeReg,
277
+ Name: "file",
278
+ Uname: strings.Repeat("long", 10),
279
+ ModTime: time.Unix(0, 0),
280
+ PAXRecords: map[string]string{
281
+ "GOLANG.pkg": "tar",
282
+ "comment": "Hello, 世界",
283
+ "uname": strings.Repeat("long", 10),
284
+ },
285
+ Format: FormatPAX,
286
+ }},
287
+ }, {
288
+ file: "testdata/pax-global-records.tar",
289
+ headers: []*Header{{
290
+ Typeflag: TypeXGlobalHeader,
291
+ Name: "global1",
292
+ PAXRecords: map[string]string{"path": "global1", "mtime": "1500000000.0"},
293
+ Format: FormatPAX,
294
+ }, {
295
+ Typeflag: TypeReg,
296
+ Name: "file1",
297
+ ModTime: time.Unix(0, 0),
298
+ Format: FormatUSTAR,
299
+ }, {
300
+ Typeflag: TypeReg,
301
+ Name: "file2",
302
+ PAXRecords: map[string]string{"path": "file2"},
303
+ ModTime: time.Unix(0, 0),
304
+ Format: FormatPAX,
305
+ }, {
306
+ Typeflag: TypeXGlobalHeader,
307
+ Name: "GlobalHead.0.0",
308
+ PAXRecords: map[string]string{"path": ""},
309
+ Format: FormatPAX,
310
+ }, {
311
+ Typeflag: TypeReg,
312
+ Name: "file3",
313
+ ModTime: time.Unix(0, 0),
314
+ Format: FormatUSTAR,
315
+ }, {
316
+ Typeflag: TypeReg,
317
+ Name: "file4",
318
+ ModTime: time.Unix(1400000000, 0),
319
+ PAXRecords: map[string]string{"mtime": "1400000000"},
320
+ Format: FormatPAX,
321
+ }},
322
+ }, {
323
+ file: "testdata/nil-uid.tar", // golang.org/issue/5290
324
+ headers: []*Header{{
325
+ Name: "P1050238.JPG.log",
326
+ Mode: 0664,
327
+ Uid: 0,
328
+ Gid: 0,
329
+ Size: 14,
330
+ ModTime: time.Unix(1365454838, 0),
331
+ Typeflag: TypeReg,
332
+ Linkname: "",
333
+ Uname: "eyefi",
334
+ Gname: "eyefi",
335
+ Devmajor: 0,
336
+ Devminor: 0,
337
+ Format: FormatGNU,
338
+ }},
339
+ }, {
340
+ file: "testdata/xattrs.tar",
341
+ headers: []*Header{{
342
+ Name: "small.txt",
343
+ Mode: 0644,
344
+ Uid: 1000,
345
+ Gid: 10,
346
+ Size: 5,
347
+ ModTime: time.Unix(1386065770, 448252320),
348
+ Typeflag: '0',
349
+ Uname: "alex",
350
+ Gname: "wheel",
351
+ AccessTime: time.Unix(1389782991, 419875220),
352
+ ChangeTime: time.Unix(1389782956, 794414986),
353
+ Xattrs: map[string]string{
354
+ "user.key": "value",
355
+ "user.key2": "value2",
356
+ // Interestingly, selinux encodes the terminating null inside the xattr
357
+ "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
358
+ },
359
+ PAXRecords: map[string]string{
360
+ "mtime": "1386065770.44825232",
361
+ "atime": "1389782991.41987522",
362
+ "ctime": "1389782956.794414986",
363
+ "SCHILY.xattr.user.key": "value",
364
+ "SCHILY.xattr.user.key2": "value2",
365
+ "SCHILY.xattr.security.selinux": "unconfined_u:object_r:default_t:s0\x00",
366
+ },
367
+ Format: FormatPAX,
368
+ }, {
369
+ Name: "small2.txt",
370
+ Mode: 0644,
371
+ Uid: 1000,
372
+ Gid: 10,
373
+ Size: 11,
374
+ ModTime: time.Unix(1386065770, 449252304),
375
+ Typeflag: '0',
376
+ Uname: "alex",
377
+ Gname: "wheel",
378
+ AccessTime: time.Unix(1389782991, 419875220),
379
+ ChangeTime: time.Unix(1386065770, 449252304),
380
+ Xattrs: map[string]string{
381
+ "security.selinux": "unconfined_u:object_r:default_t:s0\x00",
382
+ },
383
+ PAXRecords: map[string]string{
384
+ "mtime": "1386065770.449252304",
385
+ "atime": "1389782991.41987522",
386
+ "ctime": "1386065770.449252304",
387
+ "SCHILY.xattr.security.selinux": "unconfined_u:object_r:default_t:s0\x00",
388
+ },
389
+ Format: FormatPAX,
390
+ }},
391
+ }, {
392
+ // Matches the behavior of GNU, BSD, and STAR tar utilities.
393
+ file: "testdata/gnu-multi-hdrs.tar",
394
+ headers: []*Header{{
395
+ Name: "GNU2/GNU2/long-path-name",
396
+ Linkname: "GNU4/GNU4/long-linkpath-name",
397
+ ModTime: time.Unix(0, 0),
398
+ Typeflag: '2',
399
+ Format: FormatGNU,
400
+ }},
401
+ }, {
402
+ // GNU tar file with atime and ctime fields set.
403
+ // Created with the GNU tar v1.27.1.
404
+ // tar --incremental -S -cvf gnu-incremental.tar test2
405
+ file: "testdata/gnu-incremental.tar",
406
+ headers: []*Header{{
407
+ Name: "test2/",
408
+ Mode: 16877,
409
+ Uid: 1000,
410
+ Gid: 1000,
411
+ Size: 14,
412
+ ModTime: time.Unix(1441973427, 0),
413
+ Typeflag: 'D',
414
+ Uname: "rawr",
415
+ Gname: "dsnet",
416
+ AccessTime: time.Unix(1441974501, 0),
417
+ ChangeTime: time.Unix(1441973436, 0),
418
+ Format: FormatGNU,
419
+ }, {
420
+ Name: "test2/foo",
421
+ Mode: 33188,
422
+ Uid: 1000,
423
+ Gid: 1000,
424
+ Size: 64,
425
+ ModTime: time.Unix(1441973363, 0),
426
+ Typeflag: '0',
427
+ Uname: "rawr",
428
+ Gname: "dsnet",
429
+ AccessTime: time.Unix(1441974501, 0),
430
+ ChangeTime: time.Unix(1441973436, 0),
431
+ Format: FormatGNU,
432
+ }, {
433
+ Name: "test2/sparse",
434
+ Mode: 33188,
435
+ Uid: 1000,
436
+ Gid: 1000,
437
+ Size: 536870912,
438
+ ModTime: time.Unix(1441973427, 0),
439
+ Typeflag: 'S',
440
+ Uname: "rawr",
441
+ Gname: "dsnet",
442
+ AccessTime: time.Unix(1441991948, 0),
443
+ ChangeTime: time.Unix(1441973436, 0),
444
+ Format: FormatGNU,
445
+ }},
446
+ }, {
447
+ // Matches the behavior of GNU and BSD tar utilities.
448
+ file: "testdata/pax-multi-hdrs.tar",
449
+ headers: []*Header{{
450
+ Name: "bar",
451
+ Linkname: "PAX4/PAX4/long-linkpath-name",
452
+ ModTime: time.Unix(0, 0),
453
+ Typeflag: '2',
454
+ PAXRecords: map[string]string{
455
+ "linkpath": "PAX4/PAX4/long-linkpath-name",
456
+ },
457
+ Format: FormatPAX,
458
+ }},
459
+ }, {
460
+ // Both BSD and GNU tar truncate long names at first NUL even
461
+ // if there is data following that NUL character.
462
+ // This is reasonable as GNU long names are C-strings.
463
+ file: "testdata/gnu-long-nul.tar",
464
+ headers: []*Header{{
465
+ Name: "0123456789",
466
+ Mode: 0644,
467
+ Uid: 1000,
468
+ Gid: 1000,
469
+ ModTime: time.Unix(1486082191, 0),
470
+ Typeflag: '0',
471
+ Uname: "rawr",
472
+ Gname: "dsnet",
473
+ Format: FormatGNU,
474
+ }},
475
+ }, {
476
+ // This archive was generated by Writer but is readable by both
477
+ // GNU and BSD tar utilities.
478
+ // The archive generated by GNU is nearly byte-for-byte identical
479
+ // to the Go version except the Go version sets a negative Devminor
480
+ // just to force the GNU format.
481
+ file: "testdata/gnu-utf8.tar",
482
+ headers: []*Header{{
483
+ Name: "☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹",
484
+ Mode: 0644,
485
+ Uid: 1000, Gid: 1000,
486
+ ModTime: time.Unix(0, 0),
487
+ Typeflag: '0',
488
+ Uname: "☺",
489
+ Gname: "⚹",
490
+ Format: FormatGNU,
491
+ }},
492
+ }, {
493
+ // This archive was generated by Writer but is readable by both
494
+ // GNU and BSD tar utilities.
495
+ // The archive generated by GNU is nearly byte-for-byte identical
496
+ // to the Go version except the Go version sets a negative Devminor
497
+ // just to force the GNU format.
498
+ file: "testdata/gnu-not-utf8.tar",
499
+ headers: []*Header{{
500
+ Name: "hi\x80\x81\x82\x83bye",
501
+ Mode: 0644,
502
+ Uid: 1000,
503
+ Gid: 1000,
504
+ ModTime: time.Unix(0, 0),
505
+ Typeflag: '0',
506
+ Uname: "rawr",
507
+ Gname: "dsnet",
508
+ Format: FormatGNU,
509
+ }},
510
+ }, {
511
+ // BSD tar v3.1.2 and GNU tar v1.27.1 both rejects PAX records
512
+ // with NULs in the key.
513
+ file: "testdata/pax-nul-xattrs.tar",
514
+ err: ErrHeader,
515
+ }, {
516
+ // BSD tar v3.1.2 rejects a PAX path with NUL in the value, while
517
+ // GNU tar v1.27.1 simply truncates at first NUL.
518
+ // We emulate the behavior of BSD since it is strange doing NUL
519
+ // truncations since PAX records are length-prefix strings instead
520
+ // of NUL-terminated C-strings.
521
+ file: "testdata/pax-nul-path.tar",
522
+ err: ErrHeader,
523
+ }, {
524
+ file: "testdata/neg-size.tar",
525
+ err: ErrHeader,
526
+ }, {
527
+ file: "testdata/issue10968.tar",
528
+ err: ErrHeader,
529
+ }, {
530
+ file: "testdata/issue11169.tar",
531
+ err: ErrHeader,
532
+ }, {
533
+ file: "testdata/issue12435.tar",
534
+ err: ErrHeader,
535
+ }, {
536
+ // Ensure that we can read back the original Header as written with
537
+ // a buggy pre-Go1.8 tar.Writer.
538
+ file: "testdata/invalid-go17.tar",
539
+ headers: []*Header{{
540
+ Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/foo",
541
+ Uid: 010000000,
542
+ ModTime: time.Unix(0, 0),
543
+ Typeflag: '0',
544
+ }},
545
+ }, {
546
+ // USTAR archive with a regular entry with non-zero device numbers.
547
+ file: "testdata/ustar-file-devs.tar",
548
+ headers: []*Header{{
549
+ Name: "file",
550
+ Mode: 0644,
551
+ Typeflag: '0',
552
+ ModTime: time.Unix(0, 0),
553
+ Devmajor: 1,
554
+ Devminor: 1,
555
+ Format: FormatUSTAR,
556
+ }},
557
+ }, {
558
+ // Generated by Go, works on BSD tar v3.1.2 and GNU tar v.1.27.1.
559
+ file: "testdata/gnu-nil-sparse-data.tar",
560
+ headers: []*Header{{
561
+ Name: "sparse.db",
562
+ Typeflag: TypeGNUSparse,
563
+ Size: 1000,
564
+ ModTime: time.Unix(0, 0),
565
+ Format: FormatGNU,
566
+ }},
567
+ }, {
568
+ // Generated by Go, works on BSD tar v3.1.2 and GNU tar v.1.27.1.
569
+ file: "testdata/gnu-nil-sparse-hole.tar",
570
+ headers: []*Header{{
571
+ Name: "sparse.db",
572
+ Typeflag: TypeGNUSparse,
573
+ Size: 1000,
574
+ ModTime: time.Unix(0, 0),
575
+ Format: FormatGNU,
576
+ }},
577
+ }, {
578
+ // Generated by Go, works on BSD tar v3.1.2 and GNU tar v.1.27.1.
579
+ file: "testdata/pax-nil-sparse-data.tar",
580
+ headers: []*Header{{
581
+ Name: "sparse.db",
582
+ Typeflag: TypeReg,
583
+ Size: 1000,
584
+ ModTime: time.Unix(0, 0),
585
+ PAXRecords: map[string]string{
586
+ "size": "1512",
587
+ "GNU.sparse.major": "1",
588
+ "GNU.sparse.minor": "0",
589
+ "GNU.sparse.realsize": "1000",
590
+ "GNU.sparse.name": "sparse.db",
591
+ },
592
+ Format: FormatPAX,
593
+ }},
594
+ }, {
595
+ // Generated by Go, works on BSD tar v3.1.2 and GNU tar v.1.27.1.
596
+ file: "testdata/pax-nil-sparse-hole.tar",
597
+ headers: []*Header{{
598
+ Name: "sparse.db",
599
+ Typeflag: TypeReg,
600
+ Size: 1000,
601
+ ModTime: time.Unix(0, 0),
602
+ PAXRecords: map[string]string{
603
+ "size": "512",
604
+ "GNU.sparse.major": "1",
605
+ "GNU.sparse.minor": "0",
606
+ "GNU.sparse.realsize": "1000",
607
+ "GNU.sparse.name": "sparse.db",
608
+ },
609
+ Format: FormatPAX,
610
+ }},
611
+ }, {
612
+ file: "testdata/trailing-slash.tar",
613
+ headers: []*Header{{
614
+ Typeflag: TypeDir,
615
+ Name: strings.Repeat("123456789/", 30),
616
+ ModTime: time.Unix(0, 0),
617
+ PAXRecords: map[string]string{
618
+ "path": strings.Repeat("123456789/", 30),
619
+ },
620
+ Format: FormatPAX,
621
+ }},
622
+ }}
623
+
624
+ for _, v := range vectors {
625
+ t.Run(path.Base(v.file), func(t *testing.T) {
626
+ f, err := os.Open(v.file)
627
+ if err != nil {
628
+ t.Fatalf("unexpected error: %v", err)
629
+ }
630
+ defer f.Close()
631
+
632
+ var fr io.Reader = f
633
+ if strings.HasSuffix(v.file, ".bz2") {
634
+ fr = bzip2.NewReader(fr)
635
+ }
636
+
637
+ // Capture all headers and checksums.
638
+ var (
639
+ tr = NewReader(fr)
640
+ hdrs []*Header
641
+ chksums []string
642
+ rdbuf = make([]byte, 8)
643
+ )
644
+ for {
645
+ var hdr *Header
646
+ hdr, err = tr.Next()
647
+ if err != nil {
648
+ if err == io.EOF {
649
+ err = nil // Expected error
650
+ }
651
+ break
652
+ }
653
+ hdrs = append(hdrs, hdr)
654
+
655
+ if v.chksums == nil {
656
+ continue
657
+ }
658
+ h := md5.New()
659
+ _, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read
660
+ if err != nil {
661
+ break
662
+ }
663
+ chksums = append(chksums, fmt.Sprintf("%x", h.Sum(nil)))
664
+ }
665
+
666
+ for i, hdr := range hdrs {
667
+ if i >= len(v.headers) {
668
+ t.Fatalf("entry %d: unexpected header:\ngot %+v", i, *hdr)
669
+ }
670
+ if !reflect.DeepEqual(*hdr, *v.headers[i]) {
671
+ t.Fatalf("entry %d: incorrect header:\ngot %+v\nwant %+v", i, *hdr, *v.headers[i])
672
+ }
673
+ }
674
+ if len(hdrs) != len(v.headers) {
675
+ t.Fatalf("got %d headers, want %d headers", len(hdrs), len(v.headers))
676
+ }
677
+
678
+ for i, sum := range chksums {
679
+ if i >= len(v.chksums) {
680
+ t.Fatalf("entry %d: unexpected sum: got %s", i, sum)
681
+ }
682
+ if sum != v.chksums[i] {
683
+ t.Fatalf("entry %d: incorrect checksum: got %s, want %s", i, sum, v.chksums[i])
684
+ }
685
+ }
686
+
687
+ if err != v.err {
688
+ t.Fatalf("unexpected error: got %v, want %v", err, v.err)
689
+ }
690
+ f.Close()
691
+ })
692
+ }
693
+ }
694
+
695
+ func TestPartialRead(t *testing.T) {
696
+ type testCase struct {
697
+ cnt int // Number of bytes to read
698
+ output string // Expected value of string read
699
+ }
700
+ vectors := []struct {
701
+ file string
702
+ cases []testCase
703
+ }{{
704
+ file: "testdata/gnu.tar",
705
+ cases: []testCase{
706
+ {4, "Kilt"},
707
+ {6, "Google"},
708
+ },
709
+ }, {
710
+ file: "testdata/sparse-formats.tar",
711
+ cases: []testCase{
712
+ {2, "\x00G"},
713
+ {4, "\x00G\x00o"},
714
+ {6, "\x00G\x00o\x00G"},
715
+ {8, "\x00G\x00o\x00G\x00o"},
716
+ {4, "end\n"},
717
+ },
718
+ }}
719
+
720
+ for _, v := range vectors {
721
+ t.Run(path.Base(v.file), func(t *testing.T) {
722
+ f, err := os.Open(v.file)
723
+ if err != nil {
724
+ t.Fatalf("Open() error: %v", err)
725
+ }
726
+ defer f.Close()
727
+
728
+ tr := NewReader(f)
729
+ for i, tc := range v.cases {
730
+ hdr, err := tr.Next()
731
+ if err != nil || hdr == nil {
732
+ t.Fatalf("entry %d, Next(): got %v, want %v", i, err, nil)
733
+ }
734
+ buf := make([]byte, tc.cnt)
735
+ if _, err := io.ReadFull(tr, buf); err != nil {
736
+ t.Fatalf("entry %d, ReadFull(): got %v, want %v", i, err, nil)
737
+ }
738
+ if string(buf) != tc.output {
739
+ t.Fatalf("entry %d, ReadFull(): got %q, want %q", i, string(buf), tc.output)
740
+ }
741
+ }
742
+
743
+ if _, err := tr.Next(); err != io.EOF {
744
+ t.Fatalf("Next(): got %v, want EOF", err)
745
+ }
746
+ })
747
+ }
748
+ }
749
+
750
+ func TestUninitializedRead(t *testing.T) {
751
+ f, err := os.Open("testdata/gnu.tar")
752
+ if err != nil {
753
+ t.Fatalf("Unexpected error: %v", err)
754
+ }
755
+ defer f.Close()
756
+
757
+ tr := NewReader(f)
758
+ _, err = tr.Read([]byte{})
759
+ if err == nil || err != io.EOF {
760
+ t.Errorf("Unexpected error: %v, wanted %v", err, io.EOF)
761
+ }
762
+
763
+ }
764
+
765
// reader exposes only the Read method of the wrapped io.Reader, so the
// tar Reader cannot discover or use any Seek capability.
type reader struct{ io.Reader }

// readSeeker exposes both Read and a working Seek.
type readSeeker struct{ io.ReadSeeker }

// readBadSeeker exposes a Seek method that always fails, simulating a
// stream that merely claims to be seekable.
type readBadSeeker struct{ io.ReadSeeker }

func (rbs *readBadSeeker) Seek(int64, int) (int64, error) { return 0, fmt.Errorf("illegal seek") }
770
+
771
// TestReadTruncation test the ending condition on various truncated files and
// that truncated files are still detected even if the underlying io.Reader
// satisfies io.Seeker.
func TestReadTruncation(t *testing.T) {
	// Load four reference archives whose prefixes are truncated below.
	var ss []string
	for _, p := range []string{
		"testdata/gnu.tar",
		"testdata/ustar-file-reg.tar",
		"testdata/pax-path-hdr.tar",
		"testdata/sparse-formats.tar",
	} {
		buf, err := os.ReadFile(p)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		ss = append(ss, string(buf))
	}

	data1, data2, pax, sparse := ss[0], ss[1], ss[2], ss[3]
	data2 += strings.Repeat("\x00", 10*512)
	trash := strings.Repeat("garbage ", 64) // Exactly 512 bytes

	vectors := []struct {
		input string // Input stream
		cnt   int    // Expected number of headers read
		err   error  // Expected error outcome
	}{
		{"", 0, io.EOF}, // Empty file is a "valid" tar file
		{data1[:511], 0, io.ErrUnexpectedEOF},
		{data1[:512], 1, io.ErrUnexpectedEOF},
		{data1[:1024], 1, io.EOF},
		{data1[:1536], 2, io.ErrUnexpectedEOF},
		{data1[:2048], 2, io.EOF},
		{data1, 2, io.EOF},
		{data1[:2048] + data2[:1536], 3, io.EOF},
		{data2[:511], 0, io.ErrUnexpectedEOF},
		{data2[:512], 1, io.ErrUnexpectedEOF},
		{data2[:1195], 1, io.ErrUnexpectedEOF},
		{data2[:1196], 1, io.EOF}, // Exact end of data and start of padding
		{data2[:1200], 1, io.EOF},
		{data2[:1535], 1, io.EOF},
		{data2[:1536], 1, io.EOF}, // Exact end of padding
		{data2[:1536] + trash[:1], 1, io.ErrUnexpectedEOF},
		{data2[:1536] + trash[:511], 1, io.ErrUnexpectedEOF},
		{data2[:1536] + trash, 1, ErrHeader},
		{data2[:2048], 1, io.EOF}, // Exactly 1 empty block
		{data2[:2048] + trash[:1], 1, io.ErrUnexpectedEOF},
		{data2[:2048] + trash[:511], 1, io.ErrUnexpectedEOF},
		{data2[:2048] + trash, 1, ErrHeader},
		{data2[:2560], 1, io.EOF}, // Exactly 2 empty blocks (normal end-of-stream)
		{data2[:2560] + trash[:1], 1, io.EOF},
		{data2[:2560] + trash[:511], 1, io.EOF},
		{data2[:2560] + trash, 1, io.EOF},
		{data2[:3072], 1, io.EOF},
		{pax, 0, io.EOF}, // PAX header without data is a "valid" tar file
		{pax + trash[:1], 0, io.ErrUnexpectedEOF},
		{pax + trash[:511], 0, io.ErrUnexpectedEOF},
		{sparse[:511], 0, io.ErrUnexpectedEOF},
		{sparse[:512], 0, io.ErrUnexpectedEOF},
		{sparse[:3584], 1, io.EOF},
		{sparse[:9200], 1, io.EOF}, // Terminate in padding of sparse header
		{sparse[:9216], 1, io.EOF},
		{sparse[:9728], 2, io.ErrUnexpectedEOF},
		{sparse[:10240], 2, io.EOF},
		{sparse[:11264], 2, io.ErrUnexpectedEOF},
		{sparse, 5, io.EOF},
		{sparse + trash, 5, io.EOF},
	}

	// Each vector is tried 6 ways: three reader flavors (plain io.Reader,
	// a working io.ReadSeeker, and a seeker whose Seek always fails)
	// crossed with two draining modes ("auto" relies on Next to skip file
	// data; "manual" drains each entry explicitly via writeTo).
	for i, v := range vectors {
		for j := 0; j < 6; j++ {
			var tr *Reader
			var s1, s2 string

			switch j {
			case 0:
				tr = NewReader(&reader{strings.NewReader(v.input)})
				s1, s2 = "io.Reader", "auto"
			case 1:
				tr = NewReader(&reader{strings.NewReader(v.input)})
				s1, s2 = "io.Reader", "manual"
			case 2:
				tr = NewReader(&readSeeker{strings.NewReader(v.input)})
				s1, s2 = "io.ReadSeeker", "auto"
			case 3:
				tr = NewReader(&readSeeker{strings.NewReader(v.input)})
				s1, s2 = "io.ReadSeeker", "manual"
			case 4:
				tr = NewReader(&readBadSeeker{strings.NewReader(v.input)})
				s1, s2 = "ReadBadSeeker", "auto"
			case 5:
				tr = NewReader(&readBadSeeker{strings.NewReader(v.input)})
				s1, s2 = "ReadBadSeeker", "manual"
			}

			var cnt int
			var err error
			for {
				if _, err = tr.Next(); err != nil {
					break
				}
				cnt++
				if s2 == "manual" {
					if _, err = tr.writeTo(io.Discard); err != nil {
						break
					}
				}
			}
			if err != v.err {
				t.Errorf("test %d, NewReader(%s) with %s discard: got %v, want %v",
					i, s1, s2, err, v.err)
			}
			if cnt != v.cnt {
				t.Errorf("test %d, NewReader(%s) with %s discard: got %d headers, want %d headers",
					i, s1, s2, cnt, v.cnt)
			}
		}
	}
}
890
+
891
+ // TestReadHeaderOnly tests that Reader does not attempt to read special
892
+ // header-only files.
893
+ func TestReadHeaderOnly(t *testing.T) {
894
+ f, err := os.Open("testdata/hdr-only.tar")
895
+ if err != nil {
896
+ t.Fatalf("unexpected error: %v", err)
897
+ }
898
+ defer f.Close()
899
+
900
+ var hdrs []*Header
901
+ tr := NewReader(f)
902
+ for {
903
+ hdr, err := tr.Next()
904
+ if err == io.EOF {
905
+ break
906
+ }
907
+ if err != nil {
908
+ t.Errorf("Next(): got %v, want %v", err, nil)
909
+ continue
910
+ }
911
+ hdrs = append(hdrs, hdr)
912
+
913
+ // If a special flag, we should read nothing.
914
+ cnt, _ := io.ReadFull(tr, []byte{0})
915
+ if cnt > 0 && hdr.Typeflag != TypeReg {
916
+ t.Errorf("ReadFull(...): got %d bytes, want 0 bytes", cnt)
917
+ }
918
+ }
919
+
920
+ // File is crafted with 16 entries. The later 8 are identical to the first
921
+ // 8 except that the size is set.
922
+ if len(hdrs) != 16 {
923
+ t.Fatalf("len(hdrs): got %d, want %d", len(hdrs), 16)
924
+ }
925
+ for i := 0; i < 8; i++ {
926
+ hdr1, hdr2 := hdrs[i+0], hdrs[i+8]
927
+ hdr1.Size, hdr2.Size = 0, 0
928
+ if !reflect.DeepEqual(*hdr1, *hdr2) {
929
+ t.Errorf("incorrect header:\ngot %+v\nwant %+v", *hdr1, *hdr2)
930
+ }
931
+ }
932
+ }
933
+
934
// TestMergePAX verifies that mergePAX folds a set of PAX records into a
// Header, reporting an error when a record value cannot be parsed.
func TestMergePAX(t *testing.T) {
	vectors := []struct {
		in   map[string]string
		want *Header
		ok   bool
	}{{
		in: map[string]string{
			"path":  "a/b/c",
			"uid":   "1000",
			"mtime": "1350244992.023960108",
		},
		want: &Header{
			Name:    "a/b/c",
			Uid:     1000,
			ModTime: time.Unix(1350244992, 23960108),
			PAXRecords: map[string]string{
				"path":  "a/b/c",
				"uid":   "1000",
				"mtime": "1350244992.023960108",
			},
		},
		ok: true,
	}, {
		// A non-numeric gid must be rejected.
		in: map[string]string{
			"gid": "gtgergergersagersgers",
		},
		ok: false,
	}, {
		// Unknown keys are kept verbatim in PAXRecords; SCHILY.xattr.*
		// keys are additionally mirrored into the Xattrs map.
		in: map[string]string{
			"missing":          "missing",
			"SCHILY.xattr.key": "value",
		},
		want: &Header{
			Xattrs: map[string]string{"key": "value"},
			PAXRecords: map[string]string{
				"missing":          "missing",
				"SCHILY.xattr.key": "value",
			},
		},
		ok: true,
	}}

	for i, v := range vectors {
		got := new(Header)
		err := mergePAX(got, v.in)
		// The merged header is only compared for vectors expected to
		// succeed; failure vectors are checked via the error alone.
		if v.ok && !reflect.DeepEqual(*got, *v.want) {
			t.Errorf("test %d, mergePAX(...):\ngot %+v\nwant %+v", i, *got, *v.want)
		}
		if ok := err == nil; ok != v.ok {
			t.Errorf("test %d, mergePAX(...): got %v, want %v", i, ok, v.ok)
		}
	}
}
987
+
988
// TestParsePAX exercises parsePAX against raw PAX extended-header payloads
// in "length key=value\n" record form, including GNU sparse 0.0 records.
func TestParsePAX(t *testing.T) {
	vectors := []struct {
		in   string
		want map[string]string
		ok   bool
	}{
		{"", nil, true},
		{"6 k=1\n", map[string]string{"k": "1"}, true},
		{"10 a=name\n", map[string]string{"a": "name"}, true},
		{"9 a=name\n", map[string]string{"a": "name"}, true},
		{"30 mtime=1350244992.023960108\n", map[string]string{"mtime": "1350244992.023960108"}, true},
		{"3 somelongkey=\n", nil, false},
		{"50 tooshort=\n", nil, false},
		{"13 key1=haha\n13 key2=nana\n13 key3=kaka\n",
			map[string]string{"key1": "haha", "key2": "nana", "key3": "kaka"}, true},
		// A later record for the same key overrides the earlier value.
		{"13 key1=val1\n13 key2=val2\n8 key1=\n",
			map[string]string{"key1": "", "key2": "val2"}, true},
		// GNU sparse 0.0 offset/numbytes pairs are folded into a single
		// comma-separated paxGNUSparseMap record.
		{"22 GNU.sparse.size=10\n26 GNU.sparse.numblocks=2\n" +
			"23 GNU.sparse.offset=1\n25 GNU.sparse.numbytes=2\n" +
			"23 GNU.sparse.offset=3\n25 GNU.sparse.numbytes=4\n",
			map[string]string{paxGNUSparseSize: "10", paxGNUSparseNumBlocks: "2", paxGNUSparseMap: "1,2,3,4"}, true},
		{"22 GNU.sparse.size=10\n26 GNU.sparse.numblocks=1\n" +
			"25 GNU.sparse.numbytes=2\n23 GNU.sparse.offset=1\n",
			nil, false},
		{"22 GNU.sparse.size=10\n26 GNU.sparse.numblocks=1\n" +
			"25 GNU.sparse.offset=1,2\n25 GNU.sparse.numbytes=2\n",
			nil, false},
	}

	for i, v := range vectors {
		r := strings.NewReader(v.in)
		got, err := parsePAX(r)
		if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) {
			t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want)
		}
		if ok := err == nil; ok != v.ok {
			t.Errorf("test %d, parsePAX(): got %v, want %v", i, ok, v.ok)
		}
	}
}
1028
+
1029
// TestReadOldGNUSparseMap constructs old-style GNU headers carrying inline
// sparse maps (optionally spilling into extended sparse blocks) and checks
// that readOldGNUSparseMap decodes the map, real size, and errors correctly.
func TestReadOldGNUSparseMap(t *testing.T) {
	// populateSparseMap fills sa with as many entries from sps as fit,
	// setting the isExtended marker if entries remain; it returns the rest.
	populateSparseMap := func(sa sparseArray, sps []string) []string {
		for i := 0; len(sps) > 0 && i < sa.maxEntries(); i++ {
			copy(sa.entry(i), sps[0])
			sps = sps[1:]
		}
		if len(sps) > 0 {
			copy(sa.isExtended(), "\x80")
		}
		return sps
	}

	// makeInput serializes a GNU header (plus any extended sparse blocks)
	// holding the given real size string and sparse entry strings.
	makeInput := func(format Format, size string, sps ...string) (out []byte) {
		// Write the initial GNU header.
		var blk block
		gnu := blk.toGNU()
		sparse := gnu.sparse()
		copy(gnu.realSize(), size)
		sps = populateSparseMap(sparse, sps)
		if format != FormatUnknown {
			blk.setFormat(format)
		}
		out = append(out, blk[:]...)

		// Write extended sparse blocks.
		for len(sps) > 0 {
			var blk block
			sps = populateSparseMap(blk.toSparse(), sps)
			out = append(out, blk[:]...)
		}
		return out
	}

	// makeSparseStrings renders each entry as two 12-byte numeric fields
	// (offset then length), the on-disk encoding of a GNU sparse entry.
	makeSparseStrings := func(sp []sparseEntry) (out []string) {
		var f formatter
		for _, s := range sp {
			var b [24]byte
			f.formatNumeric(b[:12], s.Offset)
			f.formatNumeric(b[12:], s.Length)
			out = append(out, string(b[:]))
		}
		return out
	}

	vectors := []struct {
		input    []byte
		wantMap  sparseDatas
		wantSize int64
		wantErr  error
	}{{
		input:   makeInput(FormatUnknown, ""),
		wantErr: ErrHeader,
	}, {
		input:    makeInput(FormatGNU, "1234", "fewa"),
		wantSize: 01234,
		wantErr:  ErrHeader,
	}, {
		input:    makeInput(FormatGNU, "0031"),
		wantSize: 031,
	}, {
		input:   makeInput(FormatGNU, "80"),
		wantErr: ErrHeader,
	}, {
		input: makeInput(FormatGNU, "1234",
			makeSparseStrings(sparseDatas{{0, 0}, {1, 1}})...),
		wantMap:  sparseDatas{{0, 0}, {1, 1}},
		wantSize: 01234,
	}, {
		input: makeInput(FormatGNU, "1234",
			append(makeSparseStrings(sparseDatas{{0, 0}, {1, 1}}), []string{"", "blah"}...)...),
		wantMap:  sparseDatas{{0, 0}, {1, 1}},
		wantSize: 01234,
	}, {
		input: makeInput(FormatGNU, "3333",
			makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}})...),
		wantMap:  sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}},
		wantSize: 03333,
	}, {
		input: makeInput(FormatGNU, "",
			append(append(
				makeSparseStrings(sparseDatas{{0, 1}, {2, 1}}),
				[]string{"", ""}...),
				makeSparseStrings(sparseDatas{{4, 1}, {6, 1}})...)...),
		wantMap: sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}},
	}, {
		input: makeInput(FormatGNU, "",
			makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}})...)[:blockSize],
		wantErr: io.ErrUnexpectedEOF,
	}, {
		input: makeInput(FormatGNU, "",
			makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}})...)[:3*blockSize/2],
		wantErr: io.ErrUnexpectedEOF,
	}, {
		input: makeInput(FormatGNU, "",
			makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}})...),
		wantMap: sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}},
	}, {
		input: makeInput(FormatGNU, "",
			makeSparseStrings(sparseDatas{{10 << 30, 512}, {20 << 30, 512}})...),
		wantMap: sparseDatas{{10 << 30, 512}, {20 << 30, 512}},
	}}

	for i, v := range vectors {
		// The first block is handed to readOldGNUSparseMap directly; any
		// remaining input feeds the Reader for extended sparse blocks.
		var blk block
		var hdr Header
		v.input = v.input[copy(blk[:], v.input):]
		tr := Reader{r: bytes.NewReader(v.input)}
		got, err := tr.readOldGNUSparseMap(&hdr, &blk)
		if !equalSparseEntries(got, v.wantMap) {
			t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap)
		}
		if err != v.wantErr {
			t.Errorf("test %d, readOldGNUSparseMap() = %v, want %v", i, err, v.wantErr)
		}
		if hdr.Size != v.wantSize {
			t.Errorf("test %d, Header.Size = %d, want %d", i, hdr.Size, v.wantSize)
		}
	}
}
1148
+
1149
// TestReadGNUSparsePAXHeaders exercises readGNUSparsePAXHeaders over the
// GNU sparse PAX variants (0.0/0.1 maps carried in PAX records, and 1.0
// maps carried in the file data itself).
func TestReadGNUSparsePAXHeaders(t *testing.T) {
	// padInput pads s with NULs up to the next 512-byte block boundary.
	padInput := func(s string) string {
		return s + string(zeroBlock[:blockPadding(int64(len(s)))])
	}

	vectors := []struct {
		inputData string
		inputHdrs map[string]string
		wantMap   sparseDatas
		wantSize  int64
		wantName  string
		wantErr   error
	}{{
		inputHdrs: nil,
		wantErr:   nil,
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseNumBlocks: strconv.FormatInt(math.MaxInt64, 10),
			paxGNUSparseMap:       "0,1,2,3",
		},
		wantErr: ErrHeader,
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseNumBlocks: "4\x00",
			paxGNUSparseMap:       "0,1,2,3",
		},
		wantErr: ErrHeader,
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseNumBlocks: "4",
			paxGNUSparseMap:       "0,1,2,3",
		},
		wantErr: ErrHeader,
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseNumBlocks: "2",
			paxGNUSparseMap:       "0,1,2,3",
		},
		wantMap: sparseDatas{{0, 1}, {2, 3}},
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseNumBlocks: "2",
			paxGNUSparseMap:       "0, 1,2,3",
		},
		wantErr: ErrHeader,
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseNumBlocks: "2",
			paxGNUSparseMap:       "0,1,02,3",
			paxGNUSparseRealSize:  "4321",
		},
		wantMap:  sparseDatas{{0, 1}, {2, 3}},
		wantSize: 4321,
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseNumBlocks: "2",
			paxGNUSparseMap:       "0,one1,2,3",
		},
		wantErr: ErrHeader,
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseMajor:     "0",
			paxGNUSparseMinor:     "0",
			paxGNUSparseNumBlocks: "2",
			paxGNUSparseMap:       "0,1,2,3",
			paxGNUSparseSize:      "1234",
			paxGNUSparseRealSize:  "4321",
			paxGNUSparseName:      "realname",
		},
		wantMap:  sparseDatas{{0, 1}, {2, 3}},
		wantSize: 1234,
		wantName: "realname",
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseMajor:     "0",
			paxGNUSparseMinor:     "0",
			paxGNUSparseNumBlocks: "1",
			paxGNUSparseMap:       "10737418240,512",
			paxGNUSparseSize:      "10737418240",
			paxGNUSparseName:      "realname",
		},
		wantMap:  sparseDatas{{10737418240, 512}},
		wantSize: 10737418240,
		wantName: "realname",
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseMajor:     "0",
			paxGNUSparseMinor:     "0",
			paxGNUSparseNumBlocks: "0",
			paxGNUSparseMap:       "",
		},
		wantMap: sparseDatas{},
	}, {
		inputHdrs: map[string]string{
			paxGNUSparseMajor:     "0",
			paxGNUSparseMinor:     "1",
			paxGNUSparseNumBlocks: "4",
			paxGNUSparseMap:       "0,5,10,5,20,5,30,5",
		},
		wantMap: sparseDatas{{0, 5}, {10, 5}, {20, 5}, {30, 5}},
	}, {
		// Version 1.0 expects the map in the file data, not in PAX records.
		inputHdrs: map[string]string{
			paxGNUSparseMajor:     "1",
			paxGNUSparseMinor:     "0",
			paxGNUSparseNumBlocks: "4",
			paxGNUSparseMap:       "0,5,10,5,20,5,30,5",
		},
		wantErr: io.ErrUnexpectedEOF,
	}, {
		inputData: padInput("0\n"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantMap:   sparseDatas{},
	}, {
		inputData: padInput("0\n")[:blockSize-1] + "#",
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantMap:   sparseDatas{},
	}, {
		inputData: padInput("0"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantErr:   io.ErrUnexpectedEOF,
	}, {
		inputData: padInput("ab\n"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantErr:   ErrHeader,
	}, {
		inputData: padInput("1\n2\n3\n"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantMap:   sparseDatas{{2, 3}},
	}, {
		inputData: padInput("1\n2\n"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantErr:   io.ErrUnexpectedEOF,
	}, {
		inputData: padInput("1\n2\n\n"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantErr:   ErrHeader,
	}, {
		inputData: string(zeroBlock[:]) + padInput("0\n"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantErr:   ErrHeader,
	}, {
		inputData: strings.Repeat("0", blockSize) + padInput("1\n5\n1\n"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantMap:   sparseDatas{{5, 1}},
	}, {
		inputData: padInput(fmt.Sprintf("%d\n", int64(math.MaxInt64))),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantErr:   ErrHeader,
	}, {
		inputData: padInput(strings.Repeat("0", 300) + "1\n" + strings.Repeat("0", 1000) + "5\n" + strings.Repeat("0", 800) + "2\n"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantMap:   sparseDatas{{5, 2}},
	}, {
		inputData: padInput("2\n10737418240\n512\n21474836480\n512\n"),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantMap:   sparseDatas{{10737418240, 512}, {21474836480, 512}},
	}, {
		inputData: padInput("100\n" + func() string {
			var ss []string
			for i := 0; i < 100; i++ {
				ss = append(ss, fmt.Sprintf("%d\n%d\n", int64(i)<<30, 512))
			}
			return strings.Join(ss, "")
		}()),
		inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"},
		wantMap: func() (spd sparseDatas) {
			for i := 0; i < 100; i++ {
				spd = append(spd, sparseEntry{int64(i) << 30, 512})
			}
			return spd
		}(),
	}}

	for i, v := range vectors {
		var hdr Header
		hdr.PAXRecords = v.inputHdrs
		r := strings.NewReader(v.inputData + "#") // Add canary byte
		tr := Reader{curr: &regFileReader{r, int64(r.Len())}}
		got, err := tr.readGNUSparsePAXHeaders(&hdr)
		if !equalSparseEntries(got, v.wantMap) {
			t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap)
		}
		if err != v.wantErr {
			t.Errorf("test %d, readGNUSparsePAXHeaders() = %v, want %v", i, err, v.wantErr)
		}
		if hdr.Size != v.wantSize {
			t.Errorf("test %d, Header.Size = %d, want %d", i, hdr.Size, v.wantSize)
		}
		if hdr.Name != v.wantName {
			t.Errorf("test %d, Header.Name = %s, want %s", i, hdr.Name, v.wantName)
		}
		// On success, the parser must stop exactly at the end of the map
		// and leave the trailing canary byte unread.
		if v.wantErr == nil && r.Len() == 0 {
			t.Errorf("test %d, canary byte unexpectedly consumed", i)
		}
	}
}
1345
+
1346
+ // testNonEmptyReader wraps an io.Reader and ensures that
1347
+ // Read is never called with an empty buffer.
1348
+ type testNonEmptyReader struct{ io.Reader }
1349
+
1350
+ func (r testNonEmptyReader) Read(b []byte) (int, error) {
1351
+ if len(b) == 0 {
1352
+ return 0, errors.New("unexpected empty Read call")
1353
+ }
1354
+ return r.Reader.Read(b)
1355
+ }
1356
+
1357
+ func TestFileReader(t *testing.T) {
1358
+ type (
1359
+ testRead struct { // Read(cnt) == (wantStr, wantErr)
1360
+ cnt int
1361
+ wantStr string
1362
+ wantErr error
1363
+ }
1364
+ testWriteTo struct { // WriteTo(testFile{ops}) == (wantCnt, wantErr)
1365
+ ops fileOps
1366
+ wantCnt int64
1367
+ wantErr error
1368
+ }
1369
+ testRemaining struct { // logicalRemaining() == wantLCnt, physicalRemaining() == wantPCnt
1370
+ wantLCnt int64
1371
+ wantPCnt int64
1372
+ }
1373
+ testFnc any // testRead | testWriteTo | testRemaining
1374
+ )
1375
+
1376
+ type (
1377
+ makeReg struct {
1378
+ str string
1379
+ size int64
1380
+ }
1381
+ makeSparse struct {
1382
+ makeReg makeReg
1383
+ spd sparseDatas
1384
+ size int64
1385
+ }
1386
+ fileMaker any // makeReg | makeSparse
1387
+ )
1388
+
1389
+ vectors := []struct {
1390
+ maker fileMaker
1391
+ tests []testFnc
1392
+ }{{
1393
+ maker: makeReg{"", 0},
1394
+ tests: []testFnc{
1395
+ testRemaining{0, 0},
1396
+ testRead{0, "", io.EOF},
1397
+ testRead{1, "", io.EOF},
1398
+ testWriteTo{nil, 0, nil},
1399
+ testRemaining{0, 0},
1400
+ },
1401
+ }, {
1402
+ maker: makeReg{"", 1},
1403
+ tests: []testFnc{
1404
+ testRemaining{1, 1},
1405
+ testRead{5, "", io.ErrUnexpectedEOF},
1406
+ testWriteTo{nil, 0, io.ErrUnexpectedEOF},
1407
+ testRemaining{1, 1},
1408
+ },
1409
+ }, {
1410
+ maker: makeReg{"hello", 5},
1411
+ tests: []testFnc{
1412
+ testRemaining{5, 5},
1413
+ testRead{5, "hello", io.EOF},
1414
+ testRemaining{0, 0},
1415
+ },
1416
+ }, {
1417
+ maker: makeReg{"hello, world", 50},
1418
+ tests: []testFnc{
1419
+ testRemaining{50, 50},
1420
+ testRead{7, "hello, ", nil},
1421
+ testRemaining{43, 43},
1422
+ testRead{5, "world", nil},
1423
+ testRemaining{38, 38},
1424
+ testWriteTo{nil, 0, io.ErrUnexpectedEOF},
1425
+ testRead{1, "", io.ErrUnexpectedEOF},
1426
+ testRemaining{38, 38},
1427
+ },
1428
+ }, {
1429
+ maker: makeReg{"hello, world", 5},
1430
+ tests: []testFnc{
1431
+ testRemaining{5, 5},
1432
+ testRead{0, "", nil},
1433
+ testRead{4, "hell", nil},
1434
+ testRemaining{1, 1},
1435
+ testWriteTo{fileOps{"o"}, 1, nil},
1436
+ testRemaining{0, 0},
1437
+ testWriteTo{nil, 0, nil},
1438
+ testRead{0, "", io.EOF},
1439
+ },
1440
+ }, {
1441
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{0, 2}, {5, 3}}, 8},
1442
+ tests: []testFnc{
1443
+ testRemaining{8, 5},
1444
+ testRead{3, "ab\x00", nil},
1445
+ testRead{10, "\x00\x00cde", io.EOF},
1446
+ testRemaining{0, 0},
1447
+ },
1448
+ }, {
1449
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{0, 2}, {5, 3}}, 8},
1450
+ tests: []testFnc{
1451
+ testRemaining{8, 5},
1452
+ testWriteTo{fileOps{"ab", int64(3), "cde"}, 8, nil},
1453
+ testRemaining{0, 0},
1454
+ },
1455
+ }, {
1456
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{0, 2}, {5, 3}}, 10},
1457
+ tests: []testFnc{
1458
+ testRemaining{10, 5},
1459
+ testRead{100, "ab\x00\x00\x00cde\x00\x00", io.EOF},
1460
+ testRemaining{0, 0},
1461
+ },
1462
+ }, {
1463
+ maker: makeSparse{makeReg{"abc", 5}, sparseDatas{{0, 2}, {5, 3}}, 10},
1464
+ tests: []testFnc{
1465
+ testRemaining{10, 5},
1466
+ testRead{100, "ab\x00\x00\x00c", io.ErrUnexpectedEOF},
1467
+ testRemaining{4, 2},
1468
+ },
1469
+ }, {
1470
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}}, 8},
1471
+ tests: []testFnc{
1472
+ testRemaining{8, 5},
1473
+ testRead{8, "\x00abc\x00\x00de", io.EOF},
1474
+ testRemaining{0, 0},
1475
+ },
1476
+ }, {
1477
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 0}, {6, 0}, {6, 2}}, 8},
1478
+ tests: []testFnc{
1479
+ testRemaining{8, 5},
1480
+ testRead{8, "\x00abc\x00\x00de", io.EOF},
1481
+ testRemaining{0, 0},
1482
+ },
1483
+ }, {
1484
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 0}, {6, 0}, {6, 2}}, 8},
1485
+ tests: []testFnc{
1486
+ testRemaining{8, 5},
1487
+ testWriteTo{fileOps{int64(1), "abc", int64(2), "de"}, 8, nil},
1488
+ testRemaining{0, 0},
1489
+ },
1490
+ }, {
1491
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}}, 10},
1492
+ tests: []testFnc{
1493
+ testRead{100, "\x00abc\x00\x00de\x00\x00", io.EOF},
1494
+ },
1495
+ }, {
1496
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}}, 10},
1497
+ tests: []testFnc{
1498
+ testWriteTo{fileOps{int64(1), "abc", int64(2), "de", int64(1), "\x00"}, 10, nil},
1499
+ },
1500
+ }, {
1501
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}, {8, 0}, {8, 0}, {8, 0}, {8, 0}}, 10},
1502
+ tests: []testFnc{
1503
+ testRead{100, "\x00abc\x00\x00de\x00\x00", io.EOF},
1504
+ },
1505
+ }, {
1506
+ maker: makeSparse{makeReg{"", 0}, sparseDatas{}, 2},
1507
+ tests: []testFnc{
1508
+ testRead{100, "\x00\x00", io.EOF},
1509
+ },
1510
+ }, {
1511
+ maker: makeSparse{makeReg{"", 8}, sparseDatas{{1, 3}, {6, 5}}, 15},
1512
+ tests: []testFnc{
1513
+ testRead{100, "\x00", io.ErrUnexpectedEOF},
1514
+ },
1515
+ }, {
1516
+ maker: makeSparse{makeReg{"ab", 2}, sparseDatas{{1, 3}, {6, 5}}, 15},
1517
+ tests: []testFnc{
1518
+ testRead{100, "\x00ab", errMissData},
1519
+ },
1520
+ }, {
1521
+ maker: makeSparse{makeReg{"ab", 8}, sparseDatas{{1, 3}, {6, 5}}, 15},
1522
+ tests: []testFnc{
1523
+ testRead{100, "\x00ab", io.ErrUnexpectedEOF},
1524
+ },
1525
+ }, {
1526
+ maker: makeSparse{makeReg{"abc", 3}, sparseDatas{{1, 3}, {6, 5}}, 15},
1527
+ tests: []testFnc{
1528
+ testRead{100, "\x00abc\x00\x00", errMissData},
1529
+ },
1530
+ }, {
1531
+ maker: makeSparse{makeReg{"abc", 8}, sparseDatas{{1, 3}, {6, 5}}, 15},
1532
+ tests: []testFnc{
1533
+ testRead{100, "\x00abc\x00\x00", io.ErrUnexpectedEOF},
1534
+ },
1535
+ }, {
1536
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 5}}, 15},
1537
+ tests: []testFnc{
1538
+ testRead{100, "\x00abc\x00\x00de", errMissData},
1539
+ },
1540
+ }, {
1541
+ maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 5}}, 15},
1542
+ tests: []testFnc{
1543
+ testWriteTo{fileOps{int64(1), "abc", int64(2), "de"}, 8, errMissData},
1544
+ },
1545
+ }, {
1546
+ maker: makeSparse{makeReg{"abcde", 8}, sparseDatas{{1, 3}, {6, 5}}, 15},
1547
+ tests: []testFnc{
1548
+ testRead{100, "\x00abc\x00\x00de", io.ErrUnexpectedEOF},
1549
+ },
1550
+ }, {
1551
+ maker: makeSparse{makeReg{"abcdefghEXTRA", 13}, sparseDatas{{1, 3}, {6, 5}}, 15},
1552
+ tests: []testFnc{
1553
+ testRemaining{15, 13},
1554
+ testRead{100, "\x00abc\x00\x00defgh\x00\x00\x00\x00", errUnrefData},
1555
+ testWriteTo{nil, 0, errUnrefData},
1556
+ testRemaining{0, 5},
1557
+ },
1558
+ }, {
1559
+ maker: makeSparse{makeReg{"abcdefghEXTRA", 13}, sparseDatas{{1, 3}, {6, 5}}, 15},
1560
+ tests: []testFnc{
1561
+ testRemaining{15, 13},
1562
+ testWriteTo{fileOps{int64(1), "abc", int64(2), "defgh", int64(4)}, 15, errUnrefData},
1563
+ testRead{100, "", errUnrefData},
1564
+ testRemaining{0, 5},
1565
+ },
1566
+ }}
1567
+
1568
+ for i, v := range vectors {
1569
+ var fr fileReader
1570
+ switch maker := v.maker.(type) {
1571
+ case makeReg:
1572
+ r := testNonEmptyReader{strings.NewReader(maker.str)}
1573
+ fr = &regFileReader{r, maker.size}
1574
+ case makeSparse:
1575
+ if !validateSparseEntries(maker.spd, maker.size) {
1576
+ t.Fatalf("invalid sparse map: %v", maker.spd)
1577
+ }
1578
+ sph := invertSparseEntries(maker.spd, maker.size)
1579
+ r := testNonEmptyReader{strings.NewReader(maker.makeReg.str)}
1580
+ fr = &regFileReader{r, maker.makeReg.size}
1581
+ fr = &sparseFileReader{fr, sph, 0}
1582
+ default:
1583
+ t.Fatalf("test %d, unknown make operation: %T", i, maker)
1584
+ }
1585
+
1586
+ for j, tf := range v.tests {
1587
+ switch tf := tf.(type) {
1588
+ case testRead:
1589
+ b := make([]byte, tf.cnt)
1590
+ n, err := fr.Read(b)
1591
+ if got := string(b[:n]); got != tf.wantStr || err != tf.wantErr {
1592
+ t.Errorf("test %d.%d, Read(%d):\ngot (%q, %v)\nwant (%q, %v)", i, j, tf.cnt, got, err, tf.wantStr, tf.wantErr)
1593
+ }
1594
+ case testWriteTo:
1595
+ f := &testFile{ops: tf.ops}
1596
+ got, err := fr.WriteTo(f)
1597
+ if _, ok := err.(testError); ok {
1598
+ t.Errorf("test %d.%d, WriteTo(): %v", i, j, err)
1599
+ } else if got != tf.wantCnt || err != tf.wantErr {
1600
+ t.Errorf("test %d.%d, WriteTo() = (%d, %v), want (%d, %v)", i, j, got, err, tf.wantCnt, tf.wantErr)
1601
+ }
1602
+ if len(f.ops) > 0 {
1603
+ t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops))
1604
+ }
1605
+ case testRemaining:
1606
+ if got := fr.logicalRemaining(); got != tf.wantLCnt {
1607
+ t.Errorf("test %d.%d, logicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
1608
+ }
1609
+ if got := fr.physicalRemaining(); got != tf.wantPCnt {
1610
+ t.Errorf("test %d.%d, physicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
1611
+ }
1612
+ default:
1613
+ t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf)
1614
+ }
1615
+ }
1616
+ }
1617
+ }
1618
+
1619
+ func TestInsecurePaths(t *testing.T) {
1620
+ t.Setenv("GODEBUG", "tarinsecurepath=0")
1621
+ for _, path := range []string{
1622
+ "../foo",
1623
+ "/foo",
1624
+ "a/b/../../../c",
1625
+ } {
1626
+ var buf bytes.Buffer
1627
+ tw := NewWriter(&buf)
1628
+ tw.WriteHeader(&Header{
1629
+ Name: path,
1630
+ })
1631
+ const securePath = "secure"
1632
+ tw.WriteHeader(&Header{
1633
+ Name: securePath,
1634
+ })
1635
+ tw.Close()
1636
+
1637
+ tr := NewReader(&buf)
1638
+ h, err := tr.Next()
1639
+ if err != ErrInsecurePath {
1640
+ t.Errorf("tr.Next for file %q: got err %v, want ErrInsecurePath", path, err)
1641
+ continue
1642
+ }
1643
+ if h.Name != path {
1644
+ t.Errorf("tr.Next for file %q: got name %q, want %q", path, h.Name, path)
1645
+ }
1646
+ // Error should not be sticky.
1647
+ h, err = tr.Next()
1648
+ if err != nil {
1649
+ t.Errorf("tr.Next for file %q: got err %v, want nil", securePath, err)
1650
+ }
1651
+ if h.Name != securePath {
1652
+ t.Errorf("tr.Next for file %q: got name %q, want %q", securePath, h.Name, securePath)
1653
+ }
1654
+ }
1655
+ }
1656
+
1657
+ func TestDisableInsecurePathCheck(t *testing.T) {
1658
+ t.Setenv("GODEBUG", "tarinsecurepath=1")
1659
+ var buf bytes.Buffer
1660
+ tw := NewWriter(&buf)
1661
+ const name = "/foo"
1662
+ tw.WriteHeader(&Header{
1663
+ Name: name,
1664
+ })
1665
+ tw.Close()
1666
+ tr := NewReader(&buf)
1667
+ h, err := tr.Next()
1668
+ if err != nil {
1669
+ t.Fatalf("tr.Next with tarinsecurepath=1: got err %v, want nil", err)
1670
+ }
1671
+ if h.Name != name {
1672
+ t.Fatalf("tr.Next with tarinsecurepath=1: got name %q, want %q", h.Name, name)
1673
+ }
1674
+ }
platform/dbops/binaries/go/go/src/archive/tar/stat_actime1.go ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2012 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ //go:build aix || linux || dragonfly || openbsd || solaris
6
+
7
+ package tar
8
+
9
+ import (
10
+ "syscall"
11
+ "time"
12
+ )
13
+
14
+ func statAtime(st *syscall.Stat_t) time.Time {
15
+ return time.Unix(st.Atim.Unix())
16
+ }
17
+
18
+ func statCtime(st *syscall.Stat_t) time.Time {
19
+ return time.Unix(st.Ctim.Unix())
20
+ }
platform/dbops/binaries/go/go/src/archive/tar/stat_actime2.go ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2012 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ //go:build darwin || freebsd || netbsd
6
+
7
+ package tar
8
+
9
+ import (
10
+ "syscall"
11
+ "time"
12
+ )
13
+
14
+ func statAtime(st *syscall.Stat_t) time.Time {
15
+ return time.Unix(st.Atimespec.Unix())
16
+ }
17
+
18
+ func statCtime(st *syscall.Stat_t) time.Time {
19
+ return time.Unix(st.Ctimespec.Unix())
20
+ }
platform/dbops/binaries/go/go/src/archive/tar/stat_unix.go ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2012 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ //go:build unix
6
+
7
+ package tar
8
+
9
+ import (
10
+ "io/fs"
11
+ "os/user"
12
+ "runtime"
13
+ "strconv"
14
+ "sync"
15
+ "syscall"
16
+ )
17
+
18
+ func init() {
19
+ sysStat = statUnix
20
+ }
21
+
22
+ // userMap and groupMap caches UID and GID lookups for performance reasons.
23
+ // The downside is that renaming uname or gname by the OS never takes effect.
24
+ var userMap, groupMap sync.Map // map[int]string
25
+
26
+ func statUnix(fi fs.FileInfo, h *Header) error {
27
+ sys, ok := fi.Sys().(*syscall.Stat_t)
28
+ if !ok {
29
+ return nil
30
+ }
31
+ h.Uid = int(sys.Uid)
32
+ h.Gid = int(sys.Gid)
33
+
34
+ // Best effort at populating Uname and Gname.
35
+ // The os/user functions may fail for any number of reasons
36
+ // (not implemented on that platform, cgo not enabled, etc).
37
+ if u, ok := userMap.Load(h.Uid); ok {
38
+ h.Uname = u.(string)
39
+ } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
40
+ h.Uname = u.Username
41
+ userMap.Store(h.Uid, h.Uname)
42
+ }
43
+ if g, ok := groupMap.Load(h.Gid); ok {
44
+ h.Gname = g.(string)
45
+ } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
46
+ h.Gname = g.Name
47
+ groupMap.Store(h.Gid, h.Gname)
48
+ }
49
+
50
+ h.AccessTime = statAtime(sys)
51
+ h.ChangeTime = statCtime(sys)
52
+
53
+ // Best effort at populating Devmajor and Devminor.
54
+ if h.Typeflag == TypeChar || h.Typeflag == TypeBlock {
55
+ dev := uint64(sys.Rdev) // May be int32 or uint32
56
+ switch runtime.GOOS {
57
+ case "aix":
58
+ var major, minor uint32
59
+ major = uint32((dev & 0x3fffffff00000000) >> 32)
60
+ minor = uint32((dev & 0x00000000ffffffff) >> 0)
61
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
62
+ case "linux":
63
+ // Copied from golang.org/x/sys/unix/dev_linux.go.
64
+ major := uint32((dev & 0x00000000000fff00) >> 8)
65
+ major |= uint32((dev & 0xfffff00000000000) >> 32)
66
+ minor := uint32((dev & 0x00000000000000ff) >> 0)
67
+ minor |= uint32((dev & 0x00000ffffff00000) >> 12)
68
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
69
+ case "darwin", "ios":
70
+ // Copied from golang.org/x/sys/unix/dev_darwin.go.
71
+ major := uint32((dev >> 24) & 0xff)
72
+ minor := uint32(dev & 0xffffff)
73
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
74
+ case "dragonfly":
75
+ // Copied from golang.org/x/sys/unix/dev_dragonfly.go.
76
+ major := uint32((dev >> 8) & 0xff)
77
+ minor := uint32(dev & 0xffff00ff)
78
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
79
+ case "freebsd":
80
+ // Copied from golang.org/x/sys/unix/dev_freebsd.go.
81
+ major := uint32((dev >> 8) & 0xff)
82
+ minor := uint32(dev & 0xffff00ff)
83
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
84
+ case "netbsd":
85
+ // Copied from golang.org/x/sys/unix/dev_netbsd.go.
86
+ major := uint32((dev & 0x000fff00) >> 8)
87
+ minor := uint32((dev & 0x000000ff) >> 0)
88
+ minor |= uint32((dev & 0xfff00000) >> 12)
89
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
90
+ case "openbsd":
91
+ // Copied from golang.org/x/sys/unix/dev_openbsd.go.
92
+ major := uint32((dev & 0x0000ff00) >> 8)
93
+ minor := uint32((dev & 0x000000ff) >> 0)
94
+ minor |= uint32((dev & 0xffff0000) >> 8)
95
+ h.Devmajor, h.Devminor = int64(major), int64(minor)
96
+ default:
97
+ // TODO: Implement solaris (see https://golang.org/issue/8106)
98
+ }
99
+ }
100
+ return nil
101
+ }
platform/dbops/binaries/go/go/src/archive/tar/strconv.go ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2016 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar
6
+
7
+ import (
8
+ "bytes"
9
+ "fmt"
10
+ "strconv"
11
+ "strings"
12
+ "time"
13
+ )
14
+
15
+ // hasNUL reports whether the NUL character exists within s.
16
+ func hasNUL(s string) bool {
17
+ return strings.Contains(s, "\x00")
18
+ }
19
+
20
+ // isASCII reports whether the input is an ASCII C-style string.
21
+ func isASCII(s string) bool {
22
+ for _, c := range s {
23
+ if c >= 0x80 || c == 0x00 {
24
+ return false
25
+ }
26
+ }
27
+ return true
28
+ }
29
+
30
+ // toASCII converts the input to an ASCII C-style string.
31
+ // This is a best effort conversion, so invalid characters are dropped.
32
+ func toASCII(s string) string {
33
+ if isASCII(s) {
34
+ return s
35
+ }
36
+ b := make([]byte, 0, len(s))
37
+ for _, c := range s {
38
+ if c < 0x80 && c != 0x00 {
39
+ b = append(b, byte(c))
40
+ }
41
+ }
42
+ return string(b)
43
+ }
44
+
45
+ type parser struct {
46
+ err error // Last error seen
47
+ }
48
+
49
+ type formatter struct {
50
+ err error // Last error seen
51
+ }
52
+
53
+ // parseString parses bytes as a NUL-terminated C-style string.
54
+ // If a NUL byte is not found then the whole slice is returned as a string.
55
+ func (*parser) parseString(b []byte) string {
56
+ if i := bytes.IndexByte(b, 0); i >= 0 {
57
+ return string(b[:i])
58
+ }
59
+ return string(b)
60
+ }
61
+
62
+ // formatString copies s into b, NUL-terminating if possible.
63
+ func (f *formatter) formatString(b []byte, s string) {
64
+ if len(s) > len(b) {
65
+ f.err = ErrFieldTooLong
66
+ }
67
+ copy(b, s)
68
+ if len(s) < len(b) {
69
+ b[len(s)] = 0
70
+ }
71
+
72
+ // Some buggy readers treat regular files with a trailing slash
73
+ // in the V7 path field as a directory even though the full path
74
+ // recorded elsewhere (e.g., via PAX record) contains no trailing slash.
75
+ if len(s) > len(b) && b[len(b)-1] == '/' {
76
+ n := len(strings.TrimRight(s[:len(b)-1], "/"))
77
+ b[n] = 0 // Replace trailing slash with NUL terminator
78
+ }
79
+ }
80
+
81
+ // fitsInBase256 reports whether x can be encoded into n bytes using base-256
82
+ // encoding. Unlike octal encoding, base-256 encoding does not require that the
83
+ // string ends with a NUL character. Thus, all n bytes are available for output.
84
+ //
85
+ // If operating in binary mode, this assumes strict GNU binary mode; which means
86
+ // that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
87
+ // equivalent to the sign bit in two's complement form.
88
+ func fitsInBase256(n int, x int64) bool {
89
+ binBits := uint(n-1) * 8
90
+ return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
91
+ }
92
+
93
+ // parseNumeric parses the input as being encoded in either base-256 or octal.
94
+ // This function may return negative numbers.
95
+ // If parsing fails or an integer overflow occurs, err will be set.
96
+ func (p *parser) parseNumeric(b []byte) int64 {
97
+ // Check for base-256 (binary) format first.
98
+ // If the first bit is set, then all following bits constitute a two's
99
+ // complement encoded number in big-endian byte order.
100
+ if len(b) > 0 && b[0]&0x80 != 0 {
101
+ // Handling negative numbers relies on the following identity:
102
+ // -a-1 == ^a
103
+ //
104
+ // If the number is negative, we use an inversion mask to invert the
105
+ // data bytes and treat the value as an unsigned number.
106
+ var inv byte // 0x00 if positive or zero, 0xff if negative
107
+ if b[0]&0x40 != 0 {
108
+ inv = 0xff
109
+ }
110
+
111
+ var x uint64
112
+ for i, c := range b {
113
+ c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
114
+ if i == 0 {
115
+ c &= 0x7f // Ignore signal bit in first byte
116
+ }
117
+ if (x >> 56) > 0 {
118
+ p.err = ErrHeader // Integer overflow
119
+ return 0
120
+ }
121
+ x = x<<8 | uint64(c)
122
+ }
123
+ if (x >> 63) > 0 {
124
+ p.err = ErrHeader // Integer overflow
125
+ return 0
126
+ }
127
+ if inv == 0xff {
128
+ return ^int64(x)
129
+ }
130
+ return int64(x)
131
+ }
132
+
133
+ // Normal case is base-8 (octal) format.
134
+ return p.parseOctal(b)
135
+ }
136
+
137
+ // formatNumeric encodes x into b using base-8 (octal) encoding if possible.
138
+ // Otherwise it will attempt to use base-256 (binary) encoding.
139
+ func (f *formatter) formatNumeric(b []byte, x int64) {
140
+ if fitsInOctal(len(b), x) {
141
+ f.formatOctal(b, x)
142
+ return
143
+ }
144
+
145
+ if fitsInBase256(len(b), x) {
146
+ for i := len(b) - 1; i >= 0; i-- {
147
+ b[i] = byte(x)
148
+ x >>= 8
149
+ }
150
+ b[0] |= 0x80 // Highest bit indicates binary format
151
+ return
152
+ }
153
+
154
+ f.formatOctal(b, 0) // Last resort, just write zero
155
+ f.err = ErrFieldTooLong
156
+ }
157
+
158
+ func (p *parser) parseOctal(b []byte) int64 {
159
+ // Because unused fields are filled with NULs, we need
160
+ // to skip leading NULs. Fields may also be padded with
161
+ // spaces or NULs.
162
+ // So we remove leading and trailing NULs and spaces to
163
+ // be sure.
164
+ b = bytes.Trim(b, " \x00")
165
+
166
+ if len(b) == 0 {
167
+ return 0
168
+ }
169
+ x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
170
+ if perr != nil {
171
+ p.err = ErrHeader
172
+ }
173
+ return int64(x)
174
+ }
175
+
176
+ func (f *formatter) formatOctal(b []byte, x int64) {
177
+ if !fitsInOctal(len(b), x) {
178
+ x = 0 // Last resort, just write zero
179
+ f.err = ErrFieldTooLong
180
+ }
181
+
182
+ s := strconv.FormatInt(x, 8)
183
+ // Add leading zeros, but leave room for a NUL.
184
+ if n := len(b) - len(s) - 1; n > 0 {
185
+ s = strings.Repeat("0", n) + s
186
+ }
187
+ f.formatString(b, s)
188
+ }
189
+
190
+ // fitsInOctal reports whether the integer x fits in a field n-bytes long
191
+ // using octal encoding with the appropriate NUL terminator.
192
+ func fitsInOctal(n int, x int64) bool {
193
+ octBits := uint(n-1) * 3
194
+ return x >= 0 && (n >= 22 || x < 1<<octBits)
195
+ }
196
+
197
+ // parsePAXTime takes a string of the form %d.%d as described in the PAX
198
+ // specification. Note that this implementation allows for negative timestamps,
199
+ // which is allowed for by the PAX specification, but not always portable.
200
+ func parsePAXTime(s string) (time.Time, error) {
201
+ const maxNanoSecondDigits = 9
202
+
203
+ // Split string into seconds and sub-seconds parts.
204
+ ss, sn, _ := strings.Cut(s, ".")
205
+
206
+ // Parse the seconds.
207
+ secs, err := strconv.ParseInt(ss, 10, 64)
208
+ if err != nil {
209
+ return time.Time{}, ErrHeader
210
+ }
211
+ if len(sn) == 0 {
212
+ return time.Unix(secs, 0), nil // No sub-second values
213
+ }
214
+
215
+ // Parse the nanoseconds.
216
+ if strings.Trim(sn, "0123456789") != "" {
217
+ return time.Time{}, ErrHeader
218
+ }
219
+ if len(sn) < maxNanoSecondDigits {
220
+ sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
221
+ } else {
222
+ sn = sn[:maxNanoSecondDigits] // Right truncate
223
+ }
224
+ nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
225
+ if len(ss) > 0 && ss[0] == '-' {
226
+ return time.Unix(secs, -1*nsecs), nil // Negative correction
227
+ }
228
+ return time.Unix(secs, nsecs), nil
229
+ }
230
+
231
+ // formatPAXTime converts ts into a time of the form %d.%d as described in the
232
+ // PAX specification. This function is capable of negative timestamps.
233
+ func formatPAXTime(ts time.Time) (s string) {
234
+ secs, nsecs := ts.Unix(), ts.Nanosecond()
235
+ if nsecs == 0 {
236
+ return strconv.FormatInt(secs, 10)
237
+ }
238
+
239
+ // If seconds is negative, then perform correction.
240
+ sign := ""
241
+ if secs < 0 {
242
+ sign = "-" // Remember sign
243
+ secs = -(secs + 1) // Add a second to secs
244
+ nsecs = -(nsecs - 1e9) // Take that second away from nsecs
245
+ }
246
+ return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
247
+ }
248
+
249
+ // parsePAXRecord parses the input PAX record string into a key-value pair.
250
+ // If parsing is successful, it will slice off the currently read record and
251
+ // return the remainder as r.
252
+ func parsePAXRecord(s string) (k, v, r string, err error) {
253
+ // The size field ends at the first space.
254
+ nStr, rest, ok := strings.Cut(s, " ")
255
+ if !ok {
256
+ return "", "", s, ErrHeader
257
+ }
258
+
259
+ // Parse the first token as a decimal integer.
260
+ n, perr := strconv.ParseInt(nStr, 10, 0) // Intentionally parse as native int
261
+ if perr != nil || n < 5 || n > int64(len(s)) {
262
+ return "", "", s, ErrHeader
263
+ }
264
+ n -= int64(len(nStr) + 1) // convert from index in s to index in rest
265
+ if n <= 0 {
266
+ return "", "", s, ErrHeader
267
+ }
268
+
269
+ // Extract everything between the space and the final newline.
270
+ rec, nl, rem := rest[:n-1], rest[n-1:n], rest[n:]
271
+ if nl != "\n" {
272
+ return "", "", s, ErrHeader
273
+ }
274
+
275
+ // The first equals separates the key from the value.
276
+ k, v, ok = strings.Cut(rec, "=")
277
+ if !ok {
278
+ return "", "", s, ErrHeader
279
+ }
280
+
281
+ if !validPAXRecord(k, v) {
282
+ return "", "", s, ErrHeader
283
+ }
284
+ return k, v, rem, nil
285
+ }
286
+
287
+ // formatPAXRecord formats a single PAX record, prefixing it with the
288
+ // appropriate length.
289
+ func formatPAXRecord(k, v string) (string, error) {
290
+ if !validPAXRecord(k, v) {
291
+ return "", ErrHeader
292
+ }
293
+
294
+ const padding = 3 // Extra padding for ' ', '=', and '\n'
295
+ size := len(k) + len(v) + padding
296
+ size += len(strconv.Itoa(size))
297
+ record := strconv.Itoa(size) + " " + k + "=" + v + "\n"
298
+
299
+ // Final adjustment if adding size field increased the record size.
300
+ if len(record) != size {
301
+ size = len(record)
302
+ record = strconv.Itoa(size) + " " + k + "=" + v + "\n"
303
+ }
304
+ return record, nil
305
+ }
306
+
307
+ // validPAXRecord reports whether the key-value pair is valid where each
308
+ // record is formatted as:
309
+ //
310
+ // "%d %s=%s\n" % (size, key, value)
311
+ //
312
+ // Keys and values should be UTF-8, but the number of bad writers out there
313
+ // forces us to be a more liberal.
314
+ // Thus, we only reject all keys with NUL, and only reject NULs in values
315
+ // for the PAX version of the USTAR string fields.
316
+ // The key must not contain an '=' character.
317
+ func validPAXRecord(k, v string) bool {
318
+ if k == "" || strings.Contains(k, "=") {
319
+ return false
320
+ }
321
+ switch k {
322
+ case paxPath, paxLinkpath, paxUname, paxGname:
323
+ return !hasNUL(v)
324
+ default:
325
+ return !hasNUL(k)
326
+ }
327
+ }
platform/dbops/binaries/go/go/src/archive/tar/strconv_test.go ADDED
@@ -0,0 +1,441 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2016 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar
6
+
7
+ import (
8
+ "math"
9
+ "strings"
10
+ "testing"
11
+ "time"
12
+ )
13
+
14
+ func TestFitsInBase256(t *testing.T) {
15
+ vectors := []struct {
16
+ in int64
17
+ width int
18
+ ok bool
19
+ }{
20
+ {+1, 8, true},
21
+ {0, 8, true},
22
+ {-1, 8, true},
23
+ {1 << 56, 8, false},
24
+ {(1 << 56) - 1, 8, true},
25
+ {-1 << 56, 8, true},
26
+ {(-1 << 56) - 1, 8, false},
27
+ {121654, 8, true},
28
+ {-9849849, 8, true},
29
+ {math.MaxInt64, 9, true},
30
+ {0, 9, true},
31
+ {math.MinInt64, 9, true},
32
+ {math.MaxInt64, 12, true},
33
+ {0, 12, true},
34
+ {math.MinInt64, 12, true},
35
+ }
36
+
37
+ for _, v := range vectors {
38
+ ok := fitsInBase256(v.width, v.in)
39
+ if ok != v.ok {
40
+ t.Errorf("fitsInBase256(%d, %d): got %v, want %v", v.in, v.width, ok, v.ok)
41
+ }
42
+ }
43
+ }
44
+
45
+ func TestParseNumeric(t *testing.T) {
46
+ vectors := []struct {
47
+ in string
48
+ want int64
49
+ ok bool
50
+ }{
51
+ // Test base-256 (binary) encoded values.
52
+ {"", 0, true},
53
+ {"\x80", 0, true},
54
+ {"\x80\x00", 0, true},
55
+ {"\x80\x00\x00", 0, true},
56
+ {"\xbf", (1 << 6) - 1, true},
57
+ {"\xbf\xff", (1 << 14) - 1, true},
58
+ {"\xbf\xff\xff", (1 << 22) - 1, true},
59
+ {"\xff", -1, true},
60
+ {"\xff\xff", -1, true},
61
+ {"\xff\xff\xff", -1, true},
62
+ {"\xc0", -1 * (1 << 6), true},
63
+ {"\xc0\x00", -1 * (1 << 14), true},
64
+ {"\xc0\x00\x00", -1 * (1 << 22), true},
65
+ {"\x87\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true},
66
+ {"\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", 537795476381659745, true},
67
+ {"\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true},
68
+ {"\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", -615126028225187231, true},
69
+ {"\x80\x7f\xff\xff\xff\xff\xff\xff\xff", math.MaxInt64, true},
70
+ {"\x80\x80\x00\x00\x00\x00\x00\x00\x00", 0, false},
71
+ {"\xff\x80\x00\x00\x00\x00\x00\x00\x00", math.MinInt64, true},
72
+ {"\xff\x7f\xff\xff\xff\xff\xff\xff\xff", 0, false},
73
+ {"\xf5\xec\xd1\xc7\x7e\x5f\x26\x48\x81\x9f\x8f\x9b", 0, false},
74
+
75
+ // Test base-8 (octal) encoded values.
76
+ {"0000000\x00", 0, true},
77
+ {" \x0000000\x00", 0, true},
78
+ {" \x0000003\x00", 3, true},
79
+ {"00000000227\x00", 0227, true},
80
+ {"032033\x00 ", 032033, true},
81
+ {"320330\x00 ", 0320330, true},
82
+ {"0000660\x00 ", 0660, true},
83
+ {"\x00 0000660\x00 ", 0660, true},
84
+ {"0123456789abcdef", 0, false},
85
+ {"0123456789\x00abcdef", 0, false},
86
+ {"01234567\x0089abcdef", 342391, true},
87
+ {"0123\x7e\x5f\x264123", 0, false},
88
+ }
89
+
90
+ for _, v := range vectors {
91
+ var p parser
92
+ got := p.parseNumeric([]byte(v.in))
93
+ ok := (p.err == nil)
94
+ if ok != v.ok {
95
+ if v.ok {
96
+ t.Errorf("parseNumeric(%q): got parsing failure, want success", v.in)
97
+ } else {
98
+ t.Errorf("parseNumeric(%q): got parsing success, want failure", v.in)
99
+ }
100
+ }
101
+ if ok && got != v.want {
102
+ t.Errorf("parseNumeric(%q): got %d, want %d", v.in, got, v.want)
103
+ }
104
+ }
105
+ }
106
+
107
+ func TestFormatNumeric(t *testing.T) {
108
+ vectors := []struct {
109
+ in int64
110
+ want string
111
+ ok bool
112
+ }{
113
+ // Test base-8 (octal) encoded values.
114
+ {0, "0\x00", true},
115
+ {7, "7\x00", true},
116
+ {8, "\x80\x08", true},
117
+ {077, "77\x00", true},
118
+ {0100, "\x80\x00\x40", true},
119
+ {0, "0000000\x00", true},
120
+ {0123, "0000123\x00", true},
121
+ {07654321, "7654321\x00", true},
122
+ {07777777, "7777777\x00", true},
123
+ {010000000, "\x80\x00\x00\x00\x00\x20\x00\x00", true},
124
+ {0, "00000000000\x00", true},
125
+ {000001234567, "00001234567\x00", true},
126
+ {076543210321, "76543210321\x00", true},
127
+ {012345670123, "12345670123\x00", true},
128
+ {077777777777, "77777777777\x00", true},
129
+ {0100000000000, "\x80\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00", true},
130
+ {math.MaxInt64, "777777777777777777777\x00", true},
131
+
132
+ // Test base-256 (binary) encoded values.
133
+ {-1, "\xff", true},
134
+ {-1, "\xff\xff", true},
135
+ {-1, "\xff\xff\xff", true},
136
+ {(1 << 0), "0", false},
137
+ {(1 << 8) - 1, "\x80\xff", true},
138
+ {(1 << 8), "0\x00", false},
139
+ {(1 << 16) - 1, "\x80\xff\xff", true},
140
+ {(1 << 16), "00\x00", false},
141
+ {-1 * (1 << 0), "\xff", true},
142
+ {-1*(1<<0) - 1, "0", false},
143
+ {-1 * (1 << 8), "\xff\x00", true},
144
+ {-1*(1<<8) - 1, "0\x00", false},
145
+ {-1 * (1 << 16), "\xff\x00\x00", true},
146
+ {-1*(1<<16) - 1, "00\x00", false},
147
+ {537795476381659745, "0000000\x00", false},
148
+ {537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
149
+ {-615126028225187231, "0000000\x00", false},
150
+ {-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
151
+ {math.MaxInt64, "0000000\x00", false},
152
+ {math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
153
+ {math.MinInt64, "0000000\x00", false},
154
+ {math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
155
+ {math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
156
+ {math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
157
+ }
158
+
159
+ for _, v := range vectors {
160
+ var f formatter
161
+ got := make([]byte, len(v.want))
162
+ f.formatNumeric(got, v.in)
163
+ ok := (f.err == nil)
164
+ if ok != v.ok {
165
+ if v.ok {
166
+ t.Errorf("formatNumeric(%d): got formatting failure, want success", v.in)
167
+ } else {
168
+ t.Errorf("formatNumeric(%d): got formatting success, want failure", v.in)
169
+ }
170
+ }
171
+ if string(got) != v.want {
172
+ t.Errorf("formatNumeric(%d): got %q, want %q", v.in, got, v.want)
173
+ }
174
+ }
175
+ }
176
+
177
+ func TestFitsInOctal(t *testing.T) {
178
+ vectors := []struct {
179
+ input int64
180
+ width int
181
+ ok bool
182
+ }{
183
+ {-1, 1, false},
184
+ {-1, 2, false},
185
+ {-1, 3, false},
186
+ {0, 1, true},
187
+ {0 + 1, 1, false},
188
+ {0, 2, true},
189
+ {07, 2, true},
190
+ {07 + 1, 2, false},
191
+ {0, 4, true},
192
+ {0777, 4, true},
193
+ {0777 + 1, 4, false},
194
+ {0, 8, true},
195
+ {07777777, 8, true},
196
+ {07777777 + 1, 8, false},
197
+ {0, 12, true},
198
+ {077777777777, 12, true},
199
+ {077777777777 + 1, 12, false},
200
+ {math.MaxInt64, 22, true},
201
+ {012345670123, 12, true},
202
+ {01564164, 12, true},
203
+ {-012345670123, 12, false},
204
+ {-01564164, 12, false},
205
+ {-1564164, 30, false},
206
+ }
207
+
208
+ for _, v := range vectors {
209
+ ok := fitsInOctal(v.width, v.input)
210
+ if ok != v.ok {
211
+ t.Errorf("checkOctal(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok)
212
+ }
213
+ }
214
+ }
215
+
216
+ func TestParsePAXTime(t *testing.T) {
217
+ vectors := []struct {
218
+ in string
219
+ want time.Time
220
+ ok bool
221
+ }{
222
+ {"1350244992.023960108", time.Unix(1350244992, 23960108), true},
223
+ {"1350244992.02396010", time.Unix(1350244992, 23960100), true},
224
+ {"1350244992.0239601089", time.Unix(1350244992, 23960108), true},
225
+ {"1350244992.3", time.Unix(1350244992, 300000000), true},
226
+ {"1350244992", time.Unix(1350244992, 0), true},
227
+ {"-1.000000001", time.Unix(-1, -1e0+0e0), true},
228
+ {"-1.000001", time.Unix(-1, -1e3+0e0), true},
229
+ {"-1.001000", time.Unix(-1, -1e6+0e0), true},
230
+ {"-1", time.Unix(-1, -0e0+0e0), true},
231
+ {"-1.999000", time.Unix(-1, -1e9+1e6), true},
232
+ {"-1.999999", time.Unix(-1, -1e9+1e3), true},
233
+ {"-1.999999999", time.Unix(-1, -1e9+1e0), true},
234
+ {"0.000000001", time.Unix(0, 1e0+0e0), true},
235
+ {"0.000001", time.Unix(0, 1e3+0e0), true},
236
+ {"0.001000", time.Unix(0, 1e6+0e0), true},
237
+ {"0", time.Unix(0, 0e0), true},
238
+ {"0.999000", time.Unix(0, 1e9-1e6), true},
239
+ {"0.999999", time.Unix(0, 1e9-1e3), true},
240
+ {"0.999999999", time.Unix(0, 1e9-1e0), true},
241
+ {"1.000000001", time.Unix(+1, +1e0-0e0), true},
242
+ {"1.000001", time.Unix(+1, +1e3-0e0), true},
243
+ {"1.001000", time.Unix(+1, +1e6-0e0), true},
244
+ {"1", time.Unix(+1, +0e0-0e0), true},
245
+ {"1.999000", time.Unix(+1, +1e9-1e6), true},
246
+ {"1.999999", time.Unix(+1, +1e9-1e3), true},
247
+ {"1.999999999", time.Unix(+1, +1e9-1e0), true},
248
+ {"-1350244992.023960108", time.Unix(-1350244992, -23960108), true},
249
+ {"-1350244992.02396010", time.Unix(-1350244992, -23960100), true},
250
+ {"-1350244992.0239601089", time.Unix(-1350244992, -23960108), true},
251
+ {"-1350244992.3", time.Unix(-1350244992, -300000000), true},
252
+ {"-1350244992", time.Unix(-1350244992, 0), true},
253
+ {"", time.Time{}, false},
254
+ {"0", time.Unix(0, 0), true},
255
+ {"1.", time.Unix(1, 0), true},
256
+ {"0.0", time.Unix(0, 0), true},
257
+ {".5", time.Time{}, false},
258
+ {"-1.3", time.Unix(-1, -3e8), true},
259
+ {"-1.0", time.Unix(-1, -0e0), true},
260
+ {"-0.0", time.Unix(-0, -0e0), true},
261
+ {"-0.1", time.Unix(-0, -1e8), true},
262
+ {"-0.01", time.Unix(-0, -1e7), true},
263
+ {"-0.99", time.Unix(-0, -99e7), true},
264
+ {"-0.98", time.Unix(-0, -98e7), true},
265
+ {"-1.1", time.Unix(-1, -1e8), true},
266
+ {"-1.01", time.Unix(-1, -1e7), true},
267
+ {"-2.99", time.Unix(-2, -99e7), true},
268
+ {"-5.98", time.Unix(-5, -98e7), true},
269
+ {"-", time.Time{}, false},
270
+ {"+", time.Time{}, false},
271
+ {"-1.-1", time.Time{}, false},
272
+ {"99999999999999999999999999999999999999999999999", time.Time{}, false},
273
+ {"0.123456789abcdef", time.Time{}, false},
274
+ {"foo", time.Time{}, false},
275
+ {"\x00", time.Time{}, false},
276
+ {"𝟵𝟴𝟳𝟲𝟱.𝟰𝟯𝟮𝟭𝟬", time.Time{}, false}, // Unicode numbers (U+1D7EC to U+1D7F5)
277
+ {"98765﹒43210", time.Time{}, false}, // Unicode period (U+FE52)
278
+ }
279
+
280
+ for _, v := range vectors {
281
+ ts, err := parsePAXTime(v.in)
282
+ ok := (err == nil)
283
+ if v.ok != ok {
284
+ if v.ok {
285
+ t.Errorf("parsePAXTime(%q): got parsing failure, want success", v.in)
286
+ } else {
287
+ t.Errorf("parsePAXTime(%q): got parsing success, want failure", v.in)
288
+ }
289
+ }
290
+ if ok && !ts.Equal(v.want) {
291
+ t.Errorf("parsePAXTime(%q): got (%ds %dns), want (%ds %dns)",
292
+ v.in, ts.Unix(), ts.Nanosecond(), v.want.Unix(), v.want.Nanosecond())
293
+ }
294
+ }
295
+ }
296
+
297
+ func TestFormatPAXTime(t *testing.T) {
298
+ vectors := []struct {
299
+ sec, nsec int64
300
+ want string
301
+ }{
302
+ {1350244992, 0, "1350244992"},
303
+ {1350244992, 300000000, "1350244992.3"},
304
+ {1350244992, 23960100, "1350244992.0239601"},
305
+ {1350244992, 23960108, "1350244992.023960108"},
306
+ {+1, +1e9 - 1e0, "1.999999999"},
307
+ {+1, +1e9 - 1e3, "1.999999"},
308
+ {+1, +1e9 - 1e6, "1.999"},
309
+ {+1, +0e0 - 0e0, "1"},
310
+ {+1, +1e6 - 0e0, "1.001"},
311
+ {+1, +1e3 - 0e0, "1.000001"},
312
+ {+1, +1e0 - 0e0, "1.000000001"},
313
+ {0, 1e9 - 1e0, "0.999999999"},
314
+ {0, 1e9 - 1e3, "0.999999"},
315
+ {0, 1e9 - 1e6, "0.999"},
316
+ {0, 0e0, "0"},
317
+ {0, 1e6 + 0e0, "0.001"},
318
+ {0, 1e3 + 0e0, "0.000001"},
319
+ {0, 1e0 + 0e0, "0.000000001"},
320
+ {-1, -1e9 + 1e0, "-1.999999999"},
321
+ {-1, -1e9 + 1e3, "-1.999999"},
322
+ {-1, -1e9 + 1e6, "-1.999"},
323
+ {-1, -0e0 + 0e0, "-1"},
324
+ {-1, -1e6 + 0e0, "-1.001"},
325
+ {-1, -1e3 + 0e0, "-1.000001"},
326
+ {-1, -1e0 + 0e0, "-1.000000001"},
327
+ {-1350244992, 0, "-1350244992"},
328
+ {-1350244992, -300000000, "-1350244992.3"},
329
+ {-1350244992, -23960100, "-1350244992.0239601"},
330
+ {-1350244992, -23960108, "-1350244992.023960108"},
331
+ }
332
+
333
+ for _, v := range vectors {
334
+ got := formatPAXTime(time.Unix(v.sec, v.nsec))
335
+ if got != v.want {
336
+ t.Errorf("formatPAXTime(%ds, %dns): got %q, want %q",
337
+ v.sec, v.nsec, got, v.want)
338
+ }
339
+ }
340
+ }
341
+
342
+ func TestParsePAXRecord(t *testing.T) {
343
+ medName := strings.Repeat("CD", 50)
344
+ longName := strings.Repeat("AB", 100)
345
+
346
+ vectors := []struct {
347
+ in string
348
+ wantRes string
349
+ wantKey string
350
+ wantVal string
351
+ ok bool
352
+ }{
353
+ {"6 k=v\n\n", "\n", "k", "v", true},
354
+ {"19 path=/etc/hosts\n", "", "path", "/etc/hosts", true},
355
+ {"210 path=" + longName + "\nabc", "abc", "path", longName, true},
356
+ {"110 path=" + medName + "\n", "", "path", medName, true},
357
+ {"9 foo=ba\n", "", "foo", "ba", true},
358
+ {"11 foo=bar\n\x00", "\x00", "foo", "bar", true},
359
+ {"18 foo=b=\nar=\n==\x00\n", "", "foo", "b=\nar=\n==\x00", true},
360
+ {"27 foo=hello9 foo=ba\nworld\n", "", "foo", "hello9 foo=ba\nworld", true},
361
+ {"27 ☺☻☹=日a本b語ç\nmeow mix", "meow mix", "☺☻☹", "日a本b語ç", true},
362
+ {"17 \x00hello=\x00world\n", "17 \x00hello=\x00world\n", "", "", false},
363
+ {"1 k=1\n", "1 k=1\n", "", "", false},
364
+ {"6 k~1\n", "6 k~1\n", "", "", false},
365
+ {"6_k=1\n", "6_k=1\n", "", "", false},
366
+ {"6 k=1 ", "6 k=1 ", "", "", false},
367
+ {"632 k=1\n", "632 k=1\n", "", "", false},
368
+ {"16 longkeyname=hahaha\n", "16 longkeyname=hahaha\n", "", "", false},
369
+ {"3 somelongkey=\n", "3 somelongkey=\n", "", "", false},
370
+ {"50 tooshort=\n", "50 tooshort=\n", "", "", false},
371
+ {"0000000000000000000000000000000030 mtime=1432668921.098285006\n30 ctime=2147483649.15163319", "0000000000000000000000000000000030 mtime=1432668921.098285006\n30 ctime=2147483649.15163319", "mtime", "1432668921.098285006", false},
372
+ {"06 k=v\n", "06 k=v\n", "", "", false},
373
+ {"00006 k=v\n", "00006 k=v\n", "", "", false},
374
+ {"000006 k=v\n", "000006 k=v\n", "", "", false},
375
+ {"000000 k=v\n", "000000 k=v\n", "", "", false},
376
+ {"0 k=v\n", "0 k=v\n", "", "", false},
377
+ {"+0000005 x=\n", "+0000005 x=\n", "", "", false},
378
+ }
379
+
380
+ for _, v := range vectors {
381
+ key, val, res, err := parsePAXRecord(v.in)
382
+ ok := (err == nil)
383
+ if ok != v.ok {
384
+ if v.ok {
385
+ t.Errorf("parsePAXRecord(%q): got parsing failure, want success", v.in)
386
+ } else {
387
+ t.Errorf("parsePAXRecord(%q): got parsing success, want failure", v.in)
388
+ }
389
+ }
390
+ if v.ok && (key != v.wantKey || val != v.wantVal) {
391
+ t.Errorf("parsePAXRecord(%q): got (%q: %q), want (%q: %q)",
392
+ v.in, key, val, v.wantKey, v.wantVal)
393
+ }
394
+ if res != v.wantRes {
395
+ t.Errorf("parsePAXRecord(%q): got residual %q, want residual %q",
396
+ v.in, res, v.wantRes)
397
+ }
398
+ }
399
+ }
400
+
401
+ func TestFormatPAXRecord(t *testing.T) {
402
+ medName := strings.Repeat("CD", 50)
403
+ longName := strings.Repeat("AB", 100)
404
+
405
+ vectors := []struct {
406
+ inKey string
407
+ inVal string
408
+ want string
409
+ ok bool
410
+ }{
411
+ {"k", "v", "6 k=v\n", true},
412
+ {"path", "/etc/hosts", "19 path=/etc/hosts\n", true},
413
+ {"path", longName, "210 path=" + longName + "\n", true},
414
+ {"path", medName, "110 path=" + medName + "\n", true},
415
+ {"foo", "ba", "9 foo=ba\n", true},
416
+ {"foo", "bar", "11 foo=bar\n", true},
417
+ {"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n", true},
418
+ {"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n", true},
419
+ {"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n", true},
420
+ {"xhello", "\x00world", "17 xhello=\x00world\n", true},
421
+ {"path", "null\x00", "", false},
422
+ {"null\x00", "value", "", false},
423
+ {paxSchilyXattr + "key", "null\x00", "26 SCHILY.xattr.key=null\x00\n", true},
424
+ }
425
+
426
+ for _, v := range vectors {
427
+ got, err := formatPAXRecord(v.inKey, v.inVal)
428
+ ok := (err == nil)
429
+ if ok != v.ok {
430
+ if v.ok {
431
+ t.Errorf("formatPAXRecord(%q, %q): got format failure, want success", v.inKey, v.inVal)
432
+ } else {
433
+ t.Errorf("formatPAXRecord(%q, %q): got format success, want failure", v.inKey, v.inVal)
434
+ }
435
+ }
436
+ if got != v.want {
437
+ t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
438
+ v.inKey, v.inVal, got, v.want)
439
+ }
440
+ }
441
+ }
platform/dbops/binaries/go/go/src/archive/tar/tar_test.go ADDED
@@ -0,0 +1,850 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2012 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar
6
+
7
+ import (
8
+ "bytes"
9
+ "errors"
10
+ "fmt"
11
+ "internal/testenv"
12
+ "io"
13
+ "io/fs"
14
+ "math"
15
+ "os"
16
+ "path"
17
+ "path/filepath"
18
+ "reflect"
19
+ "strings"
20
+ "testing"
21
+ "time"
22
+ )
23
+
24
+ type testError struct{ error }
25
+
26
+ type fileOps []any // []T where T is (string | int64)
27
+
28
+ // testFile is an io.ReadWriteSeeker where the IO operations performed
29
+ // on it must match the list of operations in ops.
30
+ type testFile struct {
31
+ ops fileOps
32
+ pos int64
33
+ }
34
+
35
+ func (f *testFile) Read(b []byte) (int, error) {
36
+ if len(b) == 0 {
37
+ return 0, nil
38
+ }
39
+ if len(f.ops) == 0 {
40
+ return 0, io.EOF
41
+ }
42
+ s, ok := f.ops[0].(string)
43
+ if !ok {
44
+ return 0, errors.New("unexpected Read operation")
45
+ }
46
+
47
+ n := copy(b, s)
48
+ if len(s) > n {
49
+ f.ops[0] = s[n:]
50
+ } else {
51
+ f.ops = f.ops[1:]
52
+ }
53
+ f.pos += int64(len(b))
54
+ return n, nil
55
+ }
56
+
57
+ func (f *testFile) Write(b []byte) (int, error) {
58
+ if len(b) == 0 {
59
+ return 0, nil
60
+ }
61
+ if len(f.ops) == 0 {
62
+ return 0, errors.New("unexpected Write operation")
63
+ }
64
+ s, ok := f.ops[0].(string)
65
+ if !ok {
66
+ return 0, errors.New("unexpected Write operation")
67
+ }
68
+
69
+ if !strings.HasPrefix(s, string(b)) {
70
+ return 0, testError{fmt.Errorf("got Write(%q), want Write(%q)", b, s)}
71
+ }
72
+ if len(s) > len(b) {
73
+ f.ops[0] = s[len(b):]
74
+ } else {
75
+ f.ops = f.ops[1:]
76
+ }
77
+ f.pos += int64(len(b))
78
+ return len(b), nil
79
+ }
80
+
81
+ func (f *testFile) Seek(pos int64, whence int) (int64, error) {
82
+ if pos == 0 && whence == io.SeekCurrent {
83
+ return f.pos, nil
84
+ }
85
+ if len(f.ops) == 0 {
86
+ return 0, errors.New("unexpected Seek operation")
87
+ }
88
+ s, ok := f.ops[0].(int64)
89
+ if !ok {
90
+ return 0, errors.New("unexpected Seek operation")
91
+ }
92
+
93
+ if s != pos || whence != io.SeekCurrent {
94
+ return 0, testError{fmt.Errorf("got Seek(%d, %d), want Seek(%d, %d)", pos, whence, s, io.SeekCurrent)}
95
+ }
96
+ f.pos += s
97
+ f.ops = f.ops[1:]
98
+ return f.pos, nil
99
+ }
100
+
101
+ func equalSparseEntries(x, y []sparseEntry) bool {
102
+ return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y)
103
+ }
104
+
105
+ func TestSparseEntries(t *testing.T) {
106
+ vectors := []struct {
107
+ in []sparseEntry
108
+ size int64
109
+
110
+ wantValid bool // Result of validateSparseEntries
111
+ wantAligned []sparseEntry // Result of alignSparseEntries
112
+ wantInverted []sparseEntry // Result of invertSparseEntries
113
+ }{{
114
+ in: []sparseEntry{}, size: 0,
115
+ wantValid: true,
116
+ wantInverted: []sparseEntry{{0, 0}},
117
+ }, {
118
+ in: []sparseEntry{}, size: 5000,
119
+ wantValid: true,
120
+ wantInverted: []sparseEntry{{0, 5000}},
121
+ }, {
122
+ in: []sparseEntry{{0, 5000}}, size: 5000,
123
+ wantValid: true,
124
+ wantAligned: []sparseEntry{{0, 5000}},
125
+ wantInverted: []sparseEntry{{5000, 0}},
126
+ }, {
127
+ in: []sparseEntry{{1000, 4000}}, size: 5000,
128
+ wantValid: true,
129
+ wantAligned: []sparseEntry{{1024, 3976}},
130
+ wantInverted: []sparseEntry{{0, 1000}, {5000, 0}},
131
+ }, {
132
+ in: []sparseEntry{{0, 3000}}, size: 5000,
133
+ wantValid: true,
134
+ wantAligned: []sparseEntry{{0, 2560}},
135
+ wantInverted: []sparseEntry{{3000, 2000}},
136
+ }, {
137
+ in: []sparseEntry{{3000, 2000}}, size: 5000,
138
+ wantValid: true,
139
+ wantAligned: []sparseEntry{{3072, 1928}},
140
+ wantInverted: []sparseEntry{{0, 3000}, {5000, 0}},
141
+ }, {
142
+ in: []sparseEntry{{2000, 2000}}, size: 5000,
143
+ wantValid: true,
144
+ wantAligned: []sparseEntry{{2048, 1536}},
145
+ wantInverted: []sparseEntry{{0, 2000}, {4000, 1000}},
146
+ }, {
147
+ in: []sparseEntry{{0, 2000}, {8000, 2000}}, size: 10000,
148
+ wantValid: true,
149
+ wantAligned: []sparseEntry{{0, 1536}, {8192, 1808}},
150
+ wantInverted: []sparseEntry{{2000, 6000}, {10000, 0}},
151
+ }, {
152
+ in: []sparseEntry{{0, 2000}, {2000, 2000}, {4000, 0}, {4000, 3000}, {7000, 1000}, {8000, 0}, {8000, 2000}}, size: 10000,
153
+ wantValid: true,
154
+ wantAligned: []sparseEntry{{0, 1536}, {2048, 1536}, {4096, 2560}, {7168, 512}, {8192, 1808}},
155
+ wantInverted: []sparseEntry{{10000, 0}},
156
+ }, {
157
+ in: []sparseEntry{{0, 0}, {1000, 0}, {2000, 0}, {3000, 0}, {4000, 0}, {5000, 0}}, size: 5000,
158
+ wantValid: true,
159
+ wantInverted: []sparseEntry{{0, 5000}},
160
+ }, {
161
+ in: []sparseEntry{{1, 0}}, size: 0,
162
+ wantValid: false,
163
+ }, {
164
+ in: []sparseEntry{{-1, 0}}, size: 100,
165
+ wantValid: false,
166
+ }, {
167
+ in: []sparseEntry{{0, -1}}, size: 100,
168
+ wantValid: false,
169
+ }, {
170
+ in: []sparseEntry{{0, 0}}, size: -100,
171
+ wantValid: false,
172
+ }, {
173
+ in: []sparseEntry{{math.MaxInt64, 3}, {6, -5}}, size: 35,
174
+ wantValid: false,
175
+ }, {
176
+ in: []sparseEntry{{1, 3}, {6, -5}}, size: 35,
177
+ wantValid: false,
178
+ }, {
179
+ in: []sparseEntry{{math.MaxInt64, math.MaxInt64}}, size: math.MaxInt64,
180
+ wantValid: false,
181
+ }, {
182
+ in: []sparseEntry{{3, 3}}, size: 5,
183
+ wantValid: false,
184
+ }, {
185
+ in: []sparseEntry{{2, 0}, {1, 0}, {0, 0}}, size: 3,
186
+ wantValid: false,
187
+ }, {
188
+ in: []sparseEntry{{1, 3}, {2, 2}}, size: 10,
189
+ wantValid: false,
190
+ }}
191
+
192
+ for i, v := range vectors {
193
+ gotValid := validateSparseEntries(v.in, v.size)
194
+ if gotValid != v.wantValid {
195
+ t.Errorf("test %d, validateSparseEntries() = %v, want %v", i, gotValid, v.wantValid)
196
+ }
197
+ if !v.wantValid {
198
+ continue
199
+ }
200
+ gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size)
201
+ if !equalSparseEntries(gotAligned, v.wantAligned) {
202
+ t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned)
203
+ }
204
+ gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size)
205
+ if !equalSparseEntries(gotInverted, v.wantInverted) {
206
+ t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted)
207
+ }
208
+ }
209
+ }
210
+
211
+ func TestFileInfoHeader(t *testing.T) {
212
+ fi, err := os.Stat("testdata/small.txt")
213
+ if err != nil {
214
+ t.Fatal(err)
215
+ }
216
+ h, err := FileInfoHeader(fi, "")
217
+ if err != nil {
218
+ t.Fatalf("FileInfoHeader: %v", err)
219
+ }
220
+ if g, e := h.Name, "small.txt"; g != e {
221
+ t.Errorf("Name = %q; want %q", g, e)
222
+ }
223
+ if g, e := h.Mode, int64(fi.Mode().Perm()); g != e {
224
+ t.Errorf("Mode = %#o; want %#o", g, e)
225
+ }
226
+ if g, e := h.Size, int64(5); g != e {
227
+ t.Errorf("Size = %v; want %v", g, e)
228
+ }
229
+ if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
230
+ t.Errorf("ModTime = %v; want %v", g, e)
231
+ }
232
+ // FileInfoHeader should error when passing nil FileInfo
233
+ if _, err := FileInfoHeader(nil, ""); err == nil {
234
+ t.Fatalf("Expected error when passing nil to FileInfoHeader")
235
+ }
236
+ }
237
+
238
+ func TestFileInfoHeaderDir(t *testing.T) {
239
+ fi, err := os.Stat("testdata")
240
+ if err != nil {
241
+ t.Fatal(err)
242
+ }
243
+ h, err := FileInfoHeader(fi, "")
244
+ if err != nil {
245
+ t.Fatalf("FileInfoHeader: %v", err)
246
+ }
247
+ if g, e := h.Name, "testdata/"; g != e {
248
+ t.Errorf("Name = %q; want %q", g, e)
249
+ }
250
+ // Ignoring c_ISGID for golang.org/issue/4867
251
+ if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm()); g != e {
252
+ t.Errorf("Mode = %#o; want %#o", g, e)
253
+ }
254
+ if g, e := h.Size, int64(0); g != e {
255
+ t.Errorf("Size = %v; want %v", g, e)
256
+ }
257
+ if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
258
+ t.Errorf("ModTime = %v; want %v", g, e)
259
+ }
260
+ }
261
+
262
+ func TestFileInfoHeaderSymlink(t *testing.T) {
263
+ testenv.MustHaveSymlink(t)
264
+
265
+ tmpdir := t.TempDir()
266
+
267
+ link := filepath.Join(tmpdir, "link")
268
+ target := tmpdir
269
+ if err := os.Symlink(target, link); err != nil {
270
+ t.Fatal(err)
271
+ }
272
+ fi, err := os.Lstat(link)
273
+ if err != nil {
274
+ t.Fatal(err)
275
+ }
276
+
277
+ h, err := FileInfoHeader(fi, target)
278
+ if err != nil {
279
+ t.Fatal(err)
280
+ }
281
+ if g, e := h.Name, fi.Name(); g != e {
282
+ t.Errorf("Name = %q; want %q", g, e)
283
+ }
284
+ if g, e := h.Linkname, target; g != e {
285
+ t.Errorf("Linkname = %q; want %q", g, e)
286
+ }
287
+ if g, e := h.Typeflag, byte(TypeSymlink); g != e {
288
+ t.Errorf("Typeflag = %v; want %v", g, e)
289
+ }
290
+ }
291
+
292
+ func TestRoundTrip(t *testing.T) {
293
+ data := []byte("some file contents")
294
+
295
+ var b bytes.Buffer
296
+ tw := NewWriter(&b)
297
+ hdr := &Header{
298
+ Name: "file.txt",
299
+ Uid: 1 << 21, // Too big for 8 octal digits
300
+ Size: int64(len(data)),
301
+ ModTime: time.Now().Round(time.Second),
302
+ PAXRecords: map[string]string{"uid": "2097152"},
303
+ Format: FormatPAX,
304
+ Typeflag: TypeReg,
305
+ }
306
+ if err := tw.WriteHeader(hdr); err != nil {
307
+ t.Fatalf("tw.WriteHeader: %v", err)
308
+ }
309
+ if _, err := tw.Write(data); err != nil {
310
+ t.Fatalf("tw.Write: %v", err)
311
+ }
312
+ if err := tw.Close(); err != nil {
313
+ t.Fatalf("tw.Close: %v", err)
314
+ }
315
+
316
+ // Read it back.
317
+ tr := NewReader(&b)
318
+ rHdr, err := tr.Next()
319
+ if err != nil {
320
+ t.Fatalf("tr.Next: %v", err)
321
+ }
322
+ if !reflect.DeepEqual(rHdr, hdr) {
323
+ t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
324
+ }
325
+ rData, err := io.ReadAll(tr)
326
+ if err != nil {
327
+ t.Fatalf("Read: %v", err)
328
+ }
329
+ if !bytes.Equal(rData, data) {
330
+ t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
331
+ }
332
+ }
333
+
334
+ type headerRoundTripTest struct {
335
+ h *Header
336
+ fm fs.FileMode
337
+ }
338
+
339
+ func TestHeaderRoundTrip(t *testing.T) {
340
+ vectors := []headerRoundTripTest{{
341
+ // regular file.
342
+ h: &Header{
343
+ Name: "test.txt",
344
+ Mode: 0644,
345
+ Size: 12,
346
+ ModTime: time.Unix(1360600916, 0),
347
+ Typeflag: TypeReg,
348
+ },
349
+ fm: 0644,
350
+ }, {
351
+ // symbolic link.
352
+ h: &Header{
353
+ Name: "link.txt",
354
+ Mode: 0777,
355
+ Size: 0,
356
+ ModTime: time.Unix(1360600852, 0),
357
+ Typeflag: TypeSymlink,
358
+ },
359
+ fm: 0777 | fs.ModeSymlink,
360
+ }, {
361
+ // character device node.
362
+ h: &Header{
363
+ Name: "dev/null",
364
+ Mode: 0666,
365
+ Size: 0,
366
+ ModTime: time.Unix(1360578951, 0),
367
+ Typeflag: TypeChar,
368
+ },
369
+ fm: 0666 | fs.ModeDevice | fs.ModeCharDevice,
370
+ }, {
371
+ // block device node.
372
+ h: &Header{
373
+ Name: "dev/sda",
374
+ Mode: 0660,
375
+ Size: 0,
376
+ ModTime: time.Unix(1360578954, 0),
377
+ Typeflag: TypeBlock,
378
+ },
379
+ fm: 0660 | fs.ModeDevice,
380
+ }, {
381
+ // directory.
382
+ h: &Header{
383
+ Name: "dir/",
384
+ Mode: 0755,
385
+ Size: 0,
386
+ ModTime: time.Unix(1360601116, 0),
387
+ Typeflag: TypeDir,
388
+ },
389
+ fm: 0755 | fs.ModeDir,
390
+ }, {
391
+ // fifo node.
392
+ h: &Header{
393
+ Name: "dev/initctl",
394
+ Mode: 0600,
395
+ Size: 0,
396
+ ModTime: time.Unix(1360578949, 0),
397
+ Typeflag: TypeFifo,
398
+ },
399
+ fm: 0600 | fs.ModeNamedPipe,
400
+ }, {
401
+ // setuid.
402
+ h: &Header{
403
+ Name: "bin/su",
404
+ Mode: 0755 | c_ISUID,
405
+ Size: 23232,
406
+ ModTime: time.Unix(1355405093, 0),
407
+ Typeflag: TypeReg,
408
+ },
409
+ fm: 0755 | fs.ModeSetuid,
410
+ }, {
411
+ // setguid.
412
+ h: &Header{
413
+ Name: "group.txt",
414
+ Mode: 0750 | c_ISGID,
415
+ Size: 0,
416
+ ModTime: time.Unix(1360602346, 0),
417
+ Typeflag: TypeReg,
418
+ },
419
+ fm: 0750 | fs.ModeSetgid,
420
+ }, {
421
+ // sticky.
422
+ h: &Header{
423
+ Name: "sticky.txt",
424
+ Mode: 0600 | c_ISVTX,
425
+ Size: 7,
426
+ ModTime: time.Unix(1360602540, 0),
427
+ Typeflag: TypeReg,
428
+ },
429
+ fm: 0600 | fs.ModeSticky,
430
+ }, {
431
+ // hard link.
432
+ h: &Header{
433
+ Name: "hard.txt",
434
+ Mode: 0644,
435
+ Size: 0,
436
+ Linkname: "file.txt",
437
+ ModTime: time.Unix(1360600916, 0),
438
+ Typeflag: TypeLink,
439
+ },
440
+ fm: 0644,
441
+ }, {
442
+ // More information.
443
+ h: &Header{
444
+ Name: "info.txt",
445
+ Mode: 0600,
446
+ Size: 0,
447
+ Uid: 1000,
448
+ Gid: 1000,
449
+ ModTime: time.Unix(1360602540, 0),
450
+ Uname: "slartibartfast",
451
+ Gname: "users",
452
+ Typeflag: TypeReg,
453
+ },
454
+ fm: 0600,
455
+ }}
456
+
457
+ for i, v := range vectors {
458
+ fi := v.h.FileInfo()
459
+ h2, err := FileInfoHeader(fi, "")
460
+ if err != nil {
461
+ t.Error(err)
462
+ continue
463
+ }
464
+ if strings.Contains(fi.Name(), "/") {
465
+ t.Errorf("FileInfo of %q contains slash: %q", v.h.Name, fi.Name())
466
+ }
467
+ name := path.Base(v.h.Name)
468
+ if fi.IsDir() {
469
+ name += "/"
470
+ }
471
+ if got, want := h2.Name, name; got != want {
472
+ t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
473
+ }
474
+ if got, want := h2.Size, v.h.Size; got != want {
475
+ t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
476
+ }
477
+ if got, want := h2.Uid, v.h.Uid; got != want {
478
+ t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
479
+ }
480
+ if got, want := h2.Gid, v.h.Gid; got != want {
481
+ t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
482
+ }
483
+ if got, want := h2.Uname, v.h.Uname; got != want {
484
+ t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
485
+ }
486
+ if got, want := h2.Gname, v.h.Gname; got != want {
487
+ t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
488
+ }
489
+ if got, want := h2.Linkname, v.h.Linkname; got != want {
490
+ t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
491
+ }
492
+ if got, want := h2.Typeflag, v.h.Typeflag; got != want {
493
+ t.Logf("%#v %#v", v.h, fi.Sys())
494
+ t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
495
+ }
496
+ if got, want := h2.Mode, v.h.Mode; got != want {
497
+ t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
498
+ }
499
+ if got, want := fi.Mode(), v.fm; got != want {
500
+ t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
501
+ }
502
+ if got, want := h2.AccessTime, v.h.AccessTime; got != want {
503
+ t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
504
+ }
505
+ if got, want := h2.ChangeTime, v.h.ChangeTime; got != want {
506
+ t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
507
+ }
508
+ if got, want := h2.ModTime, v.h.ModTime; got != want {
509
+ t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
510
+ }
511
+ if sysh, ok := fi.Sys().(*Header); !ok || sysh != v.h {
512
+ t.Errorf("i=%d: Sys didn't return original *Header", i)
513
+ }
514
+ }
515
+ }
516
+
517
+ func TestHeaderAllowedFormats(t *testing.T) {
518
+ vectors := []struct {
519
+ header *Header // Input header
520
+ paxHdrs map[string]string // Expected PAX headers that may be needed
521
+ formats Format // Expected formats that can encode the header
522
+ }{{
523
+ header: &Header{},
524
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
525
+ }, {
526
+ header: &Header{Size: 077777777777},
527
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
528
+ }, {
529
+ header: &Header{Size: 077777777777, Format: FormatUSTAR},
530
+ formats: FormatUSTAR,
531
+ }, {
532
+ header: &Header{Size: 077777777777, Format: FormatPAX},
533
+ formats: FormatUSTAR | FormatPAX,
534
+ }, {
535
+ header: &Header{Size: 077777777777, Format: FormatGNU},
536
+ formats: FormatGNU,
537
+ }, {
538
+ header: &Header{Size: 077777777777 + 1},
539
+ paxHdrs: map[string]string{paxSize: "8589934592"},
540
+ formats: FormatPAX | FormatGNU,
541
+ }, {
542
+ header: &Header{Size: 077777777777 + 1, Format: FormatPAX},
543
+ paxHdrs: map[string]string{paxSize: "8589934592"},
544
+ formats: FormatPAX,
545
+ }, {
546
+ header: &Header{Size: 077777777777 + 1, Format: FormatGNU},
547
+ paxHdrs: map[string]string{paxSize: "8589934592"},
548
+ formats: FormatGNU,
549
+ }, {
550
+ header: &Header{Mode: 07777777},
551
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
552
+ }, {
553
+ header: &Header{Mode: 07777777 + 1},
554
+ formats: FormatGNU,
555
+ }, {
556
+ header: &Header{Devmajor: -123},
557
+ formats: FormatGNU,
558
+ }, {
559
+ header: &Header{Devmajor: 1<<56 - 1},
560
+ formats: FormatGNU,
561
+ }, {
562
+ header: &Header{Devmajor: 1 << 56},
563
+ formats: FormatUnknown,
564
+ }, {
565
+ header: &Header{Devmajor: -1 << 56},
566
+ formats: FormatGNU,
567
+ }, {
568
+ header: &Header{Devmajor: -1<<56 - 1},
569
+ formats: FormatUnknown,
570
+ }, {
571
+ header: &Header{Name: "用戶名", Devmajor: -1 << 56},
572
+ formats: FormatGNU,
573
+ }, {
574
+ header: &Header{Size: math.MaxInt64},
575
+ paxHdrs: map[string]string{paxSize: "9223372036854775807"},
576
+ formats: FormatPAX | FormatGNU,
577
+ }, {
578
+ header: &Header{Size: math.MinInt64},
579
+ paxHdrs: map[string]string{paxSize: "-9223372036854775808"},
580
+ formats: FormatUnknown,
581
+ }, {
582
+ header: &Header{Uname: "0123456789abcdef0123456789abcdef"},
583
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
584
+ }, {
585
+ header: &Header{Uname: "0123456789abcdef0123456789abcdefx"},
586
+ paxHdrs: map[string]string{paxUname: "0123456789abcdef0123456789abcdefx"},
587
+ formats: FormatPAX,
588
+ }, {
589
+ header: &Header{Name: "foobar"},
590
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
591
+ }, {
592
+ header: &Header{Name: strings.Repeat("a", nameSize)},
593
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
594
+ }, {
595
+ header: &Header{Name: strings.Repeat("a", nameSize+1)},
596
+ paxHdrs: map[string]string{paxPath: strings.Repeat("a", nameSize+1)},
597
+ formats: FormatPAX | FormatGNU,
598
+ }, {
599
+ header: &Header{Linkname: "用戶名"},
600
+ paxHdrs: map[string]string{paxLinkpath: "用戶名"},
601
+ formats: FormatPAX | FormatGNU,
602
+ }, {
603
+ header: &Header{Linkname: strings.Repeat("用戶名\x00", nameSize)},
604
+ paxHdrs: map[string]string{paxLinkpath: strings.Repeat("用戶名\x00", nameSize)},
605
+ formats: FormatUnknown,
606
+ }, {
607
+ header: &Header{Linkname: "\x00hello"},
608
+ paxHdrs: map[string]string{paxLinkpath: "\x00hello"},
609
+ formats: FormatUnknown,
610
+ }, {
611
+ header: &Header{Uid: 07777777},
612
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
613
+ }, {
614
+ header: &Header{Uid: 07777777 + 1},
615
+ paxHdrs: map[string]string{paxUid: "2097152"},
616
+ formats: FormatPAX | FormatGNU,
617
+ }, {
618
+ header: &Header{Xattrs: nil},
619
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
620
+ }, {
621
+ header: &Header{Xattrs: map[string]string{"foo": "bar"}},
622
+ paxHdrs: map[string]string{paxSchilyXattr + "foo": "bar"},
623
+ formats: FormatPAX,
624
+ }, {
625
+ header: &Header{Xattrs: map[string]string{"foo": "bar"}, Format: FormatGNU},
626
+ paxHdrs: map[string]string{paxSchilyXattr + "foo": "bar"},
627
+ formats: FormatUnknown,
628
+ }, {
629
+ header: &Header{Xattrs: map[string]string{"用戶名": "\x00hello"}},
630
+ paxHdrs: map[string]string{paxSchilyXattr + "用戶名": "\x00hello"},
631
+ formats: FormatPAX,
632
+ }, {
633
+ header: &Header{Xattrs: map[string]string{"foo=bar": "baz"}},
634
+ formats: FormatUnknown,
635
+ }, {
636
+ header: &Header{Xattrs: map[string]string{"foo": ""}},
637
+ paxHdrs: map[string]string{paxSchilyXattr + "foo": ""},
638
+ formats: FormatPAX,
639
+ }, {
640
+ header: &Header{ModTime: time.Unix(0, 0)},
641
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
642
+ }, {
643
+ header: &Header{ModTime: time.Unix(077777777777, 0)},
644
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
645
+ }, {
646
+ header: &Header{ModTime: time.Unix(077777777777+1, 0)},
647
+ paxHdrs: map[string]string{paxMtime: "8589934592"},
648
+ formats: FormatPAX | FormatGNU,
649
+ }, {
650
+ header: &Header{ModTime: time.Unix(math.MaxInt64, 0)},
651
+ paxHdrs: map[string]string{paxMtime: "9223372036854775807"},
652
+ formats: FormatPAX | FormatGNU,
653
+ }, {
654
+ header: &Header{ModTime: time.Unix(math.MaxInt64, 0), Format: FormatUSTAR},
655
+ paxHdrs: map[string]string{paxMtime: "9223372036854775807"},
656
+ formats: FormatUnknown,
657
+ }, {
658
+ header: &Header{ModTime: time.Unix(-1, 0)},
659
+ paxHdrs: map[string]string{paxMtime: "-1"},
660
+ formats: FormatPAX | FormatGNU,
661
+ }, {
662
+ header: &Header{ModTime: time.Unix(1, 500)},
663
+ paxHdrs: map[string]string{paxMtime: "1.0000005"},
664
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
665
+ }, {
666
+ header: &Header{ModTime: time.Unix(1, 0)},
667
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
668
+ }, {
669
+ header: &Header{ModTime: time.Unix(1, 0), Format: FormatPAX},
670
+ formats: FormatUSTAR | FormatPAX,
671
+ }, {
672
+ header: &Header{ModTime: time.Unix(1, 500), Format: FormatUSTAR},
673
+ paxHdrs: map[string]string{paxMtime: "1.0000005"},
674
+ formats: FormatUSTAR,
675
+ }, {
676
+ header: &Header{ModTime: time.Unix(1, 500), Format: FormatPAX},
677
+ paxHdrs: map[string]string{paxMtime: "1.0000005"},
678
+ formats: FormatPAX,
679
+ }, {
680
+ header: &Header{ModTime: time.Unix(1, 500), Format: FormatGNU},
681
+ paxHdrs: map[string]string{paxMtime: "1.0000005"},
682
+ formats: FormatGNU,
683
+ }, {
684
+ header: &Header{ModTime: time.Unix(-1, 500)},
685
+ paxHdrs: map[string]string{paxMtime: "-0.9999995"},
686
+ formats: FormatPAX | FormatGNU,
687
+ }, {
688
+ header: &Header{ModTime: time.Unix(-1, 500), Format: FormatGNU},
689
+ paxHdrs: map[string]string{paxMtime: "-0.9999995"},
690
+ formats: FormatGNU,
691
+ }, {
692
+ header: &Header{AccessTime: time.Unix(0, 0)},
693
+ paxHdrs: map[string]string{paxAtime: "0"},
694
+ formats: FormatPAX | FormatGNU,
695
+ }, {
696
+ header: &Header{AccessTime: time.Unix(0, 0), Format: FormatUSTAR},
697
+ paxHdrs: map[string]string{paxAtime: "0"},
698
+ formats: FormatUnknown,
699
+ }, {
700
+ header: &Header{AccessTime: time.Unix(0, 0), Format: FormatPAX},
701
+ paxHdrs: map[string]string{paxAtime: "0"},
702
+ formats: FormatPAX,
703
+ }, {
704
+ header: &Header{AccessTime: time.Unix(0, 0), Format: FormatGNU},
705
+ paxHdrs: map[string]string{paxAtime: "0"},
706
+ formats: FormatGNU,
707
+ }, {
708
+ header: &Header{AccessTime: time.Unix(-123, 0)},
709
+ paxHdrs: map[string]string{paxAtime: "-123"},
710
+ formats: FormatPAX | FormatGNU,
711
+ }, {
712
+ header: &Header{AccessTime: time.Unix(-123, 0), Format: FormatPAX},
713
+ paxHdrs: map[string]string{paxAtime: "-123"},
714
+ formats: FormatPAX,
715
+ }, {
716
+ header: &Header{ChangeTime: time.Unix(123, 456)},
717
+ paxHdrs: map[string]string{paxCtime: "123.000000456"},
718
+ formats: FormatPAX | FormatGNU,
719
+ }, {
720
+ header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatUSTAR},
721
+ paxHdrs: map[string]string{paxCtime: "123.000000456"},
722
+ formats: FormatUnknown,
723
+ }, {
724
+ header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatGNU},
725
+ paxHdrs: map[string]string{paxCtime: "123.000000456"},
726
+ formats: FormatGNU,
727
+ }, {
728
+ header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatPAX},
729
+ paxHdrs: map[string]string{paxCtime: "123.000000456"},
730
+ formats: FormatPAX,
731
+ }, {
732
+ header: &Header{Name: "foo/", Typeflag: TypeDir},
733
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
734
+ }, {
735
+ header: &Header{Name: "foo/", Typeflag: TypeReg},
736
+ formats: FormatUnknown,
737
+ }, {
738
+ header: &Header{Name: "foo/", Typeflag: TypeSymlink},
739
+ formats: FormatUSTAR | FormatPAX | FormatGNU,
740
+ }}
741
+
742
+ for i, v := range vectors {
743
+ formats, paxHdrs, err := v.header.allowedFormats()
744
+ if formats != v.formats {
745
+ t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats)
746
+ }
747
+ if formats&FormatPAX > 0 && !reflect.DeepEqual(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) {
748
+ t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs)
749
+ }
750
+ if (formats != FormatUnknown) && (err != nil) {
751
+ t.Errorf("test %d, unexpected error: %v", i, err)
752
+ }
753
+ if (formats == FormatUnknown) && (err == nil) {
754
+ t.Errorf("test %d, got nil-error, want non-nil error", i)
755
+ }
756
+ }
757
+ }
758
+
759
// Benchmark measures Writer and Reader throughput for each of the three
// header formats. The vectors are constructed so that header-field values
// force a specific format: plain fields select USTAR, Devmajor: -1 forces
// GNU (negative numeric fields need base-256), and Xattrs forces PAX.
func Benchmark(b *testing.B) {
	type file struct {
		hdr  *Header
		body []byte
	}

	vectors := []struct {
		label string
		files []file
	}{{
		"USTAR",
		[]file{{
			&Header{Name: "bar", Mode: 0640, Size: int64(3)},
			[]byte("foo"),
		}, {
			&Header{Name: "world", Mode: 0640, Size: int64(5)},
			[]byte("hello"),
		}},
	}, {
		"GNU",
		[]file{{
			&Header{Name: "bar", Mode: 0640, Size: int64(3), Devmajor: -1},
			[]byte("foo"),
		}, {
			&Header{Name: "world", Mode: 0640, Size: int64(5), Devmajor: -1},
			[]byte("hello"),
		}},
	}, {
		"PAX",
		[]file{{
			&Header{Name: "bar", Mode: 0640, Size: int64(3), Xattrs: map[string]string{"foo": "bar"}},
			[]byte("foo"),
		}, {
			&Header{Name: "world", Mode: 0640, Size: int64(5), Xattrs: map[string]string{"foo": "bar"}},
			[]byte("hello"),
		}},
	}}

	b.Run("Writer", func(b *testing.B) {
		for _, v := range vectors {
			b.Run(v.label, func(b *testing.B) {
				b.ReportAllocs()
				for i := 0; i < b.N; i++ {
					// Writing to io.Discard because we want to
					// test purely the writer code and not bring in disk performance into this.
					tw := NewWriter(io.Discard)
					for _, file := range v.files {
						if err := tw.WriteHeader(file.hdr); err != nil {
							b.Errorf("unexpected WriteHeader error: %v", err)
						}
						if _, err := tw.Write(file.body); err != nil {
							b.Errorf("unexpected Write error: %v", err)
						}
					}
					if err := tw.Close(); err != nil {
						b.Errorf("unexpected Close error: %v", err)
					}
				}
			})
		}
	})

	b.Run("Reader", func(b *testing.B) {
		for _, v := range vectors {
			var buf bytes.Buffer
			var r bytes.Reader

			// Write the archive to a byte buffer.
			// Setup errors are intentionally ignored here; malformed input
			// would surface as Next/Copy errors in the timed loop below.
			tw := NewWriter(&buf)
			for _, file := range v.files {
				tw.WriteHeader(file.hdr)
				tw.Write(file.body)
			}
			tw.Close()
			b.Run(v.label, func(b *testing.B) {
				b.ReportAllocs()
				// Read from the byte buffer.
				// NOTE(review): only the first entry of each archive is read
				// per iteration (a single Next + Copy) — presumably intended
				// to keep the measured region small; confirm before changing.
				for i := 0; i < b.N; i++ {
					r.Reset(buf.Bytes())
					tr := NewReader(&r)
					if _, err := tr.Next(); err != nil {
						b.Errorf("unexpected Next error: %v", err)
					}
					if _, err := io.Copy(io.Discard, tr); err != nil {
						b.Errorf("unexpected Copy error : %v", err)
					}
				}
			})
		}
	})

}
platform/dbops/binaries/go/go/src/archive/tar/testdata/file-and-dir.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97b7612d21901a62cb80af3b976878da051b512f0a8018c16931ffd6e176067b
3
+ size 2560
platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-incremental.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:675b784cae33a0a4856b66b1752a3892455b983f505d3e27505b07a525d61055
3
+ size 2560
platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-long-nul.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:146a47c77ea4b77dff0d94403a80e6164629262d66ec56d8dbad0195174c8a47
3
+ size 2560
platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-multi-hdrs.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3e3e97854feebc6ee48069dc759b93e32c432109ca146ee691d61dc34fedb7b
3
+ size 4608
platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-nil-sparse-data.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbd8462155e49f06b82b17b8c4c871d85d322e2d95e2fc3108fad51f1f4ce107
3
+ size 2560
platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-nil-sparse-hole.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:506d08fe8bbee110612f4672e632e5e9a6373cafba15cbd5866bdd09e3f79d2b
3
+ size 1536
platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-not-utf8.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3560b7b37f487f1c41a46b39b74d9f00a52d6b9f3430d64a6e42544df3c14c95
3
+ size 1536
platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-sparse-big.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:027bb1a0d7f778fde347be353b1920141bc4d4d2da6828ace084cb339787fd17
3
+ size 5120
platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu-utf8.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d46bde4ea43f780c4f184a7ef11b11cdde9f9e02d6892a03c67d9b6c6608973
3
+ size 2560
platform/dbops/binaries/go/go/src/archive/tar/testdata/gnu.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4635a876c70af74b13976fdf86811e809ec29dc1ccb2a18c1174a493240edf8b
3
+ size 3072
platform/dbops/binaries/go/go/src/archive/tar/testdata/hardlink.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57be2655401e9cb4b79515257a2978db683ca5b3c9b4ea0a3f1adcd1b43d56a7
3
+ size 2560
platform/dbops/binaries/go/go/src/archive/tar/testdata/hdr-only.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:065f7d6cdcebbb9a2a0afbac24849165d1fe83872550b0abad9460520854ad4d
3
+ size 10240
platform/dbops/binaries/go/go/src/archive/tar/testdata/invalid-go17.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53fe0bb9e743531a01d601f70482f6e47e43469894ca101abf7069424edbe81f
3
+ size 1536
platform/dbops/binaries/go/go/src/archive/tar/testdata/issue10968.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a302db10909ff2822f09f362576eca6e1b4f0d70e34748f6fa1eea79e7d3713a
3
+ size 512
platform/dbops/binaries/go/go/src/archive/tar/testdata/issue11169.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c80a95a15edc3eef8fd6c506c4a92481f517bebd088d5db0eb260033a77c0f6
3
+ size 602
platform/dbops/binaries/go/go/src/archive/tar/testdata/issue12435.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de7e983cc020b72b10819773e487c794e1269fc5a3793bc13d3c61ffacc553a5
3
+ size 512
platform/dbops/binaries/go/go/src/archive/tar/testdata/neg-size.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:831bd782db9d2b552cbcfcb87adfded0941762596e5d8fbd742dba03c6dc357b
3
+ size 512
platform/dbops/binaries/go/go/src/archive/tar/testdata/nil-uid.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e18d729821696dc33c7eb0bf32622146365bb3e0c2bd2d72ce4b2e460a84690e
3
+ size 1024
platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-bad-hdr-file.tar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:63d5e53472b1ea35cd8814f9d4dee7597601c99eca367177bbaaab1e09d30468
3
+ size 2560
platform/dbops/binaries/go/go/src/archive/tar/testdata/pax-bad-hdr-large.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4dc5dc501d3f9728918cf03e15da2bf5c17f05c4add4b1d5054d125b28c542a
3
+ size 156
platform/dbops/binaries/go/go/src/archive/tar/writer.go ADDED
@@ -0,0 +1,698 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2009 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar
6
+
7
+ import (
8
+ "errors"
9
+ "fmt"
10
+ "io"
11
+ "io/fs"
12
+ "path"
13
+ "sort"
14
+ "strings"
15
+ "time"
16
+ )
17
+
18
// Writer provides sequential writing of a tar archive.
// [Writer.WriteHeader] begins a new file with the provided [Header],
// and then Writer can be treated as an io.Writer to supply that file's data.
type Writer struct {
	w    io.Writer  // Underlying writer that receives the archive bytes
	pad  int64      // Amount of padding to write after current file entry
	curr fileWriter // Writer for current file entry
	hdr  Header     // Shallow copy of Header that is safe for mutations
	blk  block      // Buffer to use as temporary local storage

	// err is a persistent error.
	// It is only the responsibility of every exported method of Writer to
	// ensure that this error is sticky.
	err error
}
33
+
34
+ // NewWriter creates a new Writer writing to w.
35
+ func NewWriter(w io.Writer) *Writer {
36
+ return &Writer{w: w, curr: &regFileWriter{w, 0}}
37
+ }
38
+
39
// fileWriter is the interface for writers of a single archive entry.
// Implementations (e.g. regFileWriter, sparseFileWriter) track how many
// bytes of the current entry remain via the embedded fileState.
type fileWriter interface {
	io.Writer
	fileState

	// ReadFrom populates the entry's content from a Reader,
	// mirroring io.ReaderFrom.
	ReadFrom(io.Reader) (int64, error)
}
45
+
46
// Flush finishes writing the current file's block padding.
// The current file must be fully written before Flush can be called.
//
// This is unnecessary as the next call to [Writer.WriteHeader] or [Writer.Close]
// will implicitly flush out the file's padding.
func (tw *Writer) Flush() error {
	if tw.err != nil {
		return tw.err
	}
	// Refuse to pad an entry whose declared Size has not been fully written.
	if nb := tw.curr.logicalRemaining(); nb > 0 {
		return fmt.Errorf("archive/tar: missed writing %d bytes", nb)
	}
	// Round the entry up to a block boundary with zeros; pad is always
	// less than one block, so zeroBlock suffices as the source.
	if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
		return tw.err
	}
	tw.pad = 0
	return nil
}
64
+
65
// WriteHeader writes hdr and prepares to accept the file's contents.
// The Header.Size determines how many bytes can be written for the next file.
// If the current file is not fully written, then this returns an error.
// This implicitly flushes any padding necessary before writing the header.
//
// The output format is chosen in preference order USTAR, then PAX, then GNU,
// restricted to whatever hdr.allowedFormats permits.
func (tw *Writer) WriteHeader(hdr *Header) error {
	if err := tw.Flush(); err != nil {
		return err
	}
	tw.hdr = *hdr // Shallow copy of Header

	// Avoid usage of the legacy TypeRegA flag, and automatically promote
	// it to use TypeReg or TypeDir.
	if tw.hdr.Typeflag == TypeRegA {
		if strings.HasSuffix(tw.hdr.Name, "/") {
			tw.hdr.Typeflag = TypeDir
		} else {
			tw.hdr.Typeflag = TypeReg
		}
	}

	// Round ModTime and ignore AccessTime and ChangeTime unless
	// the format is explicitly chosen.
	// This ensures nominal usage of WriteHeader (without specifying the format)
	// does not always result in the PAX format being chosen, which
	// causes a 1KiB increase to every header.
	if tw.hdr.Format == FormatUnknown {
		tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
		tw.hdr.AccessTime = time.Time{}
		tw.hdr.ChangeTime = time.Time{}
	}

	// Dispatch to the most restrictive format that can represent the header.
	allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
	switch {
	case allowedFormats.has(FormatUSTAR):
		tw.err = tw.writeUSTARHeader(&tw.hdr)
		return tw.err
	case allowedFormats.has(FormatPAX):
		tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
		return tw.err
	case allowedFormats.has(FormatGNU):
		tw.err = tw.writeGNUHeader(&tw.hdr)
		return tw.err
	default:
		return err // Non-fatal error
	}
}
111
+
112
// writeUSTARHeader writes hdr as a single USTAR-format header block,
// splitting an over-long name into the USTAR prefix/suffix fields when
// possible. It assumes hdr has already been validated by allowedFormats.
func (tw *Writer) writeUSTARHeader(hdr *Header) error {
	// Check if we can use USTAR prefix/suffix splitting.
	var namePrefix string
	if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
		namePrefix, hdr.Name = prefix, suffix
	}

	// Pack the main header.
	var f formatter
	blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
	f.formatString(blk.toUSTAR().prefix(), namePrefix)
	blk.setFormat(FormatUSTAR)
	if f.err != nil {
		return f.err // Should never happen since header is validated
	}
	return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
}
129
+
130
// writePAXHeader writes hdr in PAX format: first an optional extended-header
// entry carrying the key/value records in paxHdrs, then the main header block
// with best-effort (ASCII-truncated) values for fields the records override.
// A TypeXGlobalHeader entry emits only the extended header and returns early.
func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
	realName, realSize := hdr.Name, hdr.Size

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Handle sparse files.
		var spd sparseDatas
		var spb []byte
		if len(hdr.SparseHoles) > 0 {
			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
			sph = alignSparseEntries(sph, hdr.Size)
			spd = invertSparseEntries(sph, hdr.Size)

			// Format the sparse map.
			hdr.Size = 0 // Replace with encoded size
			spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
			for _, s := range spd {
				hdr.Size += s.Length
				spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
				spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
			}
			pad := blockPadding(int64(len(spb)))
			spb = append(spb, zeroBlock[:pad]...)
			hdr.Size += int64(len(spb)) // Accounts for encoded sparse map

			// Add and modify appropriate PAX records.
			dir, file := path.Split(realName)
			hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
			paxHdrs[paxGNUSparseMajor] = "1"
			paxHdrs[paxGNUSparseMinor] = "0"
			paxHdrs[paxGNUSparseName] = realName
			paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
			paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
			delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
		}
	*/
	_ = realSize // Only used by the disabled sparse-file code above

	// Write PAX records to the output.
	isGlobal := hdr.Typeflag == TypeXGlobalHeader
	if len(paxHdrs) > 0 || isGlobal {
		// Sort keys for deterministic ordering.
		var keys []string
		for k := range paxHdrs {
			keys = append(keys, k)
		}
		sort.Strings(keys)

		// Write each record to a buffer.
		var buf strings.Builder
		for _, k := range keys {
			rec, err := formatPAXRecord(k, paxHdrs[k])
			if err != nil {
				return err
			}
			buf.WriteString(rec)
		}

		// Write the extended header file.
		var name string
		var flag byte
		if isGlobal {
			name = realName
			if name == "" {
				name = "GlobalHead.0.0"
			}
			flag = TypeXGlobalHeader
		} else {
			// Conventional synthetic name for the extended-header entry.
			dir, file := path.Split(realName)
			name = path.Join(dir, "PaxHeaders.0", file)
			flag = TypeXHeader
		}
		data := buf.String()
		if len(data) > maxSpecialFileSize {
			return ErrFieldTooLong
		}
		if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
			return err // Global headers return here
		}
	}

	// Pack the main header.
	var f formatter // Ignore errors since they are expected
	fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
	blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
	blk.setFormat(FormatPAX)
	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
		return err
	}

	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		// Write the sparse map and setup the sparse writer if necessary.
		if len(spd) > 0 {
			// Use tw.curr since the sparse map is accounted for in hdr.Size.
			if _, err := tw.curr.Write(spb); err != nil {
				return err
			}
			tw.curr = &sparseFileWriter{tw.curr, spd, 0}
		}
	*/
	return nil
}
235
+
236
// writeGNUHeader writes hdr in GNU format. Over-long names and link targets
// are emitted first as separate "././@LongLink" pseudo-entries
// (TypeGNULongName / TypeGNULongLink), followed by the main header block
// with GNU-specific atime/ctime fields filled in when set.
func (tw *Writer) writeGNUHeader(hdr *Header) error {
	// Use long-link files if Name or Linkname exceeds the field size.
	const longName = "././@LongLink"
	if len(hdr.Name) > nameSize {
		// The value is NUL-terminated per GNU convention.
		data := hdr.Name + "\x00"
		if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
			return err
		}
	}
	if len(hdr.Linkname) > nameSize {
		data := hdr.Linkname + "\x00"
		if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
			return err
		}
	}

	// Pack the main header.
	var f formatter // Ignore errors since they are expected
	var spd sparseDatas
	var spb []byte
	blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
	if !hdr.AccessTime.IsZero() {
		f.formatNumeric(blk.toGNU().accessTime(), hdr.AccessTime.Unix())
	}
	if !hdr.ChangeTime.IsZero() {
		f.formatNumeric(blk.toGNU().changeTime(), hdr.ChangeTime.Unix())
	}
	// TODO(dsnet): Re-enable this when adding sparse support.
	// See https://golang.org/issue/22735
	/*
		if hdr.Typeflag == TypeGNUSparse {
			sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map
			sph = alignSparseEntries(sph, hdr.Size)
			spd = invertSparseEntries(sph, hdr.Size)

			// Format the sparse map.
			formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
				for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
					f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
					f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
					sp = sp[1:]
				}
				if len(sp) > 0 {
					sa.IsExtended()[0] = 1
				}
				return sp
			}
			sp2 := formatSPD(spd, blk.GNU().Sparse())
			for len(sp2) > 0 {
				var spHdr block
				sp2 = formatSPD(sp2, spHdr.Sparse())
				spb = append(spb, spHdr[:]...)
			}

			// Update size fields in the header block.
			realSize := hdr.Size
			hdr.Size = 0 // Encoded size; does not account for encoded sparse map
			for _, s := range spd {
				hdr.Size += s.Length
			}
			copy(blk.V7().Size(), zeroBlock[:]) // Reset field
			f.formatNumeric(blk.V7().Size(), hdr.Size)
			f.formatNumeric(blk.GNU().RealSize(), realSize)
		}
	*/
	blk.setFormat(FormatGNU)
	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
		return err
	}

	// Write the extended sparse map and setup the sparse writer if necessary.
	// (spd/spb are only populated by the disabled sparse code above, so this
	// branch is currently never taken.)
	if len(spd) > 0 {
		// Use tw.w since the sparse map is not accounted for in hdr.Size.
		if _, err := tw.w.Write(spb); err != nil {
			return err
		}
		tw.curr = &sparseFileWriter{tw.curr, spd, 0}
	}
	return nil
}
316
+
317
// Formatter callbacks used by templateV7Plus so that each header format can
// supply its own field encoding (e.g. octal numbers for USTAR/PAX via
// formatter.formatOctal, base-256-capable numbers for GNU via formatNumeric).
type (
	stringFormatter func([]byte, string)
	numberFormatter func([]byte, int64)
)
321
+
322
// templateV7Plus fills out the V7 fields of a block using values from hdr.
// It also fills out fields (uname, gname, devmajor, devminor) that are
// shared in the USTAR, PAX, and GNU formats using the provided formatters.
//
// The block returned is only valid until the next call to
// templateV7Plus or writeRawFile.
func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
	tw.blk.reset()

	// A zero ModTime is encoded as the Unix epoch.
	modTime := hdr.ModTime
	if modTime.IsZero() {
		modTime = time.Unix(0, 0)
	}

	// Fields common to all formats (original V7 layout).
	v7 := tw.blk.toV7()
	v7.typeFlag()[0] = hdr.Typeflag
	fmtStr(v7.name(), hdr.Name)
	fmtStr(v7.linkName(), hdr.Linkname)
	fmtNum(v7.mode(), hdr.Mode)
	fmtNum(v7.uid(), int64(hdr.Uid))
	fmtNum(v7.gid(), int64(hdr.Gid))
	fmtNum(v7.size(), hdr.Size)
	fmtNum(v7.modTime(), modTime.Unix())

	// Fields shared by the USTAR, PAX, and GNU extensions.
	ustar := tw.blk.toUSTAR()
	fmtStr(ustar.userName(), hdr.Uname)
	fmtStr(ustar.groupName(), hdr.Gname)
	fmtNum(ustar.devMajor(), hdr.Devmajor)
	fmtNum(ustar.devMinor(), hdr.Devminor)

	return &tw.blk
}
354
+
355
// writeRawFile writes a minimal file with the given name and flag type.
// It uses format to encode the header format and will write data as the body.
// It uses default values for all of the other fields (as BSD and GNU tar does).
func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
	tw.blk.reset()

	// Best effort for the filename: non-ASCII bytes are substituted and
	// anything beyond the name field is truncated.
	name = toASCII(name)
	if len(name) > nameSize {
		name = name[:nameSize]
	}
	name = strings.TrimRight(name, "/")

	// Pack the header with zeroed mode/uid/gid/mtime.
	var f formatter
	v7 := tw.blk.toV7()
	v7.typeFlag()[0] = flag
	f.formatString(v7.name(), name)
	f.formatOctal(v7.mode(), 0)
	f.formatOctal(v7.uid(), 0)
	f.formatOctal(v7.gid(), 0)
	f.formatOctal(v7.size(), int64(len(data))) // Must be < 8GiB
	f.formatOctal(v7.modTime(), 0)
	tw.blk.setFormat(format)
	if f.err != nil {
		return f.err // Only occurs if size condition is violated
	}

	// Write the header and data.
	if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
		return err
	}
	// Writing through tw (not tw.w) keeps padding bookkeeping consistent.
	_, err := io.WriteString(tw, data)
	return err
}
389
+
390
// writeRawHeader writes the value of blk, regardless of its value.
// It sets up the Writer such that it can accept a file of the given size.
// If the flag is a special header-only flag, then the size is treated as zero.
func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
	// Flush first so any padding for the previous entry precedes this header.
	if err := tw.Flush(); err != nil {
		return err
	}
	if _, err := tw.w.Write(blk[:]); err != nil {
		return err
	}
	if isHeaderOnlyType(flag) {
		size = 0
	}
	// Prepare bookkeeping for the entry body and its trailing block padding.
	tw.curr = &regFileWriter{tw.w, size}
	tw.pad = blockPadding(size)
	return nil
}
407
+
408
// AddFS adds the files from fs.FS to the archive.
// It walks the directory tree starting at the root of the filesystem
// adding each file to the tar archive while maintaining the directory structure.
//
// Note: directory entries themselves are skipped; only regular files are
// archived, and any non-regular file (other than a directory) is an error.
func (tw *Writer) AddFS(fsys fs.FS) error {
	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		// TODO(#49580): Handle symlinks when fs.ReadLinkFS is available.
		if !info.Mode().IsRegular() {
			return errors.New("tar: cannot add non-regular file")
		}
		h, err := FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		// FileInfoHeader only records the base name; restore the full
		// slash-separated path within the filesystem.
		h.Name = name
		if err := tw.WriteHeader(h); err != nil {
			return err
		}
		f, err := fsys.Open(name)
		if err != nil {
			return err
		}
		// The deferred Close runs when this per-file callback returns,
		// so files are not held open across the whole walk.
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}
444
+
445
+ // splitUSTARPath splits a path according to USTAR prefix and suffix rules.
446
+ // If the path is not splittable, then it will return ("", "", false).
447
+ func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
448
+ length := len(name)
449
+ if length <= nameSize || !isASCII(name) {
450
+ return "", "", false
451
+ } else if length > prefixSize+1 {
452
+ length = prefixSize + 1
453
+ } else if name[length-1] == '/' {
454
+ length--
455
+ }
456
+
457
+ i := strings.LastIndex(name[:length], "/")
458
+ nlen := len(name) - i - 1 // nlen is length of suffix
459
+ plen := i // plen is length of prefix
460
+ if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
461
+ return "", "", false
462
+ }
463
+ return name[:i], name[i+1:], true
464
+ }
465
+
466
+ // Write writes to the current file in the tar archive.
467
+ // Write returns the error [ErrWriteTooLong] if more than
468
+ // Header.Size bytes are written after [Writer.WriteHeader].
469
+ //
470
+ // Calling Write on special types like [TypeLink], [TypeSymlink], [TypeChar],
471
+ // [TypeBlock], [TypeDir], and [TypeFifo] returns (0, [ErrWriteTooLong]) regardless
472
+ // of what the [Header.Size] claims.
473
+ func (tw *Writer) Write(b []byte) (int, error) {
474
+ if tw.err != nil {
475
+ return 0, tw.err
476
+ }
477
+ n, err := tw.curr.Write(b)
478
+ if err != nil && err != ErrWriteTooLong {
479
+ tw.err = err
480
+ }
481
+ return n, err
482
+ }
483
+
484
+ // readFrom populates the content of the current file by reading from r.
485
+ // The bytes read must match the number of remaining bytes in the current file.
486
+ //
487
+ // If the current file is sparse and r is an io.ReadSeeker,
488
+ // then readFrom uses Seek to skip past holes defined in Header.SparseHoles,
489
+ // assuming that skipped regions are all NULs.
490
+ // This always reads the last byte to ensure r is the right size.
491
+ //
492
+ // TODO(dsnet): Re-export this when adding sparse file support.
493
+ // See https://golang.org/issue/22735
494
+ func (tw *Writer) readFrom(r io.Reader) (int64, error) {
495
+ if tw.err != nil {
496
+ return 0, tw.err
497
+ }
498
+ n, err := tw.curr.ReadFrom(r)
499
+ if err != nil && err != ErrWriteTooLong {
500
+ tw.err = err
501
+ }
502
+ return n, err
503
+ }
504
+
505
+ // Close closes the tar archive by flushing the padding, and writing the footer.
506
+ // If the current file (from a prior call to [Writer.WriteHeader]) is not fully written,
507
+ // then this returns an error.
508
+ func (tw *Writer) Close() error {
509
+ if tw.err == ErrWriteAfterClose {
510
+ return nil
511
+ }
512
+ if tw.err != nil {
513
+ return tw.err
514
+ }
515
+
516
+ // Trailer: two zero blocks.
517
+ err := tw.Flush()
518
+ for i := 0; i < 2 && err == nil; i++ {
519
+ _, err = tw.w.Write(zeroBlock[:])
520
+ }
521
+
522
+ // Ensure all future actions are invalid.
523
+ tw.err = ErrWriteAfterClose
524
+ return err // Report IO errors
525
+ }
526
+
527
+ // regFileWriter is a fileWriter for writing data to a regular file entry.
528
+ type regFileWriter struct {
529
+ w io.Writer // Underlying Writer
530
+ nb int64 // Number of remaining bytes to write
531
+ }
532
+
533
+ func (fw *regFileWriter) Write(b []byte) (n int, err error) {
534
+ overwrite := int64(len(b)) > fw.nb
535
+ if overwrite {
536
+ b = b[:fw.nb]
537
+ }
538
+ if len(b) > 0 {
539
+ n, err = fw.w.Write(b)
540
+ fw.nb -= int64(n)
541
+ }
542
+ switch {
543
+ case err != nil:
544
+ return n, err
545
+ case overwrite:
546
+ return n, ErrWriteTooLong
547
+ default:
548
+ return n, nil
549
+ }
550
+ }
551
+
552
+ func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
553
+ return io.Copy(struct{ io.Writer }{fw}, r)
554
+ }
555
+
556
+ // logicalRemaining implements fileState.logicalRemaining.
557
+ func (fw regFileWriter) logicalRemaining() int64 {
558
+ return fw.nb
559
+ }
560
+
561
+ // physicalRemaining implements fileState.physicalRemaining.
562
+ func (fw regFileWriter) physicalRemaining() int64 {
563
+ return fw.nb
564
+ }
565
+
566
+ // sparseFileWriter is a fileWriter for writing data to a sparse file entry.
567
+ type sparseFileWriter struct {
568
+ fw fileWriter // Underlying fileWriter
569
+ sp sparseDatas // Normalized list of data fragments
570
+ pos int64 // Current position in sparse file
571
+ }
572
+
573
+ func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
574
+ overwrite := int64(len(b)) > sw.logicalRemaining()
575
+ if overwrite {
576
+ b = b[:sw.logicalRemaining()]
577
+ }
578
+
579
+ b0 := b
580
+ endPos := sw.pos + int64(len(b))
581
+ for endPos > sw.pos && err == nil {
582
+ var nf int // Bytes written in fragment
583
+ dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
584
+ if sw.pos < dataStart { // In a hole fragment
585
+ bf := b[:min(int64(len(b)), dataStart-sw.pos)]
586
+ nf, err = zeroWriter{}.Write(bf)
587
+ } else { // In a data fragment
588
+ bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
589
+ nf, err = sw.fw.Write(bf)
590
+ }
591
+ b = b[nf:]
592
+ sw.pos += int64(nf)
593
+ if sw.pos >= dataEnd && len(sw.sp) > 1 {
594
+ sw.sp = sw.sp[1:] // Ensure last fragment always remains
595
+ }
596
+ }
597
+
598
+ n = len(b0) - len(b)
599
+ switch {
600
+ case err == ErrWriteTooLong:
601
+ return n, errMissData // Not possible; implies bug in validation logic
602
+ case err != nil:
603
+ return n, err
604
+ case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
605
+ return n, errUnrefData // Not possible; implies bug in validation logic
606
+ case overwrite:
607
+ return n, ErrWriteTooLong
608
+ default:
609
+ return n, nil
610
+ }
611
+ }
612
+
613
+ func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
614
+ rs, ok := r.(io.ReadSeeker)
615
+ if ok {
616
+ if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
617
+ ok = false // Not all io.Seeker can really seek
618
+ }
619
+ }
620
+ if !ok {
621
+ return io.Copy(struct{ io.Writer }{sw}, r)
622
+ }
623
+
624
+ var readLastByte bool
625
+ pos0 := sw.pos
626
+ for sw.logicalRemaining() > 0 && !readLastByte && err == nil {
627
+ var nf int64 // Size of fragment
628
+ dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
629
+ if sw.pos < dataStart { // In a hole fragment
630
+ nf = dataStart - sw.pos
631
+ if sw.physicalRemaining() == 0 {
632
+ readLastByte = true
633
+ nf--
634
+ }
635
+ _, err = rs.Seek(nf, io.SeekCurrent)
636
+ } else { // In a data fragment
637
+ nf = dataEnd - sw.pos
638
+ nf, err = io.CopyN(sw.fw, rs, nf)
639
+ }
640
+ sw.pos += nf
641
+ if sw.pos >= dataEnd && len(sw.sp) > 1 {
642
+ sw.sp = sw.sp[1:] // Ensure last fragment always remains
643
+ }
644
+ }
645
+
646
+ // If the last fragment is a hole, then seek to 1-byte before EOF, and
647
+ // read a single byte to ensure the file is the right size.
648
+ if readLastByte && err == nil {
649
+ _, err = mustReadFull(rs, []byte{0})
650
+ sw.pos++
651
+ }
652
+
653
+ n = sw.pos - pos0
654
+ switch {
655
+ case err == io.EOF:
656
+ return n, io.ErrUnexpectedEOF
657
+ case err == ErrWriteTooLong:
658
+ return n, errMissData // Not possible; implies bug in validation logic
659
+ case err != nil:
660
+ return n, err
661
+ case sw.logicalRemaining() == 0 && sw.physicalRemaining() > 0:
662
+ return n, errUnrefData // Not possible; implies bug in validation logic
663
+ default:
664
+ return n, ensureEOF(rs)
665
+ }
666
+ }
667
+
668
+ func (sw sparseFileWriter) logicalRemaining() int64 {
669
+ return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
670
+ }
671
+ func (sw sparseFileWriter) physicalRemaining() int64 {
672
+ return sw.fw.physicalRemaining()
673
+ }
674
+
675
+ // zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
676
+ type zeroWriter struct{}
677
+
678
+ func (zeroWriter) Write(b []byte) (int, error) {
679
+ for i, c := range b {
680
+ if c != 0 {
681
+ return i, errWriteHole
682
+ }
683
+ }
684
+ return len(b), nil
685
+ }
686
+
687
+ // ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
688
+ func ensureEOF(r io.Reader) error {
689
+ n, err := tryReadFull(r, []byte{0})
690
+ switch {
691
+ case n > 0:
692
+ return ErrWriteTooLong
693
+ case err == io.EOF:
694
+ return nil
695
+ default:
696
+ return err
697
+ }
698
+ }
platform/dbops/binaries/go/go/src/archive/tar/writer_test.go ADDED
@@ -0,0 +1,1401 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2009 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package tar
6
+
7
+ import (
8
+ "bytes"
9
+ "encoding/hex"
10
+ "errors"
11
+ "io"
12
+ "io/fs"
13
+ "os"
14
+ "path"
15
+ "reflect"
16
+ "sort"
17
+ "strings"
18
+ "testing"
19
+ "testing/fstest"
20
+ "testing/iotest"
21
+ "time"
22
+ )
23
+
24
+ func bytediff(a, b []byte) string {
25
+ const (
26
+ uniqueA = "- "
27
+ uniqueB = "+ "
28
+ identity = " "
29
+ )
30
+ var ss []string
31
+ sa := strings.Split(strings.TrimSpace(hex.Dump(a)), "\n")
32
+ sb := strings.Split(strings.TrimSpace(hex.Dump(b)), "\n")
33
+ for len(sa) > 0 && len(sb) > 0 {
34
+ if sa[0] == sb[0] {
35
+ ss = append(ss, identity+sa[0])
36
+ } else {
37
+ ss = append(ss, uniqueA+sa[0])
38
+ ss = append(ss, uniqueB+sb[0])
39
+ }
40
+ sa, sb = sa[1:], sb[1:]
41
+ }
42
+ for len(sa) > 0 {
43
+ ss = append(ss, uniqueA+sa[0])
44
+ sa = sa[1:]
45
+ }
46
+ for len(sb) > 0 {
47
+ ss = append(ss, uniqueB+sb[0])
48
+ sb = sb[1:]
49
+ }
50
+ return strings.Join(ss, "\n")
51
+ }
52
+
53
+ func TestWriter(t *testing.T) {
54
+ type (
55
+ testHeader struct { // WriteHeader(hdr) == wantErr
56
+ hdr Header
57
+ wantErr error
58
+ }
59
+ testWrite struct { // Write(str) == (wantCnt, wantErr)
60
+ str string
61
+ wantCnt int
62
+ wantErr error
63
+ }
64
+ testReadFrom struct { // ReadFrom(testFile{ops}) == (wantCnt, wantErr)
65
+ ops fileOps
66
+ wantCnt int64
67
+ wantErr error
68
+ }
69
+ testClose struct { // Close() == wantErr
70
+ wantErr error
71
+ }
72
+ testFnc any // testHeader | testWrite | testReadFrom | testClose
73
+ )
74
+
75
+ vectors := []struct {
76
+ file string // Optional filename of expected output
77
+ tests []testFnc
78
+ }{{
79
+ // The writer test file was produced with this command:
80
+ // tar (GNU tar) 1.26
81
+ // ln -s small.txt link.txt
82
+ // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
83
+ file: "testdata/writer.tar",
84
+ tests: []testFnc{
85
+ testHeader{Header{
86
+ Typeflag: TypeReg,
87
+ Name: "small.txt",
88
+ Size: 5,
89
+ Mode: 0640,
90
+ Uid: 73025,
91
+ Gid: 5000,
92
+ Uname: "dsymonds",
93
+ Gname: "eng",
94
+ ModTime: time.Unix(1246508266, 0),
95
+ }, nil},
96
+ testWrite{"Kilts", 5, nil},
97
+
98
+ testHeader{Header{
99
+ Typeflag: TypeReg,
100
+ Name: "small2.txt",
101
+ Size: 11,
102
+ Mode: 0640,
103
+ Uid: 73025,
104
+ Uname: "dsymonds",
105
+ Gname: "eng",
106
+ Gid: 5000,
107
+ ModTime: time.Unix(1245217492, 0),
108
+ }, nil},
109
+ testWrite{"Google.com\n", 11, nil},
110
+
111
+ testHeader{Header{
112
+ Typeflag: TypeSymlink,
113
+ Name: "link.txt",
114
+ Linkname: "small.txt",
115
+ Mode: 0777,
116
+ Uid: 1000,
117
+ Gid: 1000,
118
+ Uname: "strings",
119
+ Gname: "strings",
120
+ ModTime: time.Unix(1314603082, 0),
121
+ }, nil},
122
+ testWrite{"", 0, nil},
123
+
124
+ testClose{nil},
125
+ },
126
+ }, {
127
+ // The truncated test file was produced using these commands:
128
+ // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
129
+ // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
130
+ file: "testdata/writer-big.tar",
131
+ tests: []testFnc{
132
+ testHeader{Header{
133
+ Typeflag: TypeReg,
134
+ Name: "tmp/16gig.txt",
135
+ Size: 16 << 30,
136
+ Mode: 0640,
137
+ Uid: 73025,
138
+ Gid: 5000,
139
+ Uname: "dsymonds",
140
+ Gname: "eng",
141
+ ModTime: time.Unix(1254699560, 0),
142
+ Format: FormatGNU,
143
+ }, nil},
144
+ },
145
+ }, {
146
+ // This truncated file was produced using this library.
147
+ // It was verified to work with GNU tar 1.27.1 and BSD tar 3.1.2.
148
+ // dd if=/dev/zero bs=1G count=16 >> writer-big-long.tar
149
+ // gnutar -xvf writer-big-long.tar
150
+ // bsdtar -xvf writer-big-long.tar
151
+ //
152
+ // This file is in PAX format.
153
+ file: "testdata/writer-big-long.tar",
154
+ tests: []testFnc{
155
+ testHeader{Header{
156
+ Typeflag: TypeReg,
157
+ Name: strings.Repeat("longname/", 15) + "16gig.txt",
158
+ Size: 16 << 30,
159
+ Mode: 0644,
160
+ Uid: 1000,
161
+ Gid: 1000,
162
+ Uname: "guillaume",
163
+ Gname: "guillaume",
164
+ ModTime: time.Unix(1399583047, 0),
165
+ }, nil},
166
+ },
167
+ }, {
168
+ // This file was produced using GNU tar v1.17.
169
+ // gnutar -b 4 --format=ustar (longname/)*15 + file.txt
170
+ file: "testdata/ustar.tar",
171
+ tests: []testFnc{
172
+ testHeader{Header{
173
+ Typeflag: TypeReg,
174
+ Name: strings.Repeat("longname/", 15) + "file.txt",
175
+ Size: 6,
176
+ Mode: 0644,
177
+ Uid: 501,
178
+ Gid: 20,
179
+ Uname: "shane",
180
+ Gname: "staff",
181
+ ModTime: time.Unix(1360135598, 0),
182
+ }, nil},
183
+ testWrite{"hello\n", 6, nil},
184
+ testClose{nil},
185
+ },
186
+ }, {
187
+ // This file was produced using GNU tar v1.26:
188
+ // echo "Slartibartfast" > file.txt
189
+ // ln file.txt hard.txt
190
+ // tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
191
+ file: "testdata/hardlink.tar",
192
+ tests: []testFnc{
193
+ testHeader{Header{
194
+ Typeflag: TypeReg,
195
+ Name: "file.txt",
196
+ Size: 15,
197
+ Mode: 0644,
198
+ Uid: 1000,
199
+ Gid: 100,
200
+ Uname: "vbatts",
201
+ Gname: "users",
202
+ ModTime: time.Unix(1425484303, 0),
203
+ }, nil},
204
+ testWrite{"Slartibartfast\n", 15, nil},
205
+
206
+ testHeader{Header{
207
+ Typeflag: TypeLink,
208
+ Name: "hard.txt",
209
+ Linkname: "file.txt",
210
+ Mode: 0644,
211
+ Uid: 1000,
212
+ Gid: 100,
213
+ Uname: "vbatts",
214
+ Gname: "users",
215
+ ModTime: time.Unix(1425484303, 0),
216
+ }, nil},
217
+ testWrite{"", 0, nil},
218
+
219
+ testClose{nil},
220
+ },
221
+ }, {
222
+ tests: []testFnc{
223
+ testHeader{Header{
224
+ Typeflag: TypeReg,
225
+ Name: "bad-null.txt",
226
+ Xattrs: map[string]string{"null\x00null\x00": "fizzbuzz"},
227
+ }, headerError{}},
228
+ },
229
+ }, {
230
+ tests: []testFnc{
231
+ testHeader{Header{
232
+ Typeflag: TypeReg,
233
+ Name: "null\x00.txt",
234
+ }, headerError{}},
235
+ },
236
+ }, {
237
+ file: "testdata/pax-records.tar",
238
+ tests: []testFnc{
239
+ testHeader{Header{
240
+ Typeflag: TypeReg,
241
+ Name: "file",
242
+ Uname: strings.Repeat("long", 10),
243
+ PAXRecords: map[string]string{
244
+ "path": "FILE", // Should be ignored
245
+ "GNU.sparse.map": "0,0", // Should be ignored
246
+ "comment": "Hello, 世界",
247
+ "GOLANG.pkg": "tar",
248
+ },
249
+ }, nil},
250
+ testClose{nil},
251
+ },
252
+ }, {
253
+ // Craft a theoretically valid PAX archive with global headers.
254
+ // The GNU and BSD tar tools do not parse these the same way.
255
+ //
256
+ // BSD tar v3.1.2 parses and ignores all global headers;
257
+ // the behavior is verified by researching the source code.
258
+ //
259
+ // $ bsdtar -tvf pax-global-records.tar
260
+ // ---------- 0 0 0 0 Dec 31 1969 file1
261
+ // ---------- 0 0 0 0 Dec 31 1969 file2
262
+ // ---------- 0 0 0 0 Dec 31 1969 file3
263
+ // ---------- 0 0 0 0 May 13 2014 file4
264
+ //
265
+ // GNU tar v1.27.1 applies global headers to subsequent records,
266
+ // but does not do the following properly:
267
+ // * It does not treat an empty record as deletion.
268
+ // * It does not use subsequent global headers to update previous ones.
269
+ //
270
+ // $ gnutar -tvf pax-global-records.tar
271
+ // ---------- 0/0 0 2017-07-13 19:40 global1
272
+ // ---------- 0/0 0 2017-07-13 19:40 file2
273
+ // gnutar: Substituting `.' for empty member name
274
+ // ---------- 0/0 0 1969-12-31 16:00
275
+ // gnutar: Substituting `.' for empty member name
276
+ // ---------- 0/0 0 2014-05-13 09:53
277
+ //
278
+ // According to the PAX specification, this should have been the result:
279
+ // ---------- 0/0 0 2017-07-13 19:40 global1
280
+ // ---------- 0/0 0 2017-07-13 19:40 file2
281
+ // ---------- 0/0 0 2017-07-13 19:40 file3
282
+ // ---------- 0/0 0 2014-05-13 09:53 file4
283
+ file: "testdata/pax-global-records.tar",
284
+ tests: []testFnc{
285
+ testHeader{Header{
286
+ Typeflag: TypeXGlobalHeader,
287
+ PAXRecords: map[string]string{"path": "global1", "mtime": "1500000000.0"},
288
+ }, nil},
289
+ testHeader{Header{
290
+ Typeflag: TypeReg, Name: "file1",
291
+ }, nil},
292
+ testHeader{Header{
293
+ Typeflag: TypeReg,
294
+ Name: "file2",
295
+ PAXRecords: map[string]string{"path": "file2"},
296
+ }, nil},
297
+ testHeader{Header{
298
+ Typeflag: TypeXGlobalHeader,
299
+ PAXRecords: map[string]string{"path": ""}, // Should delete "path", but keep "mtime"
300
+ }, nil},
301
+ testHeader{Header{
302
+ Typeflag: TypeReg, Name: "file3",
303
+ }, nil},
304
+ testHeader{Header{
305
+ Typeflag: TypeReg,
306
+ Name: "file4",
307
+ ModTime: time.Unix(1400000000, 0),
308
+ PAXRecords: map[string]string{"mtime": "1400000000"},
309
+ }, nil},
310
+ testClose{nil},
311
+ },
312
+ }, {
313
+ file: "testdata/gnu-utf8.tar",
314
+ tests: []testFnc{
315
+ testHeader{Header{
316
+ Typeflag: TypeReg,
317
+ Name: "☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹",
318
+ Mode: 0644,
319
+ Uid: 1000, Gid: 1000,
320
+ Uname: "☺",
321
+ Gname: "⚹",
322
+ ModTime: time.Unix(0, 0),
323
+ Format: FormatGNU,
324
+ }, nil},
325
+ testClose{nil},
326
+ },
327
+ }, {
328
+ file: "testdata/gnu-not-utf8.tar",
329
+ tests: []testFnc{
330
+ testHeader{Header{
331
+ Typeflag: TypeReg,
332
+ Name: "hi\x80\x81\x82\x83bye",
333
+ Mode: 0644,
334
+ Uid: 1000,
335
+ Gid: 1000,
336
+ Uname: "rawr",
337
+ Gname: "dsnet",
338
+ ModTime: time.Unix(0, 0),
339
+ Format: FormatGNU,
340
+ }, nil},
341
+ testClose{nil},
342
+ },
343
+ // TODO(dsnet): Re-enable this test when adding sparse support.
344
+ // See https://golang.org/issue/22735
345
+ /*
346
+ }, {
347
+ file: "testdata/gnu-nil-sparse-data.tar",
348
+ tests: []testFnc{
349
+ testHeader{Header{
350
+ Typeflag: TypeGNUSparse,
351
+ Name: "sparse.db",
352
+ Size: 1000,
353
+ SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}},
354
+ }, nil},
355
+ testWrite{strings.Repeat("0123456789", 100), 1000, nil},
356
+ testClose{},
357
+ },
358
+ }, {
359
+ file: "testdata/gnu-nil-sparse-hole.tar",
360
+ tests: []testFnc{
361
+ testHeader{Header{
362
+ Typeflag: TypeGNUSparse,
363
+ Name: "sparse.db",
364
+ Size: 1000,
365
+ SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}},
366
+ }, nil},
367
+ testWrite{strings.Repeat("\x00", 1000), 1000, nil},
368
+ testClose{},
369
+ },
370
+ }, {
371
+ file: "testdata/pax-nil-sparse-data.tar",
372
+ tests: []testFnc{
373
+ testHeader{Header{
374
+ Typeflag: TypeReg,
375
+ Name: "sparse.db",
376
+ Size: 1000,
377
+ SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}},
378
+ }, nil},
379
+ testWrite{strings.Repeat("0123456789", 100), 1000, nil},
380
+ testClose{},
381
+ },
382
+ }, {
383
+ file: "testdata/pax-nil-sparse-hole.tar",
384
+ tests: []testFnc{
385
+ testHeader{Header{
386
+ Typeflag: TypeReg,
387
+ Name: "sparse.db",
388
+ Size: 1000,
389
+ SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}},
390
+ }, nil},
391
+ testWrite{strings.Repeat("\x00", 1000), 1000, nil},
392
+ testClose{},
393
+ },
394
+ }, {
395
+ file: "testdata/gnu-sparse-big.tar",
396
+ tests: []testFnc{
397
+ testHeader{Header{
398
+ Typeflag: TypeGNUSparse,
399
+ Name: "gnu-sparse",
400
+ Size: 6e10,
401
+ SparseHoles: []sparseEntry{
402
+ {Offset: 0e10, Length: 1e10 - 100},
403
+ {Offset: 1e10, Length: 1e10 - 100},
404
+ {Offset: 2e10, Length: 1e10 - 100},
405
+ {Offset: 3e10, Length: 1e10 - 100},
406
+ {Offset: 4e10, Length: 1e10 - 100},
407
+ {Offset: 5e10, Length: 1e10 - 100},
408
+ },
409
+ }, nil},
410
+ testReadFrom{fileOps{
411
+ int64(1e10 - blockSize),
412
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
413
+ int64(1e10 - blockSize),
414
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
415
+ int64(1e10 - blockSize),
416
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
417
+ int64(1e10 - blockSize),
418
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
419
+ int64(1e10 - blockSize),
420
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
421
+ int64(1e10 - blockSize),
422
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
423
+ }, 6e10, nil},
424
+ testClose{nil},
425
+ },
426
+ }, {
427
+ file: "testdata/pax-sparse-big.tar",
428
+ tests: []testFnc{
429
+ testHeader{Header{
430
+ Typeflag: TypeReg,
431
+ Name: "pax-sparse",
432
+ Size: 6e10,
433
+ SparseHoles: []sparseEntry{
434
+ {Offset: 0e10, Length: 1e10 - 100},
435
+ {Offset: 1e10, Length: 1e10 - 100},
436
+ {Offset: 2e10, Length: 1e10 - 100},
437
+ {Offset: 3e10, Length: 1e10 - 100},
438
+ {Offset: 4e10, Length: 1e10 - 100},
439
+ {Offset: 5e10, Length: 1e10 - 100},
440
+ },
441
+ }, nil},
442
+ testReadFrom{fileOps{
443
+ int64(1e10 - blockSize),
444
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
445
+ int64(1e10 - blockSize),
446
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
447
+ int64(1e10 - blockSize),
448
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
449
+ int64(1e10 - blockSize),
450
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
451
+ int64(1e10 - blockSize),
452
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
453
+ int64(1e10 - blockSize),
454
+ strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10),
455
+ }, 6e10, nil},
456
+ testClose{nil},
457
+ },
458
+ */
459
+ }, {
460
+ file: "testdata/trailing-slash.tar",
461
+ tests: []testFnc{
462
+ testHeader{Header{Name: strings.Repeat("123456789/", 30)}, nil},
463
+ testClose{nil},
464
+ },
465
+ }, {
466
+ // Automatically promote zero value of Typeflag depending on the name.
467
+ file: "testdata/file-and-dir.tar",
468
+ tests: []testFnc{
469
+ testHeader{Header{Name: "small.txt", Size: 5}, nil},
470
+ testWrite{"Kilts", 5, nil},
471
+ testHeader{Header{Name: "dir/"}, nil},
472
+ testClose{nil},
473
+ },
474
+ }}
475
+
476
+ equalError := func(x, y error) bool {
477
+ _, ok1 := x.(headerError)
478
+ _, ok2 := y.(headerError)
479
+ if ok1 || ok2 {
480
+ return ok1 && ok2
481
+ }
482
+ return x == y
483
+ }
484
+ for _, v := range vectors {
485
+ t.Run(path.Base(v.file), func(t *testing.T) {
486
+ const maxSize = 10 << 10 // 10KiB
487
+ buf := new(bytes.Buffer)
488
+ tw := NewWriter(iotest.TruncateWriter(buf, maxSize))
489
+
490
+ for i, tf := range v.tests {
491
+ switch tf := tf.(type) {
492
+ case testHeader:
493
+ err := tw.WriteHeader(&tf.hdr)
494
+ if !equalError(err, tf.wantErr) {
495
+ t.Fatalf("test %d, WriteHeader() = %v, want %v", i, err, tf.wantErr)
496
+ }
497
+ case testWrite:
498
+ got, err := tw.Write([]byte(tf.str))
499
+ if got != tf.wantCnt || !equalError(err, tf.wantErr) {
500
+ t.Fatalf("test %d, Write() = (%d, %v), want (%d, %v)", i, got, err, tf.wantCnt, tf.wantErr)
501
+ }
502
+ case testReadFrom:
503
+ f := &testFile{ops: tf.ops}
504
+ got, err := tw.readFrom(f)
505
+ if _, ok := err.(testError); ok {
506
+ t.Errorf("test %d, ReadFrom(): %v", i, err)
507
+ } else if got != tf.wantCnt || !equalError(err, tf.wantErr) {
508
+ t.Errorf("test %d, ReadFrom() = (%d, %v), want (%d, %v)", i, got, err, tf.wantCnt, tf.wantErr)
509
+ }
510
+ if len(f.ops) > 0 {
511
+ t.Errorf("test %d, expected %d more operations", i, len(f.ops))
512
+ }
513
+ case testClose:
514
+ err := tw.Close()
515
+ if !equalError(err, tf.wantErr) {
516
+ t.Fatalf("test %d, Close() = %v, want %v", i, err, tf.wantErr)
517
+ }
518
+ default:
519
+ t.Fatalf("test %d, unknown test operation: %T", i, tf)
520
+ }
521
+ }
522
+
523
+ if v.file != "" {
524
+ want, err := os.ReadFile(v.file)
525
+ if err != nil {
526
+ t.Fatalf("ReadFile() = %v, want nil", err)
527
+ }
528
+ got := buf.Bytes()
529
+ if !bytes.Equal(want, got) {
530
+ t.Fatalf("incorrect result: (-got +want)\n%v", bytediff(got, want))
531
+ }
532
+ }
533
+ })
534
+ }
535
+ }
536
+
537
+ func TestPax(t *testing.T) {
538
+ // Create an archive with a large name
539
+ fileinfo, err := os.Stat("testdata/small.txt")
540
+ if err != nil {
541
+ t.Fatal(err)
542
+ }
543
+ hdr, err := FileInfoHeader(fileinfo, "")
544
+ if err != nil {
545
+ t.Fatalf("os.Stat: %v", err)
546
+ }
547
+ // Force a PAX long name to be written
548
+ longName := strings.Repeat("ab", 100)
549
+ contents := strings.Repeat(" ", int(hdr.Size))
550
+ hdr.Name = longName
551
+ var buf bytes.Buffer
552
+ writer := NewWriter(&buf)
553
+ if err := writer.WriteHeader(hdr); err != nil {
554
+ t.Fatal(err)
555
+ }
556
+ if _, err = writer.Write([]byte(contents)); err != nil {
557
+ t.Fatal(err)
558
+ }
559
+ if err := writer.Close(); err != nil {
560
+ t.Fatal(err)
561
+ }
562
+ // Simple test to make sure PAX extensions are in effect
563
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
564
+ t.Fatal("Expected at least one PAX header to be written.")
565
+ }
566
+ // Test that we can get a long name back out of the archive.
567
+ reader := NewReader(&buf)
568
+ hdr, err = reader.Next()
569
+ if err != nil {
570
+ t.Fatal(err)
571
+ }
572
+ if hdr.Name != longName {
573
+ t.Fatal("Couldn't recover long file name")
574
+ }
575
+ }
576
+
577
+ func TestPaxSymlink(t *testing.T) {
578
+ // Create an archive with a large linkname
579
+ fileinfo, err := os.Stat("testdata/small.txt")
580
+ if err != nil {
581
+ t.Fatal(err)
582
+ }
583
+ hdr, err := FileInfoHeader(fileinfo, "")
584
+ hdr.Typeflag = TypeSymlink
585
+ if err != nil {
586
+ t.Fatalf("os.Stat:1 %v", err)
587
+ }
588
+ // Force a PAX long linkname to be written
589
+ longLinkname := strings.Repeat("1234567890/1234567890", 10)
590
+ hdr.Linkname = longLinkname
591
+
592
+ hdr.Size = 0
593
+ var buf bytes.Buffer
594
+ writer := NewWriter(&buf)
595
+ if err := writer.WriteHeader(hdr); err != nil {
596
+ t.Fatal(err)
597
+ }
598
+ if err := writer.Close(); err != nil {
599
+ t.Fatal(err)
600
+ }
601
+ // Simple test to make sure PAX extensions are in effect
602
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
603
+ t.Fatal("Expected at least one PAX header to be written.")
604
+ }
605
+ // Test that we can get a long name back out of the archive.
606
+ reader := NewReader(&buf)
607
+ hdr, err = reader.Next()
608
+ if err != nil {
609
+ t.Fatal(err)
610
+ }
611
+ if hdr.Linkname != longLinkname {
612
+ t.Fatal("Couldn't recover long link name")
613
+ }
614
+ }
615
+
616
+ func TestPaxNonAscii(t *testing.T) {
617
+ // Create an archive with non ascii. These should trigger a pax header
618
+ // because pax headers have a defined utf-8 encoding.
619
+ fileinfo, err := os.Stat("testdata/small.txt")
620
+ if err != nil {
621
+ t.Fatal(err)
622
+ }
623
+
624
+ hdr, err := FileInfoHeader(fileinfo, "")
625
+ if err != nil {
626
+ t.Fatalf("os.Stat:1 %v", err)
627
+ }
628
+
629
+ // some sample data
630
+ chineseFilename := "文件名"
631
+ chineseGroupname := "組"
632
+ chineseUsername := "用戶名"
633
+
634
+ hdr.Name = chineseFilename
635
+ hdr.Gname = chineseGroupname
636
+ hdr.Uname = chineseUsername
637
+
638
+ contents := strings.Repeat(" ", int(hdr.Size))
639
+
640
+ var buf bytes.Buffer
641
+ writer := NewWriter(&buf)
642
+ if err := writer.WriteHeader(hdr); err != nil {
643
+ t.Fatal(err)
644
+ }
645
+ if _, err = writer.Write([]byte(contents)); err != nil {
646
+ t.Fatal(err)
647
+ }
648
+ if err := writer.Close(); err != nil {
649
+ t.Fatal(err)
650
+ }
651
+ // Simple test to make sure PAX extensions are in effect
652
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
653
+ t.Fatal("Expected at least one PAX header to be written.")
654
+ }
655
+ // Test that we can get a long name back out of the archive.
656
+ reader := NewReader(&buf)
657
+ hdr, err = reader.Next()
658
+ if err != nil {
659
+ t.Fatal(err)
660
+ }
661
+ if hdr.Name != chineseFilename {
662
+ t.Fatal("Couldn't recover unicode name")
663
+ }
664
+ if hdr.Gname != chineseGroupname {
665
+ t.Fatal("Couldn't recover unicode group")
666
+ }
667
+ if hdr.Uname != chineseUsername {
668
+ t.Fatal("Couldn't recover unicode user")
669
+ }
670
+ }
671
+
672
+ func TestPaxXattrs(t *testing.T) {
673
+ xattrs := map[string]string{
674
+ "user.key": "value",
675
+ }
676
+
677
+ // Create an archive with an xattr
678
+ fileinfo, err := os.Stat("testdata/small.txt")
679
+ if err != nil {
680
+ t.Fatal(err)
681
+ }
682
+ hdr, err := FileInfoHeader(fileinfo, "")
683
+ if err != nil {
684
+ t.Fatalf("os.Stat: %v", err)
685
+ }
686
+ contents := "Kilts"
687
+ hdr.Xattrs = xattrs
688
+ var buf bytes.Buffer
689
+ writer := NewWriter(&buf)
690
+ if err := writer.WriteHeader(hdr); err != nil {
691
+ t.Fatal(err)
692
+ }
693
+ if _, err = writer.Write([]byte(contents)); err != nil {
694
+ t.Fatal(err)
695
+ }
696
+ if err := writer.Close(); err != nil {
697
+ t.Fatal(err)
698
+ }
699
+ // Test that we can get the xattrs back out of the archive.
700
+ reader := NewReader(&buf)
701
+ hdr, err = reader.Next()
702
+ if err != nil {
703
+ t.Fatal(err)
704
+ }
705
+ if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
706
+ t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
707
+ hdr.Xattrs, xattrs)
708
+ }
709
+ }
710
+
711
+ func TestPaxHeadersSorted(t *testing.T) {
712
+ fileinfo, err := os.Stat("testdata/small.txt")
713
+ if err != nil {
714
+ t.Fatal(err)
715
+ }
716
+ hdr, err := FileInfoHeader(fileinfo, "")
717
+ if err != nil {
718
+ t.Fatalf("os.Stat: %v", err)
719
+ }
720
+ contents := strings.Repeat(" ", int(hdr.Size))
721
+
722
+ hdr.Xattrs = map[string]string{
723
+ "foo": "foo",
724
+ "bar": "bar",
725
+ "baz": "baz",
726
+ "qux": "qux",
727
+ }
728
+
729
+ var buf bytes.Buffer
730
+ writer := NewWriter(&buf)
731
+ if err := writer.WriteHeader(hdr); err != nil {
732
+ t.Fatal(err)
733
+ }
734
+ if _, err = writer.Write([]byte(contents)); err != nil {
735
+ t.Fatal(err)
736
+ }
737
+ if err := writer.Close(); err != nil {
738
+ t.Fatal(err)
739
+ }
740
+ // Simple test to make sure PAX extensions are in effect
741
+ if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
742
+ t.Fatal("Expected at least one PAX header to be written.")
743
+ }
744
+
745
+ // xattr bar should always appear before others
746
+ indices := []int{
747
+ bytes.Index(buf.Bytes(), []byte("bar=bar")),
748
+ bytes.Index(buf.Bytes(), []byte("baz=baz")),
749
+ bytes.Index(buf.Bytes(), []byte("foo=foo")),
750
+ bytes.Index(buf.Bytes(), []byte("qux=qux")),
751
+ }
752
+ if !sort.IntsAreSorted(indices) {
753
+ t.Fatal("PAX headers are not sorted")
754
+ }
755
+ }
756
+
757
+ func TestUSTARLongName(t *testing.T) {
758
+ // Create an archive with a path that failed to split with USTAR extension in previous versions.
759
+ fileinfo, err := os.Stat("testdata/small.txt")
760
+ if err != nil {
761
+ t.Fatal(err)
762
+ }
763
+ hdr, err := FileInfoHeader(fileinfo, "")
764
+ hdr.Typeflag = TypeDir
765
+ if err != nil {
766
+ t.Fatalf("os.Stat:1 %v", err)
767
+ }
768
+ // Force a PAX long name to be written. The name was taken from a practical example
769
+ // that fails and replaced ever char through numbers to anonymize the sample.
770
+ longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
771
+ hdr.Name = longName
772
+
773
+ hdr.Size = 0
774
+ var buf bytes.Buffer
775
+ writer := NewWriter(&buf)
776
+ if err := writer.WriteHeader(hdr); err != nil {
777
+ t.Fatal(err)
778
+ }
779
+ if err := writer.Close(); err != nil {
780
+ t.Fatal(err)
781
+ }
782
+ // Test that we can get a long name back out of the archive.
783
+ reader := NewReader(&buf)
784
+ hdr, err = reader.Next()
785
+ if err != nil && err != ErrInsecurePath {
786
+ t.Fatal(err)
787
+ }
788
+ if hdr.Name != longName {
789
+ t.Fatal("Couldn't recover long name")
790
+ }
791
+ }
792
+
793
+ func TestValidTypeflagWithPAXHeader(t *testing.T) {
794
+ var buffer bytes.Buffer
795
+ tw := NewWriter(&buffer)
796
+
797
+ fileName := strings.Repeat("ab", 100)
798
+
799
+ hdr := &Header{
800
+ Name: fileName,
801
+ Size: 4,
802
+ Typeflag: 0,
803
+ }
804
+ if err := tw.WriteHeader(hdr); err != nil {
805
+ t.Fatalf("Failed to write header: %s", err)
806
+ }
807
+ if _, err := tw.Write([]byte("fooo")); err != nil {
808
+ t.Fatalf("Failed to write the file's data: %s", err)
809
+ }
810
+ tw.Close()
811
+
812
+ tr := NewReader(&buffer)
813
+
814
+ for {
815
+ header, err := tr.Next()
816
+ if err == io.EOF {
817
+ break
818
+ }
819
+ if err != nil {
820
+ t.Fatalf("Failed to read header: %s", err)
821
+ }
822
+ if header.Typeflag != TypeReg {
823
+ t.Fatalf("Typeflag should've been %d, found %d", TypeReg, header.Typeflag)
824
+ }
825
+ }
826
+ }
827
+
828
+ // failOnceWriter fails exactly once and then always reports success.
829
+ type failOnceWriter bool
830
+
831
+ func (w *failOnceWriter) Write(b []byte) (int, error) {
832
+ if !*w {
833
+ return 0, io.ErrShortWrite
834
+ }
835
+ *w = true
836
+ return len(b), nil
837
+ }
838
+
839
+ func TestWriterErrors(t *testing.T) {
840
+ t.Run("HeaderOnly", func(t *testing.T) {
841
+ tw := NewWriter(new(bytes.Buffer))
842
+ hdr := &Header{Name: "dir/", Typeflag: TypeDir}
843
+ if err := tw.WriteHeader(hdr); err != nil {
844
+ t.Fatalf("WriteHeader() = %v, want nil", err)
845
+ }
846
+ if _, err := tw.Write([]byte{0x00}); err != ErrWriteTooLong {
847
+ t.Fatalf("Write() = %v, want %v", err, ErrWriteTooLong)
848
+ }
849
+ })
850
+
851
+ t.Run("NegativeSize", func(t *testing.T) {
852
+ tw := NewWriter(new(bytes.Buffer))
853
+ hdr := &Header{Name: "small.txt", Size: -1}
854
+ if err := tw.WriteHeader(hdr); err == nil {
855
+ t.Fatalf("WriteHeader() = nil, want non-nil error")
856
+ }
857
+ })
858
+
859
+ t.Run("BeforeHeader", func(t *testing.T) {
860
+ tw := NewWriter(new(bytes.Buffer))
861
+ if _, err := tw.Write([]byte("Kilts")); err != ErrWriteTooLong {
862
+ t.Fatalf("Write() = %v, want %v", err, ErrWriteTooLong)
863
+ }
864
+ })
865
+
866
+ t.Run("AfterClose", func(t *testing.T) {
867
+ tw := NewWriter(new(bytes.Buffer))
868
+ hdr := &Header{Name: "small.txt"}
869
+ if err := tw.WriteHeader(hdr); err != nil {
870
+ t.Fatalf("WriteHeader() = %v, want nil", err)
871
+ }
872
+ if err := tw.Close(); err != nil {
873
+ t.Fatalf("Close() = %v, want nil", err)
874
+ }
875
+ if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
876
+ t.Fatalf("Write() = %v, want %v", err, ErrWriteAfterClose)
877
+ }
878
+ if err := tw.Flush(); err != ErrWriteAfterClose {
879
+ t.Fatalf("Flush() = %v, want %v", err, ErrWriteAfterClose)
880
+ }
881
+ if err := tw.Close(); err != nil {
882
+ t.Fatalf("Close() = %v, want nil", err)
883
+ }
884
+ })
885
+
886
+ t.Run("PrematureFlush", func(t *testing.T) {
887
+ tw := NewWriter(new(bytes.Buffer))
888
+ hdr := &Header{Name: "small.txt", Size: 5}
889
+ if err := tw.WriteHeader(hdr); err != nil {
890
+ t.Fatalf("WriteHeader() = %v, want nil", err)
891
+ }
892
+ if err := tw.Flush(); err == nil {
893
+ t.Fatalf("Flush() = %v, want non-nil error", err)
894
+ }
895
+ })
896
+
897
+ t.Run("PrematureClose", func(t *testing.T) {
898
+ tw := NewWriter(new(bytes.Buffer))
899
+ hdr := &Header{Name: "small.txt", Size: 5}
900
+ if err := tw.WriteHeader(hdr); err != nil {
901
+ t.Fatalf("WriteHeader() = %v, want nil", err)
902
+ }
903
+ if err := tw.Close(); err == nil {
904
+ t.Fatalf("Close() = %v, want non-nil error", err)
905
+ }
906
+ })
907
+
908
+ t.Run("Persistence", func(t *testing.T) {
909
+ tw := NewWriter(new(failOnceWriter))
910
+ if err := tw.WriteHeader(&Header{}); err != io.ErrShortWrite {
911
+ t.Fatalf("WriteHeader() = %v, want %v", err, io.ErrShortWrite)
912
+ }
913
+ if err := tw.WriteHeader(&Header{Name: "small.txt"}); err == nil {
914
+ t.Errorf("WriteHeader() = got %v, want non-nil error", err)
915
+ }
916
+ if _, err := tw.Write(nil); err == nil {
917
+ t.Errorf("Write() = %v, want non-nil error", err)
918
+ }
919
+ if err := tw.Flush(); err == nil {
920
+ t.Errorf("Flush() = %v, want non-nil error", err)
921
+ }
922
+ if err := tw.Close(); err == nil {
923
+ t.Errorf("Close() = %v, want non-nil error", err)
924
+ }
925
+ })
926
+ }
927
+
928
+ func TestSplitUSTARPath(t *testing.T) {
929
+ sr := strings.Repeat
930
+
931
+ vectors := []struct {
932
+ input string // Input path
933
+ prefix string // Expected output prefix
934
+ suffix string // Expected output suffix
935
+ ok bool // Split success?
936
+ }{
937
+ {"", "", "", false},
938
+ {"abc", "", "", false},
939
+ {"用戶名", "", "", false},
940
+ {sr("a", nameSize), "", "", false},
941
+ {sr("a", nameSize) + "/", "", "", false},
942
+ {sr("a", nameSize) + "/a", sr("a", nameSize), "a", true},
943
+ {sr("a", prefixSize) + "/", "", "", false},
944
+ {sr("a", prefixSize) + "/a", sr("a", prefixSize), "a", true},
945
+ {sr("a", nameSize+1), "", "", false},
946
+ {sr("/", nameSize+1), sr("/", nameSize-1), "/", true},
947
+ {sr("a", prefixSize) + "/" + sr("b", nameSize),
948
+ sr("a", prefixSize), sr("b", nameSize), true},
949
+ {sr("a", prefixSize) + "//" + sr("b", nameSize), "", "", false},
950
+ {sr("a/", nameSize), sr("a/", 77) + "a", sr("a/", 22), true},
951
+ }
952
+
953
+ for _, v := range vectors {
954
+ prefix, suffix, ok := splitUSTARPath(v.input)
955
+ if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
956
+ t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
957
+ v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
958
+ }
959
+ }
960
+ }
961
+
962
+ // TestIssue12594 tests that the Writer does not attempt to populate the prefix
963
+ // field when encoding a header in the GNU format. The prefix field is valid
964
+ // in USTAR and PAX, but not GNU.
965
+ func TestIssue12594(t *testing.T) {
966
+ names := []string{
967
+ "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/file.txt",
968
+ "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/file.txt",
969
+ "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/333/file.txt",
970
+ "0/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/34/35/36/37/38/39/40/file.txt",
971
+ "0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000/file.txt",
972
+ "/home/support/.openoffice.org/3/user/uno_packages/cache/registry/com.sun.star.comp.deployment.executable.PackageRegistryBackend",
973
+ }
974
+
975
+ for i, name := range names {
976
+ var b bytes.Buffer
977
+
978
+ tw := NewWriter(&b)
979
+ if err := tw.WriteHeader(&Header{
980
+ Name: name,
981
+ Uid: 1 << 25, // Prevent USTAR format
982
+ }); err != nil {
983
+ t.Errorf("test %d, unexpected WriteHeader error: %v", i, err)
984
+ }
985
+ if err := tw.Close(); err != nil {
986
+ t.Errorf("test %d, unexpected Close error: %v", i, err)
987
+ }
988
+
989
+ // The prefix field should never appear in the GNU format.
990
+ var blk block
991
+ copy(blk[:], b.Bytes())
992
+ prefix := string(blk.toUSTAR().prefix())
993
+ prefix, _, _ = strings.Cut(prefix, "\x00") // Truncate at the NUL terminator
994
+ if blk.getFormat() == FormatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) {
995
+ t.Errorf("test %d, found prefix in GNU format: %s", i, prefix)
996
+ }
997
+
998
+ tr := NewReader(&b)
999
+ hdr, err := tr.Next()
1000
+ if err != nil && err != ErrInsecurePath {
1001
+ t.Errorf("test %d, unexpected Next error: %v", i, err)
1002
+ }
1003
+ if hdr.Name != name {
1004
+ t.Errorf("test %d, hdr.Name = %s, want %s", i, hdr.Name, name)
1005
+ }
1006
+ }
1007
+ }
1008
+
1009
+ func TestWriteLongHeader(t *testing.T) {
1010
+ for _, test := range []struct {
1011
+ name string
1012
+ h *Header
1013
+ }{{
1014
+ name: "name too long",
1015
+ h: &Header{Name: strings.Repeat("a", maxSpecialFileSize)},
1016
+ }, {
1017
+ name: "linkname too long",
1018
+ h: &Header{Linkname: strings.Repeat("a", maxSpecialFileSize)},
1019
+ }, {
1020
+ name: "uname too long",
1021
+ h: &Header{Uname: strings.Repeat("a", maxSpecialFileSize)},
1022
+ }, {
1023
+ name: "gname too long",
1024
+ h: &Header{Gname: strings.Repeat("a", maxSpecialFileSize)},
1025
+ }, {
1026
+ name: "PAX header too long",
1027
+ h: &Header{PAXRecords: map[string]string{"GOLANG.x": strings.Repeat("a", maxSpecialFileSize)}},
1028
+ }} {
1029
+ w := NewWriter(io.Discard)
1030
+ if err := w.WriteHeader(test.h); err != ErrFieldTooLong {
1031
+ t.Errorf("%v: w.WriteHeader() = %v, want ErrFieldTooLong", test.name, err)
1032
+ }
1033
+ }
1034
+ }
1035
+
1036
+ // testNonEmptyWriter wraps an io.Writer and ensures that
1037
+ // Write is never called with an empty buffer.
1038
+ type testNonEmptyWriter struct{ io.Writer }
1039
+
1040
+ func (w testNonEmptyWriter) Write(b []byte) (int, error) {
1041
+ if len(b) == 0 {
1042
+ return 0, errors.New("unexpected empty Write call")
1043
+ }
1044
+ return w.Writer.Write(b)
1045
+ }
1046
+
1047
+ func TestFileWriter(t *testing.T) {
1048
+ type (
1049
+ testWrite struct { // Write(str) == (wantCnt, wantErr)
1050
+ str string
1051
+ wantCnt int
1052
+ wantErr error
1053
+ }
1054
+ testReadFrom struct { // ReadFrom(testFile{ops}) == (wantCnt, wantErr)
1055
+ ops fileOps
1056
+ wantCnt int64
1057
+ wantErr error
1058
+ }
1059
+ testRemaining struct { // logicalRemaining() == wantLCnt, physicalRemaining() == wantPCnt
1060
+ wantLCnt int64
1061
+ wantPCnt int64
1062
+ }
1063
+ testFnc any // testWrite | testReadFrom | testRemaining
1064
+ )
1065
+
1066
+ type (
1067
+ makeReg struct {
1068
+ size int64
1069
+ wantStr string
1070
+ }
1071
+ makeSparse struct {
1072
+ makeReg makeReg
1073
+ sph sparseHoles
1074
+ size int64
1075
+ }
1076
+ fileMaker any // makeReg | makeSparse
1077
+ )
1078
+
1079
+ vectors := []struct {
1080
+ maker fileMaker
1081
+ tests []testFnc
1082
+ }{{
1083
+ maker: makeReg{0, ""},
1084
+ tests: []testFnc{
1085
+ testRemaining{0, 0},
1086
+ testWrite{"", 0, nil},
1087
+ testWrite{"a", 0, ErrWriteTooLong},
1088
+ testReadFrom{fileOps{""}, 0, nil},
1089
+ testReadFrom{fileOps{"a"}, 0, ErrWriteTooLong},
1090
+ testRemaining{0, 0},
1091
+ },
1092
+ }, {
1093
+ maker: makeReg{1, "a"},
1094
+ tests: []testFnc{
1095
+ testRemaining{1, 1},
1096
+ testWrite{"", 0, nil},
1097
+ testWrite{"a", 1, nil},
1098
+ testWrite{"bcde", 0, ErrWriteTooLong},
1099
+ testWrite{"", 0, nil},
1100
+ testReadFrom{fileOps{""}, 0, nil},
1101
+ testReadFrom{fileOps{"a"}, 0, ErrWriteTooLong},
1102
+ testRemaining{0, 0},
1103
+ },
1104
+ }, {
1105
+ maker: makeReg{5, "hello"},
1106
+ tests: []testFnc{
1107
+ testRemaining{5, 5},
1108
+ testWrite{"hello", 5, nil},
1109
+ testRemaining{0, 0},
1110
+ },
1111
+ }, {
1112
+ maker: makeReg{5, "\x00\x00\x00\x00\x00"},
1113
+ tests: []testFnc{
1114
+ testRemaining{5, 5},
1115
+ testReadFrom{fileOps{"\x00\x00\x00\x00\x00"}, 5, nil},
1116
+ testRemaining{0, 0},
1117
+ },
1118
+ }, {
1119
+ maker: makeReg{5, "\x00\x00\x00\x00\x00"},
1120
+ tests: []testFnc{
1121
+ testRemaining{5, 5},
1122
+ testReadFrom{fileOps{"\x00\x00\x00\x00\x00extra"}, 5, ErrWriteTooLong},
1123
+ testRemaining{0, 0},
1124
+ },
1125
+ }, {
1126
+ maker: makeReg{5, "abc\x00\x00"},
1127
+ tests: []testFnc{
1128
+ testRemaining{5, 5},
1129
+ testWrite{"abc", 3, nil},
1130
+ testRemaining{2, 2},
1131
+ testReadFrom{fileOps{"\x00\x00"}, 2, nil},
1132
+ testRemaining{0, 0},
1133
+ },
1134
+ }, {
1135
+ maker: makeReg{5, "\x00\x00abc"},
1136
+ tests: []testFnc{
1137
+ testRemaining{5, 5},
1138
+ testWrite{"\x00\x00", 2, nil},
1139
+ testRemaining{3, 3},
1140
+ testWrite{"abc", 3, nil},
1141
+ testReadFrom{fileOps{"z"}, 0, ErrWriteTooLong},
1142
+ testWrite{"z", 0, ErrWriteTooLong},
1143
+ testRemaining{0, 0},
1144
+ },
1145
+ }, {
1146
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
1147
+ tests: []testFnc{
1148
+ testRemaining{8, 5},
1149
+ testWrite{"ab\x00\x00\x00cde", 8, nil},
1150
+ testWrite{"a", 0, ErrWriteTooLong},
1151
+ testRemaining{0, 0},
1152
+ },
1153
+ }, {
1154
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
1155
+ tests: []testFnc{
1156
+ testWrite{"ab\x00\x00\x00cdez", 8, ErrWriteTooLong},
1157
+ testRemaining{0, 0},
1158
+ },
1159
+ }, {
1160
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
1161
+ tests: []testFnc{
1162
+ testWrite{"ab\x00", 3, nil},
1163
+ testRemaining{5, 3},
1164
+ testWrite{"\x00\x00cde", 5, nil},
1165
+ testWrite{"a", 0, ErrWriteTooLong},
1166
+ testRemaining{0, 0},
1167
+ },
1168
+ }, {
1169
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
1170
+ tests: []testFnc{
1171
+ testWrite{"ab", 2, nil},
1172
+ testRemaining{6, 3},
1173
+ testReadFrom{fileOps{int64(3), "cde"}, 6, nil},
1174
+ testRemaining{0, 0},
1175
+ },
1176
+ }, {
1177
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
1178
+ tests: []testFnc{
1179
+ testReadFrom{fileOps{"ab", int64(3), "cde"}, 8, nil},
1180
+ testRemaining{0, 0},
1181
+ },
1182
+ }, {
1183
+ maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8},
1184
+ tests: []testFnc{
1185
+ testReadFrom{fileOps{"ab", int64(3), "cdeX"}, 8, ErrWriteTooLong},
1186
+ testRemaining{0, 0},
1187
+ },
1188
+ }, {
1189
+ maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8},
1190
+ tests: []testFnc{
1191
+ testReadFrom{fileOps{"ab", int64(3), "cd"}, 7, io.ErrUnexpectedEOF},
1192
+ testRemaining{1, 0},
1193
+ },
1194
+ }, {
1195
+ maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8},
1196
+ tests: []testFnc{
1197
+ testReadFrom{fileOps{"ab", int64(3), "cde"}, 7, errMissData},
1198
+ testRemaining{1, 0},
1199
+ },
1200
+ }, {
1201
+ maker: makeSparse{makeReg{6, "abcde"}, sparseHoles{{2, 3}}, 8},
1202
+ tests: []testFnc{
1203
+ testReadFrom{fileOps{"ab", int64(3), "cde"}, 8, errUnrefData},
1204
+ testRemaining{0, 1},
1205
+ },
1206
+ }, {
1207
+ maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8},
1208
+ tests: []testFnc{
1209
+ testWrite{"ab", 2, nil},
1210
+ testRemaining{6, 2},
1211
+ testWrite{"\x00\x00\x00", 3, nil},
1212
+ testRemaining{3, 2},
1213
+ testWrite{"cde", 2, errMissData},
1214
+ testRemaining{1, 0},
1215
+ },
1216
+ }, {
1217
+ maker: makeSparse{makeReg{6, "abcde"}, sparseHoles{{2, 3}}, 8},
1218
+ tests: []testFnc{
1219
+ testWrite{"ab", 2, nil},
1220
+ testRemaining{6, 4},
1221
+ testWrite{"\x00\x00\x00", 3, nil},
1222
+ testRemaining{3, 4},
1223
+ testWrite{"cde", 3, errUnrefData},
1224
+ testRemaining{0, 1},
1225
+ },
1226
+ }, {
1227
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
1228
+ tests: []testFnc{
1229
+ testRemaining{7, 3},
1230
+ testWrite{"\x00\x00abc\x00\x00", 7, nil},
1231
+ testRemaining{0, 0},
1232
+ },
1233
+ }, {
1234
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
1235
+ tests: []testFnc{
1236
+ testRemaining{7, 3},
1237
+ testReadFrom{fileOps{int64(2), "abc", int64(1), "\x00"}, 7, nil},
1238
+ testRemaining{0, 0},
1239
+ },
1240
+ }, {
1241
+ maker: makeSparse{makeReg{3, ""}, sparseHoles{{0, 2}, {5, 2}}, 7},
1242
+ tests: []testFnc{
1243
+ testWrite{"abcdefg", 0, errWriteHole},
1244
+ },
1245
+ }, {
1246
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
1247
+ tests: []testFnc{
1248
+ testWrite{"\x00\x00abcde", 5, errWriteHole},
1249
+ },
1250
+ }, {
1251
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
1252
+ tests: []testFnc{
1253
+ testWrite{"\x00\x00abc\x00\x00z", 7, ErrWriteTooLong},
1254
+ testRemaining{0, 0},
1255
+ },
1256
+ }, {
1257
+ maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
1258
+ tests: []testFnc{
1259
+ testWrite{"\x00\x00", 2, nil},
1260
+ testRemaining{5, 3},
1261
+ testWrite{"abc", 3, nil},
1262
+ testRemaining{2, 0},
1263
+ testWrite{"\x00\x00", 2, nil},
1264
+ testRemaining{0, 0},
1265
+ },
1266
+ }, {
1267
+ maker: makeSparse{makeReg{2, "ab"}, sparseHoles{{0, 2}, {5, 2}}, 7},
1268
+ tests: []testFnc{
1269
+ testWrite{"\x00\x00", 2, nil},
1270
+ testWrite{"abc", 2, errMissData},
1271
+ testWrite{"\x00\x00", 0, errMissData},
1272
+ },
1273
+ }, {
1274
+ maker: makeSparse{makeReg{4, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7},
1275
+ tests: []testFnc{
1276
+ testWrite{"\x00\x00", 2, nil},
1277
+ testWrite{"abc", 3, nil},
1278
+ testWrite{"\x00\x00", 2, errUnrefData},
1279
+ },
1280
+ }}
1281
+
1282
+ for i, v := range vectors {
1283
+ var wantStr string
1284
+ bb := new(strings.Builder)
1285
+ w := testNonEmptyWriter{bb}
1286
+ var fw fileWriter
1287
+ switch maker := v.maker.(type) {
1288
+ case makeReg:
1289
+ fw = &regFileWriter{w, maker.size}
1290
+ wantStr = maker.wantStr
1291
+ case makeSparse:
1292
+ if !validateSparseEntries(maker.sph, maker.size) {
1293
+ t.Fatalf("invalid sparse map: %v", maker.sph)
1294
+ }
1295
+ spd := invertSparseEntries(maker.sph, maker.size)
1296
+ fw = &regFileWriter{w, maker.makeReg.size}
1297
+ fw = &sparseFileWriter{fw, spd, 0}
1298
+ wantStr = maker.makeReg.wantStr
1299
+ default:
1300
+ t.Fatalf("test %d, unknown make operation: %T", i, maker)
1301
+ }
1302
+
1303
+ for j, tf := range v.tests {
1304
+ switch tf := tf.(type) {
1305
+ case testWrite:
1306
+ got, err := fw.Write([]byte(tf.str))
1307
+ if got != tf.wantCnt || err != tf.wantErr {
1308
+ t.Errorf("test %d.%d, Write(%s):\ngot (%d, %v)\nwant (%d, %v)", i, j, tf.str, got, err, tf.wantCnt, tf.wantErr)
1309
+ }
1310
+ case testReadFrom:
1311
+ f := &testFile{ops: tf.ops}
1312
+ got, err := fw.ReadFrom(f)
1313
+ if _, ok := err.(testError); ok {
1314
+ t.Errorf("test %d.%d, ReadFrom(): %v", i, j, err)
1315
+ } else if got != tf.wantCnt || err != tf.wantErr {
1316
+ t.Errorf("test %d.%d, ReadFrom() = (%d, %v), want (%d, %v)", i, j, got, err, tf.wantCnt, tf.wantErr)
1317
+ }
1318
+ if len(f.ops) > 0 {
1319
+ t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops))
1320
+ }
1321
+ case testRemaining:
1322
+ if got := fw.logicalRemaining(); got != tf.wantLCnt {
1323
+ t.Errorf("test %d.%d, logicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt)
1324
+ }
1325
+ if got := fw.physicalRemaining(); got != tf.wantPCnt {
1326
+ t.Errorf("test %d.%d, physicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt)
1327
+ }
1328
+ default:
1329
+ t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf)
1330
+ }
1331
+ }
1332
+
1333
+ if got := bb.String(); got != wantStr {
1334
+ t.Fatalf("test %d, String() = %q, want %q", i, got, wantStr)
1335
+ }
1336
+ }
1337
+ }
1338
+
1339
+ func TestWriterAddFS(t *testing.T) {
1340
+ fsys := fstest.MapFS{
1341
+ "file.go": {Data: []byte("hello")},
1342
+ "subfolder/another.go": {Data: []byte("world")},
1343
+ }
1344
+ var buf bytes.Buffer
1345
+ tw := NewWriter(&buf)
1346
+ if err := tw.AddFS(fsys); err != nil {
1347
+ t.Fatal(err)
1348
+ }
1349
+
1350
+ // Test that we can get the files back from the archive
1351
+ tr := NewReader(&buf)
1352
+
1353
+ entries, err := fsys.ReadDir(".")
1354
+ if err != nil {
1355
+ t.Fatal(err)
1356
+ }
1357
+
1358
+ var curfname string
1359
+ for _, entry := range entries {
1360
+ curfname = entry.Name()
1361
+ if entry.IsDir() {
1362
+ curfname += "/"
1363
+ continue
1364
+ }
1365
+ hdr, err := tr.Next()
1366
+ if err == io.EOF {
1367
+ break // End of archive
1368
+ }
1369
+ if err != nil {
1370
+ t.Fatal(err)
1371
+ }
1372
+
1373
+ data, err := io.ReadAll(tr)
1374
+ if err != nil {
1375
+ t.Fatal(err)
1376
+ }
1377
+
1378
+ if hdr.Name != curfname {
1379
+ t.Fatalf("got filename %v, want %v",
1380
+ curfname, hdr.Name)
1381
+ }
1382
+
1383
+ origdata := fsys[curfname].Data
1384
+ if string(data) != string(origdata) {
1385
+ t.Fatalf("got file content %v, want %v",
1386
+ data, origdata)
1387
+ }
1388
+ }
1389
+ }
1390
+
1391
+ func TestWriterAddFSNonRegularFiles(t *testing.T) {
1392
+ fsys := fstest.MapFS{
1393
+ "device": {Data: []byte("hello"), Mode: 0755 | fs.ModeDevice},
1394
+ "symlink": {Data: []byte("world"), Mode: 0755 | fs.ModeSymlink},
1395
+ }
1396
+ var buf bytes.Buffer
1397
+ tw := NewWriter(&buf)
1398
+ if err := tw.AddFS(fsys); err == nil {
1399
+ t.Fatal("expected error, got nil")
1400
+ }
1401
+ }
platform/dbops/binaries/go/go/src/archive/zip/example_test.go ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2012 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package zip_test
6
+
7
+ import (
8
+ "archive/zip"
9
+ "bytes"
10
+ "compress/flate"
11
+ "fmt"
12
+ "io"
13
+ "log"
14
+ "os"
15
+ )
16
+
17
+ func ExampleWriter() {
18
+ // Create a buffer to write our archive to.
19
+ buf := new(bytes.Buffer)
20
+
21
+ // Create a new zip archive.
22
+ w := zip.NewWriter(buf)
23
+
24
+ // Add some files to the archive.
25
+ var files = []struct {
26
+ Name, Body string
27
+ }{
28
+ {"readme.txt", "This archive contains some text files."},
29
+ {"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
30
+ {"todo.txt", "Get animal handling licence.\nWrite more examples."},
31
+ }
32
+ for _, file := range files {
33
+ f, err := w.Create(file.Name)
34
+ if err != nil {
35
+ log.Fatal(err)
36
+ }
37
+ _, err = f.Write([]byte(file.Body))
38
+ if err != nil {
39
+ log.Fatal(err)
40
+ }
41
+ }
42
+
43
+ // Make sure to check the error on Close.
44
+ err := w.Close()
45
+ if err != nil {
46
+ log.Fatal(err)
47
+ }
48
+ }
49
+
50
+ func ExampleReader() {
51
+ // Open a zip archive for reading.
52
+ r, err := zip.OpenReader("testdata/readme.zip")
53
+ if err != nil {
54
+ log.Fatal(err)
55
+ }
56
+ defer r.Close()
57
+
58
+ // Iterate through the files in the archive,
59
+ // printing some of their contents.
60
+ for _, f := range r.File {
61
+ fmt.Printf("Contents of %s:\n", f.Name)
62
+ rc, err := f.Open()
63
+ if err != nil {
64
+ log.Fatal(err)
65
+ }
66
+ _, err = io.CopyN(os.Stdout, rc, 68)
67
+ if err != nil {
68
+ log.Fatal(err)
69
+ }
70
+ rc.Close()
71
+ fmt.Println()
72
+ }
73
+ // Output:
74
+ // Contents of README:
75
+ // This is the source code repository for the Go programming language.
76
+ }
77
+
78
+ func ExampleWriter_RegisterCompressor() {
79
+ // Override the default Deflate compressor with a higher compression level.
80
+
81
+ // Create a buffer to write our archive to.
82
+ buf := new(bytes.Buffer)
83
+
84
+ // Create a new zip archive.
85
+ w := zip.NewWriter(buf)
86
+
87
+ // Register a custom Deflate compressor.
88
+ w.RegisterCompressor(zip.Deflate, func(out io.Writer) (io.WriteCloser, error) {
89
+ return flate.NewWriter(out, flate.BestCompression)
90
+ })
91
+
92
+ // Proceed to add files to w.
93
+ }
platform/dbops/binaries/go/go/src/archive/zip/fuzz_test.go ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2021 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package zip
6
+
7
+ import (
8
+ "bytes"
9
+ "io"
10
+ "os"
11
+ "path/filepath"
12
+ "testing"
13
+ )
14
+
15
+ func FuzzReader(f *testing.F) {
16
+ testdata, err := os.ReadDir("testdata")
17
+ if err != nil {
18
+ f.Fatalf("failed to read testdata directory: %s", err)
19
+ }
20
+ for _, de := range testdata {
21
+ if de.IsDir() {
22
+ continue
23
+ }
24
+ b, err := os.ReadFile(filepath.Join("testdata", de.Name()))
25
+ if err != nil {
26
+ f.Fatalf("failed to read testdata: %s", err)
27
+ }
28
+ f.Add(b)
29
+ }
30
+
31
+ f.Fuzz(func(t *testing.T, b []byte) {
32
+ r, err := NewReader(bytes.NewReader(b), int64(len(b)))
33
+ if err != nil {
34
+ return
35
+ }
36
+
37
+ type file struct {
38
+ header *FileHeader
39
+ content []byte
40
+ }
41
+ files := []file{}
42
+
43
+ for _, f := range r.File {
44
+ fr, err := f.Open()
45
+ if err != nil {
46
+ continue
47
+ }
48
+ content, err := io.ReadAll(fr)
49
+ if err != nil {
50
+ continue
51
+ }
52
+ files = append(files, file{header: &f.FileHeader, content: content})
53
+ if _, err := r.Open(f.Name); err != nil {
54
+ continue
55
+ }
56
+ }
57
+
58
+ // If we were unable to read anything out of the archive don't
59
+ // bother trying to roundtrip it.
60
+ if len(files) == 0 {
61
+ return
62
+ }
63
+
64
+ w := NewWriter(io.Discard)
65
+ for _, f := range files {
66
+ ww, err := w.CreateHeader(f.header)
67
+ if err != nil {
68
+ t.Fatalf("unable to write previously parsed header: %s", err)
69
+ }
70
+ if _, err := ww.Write(f.content); err != nil {
71
+ t.Fatalf("unable to write previously parsed content: %s", err)
72
+ }
73
+ }
74
+
75
+ if err := w.Close(); err != nil {
76
+ t.Fatalf("Unable to write archive: %s", err)
77
+ }
78
+
79
+ // TODO: We may want to check if the archive roundtrips.
80
+ })
81
+ }
platform/dbops/binaries/go/go/src/archive/zip/reader.go ADDED
@@ -0,0 +1,983 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2010 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package zip
6
+
7
+ import (
8
+ "bufio"
9
+ "encoding/binary"
10
+ "errors"
11
+ "hash"
12
+ "hash/crc32"
13
+ "internal/godebug"
14
+ "io"
15
+ "io/fs"
16
+ "os"
17
+ "path"
18
+ "path/filepath"
19
+ "sort"
20
+ "strings"
21
+ "sync"
22
+ "time"
23
+ )
24
+
25
+ var zipinsecurepath = godebug.New("zipinsecurepath")
26
+
27
+ var (
28
+ ErrFormat = errors.New("zip: not a valid zip file")
29
+ ErrAlgorithm = errors.New("zip: unsupported compression algorithm")
30
+ ErrChecksum = errors.New("zip: checksum error")
31
+ ErrInsecurePath = errors.New("zip: insecure file path")
32
+ )
33
+
34
+ // A Reader serves content from a ZIP archive.
35
+ type Reader struct {
36
+ r io.ReaderAt
37
+ File []*File
38
+ Comment string
39
+ decompressors map[uint16]Decompressor
40
+
41
+ // Some JAR files are zip files with a prefix that is a bash script.
42
+ // The baseOffset field is the start of the zip file proper.
43
+ baseOffset int64
44
+
45
+ // fileList is a list of files sorted by ename,
46
+ // for use by the Open method.
47
+ fileListOnce sync.Once
48
+ fileList []fileListEntry
49
+ }
50
+
51
+ // A ReadCloser is a [Reader] that must be closed when no longer needed.
52
+ type ReadCloser struct {
53
+ f *os.File
54
+ Reader
55
+ }
56
+
57
+ // A File is a single file in a ZIP archive.
58
+ // The file information is in the embedded [FileHeader].
59
+ // The file content can be accessed by calling [File.Open].
60
+ type File struct {
61
+ FileHeader
62
+ zip *Reader
63
+ zipr io.ReaderAt
64
+ headerOffset int64 // includes overall ZIP archive baseOffset
65
+ zip64 bool // zip64 extended information extra field presence
66
+ }
67
+
68
+ // OpenReader will open the Zip file specified by name and return a ReadCloser.
69
+ //
70
+ // If any file inside the archive uses a non-local name
71
+ // (as defined by [filepath.IsLocal]) or a name containing backslashes
72
+ // and the GODEBUG environment variable contains `zipinsecurepath=0`,
73
+ // OpenReader returns the reader with an ErrInsecurePath error.
74
+ // A future version of Go may introduce this behavior by default.
75
+ // Programs that want to accept non-local names can ignore
76
+ // the ErrInsecurePath error and use the returned reader.
77
+ func OpenReader(name string) (*ReadCloser, error) {
78
+ f, err := os.Open(name)
79
+ if err != nil {
80
+ return nil, err
81
+ }
82
+ fi, err := f.Stat()
83
+ if err != nil {
84
+ f.Close()
85
+ return nil, err
86
+ }
87
+ r := new(ReadCloser)
88
+ if err = r.init(f, fi.Size()); err != nil && err != ErrInsecurePath {
89
+ f.Close()
90
+ return nil, err
91
+ }
92
+ r.f = f
93
+ return r, err
94
+ }
95
+
96
+ // NewReader returns a new [Reader] reading from r, which is assumed to
97
+ // have the given size in bytes.
98
+ //
99
+ // If any file inside the archive uses a non-local name
100
+ // (as defined by [filepath.IsLocal]) or a name containing backslashes
101
+ // and the GODEBUG environment variable contains `zipinsecurepath=0`,
102
+ // NewReader returns the reader with an [ErrInsecurePath] error.
103
+ // A future version of Go may introduce this behavior by default.
104
+ // Programs that want to accept non-local names can ignore
105
+ // the [ErrInsecurePath] error and use the returned reader.
106
+ func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
107
+ if size < 0 {
108
+ return nil, errors.New("zip: size cannot be negative")
109
+ }
110
+ zr := new(Reader)
111
+ var err error
112
+ if err = zr.init(r, size); err != nil && err != ErrInsecurePath {
113
+ return nil, err
114
+ }
115
+ return zr, err
116
+ }
117
+
118
// init populates r from the archive contained in the size bytes of rdr:
// it locates the end-of-central-directory record, reads every central
// directory header into r.File, and sets r.Comment. It returns
// ErrInsecurePath (with r still fully populated) when GODEBUG
// zipinsecurepath=0 is set and a file name is non-local or contains
// backslashes.
func (r *Reader) init(rdr io.ReaderAt, size int64) error {
	end, baseOffset, err := readDirectoryEnd(rdr, size)
	if err != nil {
		return err
	}
	r.r = rdr
	r.baseOffset = baseOffset
	// Since the number of directory records is not validated, it is not
	// safe to preallocate r.File without first checking that the specified
	// number of files is reasonable, since a malformed archive may
	// indicate it contains up to 1 << 128 - 1 files. Since each file has a
	// header which will be _at least_ 30 bytes we can safely preallocate
	// if (data size / 30) >= end.directoryRecords.
	if end.directorySize < uint64(size) && (uint64(size)-end.directorySize)/30 >= end.directoryRecords {
		r.File = make([]*File, 0, end.directoryRecords)
	}
	r.Comment = end.comment
	rs := io.NewSectionReader(rdr, 0, size)
	if _, err = rs.Seek(r.baseOffset+int64(end.directoryOffset), io.SeekStart); err != nil {
		return err
	}
	buf := bufio.NewReader(rs)

	// The count of files inside a zip is truncated to fit in a uint16.
	// Gloss over this by reading headers until we encounter
	// a bad one, and then only report an ErrFormat or UnexpectedEOF if
	// the file count modulo 65536 is incorrect.
	for {
		f := &File{zip: r, zipr: rdr}
		err = readDirectoryHeader(f, buf)
		if err == ErrFormat || err == io.ErrUnexpectedEOF {
			break
		}
		if err != nil {
			return err
		}
		// Header offsets in the directory are relative to baseOffset.
		f.headerOffset += r.baseOffset
		r.File = append(r.File, f)
	}
	if uint16(len(r.File)) != uint16(end.directoryRecords) { // only compare 16 bits here
		// Return the readDirectoryHeader error if we read
		// the wrong number of directory entries.
		return err
	}
	if zipinsecurepath.Value() == "0" {
		for _, f := range r.File {
			if f.Name == "" {
				// Zip permits an empty file name field.
				continue
			}
			// The zip specification states that names must use forward slashes,
			// so consider any backslashes in the name insecure.
			if !filepath.IsLocal(f.Name) || strings.Contains(f.Name, `\`) {
				zipinsecurepath.IncNonDefault()
				return ErrInsecurePath
			}
		}
	}
	return nil
}
178
+
179
+ // RegisterDecompressor registers or overrides a custom decompressor for a
180
+ // specific method ID. If a decompressor for a given method is not found,
181
+ // [Reader] will default to looking up the decompressor at the package level.
182
+ func (r *Reader) RegisterDecompressor(method uint16, dcomp Decompressor) {
183
+ if r.decompressors == nil {
184
+ r.decompressors = make(map[uint16]Decompressor)
185
+ }
186
+ r.decompressors[method] = dcomp
187
+ }
188
+
189
+ func (r *Reader) decompressor(method uint16) Decompressor {
190
+ dcomp := r.decompressors[method]
191
+ if dcomp == nil {
192
+ dcomp = decompressor(method)
193
+ }
194
+ return dcomp
195
+ }
196
+
197
// Close closes the Zip file, rendering it unusable for I/O.
func (rc *ReadCloser) Close() error {
	// Closing the underlying *os.File also invalidates any still-open
	// per-entry readers backed by it.
	return rc.f.Close()
}
201
+
202
+ // DataOffset returns the offset of the file's possibly-compressed
203
+ // data, relative to the beginning of the zip file.
204
+ //
205
+ // Most callers should instead use [File.Open], which transparently
206
+ // decompresses data and verifies checksums.
207
+ func (f *File) DataOffset() (offset int64, err error) {
208
+ bodyOffset, err := f.findBodyOffset()
209
+ if err != nil {
210
+ return
211
+ }
212
+ return f.headerOffset + bodyOffset, nil
213
+ }
214
+
215
// Open returns a [ReadCloser] that provides access to the [File]'s contents.
// Multiple files may be read concurrently.
func (f *File) Open() (io.ReadCloser, error) {
	bodyOffset, err := f.findBodyOffset()
	if err != nil {
		return nil, err
	}
	if strings.HasSuffix(f.Name, "/") {
		// The ZIP specification (APPNOTE.TXT) specifies that directories, which
		// are technically zero-byte files, must not have any associated file
		// data. We previously tried failing here if f.CompressedSize64 != 0,
		// but it turns out that a number of implementations (namely, the Java
		// jar tool) don't properly set the storage method on directories
		// resulting in a file with compressed size > 0 but uncompressed size ==
		// 0. We still want to fail when a directory has associated uncompressed
		// data, but we are tolerant of cases where the uncompressed size is
		// zero but compressed size is not.
		if f.UncompressedSize64 != 0 {
			return &dirReader{ErrFormat}, nil
		} else {
			return &dirReader{io.EOF}, nil
		}
	}
	size := int64(f.CompressedSize64)
	r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, size)
	dcomp := f.zip.decompressor(f.Method)
	if dcomp == nil {
		return nil, ErrAlgorithm
	}
	var rc io.ReadCloser = dcomp(r)
	var desr io.Reader
	if f.hasDataDescriptor() {
		// Streamed entries are followed by a data descriptor; the
		// checksumReader validates against it when it reaches EOF.
		desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
	}
	// Wrap the decompressor so every read is CRC-checked and
	// size-checked against the central directory metadata.
	rc = &checksumReader{
		rc:   rc,
		hash: crc32.NewIEEE(),
		f:    f,
		desr: desr,
	}
	return rc, nil
}
257
+
258
+ // OpenRaw returns a [Reader] that provides access to the [File]'s contents without
259
+ // decompression.
260
+ func (f *File) OpenRaw() (io.Reader, error) {
261
+ bodyOffset, err := f.findBodyOffset()
262
+ if err != nil {
263
+ return nil, err
264
+ }
265
+ r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset, int64(f.CompressedSize64))
266
+ return r, nil
267
+ }
268
+
269
+ type dirReader struct {
270
+ err error
271
+ }
272
+
273
+ func (r *dirReader) Read([]byte) (int, error) {
274
+ return 0, r.err
275
+ }
276
+
277
+ func (r *dirReader) Close() error {
278
+ return nil
279
+ }
280
+
281
// checksumReader wraps an entry's decompressed stream, verifying the
// CRC-32 and the uncompressed size as data is consumed. Once an error
// (including io.EOF) is observed it is sticky: all later Reads return it.
type checksumReader struct {
	rc    io.ReadCloser
	hash  hash.Hash32
	nread uint64 // number of bytes read so far
	f     *File
	desr  io.Reader // if non-nil, where to read the data descriptor
	err   error     // sticky error
}

// Stat implements fs.File using the entry's header metadata.
func (r *checksumReader) Stat() (fs.FileInfo, error) {
	return headerFileInfo{&r.f.FileHeader}, nil
}

// Read reads decompressed bytes into b, folding them into the running
// CRC-32. On EOF it verifies the byte count and checksum, consulting the
// trailing data descriptor when one is present.
func (r *checksumReader) Read(b []byte) (n int, err error) {
	if r.err != nil {
		return 0, r.err
	}
	n, err = r.rc.Read(b)
	r.hash.Write(b[:n])
	r.nread += uint64(n)
	// More data than the directory promised means a corrupt archive.
	if r.nread > r.f.UncompressedSize64 {
		return 0, ErrFormat
	}
	if err == nil {
		return
	}
	if err == io.EOF {
		if r.nread != r.f.UncompressedSize64 {
			return 0, io.ErrUnexpectedEOF
		}
		if r.desr != nil {
			if err1 := readDataDescriptor(r.desr, r.f); err1 != nil {
				if err1 == io.EOF {
					err = io.ErrUnexpectedEOF
				} else {
					err = err1
				}
			} else if r.hash.Sum32() != r.f.CRC32 {
				err = ErrChecksum
			}
		} else {
			// If there's not a data descriptor, we still compare
			// the CRC32 of what we've read against the file header
			// or TOC's CRC32, if it seems like it was set.
			if r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 {
				err = ErrChecksum
			}
		}
	}
	r.err = err
	return
}

// Close closes the wrapped decompressor.
func (r *checksumReader) Close() error { return r.rc.Close() }
335
+
336
+ // findBodyOffset does the minimum work to verify the file has a header
337
+ // and returns the file body offset.
338
+ func (f *File) findBodyOffset() (int64, error) {
339
+ var buf [fileHeaderLen]byte
340
+ if _, err := f.zipr.ReadAt(buf[:], f.headerOffset); err != nil {
341
+ return 0, err
342
+ }
343
+ b := readBuf(buf[:])
344
+ if sig := b.uint32(); sig != fileHeaderSignature {
345
+ return 0, ErrFormat
346
+ }
347
+ b = b[22:] // skip over most of the header
348
+ filenameLen := int(b.uint16())
349
+ extraLen := int(b.uint16())
350
+ return int64(fileHeaderLen + filenameLen + extraLen), nil
351
+ }
352
+
353
// readDirectoryHeader attempts to read a directory header from r.
// It returns io.ErrUnexpectedEOF if it cannot read a complete header,
// and ErrFormat if it doesn't find a valid header signature.
//
// On success it fills in f's FileHeader fields, resolves zip64 sizes and
// offsets from the extra field, and derives the modification time from
// whichever timestamp extra field (NTFS, Unix, or extended-timestamp)
// is present, falling back to the legacy MS-DOS fields.
func readDirectoryHeader(f *File, r io.Reader) error {
	var buf [directoryHeaderLen]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return err
	}
	b := readBuf(buf[:])
	if sig := b.uint32(); sig != directoryHeaderSignature {
		return ErrFormat
	}
	// Fixed-size fields, in central-directory order; each call to b
	// consumes bytes, so the order below must match APPNOTE.TXT.
	f.CreatorVersion = b.uint16()
	f.ReaderVersion = b.uint16()
	f.Flags = b.uint16()
	f.Method = b.uint16()
	f.ModifiedTime = b.uint16()
	f.ModifiedDate = b.uint16()
	f.CRC32 = b.uint32()
	f.CompressedSize = b.uint32()
	f.UncompressedSize = b.uint32()
	f.CompressedSize64 = uint64(f.CompressedSize)
	f.UncompressedSize64 = uint64(f.UncompressedSize)
	filenameLen := int(b.uint16())
	extraLen := int(b.uint16())
	commentLen := int(b.uint16())
	b = b[4:] // skipped start disk number and internal attributes (2x uint16)
	f.ExternalAttrs = b.uint32()
	f.headerOffset = int64(b.uint32())
	d := make([]byte, filenameLen+extraLen+commentLen)
	if _, err := io.ReadFull(r, d); err != nil {
		return err
	}
	f.Name = string(d[:filenameLen])
	f.Extra = d[filenameLen : filenameLen+extraLen]
	f.Comment = string(d[filenameLen+extraLen:])

	// Determine the character encoding.
	utf8Valid1, utf8Require1 := detectUTF8(f.Name)
	utf8Valid2, utf8Require2 := detectUTF8(f.Comment)
	switch {
	case !utf8Valid1 || !utf8Valid2:
		// Name and Comment definitely not UTF-8.
		f.NonUTF8 = true
	case !utf8Require1 && !utf8Require2:
		// Name and Comment use only single-byte runes that overlap with UTF-8.
		f.NonUTF8 = false
	default:
		// Might be UTF-8, might be some other encoding; preserve existing flag.
		// Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.
		// Since it is impossible to always distinguish valid UTF-8 from some
		// other encoding (e.g., GBK or Shift-JIS), we trust the flag.
		f.NonUTF8 = f.Flags&0x800 == 0
	}

	// All-ones 32-bit values signal "look in the zip64 extra field".
	needUSize := f.UncompressedSize == ^uint32(0)
	needCSize := f.CompressedSize == ^uint32(0)
	needHeaderOffset := f.headerOffset == int64(^uint32(0))

	// Best effort to find what we need.
	// Other zip authors might not even follow the basic format,
	// and we'll just ignore the Extra content in that case.
	var modified time.Time
parseExtras:
	for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size
		fieldTag := extra.uint16()
		fieldSize := int(extra.uint16())
		if len(extra) < fieldSize {
			break
		}
		fieldBuf := extra.sub(fieldSize)

		switch fieldTag {
		case zip64ExtraID:
			f.zip64 = true

			// update directory values from the zip64 extra block.
			// They should only be consulted if the sizes read earlier
			// are maxed out.
			// See golang.org/issue/13367.
			if needUSize {
				needUSize = false
				if len(fieldBuf) < 8 {
					return ErrFormat
				}
				f.UncompressedSize64 = fieldBuf.uint64()
			}
			if needCSize {
				needCSize = false
				if len(fieldBuf) < 8 {
					return ErrFormat
				}
				f.CompressedSize64 = fieldBuf.uint64()
			}
			if needHeaderOffset {
				needHeaderOffset = false
				if len(fieldBuf) < 8 {
					return ErrFormat
				}
				f.headerOffset = int64(fieldBuf.uint64())
			}
		case ntfsExtraID:
			if len(fieldBuf) < 4 {
				continue parseExtras
			}
			fieldBuf.uint32() // reserved (ignored)
			for len(fieldBuf) >= 4 { // need at least tag and size
				attrTag := fieldBuf.uint16()
				attrSize := int(fieldBuf.uint16())
				if len(fieldBuf) < attrSize {
					continue parseExtras
				}
				attrBuf := fieldBuf.sub(attrSize)
				if attrTag != 1 || attrSize != 24 {
					continue // Ignore irrelevant attributes
				}

				const ticksPerSecond = 1e7    // Windows timestamp resolution
				ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
				secs := ts / ticksPerSecond
				nsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)
				epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
				modified = time.Unix(epoch.Unix()+secs, nsecs)
			}
		case unixExtraID, infoZipUnixExtraID:
			if len(fieldBuf) < 8 {
				continue parseExtras
			}
			fieldBuf.uint32()              // AcTime (ignored)
			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
			modified = time.Unix(ts, 0)
		case extTimeExtraID:
			if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
				continue parseExtras
			}
			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
			modified = time.Unix(ts, 0)
		}
	}

	msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)
	f.Modified = msdosModified
	if !modified.IsZero() {
		f.Modified = modified.UTC()

		// If legacy MS-DOS timestamps are set, we can use the delta between
		// the legacy and extended versions to estimate timezone offset.
		//
		// A non-UTC timezone is always used (even if offset is zero).
		// Thus, FileHeader.Modified.Location() == time.UTC is useful for
		// determining whether extended timestamps are present.
		// This is necessary for users that need to do additional time
		// calculations when dealing with legacy ZIP formats.
		if f.ModifiedTime != 0 || f.ModifiedDate != 0 {
			f.Modified = modified.In(timeZone(msdosModified.Sub(modified)))
		}
	}

	// Assume that uncompressed size 2³²-1 could plausibly happen in
	// an old zip32 file that was sharding inputs into the largest chunks
	// possible (or is just malicious; search the web for 42.zip).
	// If needUSize is true still, it means we didn't see a zip64 extension.
	// As long as the compressed size is not also 2³²-1 (implausible)
	// and the header is not also 2³²-1 (equally implausible),
	// accept the uncompressed size 2³²-1 as valid.
	// If nothing else, this keeps archive/zip working with 42.zip.
	_ = needUSize

	if needCSize || needHeaderOffset {
		return ErrFormat
	}

	return nil
}
527
+
528
// readDataDescriptor reads the data descriptor that follows a streamed
// entry's data and verifies its CRC-32 against f.CRC32. It tolerates
// descriptors both with and without the optional leading signature.
func readDataDescriptor(r io.Reader, f *File) error {
	var buf [dataDescriptorLen]byte
	// The spec says: "Although not originally assigned a
	// signature, the value 0x08074b50 has commonly been adopted
	// as a signature value for the data descriptor record.
	// Implementers should be aware that ZIP files may be
	// encountered with or without this signature marking data
	// descriptors and should account for either case when reading
	// ZIP files to ensure compatibility."
	//
	// dataDescriptorLen includes the size of the signature but
	// first read just those 4 bytes to see if it exists.
	if _, err := io.ReadFull(r, buf[:4]); err != nil {
		return err
	}
	off := 0
	maybeSig := readBuf(buf[:4])
	if maybeSig.uint32() != dataDescriptorSignature {
		// No data descriptor signature. Keep these four
		// bytes.
		off += 4
	}
	// Read the remainder so buf[:12] holds CRC-32 plus the two sizes,
	// regardless of whether a signature was present.
	if _, err := io.ReadFull(r, buf[off:12]); err != nil {
		return err
	}
	b := readBuf(buf[:12])
	if b.uint32() != f.CRC32 {
		return ErrChecksum
	}

	// The two sizes that follow here can be either 32 bits or 64 bits
	// but the spec is not very clear on this and different
	// interpretations has been made causing incompatibilities. We
	// already have the sizes from the central directory so we can
	// just ignore these.

	return nil
}
566
+
567
// readDirectoryEnd locates and parses the end-of-central-directory
// record in the last 64 KiB (plus record size) of the archive. It also
// follows a zip64 end-of-directory locator when present, and computes
// baseOffset: the number of bytes of leading non-zip data (e.g. a
// self-extractor stub) that all directory offsets are relative to.
func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, baseOffset int64, err error) {
	// look for directoryEndSignature in the last 1k, then in the last 65k
	var buf []byte
	var directoryEndOffset int64
	for i, bLen := range []int64{1024, 65 * 1024} {
		if bLen > size {
			bLen = size
		}
		buf = make([]byte, int(bLen))
		if _, err := r.ReadAt(buf, size-bLen); err != nil && err != io.EOF {
			return nil, 0, err
		}
		if p := findSignatureInBlock(buf); p >= 0 {
			buf = buf[p:]
			directoryEndOffset = size - bLen + int64(p)
			break
		}
		if i == 1 || bLen == size {
			return nil, 0, ErrFormat
		}
	}

	// read header into struct
	b := readBuf(buf[4:]) // skip signature
	d := &directoryEnd{
		diskNbr:            uint32(b.uint16()),
		dirDiskNbr:         uint32(b.uint16()),
		dirRecordsThisDisk: uint64(b.uint16()),
		directoryRecords:   uint64(b.uint16()),
		directorySize:      uint64(b.uint32()),
		directoryOffset:    uint64(b.uint32()),
		commentLen:         b.uint16(),
	}
	l := int(d.commentLen)
	if l > len(b) {
		return nil, 0, errors.New("zip: invalid comment length")
	}
	d.comment = string(b[:l])

	// These values mean that the file can be a zip64 file
	if d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {
		p, err := findDirectory64End(r, directoryEndOffset)
		if err == nil && p >= 0 {
			directoryEndOffset = p
			err = readDirectory64End(r, p, d)
		}
		if err != nil {
			return nil, 0, err
		}
	}

	maxInt64 := uint64(1<<63 - 1)
	if d.directorySize > maxInt64 || d.directoryOffset > maxInt64 {
		return nil, 0, ErrFormat
	}

	// The directory immediately precedes its end record, so any slack
	// before the claimed directoryOffset is leading non-zip data.
	baseOffset = directoryEndOffset - int64(d.directorySize) - int64(d.directoryOffset)

	// Make sure directoryOffset points to somewhere in our file.
	if o := baseOffset + int64(d.directoryOffset); o < 0 || o >= size {
		return nil, 0, ErrFormat
	}

	// If the directory end data tells us to use a non-zero baseOffset,
	// but we would find a valid directory entry if we assume that the
	// baseOffset is 0, then just use a baseOffset of 0.
	// We've seen files in which the directory end data gives us
	// an incorrect baseOffset.
	if baseOffset > 0 {
		off := int64(d.directoryOffset)
		rs := io.NewSectionReader(r, off, size-off)
		if readDirectoryHeader(&File{}, rs) == nil {
			baseOffset = 0
		}
	}

	return d, baseOffset, nil
}
645
+
646
// findDirectory64End tries to read the zip64 locator just before the
// directory end and returns the offset of the zip64 directory end if
// found. A return of (-1, nil) means "no valid locator", which is not
// an error: the archive is simply not zip64.
func findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {
	locOffset := directoryEndOffset - directory64LocLen
	if locOffset < 0 {
		return -1, nil // no need to look for a header outside the file
	}
	buf := make([]byte, directory64LocLen)
	if _, err := r.ReadAt(buf, locOffset); err != nil {
		return -1, err
	}
	b := readBuf(buf)
	if sig := b.uint32(); sig != directory64LocSignature {
		return -1, nil
	}
	if b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory
		return -1, nil // the file is not a valid zip64-file
	}
	p := b.uint64()      // relative offset of the zip64 end of central directory record
	if b.uint32() != 1 { // total number of disks
		return -1, nil // the file is not a valid zip64-file
	}
	return int64(p), nil
}
671
+
672
// readDirectory64End reads the zip64 directory end and updates the
// directory end with the zip64 directory end values.
func readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {
	buf := make([]byte, directory64EndLen)
	if _, err := r.ReadAt(buf, offset); err != nil {
		return err
	}

	b := readBuf(buf)
	if sig := b.uint32(); sig != directory64EndSignature {
		return ErrFormat
	}

	b = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)
	d.diskNbr = b.uint32()            // number of this disk
	d.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory
	d.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk
	d.directoryRecords = b.uint64()   // total number of entries in the central directory
	d.directorySize = b.uint64()      // size of the central directory
	d.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number

	return nil
}
695
+
696
+ func findSignatureInBlock(b []byte) int {
697
+ for i := len(b) - directoryEndLen; i >= 0; i-- {
698
+ // defined from directoryEndSignature in struct.go
699
+ if b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {
700
+ // n is length of comment
701
+ n := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8
702
+ if n+directoryEndLen+i > len(b) {
703
+ // Truncated comment.
704
+ // Some parsers (such as Info-ZIP) ignore the truncated comment
705
+ // rather than treating it as a hard error.
706
+ return -1
707
+ }
708
+ return i
709
+ }
710
+ }
711
+ return -1
712
+ }
713
+
714
// readBuf is a little-endian cursor over a byte slice: each accessor
// decodes a value from the front and advances past it.
type readBuf []byte

func (b *readBuf) uint8() uint8 {
	s := *b
	*b = s[1:]
	return s[0]
}

func (b *readBuf) uint16() uint16 {
	s := *b
	*b = s[2:]
	return binary.LittleEndian.Uint16(s)
}

func (b *readBuf) uint32() uint32 {
	s := *b
	*b = s[4:]
	return binary.LittleEndian.Uint32(s)
}

func (b *readBuf) uint64() uint64 {
	s := *b
	*b = s[8:]
	return binary.LittleEndian.Uint64(s)
}

// sub consumes and returns the next n bytes as their own readBuf.
func (b *readBuf) sub(n int) readBuf {
	s := *b
	*b = s[n:]
	return s[:n]
}
745
+
746
// A fileListEntry is a File and its ename.
// If file == nil, the fileListEntry describes a directory without metadata.
type fileListEntry struct {
	name  string
	file  *File
	isDir bool
	isDup bool // name collides with another entry; stat reports an error
}

// fileInfoDirEntry is satisfied by values usable both as an fs.FileInfo
// (from Stat) and as an fs.DirEntry (from ReadDir).
type fileInfoDirEntry interface {
	fs.FileInfo
	fs.DirEntry
}

// stat returns the directory-entry view of f. Real files delegate to
// their FileHeader; synthesized directories are described by f itself.
func (f *fileListEntry) stat() (fileInfoDirEntry, error) {
	if f.isDup {
		return nil, errors.New(f.name + ": duplicate entries in zip file")
	}
	if !f.isDir {
		return headerFileInfo{&f.file.FileHeader}, nil
	}
	return f, nil
}

// Only used for directories.
func (f *fileListEntry) Name() string      { _, elem, _ := split(f.name); return elem }
func (f *fileListEntry) Size() int64       { return 0 }
func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 }
func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir }
func (f *fileListEntry) IsDir() bool       { return true }
func (f *fileListEntry) Sys() any          { return nil }

// ModTime reports the stored modification time for explicit directory
// entries; directories synthesized from path prefixes have no metadata
// and report the zero time.
func (f *fileListEntry) ModTime() time.Time {
	if f.file == nil {
		return time.Time{}
	}
	return f.file.FileHeader.Modified.UTC()
}

func (f *fileListEntry) Info() (fs.FileInfo, error) { return f, nil }

func (f *fileListEntry) String() string {
	return fs.FormatDirEntry(f)
}
790
+
791
// toValidName coerces name to be a valid name for fs.FS.Open:
// backslashes become slashes, the path is cleaned, and any leading
// "/" or "../" escape elements are stripped.
func toValidName(name string) string {
	cleaned := path.Clean(strings.ReplaceAll(name, `\`, `/`))
	cleaned = strings.TrimPrefix(cleaned, "/")
	for {
		rest, found := strings.CutPrefix(cleaned, "../")
		if !found {
			return cleaned
		}
		cleaned = rest
	}
}
804
+
805
// initFileList lazily builds r.fileList for the fs.FS implementation:
// one entry per archive member with a valid name, plus synthesized
// entries for parent directories that have no explicit member, sorted
// by (directory, element) so openLookup/openReadDir can binary-search.
// Names that appear both as a file and a directory are marked isDup.
func (r *Reader) initFileList() {
	r.fileListOnce.Do(func() {
		// files and knownDirs map from a file/directory name
		// to an index into the r.fileList entry that we are
		// building. They are used to mark duplicate entries.
		files := make(map[string]int)
		knownDirs := make(map[string]int)

		// dirs[name] is true if name is known to be a directory,
		// because it appears as a prefix in a path.
		dirs := make(map[string]bool)

		for _, file := range r.File {
			isDir := len(file.Name) > 0 && file.Name[len(file.Name)-1] == '/'
			name := toValidName(file.Name)
			if name == "" {
				continue
			}

			if idx, ok := files[name]; ok {
				r.fileList[idx].isDup = true
				continue
			}
			if idx, ok := knownDirs[name]; ok {
				r.fileList[idx].isDup = true
				continue
			}

			// Record every ancestor so missing directories can be
			// synthesized below.
			for dir := path.Dir(name); dir != "."; dir = path.Dir(dir) {
				dirs[dir] = true
			}

			idx := len(r.fileList)
			entry := fileListEntry{
				name:  name,
				file:  file,
				isDir: isDir,
			}
			r.fileList = append(r.fileList, entry)
			if isDir {
				knownDirs[name] = idx
			} else {
				files[name] = idx
			}
		}
		for dir := range dirs {
			if _, ok := knownDirs[dir]; !ok {
				if idx, ok := files[dir]; ok {
					// A file and an implied directory share this name.
					r.fileList[idx].isDup = true
				} else {
					entry := fileListEntry{
						name:  dir,
						file:  nil,
						isDir: true,
					}
					r.fileList = append(r.fileList, entry)
				}
			}
		}

		sort.Slice(r.fileList, func(i, j int) bool { return fileEntryLess(r.fileList[i].name, r.fileList[j].name) })
	})
}
868
+
869
+ func fileEntryLess(x, y string) bool {
870
+ xdir, xelem, _ := split(x)
871
+ ydir, yelem, _ := split(y)
872
+ return xdir < ydir || xdir == ydir && xelem < yelem
873
+ }
874
+
875
// Open opens the named file in the ZIP archive,
// using the semantics of fs.FS.Open:
// paths are always slash separated, with no
// leading / or ../ elements.
func (r *Reader) Open(name string) (fs.File, error) {
	r.initFileList()

	if !fs.ValidPath(name) {
		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
	}
	e := r.openLookup(name)
	if e == nil {
		return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist}
	}
	if e.isDir {
		// Directories get a ReadDir-capable handle over their
		// pre-sorted children.
		return &openDir{e, r.openReadDir(name), 0}, nil
	}
	rc, err := e.file.Open()
	if err != nil {
		return nil, err
	}
	// File.Open's checksumReader also implements Stat, so it
	// satisfies fs.File.
	return rc.(fs.File), nil
}
898
+
899
// split separates name into its parent directory and final element,
// reporting whether name carried a trailing slash (a directory entry).
// A name with no slash gets dir ".".
func split(name string) (dir, elem string, isDir bool) {
	if strings.HasSuffix(name, "/") {
		isDir = true
		name = strings.TrimSuffix(name, "/")
	}
	if i := strings.LastIndexByte(name, '/'); i >= 0 {
		return name[:i], name[i+1:], isDir
	}
	return ".", name, isDir
}
913
+
914
// dotFile is the synthetic root-directory entry returned for Open(".").
var dotFile = &fileListEntry{name: "./", isDir: true}

// openLookup finds the fileList entry for name, or nil if none exists.
// r.fileList must already be initialized (see initFileList); it is
// sorted by (dir, elem), so a binary search locates the candidate.
func (r *Reader) openLookup(name string) *fileListEntry {
	if name == "." {
		return dotFile
	}

	dir, elem, _ := split(name)
	files := r.fileList
	i := sort.Search(len(files), func(i int) bool {
		idir, ielem, _ := split(files[i].name)
		return idir > dir || idir == dir && ielem >= elem
	})
	if i < len(files) {
		fname := files[i].name
		// Match either the exact name or the same name with a
		// trailing slash (how explicit directory entries are stored).
		if fname == name || len(fname) == len(name)+1 && fname[len(name)] == '/' && fname[:len(name)] == name {
			return &files[i]
		}
	}
	return nil
}
935
+
936
+ func (r *Reader) openReadDir(dir string) []fileListEntry {
937
+ files := r.fileList
938
+ i := sort.Search(len(files), func(i int) bool {
939
+ idir, _, _ := split(files[i].name)
940
+ return idir >= dir
941
+ })
942
+ j := sort.Search(len(files), func(j int) bool {
943
+ jdir, _, _ := split(files[j].name)
944
+ return jdir > dir
945
+ })
946
+ return files[i:j]
947
+ }
948
+
949
// openDir is the fs.ReadDirFile returned for directory entries,
// paging through a pre-sorted slice of children.
type openDir struct {
	e      *fileListEntry  // the directory's own entry
	files  []fileListEntry // its direct children, sorted
	offset int             // how many children ReadDir has already returned
}

func (d *openDir) Close() error               { return nil }
func (d *openDir) Stat() (fs.FileInfo, error) { return d.e.stat() }

// Read always fails: directories have no byte content.
func (d *openDir) Read([]byte) (int, error) {
	return 0, &fs.PathError{Op: "read", Path: d.e.name, Err: errors.New("is a directory")}
}

// ReadDir returns up to count entries, following fs.ReadDirFile
// semantics: count <= 0 returns everything remaining with a nil error,
// while a positive count returns io.EOF once the listing is exhausted.
func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) {
	n := len(d.files) - d.offset
	if count > 0 && n > count {
		n = count
	}
	if n == 0 {
		if count <= 0 {
			return nil, nil
		}
		return nil, io.EOF
	}
	list := make([]fs.DirEntry, n)
	for i := range list {
		s, err := d.files[d.offset+i].stat()
		if err != nil {
			return nil, err
		}
		list[i] = s
	}
	d.offset += n
	return list, nil
}
platform/dbops/binaries/go/go/src/archive/zip/reader_test.go ADDED
@@ -0,0 +1,1836 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2010 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package zip
6
+
7
+ import (
8
+ "bytes"
9
+ "encoding/binary"
10
+ "encoding/hex"
11
+ "internal/obscuretestdata"
12
+ "io"
13
+ "io/fs"
14
+ "os"
15
+ "path/filepath"
16
+ "reflect"
17
+ "regexp"
18
+ "strings"
19
+ "testing"
20
+ "testing/fstest"
21
+ "time"
22
+ )
23
+
24
+ type ZipTest struct {
25
+ Name string
26
+ Source func() (r io.ReaderAt, size int64) // if non-nil, used instead of testdata/<Name> file
27
+ Comment string
28
+ File []ZipTestFile
29
+ Obscured bool // needed for Apple notarization (golang.org/issue/34986)
30
+ Error error // the error that Opening this file should return
31
+ }
32
+
33
+ type ZipTestFile struct {
34
+ Name string
35
+ Mode fs.FileMode
36
+ NonUTF8 bool
37
+ ModTime time.Time
38
+ Modified time.Time
39
+
40
+ // Information describing expected zip file content.
41
+ // First, reading the entire content should produce the error ContentErr.
42
+ // Second, if ContentErr==nil, the content should match Content.
43
+ // If content is large, an alternative to setting Content is to set File,
44
+ // which names a file in the testdata/ directory containing the
45
+ // uncompressed expected content.
46
+ // If content is very large, an alternative to setting Content or File
47
+ // is to set Size, which will then be checked against the header-reported size
48
+ // but will bypass the decompressing of the actual data.
49
+ // This last option is used for testing very large (multi-GB) compressed files.
50
+ ContentErr error
51
+ Content []byte
52
+ File string
53
+ Size uint64
54
+ }
55
+
56
+ var tests = []ZipTest{
57
+ {
58
+ Name: "test.zip",
59
+ Comment: "This is a zipfile comment.",
60
+ File: []ZipTestFile{
61
+ {
62
+ Name: "test.txt",
63
+ Content: []byte("This is a test text file.\n"),
64
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
65
+ Mode: 0644,
66
+ },
67
+ {
68
+ Name: "gophercolor16x16.png",
69
+ File: "gophercolor16x16.png",
70
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
71
+ Mode: 0644,
72
+ },
73
+ },
74
+ },
75
+ {
76
+ Name: "test-trailing-junk.zip",
77
+ Comment: "This is a zipfile comment.",
78
+ File: []ZipTestFile{
79
+ {
80
+ Name: "test.txt",
81
+ Content: []byte("This is a test text file.\n"),
82
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
83
+ Mode: 0644,
84
+ },
85
+ {
86
+ Name: "gophercolor16x16.png",
87
+ File: "gophercolor16x16.png",
88
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
89
+ Mode: 0644,
90
+ },
91
+ },
92
+ },
93
+ {
94
+ Name: "test-prefix.zip",
95
+ Comment: "This is a zipfile comment.",
96
+ File: []ZipTestFile{
97
+ {
98
+ Name: "test.txt",
99
+ Content: []byte("This is a test text file.\n"),
100
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
101
+ Mode: 0644,
102
+ },
103
+ {
104
+ Name: "gophercolor16x16.png",
105
+ File: "gophercolor16x16.png",
106
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
107
+ Mode: 0644,
108
+ },
109
+ },
110
+ },
111
+ {
112
+ Name: "test-baddirsz.zip",
113
+ Comment: "This is a zipfile comment.",
114
+ File: []ZipTestFile{
115
+ {
116
+ Name: "test.txt",
117
+ Content: []byte("This is a test text file.\n"),
118
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
119
+ Mode: 0644,
120
+ },
121
+ {
122
+ Name: "gophercolor16x16.png",
123
+ File: "gophercolor16x16.png",
124
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
125
+ Mode: 0644,
126
+ },
127
+ },
128
+ },
129
+ {
130
+ Name: "test-badbase.zip",
131
+ Comment: "This is a zipfile comment.",
132
+ File: []ZipTestFile{
133
+ {
134
+ Name: "test.txt",
135
+ Content: []byte("This is a test text file.\n"),
136
+ Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)),
137
+ Mode: 0644,
138
+ },
139
+ {
140
+ Name: "gophercolor16x16.png",
141
+ File: "gophercolor16x16.png",
142
+ Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)),
143
+ Mode: 0644,
144
+ },
145
+ },
146
+ },
147
+ {
148
+ Name: "r.zip",
149
+ Source: returnRecursiveZip,
150
+ File: []ZipTestFile{
151
+ {
152
+ Name: "r/r.zip",
153
+ Content: rZipBytes(),
154
+ Modified: time.Date(2010, 3, 4, 0, 24, 16, 0, time.UTC),
155
+ Mode: 0666,
156
+ },
157
+ },
158
+ },
159
+ {
160
+ Name: "symlink.zip",
161
+ File: []ZipTestFile{
162
+ {
163
+ Name: "symlink",
164
+ Content: []byte("../target"),
165
+ Modified: time.Date(2012, 2, 3, 19, 56, 48, 0, timeZone(-2*time.Hour)),
166
+ Mode: 0777 | fs.ModeSymlink,
167
+ },
168
+ },
169
+ },
170
+ {
171
+ Name: "readme.zip",
172
+ },
173
+ {
174
+ Name: "readme.notzip",
175
+ Error: ErrFormat,
176
+ },
177
+ {
178
+ Name: "dd.zip",
179
+ File: []ZipTestFile{
180
+ {
181
+ Name: "filename",
182
+ Content: []byte("This is a test textfile.\n"),
183
+ Modified: time.Date(2011, 2, 2, 13, 6, 20, 0, time.UTC),
184
+ Mode: 0666,
185
+ },
186
+ },
187
+ },
188
+ {
189
+ // created in windows XP file manager.
190
+ Name: "winxp.zip",
191
+ File: []ZipTestFile{
192
+ {
193
+ Name: "hello",
194
+ Content: []byte("world \r\n"),
195
+ Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, time.UTC),
196
+ Mode: 0666,
197
+ },
198
+ {
199
+ Name: "dir/bar",
200
+ Content: []byte("foo \r\n"),
201
+ Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, time.UTC),
202
+ Mode: 0666,
203
+ },
204
+ {
205
+ Name: "dir/empty/",
206
+ Content: []byte{},
207
+ Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, time.UTC),
208
+ Mode: fs.ModeDir | 0777,
209
+ },
210
+ {
211
+ Name: "readonly",
212
+ Content: []byte("important \r\n"),
213
+ Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, time.UTC),
214
+ Mode: 0444,
215
+ },
216
+ },
217
+ },
218
+ {
219
+ // created by Zip 3.0 under Linux
220
+ Name: "unix.zip",
221
+ File: []ZipTestFile{
222
+ {
223
+ Name: "hello",
224
+ Content: []byte("world \r\n"),
225
+ Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, timeZone(0)),
226
+ Mode: 0666,
227
+ },
228
+ {
229
+ Name: "dir/bar",
230
+ Content: []byte("foo \r\n"),
231
+ Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, timeZone(0)),
232
+ Mode: 0666,
233
+ },
234
+ {
235
+ Name: "dir/empty/",
236
+ Content: []byte{},
237
+ Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, timeZone(0)),
238
+ Mode: fs.ModeDir | 0777,
239
+ },
240
+ {
241
+ Name: "readonly",
242
+ Content: []byte("important \r\n"),
243
+ Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, timeZone(0)),
244
+ Mode: 0444,
245
+ },
246
+ },
247
+ },
248
+ {
249
+ // created by Go, before we wrote the "optional" data
250
+ // descriptor signatures (which are required by macOS).
251
+ // Use obscured file to avoid Apple’s notarization service
252
+ // rejecting the toolchain due to an inability to unzip this archive.
253
+ // See golang.org/issue/34986
254
+ Name: "go-no-datadesc-sig.zip.base64",
255
+ Obscured: true,
256
+ File: []ZipTestFile{
257
+ {
258
+ Name: "foo.txt",
259
+ Content: []byte("foo\n"),
260
+ Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
261
+ Mode: 0644,
262
+ },
263
+ {
264
+ Name: "bar.txt",
265
+ Content: []byte("bar\n"),
266
+ Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
267
+ Mode: 0644,
268
+ },
269
+ },
270
+ },
271
+ {
272
+ // created by Go, after we wrote the "optional" data
273
+ // descriptor signatures (which are required by macOS)
274
+ Name: "go-with-datadesc-sig.zip",
275
+ File: []ZipTestFile{
276
+ {
277
+ Name: "foo.txt",
278
+ Content: []byte("foo\n"),
279
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
280
+ Mode: 0666,
281
+ },
282
+ {
283
+ Name: "bar.txt",
284
+ Content: []byte("bar\n"),
285
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
286
+ Mode: 0666,
287
+ },
288
+ },
289
+ },
290
+ {
291
+ Name: "Bad-CRC32-in-data-descriptor",
292
+ Source: returnCorruptCRC32Zip,
293
+ File: []ZipTestFile{
294
+ {
295
+ Name: "foo.txt",
296
+ Content: []byte("foo\n"),
297
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
298
+ Mode: 0666,
299
+ ContentErr: ErrChecksum,
300
+ },
301
+ {
302
+ Name: "bar.txt",
303
+ Content: []byte("bar\n"),
304
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
305
+ Mode: 0666,
306
+ },
307
+ },
308
+ },
309
+ // Tests that we verify (and accept valid) crc32s on files
310
+ // with crc32s in their file header (not in data descriptors)
311
+ {
312
+ Name: "crc32-not-streamed.zip",
313
+ File: []ZipTestFile{
314
+ {
315
+ Name: "foo.txt",
316
+ Content: []byte("foo\n"),
317
+ Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
318
+ Mode: 0644,
319
+ },
320
+ {
321
+ Name: "bar.txt",
322
+ Content: []byte("bar\n"),
323
+ Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
324
+ Mode: 0644,
325
+ },
326
+ },
327
+ },
328
+ // Tests that we verify (and reject invalid) crc32s on files
329
+ // with crc32s in their file header (not in data descriptors)
330
+ {
331
+ Name: "crc32-not-streamed.zip",
332
+ Source: returnCorruptNotStreamedZip,
333
+ File: []ZipTestFile{
334
+ {
335
+ Name: "foo.txt",
336
+ Content: []byte("foo\n"),
337
+ Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)),
338
+ Mode: 0644,
339
+ ContentErr: ErrChecksum,
340
+ },
341
+ {
342
+ Name: "bar.txt",
343
+ Content: []byte("bar\n"),
344
+ Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)),
345
+ Mode: 0644,
346
+ },
347
+ },
348
+ },
349
+ {
350
+ Name: "zip64.zip",
351
+ File: []ZipTestFile{
352
+ {
353
+ Name: "README",
354
+ Content: []byte("This small file is in ZIP64 format.\n"),
355
+ Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, time.UTC),
356
+ Mode: 0644,
357
+ },
358
+ },
359
+ },
360
+ // Another zip64 file with different Extras fields. (golang.org/issue/7069)
361
+ {
362
+ Name: "zip64-2.zip",
363
+ File: []ZipTestFile{
364
+ {
365
+ Name: "README",
366
+ Content: []byte("This small file is in ZIP64 format.\n"),
367
+ Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, timeZone(-4*time.Hour)),
368
+ Mode: 0644,
369
+ },
370
+ },
371
+ },
372
+ // Largest possible non-zip64 file, with no zip64 header.
373
+ {
374
+ Name: "big.zip",
375
+ Source: returnBigZipBytes,
376
+ File: []ZipTestFile{
377
+ {
378
+ Name: "big.file",
379
+ Content: nil,
380
+ Size: 1<<32 - 1,
381
+ Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC),
382
+ Mode: 0666,
383
+ },
384
+ },
385
+ },
386
+ {
387
+ Name: "utf8-7zip.zip",
388
+ File: []ZipTestFile{
389
+ {
390
+ Name: "世界",
391
+ Content: []byte{},
392
+ Mode: 0666,
393
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
394
+ },
395
+ },
396
+ },
397
+ {
398
+ Name: "utf8-infozip.zip",
399
+ File: []ZipTestFile{
400
+ {
401
+ Name: "世界",
402
+ Content: []byte{},
403
+ Mode: 0644,
404
+ // Name is valid UTF-8, but format does not have UTF-8 flag set.
405
+ // We don't do UTF-8 detection for multi-byte runes due to
406
+ // false-positives with other encodings (e.g., Shift-JIS).
407
+ // Format says encoding is not UTF-8, so we trust it.
408
+ NonUTF8: true,
409
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
410
+ },
411
+ },
412
+ },
413
+ {
414
+ Name: "utf8-osx.zip",
415
+ File: []ZipTestFile{
416
+ {
417
+ Name: "世界",
418
+ Content: []byte{},
419
+ Mode: 0644,
420
+ // Name is valid UTF-8, but format does not have UTF-8 set.
421
+ NonUTF8: true,
422
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)),
423
+ },
424
+ },
425
+ },
426
+ {
427
+ Name: "utf8-winrar.zip",
428
+ File: []ZipTestFile{
429
+ {
430
+ Name: "世界",
431
+ Content: []byte{},
432
+ Mode: 0666,
433
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)),
434
+ },
435
+ },
436
+ },
437
+ {
438
+ Name: "utf8-winzip.zip",
439
+ File: []ZipTestFile{
440
+ {
441
+ Name: "世界",
442
+ Content: []byte{},
443
+ Mode: 0666,
444
+ Modified: time.Date(2017, 11, 6, 13, 9, 27, 867000000, timeZone(-8*time.Hour)),
445
+ },
446
+ },
447
+ },
448
+ {
449
+ Name: "time-7zip.zip",
450
+ File: []ZipTestFile{
451
+ {
452
+ Name: "test.txt",
453
+ Content: []byte{},
454
+ Size: 1<<32 - 1,
455
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
456
+ Mode: 0666,
457
+ },
458
+ },
459
+ },
460
+ {
461
+ Name: "time-infozip.zip",
462
+ File: []ZipTestFile{
463
+ {
464
+ Name: "test.txt",
465
+ Content: []byte{},
466
+ Size: 1<<32 - 1,
467
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
468
+ Mode: 0644,
469
+ },
470
+ },
471
+ },
472
+ {
473
+ Name: "time-osx.zip",
474
+ File: []ZipTestFile{
475
+ {
476
+ Name: "test.txt",
477
+ Content: []byte{},
478
+ Size: 1<<32 - 1,
479
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
480
+ Mode: 0644,
481
+ },
482
+ },
483
+ },
484
+ {
485
+ Name: "time-win7.zip",
486
+ File: []ZipTestFile{
487
+ {
488
+ Name: "test.txt",
489
+ Content: []byte{},
490
+ Size: 1<<32 - 1,
491
+ Modified: time.Date(2017, 10, 31, 21, 11, 58, 0, time.UTC),
492
+ Mode: 0666,
493
+ },
494
+ },
495
+ },
496
+ {
497
+ Name: "time-winrar.zip",
498
+ File: []ZipTestFile{
499
+ {
500
+ Name: "test.txt",
501
+ Content: []byte{},
502
+ Size: 1<<32 - 1,
503
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)),
504
+ Mode: 0666,
505
+ },
506
+ },
507
+ },
508
+ {
509
+ Name: "time-winzip.zip",
510
+ File: []ZipTestFile{
511
+ {
512
+ Name: "test.txt",
513
+ Content: []byte{},
514
+ Size: 1<<32 - 1,
515
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 244000000, timeZone(-7*time.Hour)),
516
+ Mode: 0666,
517
+ },
518
+ },
519
+ },
520
+ {
521
+ Name: "time-go.zip",
522
+ File: []ZipTestFile{
523
+ {
524
+ Name: "test.txt",
525
+ Content: []byte{},
526
+ Size: 1<<32 - 1,
527
+ Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
528
+ Mode: 0666,
529
+ },
530
+ },
531
+ },
532
+ {
533
+ Name: "time-22738.zip",
534
+ File: []ZipTestFile{
535
+ {
536
+ Name: "file",
537
+ Content: []byte{},
538
+ Mode: 0666,
539
+ Modified: time.Date(1999, 12, 31, 19, 0, 0, 0, timeZone(-5*time.Hour)),
540
+ ModTime: time.Date(1999, 12, 31, 19, 0, 0, 0, time.UTC),
541
+ },
542
+ },
543
+ },
544
+ {
545
+ Name: "dupdir.zip",
546
+ File: []ZipTestFile{
547
+ {
548
+ Name: "a/",
549
+ Content: []byte{},
550
+ Mode: fs.ModeDir | 0666,
551
+ Modified: time.Date(2021, 12, 29, 0, 0, 0, 0, timeZone(0)),
552
+ },
553
+ {
554
+ Name: "a/b",
555
+ Content: []byte{},
556
+ Mode: 0666,
557
+ Modified: time.Date(2021, 12, 29, 0, 0, 0, 0, timeZone(0)),
558
+ },
559
+ {
560
+ Name: "a/b/",
561
+ Content: []byte{},
562
+ Mode: fs.ModeDir | 0666,
563
+ Modified: time.Date(2021, 12, 29, 0, 0, 0, 0, timeZone(0)),
564
+ },
565
+ {
566
+ Name: "a/b/c",
567
+ Content: []byte{},
568
+ Mode: 0666,
569
+ Modified: time.Date(2021, 12, 29, 0, 0, 0, 0, timeZone(0)),
570
+ },
571
+ },
572
+ },
573
+ // Issue 66869: Don't skip over an EOCDR with a truncated comment.
574
+ // The test file sneakily hides a second EOCDR before the first one;
575
+ // previously we would extract one file ("file") from this archive,
576
+ // while most other tools would reject the file or extract a different one ("FILE").
577
+ {
578
+ Name: "comment-truncated.zip",
579
+ Error: ErrFormat,
580
+ },
581
+ }
582
+
583
+ func TestReader(t *testing.T) {
584
+ for _, zt := range tests {
585
+ t.Run(zt.Name, func(t *testing.T) {
586
+ readTestZip(t, zt)
587
+ })
588
+ }
589
+ }
590
+
591
+ func readTestZip(t *testing.T, zt ZipTest) {
592
+ var z *Reader
593
+ var err error
594
+ var raw []byte
595
+ if zt.Source != nil {
596
+ rat, size := zt.Source()
597
+ z, err = NewReader(rat, size)
598
+ raw = make([]byte, size)
599
+ if _, err := rat.ReadAt(raw, 0); err != nil {
600
+ t.Errorf("ReadAt error=%v", err)
601
+ return
602
+ }
603
+ } else {
604
+ path := filepath.Join("testdata", zt.Name)
605
+ if zt.Obscured {
606
+ tf, err := obscuretestdata.DecodeToTempFile(path)
607
+ if err != nil {
608
+ t.Errorf("obscuretestdata.DecodeToTempFile(%s): %v", path, err)
609
+ return
610
+ }
611
+ defer os.Remove(tf)
612
+ path = tf
613
+ }
614
+ var rc *ReadCloser
615
+ rc, err = OpenReader(path)
616
+ if err == nil {
617
+ defer rc.Close()
618
+ z = &rc.Reader
619
+ }
620
+ var err2 error
621
+ raw, err2 = os.ReadFile(path)
622
+ if err2 != nil {
623
+ t.Errorf("ReadFile(%s) error=%v", path, err2)
624
+ return
625
+ }
626
+ }
627
+ if err != zt.Error {
628
+ t.Errorf("error=%v, want %v", err, zt.Error)
629
+ return
630
+ }
631
+
632
+ // bail if file is not zip
633
+ if err == ErrFormat {
634
+ return
635
+ }
636
+
637
+ // bail here if no Files expected to be tested
638
+ // (there may actually be files in the zip, but we don't care)
639
+ if zt.File == nil {
640
+ return
641
+ }
642
+
643
+ if z.Comment != zt.Comment {
644
+ t.Errorf("comment=%q, want %q", z.Comment, zt.Comment)
645
+ }
646
+ if len(z.File) != len(zt.File) {
647
+ t.Fatalf("file count=%d, want %d", len(z.File), len(zt.File))
648
+ }
649
+
650
+ // test read of each file
651
+ for i, ft := range zt.File {
652
+ readTestFile(t, zt, ft, z.File[i], raw)
653
+ }
654
+ if t.Failed() {
655
+ return
656
+ }
657
+
658
+ // test simultaneous reads
659
+ n := 0
660
+ done := make(chan bool)
661
+ for i := 0; i < 5; i++ {
662
+ for j, ft := range zt.File {
663
+ go func(j int, ft ZipTestFile) {
664
+ readTestFile(t, zt, ft, z.File[j], raw)
665
+ done <- true
666
+ }(j, ft)
667
+ n++
668
+ }
669
+ }
670
+ for ; n > 0; n-- {
671
+ <-done
672
+ }
673
+ }
674
+
675
+ func equalTimeAndZone(t1, t2 time.Time) bool {
676
+ name1, offset1 := t1.Zone()
677
+ name2, offset2 := t2.Zone()
678
+ return t1.Equal(t2) && name1 == name2 && offset1 == offset2
679
+ }
680
+
681
+ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File, raw []byte) {
682
+ if f.Name != ft.Name {
683
+ t.Errorf("name=%q, want %q", f.Name, ft.Name)
684
+ }
685
+ if !ft.Modified.IsZero() && !equalTimeAndZone(f.Modified, ft.Modified) {
686
+ t.Errorf("%s: Modified=%s, want %s", f.Name, f.Modified, ft.Modified)
687
+ }
688
+ if !ft.ModTime.IsZero() && !equalTimeAndZone(f.ModTime(), ft.ModTime) {
689
+ t.Errorf("%s: ModTime=%s, want %s", f.Name, f.ModTime(), ft.ModTime)
690
+ }
691
+
692
+ testFileMode(t, f, ft.Mode)
693
+
694
+ size := uint64(f.UncompressedSize)
695
+ if size == uint32max {
696
+ size = f.UncompressedSize64
697
+ } else if size != f.UncompressedSize64 {
698
+ t.Errorf("%v: UncompressedSize=%#x does not match UncompressedSize64=%#x", f.Name, size, f.UncompressedSize64)
699
+ }
700
+
701
+ // Check that OpenRaw returns the correct byte segment
702
+ rw, err := f.OpenRaw()
703
+ if err != nil {
704
+ t.Errorf("%v: OpenRaw error=%v", f.Name, err)
705
+ return
706
+ }
707
+ start, err := f.DataOffset()
708
+ if err != nil {
709
+ t.Errorf("%v: DataOffset error=%v", f.Name, err)
710
+ return
711
+ }
712
+ got, err := io.ReadAll(rw)
713
+ if err != nil {
714
+ t.Errorf("%v: OpenRaw ReadAll error=%v", f.Name, err)
715
+ return
716
+ }
717
+ end := uint64(start) + f.CompressedSize64
718
+ want := raw[start:end]
719
+ if !bytes.Equal(got, want) {
720
+ t.Logf("got %q", got)
721
+ t.Logf("want %q", want)
722
+ t.Errorf("%v: OpenRaw returned unexpected bytes", f.Name)
723
+ return
724
+ }
725
+
726
+ r, err := f.Open()
727
+ if err != nil {
728
+ t.Errorf("%v", err)
729
+ return
730
+ }
731
+
732
+ // For very large files, just check that the size is correct.
733
+ // The content is expected to be all zeros.
734
+ // Don't bother uncompressing: too big.
735
+ if ft.Content == nil && ft.File == "" && ft.Size > 0 {
736
+ if size != ft.Size {
737
+ t.Errorf("%v: uncompressed size %#x, want %#x", ft.Name, size, ft.Size)
738
+ }
739
+ r.Close()
740
+ return
741
+ }
742
+
743
+ var b bytes.Buffer
744
+ _, err = io.Copy(&b, r)
745
+ if err != ft.ContentErr {
746
+ t.Errorf("copying contents: %v (want %v)", err, ft.ContentErr)
747
+ }
748
+ if err != nil {
749
+ return
750
+ }
751
+ r.Close()
752
+
753
+ if g := uint64(b.Len()); g != size {
754
+ t.Errorf("%v: read %v bytes but f.UncompressedSize == %v", f.Name, g, size)
755
+ }
756
+
757
+ var c []byte
758
+ if ft.Content != nil {
759
+ c = ft.Content
760
+ } else if c, err = os.ReadFile("testdata/" + ft.File); err != nil {
761
+ t.Error(err)
762
+ return
763
+ }
764
+
765
+ if b.Len() != len(c) {
766
+ t.Errorf("%s: len=%d, want %d", f.Name, b.Len(), len(c))
767
+ return
768
+ }
769
+
770
+ for i, b := range b.Bytes() {
771
+ if b != c[i] {
772
+ t.Errorf("%s: content[%d]=%q want %q", f.Name, i, b, c[i])
773
+ return
774
+ }
775
+ }
776
+ }
777
+
778
+ func testFileMode(t *testing.T, f *File, want fs.FileMode) {
779
+ mode := f.Mode()
780
+ if want == 0 {
781
+ t.Errorf("%s mode: got %v, want none", f.Name, mode)
782
+ } else if mode != want {
783
+ t.Errorf("%s mode: want %v, got %v", f.Name, want, mode)
784
+ }
785
+ }
786
+
787
+ func TestInvalidFiles(t *testing.T) {
788
+ const size = 1024 * 70 // 70kb
789
+ b := make([]byte, size)
790
+
791
+ // zeroes
792
+ _, err := NewReader(bytes.NewReader(b), size)
793
+ if err != ErrFormat {
794
+ t.Errorf("zeroes: error=%v, want %v", err, ErrFormat)
795
+ }
796
+
797
+ // repeated directoryEndSignatures
798
+ sig := make([]byte, 4)
799
+ binary.LittleEndian.PutUint32(sig, directoryEndSignature)
800
+ for i := 0; i < size-4; i += 4 {
801
+ copy(b[i:i+4], sig)
802
+ }
803
+ _, err = NewReader(bytes.NewReader(b), size)
804
+ if err != ErrFormat {
805
+ t.Errorf("sigs: error=%v, want %v", err, ErrFormat)
806
+ }
807
+
808
+ // negative size
809
+ _, err = NewReader(bytes.NewReader([]byte("foobar")), -1)
810
+ if err == nil {
811
+ t.Errorf("archive/zip.NewReader: expected error when negative size is passed")
812
+ }
813
+ }
814
+
815
+ func messWith(fileName string, corrupter func(b []byte)) (r io.ReaderAt, size int64) {
816
+ data, err := os.ReadFile(filepath.Join("testdata", fileName))
817
+ if err != nil {
818
+ panic("Error reading " + fileName + ": " + err.Error())
819
+ }
820
+ corrupter(data)
821
+ return bytes.NewReader(data), int64(len(data))
822
+ }
823
+
824
+ func returnCorruptCRC32Zip() (r io.ReaderAt, size int64) {
825
+ return messWith("go-with-datadesc-sig.zip", func(b []byte) {
826
+ // Corrupt one of the CRC32s in the data descriptor:
827
+ b[0x2d]++
828
+ })
829
+ }
830
+
831
+ func returnCorruptNotStreamedZip() (r io.ReaderAt, size int64) {
832
+ return messWith("crc32-not-streamed.zip", func(b []byte) {
833
+ // Corrupt foo.txt's final crc32 byte, in both
834
+ // the file header and TOC. (0x7e -> 0x7f)
835
+ b[0x11]++
836
+ b[0x9d]++
837
+
838
+ // TODO(bradfitz): add a new test that only corrupts
839
+ // one of these values, and verify that that's also an
840
+ // error. Currently, the reader code doesn't verify the
841
+ // fileheader and TOC's crc32 match if they're both
842
+ // non-zero and only the second line above, the TOC,
843
+ // is what matters.
844
+ })
845
+ }
846
+
847
+ // rZipBytes returns the bytes of a recursive zip file, without
848
+ // putting it on disk and triggering certain virus scanners.
849
+ func rZipBytes() []byte {
850
+ s := `
851
+ 0000000 50 4b 03 04 14 00 00 00 08 00 08 03 64 3c f9 f4
852
+ 0000010 89 64 48 01 00 00 b8 01 00 00 07 00 00 00 72 2f
853
+ 0000020 72 2e 7a 69 70 00 25 00 da ff 50 4b 03 04 14 00
854
+ 0000030 00 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00
855
+ 0000040 b8 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00
856
+ 0000050 2f 00 d0 ff 00 25 00 da ff 50 4b 03 04 14 00 00
857
+ 0000060 00 08 00 08 03 64 3c f9 f4 89 64 48 01 00 00 b8
858
+ 0000070 01 00 00 07 00 00 00 72 2f 72 2e 7a 69 70 00 2f
859
+ 0000080 00 d0 ff c2 54 8e 57 39 00 05 00 fa ff c2 54 8e
860
+ 0000090 57 39 00 05 00 fa ff 00 05 00 fa ff 00 14 00 eb
861
+ 00000a0 ff c2 54 8e 57 39 00 05 00 fa ff 00 05 00 fa ff
862
+ 00000b0 00 14 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42
863
+ 00000c0 88 21 c4 00 00 14 00 eb ff 42 88 21 c4 00 00 14
864
+ 00000d0 00 eb ff 42 88 21 c4 00 00 14 00 eb ff 42 88 21
865
+ 00000e0 c4 00 00 00 00 ff ff 00 00 00 ff ff 00 34 00 cb
866
+ 00000f0 ff 42 88 21 c4 00 00 00 00 ff ff 00 00 00 ff ff
867
+ 0000100 00 34 00 cb ff 42 e8 21 5e 0f 00 00 00 ff ff 0a
868
+ 0000110 f0 66 64 12 61 c0 15 dc e8 a0 48 bf 48 af 2a b3
869
+ 0000120 20 c0 9b 95 0d c4 67 04 42 53 06 06 06 40 00 06
870
+ 0000130 00 f9 ff 6d 01 00 00 00 00 42 e8 21 5e 0f 00 00
871
+ 0000140 00 ff ff 0a f0 66 64 12 61 c0 15 dc e8 a0 48 bf
872
+ 0000150 48 af 2a b3 20 c0 9b 95 0d c4 67 04 42 53 06 06
873
+ 0000160 06 40 00 06 00 f9 ff 6d 01 00 00 00 00 50 4b 01
874
+ 0000170 02 14 00 14 00 00 00 08 00 08 03 64 3c f9 f4 89
875
+ 0000180 64 48 01 00 00 b8 01 00 00 07 00 00 00 00 00 00
876
+ 0000190 00 00 00 00 00 00 00 00 00 00 00 72 2f 72 2e 7a
877
+ 00001a0 69 70 50 4b 05 06 00 00 00 00 01 00 01 00 35 00
878
+ 00001b0 00 00 6d 01 00 00 00 00`
879
+ s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "")
880
+ s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "")
881
+ b, err := hex.DecodeString(s)
882
+ if err != nil {
883
+ panic(err)
884
+ }
885
+ return b
886
+ }
887
+
888
+ func returnRecursiveZip() (r io.ReaderAt, size int64) {
889
+ b := rZipBytes()
890
+ return bytes.NewReader(b), int64(len(b))
891
+ }
892
+
893
+ // biggestZipBytes returns the bytes of a zip file biggest.zip
894
+ // that contains a zip file bigger.zip that contains a zip file
895
+ // big.zip that contains big.file, which contains 2³²-1 zeros.
896
+ // The big.zip file is interesting because it has no zip64 header,
897
+ // much like the innermost zip files in the well-known 42.zip.
898
+ //
899
+ // biggest.zip was generated by changing isZip64 to use > uint32max
900
+ // instead of >= uint32max and then running this program:
901
+ //
902
+ // package main
903
+ //
904
+ // import (
905
+ // "archive/zip"
906
+ // "bytes"
907
+ // "io"
908
+ // "log"
909
+ // "os"
910
+ // )
911
+ //
912
+ // type zeros struct{}
913
+ //
914
+ // func (zeros) Read(b []byte) (int, error) {
915
+ // for i := range b {
916
+ // b[i] = 0
917
+ // }
918
+ // return len(b), nil
919
+ // }
920
+ //
921
+ // func main() {
922
+ // bigZip := makeZip("big.file", io.LimitReader(zeros{}, 1<<32-1))
923
+ // if err := os.WriteFile("/tmp/big.zip", bigZip, 0666); err != nil {
924
+ // log.Fatal(err)
925
+ // }
926
+ //
927
+ // biggerZip := makeZip("big.zip", bytes.NewReader(bigZip))
928
+ // if err := os.WriteFile("/tmp/bigger.zip", biggerZip, 0666); err != nil {
929
+ // log.Fatal(err)
930
+ // }
931
+ //
932
+ // biggestZip := makeZip("bigger.zip", bytes.NewReader(biggerZip))
933
+ // if err := os.WriteFile("/tmp/biggest.zip", biggestZip, 0666); err != nil {
934
+ // log.Fatal(err)
935
+ // }
936
+ // }
937
+ //
938
+ // func makeZip(name string, r io.Reader) []byte {
939
+ // var buf bytes.Buffer
940
+ // w := zip.NewWriter(&buf)
941
+ // wf, err := w.Create(name)
942
+ // if err != nil {
943
+ // log.Fatal(err)
944
+ // }
945
+ // if _, err = io.Copy(wf, r); err != nil {
946
+ // log.Fatal(err)
947
+ // }
948
+ // if err := w.Close(); err != nil {
949
+ // log.Fatal(err)
950
+ // }
951
+ // return buf.Bytes()
952
+ // }
953
+ //
954
+ // The 4 GB of zeros compresses to 4 MB, which compresses to 20 kB,
955
+ // which compresses to 1252 bytes (in the hex dump below).
956
+ //
957
+ // It's here in hex for the same reason as rZipBytes above: to avoid
958
+ // problems with on-disk virus scanners or other zip processors.
959
+ func biggestZipBytes() []byte {
960
+ s := `
961
+ 0000000 50 4b 03 04 14 00 08 00 08 00 00 00 00 00 00 00
962
+ 0000010 00 00 00 00 00 00 00 00 00 00 0a 00 00 00 62 69
963
+ 0000020 67 67 65 72 2e 7a 69 70 ec dc 6b 4c 53 67 18 07
964
+ 0000030 f0 16 c5 ca 65 2e cb b8 94 20 61 1f 44 33 c7 cd
965
+ 0000040 c0 86 4a b5 c0 62 8a 61 05 c6 cd 91 b2 54 8c 1b
966
+ 0000050 63 8b 03 9c 1b 95 52 5a e3 a0 19 6c b2 05 59 44
967
+ 0000060 64 9d 73 83 71 11 46 61 14 b9 1d 14 09 4a c3 60
968
+ 0000070 2e 4c 6e a5 60 45 02 62 81 95 b6 94 9e 9e 77 e7
969
+ 0000080 d0 43 b6 f8 71 df 96 3c e7 a4 69 ce bf cf e9 79
970
+ 0000090 ce ef 79 3f bf f1 31 db b6 bb 31 76 92 e7 f3 07
971
+ 00000a0 8b fc 9c ca cc 08 cc cb cc 5e d2 1c 88 d9 7e bb
972
+ 00000b0 4f bb 3a 3f 75 f1 5d 7f 8f c2 68 67 77 8f 25 ff
973
+ 00000c0 84 e2 93 2d ef a4 95 3d 71 4e 2c b9 b0 87 c3 be
974
+ 00000d0 3d f8 a7 60 24 61 c5 ef ae 9e c8 6c 6d 4e 69 c8
975
+ 00000e0 67 65 34 f8 37 76 2d 76 5c 54 f3 95 65 49 c7 0f
976
+ 00000f0 18 71 4b 7e 5b 6a d1 79 47 61 41 b0 4e 2a 74 45
977
+ 0000100 43 58 12 b2 5a a5 c6 7d 68 55 88 d4 98 75 18 6d
978
+ 0000110 08 d1 1f 8f 5a 9e 96 ee 45 cf a4 84 4e 4b e8 50
979
+ 0000120 a7 13 d9 06 de 52 81 97 36 b2 d7 b8 fc 2b 5f 55
980
+ 0000130 23 1f 32 59 cf 30 27 fb e2 8a b9 de 45 dd 63 9c
981
+ 0000140 4b b5 8b 96 4c 7a 62 62 cc a1 a7 cf fa f1 fe dd
982
+ 0000150 54 62 11 bf 36 78 b3 c7 b1 b5 f2 61 4d 4e dd 66
983
+ 0000160 32 2e e6 70 34 5f f4 c9 e6 6c 43 6f da 6b c6 c3
984
+ 0000170 09 2c ce 09 57 7f d2 7e b4 23 ba 7c 1b 99 bc 22
985
+ 0000180 3e f1 de 91 2f e3 9c 1b 82 cc c2 84 39 aa e6 de
986
+ 0000190 b4 69 fc cc cb 72 a6 61 45 f0 d3 1d 26 19 7c 8d
987
+ 00001a0 29 c8 66 02 be 77 6a f9 3d 34 79 17 19 c8 96 24
988
+ 00001b0 a3 ac e4 dd 3b 1a 8e c6 fe 96 38 6b bf 67 5a 23
989
+ 00001c0 f4 16 f4 e6 8a b4 fc c2 cd bf 95 66 1d bb 35 aa
990
+ 00001d0 92 7d 66 d8 08 8d a5 1f 54 2a af 09 cf 61 ff d2
991
+ 00001e0 85 9d 8f b6 d7 88 07 4a 86 03 db 64 f3 d9 92 73
992
+ 00001f0 df ec a7 fc 23 4c 8d 83 79 63 2a d9 fd 8d b3 c8
993
+ 0000200 8f 7e d4 19 85 e6 8d 1c 76 f0 8b 58 32 fd 9a d6
994
+ 0000210 85 e2 48 ad c3 d5 60 6f 7e 22 dd ef 09 49 7c 7f
995
+ 0000220 3a 45 c3 71 b7 df f3 4c 63 fb b5 d9 31 5f 6e d6
996
+ 0000230 24 1d a4 4a fe 32 a7 5c 16 48 5c 3e 08 6b 8a d3
997
+ 0000240 25 1d a2 12 a5 59 24 ea 20 5f 52 6d ad 94 db 6b
998
+ 0000250 94 b9 5d eb 4b a7 5c 44 bb 1e f2 3c 6b cf 52 c9
999
+ 0000260 e9 e5 ba 06 b9 c4 e5 0a d0 00 0d d0 00 0d d0 00
1000
+ 0000270 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d
1001
+ 0000280 d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0
1002
+ 0000290 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00 0d d0 00
1003
+ 00002a0 0d d0 00 cd ff 9e 46 86 fa a7 7d 3a 43 d7 8e 10
1004
+ 00002b0 52 e9 be e6 6e cf eb 9e 85 4d 65 ce cc 30 c1 44
1005
+ 00002c0 c0 4e af bc 9c 6c 4b a0 d7 54 ff 1d d5 5c 89 fb
1006
+ 00002d0 b5 34 7e c4 c2 9e f5 a0 f6 5b 7e 6e ca 73 c7 ef
1007
+ 00002e0 5d be de f9 e8 81 eb a5 0a a5 63 54 2c d7 1c d1
1008
+ 00002f0 89 17 85 f8 16 94 f2 8a b2 a3 f5 b6 6d df 75 cd
1009
+ 0000300 90 dd 64 bd 5d 55 4e f2 55 19 1b b7 cc ef 1b ea
1010
+ 0000310 2e 05 9c f4 aa 1e a8 cd a6 82 c7 59 0f 5e 9d e0
1011
+ 0000320 bb fc 6c d6 99 23 eb 36 ad c6 c5 e1 d8 e1 e2 3e
1012
+ 0000330 d9 90 5a f7 91 5d 6f bc 33 6d 98 47 d2 7c 2e 2f
1013
+ 0000340 99 a4 25 72 85 49 2c be 0b 5b af 8f e5 6e 81 a6
1014
+ 0000350 a3 5a 6f 39 53 3a ab 7a 8b 1e 26 f7 46 6c 7d 26
1015
+ 0000360 53 b3 22 31 94 d3 83 f2 18 4d f5 92 33 27 53 97
1016
+ 0000370 0f d3 e6 55 9c a6 c5 31 87 6f d3 f3 ae 39 6f 56
1017
+ 0000380 10 7b ab 7e d0 b4 ca f2 b8 05 be 3f 0e 6e 5a 75
1018
+ 0000390 ab 0c f5 37 0e ba 8e 75 71 7a aa ed 7a dd 6a 63
1019
+ 00003a0 be 9b a0 97 27 6a 6f e7 d3 8b c4 7c ec d3 91 56
1020
+ 00003b0 d9 ac 5e bf 16 42 2f 00 1f 93 a2 23 87 bd e2 59
1021
+ 00003c0 a0 de 1a 66 c8 62 eb 55 8f 91 17 b4 61 42 7a 50
1022
+ 00003d0 40 03 34 40 03 34 40 03 34 40 03 34 40 03 34 40
1023
+ 00003e0 03 34 40 03 34 40 03 34 40 03 34 40 03 34 40 03
1024
+ 00003f0 34 40 03 34 40 03 34 ff 85 86 90 8b ea 67 90 0d
1025
+ 0000400 e1 42 1b d2 61 d6 79 ec fd 3e 44 28 a4 51 6c 5c
1026
+ 0000410 fc d2 72 ca ba 82 18 46 16 61 cd 93 a9 0f d1 24
1027
+ 0000420 17 99 e2 2c 71 16 84 0c c8 7a 13 0f 9a 5e c5 f0
1028
+ 0000430 79 64 e2 12 4d c8 82 a1 81 19 2d aa 44 6d 87 54
1029
+ 0000440 84 71 c1 f6 d4 ca 25 8c 77 b9 08 c7 c8 5e 10 8a
1030
+ 0000450 8f 61 ed 8c ba 30 1f 79 9a c7 60 34 2b b9 8c f8
1031
+ 0000460 18 a6 83 1b e3 9f ad 79 fe fd 1b 8b f1 fc 41 6f
1032
+ 0000470 d4 13 1f e3 b8 83 ba 64 92 e7 eb e4 77 05 8f ba
1033
+ 0000480 fa 3b 00 00 ff ff 50 4b 07 08 a6 18 b1 91 5e 04
1034
+ 0000490 00 00 e4 47 00 00 50 4b 01 02 14 00 14 00 08 00
1035
+ 00004a0 08 00 00 00 00 00 a6 18 b1 91 5e 04 00 00 e4 47
1036
+ 00004b0 00 00 0a 00 00 00 00 00 00 00 00 00 00 00 00 00
1037
+ 00004c0 00 00 00 00 62 69 67 67 65 72 2e 7a 69 70 50 4b
1038
+ 00004d0 05 06 00 00 00 00 01 00 01 00 38 00 00 00 96 04
1039
+ 00004e0 00 00 00 00`
1040
+ s = regexp.MustCompile(`[0-9a-f]{7}`).ReplaceAllString(s, "")
1041
+ s = regexp.MustCompile(`\s+`).ReplaceAllString(s, "")
1042
+ b, err := hex.DecodeString(s)
1043
+ if err != nil {
1044
+ panic(err)
1045
+ }
1046
+ return b
1047
+ }
1048
+
1049
+ func returnBigZipBytes() (r io.ReaderAt, size int64) {
1050
+ b := biggestZipBytes()
1051
+ for i := 0; i < 2; i++ {
1052
+ r, err := NewReader(bytes.NewReader(b), int64(len(b)))
1053
+ if err != nil {
1054
+ panic(err)
1055
+ }
1056
+ f, err := r.File[0].Open()
1057
+ if err != nil {
1058
+ panic(err)
1059
+ }
1060
+ b, err = io.ReadAll(f)
1061
+ if err != nil {
1062
+ panic(err)
1063
+ }
1064
+ }
1065
+ return bytes.NewReader(b), int64(len(b))
1066
+ }
1067
+
1068
+ func TestIssue8186(t *testing.T) {
1069
+ // Directory headers & data found in the TOC of a JAR file.
1070
+ dirEnts := []string{
1071
+ "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\xaa\x1b\x06\xf0\x81\x02\x00\x00\x81\x02\x00\x00-\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00res/drawable-xhdpi-v4/ic_actionbar_accept.png\xfe\xca\x00\x00\x00",
1072
+ "PK\x01\x02\n\x00\n\x00\x00\b\x00\x004\x9d3?\x90K\x89\xc7t\n\x00\x00t\n\x00\x00\x0e\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x02\x00\x00resources.arsc\x00\x00\x00",
1073
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xff$\x18\xed3\x03\x00\x00\xb4\b\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\r\x00\x00AndroidManifest.xml",
1074
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\x14\xc5K\xab\x192\x02\x00\xc8\xcd\x04\x00\v\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\x10\x00\x00classes.dex",
1075
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?E\x96\nD\xac\x01\x00\x00P\x03\x00\x00&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:C\x02\x00res/layout/actionbar_set_wallpaper.xml",
1076
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?Ļ\x14\xe3\xd8\x01\x00\x00\xd8\x03\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:E\x02\x00res/layout/wallpaper_cropper.xml",
1077
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?}\xc1\x15\x9eZ\x01\x00\x00!\x02\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`G\x02\x00META-INF/MANIFEST.MF",
1078
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xe6\x98Ьo\x01\x00\x00\x84\x02\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfcH\x02\x00META-INF/CERT.SF",
1079
+ "PK\x01\x02\x14\x00\x14\x00\b\b\b\x004\x9d3?\xbfP\x96b\x86\x04\x00\x00\xb2\x06\x00\x00\x11\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa9J\x02\x00META-INF/CERT.RSA",
1080
+ }
1081
+ for i, s := range dirEnts {
1082
+ var f File
1083
+ err := readDirectoryHeader(&f, strings.NewReader(s))
1084
+ if err != nil {
1085
+ t.Errorf("error reading #%d: %v", i, err)
1086
+ }
1087
+ }
1088
+ }
1089
+
1090
+ // Verify we return ErrUnexpectedEOF when length is short.
1091
+ func TestIssue10957(t *testing.T) {
1092
+ data := []byte("PK\x03\x040000000PK\x01\x0200000" +
1093
+ "0000000000000000000\x00" +
1094
+ "\x00\x00\x00\x00\x00000000000000PK\x01" +
1095
+ "\x020000000000000000000" +
1096
+ "00000\v\x00\x00\x00\x00\x00000000000" +
1097
+ "00000000000000PK\x01\x0200" +
1098
+ "00000000000000000000" +
1099
+ "00\v\x00\x00\x00\x00\x00000000000000" +
1100
+ "00000000000PK\x01\x020000<" +
1101
+ "0\x00\x0000000000000000\v\x00\v" +
1102
+ "\x00\x00\x00\x00\x0000000000\x00\x00\x00\x00000" +
1103
+ "00000000PK\x01\x0200000000" +
1104
+ "0000000000000000\v\x00\x00\x00" +
1105
+ "\x00\x0000PK\x05\x06000000\x05\x00\xfd\x00\x00\x00" +
1106
+ "\v\x00\x00\x00\x00\x00")
1107
+ z, err := NewReader(bytes.NewReader(data), int64(len(data)))
1108
+ if err != nil {
1109
+ t.Fatal(err)
1110
+ }
1111
+ for i, f := range z.File {
1112
+ r, err := f.Open()
1113
+ if err != nil {
1114
+ continue
1115
+ }
1116
+ if f.UncompressedSize64 < 1e6 {
1117
+ n, err := io.Copy(io.Discard, r)
1118
+ if i == 3 && err != io.ErrUnexpectedEOF {
1119
+ t.Errorf("File[3] error = %v; want io.ErrUnexpectedEOF", err)
1120
+ }
1121
+ if err == nil && uint64(n) != f.UncompressedSize64 {
1122
+ t.Errorf("file %d: bad size: copied=%d; want=%d", i, n, f.UncompressedSize64)
1123
+ }
1124
+ }
1125
+ r.Close()
1126
+ }
1127
+ }
1128
+
1129
+ // Verify that this particular malformed zip file is rejected.
1130
+ func TestIssue10956(t *testing.T) {
1131
+ data := []byte("PK\x06\x06PK\x06\a0000\x00\x00\x00\x00\x00\x00\x00\x00" +
1132
+ "0000PK\x05\x06000000000000" +
1133
+ "0000\v\x00000\x00\x00\x00\x00\x00\x00\x000")
1134
+ r, err := NewReader(bytes.NewReader(data), int64(len(data)))
1135
+ if err == nil {
1136
+ t.Errorf("got nil error, want ErrFormat")
1137
+ }
1138
+ if r != nil {
1139
+ t.Errorf("got non-nil Reader, want nil")
1140
+ }
1141
+ }
1142
+
1143
+ // Verify we return ErrUnexpectedEOF when reading truncated data descriptor.
1144
+ func TestIssue11146(t *testing.T) {
1145
+ data := []byte("PK\x03\x040000000000000000" +
1146
+ "000000\x01\x00\x00\x000\x01\x00\x00\xff\xff0000" +
1147
+ "0000000000000000PK\x01\x02" +
1148
+ "0000\b0\b\x00000000000000" +
1149
+ "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000000PK\x05\x06\x00\x00" +
1150
+ "\x00\x0000\x01\x00\x26\x00\x00\x008\x00\x00\x00\x00\x00")
1151
+ z, err := NewReader(bytes.NewReader(data), int64(len(data)))
1152
+ if err != nil {
1153
+ t.Fatal(err)
1154
+ }
1155
+ r, err := z.File[0].Open()
1156
+ if err != nil {
1157
+ t.Fatal(err)
1158
+ }
1159
+ _, err = io.ReadAll(r)
1160
+ if err != io.ErrUnexpectedEOF {
1161
+ t.Errorf("File[0] error = %v; want io.ErrUnexpectedEOF", err)
1162
+ }
1163
+ r.Close()
1164
+ }
1165
+
1166
+ // Verify we do not treat non-zip64 archives as zip64
1167
+ func TestIssue12449(t *testing.T) {
1168
+ data := []byte{
1169
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
1170
+ 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46, 0x00, 0x00,
1171
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1172
+ 0x00, 0x00, 0x03, 0x00, 0x18, 0x00, 0xca, 0x64,
1173
+ 0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
1174
+ 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
1175
+ 0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
1176
+ 0x00, 0x31, 0x31, 0x31, 0x32, 0x32, 0x32, 0x0a,
1177
+ 0x50, 0x4b, 0x07, 0x08, 0x1d, 0x88, 0x77, 0xb0,
1178
+ 0x07, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
1179
+ 0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x14, 0x00,
1180
+ 0x08, 0x00, 0x00, 0x00, 0x6b, 0xb4, 0xba, 0x46,
1181
+ 0x1d, 0x88, 0x77, 0xb0, 0x07, 0x00, 0x00, 0x00,
1182
+ 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x18, 0x00,
1183
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1184
+ 0xa0, 0x81, 0x00, 0x00, 0x00, 0x00, 0xca, 0x64,
1185
+ 0x55, 0x75, 0x78, 0x0b, 0x00, 0x50, 0x4b, 0x05,
1186
+ 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01,
1187
+ 0x00, 0x49, 0x00, 0x00, 0x00, 0x44, 0x00, 0x00,
1188
+ 0x00, 0x97, 0x2b, 0x49, 0x23, 0x05, 0xc5, 0x0b,
1189
+ 0xa7, 0xd1, 0x52, 0xa2, 0x9c, 0x50, 0x4b, 0x06,
1190
+ 0x07, 0xc8, 0x19, 0xc1, 0xaf, 0x94, 0x9c, 0x61,
1191
+ 0x44, 0xbe, 0x94, 0x19, 0x42, 0x58, 0x12, 0xc6,
1192
+ 0x5b, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
1193
+ 0x00, 0x01, 0x00, 0x01, 0x00, 0x69, 0x00, 0x00,
1194
+ 0x00, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00,
1195
+ }
1196
+ // Read in the archive.
1197
+ _, err := NewReader(bytes.NewReader(data), int64(len(data)))
1198
+ if err != nil {
1199
+ t.Errorf("Error reading the archive: %v", err)
1200
+ }
1201
+ }
1202
+
1203
+ func TestFS(t *testing.T) {
1204
+ for _, test := range []struct {
1205
+ file string
1206
+ want []string
1207
+ }{
1208
+ {
1209
+ "testdata/unix.zip",
1210
+ []string{"hello", "dir/bar", "readonly"},
1211
+ },
1212
+ {
1213
+ "testdata/subdir.zip",
1214
+ []string{"a/b/c"},
1215
+ },
1216
+ } {
1217
+ test := test
1218
+ t.Run(test.file, func(t *testing.T) {
1219
+ t.Parallel()
1220
+ z, err := OpenReader(test.file)
1221
+ if err != nil {
1222
+ t.Fatal(err)
1223
+ }
1224
+ defer z.Close()
1225
+ if err := fstest.TestFS(z, test.want...); err != nil {
1226
+ t.Error(err)
1227
+ }
1228
+ })
1229
+ }
1230
+ }
1231
+
1232
+ func TestFSWalk(t *testing.T) {
1233
+ for _, test := range []struct {
1234
+ file string
1235
+ want []string
1236
+ wantErr bool
1237
+ }{
1238
+ {
1239
+ file: "testdata/unix.zip",
1240
+ want: []string{".", "dir", "dir/bar", "dir/empty", "hello", "readonly"},
1241
+ },
1242
+ {
1243
+ file: "testdata/subdir.zip",
1244
+ want: []string{".", "a", "a/b", "a/b/c"},
1245
+ },
1246
+ {
1247
+ file: "testdata/dupdir.zip",
1248
+ wantErr: true,
1249
+ },
1250
+ } {
1251
+ test := test
1252
+ t.Run(test.file, func(t *testing.T) {
1253
+ t.Parallel()
1254
+ z, err := OpenReader(test.file)
1255
+ if err != nil {
1256
+ t.Fatal(err)
1257
+ }
1258
+ var files []string
1259
+ sawErr := false
1260
+ err = fs.WalkDir(z, ".", func(path string, d fs.DirEntry, err error) error {
1261
+ if err != nil {
1262
+ if !test.wantErr {
1263
+ t.Errorf("%s: %v", path, err)
1264
+ }
1265
+ sawErr = true
1266
+ return nil
1267
+ }
1268
+ files = append(files, path)
1269
+ return nil
1270
+ })
1271
+ if err != nil {
1272
+ t.Errorf("fs.WalkDir error: %v", err)
1273
+ }
1274
+ if test.wantErr && !sawErr {
1275
+ t.Error("succeeded but want error")
1276
+ } else if !test.wantErr && sawErr {
1277
+ t.Error("unexpected error")
1278
+ }
1279
+ if test.want != nil && !reflect.DeepEqual(files, test.want) {
1280
+ t.Errorf("got %v want %v", files, test.want)
1281
+ }
1282
+ })
1283
+ }
1284
+ }
1285
+
1286
+ func TestFSModTime(t *testing.T) {
1287
+ t.Parallel()
1288
+ z, err := OpenReader("testdata/subdir.zip")
1289
+ if err != nil {
1290
+ t.Fatal(err)
1291
+ }
1292
+ defer z.Close()
1293
+
1294
+ for _, test := range []struct {
1295
+ name string
1296
+ want time.Time
1297
+ }{
1298
+ {
1299
+ "a",
1300
+ time.Date(2021, 4, 19, 12, 29, 56, 0, timeZone(-7*time.Hour)).UTC(),
1301
+ },
1302
+ {
1303
+ "a/b/c",
1304
+ time.Date(2021, 4, 19, 12, 29, 59, 0, timeZone(-7*time.Hour)).UTC(),
1305
+ },
1306
+ } {
1307
+ fi, err := fs.Stat(z, test.name)
1308
+ if err != nil {
1309
+ t.Errorf("%s: %v", test.name, err)
1310
+ continue
1311
+ }
1312
+ if got := fi.ModTime(); !got.Equal(test.want) {
1313
+ t.Errorf("%s: got modtime %v, want %v", test.name, got, test.want)
1314
+ }
1315
+ }
1316
+ }
1317
+
1318
+ func TestCVE202127919(t *testing.T) {
1319
+ t.Setenv("GODEBUG", "zipinsecurepath=0")
1320
+ // Archive containing only the file "../test.txt"
1321
+ data := []byte{
1322
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
1323
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1324
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1325
+ 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e,
1326
+ 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78,
1327
+ 0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c,
1328
+ 0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51,
1329
+ 0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc,
1330
+ 0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff,
1331
+ 0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed,
1332
+ 0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00,
1333
+ 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14,
1334
+ 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
1335
+ 0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00,
1336
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00,
1337
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1338
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
1339
+ 0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74,
1340
+ 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
1341
+ 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
1342
+ 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
1343
+ }
1344
+ r, err := NewReader(bytes.NewReader(data), int64(len(data)))
1345
+ if err != ErrInsecurePath {
1346
+ t.Fatalf("Error reading the archive: %v", err)
1347
+ }
1348
+ _, err = r.Open("test.txt")
1349
+ if err != nil {
1350
+ t.Errorf("Error reading file: %v", err)
1351
+ }
1352
+ if len(r.File) != 1 {
1353
+ t.Fatalf("No entries in the file list")
1354
+ }
1355
+ if r.File[0].Name != "../test.txt" {
1356
+ t.Errorf("Unexpected entry name: %s", r.File[0].Name)
1357
+ }
1358
+ if _, err := r.File[0].Open(); err != nil {
1359
+ t.Errorf("Error opening file: %v", err)
1360
+ }
1361
+ }
1362
+
1363
+ func TestOpenReaderInsecurePath(t *testing.T) {
1364
+ t.Setenv("GODEBUG", "zipinsecurepath=0")
1365
+ // Archive containing only the file "../test.txt"
1366
+ data := []byte{
1367
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x00,
1368
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1369
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1370
+ 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x2e, 0x2e,
1371
+ 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74, 0x78,
1372
+ 0x74, 0x0a, 0xc9, 0xc8, 0x2c, 0x56, 0xc8, 0x2c,
1373
+ 0x56, 0x48, 0x54, 0x28, 0x49, 0x2d, 0x2e, 0x51,
1374
+ 0x28, 0x49, 0xad, 0x28, 0x51, 0x48, 0xcb, 0xcc,
1375
+ 0x49, 0xd5, 0xe3, 0x02, 0x04, 0x00, 0x00, 0xff,
1376
+ 0xff, 0x50, 0x4b, 0x07, 0x08, 0xc0, 0xd7, 0xed,
1377
+ 0xc3, 0x20, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00,
1378
+ 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00, 0x14,
1379
+ 0x00, 0x08, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
1380
+ 0x00, 0xc0, 0xd7, 0xed, 0xc3, 0x20, 0x00, 0x00,
1381
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00,
1382
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1383
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
1384
+ 0x2e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x74,
1385
+ 0x78, 0x74, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00,
1386
+ 0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x39, 0x00,
1387
+ 0x00, 0x00, 0x59, 0x00, 0x00, 0x00, 0x00, 0x00,
1388
+ }
1389
+
1390
+ // Read in the archive with the OpenReader interface
1391
+ name := filepath.Join(t.TempDir(), "test.zip")
1392
+ err := os.WriteFile(name, data, 0644)
1393
+ if err != nil {
1394
+ t.Fatalf("Unable to write out the bugos zip entry")
1395
+ }
1396
+ r, err := OpenReader(name)
1397
+ if r != nil {
1398
+ defer r.Close()
1399
+ }
1400
+
1401
+ if err != ErrInsecurePath {
1402
+ t.Fatalf("Error reading the archive, we expected ErrInsecurePath but got: %v", err)
1403
+ }
1404
+ _, err = r.Open("test.txt")
1405
+ if err != nil {
1406
+ t.Errorf("Error reading file: %v", err)
1407
+ }
1408
+ if len(r.File) != 1 {
1409
+ t.Fatalf("No entries in the file list")
1410
+ }
1411
+ if r.File[0].Name != "../test.txt" {
1412
+ t.Errorf("Unexpected entry name: %s", r.File[0].Name)
1413
+ }
1414
+ if _, err := r.File[0].Open(); err != nil {
1415
+ t.Errorf("Error opening file: %v", err)
1416
+ }
1417
+ }
1418
+
1419
+ func TestCVE202133196(t *testing.T) {
1420
+ // Archive that indicates it has 1 << 128 -1 files,
1421
+ // this would previously cause a panic due to attempting
1422
+ // to allocate a slice with 1 << 128 -1 elements.
1423
+ data := []byte{
1424
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x08,
1425
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1426
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1427
+ 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x02,
1428
+ 0x03, 0x62, 0x61, 0x65, 0x03, 0x04, 0x00, 0x00,
1429
+ 0xff, 0xff, 0x50, 0x4b, 0x07, 0x08, 0xbe, 0x20,
1430
+ 0x5c, 0x6c, 0x09, 0x00, 0x00, 0x00, 0x03, 0x00,
1431
+ 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00,
1432
+ 0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00,
1433
+ 0x00, 0x00, 0xbe, 0x20, 0x5c, 0x6c, 0x09, 0x00,
1434
+ 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00,
1435
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1436
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1437
+ 0x01, 0x02, 0x03, 0x50, 0x4b, 0x06, 0x06, 0x2c,
1438
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d,
1439
+ 0x00, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1440
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1441
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff,
1442
+ 0xff, 0xff, 0xff, 0x31, 0x00, 0x00, 0x00, 0x00,
1443
+ 0x00, 0x00, 0x00, 0x3a, 0x00, 0x00, 0x00, 0x00,
1444
+ 0x00, 0x00, 0x00, 0x50, 0x4b, 0x06, 0x07, 0x00,
1445
+ 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00,
1446
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50,
1447
+ 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0xff,
1448
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1449
+ 0xff, 0xff, 0xff, 0x00, 0x00,
1450
+ }
1451
+ _, err := NewReader(bytes.NewReader(data), int64(len(data)))
1452
+ if err != ErrFormat {
1453
+ t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
1454
+ }
1455
+
1456
+ // Also check that an archive containing a handful of empty
1457
+ // files doesn't cause an issue
1458
+ b := bytes.NewBuffer(nil)
1459
+ w := NewWriter(b)
1460
+ for i := 0; i < 5; i++ {
1461
+ _, err := w.Create("")
1462
+ if err != nil {
1463
+ t.Fatalf("Writer.Create failed: %s", err)
1464
+ }
1465
+ }
1466
+ if err := w.Close(); err != nil {
1467
+ t.Fatalf("Writer.Close failed: %s", err)
1468
+ }
1469
+ r, err := NewReader(bytes.NewReader(b.Bytes()), int64(b.Len()))
1470
+ if err != nil {
1471
+ t.Fatalf("NewReader failed: %s", err)
1472
+ }
1473
+ if len(r.File) != 5 {
1474
+ t.Errorf("Archive has unexpected number of files, got %d, want 5", len(r.File))
1475
+ }
1476
+ }
1477
+
1478
+ func TestCVE202139293(t *testing.T) {
1479
+ // directory size is so large, that the check in Reader.init
1480
+ // overflows when subtracting from the archive size, causing
1481
+ // the pre-allocation check to be bypassed.
1482
+ data := []byte{
1483
+ 0x50, 0x4b, 0x06, 0x06, 0x05, 0x06, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b,
1484
+ 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
1485
+ 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b,
1486
+ 0x06, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
1487
+ 0x00, 0x00, 0x00, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x31, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
1488
+ 0xff, 0x50, 0xfe, 0x00, 0xff, 0x00, 0x3a, 0x00, 0x00, 0x00, 0xff,
1489
+ }
1490
+ _, err := NewReader(bytes.NewReader(data), int64(len(data)))
1491
+ if err != ErrFormat {
1492
+ t.Fatalf("unexpected error, got: %v, want: %v", err, ErrFormat)
1493
+ }
1494
+ }
1495
+
1496
+ func TestCVE202141772(t *testing.T) {
1497
+ t.Setenv("GODEBUG", "zipinsecurepath=0")
1498
+ // Archive contains a file whose name is exclusively made up of '/', '\'
1499
+ // characters, or "../", "..\" paths, which would previously cause a panic.
1500
+ //
1501
+ // Length Method Size Cmpr Date Time CRC-32 Name
1502
+ // -------- ------ ------- ---- ---------- ----- -------- ----
1503
+ // 0 Stored 0 0% 08-05-2021 18:32 00000000 /
1504
+ // 0 Stored 0 0% 09-14-2021 12:59 00000000 //
1505
+ // 0 Stored 0 0% 09-14-2021 12:59 00000000 \
1506
+ // 11 Stored 11 0% 09-14-2021 13:04 0d4a1185 /test.txt
1507
+ // -------- ------- --- -------
1508
+ // 11 11 0% 4 files
1509
+ data := []byte{
1510
+ 0x50, 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x08,
1511
+ 0x00, 0x00, 0x06, 0x94, 0x05, 0x53, 0x00, 0x00,
1512
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1513
+ 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2f, 0x50,
1514
+ 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00,
1515
+ 0x00, 0x78, 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00,
1516
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1517
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x2f, 0x2f, 0x50,
1518
+ 0x4b, 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00,
1519
+ 0x00, 0x78, 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00,
1520
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1521
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x5c, 0x50, 0x4b,
1522
+ 0x03, 0x04, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00,
1523
+ 0x91, 0x68, 0x2e, 0x53, 0x85, 0x11, 0x4a, 0x0d,
1524
+ 0x0b, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00,
1525
+ 0x09, 0x00, 0x00, 0x00, 0x2f, 0x74, 0x65, 0x73,
1526
+ 0x74, 0x2e, 0x74, 0x78, 0x74, 0x68, 0x65, 0x6c,
1527
+ 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64,
1528
+ 0x50, 0x4b, 0x01, 0x02, 0x14, 0x03, 0x0a, 0x00,
1529
+ 0x00, 0x08, 0x00, 0x00, 0x06, 0x94, 0x05, 0x53,
1530
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1531
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
1532
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
1533
+ 0xed, 0x41, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x50,
1534
+ 0x4b, 0x01, 0x02, 0x3f, 0x00, 0x0a, 0x00, 0x00,
1535
+ 0x00, 0x00, 0x00, 0x78, 0x67, 0x2e, 0x53, 0x00,
1536
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1537
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x00,
1538
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
1539
+ 0x00, 0x1f, 0x00, 0x00, 0x00, 0x2f, 0x2f, 0x0a,
1540
+ 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
1541
+ 0x00, 0x18, 0x00, 0x93, 0x98, 0x25, 0x57, 0x25,
1542
+ 0xa9, 0xd7, 0x01, 0x93, 0x98, 0x25, 0x57, 0x25,
1543
+ 0xa9, 0xd7, 0x01, 0x93, 0x98, 0x25, 0x57, 0x25,
1544
+ 0xa9, 0xd7, 0x01, 0x50, 0x4b, 0x01, 0x02, 0x3f,
1545
+ 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78,
1546
+ 0x67, 0x2e, 0x53, 0x00, 0x00, 0x00, 0x00, 0x00,
1547
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
1548
+ 0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1549
+ 0x00, 0x20, 0x00, 0x00, 0x00, 0x3f, 0x00, 0x00,
1550
+ 0x00, 0x5c, 0x0a, 0x00, 0x20, 0x00, 0x00, 0x00,
1551
+ 0x00, 0x00, 0x01, 0x00, 0x18, 0x00, 0x93, 0x98,
1552
+ 0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x93, 0x98,
1553
+ 0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x93, 0x98,
1554
+ 0x25, 0x57, 0x25, 0xa9, 0xd7, 0x01, 0x50, 0x4b,
1555
+ 0x01, 0x02, 0x3f, 0x00, 0x0a, 0x00, 0x00, 0x00,
1556
+ 0x00, 0x00, 0x91, 0x68, 0x2e, 0x53, 0x85, 0x11,
1557
+ 0x4a, 0x0d, 0x0b, 0x00, 0x00, 0x00, 0x0b, 0x00,
1558
+ 0x00, 0x00, 0x09, 0x00, 0x24, 0x00, 0x00, 0x00,
1559
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
1560
+ 0x5e, 0x00, 0x00, 0x00, 0x2f, 0x74, 0x65, 0x73,
1561
+ 0x74, 0x2e, 0x74, 0x78, 0x74, 0x0a, 0x00, 0x20,
1562
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x18,
1563
+ 0x00, 0xa9, 0x80, 0x51, 0x01, 0x26, 0xa9, 0xd7,
1564
+ 0x01, 0x31, 0xd1, 0x57, 0x01, 0x26, 0xa9, 0xd7,
1565
+ 0x01, 0xdf, 0x48, 0x85, 0xf9, 0x25, 0xa9, 0xd7,
1566
+ 0x01, 0x50, 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00,
1567
+ 0x00, 0x04, 0x00, 0x04, 0x00, 0x31, 0x01, 0x00,
1568
+ 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00,
1569
+ }
1570
+ r, err := NewReader(bytes.NewReader(data), int64(len(data)))
1571
+ if err != ErrInsecurePath {
1572
+ t.Fatalf("Error reading the archive: %v", err)
1573
+ }
1574
+ entryNames := []string{`/`, `//`, `\`, `/test.txt`}
1575
+ var names []string
1576
+ for _, f := range r.File {
1577
+ names = append(names, f.Name)
1578
+ if _, err := f.Open(); err != nil {
1579
+ t.Errorf("Error opening %q: %v", f.Name, err)
1580
+ }
1581
+ if _, err := r.Open(f.Name); err == nil {
1582
+ t.Errorf("Opening %q with fs.FS API succeeded", f.Name)
1583
+ }
1584
+ }
1585
+ if !reflect.DeepEqual(names, entryNames) {
1586
+ t.Errorf("Unexpected file entries: %q", names)
1587
+ }
1588
+ if _, err := r.Open(""); err == nil {
1589
+ t.Errorf("Opening %q with fs.FS API succeeded", "")
1590
+ }
1591
+ if _, err := r.Open("test.txt"); err != nil {
1592
+ t.Errorf("Error opening %q with fs.FS API: %v", "test.txt", err)
1593
+ }
1594
+ dirEntries, err := fs.ReadDir(r, ".")
1595
+ if err != nil {
1596
+ t.Fatalf("Error reading the root directory: %v", err)
1597
+ }
1598
+ if len(dirEntries) != 1 || dirEntries[0].Name() != "test.txt" {
1599
+ t.Errorf("Unexpected directory entries")
1600
+ for _, dirEntry := range dirEntries {
1601
+ _, err := r.Open(dirEntry.Name())
1602
+ t.Logf("%q (Open error: %v)", dirEntry.Name(), err)
1603
+ }
1604
+ t.FailNow()
1605
+ }
1606
+ info, err := dirEntries[0].Info()
1607
+ if err != nil {
1608
+ t.Fatalf("Error reading info entry: %v", err)
1609
+ }
1610
+ if name := info.Name(); name != "test.txt" {
1611
+ t.Errorf("Inconsistent name in info entry: %v", name)
1612
+ }
1613
+ }
1614
+
1615
+ func TestUnderSize(t *testing.T) {
1616
+ z, err := OpenReader("testdata/readme.zip")
1617
+ if err != nil {
1618
+ t.Fatal(err)
1619
+ }
1620
+ defer z.Close()
1621
+
1622
+ for _, f := range z.File {
1623
+ f.UncompressedSize64 = 1
1624
+ }
1625
+
1626
+ for _, f := range z.File {
1627
+ t.Run(f.Name, func(t *testing.T) {
1628
+ rd, err := f.Open()
1629
+ if err != nil {
1630
+ t.Fatal(err)
1631
+ }
1632
+ defer rd.Close()
1633
+
1634
+ _, err = io.Copy(io.Discard, rd)
1635
+ if err != ErrFormat {
1636
+ t.Fatalf("Error mismatch\n\tGot: %v\n\tWant: %v", err, ErrFormat)
1637
+ }
1638
+ })
1639
+ }
1640
+ }
1641
+
1642
+ func TestIssue54801(t *testing.T) {
1643
+ for _, input := range []string{"testdata/readme.zip", "testdata/dd.zip"} {
1644
+ z, err := OpenReader(input)
1645
+ if err != nil {
1646
+ t.Fatal(err)
1647
+ }
1648
+ defer z.Close()
1649
+
1650
+ for _, f := range z.File {
1651
+ // Make file a directory
1652
+ f.Name += "/"
1653
+
1654
+ t.Run(f.Name, func(t *testing.T) {
1655
+ t.Logf("CompressedSize64: %d, Flags: %#x", f.CompressedSize64, f.Flags)
1656
+
1657
+ rd, err := f.Open()
1658
+ if err != nil {
1659
+ t.Fatal(err)
1660
+ }
1661
+ defer rd.Close()
1662
+
1663
+ n, got := io.Copy(io.Discard, rd)
1664
+ if n != 0 || got != ErrFormat {
1665
+ t.Fatalf("Error mismatch, got: %d, %v, want: %v", n, got, ErrFormat)
1666
+ }
1667
+ })
1668
+ }
1669
+ }
1670
+ }
1671
+
1672
+ func TestInsecurePaths(t *testing.T) {
1673
+ t.Setenv("GODEBUG", "zipinsecurepath=0")
1674
+ for _, path := range []string{
1675
+ "../foo",
1676
+ "/foo",
1677
+ "a/b/../../../c",
1678
+ `a\b`,
1679
+ } {
1680
+ var buf bytes.Buffer
1681
+ zw := NewWriter(&buf)
1682
+ _, err := zw.Create(path)
1683
+ if err != nil {
1684
+ t.Errorf("zw.Create(%q) = %v", path, err)
1685
+ continue
1686
+ }
1687
+ zw.Close()
1688
+
1689
+ zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
1690
+ if err != ErrInsecurePath {
1691
+ t.Errorf("NewReader for archive with file %q: got err %v, want ErrInsecurePath", path, err)
1692
+ continue
1693
+ }
1694
+ var gotPaths []string
1695
+ for _, f := range zr.File {
1696
+ gotPaths = append(gotPaths, f.Name)
1697
+ }
1698
+ if !reflect.DeepEqual(gotPaths, []string{path}) {
1699
+ t.Errorf("NewReader for archive with file %q: got files %q", path, gotPaths)
1700
+ continue
1701
+ }
1702
+ }
1703
+ }
1704
+
1705
+ func TestDisableInsecurePathCheck(t *testing.T) {
1706
+ t.Setenv("GODEBUG", "zipinsecurepath=1")
1707
+ var buf bytes.Buffer
1708
+ zw := NewWriter(&buf)
1709
+ const name = "/foo"
1710
+ _, err := zw.Create(name)
1711
+ if err != nil {
1712
+ t.Fatalf("zw.Create(%q) = %v", name, err)
1713
+ }
1714
+ zw.Close()
1715
+ zr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
1716
+ if err != nil {
1717
+ t.Fatalf("NewReader with zipinsecurepath=1: got err %v, want nil", err)
1718
+ }
1719
+ var gotPaths []string
1720
+ for _, f := range zr.File {
1721
+ gotPaths = append(gotPaths, f.Name)
1722
+ }
1723
+ if want := []string{name}; !reflect.DeepEqual(gotPaths, want) {
1724
+ t.Errorf("NewReader with zipinsecurepath=1: got files %q, want %q", gotPaths, want)
1725
+ }
1726
+ }
1727
+
1728
+ func TestCompressedDirectory(t *testing.T) {
1729
+ // Empty Java JAR, with a compressed directory with uncompressed size 0
1730
+ // which should not fail.
1731
+ //
1732
+ // Length Method Size Cmpr Date Time CRC-32 Name
1733
+ // -------- ------ ------- ---- ---------- ----- -------- ----
1734
+ // 0 Defl:N 2 0% 12-01-2022 16:50 00000000 META-INF/
1735
+ // 60 Defl:N 59 2% 12-01-2022 16:50 af937e93 META-INF/MANIFEST.MF
1736
+ // -------- ------- --- -------
1737
+ // 60 61 -2% 2 files
1738
+ data := []byte{
1739
+ 0x50, 0x4b, 0x03, 0x04, 0x14, 0x00, 0x08, 0x08,
1740
+ 0x08, 0x00, 0x49, 0x86, 0x81, 0x55, 0x00, 0x00,
1741
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1742
+ 0x00, 0x00, 0x09, 0x00, 0x04, 0x00, 0x4d, 0x45,
1743
+ 0x54, 0x41, 0x2d, 0x49, 0x4e, 0x46, 0x2f, 0xfe,
1744
+ 0xca, 0x00, 0x00, 0x03, 0x00, 0x50, 0x4b, 0x07,
1745
+ 0x08, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
1746
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x4b, 0x03,
1747
+ 0x04, 0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x49,
1748
+ 0x86, 0x81, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00,
1749
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14,
1750
+ 0x00, 0x00, 0x00, 0x4d, 0x45, 0x54, 0x41, 0x2d,
1751
+ 0x49, 0x4e, 0x46, 0x2f, 0x4d, 0x41, 0x4e, 0x49,
1752
+ 0x46, 0x45, 0x53, 0x54, 0x2e, 0x4d, 0x46, 0xf3,
1753
+ 0x4d, 0xcc, 0xcb, 0x4c, 0x4b, 0x2d, 0x2e, 0xd1,
1754
+ 0x0d, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0xb3,
1755
+ 0x52, 0x30, 0xd4, 0x33, 0xe0, 0xe5, 0x72, 0x2e,
1756
+ 0x4a, 0x4d, 0x2c, 0x49, 0x4d, 0xd1, 0x75, 0xaa,
1757
+ 0x04, 0x0a, 0x00, 0x45, 0xf4, 0x0c, 0x8d, 0x15,
1758
+ 0x34, 0xdc, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0x15,
1759
+ 0x3c, 0xf3, 0x92, 0xf5, 0x34, 0x79, 0xb9, 0x78,
1760
+ 0xb9, 0x00, 0x50, 0x4b, 0x07, 0x08, 0x93, 0x7e,
1761
+ 0x93, 0xaf, 0x3b, 0x00, 0x00, 0x00, 0x3c, 0x00,
1762
+ 0x00, 0x00, 0x50, 0x4b, 0x01, 0x02, 0x14, 0x00,
1763
+ 0x14, 0x00, 0x08, 0x08, 0x08, 0x00, 0x49, 0x86,
1764
+ 0x81, 0x55, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
1765
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00,
1766
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1767
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1768
+ 0x4d, 0x45, 0x54, 0x41, 0x2d, 0x49, 0x4e, 0x46,
1769
+ 0x2f, 0xfe, 0xca, 0x00, 0x00, 0x50, 0x4b, 0x01,
1770
+ 0x02, 0x14, 0x00, 0x14, 0x00, 0x08, 0x08, 0x08,
1771
+ 0x00, 0x49, 0x86, 0x81, 0x55, 0x93, 0x7e, 0x93,
1772
+ 0xaf, 0x3b, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00,
1773
+ 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1774
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d,
1775
+ 0x00, 0x00, 0x00, 0x4d, 0x45, 0x54, 0x41, 0x2d,
1776
+ 0x49, 0x4e, 0x46, 0x2f, 0x4d, 0x41, 0x4e, 0x49,
1777
+ 0x46, 0x45, 0x53, 0x54, 0x2e, 0x4d, 0x46, 0x50,
1778
+ 0x4b, 0x05, 0x06, 0x00, 0x00, 0x00, 0x00, 0x02,
1779
+ 0x00, 0x02, 0x00, 0x7d, 0x00, 0x00, 0x00, 0xba,
1780
+ 0x00, 0x00, 0x00, 0x00, 0x00,
1781
+ }
1782
+ r, err := NewReader(bytes.NewReader(data), int64(len(data)))
1783
+ if err != nil {
1784
+ t.Fatalf("unexpected error: %v", err)
1785
+ }
1786
+ for _, f := range r.File {
1787
+ r, err := f.Open()
1788
+ if err != nil {
1789
+ t.Fatalf("unexpected error: %v", err)
1790
+ }
1791
+ if _, err := io.Copy(io.Discard, r); err != nil {
1792
+ t.Fatalf("unexpected error: %v", err)
1793
+ }
1794
+ }
1795
+ }
1796
+
1797
+ func TestBaseOffsetPlusOverflow(t *testing.T) {
1798
+ // directoryOffset > maxInt64 && size-directoryOffset < 0
1799
+ data := []byte{
1800
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1801
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1802
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1803
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1804
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1805
+ 0xff, 0xff, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1806
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1807
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1808
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1809
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1810
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1811
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1812
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1813
+ 0x20, 0x20, 0x20, 0x50, 0x4b, 0x06, 0x06, 0x20,
1814
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1815
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1816
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1817
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1818
+ 0x20, 0xff, 0xff, 0x20, 0x00, 0x00, 0x00, 0x00,
1819
+ 0x00, 0x00, 0x00, 0x20, 0x08, 0x00, 0x00, 0x00,
1820
+ 0x00, 0x00, 0x80, 0x50, 0x4b, 0x06, 0x07, 0x00,
1821
+ 0x00, 0x00, 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00,
1822
+ 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x50,
1823
+ 0x4b, 0x05, 0x06, 0x20, 0x20, 0x20, 0x20, 0xff,
1824
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1825
+ 0xff, 0xff, 0xff, 0x20, 0x00,
1826
+ }
1827
+ defer func() {
1828
+ if r := recover(); r != nil {
1829
+ t.Fatalf("NewReader panicked: %s", r)
1830
+ }
1831
+ }()
1832
+ // Previously, this would trigger a panic as we attempt to read from
1833
+ // an io.SectionReader which would access a slice at a negative offset
1834
+ // as the section reader offset & size were < 0.
1835
+ NewReader(bytes.NewReader(data), int64(len(data))+1875)
1836
+ }
platform/dbops/binaries/go/go/src/archive/zip/register.go ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2010 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package zip
6
+
7
+ import (
8
+ "compress/flate"
9
+ "errors"
10
+ "io"
11
+ "sync"
12
+ )
13
+
14
+ // A Compressor returns a new compressing writer, writing to w.
15
+ // The WriteCloser's Close method must be used to flush pending data to w.
16
+ // The Compressor itself must be safe to invoke from multiple goroutines
17
+ // simultaneously, but each returned writer will be used only by
18
+ // one goroutine at a time.
19
+ type Compressor func(w io.Writer) (io.WriteCloser, error)
20
+
21
+ // A Decompressor returns a new decompressing reader, reading from r.
22
+ // The [io.ReadCloser]'s Close method must be used to release associated resources.
23
+ // The Decompressor itself must be safe to invoke from multiple goroutines
24
+ // simultaneously, but each returned reader will be used only by
25
+ // one goroutine at a time.
26
+ type Decompressor func(r io.Reader) io.ReadCloser
27
+
28
+ var flateWriterPool sync.Pool
29
+
30
+ func newFlateWriter(w io.Writer) io.WriteCloser {
31
+ fw, ok := flateWriterPool.Get().(*flate.Writer)
32
+ if ok {
33
+ fw.Reset(w)
34
+ } else {
35
+ fw, _ = flate.NewWriter(w, 5)
36
+ }
37
+ return &pooledFlateWriter{fw: fw}
38
+ }
39
+
40
+ type pooledFlateWriter struct {
41
+ mu sync.Mutex // guards Close and Write
42
+ fw *flate.Writer
43
+ }
44
+
45
+ func (w *pooledFlateWriter) Write(p []byte) (n int, err error) {
46
+ w.mu.Lock()
47
+ defer w.mu.Unlock()
48
+ if w.fw == nil {
49
+ return 0, errors.New("Write after Close")
50
+ }
51
+ return w.fw.Write(p)
52
+ }
53
+
54
+ func (w *pooledFlateWriter) Close() error {
55
+ w.mu.Lock()
56
+ defer w.mu.Unlock()
57
+ var err error
58
+ if w.fw != nil {
59
+ err = w.fw.Close()
60
+ flateWriterPool.Put(w.fw)
61
+ w.fw = nil
62
+ }
63
+ return err
64
+ }
65
+
66
+ var flateReaderPool sync.Pool
67
+
68
+ func newFlateReader(r io.Reader) io.ReadCloser {
69
+ fr, ok := flateReaderPool.Get().(io.ReadCloser)
70
+ if ok {
71
+ fr.(flate.Resetter).Reset(r, nil)
72
+ } else {
73
+ fr = flate.NewReader(r)
74
+ }
75
+ return &pooledFlateReader{fr: fr}
76
+ }
77
+
78
+ type pooledFlateReader struct {
79
+ mu sync.Mutex // guards Close and Read
80
+ fr io.ReadCloser
81
+ }
82
+
83
+ func (r *pooledFlateReader) Read(p []byte) (n int, err error) {
84
+ r.mu.Lock()
85
+ defer r.mu.Unlock()
86
+ if r.fr == nil {
87
+ return 0, errors.New("Read after Close")
88
+ }
89
+ return r.fr.Read(p)
90
+ }
91
+
92
+ func (r *pooledFlateReader) Close() error {
93
+ r.mu.Lock()
94
+ defer r.mu.Unlock()
95
+ var err error
96
+ if r.fr != nil {
97
+ err = r.fr.Close()
98
+ flateReaderPool.Put(r.fr)
99
+ r.fr = nil
100
+ }
101
+ return err
102
+ }
103
+
104
+ var (
105
+ compressors sync.Map // map[uint16]Compressor
106
+ decompressors sync.Map // map[uint16]Decompressor
107
+ )
108
+
109
+ func init() {
110
+ compressors.Store(Store, Compressor(func(w io.Writer) (io.WriteCloser, error) { return &nopCloser{w}, nil }))
111
+ compressors.Store(Deflate, Compressor(func(w io.Writer) (io.WriteCloser, error) { return newFlateWriter(w), nil }))
112
+
113
+ decompressors.Store(Store, Decompressor(io.NopCloser))
114
+ decompressors.Store(Deflate, Decompressor(newFlateReader))
115
+ }
116
+
117
+ // RegisterDecompressor allows custom decompressors for a specified method ID.
118
+ // The common methods [Store] and [Deflate] are built in.
119
+ func RegisterDecompressor(method uint16, dcomp Decompressor) {
120
+ if _, dup := decompressors.LoadOrStore(method, dcomp); dup {
121
+ panic("decompressor already registered")
122
+ }
123
+ }
124
+
125
+ // RegisterCompressor registers custom compressors for a specified method ID.
126
+ // The common methods [Store] and [Deflate] are built in.
127
+ func RegisterCompressor(method uint16, comp Compressor) {
128
+ if _, dup := compressors.LoadOrStore(method, comp); dup {
129
+ panic("compressor already registered")
130
+ }
131
+ }
132
+
133
+ func compressor(method uint16) Compressor {
134
+ ci, ok := compressors.Load(method)
135
+ if !ok {
136
+ return nil
137
+ }
138
+ return ci.(Compressor)
139
+ }
140
+
141
+ func decompressor(method uint16) Decompressor {
142
+ di, ok := decompressors.Load(method)
143
+ if !ok {
144
+ return nil
145
+ }
146
+ return di.(Decompressor)
147
+ }
platform/dbops/binaries/go/go/src/archive/zip/struct.go ADDED
@@ -0,0 +1,419 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2010 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ /*
6
+ Package zip provides support for reading and writing ZIP archives.
7
+
8
+ See the [ZIP specification] for details.
9
+
10
+ This package does not support disk spanning.
11
+
12
+ A note about ZIP64:
13
+
14
+ To be backwards compatible the FileHeader has both 32 and 64 bit Size
15
+ fields. The 64 bit fields will always contain the correct value and
16
+ for normal archives both fields will be the same. For files requiring
17
+ the ZIP64 format the 32 bit fields will be 0xffffffff and the 64 bit
18
+ fields must be used instead.
19
+
20
+ [ZIP specification]: https://support.pkware.com/pkzip/appnote
21
+ */
22
+ package zip
23
+
24
+ import (
25
+ "io/fs"
26
+ "path"
27
+ "time"
28
+ )
29
+
30
+ // Compression methods.
31
+ const (
32
+ Store uint16 = 0 // no compression
33
+ Deflate uint16 = 8 // DEFLATE compressed
34
+ )
35
+
36
+ const (
37
+ fileHeaderSignature = 0x04034b50
38
+ directoryHeaderSignature = 0x02014b50
39
+ directoryEndSignature = 0x06054b50
40
+ directory64LocSignature = 0x07064b50
41
+ directory64EndSignature = 0x06064b50
42
+ dataDescriptorSignature = 0x08074b50 // de-facto standard; required by OS X Finder
43
+ fileHeaderLen = 30 // + filename + extra
44
+ directoryHeaderLen = 46 // + filename + extra + comment
45
+ directoryEndLen = 22 // + comment
46
+ dataDescriptorLen = 16 // four uint32: descriptor signature, crc32, compressed size, size
47
+ dataDescriptor64Len = 24 // two uint32: signature, crc32 | two uint64: compressed size, size
48
+ directory64LocLen = 20 //
49
+ directory64EndLen = 56 // + extra
50
+
51
+ // Constants for the first byte in CreatorVersion.
52
+ creatorFAT = 0
53
+ creatorUnix = 3
54
+ creatorNTFS = 11
55
+ creatorVFAT = 14
56
+ creatorMacOSX = 19
57
+
58
+ // Version numbers.
59
+ zipVersion20 = 20 // 2.0
60
+ zipVersion45 = 45 // 4.5 (reads and writes zip64 archives)
61
+
62
+ // Limits for non zip64 files.
63
+ uint16max = (1 << 16) - 1
64
+ uint32max = (1 << 32) - 1
65
+
66
+ // Extra header IDs.
67
+ //
68
+ // IDs 0..31 are reserved for official use by PKWARE.
69
+ // IDs above that range are defined by third-party vendors.
70
+ // Since ZIP lacked high precision timestamps (nor an official specification
71
+ // of the timezone used for the date fields), many competing extra fields
72
+ // have been invented. Pervasive use effectively makes them "official".
73
+ //
74
+ // See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
75
+ zip64ExtraID = 0x0001 // Zip64 extended information
76
+ ntfsExtraID = 0x000a // NTFS
77
+ unixExtraID = 0x000d // UNIX
78
+ extTimeExtraID = 0x5455 // Extended timestamp
79
+ infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
80
+ )
81
+
82
+ // FileHeader describes a file within a ZIP file.
83
+ // See the [ZIP specification] for details.
84
+ //
85
+ // [ZIP specification]: https://support.pkware.com/pkzip/appnote
86
+ type FileHeader struct {
87
+ // Name is the name of the file.
88
+ //
89
+ // It must be a relative path, not start with a drive letter (such as "C:"),
90
+ // and must use forward slashes instead of back slashes. A trailing slash
91
+ // indicates that this file is a directory and should have no data.
92
+ Name string
93
+
94
+ // Comment is any arbitrary user-defined string shorter than 64KiB.
95
+ Comment string
96
+
97
+ // NonUTF8 indicates that Name and Comment are not encoded in UTF-8.
98
+ //
99
+ // By specification, the only other encoding permitted should be CP-437,
100
+ // but historically many ZIP readers interpret Name and Comment as whatever
101
+ // the system's local character encoding happens to be.
102
+ //
103
+ // This flag should only be set if the user intends to encode a non-portable
104
+ // ZIP file for a specific localized region. Otherwise, the Writer
105
+ // automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings.
106
+ NonUTF8 bool
107
+
108
+ CreatorVersion uint16
109
+ ReaderVersion uint16
110
+ Flags uint16
111
+
112
+ // Method is the compression method. If zero, Store is used.
113
+ Method uint16
114
+
115
+ // Modified is the modified time of the file.
116
+ //
117
+ // When reading, an extended timestamp is preferred over the legacy MS-DOS
118
+ // date field, and the offset between the times is used as the timezone.
119
+ // If only the MS-DOS date is present, the timezone is assumed to be UTC.
120
+ //
121
+ // When writing, an extended timestamp (which is timezone-agnostic) is
122
+ // always emitted. The legacy MS-DOS date field is encoded according to the
123
+ // location of the Modified time.
124
+ Modified time.Time
125
+
126
+ // ModifiedTime is an MS-DOS-encoded time.
127
+ //
128
+ // Deprecated: Use Modified instead.
129
+ ModifiedTime uint16
130
+
131
+ // ModifiedDate is an MS-DOS-encoded date.
132
+ //
133
+ // Deprecated: Use Modified instead.
134
+ ModifiedDate uint16
135
+
136
+ // CRC32 is the CRC32 checksum of the file content.
137
+ CRC32 uint32
138
+
139
+ // CompressedSize is the compressed size of the file in bytes.
140
+ // If either the uncompressed or compressed size of the file
141
+ // does not fit in 32 bits, CompressedSize is set to ^uint32(0).
142
+ //
143
+ // Deprecated: Use CompressedSize64 instead.
144
+ CompressedSize uint32
145
+
146
+ // UncompressedSize is the compressed size of the file in bytes.
147
+ // If either the uncompressed or compressed size of the file
148
+ // does not fit in 32 bits, CompressedSize is set to ^uint32(0).
149
+ //
150
+ // Deprecated: Use UncompressedSize64 instead.
151
+ UncompressedSize uint32
152
+
153
+ // CompressedSize64 is the compressed size of the file in bytes.
154
+ CompressedSize64 uint64
155
+
156
+ // UncompressedSize64 is the uncompressed size of the file in bytes.
157
+ UncompressedSize64 uint64
158
+
159
+ Extra []byte
160
+ ExternalAttrs uint32 // Meaning depends on CreatorVersion
161
+ }
162
+
163
+ // FileInfo returns an fs.FileInfo for the [FileHeader].
164
+ func (h *FileHeader) FileInfo() fs.FileInfo {
165
+ return headerFileInfo{h}
166
+ }
167
+
168
+ // headerFileInfo implements [fs.FileInfo].
169
+ type headerFileInfo struct {
170
+ fh *FileHeader
171
+ }
172
+
173
+ func (fi headerFileInfo) Name() string { return path.Base(fi.fh.Name) }
174
+ func (fi headerFileInfo) Size() int64 {
175
+ if fi.fh.UncompressedSize64 > 0 {
176
+ return int64(fi.fh.UncompressedSize64)
177
+ }
178
+ return int64(fi.fh.UncompressedSize)
179
+ }
180
+ func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
181
+ func (fi headerFileInfo) ModTime() time.Time {
182
+ if fi.fh.Modified.IsZero() {
183
+ return fi.fh.ModTime()
184
+ }
185
+ return fi.fh.Modified.UTC()
186
+ }
187
+ func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() }
188
+ func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode().Type() }
189
+ func (fi headerFileInfo) Sys() any { return fi.fh }
190
+
191
+ func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil }
192
+
193
+ func (fi headerFileInfo) String() string {
194
+ return fs.FormatFileInfo(fi)
195
+ }
196
+
197
+ // FileInfoHeader creates a partially-populated [FileHeader] from an
198
+ // fs.FileInfo.
199
+ // Because fs.FileInfo's Name method returns only the base name of
200
+ // the file it describes, it may be necessary to modify the Name field
201
+ // of the returned header to provide the full path name of the file.
202
+ // If compression is desired, callers should set the FileHeader.Method
203
+ // field; it is unset by default.
204
+ func FileInfoHeader(fi fs.FileInfo) (*FileHeader, error) {
205
+ size := fi.Size()
206
+ fh := &FileHeader{
207
+ Name: fi.Name(),
208
+ UncompressedSize64: uint64(size),
209
+ }
210
+ fh.SetModTime(fi.ModTime())
211
+ fh.SetMode(fi.Mode())
212
+ if fh.UncompressedSize64 > uint32max {
213
+ fh.UncompressedSize = uint32max
214
+ } else {
215
+ fh.UncompressedSize = uint32(fh.UncompressedSize64)
216
+ }
217
+ return fh, nil
218
+ }
219
+
220
+ type directoryEnd struct {
221
+ diskNbr uint32 // unused
222
+ dirDiskNbr uint32 // unused
223
+ dirRecordsThisDisk uint64 // unused
224
+ directoryRecords uint64
225
+ directorySize uint64
226
+ directoryOffset uint64 // relative to file
227
+ commentLen uint16
228
+ comment string
229
+ }
230
+
231
+ // timeZone returns a *time.Location based on the provided offset.
232
+ // If the offset is non-sensible, then this uses an offset of zero.
233
+ func timeZone(offset time.Duration) *time.Location {
234
+ const (
235
+ minOffset = -12 * time.Hour // E.g., Baker island at -12:00
236
+ maxOffset = +14 * time.Hour // E.g., Line island at +14:00
237
+ offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45
238
+ )
239
+ offset = offset.Round(offsetAlias)
240
+ if offset < minOffset || maxOffset < offset {
241
+ offset = 0
242
+ }
243
+ return time.FixedZone("", int(offset/time.Second))
244
+ }
245
+
246
+ // msDosTimeToTime converts an MS-DOS date and time into a time.Time.
247
+ // The resolution is 2s.
248
+ // See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime
249
+ func msDosTimeToTime(dosDate, dosTime uint16) time.Time {
250
+ return time.Date(
251
+ // date bits 0-4: day of month; 5-8: month; 9-15: years since 1980
252
+ int(dosDate>>9+1980),
253
+ time.Month(dosDate>>5&0xf),
254
+ int(dosDate&0x1f),
255
+
256
+ // time bits 0-4: second/2; 5-10: minute; 11-15: hour
257
+ int(dosTime>>11),
258
+ int(dosTime>>5&0x3f),
259
+ int(dosTime&0x1f*2),
260
+ 0, // nanoseconds
261
+
262
+ time.UTC,
263
+ )
264
+ }
265
+
266
+ // timeToMsDosTime converts a time.Time to an MS-DOS date and time.
267
+ // The resolution is 2s.
268
+ // See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-filetimetodosdatetime
269
+ func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) {
270
+ fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9)
271
+ fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11)
272
+ return
273
+ }
274
+
275
+ // ModTime returns the modification time in UTC using the legacy
276
+ // [ModifiedDate] and [ModifiedTime] fields.
277
+ //
278
+ // Deprecated: Use [Modified] instead.
279
+ func (h *FileHeader) ModTime() time.Time {
280
+ return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime)
281
+ }
282
+
283
+ // SetModTime sets the [Modified], [ModifiedTime], and [ModifiedDate] fields
284
+ // to the given time in UTC.
285
+ //
286
+ // Deprecated: Use [Modified] instead.
287
+ func (h *FileHeader) SetModTime(t time.Time) {
288
+ t = t.UTC() // Convert to UTC for compatibility
289
+ h.Modified = t
290
+ h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t)
291
+ }
292
+
293
+ const (
294
+ // Unix constants. The specification doesn't mention them,
295
+ // but these seem to be the values agreed on by tools.
296
+ s_IFMT = 0xf000
297
+ s_IFSOCK = 0xc000
298
+ s_IFLNK = 0xa000
299
+ s_IFREG = 0x8000
300
+ s_IFBLK = 0x6000
301
+ s_IFDIR = 0x4000
302
+ s_IFCHR = 0x2000
303
+ s_IFIFO = 0x1000
304
+ s_ISUID = 0x800
305
+ s_ISGID = 0x400
306
+ s_ISVTX = 0x200
307
+
308
+ msdosDir = 0x10
309
+ msdosReadOnly = 0x01
310
+ )
311
+
312
+ // Mode returns the permission and mode bits for the [FileHeader].
313
+ func (h *FileHeader) Mode() (mode fs.FileMode) {
314
+ switch h.CreatorVersion >> 8 {
315
+ case creatorUnix, creatorMacOSX:
316
+ mode = unixModeToFileMode(h.ExternalAttrs >> 16)
317
+ case creatorNTFS, creatorVFAT, creatorFAT:
318
+ mode = msdosModeToFileMode(h.ExternalAttrs)
319
+ }
320
+ if len(h.Name) > 0 && h.Name[len(h.Name)-1] == '/' {
321
+ mode |= fs.ModeDir
322
+ }
323
+ return mode
324
+ }
325
+
326
+ // SetMode changes the permission and mode bits for the [FileHeader].
327
+ func (h *FileHeader) SetMode(mode fs.FileMode) {
328
+ h.CreatorVersion = h.CreatorVersion&0xff | creatorUnix<<8
329
+ h.ExternalAttrs = fileModeToUnixMode(mode) << 16
330
+
331
+ // set MSDOS attributes too, as the original zip does.
332
+ if mode&fs.ModeDir != 0 {
333
+ h.ExternalAttrs |= msdosDir
334
+ }
335
+ if mode&0200 == 0 {
336
+ h.ExternalAttrs |= msdosReadOnly
337
+ }
338
+ }
339
+
340
+ // isZip64 reports whether the file size exceeds the 32 bit limit
341
+ func (h *FileHeader) isZip64() bool {
342
+ return h.CompressedSize64 >= uint32max || h.UncompressedSize64 >= uint32max
343
+ }
344
+
345
+ func (h *FileHeader) hasDataDescriptor() bool {
346
+ return h.Flags&0x8 != 0
347
+ }
348
+
349
+ func msdosModeToFileMode(m uint32) (mode fs.FileMode) {
350
+ if m&msdosDir != 0 {
351
+ mode = fs.ModeDir | 0777
352
+ } else {
353
+ mode = 0666
354
+ }
355
+ if m&msdosReadOnly != 0 {
356
+ mode &^= 0222
357
+ }
358
+ return mode
359
+ }
360
+
361
+ func fileModeToUnixMode(mode fs.FileMode) uint32 {
362
+ var m uint32
363
+ switch mode & fs.ModeType {
364
+ default:
365
+ m = s_IFREG
366
+ case fs.ModeDir:
367
+ m = s_IFDIR
368
+ case fs.ModeSymlink:
369
+ m = s_IFLNK
370
+ case fs.ModeNamedPipe:
371
+ m = s_IFIFO
372
+ case fs.ModeSocket:
373
+ m = s_IFSOCK
374
+ case fs.ModeDevice:
375
+ m = s_IFBLK
376
+ case fs.ModeDevice | fs.ModeCharDevice:
377
+ m = s_IFCHR
378
+ }
379
+ if mode&fs.ModeSetuid != 0 {
380
+ m |= s_ISUID
381
+ }
382
+ if mode&fs.ModeSetgid != 0 {
383
+ m |= s_ISGID
384
+ }
385
+ if mode&fs.ModeSticky != 0 {
386
+ m |= s_ISVTX
387
+ }
388
+ return m | uint32(mode&0777)
389
+ }
390
+
391
+ func unixModeToFileMode(m uint32) fs.FileMode {
392
+ mode := fs.FileMode(m & 0777)
393
+ switch m & s_IFMT {
394
+ case s_IFBLK:
395
+ mode |= fs.ModeDevice
396
+ case s_IFCHR:
397
+ mode |= fs.ModeDevice | fs.ModeCharDevice
398
+ case s_IFDIR:
399
+ mode |= fs.ModeDir
400
+ case s_IFIFO:
401
+ mode |= fs.ModeNamedPipe
402
+ case s_IFLNK:
403
+ mode |= fs.ModeSymlink
404
+ case s_IFREG:
405
+ // nothing to do
406
+ case s_IFSOCK:
407
+ mode |= fs.ModeSocket
408
+ }
409
+ if m&s_ISGID != 0 {
410
+ mode |= fs.ModeSetgid
411
+ }
412
+ if m&s_ISUID != 0 {
413
+ mode |= fs.ModeSetuid
414
+ }
415
+ if m&s_ISVTX != 0 {
416
+ mode |= fs.ModeSticky
417
+ }
418
+ return mode
419
+ }
platform/dbops/binaries/go/go/src/archive/zip/writer.go ADDED
@@ -0,0 +1,666 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2011 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package zip
6
+
7
+ import (
8
+ "bufio"
9
+ "encoding/binary"
10
+ "errors"
11
+ "hash"
12
+ "hash/crc32"
13
+ "io"
14
+ "io/fs"
15
+ "strings"
16
+ "unicode/utf8"
17
+ )
18
+
19
+ var (
20
+ errLongName = errors.New("zip: FileHeader.Name too long")
21
+ errLongExtra = errors.New("zip: FileHeader.Extra too long")
22
+ )
23
+
24
+ // Writer implements a zip file writer.
25
+ type Writer struct {
26
+ cw *countWriter
27
+ dir []*header
28
+ last *fileWriter
29
+ closed bool
30
+ compressors map[uint16]Compressor
31
+ comment string
32
+
33
+ // testHookCloseSizeOffset if non-nil is called with the size
34
+ // of offset of the central directory at Close.
35
+ testHookCloseSizeOffset func(size, offset uint64)
36
+ }
37
+
38
+ type header struct {
39
+ *FileHeader
40
+ offset uint64
41
+ raw bool
42
+ }
43
+
44
+ // NewWriter returns a new [Writer] writing a zip file to w.
45
+ func NewWriter(w io.Writer) *Writer {
46
+ return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
47
+ }
48
+
49
+ // SetOffset sets the offset of the beginning of the zip data within the
50
+ // underlying writer. It should be used when the zip data is appended to an
51
+ // existing file, such as a binary executable.
52
+ // It must be called before any data is written.
53
+ func (w *Writer) SetOffset(n int64) {
54
+ if w.cw.count != 0 {
55
+ panic("zip: SetOffset called after data was written")
56
+ }
57
+ w.cw.count = n
58
+ }
59
+
60
+ // Flush flushes any buffered data to the underlying writer.
61
+ // Calling Flush is not normally necessary; calling Close is sufficient.
62
+ func (w *Writer) Flush() error {
63
+ return w.cw.w.(*bufio.Writer).Flush()
64
+ }
65
+
66
+ // SetComment sets the end-of-central-directory comment field.
67
+ // It can only be called before [Writer.Close].
68
+ func (w *Writer) SetComment(comment string) error {
69
+ if len(comment) > uint16max {
70
+ return errors.New("zip: Writer.Comment too long")
71
+ }
72
+ w.comment = comment
73
+ return nil
74
+ }
75
+
76
+ // Close finishes writing the zip file by writing the central directory.
77
+ // It does not close the underlying writer.
78
+ func (w *Writer) Close() error {
79
+ if w.last != nil && !w.last.closed {
80
+ if err := w.last.close(); err != nil {
81
+ return err
82
+ }
83
+ w.last = nil
84
+ }
85
+ if w.closed {
86
+ return errors.New("zip: writer closed twice")
87
+ }
88
+ w.closed = true
89
+
90
+ // write central directory
91
+ start := w.cw.count
92
+ for _, h := range w.dir {
93
+ var buf [directoryHeaderLen]byte
94
+ b := writeBuf(buf[:])
95
+ b.uint32(uint32(directoryHeaderSignature))
96
+ b.uint16(h.CreatorVersion)
97
+ b.uint16(h.ReaderVersion)
98
+ b.uint16(h.Flags)
99
+ b.uint16(h.Method)
100
+ b.uint16(h.ModifiedTime)
101
+ b.uint16(h.ModifiedDate)
102
+ b.uint32(h.CRC32)
103
+ if h.isZip64() || h.offset >= uint32max {
104
+ // the file needs a zip64 header. store maxint in both
105
+ // 32 bit size fields (and offset later) to signal that the
106
+ // zip64 extra header should be used.
107
+ b.uint32(uint32max) // compressed size
108
+ b.uint32(uint32max) // uncompressed size
109
+
110
+ // append a zip64 extra block to Extra
111
+ var buf [28]byte // 2x uint16 + 3x uint64
112
+ eb := writeBuf(buf[:])
113
+ eb.uint16(zip64ExtraID)
114
+ eb.uint16(24) // size = 3x uint64
115
+ eb.uint64(h.UncompressedSize64)
116
+ eb.uint64(h.CompressedSize64)
117
+ eb.uint64(h.offset)
118
+ h.Extra = append(h.Extra, buf[:]...)
119
+ } else {
120
+ b.uint32(h.CompressedSize)
121
+ b.uint32(h.UncompressedSize)
122
+ }
123
+
124
+ b.uint16(uint16(len(h.Name)))
125
+ b.uint16(uint16(len(h.Extra)))
126
+ b.uint16(uint16(len(h.Comment)))
127
+ b = b[4:] // skip disk number start and internal file attr (2x uint16)
128
+ b.uint32(h.ExternalAttrs)
129
+ if h.offset > uint32max {
130
+ b.uint32(uint32max)
131
+ } else {
132
+ b.uint32(uint32(h.offset))
133
+ }
134
+ if _, err := w.cw.Write(buf[:]); err != nil {
135
+ return err
136
+ }
137
+ if _, err := io.WriteString(w.cw, h.Name); err != nil {
138
+ return err
139
+ }
140
+ if _, err := w.cw.Write(h.Extra); err != nil {
141
+ return err
142
+ }
143
+ if _, err := io.WriteString(w.cw, h.Comment); err != nil {
144
+ return err
145
+ }
146
+ }
147
+ end := w.cw.count
148
+
149
+ records := uint64(len(w.dir))
150
+ size := uint64(end - start)
151
+ offset := uint64(start)
152
+
153
+ if f := w.testHookCloseSizeOffset; f != nil {
154
+ f(size, offset)
155
+ }
156
+
157
+ if records >= uint16max || size >= uint32max || offset >= uint32max {
158
+ var buf [directory64EndLen + directory64LocLen]byte
159
+ b := writeBuf(buf[:])
160
+
161
+ // zip64 end of central directory record
162
+ b.uint32(directory64EndSignature)
163
+ b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64)
164
+ b.uint16(zipVersion45) // version made by
165
+ b.uint16(zipVersion45) // version needed to extract
166
+ b.uint32(0) // number of this disk
167
+ b.uint32(0) // number of the disk with the start of the central directory
168
+ b.uint64(records) // total number of entries in the central directory on this disk
169
+ b.uint64(records) // total number of entries in the central directory
170
+ b.uint64(size) // size of the central directory
171
+ b.uint64(offset) // offset of start of central directory with respect to the starting disk number
172
+
173
+ // zip64 end of central directory locator
174
+ b.uint32(directory64LocSignature)
175
+ b.uint32(0) // number of the disk with the start of the zip64 end of central directory
176
+ b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record
177
+ b.uint32(1) // total number of disks
178
+
179
+ if _, err := w.cw.Write(buf[:]); err != nil {
180
+ return err
181
+ }
182
+
183
+ // store max values in the regular end record to signal
184
+ // that the zip64 values should be used instead
185
+ records = uint16max
186
+ size = uint32max
187
+ offset = uint32max
188
+ }
189
+
190
+ // write end record
191
+ var buf [directoryEndLen]byte
192
+ b := writeBuf(buf[:])
193
+ b.uint32(uint32(directoryEndSignature))
194
+ b = b[4:] // skip over disk number and first disk number (2x uint16)
195
+ b.uint16(uint16(records)) // number of entries this disk
196
+ b.uint16(uint16(records)) // number of entries total
197
+ b.uint32(uint32(size)) // size of directory
198
+ b.uint32(uint32(offset)) // start of directory
199
+ b.uint16(uint16(len(w.comment))) // byte size of EOCD comment
200
+ if _, err := w.cw.Write(buf[:]); err != nil {
201
+ return err
202
+ }
203
+ if _, err := io.WriteString(w.cw, w.comment); err != nil {
204
+ return err
205
+ }
206
+
207
+ return w.cw.w.(*bufio.Writer).Flush()
208
+ }
209
+
210
+ // Create adds a file to the zip file using the provided name.
211
+ // It returns a [Writer] to which the file contents should be written.
212
+ // The file contents will be compressed using the [Deflate] method.
213
+ // The name must be a relative path: it must not start with a drive
214
+ // letter (e.g. C:) or leading slash, and only forward slashes are
215
+ // allowed. To create a directory instead of a file, add a trailing
216
+ // slash to the name.
217
+ // The file's contents must be written to the [io.Writer] before the next
218
+ // call to [Writer.Create], [Writer.CreateHeader], or [Writer.Close].
219
+ func (w *Writer) Create(name string) (io.Writer, error) {
220
+ header := &FileHeader{
221
+ Name: name,
222
+ Method: Deflate,
223
+ }
224
+ return w.CreateHeader(header)
225
+ }
226
+
227
+ // detectUTF8 reports whether s is a valid UTF-8 string, and whether the string
228
+ // must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,
229
+ // or any other common encoding).
230
+ func detectUTF8(s string) (valid, require bool) {
231
+ for i := 0; i < len(s); {
232
+ r, size := utf8.DecodeRuneInString(s[i:])
233
+ i += size
234
+ // Officially, ZIP uses CP-437, but many readers use the system's
235
+ // local character encoding. Most encoding are compatible with a large
236
+ // subset of CP-437, which itself is ASCII-like.
237
+ //
238
+ // Forbid 0x7e and 0x5c since EUC-KR and Shift-JIS replace those
239
+ // characters with localized currency and overline characters.
240
+ if r < 0x20 || r > 0x7d || r == 0x5c {
241
+ if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {
242
+ return false, false
243
+ }
244
+ require = true
245
+ }
246
+ }
247
+ return true, require
248
+ }
249
+
250
+ // prepare performs the bookkeeping operations required at the start of
251
+ // CreateHeader and CreateRaw.
252
+ func (w *Writer) prepare(fh *FileHeader) error {
253
+ if w.last != nil && !w.last.closed {
254
+ if err := w.last.close(); err != nil {
255
+ return err
256
+ }
257
+ }
258
+ if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
259
+ // See https://golang.org/issue/11144 confusion.
260
+ return errors.New("archive/zip: invalid duplicate FileHeader")
261
+ }
262
+ return nil
263
+ }
264
+
265
// CreateHeader adds a file to the zip archive using the provided [FileHeader]
// for the file metadata. [Writer] takes ownership of fh and may mutate
// its fields. The caller must not modify fh after calling [Writer.CreateHeader].
//
// This returns a [Writer] to which the file contents should be written.
// The file's contents must be written to the io.Writer before the next
// call to [Writer.Create], [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
	// Close any in-flight entry and reject duplicate header reuse.
	if err := w.prepare(fh); err != nil {
		return nil, err
	}

	// The ZIP format has a sad state of affairs regarding character encoding.
	// Officially, the name and comment fields are supposed to be encoded
	// in CP-437 (which is mostly compatible with ASCII), unless the UTF-8
	// flag bit is set. However, there are several problems:
	//
	//	* Many ZIP readers still do not support UTF-8.
	//	* If the UTF-8 flag is cleared, several readers simply interpret the
	//	  name and comment fields as whatever the local system encoding is.
	//
	// In order to avoid breaking readers without UTF-8 support,
	// we avoid setting the UTF-8 flag if the strings are CP-437 compatible.
	// However, if the strings require multibyte UTF-8 encoding and is a
	// valid UTF-8 string, then we set the UTF-8 bit.
	//
	// For the case, where the user explicitly wants to specify the encoding
	// as UTF-8, they will need to set the flag bit themselves.
	utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
	utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
	switch {
	case fh.NonUTF8:
		fh.Flags &^= 0x800
	case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
		fh.Flags |= 0x800
	}

	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
	fh.ReaderVersion = zipVersion20

	// If Modified is set, this takes precedence over MS-DOS timestamp fields.
	if !fh.Modified.IsZero() {
		// Contrary to the FileHeader.SetModTime method, we intentionally
		// do not convert to UTC, because we assume the user intends to encode
		// the date using the specified timezone. A user may want this control
		// because many legacy ZIP readers interpret the timestamp according
		// to the local timezone.
		//
		// The timezone is only non-UTC if a user directly sets the Modified
		// field directly themselves. All other approaches sets UTC.
		fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)

		// Use "extended timestamp" format since this is what Info-ZIP uses.
		// Nearly every major ZIP implementation uses a different format,
		// but at least most seem to be able to understand the other formats.
		//
		// This format happens to be identical for both local and central header
		// if modification time is the only timestamp being encoded.
		var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32)
		mt := uint32(fh.Modified.Unix())
		eb := writeBuf(mbuf[:])
		eb.uint16(extTimeExtraID)
		eb.uint16(5)  // Size: SizeOf(uint8) + SizeOf(uint32)
		eb.uint8(1)   // Flags: ModTime
		eb.uint32(mt) // ModTime
		fh.Extra = append(fh.Extra, mbuf[:]...)
	}

	var (
		ow io.Writer
		fw *fileWriter
	)
	// Record the local header's offset now, before anything is written.
	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
	}

	if strings.HasSuffix(fh.Name, "/") {
		// Set the compression method to Store to ensure data length is truly zero,
		// which the writeHeader method always encodes for the size fields.
		// This is necessary as most compression formats have non-zero lengths
		// even when compressing an empty string.
		fh.Method = Store
		fh.Flags &^= 0x8 // we will not write a data descriptor

		// Explicitly clear sizes as they have no meaning for directories.
		fh.CompressedSize = 0
		fh.CompressedSize64 = 0
		fh.UncompressedSize = 0
		fh.UncompressedSize64 = 0

		ow = dirWriter{}
	} else {
		fh.Flags |= 0x8 // we will write a data descriptor

		fw = &fileWriter{
			zipw:      w.cw,
			compCount: &countWriter{w: w.cw},
			crc32:     crc32.NewIEEE(),
		}
		comp := w.compressor(fh.Method)
		if comp == nil {
			return nil, ErrAlgorithm
		}
		var err error
		fw.comp, err = comp(fw.compCount)
		if err != nil {
			return nil, err
		}
		fw.rawCount = &countWriter{w: fw.comp}
		fw.header = h
		ow = fw
	}
	w.dir = append(w.dir, h)
	if err := writeHeader(w.cw, h); err != nil {
		return nil, err
	}
	// If we're creating a directory, fw is nil.
	w.last = fw
	return ow, nil
}
386
+
387
// writeHeader encodes and writes the ZIP local file header for h to w,
// followed by the file name and extra field. Field order and widths follow
// the local header layout; name and extra lengths must fit in uint16.
func writeHeader(w io.Writer, h *header) error {
	const maxUint16 = 1<<16 - 1
	if len(h.Name) > maxUint16 {
		return errLongName
	}
	if len(h.Extra) > maxUint16 {
		return errLongExtra
	}

	var buf [fileHeaderLen]byte
	b := writeBuf(buf[:])
	b.uint32(uint32(fileHeaderSignature))
	b.uint16(h.ReaderVersion)
	b.uint16(h.Flags)
	b.uint16(h.Method)
	b.uint16(h.ModifiedTime)
	b.uint16(h.ModifiedDate)
	// In raw mode (caller does the compression), the values are either
	// written here or in the trailing data descriptor based on the header
	// flags.
	if h.raw && !h.hasDataDescriptor() {
		b.uint32(h.CRC32)
		// Sizes are capped at uint32max; larger values are represented
		// via the zip64 machinery in the central directory.
		b.uint32(uint32(min(h.CompressedSize64, uint32max)))
		b.uint32(uint32(min(h.UncompressedSize64, uint32max)))
	} else {
		// When this package handle the compression, these values are
		// always written to the trailing data descriptor.
		b.uint32(0) // crc32
		b.uint32(0) // compressed size
		b.uint32(0) // uncompressed size
	}
	b.uint16(uint16(len(h.Name)))
	b.uint16(uint16(len(h.Extra)))
	if _, err := w.Write(buf[:]); err != nil {
		return err
	}
	if _, err := io.WriteString(w, h.Name); err != nil {
		return err
	}
	_, err := w.Write(h.Extra)
	return err
}
429
+
430
// CreateRaw adds a file to the zip archive using the provided [FileHeader] and
// returns a [Writer] to which the file contents should be written. The file's
// contents must be written to the io.Writer before the next call to [Writer.Create],
// [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close].
//
// In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed.
func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
	if err := w.prepare(fh); err != nil {
		return nil, err
	}

	// Keep the legacy 32-bit size fields in sync, saturating at uint32max.
	fh.CompressedSize = uint32(min(fh.CompressedSize64, uint32max))
	fh.UncompressedSize = uint32(min(fh.UncompressedSize64, uint32max))

	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
		raw:        true,
	}
	w.dir = append(w.dir, h)
	if err := writeHeader(w.cw, h); err != nil {
		return nil, err
	}

	// Directory entries carry no data; writes to them are rejected.
	if strings.HasSuffix(fh.Name, "/") {
		w.last = nil
		return dirWriter{}, nil
	}

	// Raw mode: the fileWriter passes bytes straight through to the
	// archive without CRC or compression.
	fw := &fileWriter{
		header: h,
		zipw:   w.cw,
	}
	w.last = fw
	return fw, nil
}
466
+
467
+ // Copy copies the file f (obtained from a [Reader]) into w. It copies the raw
468
+ // form directly bypassing decompression, compression, and validation.
469
+ func (w *Writer) Copy(f *File) error {
470
+ r, err := f.OpenRaw()
471
+ if err != nil {
472
+ return err
473
+ }
474
+ fw, err := w.CreateRaw(&f.FileHeader)
475
+ if err != nil {
476
+ return err
477
+ }
478
+ _, err = io.Copy(fw, r)
479
+ return err
480
+ }
481
+
482
+ // RegisterCompressor registers or overrides a custom compressor for a specific
483
+ // method ID. If a compressor for a given method is not found, [Writer] will
484
+ // default to looking up the compressor at the package level.
485
+ func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
486
+ if w.compressors == nil {
487
+ w.compressors = make(map[uint16]Compressor)
488
+ }
489
+ w.compressors[method] = comp
490
+ }
491
+
492
// AddFS adds the files from fs.FS to the archive.
// It walks the directory tree starting at the root of the filesystem
// adding each file to the zip using deflate while maintaining the directory structure.
func (w *Writer) AddFS(fsys fs.FS) error {
	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// Directories are implied by file names; no explicit entry is written.
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		// Symlinks, devices, etc. have no meaningful zip representation here.
		if !info.Mode().IsRegular() {
			return errors.New("zip: cannot add non-regular file")
		}
		h, err := FileInfoHeader(info)
		if err != nil {
			return err
		}
		// Use the slash-separated walk path, not just the base name,
		// so the directory structure is preserved in the archive.
		h.Name = name
		h.Method = Deflate
		fw, err := w.CreateHeader(h)
		if err != nil {
			return err
		}
		f, err := fsys.Open(name)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(fw, f)
		return err
	})
}
529
+
530
+ func (w *Writer) compressor(method uint16) Compressor {
531
+ comp := w.compressors[method]
532
+ if comp == nil {
533
+ comp = compressor(method)
534
+ }
535
+ return comp
536
+ }
537
+
538
// dirWriter is the io.Writer handed back for directory entries.
// Directories carry no data, so only empty writes succeed.
type dirWriter struct{}

// Write accepts zero-length writes and rejects everything else.
func (dirWriter) Write(b []byte) (int, error) {
	if len(b) > 0 {
		return 0, errors.New("zip: write to directory")
	}
	return 0, nil
}
546
+
547
// fileWriter is the io.Writer returned by CreateHeader and CreateRaw for a
// regular (non-directory) entry. In normal mode writes are checksummed and
// fed through the compressor; in raw mode they pass straight to the archive.
type fileWriter struct {
	*header
	zipw      io.Writer      // the archive stream (offset-counting writer)
	rawCount  *countWriter   // counts uncompressed bytes fed into comp
	comp      io.WriteCloser // compressor writing into compCount
	compCount *countWriter   // counts compressed bytes emitted to zipw
	crc32     hash.Hash32    // CRC-32 of the uncompressed data
	closed    bool           // set by close; further writes fail
}
556
+
557
+ func (w *fileWriter) Write(p []byte) (int, error) {
558
+ if w.closed {
559
+ return 0, errors.New("zip: write to closed file")
560
+ }
561
+ if w.raw {
562
+ return w.zipw.Write(p)
563
+ }
564
+ w.crc32.Write(p)
565
+ return w.rawCount.Write(p)
566
+ }
567
+
568
// close finishes the entry: it flushes the compressor, back-fills the
// FileHeader's CRC and size fields from the counters, switches the header
// to zip64 form when the sizes overflow 32 bits, and writes the trailing
// data descriptor. close is idempotent-hostile: a second call is an error.
func (w *fileWriter) close() error {
	if w.closed {
		return errors.New("zip: file closed twice")
	}
	w.closed = true
	// Raw entries carry caller-supplied sizes/CRC; only the descriptor
	// (if flagged) remains to be written.
	if w.raw {
		return w.writeDataDescriptor()
	}
	if err := w.comp.Close(); err != nil {
		return err
	}

	// update FileHeader
	fh := w.header.FileHeader
	fh.CRC32 = w.crc32.Sum32()
	fh.CompressedSize64 = uint64(w.compCount.count)
	fh.UncompressedSize64 = uint64(w.rawCount.count)

	if fh.isZip64() {
		// Sentinel 32-bit sizes signal that the real values live in the
		// zip64 extra field of the central directory.
		fh.CompressedSize = uint32max
		fh.UncompressedSize = uint32max
		fh.ReaderVersion = zipVersion45 // requires 4.5 - File uses ZIP64 format extensions
	} else {
		fh.CompressedSize = uint32(fh.CompressedSize64)
		fh.UncompressedSize = uint32(fh.UncompressedSize64)
	}

	return w.writeDataDescriptor()
}
597
+
598
// writeDataDescriptor writes the trailing data descriptor (signature, CRC,
// compressed and uncompressed sizes) for the entry, using 64-bit size
// fields when the entry requires zip64. It is a no-op when the header's
// flags say no descriptor follows.
func (w *fileWriter) writeDataDescriptor() error {
	if !w.hasDataDescriptor() {
		return nil
	}
	// Write data descriptor. This is more complicated than one would
	// think, see e.g. comments in zipfile.c:putextended() and
	// http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7073588.
	// The approach here is to write 8 byte sizes if needed without
	// adding a zip64 extra in the local header (too late anyway).
	var buf []byte
	if w.isZip64() {
		buf = make([]byte, dataDescriptor64Len)
	} else {
		buf = make([]byte, dataDescriptorLen)
	}
	b := writeBuf(buf)
	b.uint32(dataDescriptorSignature) // de-facto standard, required by OS X
	b.uint32(w.CRC32)
	if w.isZip64() {
		b.uint64(w.CompressedSize64)
		b.uint64(w.UncompressedSize64)
	} else {
		b.uint32(w.CompressedSize)
		b.uint32(w.UncompressedSize)
	}
	_, err := w.zipw.Write(buf)
	return err
}
626
+
627
+ type countWriter struct {
628
+ w io.Writer
629
+ count int64
630
+ }
631
+
632
+ func (w *countWriter) Write(p []byte) (int, error) {
633
+ n, err := w.w.Write(p)
634
+ w.count += int64(n)
635
+ return n, err
636
+ }
637
+
638
+ type nopCloser struct {
639
+ io.Writer
640
+ }
641
+
642
+ func (w nopCloser) Close() error {
643
+ return nil
644
+ }
645
+
646
+ type writeBuf []byte
647
+
648
+ func (b *writeBuf) uint8(v uint8) {
649
+ (*b)[0] = v
650
+ *b = (*b)[1:]
651
+ }
652
+
653
+ func (b *writeBuf) uint16(v uint16) {
654
+ binary.LittleEndian.PutUint16(*b, v)
655
+ *b = (*b)[2:]
656
+ }
657
+
658
+ func (b *writeBuf) uint32(v uint32) {
659
+ binary.LittleEndian.PutUint32(*b, v)
660
+ *b = (*b)[4:]
661
+ }
662
+
663
+ func (b *writeBuf) uint64(v uint64) {
664
+ binary.LittleEndian.PutUint64(*b, v)
665
+ *b = (*b)[8:]
666
+ }
platform/dbops/binaries/go/go/src/archive/zip/writer_test.go ADDED
@@ -0,0 +1,673 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2011 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package zip
6
+
7
+ import (
8
+ "bytes"
9
+ "compress/flate"
10
+ "encoding/binary"
11
+ "fmt"
12
+ "hash/crc32"
13
+ "io"
14
+ "io/fs"
15
+ "math/rand"
16
+ "os"
17
+ "strings"
18
+ "testing"
19
+ "testing/fstest"
20
+ "time"
21
+ )
22
+
23
+ // TODO(adg): a more sophisticated test suite
24
+
25
// WriteTest describes one file to write into a test archive and the
// metadata expected to round-trip through the writer and reader.
type WriteTest struct {
	Name   string      // entry name within the archive
	Data   []byte      // file contents (nil means filled in by the test)
	Method uint16      // compression method: Store or Deflate
	Mode   fs.FileMode // mode bits to encode, including type bits
}
31
+
32
// writeTests covers plain files plus every special mode bit the zip
// external attributes can encode (setuid/setgid, symlink, devices).
var writeTests = []WriteTest{
	{
		Name:   "foo",
		Data:   []byte("Rabbits, guinea pigs, gophers, marsupial rats, and quolls."),
		Method: Store,
		Mode:   0666,
	},
	{
		Name:   "bar",
		Data:   nil, // large data set in the test
		Method: Deflate,
		Mode:   0644,
	},
	{
		Name:   "setuid",
		Data:   []byte("setuid file"),
		Method: Deflate,
		Mode:   0755 | fs.ModeSetuid,
	},
	{
		Name:   "setgid",
		Data:   []byte("setgid file"),
		Method: Deflate,
		Mode:   0755 | fs.ModeSetgid,
	},
	{
		Name:   "symlink",
		Data:   []byte("../link/target"),
		Method: Deflate,
		Mode:   0755 | fs.ModeSymlink,
	},
	{
		Name:   "device",
		Data:   []byte("device file"),
		Method: Deflate,
		Mode:   0755 | fs.ModeDevice,
	},
	{
		Name:   "chardevice",
		Data:   []byte("char device file"),
		Method: Deflate,
		Mode:   0755 | fs.ModeDevice | fs.ModeCharDevice,
	},
}
76
+
77
// TestWriter writes every writeTests entry into an archive and verifies
// each round-trips through NewReader with the same name, mode, and data.
func TestWriter(t *testing.T) {
	largeData := make([]byte, 1<<17)
	if _, err := rand.Read(largeData); err != nil {
		t.Fatal("rand.Read failed:", err)
	}
	// The "bar" entry gets its large payload here; restore nil afterwards
	// so other tests see the table unmodified.
	writeTests[1].Data = largeData
	defer func() {
		writeTests[1].Data = nil
	}()

	// write a zip file
	buf := new(bytes.Buffer)
	w := NewWriter(buf)

	for _, wt := range writeTests {
		testCreate(t, w, &wt)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range writeTests {
		testReadFile(t, r.File[i], &wt)
	}
}
108
+
109
// TestWriterComment is test for EOCD comment read/write.
// Comments up to uint16max bytes must round-trip; longer ones must be
// rejected by SetComment before anything is written.
func TestWriterComment(t *testing.T) {
	var tests = []struct {
		comment string
		ok      bool
	}{
		{"hi, hello", true},
		{"hi, こんにちわ", true},
		{strings.Repeat("a", uint16max), true},
		{strings.Repeat("a", uint16max+1), false},
	}

	for _, test := range tests {
		// write a zip file
		buf := new(bytes.Buffer)
		w := NewWriter(buf)
		if err := w.SetComment(test.comment); err != nil {
			if test.ok {
				t.Fatalf("SetComment: unexpected error %v", err)
			}
			continue
		} else {
			if !test.ok {
				t.Fatalf("SetComment: unexpected success, want error")
			}
		}

		if err := w.Close(); test.ok == (err != nil) {
			t.Fatal(err)
		}

		if w.closed != test.ok {
			t.Fatalf("Writer.closed: got %v, want %v", w.closed, test.ok)
		}

		// skip read test in failure cases
		if !test.ok {
			continue
		}

		// read it back
		r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
		if err != nil {
			t.Fatal(err)
		}
		if r.Comment != test.comment {
			t.Fatalf("Reader.Comment: got %v, want %v", r.Comment, test.comment)
		}
	}
}
159
+
160
// TestWriterUTF8 checks that CreateHeader sets the UTF-8 flag (0x800) only
// when the name/comment actually require and form valid UTF-8, and that
// NonUTF8 suppresses it. (0x8 is the data-descriptor flag set for files.)
func TestWriterUTF8(t *testing.T) {
	var utf8Tests = []struct {
		name    string
		comment string
		nonUTF8 bool
		flags   uint16
	}{
		{
			name:    "hi, hello",
			comment: "in the world",
			flags:   0x8,
		},
		{
			name:    "hi, こんにちわ",
			comment: "in the world",
			flags:   0x808,
		},
		{
			name:    "hi, こんにちわ",
			comment: "in the world",
			nonUTF8: true,
			flags:   0x8,
		},
		{
			name:    "hi, hello",
			comment: "in the 世界",
			flags:   0x808,
		},
		{
			name:    "hi, こんにちわ",
			comment: "in the 世界",
			flags:   0x808,
		},
		{
			name:    "the replacement rune is �",
			comment: "the replacement rune is �",
			flags:   0x808,
		},
		{
			// Name is Japanese encoded in Shift JIS.
			name:    "\x93\xfa\x96{\x8c\xea.txt",
			comment: "in the 世界",
			flags:   0x008, // UTF-8 must not be set
		},
	}

	// write a zip file
	buf := new(bytes.Buffer)
	w := NewWriter(buf)

	for _, test := range utf8Tests {
		h := &FileHeader{
			Name:    test.name,
			Comment: test.comment,
			NonUTF8: test.nonUTF8,
			Method:  Deflate,
		}
		w, err := w.CreateHeader(h)
		if err != nil {
			t.Fatal(err)
		}
		w.Write([]byte{})
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, test := range utf8Tests {
		flags := r.File[i].Flags
		if flags != test.flags {
			t.Errorf("CreateHeader(name=%q comment=%q nonUTF8=%v): flags=%#x, want %#x", test.name, test.comment, test.nonUTF8, flags, test.flags)
		}
	}
}
240
+
241
// TestWriterTime verifies byte-for-byte output of the timestamp encoding
// (MS-DOS fields plus extended-timestamp extra) against a golden archive.
func TestWriterTime(t *testing.T) {
	var buf bytes.Buffer
	h := &FileHeader{
		Name:     "test.txt",
		Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)),
	}
	w := NewWriter(&buf)
	if _, err := w.CreateHeader(h); err != nil {
		t.Fatalf("unexpected CreateHeader error: %v", err)
	}
	if err := w.Close(); err != nil {
		t.Fatalf("unexpected Close error: %v", err)
	}

	want, err := os.ReadFile("testdata/time-go.zip")
	if err != nil {
		t.Fatalf("unexpected ReadFile error: %v", err)
	}
	if got := buf.Bytes(); !bytes.Equal(got, want) {
		fmt.Printf("%x\n%x\n", got, want)
		t.Error("contents of time-go.zip differ")
	}
}
264
+
265
// TestWriterOffset exercises SetOffset: the archive is appended after
// pre-existing bytes, and central-directory offsets must account for them.
func TestWriterOffset(t *testing.T) {
	largeData := make([]byte, 1<<17)
	if _, err := rand.Read(largeData); err != nil {
		t.Fatal("rand.Read failed:", err)
	}
	writeTests[1].Data = largeData
	defer func() {
		writeTests[1].Data = nil
	}()

	// write a zip file
	buf := new(bytes.Buffer)
	existingData := []byte{1, 2, 3, 1, 2, 3, 1, 2, 3}
	n, _ := buf.Write(existingData)
	w := NewWriter(buf)
	w.SetOffset(int64(n))

	for _, wt := range writeTests {
		testCreate(t, w, &wt)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range writeTests {
		testReadFile(t, r.File[i], &wt)
	}
}
299
+
300
// TestWriterFlush checks that output is buffered until Flush is called.
// Wrapping the buffer in an anonymous struct hides Bytes/WriteTo methods
// so NewWriter cannot bypass its internal bufio layer.
func TestWriterFlush(t *testing.T) {
	var buf bytes.Buffer
	w := NewWriter(struct{ io.Writer }{&buf})
	_, err := w.Create("foo")
	if err != nil {
		t.Fatal(err)
	}
	if buf.Len() > 0 {
		t.Fatalf("Unexpected %d bytes already in buffer", buf.Len())
	}
	if err := w.Flush(); err != nil {
		t.Fatal(err)
	}
	if buf.Len() == 0 {
		t.Fatal("No bytes written after Flush")
	}
}
317
+
318
// TestWriterDir verifies the writer returned for a directory entry
// accepts empty writes and rejects non-empty ones.
func TestWriterDir(t *testing.T) {
	w := NewWriter(io.Discard)
	dw, err := w.Create("dir/")
	if err != nil {
		t.Fatal(err)
	}
	if _, err := dw.Write(nil); err != nil {
		t.Errorf("Write(nil) to directory: got %v, want nil", err)
	}
	if _, err := dw.Write([]byte("hello")); err == nil {
		t.Error(`Write("hello") to directory: got nil error, want non-nil`)
	}
}
331
+
332
// TestWriterDirAttributes checks that directory entries are normalized:
// method/flags/sizes/CRC are forced to zero in the local header and no
// data descriptor is emitted, even if the caller supplied sizes.
func TestWriterDirAttributes(t *testing.T) {
	var buf bytes.Buffer
	w := NewWriter(&buf)
	if _, err := w.CreateHeader(&FileHeader{
		Name:               "dir/",
		Method:             Deflate,
		CompressedSize64:   1234,
		UncompressedSize64: 5678,
	}); err != nil {
		t.Fatal(err)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}
	b := buf.Bytes()

	var sig [4]byte
	binary.LittleEndian.PutUint32(sig[:], uint32(fileHeaderSignature))

	idx := bytes.Index(b, sig[:])
	if idx == -1 {
		t.Fatal("file header not found")
	}
	b = b[idx:]

	if !bytes.Equal(b[6:10], []byte{0, 0, 0, 0}) { // FileHeader.Flags: 0, FileHeader.Method: 0
		t.Errorf("unexpected method and flags: %v", b[6:10])
	}

	if !bytes.Equal(b[14:26], make([]byte, 12)) { // FileHeader.{CRC32,CompressSize,UncompressedSize} all zero.
		t.Errorf("unexpected crc, compress and uncompressed size to be 0 was: %v", b[14:26])
	}

	binary.LittleEndian.PutUint32(sig[:], uint32(dataDescriptorSignature))
	if bytes.Contains(b, sig[:]) {
		t.Error("there should be no data descriptor")
	}
}
370
+
371
// TestWriterCopy builds an archive, then copies each entry raw (without
// recompression) into a second archive via Writer.Copy and verifies the
// result still reads back correctly.
func TestWriterCopy(t *testing.T) {
	// make a zip file
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	for _, wt := range writeTests {
		testCreate(t, w, &wt)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	src, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range writeTests {
		testReadFile(t, src.File[i], &wt)
	}

	// make a new zip file copying the old compressed data.
	buf2 := new(bytes.Buffer)
	dst := NewWriter(buf2)
	for _, f := range src.File {
		if err := dst.Copy(f); err != nil {
			t.Fatal(err)
		}
	}
	if err := dst.Close(); err != nil {
		t.Fatal(err)
	}

	// read the new one back
	r, err := NewReader(bytes.NewReader(buf2.Bytes()), int64(buf2.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range writeTests {
		testReadFile(t, r.File[i], &wt)
	}
}
412
+
413
// TestWriterCreateRaw feeds pre-compressed bytes through CreateRaw (with
// caller-supplied CRC and sizes) and verifies headers and decompressed
// contents on read-back — one Store entry with a data descriptor, one
// Deflate entry without.
func TestWriterCreateRaw(t *testing.T) {
	files := []struct {
		name             string
		content          []byte
		method           uint16
		flags            uint16
		crc32            uint32
		uncompressedSize uint64
		compressedSize   uint64
	}{
		{
			name:    "small store w desc",
			content: []byte("gophers"),
			method:  Store,
			flags:   0x8,
		},
		{
			name:    "small deflate wo desc",
			content: bytes.Repeat([]byte("abcdefg"), 2048),
			method:  Deflate,
		},
	}

	// write a zip file
	archive := new(bytes.Buffer)
	w := NewWriter(archive)

	for i := range files {
		f := &files[i]
		f.crc32 = crc32.ChecksumIEEE(f.content)
		size := uint64(len(f.content))
		f.uncompressedSize = size
		f.compressedSize = size

		// For Deflate, compress by hand since CreateRaw won't.
		var compressedContent []byte
		if f.method == Deflate {
			var buf bytes.Buffer
			w, err := flate.NewWriter(&buf, flate.BestSpeed)
			if err != nil {
				t.Fatalf("flate.NewWriter err = %v", err)
			}
			_, err = w.Write(f.content)
			if err != nil {
				t.Fatalf("flate Write err = %v", err)
			}
			err = w.Close()
			if err != nil {
				t.Fatalf("flate Writer.Close err = %v", err)
			}
			compressedContent = buf.Bytes()
			f.compressedSize = uint64(len(compressedContent))
		}

		h := &FileHeader{
			Name:               f.name,
			Method:             f.method,
			Flags:              f.flags,
			CRC32:              f.crc32,
			CompressedSize64:   f.compressedSize,
			UncompressedSize64: f.uncompressedSize,
		}
		w, err := w.CreateRaw(h)
		if err != nil {
			t.Fatal(err)
		}
		if compressedContent != nil {
			_, err = w.Write(compressedContent)
		} else {
			_, err = w.Write(f.content)
		}
		if err != nil {
			t.Fatalf("%s Write got %v; want nil", f.name, err)
		}
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	r, err := NewReader(bytes.NewReader(archive.Bytes()), int64(archive.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, want := range files {
		got := r.File[i]
		if got.Name != want.name {
			t.Errorf("got Name %s; want %s", got.Name, want.name)
		}
		if got.Method != want.method {
			t.Errorf("%s: got Method %#x; want %#x", want.name, got.Method, want.method)
		}
		if got.Flags != want.flags {
			t.Errorf("%s: got Flags %#x; want %#x", want.name, got.Flags, want.flags)
		}
		if got.CRC32 != want.crc32 {
			t.Errorf("%s: got CRC32 %#x; want %#x", want.name, got.CRC32, want.crc32)
		}
		if got.CompressedSize64 != want.compressedSize {
			t.Errorf("%s: got CompressedSize64 %d; want %d", want.name, got.CompressedSize64, want.compressedSize)
		}
		if got.UncompressedSize64 != want.uncompressedSize {
			t.Errorf("%s: got UncompressedSize64 %d; want %d", want.name, got.UncompressedSize64, want.uncompressedSize)
		}

		r, err := got.Open()
		if err != nil {
			t.Errorf("%s: Open err = %v", got.Name, err)
			continue
		}

		buf, err := io.ReadAll(r)
		if err != nil {
			t.Errorf("%s: ReadAll err = %v", got.Name, err)
			continue
		}

		if !bytes.Equal(buf, want.content) {
			t.Errorf("%v: ReadAll returned unexpected bytes", got.Name)
		}
	}
}
535
+
536
// testCreate writes one WriteTest entry into w via CreateHeader,
// encoding wt.Mode when it is non-zero.
func testCreate(t *testing.T, w *Writer, wt *WriteTest) {
	header := &FileHeader{
		Name:   wt.Name,
		Method: wt.Method,
	}
	if wt.Mode != 0 {
		header.SetMode(wt.Mode)
	}
	f, err := w.CreateHeader(header)
	if err != nil {
		t.Fatal(err)
	}
	_, err = f.Write(wt.Data)
	if err != nil {
		t.Fatal(err)
	}
}
553
+
554
// testReadFile checks one archive entry against its WriteTest: name,
// mode bits, and full contents.
func testReadFile(t *testing.T, f *File, wt *WriteTest) {
	if f.Name != wt.Name {
		t.Fatalf("File name: got %q, want %q", f.Name, wt.Name)
	}
	testFileMode(t, f, wt.Mode)
	rc, err := f.Open()
	if err != nil {
		t.Fatalf("opening %s: %v", f.Name, err)
	}
	b, err := io.ReadAll(rc)
	if err != nil {
		t.Fatalf("reading %s: %v", f.Name, err)
	}
	err = rc.Close()
	if err != nil {
		t.Fatalf("closing %s: %v", f.Name, err)
	}
	if !bytes.Equal(b, wt.Data) {
		t.Errorf("File contents %q, want %q", b, wt.Data)
	}
}
575
+
576
// BenchmarkCompressedZipGarbage measures allocations when repeatedly
// deflating a 1 MiB payload into fresh archives, in parallel.
func BenchmarkCompressedZipGarbage(b *testing.B) {
	bigBuf := bytes.Repeat([]byte("a"), 1<<20)

	runOnce := func(buf *bytes.Buffer) {
		buf.Reset()
		zw := NewWriter(buf)
		for j := 0; j < 3; j++ {
			w, _ := zw.CreateHeader(&FileHeader{
				Name:   "foo",
				Method: Deflate,
			})
			w.Write(bigBuf)
		}
		zw.Close()
	}

	b.ReportAllocs()
	// Run once and then reset the timer.
	// This effectively discards the very large initial flate setup cost,
	// as well as the initialization of bigBuf.
	runOnce(&bytes.Buffer{})
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		var buf bytes.Buffer
		for pb.Next() {
			runOnce(&buf)
		}
	})
}
606
+
607
// writeTestsToFS converts a slice of WriteTest into an in-memory fs.FS
// for exercising Writer.AddFS.
func writeTestsToFS(tests []WriteTest) fs.FS {
	fsys := fstest.MapFS{}
	for _, wt := range tests {
		fsys[wt.Name] = &fstest.MapFile{
			Data: wt.Data,
			Mode: wt.Mode,
		}
	}
	return fsys
}
617
+
618
// TestWriterAddFS adds a small in-memory filesystem (including a nested
// path) via AddFS and verifies the entries round-trip.
func TestWriterAddFS(t *testing.T) {
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	tests := []WriteTest{
		{
			Name: "file.go",
			Data: []byte("hello"),
			Mode: 0644,
		},
		{
			Name: "subfolder/another.go",
			Data: []byte("world"),
			Mode: 0644,
		},
	}
	err := w.AddFS(writeTestsToFS(tests))
	if err != nil {
		t.Fatal(err)
	}

	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read it back
	r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		t.Fatal(err)
	}
	for i, wt := range tests {
		testReadFile(t, r.File[i], &wt)
	}
}
651
+
652
// TestIssue61875 checks that AddFS rejects non-regular files
// (symlinks, devices) rather than silently archiving them.
func TestIssue61875(t *testing.T) {
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	tests := []WriteTest{
		{
			Name:   "symlink",
			Data:   []byte("../link/target"),
			Method: Deflate,
			Mode:   0755 | fs.ModeSymlink,
		},
		{
			Name:   "device",
			Data:   []byte(""),
			Method: Deflate,
			Mode:   0755 | fs.ModeDevice,
		},
	}
	err := w.AddFS(writeTestsToFS(tests))
	if err == nil {
		t.Errorf("expected error, got nil")
	}
}
platform/dbops/binaries/go/go/src/archive/zip/zip_test.go ADDED
@@ -0,0 +1,821 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2011 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ // Tests that involve both reading and writing.
6
+
7
+ package zip
8
+
9
+ import (
10
+ "bytes"
11
+ "errors"
12
+ "fmt"
13
+ "hash"
14
+ "internal/testenv"
15
+ "io"
16
+ "runtime"
17
+ "sort"
18
+ "strings"
19
+ "testing"
20
+ "time"
21
+ )
22
+
23
// TestOver65kFiles writes more than 1<<16 entries, forcing the zip64
// end-of-central-directory path, and verifies all names read back.
func TestOver65kFiles(t *testing.T) {
	if testing.Short() && testenv.Builder() == "" {
		t.Skip("skipping in short mode")
	}
	buf := new(strings.Builder)
	w := NewWriter(buf)
	const nFiles = (1 << 16) + 42
	for i := 0; i < nFiles; i++ {
		_, err := w.CreateHeader(&FileHeader{
			Name:   fmt.Sprintf("%d.dat", i),
			Method: Store, // Deflate is too slow when it is compiled with -race flag
		})
		if err != nil {
			t.Fatalf("creating file %d: %v", i, err)
		}
	}
	if err := w.Close(); err != nil {
		t.Fatalf("Writer.Close: %v", err)
	}
	s := buf.String()
	zr, err := NewReader(strings.NewReader(s), int64(len(s)))
	if err != nil {
		t.Fatalf("NewReader: %v", err)
	}
	if got := len(zr.File); got != nFiles {
		t.Fatalf("File contains %d files, want %d", got, nFiles)
	}
	for i := 0; i < nFiles; i++ {
		want := fmt.Sprintf("%d.dat", i)
		if zr.File[i].Name != want {
			t.Fatalf("File(%d) = %q, want %q", i, zr.File[i].Name, want)
		}
	}
}
57
+
58
+ func TestModTime(t *testing.T) {
59
+ var testTime = time.Date(2009, time.November, 10, 23, 45, 58, 0, time.UTC)
60
+ fh := new(FileHeader)
61
+ fh.SetModTime(testTime)
62
+ outTime := fh.ModTime()
63
+ if !outTime.Equal(testTime) {
64
+ t.Errorf("times don't match: got %s, want %s", outTime, testTime)
65
+ }
66
+ }
67
+
68
+ func testHeaderRoundTrip(fh *FileHeader, wantUncompressedSize uint32, wantUncompressedSize64 uint64, t *testing.T) {
69
+ fi := fh.FileInfo()
70
+ fh2, err := FileInfoHeader(fi)
71
+ if err != nil {
72
+ t.Fatal(err)
73
+ }
74
+ if got, want := fh2.Name, fh.Name; got != want {
75
+ t.Errorf("Name: got %s, want %s\n", got, want)
76
+ }
77
+ if got, want := fh2.UncompressedSize, wantUncompressedSize; got != want {
78
+ t.Errorf("UncompressedSize: got %d, want %d\n", got, want)
79
+ }
80
+ if got, want := fh2.UncompressedSize64, wantUncompressedSize64; got != want {
81
+ t.Errorf("UncompressedSize64: got %d, want %d\n", got, want)
82
+ }
83
+ if got, want := fh2.ModifiedTime, fh.ModifiedTime; got != want {
84
+ t.Errorf("ModifiedTime: got %d, want %d\n", got, want)
85
+ }
86
+ if got, want := fh2.ModifiedDate, fh.ModifiedDate; got != want {
87
+ t.Errorf("ModifiedDate: got %d, want %d\n", got, want)
88
+ }
89
+
90
+ if sysfh, ok := fi.Sys().(*FileHeader); !ok && sysfh != fh {
91
+ t.Errorf("Sys didn't return original *FileHeader")
92
+ }
93
+ }
94
+
95
+ func TestFileHeaderRoundTrip(t *testing.T) {
96
+ fh := &FileHeader{
97
+ Name: "foo.txt",
98
+ UncompressedSize: 987654321,
99
+ ModifiedTime: 1234,
100
+ ModifiedDate: 5678,
101
+ }
102
+ testHeaderRoundTrip(fh, fh.UncompressedSize, uint64(fh.UncompressedSize), t)
103
+ }
104
+
105
+ func TestFileHeaderRoundTrip64(t *testing.T) {
106
+ fh := &FileHeader{
107
+ Name: "foo.txt",
108
+ UncompressedSize64: 9876543210,
109
+ ModifiedTime: 1234,
110
+ ModifiedDate: 5678,
111
+ }
112
+ testHeaderRoundTrip(fh, uint32max, fh.UncompressedSize64, t)
113
+ }
114
+
115
+ func TestFileHeaderRoundTripModified(t *testing.T) {
116
+ fh := &FileHeader{
117
+ Name: "foo.txt",
118
+ UncompressedSize: 987654321,
119
+ Modified: time.Now().Local(),
120
+ ModifiedTime: 1234,
121
+ ModifiedDate: 5678,
122
+ }
123
+ fi := fh.FileInfo()
124
+ fh2, err := FileInfoHeader(fi)
125
+ if err != nil {
126
+ t.Fatal(err)
127
+ }
128
+ if got, want := fh2.Modified, fh.Modified.UTC(); got != want {
129
+ t.Errorf("Modified: got %s, want %s\n", got, want)
130
+ }
131
+ if got, want := fi.ModTime(), fh.Modified.UTC(); got != want {
132
+ t.Errorf("Modified: got %s, want %s\n", got, want)
133
+ }
134
+ }
135
+
136
+ func TestFileHeaderRoundTripWithoutModified(t *testing.T) {
137
+ fh := &FileHeader{
138
+ Name: "foo.txt",
139
+ UncompressedSize: 987654321,
140
+ ModifiedTime: 1234,
141
+ ModifiedDate: 5678,
142
+ }
143
+ fi := fh.FileInfo()
144
+ fh2, err := FileInfoHeader(fi)
145
+ if err != nil {
146
+ t.Fatal(err)
147
+ }
148
+ if got, want := fh2.ModTime(), fh.ModTime(); got != want {
149
+ t.Errorf("Modified: got %s, want %s\n", got, want)
150
+ }
151
+ if got, want := fi.ModTime(), fh.ModTime(); got != want {
152
+ t.Errorf("Modified: got %s, want %s\n", got, want)
153
+ }
154
+ }
155
+
156
+ type repeatedByte struct {
157
+ off int64
158
+ b byte
159
+ n int64
160
+ }
161
+
162
+ // rleBuffer is a run-length-encoded byte buffer.
163
+ // It's an io.Writer (like a bytes.Buffer) and also an io.ReaderAt,
164
+ // allowing random-access reads.
165
+ type rleBuffer struct {
166
+ buf []repeatedByte
167
+ }
168
+
169
+ func (r *rleBuffer) Size() int64 {
170
+ if len(r.buf) == 0 {
171
+ return 0
172
+ }
173
+ last := &r.buf[len(r.buf)-1]
174
+ return last.off + last.n
175
+ }
176
+
177
+ func (r *rleBuffer) Write(p []byte) (n int, err error) {
178
+ var rp *repeatedByte
179
+ if len(r.buf) > 0 {
180
+ rp = &r.buf[len(r.buf)-1]
181
+ // Fast path, if p is entirely the same byte repeated.
182
+ if lastByte := rp.b; len(p) > 0 && p[0] == lastByte {
183
+ if bytes.Count(p, []byte{lastByte}) == len(p) {
184
+ rp.n += int64(len(p))
185
+ return len(p), nil
186
+ }
187
+ }
188
+ }
189
+
190
+ for _, b := range p {
191
+ if rp == nil || rp.b != b {
192
+ r.buf = append(r.buf, repeatedByte{r.Size(), b, 1})
193
+ rp = &r.buf[len(r.buf)-1]
194
+ } else {
195
+ rp.n++
196
+ }
197
+ }
198
+ return len(p), nil
199
+ }
200
+
201
+ func memset(a []byte, b byte) {
202
+ if len(a) == 0 {
203
+ return
204
+ }
205
+ // Double, until we reach power of 2 >= len(a), same as bytes.Repeat,
206
+ // but without allocation.
207
+ a[0] = b
208
+ for i, l := 1, len(a); i < l; i *= 2 {
209
+ copy(a[i:], a[:i])
210
+ }
211
+ }
212
+
213
+ func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
214
+ if len(p) == 0 {
215
+ return
216
+ }
217
+ skipParts := sort.Search(len(r.buf), func(i int) bool {
218
+ part := &r.buf[i]
219
+ return part.off+part.n > off
220
+ })
221
+ parts := r.buf[skipParts:]
222
+ if len(parts) > 0 {
223
+ skipBytes := off - parts[0].off
224
+ for _, part := range parts {
225
+ repeat := int(min(part.n-skipBytes, int64(len(p)-n)))
226
+ memset(p[n:n+repeat], part.b)
227
+ n += repeat
228
+ if n == len(p) {
229
+ return
230
+ }
231
+ skipBytes = 0
232
+ }
233
+ }
234
+ if n != len(p) {
235
+ err = io.ErrUnexpectedEOF
236
+ }
237
+ return
238
+ }
239
+
240
+ // Just testing the rleBuffer used in the Zip64 test above. Not used by the zip code.
241
+ func TestRLEBuffer(t *testing.T) {
242
+ b := new(rleBuffer)
243
+ var all []byte
244
+ writes := []string{"abcdeee", "eeeeeee", "eeeefghaaiii"}
245
+ for _, w := range writes {
246
+ b.Write([]byte(w))
247
+ all = append(all, w...)
248
+ }
249
+ if len(b.buf) != 10 {
250
+ t.Fatalf("len(b.buf) = %d; want 10", len(b.buf))
251
+ }
252
+
253
+ for i := 0; i < len(all); i++ {
254
+ for j := 0; j < len(all)-i; j++ {
255
+ buf := make([]byte, j)
256
+ n, err := b.ReadAt(buf, int64(i))
257
+ if err != nil || n != len(buf) {
258
+ t.Errorf("ReadAt(%d, %d) = %d, %v; want %d, nil", i, j, n, err, len(buf))
259
+ }
260
+ if !bytes.Equal(buf, all[i:i+j]) {
261
+ t.Errorf("ReadAt(%d, %d) = %q; want %q", i, j, buf, all[i:i+j])
262
+ }
263
+ }
264
+ }
265
+ }
266
+
267
+ // fakeHash32 is a dummy Hash32 that always returns 0.
268
+ type fakeHash32 struct {
269
+ hash.Hash32
270
+ }
271
+
272
+ func (fakeHash32) Write(p []byte) (int, error) { return len(p), nil }
273
+ func (fakeHash32) Sum32() uint32 { return 0 }
274
+
275
+ func TestZip64(t *testing.T) {
276
+ if testing.Short() {
277
+ t.Skip("slow test; skipping")
278
+ }
279
+ t.Parallel()
280
+ const size = 1 << 32 // before the "END\n" part
281
+ buf := testZip64(t, size)
282
+ testZip64DirectoryRecordLength(buf, t)
283
+ }
284
+
285
+ func TestZip64EdgeCase(t *testing.T) {
286
+ if testing.Short() {
287
+ t.Skip("slow test; skipping")
288
+ }
289
+ t.Parallel()
290
+ // Test a zip file with uncompressed size 0xFFFFFFFF.
291
+ // That's the magic marker for a 64-bit file, so even though
292
+ // it fits in a 32-bit field we must use the 64-bit field.
293
+ // Go 1.5 and earlier got this wrong,
294
+ // writing an invalid zip file.
295
+ const size = 1<<32 - 1 - int64(len("END\n")) // before the "END\n" part
296
+ buf := testZip64(t, size)
297
+ testZip64DirectoryRecordLength(buf, t)
298
+ }
299
+
300
+ // Tests that we generate a zip64 file if the directory at offset
301
+ // 0xFFFFFFFF, but not before.
302
+ func TestZip64DirectoryOffset(t *testing.T) {
303
+ if testing.Short() {
304
+ t.Skip("skipping in short mode")
305
+ }
306
+ t.Parallel()
307
+ const filename = "huge.txt"
308
+ gen := func(wantOff uint64) func(*Writer) {
309
+ return func(w *Writer) {
310
+ w.testHookCloseSizeOffset = func(size, off uint64) {
311
+ if off != wantOff {
312
+ t.Errorf("central directory offset = %d (%x); want %d", off, off, wantOff)
313
+ }
314
+ }
315
+ f, err := w.CreateHeader(&FileHeader{
316
+ Name: filename,
317
+ Method: Store,
318
+ })
319
+ if err != nil {
320
+ t.Fatal(err)
321
+ }
322
+ f.(*fileWriter).crc32 = fakeHash32{}
323
+ size := wantOff - fileHeaderLen - uint64(len(filename)) - dataDescriptorLen
324
+ if _, err := io.CopyN(f, zeros{}, int64(size)); err != nil {
325
+ t.Fatal(err)
326
+ }
327
+ if err := w.Close(); err != nil {
328
+ t.Fatal(err)
329
+ }
330
+ }
331
+ }
332
+ t.Run("uint32max-2_NoZip64", func(t *testing.T) {
333
+ t.Parallel()
334
+ if generatesZip64(t, gen(0xfffffffe)) {
335
+ t.Error("unexpected zip64")
336
+ }
337
+ })
338
+ t.Run("uint32max-1_Zip64", func(t *testing.T) {
339
+ t.Parallel()
340
+ if !generatesZip64(t, gen(0xffffffff)) {
341
+ t.Error("expected zip64")
342
+ }
343
+ })
344
+ }
345
+
346
+ // At 16k records, we need to generate a zip64 file.
347
+ func TestZip64ManyRecords(t *testing.T) {
348
+ if testing.Short() {
349
+ t.Skip("skipping in short mode")
350
+ }
351
+ t.Parallel()
352
+ gen := func(numRec int) func(*Writer) {
353
+ return func(w *Writer) {
354
+ for i := 0; i < numRec; i++ {
355
+ _, err := w.CreateHeader(&FileHeader{
356
+ Name: "a.txt",
357
+ Method: Store,
358
+ })
359
+ if err != nil {
360
+ t.Fatal(err)
361
+ }
362
+ }
363
+ if err := w.Close(); err != nil {
364
+ t.Fatal(err)
365
+ }
366
+ }
367
+ }
368
+ // 16k-1 records shouldn't make a zip64:
369
+ t.Run("uint16max-1_NoZip64", func(t *testing.T) {
370
+ t.Parallel()
371
+ if generatesZip64(t, gen(0xfffe)) {
372
+ t.Error("unexpected zip64")
373
+ }
374
+ })
375
+ // 16k records should make a zip64:
376
+ t.Run("uint16max_Zip64", func(t *testing.T) {
377
+ t.Parallel()
378
+ if !generatesZip64(t, gen(0xffff)) {
379
+ t.Error("expected zip64")
380
+ }
381
+ })
382
+ }
383
+
384
+ // suffixSaver is an io.Writer & io.ReaderAt that remembers the last 0
385
+ // to 'keep' bytes of data written to it. Call Suffix to get the
386
+ // suffix bytes.
387
+ type suffixSaver struct {
388
+ keep int
389
+ buf []byte
390
+ start int
391
+ size int64
392
+ }
393
+
394
+ func (ss *suffixSaver) Size() int64 { return ss.size }
395
+
396
+ var errDiscardedBytes = errors.New("ReadAt of discarded bytes")
397
+
398
+ func (ss *suffixSaver) ReadAt(p []byte, off int64) (n int, err error) {
399
+ back := ss.size - off
400
+ if back > int64(ss.keep) {
401
+ return 0, errDiscardedBytes
402
+ }
403
+ suf := ss.Suffix()
404
+ n = copy(p, suf[len(suf)-int(back):])
405
+ if n != len(p) {
406
+ err = io.EOF
407
+ }
408
+ return
409
+ }
410
+
411
+ func (ss *suffixSaver) Suffix() []byte {
412
+ if len(ss.buf) < ss.keep {
413
+ return ss.buf
414
+ }
415
+ buf := make([]byte, ss.keep)
416
+ n := copy(buf, ss.buf[ss.start:])
417
+ copy(buf[n:], ss.buf[:])
418
+ return buf
419
+ }
420
+
421
+ func (ss *suffixSaver) Write(p []byte) (n int, err error) {
422
+ n = len(p)
423
+ ss.size += int64(len(p))
424
+ if len(ss.buf) < ss.keep {
425
+ space := ss.keep - len(ss.buf)
426
+ add := len(p)
427
+ if add > space {
428
+ add = space
429
+ }
430
+ ss.buf = append(ss.buf, p[:add]...)
431
+ p = p[add:]
432
+ }
433
+ for len(p) > 0 {
434
+ n := copy(ss.buf[ss.start:], p)
435
+ p = p[n:]
436
+ ss.start += n
437
+ if ss.start == ss.keep {
438
+ ss.start = 0
439
+ }
440
+ }
441
+ return
442
+ }
443
+
444
+ // generatesZip64 reports whether f wrote a zip64 file.
445
+ // f is also responsible for closing w.
446
+ func generatesZip64(t *testing.T, f func(w *Writer)) bool {
447
+ ss := &suffixSaver{keep: 10 << 20}
448
+ w := NewWriter(ss)
449
+ f(w)
450
+ return suffixIsZip64(t, ss)
451
+ }
452
+
453
+ type sizedReaderAt interface {
454
+ io.ReaderAt
455
+ Size() int64
456
+ }
457
+
458
+ func suffixIsZip64(t *testing.T, zip sizedReaderAt) bool {
459
+ d := make([]byte, 1024)
460
+ if _, err := zip.ReadAt(d, zip.Size()-int64(len(d))); err != nil {
461
+ t.Fatalf("ReadAt: %v", err)
462
+ }
463
+
464
+ sigOff := findSignatureInBlock(d)
465
+ if sigOff == -1 {
466
+ t.Errorf("failed to find signature in block")
467
+ return false
468
+ }
469
+
470
+ dirOff, err := findDirectory64End(zip, zip.Size()-int64(len(d))+int64(sigOff))
471
+ if err != nil {
472
+ t.Fatalf("findDirectory64End: %v", err)
473
+ }
474
+ if dirOff == -1 {
475
+ return false
476
+ }
477
+
478
+ d = make([]byte, directory64EndLen)
479
+ if _, err := zip.ReadAt(d, dirOff); err != nil {
480
+ t.Fatalf("ReadAt(off=%d): %v", dirOff, err)
481
+ }
482
+
483
+ b := readBuf(d)
484
+ if sig := b.uint32(); sig != directory64EndSignature {
485
+ return false
486
+ }
487
+
488
+ size := b.uint64()
489
+ if size != directory64EndLen-12 {
490
+ t.Errorf("expected length of %d, got %d", directory64EndLen-12, size)
491
+ }
492
+ return true
493
+ }
494
+
495
+ // Zip64 is required if the total size of the records is uint32max.
496
+ func TestZip64LargeDirectory(t *testing.T) {
497
+ if runtime.GOARCH == "wasm" {
498
+ t.Skip("too slow on wasm")
499
+ }
500
+ if testing.Short() {
501
+ t.Skip("skipping in short mode")
502
+ }
503
+ t.Parallel()
504
+ // gen returns a func that writes a zip with a wantLen bytes
505
+ // of central directory.
506
+ gen := func(wantLen int64) func(*Writer) {
507
+ return func(w *Writer) {
508
+ w.testHookCloseSizeOffset = func(size, off uint64) {
509
+ if size != uint64(wantLen) {
510
+ t.Errorf("Close central directory size = %d; want %d", size, wantLen)
511
+ }
512
+ }
513
+
514
+ uint16string := strings.Repeat(".", uint16max)
515
+ remain := wantLen
516
+ for remain > 0 {
517
+ commentLen := int(uint16max) - directoryHeaderLen - 1
518
+ thisRecLen := directoryHeaderLen + int(uint16max) + commentLen
519
+ if int64(thisRecLen) > remain {
520
+ remove := thisRecLen - int(remain)
521
+ commentLen -= remove
522
+ thisRecLen -= remove
523
+ }
524
+ remain -= int64(thisRecLen)
525
+ f, err := w.CreateHeader(&FileHeader{
526
+ Name: uint16string,
527
+ Comment: uint16string[:commentLen],
528
+ })
529
+ if err != nil {
530
+ t.Fatalf("CreateHeader: %v", err)
531
+ }
532
+ f.(*fileWriter).crc32 = fakeHash32{}
533
+ }
534
+ if err := w.Close(); err != nil {
535
+ t.Fatalf("Close: %v", err)
536
+ }
537
+ }
538
+ }
539
+ t.Run("uint32max-1_NoZip64", func(t *testing.T) {
540
+ t.Parallel()
541
+ if generatesZip64(t, gen(uint32max-1)) {
542
+ t.Error("unexpected zip64")
543
+ }
544
+ })
545
+ t.Run("uint32max_HasZip64", func(t *testing.T) {
546
+ t.Parallel()
547
+ if !generatesZip64(t, gen(uint32max)) {
548
+ t.Error("expected zip64")
549
+ }
550
+ })
551
+ }
552
+
553
+ func testZip64(t testing.TB, size int64) *rleBuffer {
554
+ const chunkSize = 1024
555
+ chunks := int(size / chunkSize)
556
+ // write size bytes plus "END\n" to a zip file
557
+ buf := new(rleBuffer)
558
+ w := NewWriter(buf)
559
+ f, err := w.CreateHeader(&FileHeader{
560
+ Name: "huge.txt",
561
+ Method: Store,
562
+ })
563
+ if err != nil {
564
+ t.Fatal(err)
565
+ }
566
+ f.(*fileWriter).crc32 = fakeHash32{}
567
+ chunk := make([]byte, chunkSize)
568
+ for i := range chunk {
569
+ chunk[i] = '.'
570
+ }
571
+ for i := 0; i < chunks; i++ {
572
+ _, err := f.Write(chunk)
573
+ if err != nil {
574
+ t.Fatal("write chunk:", err)
575
+ }
576
+ }
577
+ if frag := int(size % chunkSize); frag > 0 {
578
+ _, err := f.Write(chunk[:frag])
579
+ if err != nil {
580
+ t.Fatal("write chunk:", err)
581
+ }
582
+ }
583
+ end := []byte("END\n")
584
+ _, err = f.Write(end)
585
+ if err != nil {
586
+ t.Fatal("write end:", err)
587
+ }
588
+ if err := w.Close(); err != nil {
589
+ t.Fatal(err)
590
+ }
591
+
592
+ // read back zip file and check that we get to the end of it
593
+ r, err := NewReader(buf, buf.Size())
594
+ if err != nil {
595
+ t.Fatal("reader:", err)
596
+ }
597
+ f0 := r.File[0]
598
+ rc, err := f0.Open()
599
+ if err != nil {
600
+ t.Fatal("opening:", err)
601
+ }
602
+ rc.(*checksumReader).hash = fakeHash32{}
603
+ for i := 0; i < chunks; i++ {
604
+ _, err := io.ReadFull(rc, chunk)
605
+ if err != nil {
606
+ t.Fatal("read:", err)
607
+ }
608
+ }
609
+ if frag := int(size % chunkSize); frag > 0 {
610
+ _, err := io.ReadFull(rc, chunk[:frag])
611
+ if err != nil {
612
+ t.Fatal("read:", err)
613
+ }
614
+ }
615
+ gotEnd, err := io.ReadAll(rc)
616
+ if err != nil {
617
+ t.Fatal("read end:", err)
618
+ }
619
+ if !bytes.Equal(gotEnd, end) {
620
+ t.Errorf("End of zip64 archive %q, want %q", gotEnd, end)
621
+ }
622
+ err = rc.Close()
623
+ if err != nil {
624
+ t.Fatal("closing:", err)
625
+ }
626
+ if size+int64(len("END\n")) >= 1<<32-1 {
627
+ if got, want := f0.UncompressedSize, uint32(uint32max); got != want {
628
+ t.Errorf("UncompressedSize %#x, want %#x", got, want)
629
+ }
630
+ }
631
+
632
+ if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want {
633
+ t.Errorf("UncompressedSize64 %#x, want %#x", got, want)
634
+ }
635
+
636
+ return buf
637
+ }
638
+
639
+ // Issue 9857
640
+ func testZip64DirectoryRecordLength(buf *rleBuffer, t *testing.T) {
641
+ if !suffixIsZip64(t, buf) {
642
+ t.Fatal("not a zip64")
643
+ }
644
+ }
645
+
646
+ func testValidHeader(h *FileHeader, t *testing.T) {
647
+ var buf bytes.Buffer
648
+ z := NewWriter(&buf)
649
+
650
+ f, err := z.CreateHeader(h)
651
+ if err != nil {
652
+ t.Fatalf("error creating header: %v", err)
653
+ }
654
+ if _, err := f.Write([]byte("hi")); err != nil {
655
+ t.Fatalf("error writing content: %v", err)
656
+ }
657
+ if err := z.Close(); err != nil {
658
+ t.Fatalf("error closing zip writer: %v", err)
659
+ }
660
+
661
+ b := buf.Bytes()
662
+ zf, err := NewReader(bytes.NewReader(b), int64(len(b)))
663
+ if err != nil {
664
+ t.Fatalf("got %v, expected nil", err)
665
+ }
666
+ zh := zf.File[0].FileHeader
667
+ if zh.Name != h.Name || zh.Method != h.Method || zh.UncompressedSize64 != uint64(len("hi")) {
668
+ t.Fatalf("got %q/%d/%d expected %q/%d/%d", zh.Name, zh.Method, zh.UncompressedSize64, h.Name, h.Method, len("hi"))
669
+ }
670
+ }
671
+
672
+ // Issue 4302.
673
+ func TestHeaderInvalidTagAndSize(t *testing.T) {
674
+ const timeFormat = "20060102T150405.000.txt"
675
+
676
+ ts := time.Now()
677
+ filename := ts.Format(timeFormat)
678
+
679
+ h := FileHeader{
680
+ Name: filename,
681
+ Method: Deflate,
682
+ Extra: []byte(ts.Format(time.RFC3339Nano)), // missing tag and len, but Extra is best-effort parsing
683
+ }
684
+ h.SetModTime(ts)
685
+
686
+ testValidHeader(&h, t)
687
+ }
688
+
689
+ func TestHeaderTooShort(t *testing.T) {
690
+ h := FileHeader{
691
+ Name: "foo.txt",
692
+ Method: Deflate,
693
+ Extra: []byte{zip64ExtraID}, // missing size and second half of tag, but Extra is best-effort parsing
694
+ }
695
+ testValidHeader(&h, t)
696
+ }
697
+
698
+ func TestHeaderTooLongErr(t *testing.T) {
699
+ var headerTests = []struct {
700
+ name string
701
+ extra []byte
702
+ wanterr error
703
+ }{
704
+ {
705
+ name: strings.Repeat("x", 1<<16),
706
+ extra: []byte{},
707
+ wanterr: errLongName,
708
+ },
709
+ {
710
+ name: "long_extra",
711
+ extra: bytes.Repeat([]byte{0xff}, 1<<16),
712
+ wanterr: errLongExtra,
713
+ },
714
+ }
715
+
716
+ // write a zip file
717
+ buf := new(bytes.Buffer)
718
+ w := NewWriter(buf)
719
+
720
+ for _, test := range headerTests {
721
+ h := &FileHeader{
722
+ Name: test.name,
723
+ Extra: test.extra,
724
+ }
725
+ _, err := w.CreateHeader(h)
726
+ if err != test.wanterr {
727
+ t.Errorf("error=%v, want %v", err, test.wanterr)
728
+ }
729
+ }
730
+
731
+ if err := w.Close(); err != nil {
732
+ t.Fatal(err)
733
+ }
734
+ }
735
+
736
+ func TestHeaderIgnoredSize(t *testing.T) {
737
+ h := FileHeader{
738
+ Name: "foo.txt",
739
+ Method: Deflate,
740
+ Extra: []byte{zip64ExtraID & 0xFF, zip64ExtraID >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted
741
+ }
742
+ testValidHeader(&h, t)
743
+ }
744
+
745
+ // Issue 4393. It is valid to have an extra data header
746
+ // which contains no body.
747
+ func TestZeroLengthHeader(t *testing.T) {
748
+ h := FileHeader{
749
+ Name: "extadata.txt",
750
+ Method: Deflate,
751
+ Extra: []byte{
752
+ 85, 84, 5, 0, 3, 154, 144, 195, 77, // tag 21589 size 5
753
+ 85, 120, 0, 0, // tag 30805 size 0
754
+ },
755
+ }
756
+ testValidHeader(&h, t)
757
+ }
758
+
759
+ // Just benchmarking how fast the Zip64 test above is. Not related to
760
+ // our zip performance, since the test above disabled CRC32 and flate.
761
+ func BenchmarkZip64Test(b *testing.B) {
762
+ for i := 0; i < b.N; i++ {
763
+ testZip64(b, 1<<26)
764
+ }
765
+ }
766
+
767
+ func BenchmarkZip64TestSizes(b *testing.B) {
768
+ for _, size := range []int64{1 << 12, 1 << 20, 1 << 26} {
769
+ b.Run(fmt.Sprint(size), func(b *testing.B) {
770
+ b.RunParallel(func(pb *testing.PB) {
771
+ for pb.Next() {
772
+ testZip64(b, size)
773
+ }
774
+ })
775
+ })
776
+ }
777
+ }
778
+
779
+ func TestSuffixSaver(t *testing.T) {
780
+ const keep = 10
781
+ ss := &suffixSaver{keep: keep}
782
+ ss.Write([]byte("abc"))
783
+ if got := string(ss.Suffix()); got != "abc" {
784
+ t.Errorf("got = %q; want abc", got)
785
+ }
786
+ ss.Write([]byte("defghijklmno"))
787
+ if got := string(ss.Suffix()); got != "fghijklmno" {
788
+ t.Errorf("got = %q; want fghijklmno", got)
789
+ }
790
+ if got, want := ss.Size(), int64(len("abc")+len("defghijklmno")); got != want {
791
+ t.Errorf("Size = %d; want %d", got, want)
792
+ }
793
+ buf := make([]byte, ss.Size())
794
+ for off := int64(0); off < ss.Size(); off++ {
795
+ for size := 1; size <= int(ss.Size()-off); size++ {
796
+ readBuf := buf[:size]
797
+ n, err := ss.ReadAt(readBuf, off)
798
+ if off < ss.Size()-keep {
799
+ if err != errDiscardedBytes {
800
+ t.Errorf("off %d, size %d = %v, %v (%q); want errDiscardedBytes", off, size, n, err, readBuf[:n])
801
+ }
802
+ continue
803
+ }
804
+ want := "abcdefghijklmno"[off : off+int64(size)]
805
+ got := string(readBuf[:n])
806
+ if err != nil || got != want {
807
+ t.Errorf("off %d, size %d = %v, %v (%q); want %q", off, size, n, err, got, want)
808
+ }
809
+ }
810
+ }
811
+
812
+ }
813
+
814
+ type zeros struct{}
815
+
816
+ func (zeros) Read(p []byte) (int, error) {
817
+ for i := range p {
818
+ p[i] = 0
819
+ }
820
+ return len(p), nil
821
+ }
platform/dbops/binaries/go/go/src/runtime/msan0.go ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2015 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ //go:build !msan
6
+
7
+ // Dummy MSan support API, used when not built with -msan.
8
+
9
+ package runtime
10
+
11
+ import (
12
+ "unsafe"
13
+ )
14
+
15
+ const msanenabled = false
16
+
17
+ // Because msanenabled is false, none of these functions should be called.
18
+
19
+ func msanread(addr unsafe.Pointer, sz uintptr) { throw("msan") }
20
+ func msanwrite(addr unsafe.Pointer, sz uintptr) { throw("msan") }
21
+ func msanmalloc(addr unsafe.Pointer, sz uintptr) { throw("msan") }
22
+ func msanfree(addr unsafe.Pointer, sz uintptr) { throw("msan") }
23
+ func msanmove(dst, src unsafe.Pointer, sz uintptr) { throw("msan") }
platform/dbops/binaries/go/go/src/runtime/msan_amd64.s ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2015 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ //go:build msan
6
+
7
+ #include "go_asm.h"
8
+ #include "go_tls.h"
9
+ #include "funcdata.h"
10
+ #include "textflag.h"
11
+
12
+ // This is like race_amd64.s, but for the msan calls.
13
+ // See race_amd64.s for detailed comments.
14
+
15
+ #ifdef GOOS_windows
16
+ #define RARG0 CX
17
+ #define RARG1 DX
18
+ #define RARG2 R8
19
+ #define RARG3 R9
20
+ #else
21
+ #define RARG0 DI
22
+ #define RARG1 SI
23
+ #define RARG2 DX
24
+ #define RARG3 CX
25
+ #endif
26
+
27
+ // func runtime·domsanread(addr unsafe.Pointer, sz uintptr)
28
+ // Called from msanread.
29
+ TEXT runtime·domsanread(SB), NOSPLIT, $0-16
30
+ MOVQ addr+0(FP), RARG0
31
+ MOVQ sz+8(FP), RARG1
32
+ // void __msan_read_go(void *addr, uintptr_t sz);
33
+ MOVQ $__msan_read_go(SB), AX
34
+ JMP msancall<>(SB)
35
+
36
+ // func runtime·msanwrite(addr unsafe.Pointer, sz uintptr)
37
+ // Called from instrumented code.
38
+ TEXT runtime·msanwrite(SB), NOSPLIT, $0-16
39
+ MOVQ addr+0(FP), RARG0
40
+ MOVQ sz+8(FP), RARG1
41
+ // void __msan_write_go(void *addr, uintptr_t sz);
42
+ MOVQ $__msan_write_go(SB), AX
43
+ JMP msancall<>(SB)
44
+
45
+ // func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr)
46
+ TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16
47
+ MOVQ addr+0(FP), RARG0
48
+ MOVQ sz+8(FP), RARG1
49
+ // void __msan_malloc_go(void *addr, uintptr_t sz);
50
+ MOVQ $__msan_malloc_go(SB), AX
51
+ JMP msancall<>(SB)
52
+
53
+ // func runtime·msanfree(addr unsafe.Pointer, sz uintptr)
54
+ TEXT runtime·msanfree(SB), NOSPLIT, $0-16
55
+ MOVQ addr+0(FP), RARG0
56
+ MOVQ sz+8(FP), RARG1
57
+ // void __msan_free_go(void *addr, uintptr_t sz);
58
+ MOVQ $__msan_free_go(SB), AX
59
+ JMP msancall<>(SB)
60
+
61
+ // func runtime·msanmove(dst, src unsafe.Pointer, sz uintptr)
62
+ TEXT runtime·msanmove(SB), NOSPLIT, $0-24
63
+ MOVQ dst+0(FP), RARG0
64
+ MOVQ src+8(FP), RARG1
65
+ MOVQ sz+16(FP), RARG2
66
+ // void __msan_memmove(void *dst, void *src, uintptr_t sz);
67
+ MOVQ $__msan_memmove(SB), AX
68
+ JMP msancall<>(SB)
69
+
70
+ // Switches SP to g0 stack and calls (AX). Arguments already set.
71
+ TEXT msancall<>(SB), NOSPLIT, $0-0
72
+ get_tls(R12)
73
+ MOVQ g(R12), R14
74
+ MOVQ SP, R12 // callee-saved, preserved across the CALL
75
+ CMPQ R14, $0
76
+ JE call // no g; still on a system stack
77
+
78
+ MOVQ g_m(R14), R13
79
+ // Switch to g0 stack.
80
+ MOVQ m_g0(R13), R10
81
+ CMPQ R10, R14
82
+ JE call // already on g0
83
+
84
+ MOVQ (g_sched+gobuf_sp)(R10), SP
85
+ call:
86
+ ANDQ $~15, SP // alignment for gcc ABI
87
+ CALL AX
88
+ MOVQ R12, SP
89
+ RET
platform/dbops/binaries/go/go/src/runtime/msan_arm64.s ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2018 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ //go:build msan
6
+
7
+ #include "go_asm.h"
8
+ #include "textflag.h"
9
+
10
+ #define RARG0 R0
11
+ #define RARG1 R1
12
+ #define RARG2 R2
13
+ #define FARG R3
14
+
15
+ // func runtime·domsanread(addr unsafe.Pointer, sz uintptr)
16
+ // Called from msanread.
17
+ TEXT runtime·domsanread(SB), NOSPLIT, $0-16
18
+ MOVD addr+0(FP), RARG0
19
+ MOVD sz+8(FP), RARG1
20
+ // void __msan_read_go(void *addr, uintptr_t sz);
21
+ MOVD $__msan_read_go(SB), FARG
22
+ JMP msancall<>(SB)
23
+
24
+ // func runtime·msanwrite(addr unsafe.Pointer, sz uintptr)
25
+ // Called from instrumented code.
26
+ TEXT runtime·msanwrite(SB), NOSPLIT, $0-16
27
+ MOVD addr+0(FP), RARG0
28
+ MOVD sz+8(FP), RARG1
29
+ // void __msan_write_go(void *addr, uintptr_t sz);
30
+ MOVD $__msan_write_go(SB), FARG
31
+ JMP msancall<>(SB)
32
+
33
+ // func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr)
34
+ TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16
35
+ MOVD addr+0(FP), RARG0
36
+ MOVD sz+8(FP), RARG1
37
+ // void __msan_malloc_go(void *addr, uintptr_t sz);
38
+ MOVD $__msan_malloc_go(SB), FARG
39
+ JMP msancall<>(SB)
40
+
41
+ // func runtime·msanfree(addr unsafe.Pointer, sz uintptr)
42
+ TEXT runtime·msanfree(SB), NOSPLIT, $0-16
43
+ MOVD addr+0(FP), RARG0
44
+ MOVD sz+8(FP), RARG1
45
+ // void __msan_free_go(void *addr, uintptr_t sz);
46
+ MOVD $__msan_free_go(SB), FARG
47
+ JMP msancall<>(SB)
48
+
49
+ // func runtime·msanmove(dst, src unsafe.Pointer, sz uintptr)
50
+ TEXT runtime·msanmove(SB), NOSPLIT, $0-24
51
+ MOVD dst+0(FP), RARG0
52
+ MOVD src+8(FP), RARG1
53
+ MOVD sz+16(FP), RARG2
54
+ // void __msan_memmove(void *dst, void *src, uintptr_t sz);
55
+ MOVD $__msan_memmove(SB), FARG
56
+ JMP msancall<>(SB)
57
+
58
+ // Switches SP to g0 stack and calls (FARG). Arguments already set.
59
+ TEXT msancall<>(SB), NOSPLIT, $0-0
60
+ MOVD RSP, R19 // callee-saved
61
+ CBZ g, g0stack // no g, still on a system stack
62
+ MOVD g_m(g), R10
63
+ MOVD m_g0(R10), R11
64
+ CMP R11, g
65
+ BEQ g0stack
66
+
67
+ MOVD (g_sched+gobuf_sp)(R11), R4
68
+ MOVD R4, RSP
69
+
70
+ g0stack:
71
+ BL (FARG)
72
+ MOVD R19, RSP
73
+ RET
platform/dbops/binaries/go/go/src/runtime/msan_loong64.s ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2023 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ //go:build msan
6
+
7
+ #include "go_asm.h"
8
+ #include "textflag.h"
9
+
10
+ #define RARG0 R4
11
+ #define RARG1 R5
12
+ #define RARG2 R6
13
+ #define FARG R7
14
+
15
+ // func runtime·domsanread(addr unsafe.Pointer, sz uintptr)
16
+ // Called from msanread.
17
+ TEXT runtime·domsanread(SB), NOSPLIT, $0-16
18
+ MOVV addr+0(FP), RARG0
19
+ MOVV sz+8(FP), RARG1
20
+ // void __msan_read_go(void *addr, uintptr_t sz);
21
+ MOVV $__msan_read_go(SB), FARG
22
+ JMP msancall<>(SB)
23
+
24
+ // func runtime·msanwrite(addr unsafe.Pointer, sz uintptr)
25
+ // Called from instrumented code.
26
+ TEXT runtime·msanwrite(SB), NOSPLIT, $0-16
27
+ MOVV addr+0(FP), RARG0
28
+ MOVV sz+8(FP), RARG1
29
+ // void __msan_write_go(void *addr, uintptr_t sz);
30
+ MOVV $__msan_write_go(SB), FARG
31
+ JMP msancall<>(SB)
32
+
33
+ // func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr)
34
+ TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16
35
+ MOVV addr+0(FP), RARG0
36
+ MOVV sz+8(FP), RARG1
37
+ // void __msan_malloc_go(void *addr, uintptr_t sz);
38
+ MOVV $__msan_malloc_go(SB), FARG
39
+ JMP msancall<>(SB)
40
+
41
+ // func runtime·msanfree(addr unsafe.Pointer, sz uintptr)
42
+ TEXT runtime·msanfree(SB), NOSPLIT, $0-16
43
+ MOVV addr+0(FP), RARG0
44
+ MOVV sz+8(FP), RARG1
45
+ // void __msan_free_go(void *addr, uintptr_t sz);
46
+ MOVV $__msan_free_go(SB), FARG
47
+ JMP msancall<>(SB)
48
+
49
+ // func runtime·msanmove(dst, src unsafe.Pointer, sz uintptr)
50
+ TEXT runtime·msanmove(SB), NOSPLIT, $0-24
51
+ MOVV dst+0(FP), RARG0
52
+ MOVV src+8(FP), RARG1
53
+ MOVV sz+16(FP), RARG2
54
+ // void __msan_memmove(void *dst, void *src, uintptr_t sz);
55
+ MOVV $__msan_memmove(SB), FARG
56
+ JMP msancall<>(SB)
57
+
58
+ // Switches SP to g0 stack and calls (FARG). Arguments already set.
59
+ TEXT msancall<>(SB), NOSPLIT, $0-0
60
+ MOVV R3, R23 // callee-saved
61
+ BEQ g, g0stack // no g, still on a system stack
62
+ MOVV g_m(g), R14
63
+ MOVV m_g0(R14), R15
64
+ BEQ R15, g, g0stack
65
+
66
+ MOVV (g_sched+gobuf_sp)(R15), R9
67
+ MOVV R9, R3
68
+
69
+ g0stack:
70
+ JAL (FARG)
71
+ MOVV R23, R3
72
+ RET
platform/dbops/binaries/go/go/src/runtime/msize_allocheaders.go ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2009 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ //go:build goexperiment.allocheaders
6
+
7
+ // Malloc small size classes.
8
+ //
9
+ // See malloc.go for overview.
10
+ // See also mksizeclasses.go for how we decide what size classes to use.
11
+
12
+ package runtime
13
+
14
+ // Returns size of the memory block that mallocgc will allocate if you ask for the size,
15
+ // minus any inline space for metadata.
16
+ func roundupsize(size uintptr, noscan bool) (reqSize uintptr) {
17
+ reqSize = size
18
+ if reqSize <= maxSmallSize-mallocHeaderSize {
19
+ // Small object.
20
+ if !noscan && reqSize > minSizeForMallocHeader { // !noscan && !heapBitsInSpan(reqSize)
21
+ reqSize += mallocHeaderSize
22
+ }
23
+ // (reqSize - size) is either mallocHeaderSize or 0. We need to subtract mallocHeaderSize
24
+ // from the result if we have one, since mallocgc will add it back in.
25
+ if reqSize <= smallSizeMax-8 {
26
+ return uintptr(class_to_size[size_to_class8[divRoundUp(reqSize, smallSizeDiv)]]) - (reqSize - size)
27
+ }
28
+ return uintptr(class_to_size[size_to_class128[divRoundUp(reqSize-smallSizeMax, largeSizeDiv)]]) - (reqSize - size)
29
+ }
30
+ // Large object. Align reqSize up to the next page. Check for overflow.
31
+ reqSize += pageSize - 1
32
+ if reqSize < size {
33
+ return size
34
+ }
35
+ return reqSize &^ (pageSize - 1)
36
+ }
platform/dbops/binaries/go/go/src/runtime/msize_noallocheaders.go ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2009 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ //go:build !goexperiment.allocheaders
6
+
7
+ // Malloc small size classes.
8
+ //
9
+ // See malloc.go for overview.
10
+ // See also mksizeclasses.go for how we decide what size classes to use.
11
+
12
+ package runtime
13
+
14
+ // Returns size of the memory block that mallocgc will allocate if you ask for the size.
15
+ //
16
+ // The noscan argument is purely for compatibility with goexperiment.AllocHeaders.
17
+ func roundupsize(size uintptr, noscan bool) uintptr {
18
+ if size < _MaxSmallSize {
19
+ if size <= smallSizeMax-8 {
20
+ return uintptr(class_to_size[size_to_class8[divRoundUp(size, smallSizeDiv)]])
21
+ } else {
22
+ return uintptr(class_to_size[size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]])
23
+ }
24
+ }
25
+ if size+_PageSize < size {
26
+ return size
27
+ }
28
+ return alignUp(size, _PageSize)
29
+ }
platform/dbops/binaries/go/go/src/runtime/mspanset.go ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2020 The Go Authors. All rights reserved.
2
+ // Use of this source code is governed by a BSD-style
3
+ // license that can be found in the LICENSE file.
4
+
5
+ package runtime
6
+
7
+ import (
8
+ "internal/cpu"
9
+ "internal/goarch"
10
+ "runtime/internal/atomic"
11
+ "unsafe"
12
+ )
13
+
14
// A spanSet is a set of *mspans.
//
// spanSet is safe for concurrent push and pop operations.
type spanSet struct {
	// A spanSet is a two-level data structure consisting of a
	// growable spine that points to fixed-sized blocks. The spine
	// can be accessed without locks, but adding a block or
	// growing it requires taking the spine lock.
	//
	// Because each mspan covers at least 8K of heap and takes at
	// most 8 bytes in the spanSet, the growth of the spine is
	// quite limited.
	//
	// The spine and all blocks are allocated off-heap, which
	// allows this to be used in the memory manager and avoids the
	// need for write barriers on all of these. spanSetBlocks are
	// managed in a pool, though never freed back to the operating
	// system. We never release spine memory because there could be
	// concurrent lock-free access and we're likely to reuse it
	// anyway. (In principle, we could do this during STW.)

	spineLock mutex
	spine     atomicSpanSetSpinePointer // *[N]atomic.Pointer[spanSetBlock]
	spineLen  atomic.Uintptr            // Spine array length
	spineCap  uintptr                   // Spine array cap, accessed under spineLock

	// index is the head and tail of the spanSet in a single field.
	// The head and the tail both represent an index into the logical
	// concatenation of all blocks, with the head always behind or
	// equal to the tail (indicating an empty set). This field is
	// always accessed atomically.
	//
	// The head and the tail are only 32 bits wide, which means we
	// can only support up to 2^32 pushes before a reset. If every
	// span in the heap were stored in this set, and each span were
	// the minimum size (1 runtime page, 8 KiB), then roughly the
	// smallest heap which would be unrepresentable is 32 TiB in size.
	index atomicHeadTailIndex
}
53
+
54
const (
	// Sizing constants for the spanSet two-level structure.
	spanSetBlockEntries = 512 // 4KB on 64-bit
	spanSetInitSpineCap = 256 // Enough for 1GB heap on 64-bit
)
58
+
59
// spanSetBlock is one fixed-size leaf of a spanSet, holding
// spanSetBlockEntries span pointers.
type spanSetBlock struct {
	// Free spanSetBlocks are managed via a lock-free stack.
	lfnode

	// popped is the number of pop operations that have occurred on
	// this block. This number is used to help determine when a block
	// may be safely recycled.
	popped atomic.Uint32

	// spans is the set of spans in this block.
	spans [spanSetBlockEntries]atomicMSpanPointer
}
71
+
72
// push adds span s to buffer b. push is safe to call concurrently
// with other push and pop operations.
func (b *spanSet) push(s *mspan) {
	// Obtain our slot by atomically bumping the tail; cursor is the
	// logical index we own across the concatenation of all blocks.
	cursor := uintptr(b.index.incTail().tail() - 1)
	top, bottom := cursor/spanSetBlockEntries, cursor%spanSetBlockEntries

	// Do we need to add a block?
	spineLen := b.spineLen.Load()
	var block *spanSetBlock
retry:
	if top < spineLen {
		// Fast path: the block already exists.
		block = b.spine.Load().lookup(top).Load()
	} else {
		// Add a new block to the spine, potentially growing
		// the spine.
		lock(&b.spineLock)
		// spineLen cannot change until we release the lock,
		// but may have changed while we were waiting.
		spineLen = b.spineLen.Load()
		if top < spineLen {
			// Someone else added our block while we waited.
			unlock(&b.spineLock)
			goto retry
		}

		spine := b.spine.Load()
		if spineLen == b.spineCap {
			// Grow the spine.
			newCap := b.spineCap * 2
			if newCap == 0 {
				newCap = spanSetInitSpineCap
			}
			newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
			if b.spineCap != 0 {
				// Blocks are allocated off-heap, so
				// no write barriers.
				memmove(newSpine, spine.p, b.spineCap*goarch.PtrSize)
			}
			spine = spanSetSpinePointer{newSpine}

			// Spine is allocated off-heap, so no write barrier.
			b.spine.StoreNoWB(spine)
			b.spineCap = newCap
			// We can't immediately free the old spine
			// since a concurrent push with a lower index
			// could still be reading from it. We let it
			// leak because even a 1TB heap would waste
			// less than 2MB of memory on old spines. If
			// this is a problem, we could free old spines
			// during STW.
		}

		// Allocate a new block from the pool.
		block = spanSetBlockPool.alloc()

		// Add it to the spine.
		// Blocks are allocated off-heap, so no write barrier.
		spine.lookup(top).StoreNoWB(block)
		// Publish the new length only after the block pointer is set,
		// so concurrent readers that see the length see the block.
		b.spineLen.Store(spineLen + 1)
		unlock(&b.spineLock)
	}

	// We have a block. Insert the span atomically, since there may be
	// concurrent readers via the block API.
	block.spans[bottom].StoreNoWB(s)
}
+
139
// pop removes and returns a span from buffer b, or nil if b is empty.
// pop is safe to call concurrently with other pop and push operations.
func (b *spanSet) pop() *mspan {
	var head, tail uint32
claimLoop:
	for {
		headtail := b.index.load()
		head, tail = headtail.split()
		if head >= tail {
			// The buf is empty, as far as we can tell.
			return nil
		}
		// Check if the head position we want to claim is actually
		// backed by a block.
		spineLen := b.spineLen.Load()
		if spineLen <= uintptr(head)/spanSetBlockEntries {
			// We're racing with a spine growth and the allocation of
			// a new block (and maybe a new spine!), and trying to grab
			// the span at the index which is currently being pushed.
			// Instead of spinning, let's just notify the caller that
			// there's nothing currently here. Spinning on this is
			// almost definitely not worth it.
			return nil
		}
		// Try to claim the current head by CASing in an updated head.
		// This may fail transiently due to a push which modifies the
		// tail, so keep trying while the head isn't changing.
		want := head
		for want == head {
			if b.index.cas(headtail, makeHeadTailIndex(want+1, tail)) {
				break claimLoop
			}
			headtail = b.index.load()
			head, tail = headtail.split()
		}
		// We failed to claim the spot we were after and the head changed,
		// meaning a popper got ahead of us. Try again from the top because
		// the buf may not be empty.
	}
	// Translate the claimed logical index into (block, slot).
	top, bottom := head/spanSetBlockEntries, head%spanSetBlockEntries

	// We may be reading a stale spine pointer, but because the length
	// grows monotonically and we've already verified it, we'll definitely
	// be reading from a valid block.
	blockp := b.spine.Load().lookup(uintptr(top))

	// Given that the spine length is correct, we know we will never
	// see a nil block here, since the length is always updated after
	// the block is set.
	block := blockp.Load()
	s := block.spans[bottom].Load()
	for s == nil {
		// We raced with the span actually being set, but given that we
		// know a block for this span exists, the race window here is
		// extremely small. Try again.
		s = block.spans[bottom].Load()
	}
	// Clear the pointer. This isn't strictly necessary, but defensively
	// avoids accidentally re-using blocks which could lead to memory
	// corruption. This way, we'll get a nil pointer access instead.
	block.spans[bottom].StoreNoWB(nil)

	// Increase the popped count. If we are the last possible popper
	// in the block (note that bottom need not equal spanSetBlockEntries-1
	// due to races) then it's our responsibility to free the block.
	//
	// If we increment popped to spanSetBlockEntries, we can be sure that
	// we're the last popper for this block, and it's thus safe to free it.
	// Every other popper must have crossed this barrier (and thus finished
	// popping its corresponding mspan) by the time we get here. Because
	// we're the last popper, we also don't have to worry about concurrent
	// pushers (there can't be any). Note that we may not be the popper
	// which claimed the last slot in the block, we're just the last one
	// to finish popping.
	if block.popped.Add(1) == spanSetBlockEntries {
		// Clear the block's pointer.
		blockp.StoreNoWB(nil)

		// Return the block to the block pool.
		spanSetBlockPool.free(block)
	}
	return s
}
222
+
223
// reset resets a spanSet which is empty. It will also clean up
// any left over blocks.
//
// Throws if the buf is not empty.
//
// reset may not be called concurrently with any other operations
// on the span set.
func (b *spanSet) reset() {
	head, tail := b.index.load().split()
	if head < tail {
		print("head = ", head, ", tail = ", tail, "\n")
		throw("attempt to clear non-empty span set")
	}
	top := head / spanSetBlockEntries
	if uintptr(top) < b.spineLen.Load() {
		// If the head catches up to the tail and the set is empty,
		// we may not clean up the block containing the head and tail
		// since it may be pushed into again. In order to avoid leaking
		// memory since we're going to reset the head and tail, clean
		// up such a block now, if it exists.
		blockp := b.spine.Load().lookup(uintptr(top))
		block := blockp.Load()
		if block != nil {
			// Sanity-check the popped counter before recycling.
			if block.popped.Load() == 0 {
				// popped should never be zero because that means we have
				// pushed at least one value but not yet popped if this
				// block pointer is not nil.
				throw("span set block with unpopped elements found in reset")
			}
			if block.popped.Load() == spanSetBlockEntries {
				// popped should also never be equal to spanSetBlockEntries
				// because the last popper should have made the block pointer
				// in this slot nil.
				throw("fully empty unfreed span set block found in reset")
			}

			// Clear the pointer to the block.
			blockp.StoreNoWB(nil)

			// Return the block to the block pool.
			spanSetBlockPool.free(block)
		}
	}
	// Zero the head/tail index and the spine length; spine storage is
	// intentionally retained for reuse.
	b.index.reset()
	b.spineLen.Store(0)
}
270
+
271
// atomicSpanSetSpinePointer is an atomically-accessed spanSetSpinePointer.
//
// It has the same semantics as atomic.UnsafePointer.
type atomicSpanSetSpinePointer struct {
	a atomic.UnsafePointer
}

// Load atomically loads the spanSetSpinePointer and returns it.
//
// It has the same semantics as atomic.UnsafePointer.
func (s *atomicSpanSetSpinePointer) Load() spanSetSpinePointer {
	return spanSetSpinePointer{s.a.Load()}
}

// StoreNoWB atomically stores the spanSetSpinePointer without a write barrier.
//
// It has the same semantics as [atomic.UnsafePointer].
func (s *atomicSpanSetSpinePointer) StoreNoWB(p spanSetSpinePointer) {
	s.a.StoreNoWB(p.p)
}
291
+
292
// spanSetSpinePointer represents a pointer to a contiguous block of atomic.Pointer[spanSetBlock].
type spanSetSpinePointer struct {
	p unsafe.Pointer
}

// lookup returns &s[idx], computed by pointer arithmetic over the
// off-heap spine array.
func (s spanSetSpinePointer) lookup(idx uintptr) *atomic.Pointer[spanSetBlock] {
	return (*atomic.Pointer[spanSetBlock])(add(s.p, goarch.PtrSize*idx))
}
301
+
302
// spanSetBlockPool is a global pool of spanSetBlocks.
var spanSetBlockPool spanSetBlockAlloc

// spanSetBlockAlloc represents a concurrent pool of spanSetBlocks,
// backed by a lock-free stack.
type spanSetBlockAlloc struct {
	stack lfstack
}

// alloc tries to grab a spanSetBlock out of the pool, and if it fails
// persistentallocs a new one and returns it.
func (p *spanSetBlockAlloc) alloc() *spanSetBlock {
	if s := (*spanSetBlock)(p.stack.pop()); s != nil {
		return s
	}
	// Pool empty: allocate a fresh block off-heap, cache-line aligned.
	return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gcMiscSys))
}

// free returns a spanSetBlock back to the pool.
func (p *spanSetBlockAlloc) free(block *spanSetBlock) {
	// Reset the popped counter so the block can be reused.
	block.popped.Store(0)
	p.stack.push(&block.lfnode)
}
324
+
325
// headTailIndex represents a combined 32-bit head and 32-bit tail
// of a queue into a single 64-bit value: the head occupies the high
// 32 bits, the tail the low 32 bits.
type headTailIndex uint64

// makeHeadTailIndex creates a headTailIndex value from a separate
// head and tail.
func makeHeadTailIndex(head, tail uint32) headTailIndex {
	return (headTailIndex(head) << 32) | headTailIndex(tail)
}

// head returns the head of a headTailIndex value.
func (h headTailIndex) head() uint32 {
	return uint32(uint64(h) >> 32)
}

// tail returns the tail of a headTailIndex value.
func (h headTailIndex) tail() uint32 {
	return uint32(uint64(h))
}

// split splits the headTailIndex value into its parts.
func (h headTailIndex) split() (head uint32, tail uint32) {
	head, tail = h.head(), h.tail()
	return
}
349
+
350
// atomicHeadTailIndex is an atomically-accessed headTailIndex.
type atomicHeadTailIndex struct {
	u atomic.Uint64
}

// load atomically reads a headTailIndex value.
func (h *atomicHeadTailIndex) load() headTailIndex {
	return headTailIndex(h.u.Load())
}

// cas atomically compares-and-swaps a headTailIndex value.
func (h *atomicHeadTailIndex) cas(old, new headTailIndex) bool {
	return h.u.CompareAndSwap(uint64(old), uint64(new))
}

// incHead atomically increments the head of a headTailIndex.
// The head occupies the high 32 bits, hence the 1<<32 delta.
func (h *atomicHeadTailIndex) incHead() headTailIndex {
	return headTailIndex(h.u.Add(1 << 32))
}

// decHead atomically decrements the head of a headTailIndex.
func (h *atomicHeadTailIndex) decHead() headTailIndex {
	return headTailIndex(h.u.Add(-(1 << 32)))
}

// incTail atomically increments the tail of a headTailIndex.
// Throws if the tail (low 32 bits) wraps around to zero.
func (h *atomicHeadTailIndex) incTail() headTailIndex {
	ht := headTailIndex(h.u.Add(1))
	// Check for overflow.
	if ht.tail() == 0 {
		print("runtime: head = ", ht.head(), ", tail = ", ht.tail(), "\n")
		throw("headTailIndex overflow")
	}
	return ht
}

// reset clears the headTailIndex to (0, 0).
func (h *atomicHeadTailIndex) reset() {
	h.u.Store(0)
}
390
+
391
// atomicMSpanPointer is an atomic.Pointer[mspan]. Can't use generics because it's NotInHeap.
type atomicMSpanPointer struct {
	p atomic.UnsafePointer
}

// Load returns the *mspan.
func (p *atomicMSpanPointer) Load() *mspan {
	return (*mspan)(p.p.Load())
}

// StoreNoWB stores an *mspan without a write barrier.
func (p *atomicMSpanPointer) StoreNoWB(s *mspan) {
	p.p.StoreNoWB(unsafe.Pointer(s))
}