Kitxuuu commited on
Commit
296a4e2
·
verified ·
1 Parent(s): cf36d98

Add files using upload-large-folder tool

Browse files
Files changed (20) hide show
  1. local-test-commons-compress-delta-03/afc-commons-compress/src/site/resources/download_compress.cgi +20 -0
  2. local-test-commons-compress-delta-03/afc-commons-compress/src/site/resources/images/compress-logo-white.xcf +0 -0
  3. local-test-commons-compress-delta-03/afc-commons-compress/src/site/resources/profile.jacoco +16 -0
  4. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/conventions.xml +71 -0
  5. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/download_compress.xml +158 -0
  6. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/examples.xml +1295 -0
  7. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/index.xml +122 -0
  8. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/issue-tracking.xml +104 -0
  9. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/limitations.xml +259 -0
  10. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/mail-lists.xml +215 -0
  11. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/pack200.xml +91 -0
  12. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/security.xml +301 -0
  13. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/tar.xml +236 -0
  14. local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/zip.xml +645 -0
  15. local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/COMPRESS-379.jar +0 -0
  16. local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/COMPRESS-382 +0 -0
  17. local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/COMPRESS-386 +1 -0
  18. local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/bla-multi.7z.001 +0 -0
  19. local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/test1.xml +4 -0
  20. local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/test3.xml +10 -0
local-test-commons-compress-delta-03/afc-commons-compress/src/site/resources/download_compress.cgi ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/sh
2
+
3
+ # Licensed to the Apache Software Foundation (ASF) under one or more
4
+ # contributor license agreements. See the NOTICE file distributed with
5
+ # this work for additional information regarding copyright ownership.
6
+ # The ASF licenses this file to You under the Apache License, Version 2.0
7
+ # (the "License"); you may not use this file except in compliance with
8
+ # the License. You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ # Just call the standard mirrors.cgi script. It will use download.html
19
+ # as the input template.
20
+ exec /www/www.apache.org/dyn/mirrors/mirrors.cgi $*
local-test-commons-compress-delta-03/afc-commons-compress/src/site/resources/images/compress-logo-white.xcf ADDED
Binary file (25.8 kB). View file
 
local-test-commons-compress-delta-03/afc-commons-compress/src/site/resources/profile.jacoco ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Licensed to the Apache Software Foundation (ASF) under one
2
+ // or more contributor license agreements. See the NOTICE file
3
+ // distributed with this work for additional information
4
+ // regarding copyright ownership. The ASF licenses this file
5
+ // to you under the Apache License, Version 2.0 (the
6
+ // "License"); you may not use this file except in compliance
7
+ // with the License. You may obtain a copy of the License at
8
+ //
9
+ // https://www.apache.org/licenses/LICENSE-2.0
10
+ //
11
+ // Unless required by applicable law or agreed to in writing,
12
+ // software distributed under the License is distributed on an
13
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14
+ // KIND, either express or implied. See the License for the
15
+ // specific language governing permissions and limitations
16
+ // under the License.
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/conventions.xml ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+ Licensed to the Apache Software Foundation (ASF) under one or more
4
+ contributor license agreements. See the NOTICE file distributed with
5
+ this work for additional information regarding copyright ownership.
6
+ The ASF licenses this file to You under the Apache License, Version 2.0
7
+ (the "License"); you may not use this file except in compliance with
8
+ the License. You may obtain a copy of the License at
9
+
10
+ http://www.apache.org/licenses/LICENSE-2.0
11
+
12
+ Unless required by applicable law or agreed to in writing, software
13
+ distributed under the License is distributed on an "AS IS" BASIS,
14
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ See the License for the specific language governing permissions and
16
+ limitations under the License.
17
+ -->
18
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
19
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
20
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
21
+ <properties>
22
+ <title>Conventions</title>
23
+ </properties>
24
+
25
+ <body>
26
+ <section name="Code Style">
27
+ <p>
28
+ The developers of this component decided to follow the recommended standards
29
+ but not to include Checkstyle (or similar tools) into Commons Compress.
30
+ </p>
31
+ </section>
32
+ <section name="Multithreading">
33
+ <p>
34
+ Commons Compress does not aim to be threadsafe at the moment. But the developers
35
+ agreed to document multithreading behavior in the javadocs.
36
+ </p>
37
+ <p>
38
+ We use some of the annotations from
39
+ <a href="http://jcip.net/annotations/doc/net/jcip/annotations/package-summary.html">JCIP</a>
40
+ as Javadoc tags. The used tags are:
41
+ </p>
42
+ <ul>
43
+ <li>@GuardedBy (field or method)</li>
44
+ <li>@Immutable (class)</li>
45
+ <li>@NotThreadSafe (class)</li>
46
+ <li>@ThreadSafe (class)</li>
47
+ </ul>
48
+ <p>
49
+ For example:
50
+ <source>
51
+ /**
52
+ * Utility class that represents a four byte integer with conversion
53
+ * rules for the big endian byte order of ZIP files.
54
+ *
55
+ * @Immutable
56
+ */
57
+ public final class ZipLong implements Cloneable {
58
+ </source>
59
+
60
+ and:
61
+
62
+ <source>
63
+ private final char [] highChars;
64
+ //@GuardedBy("this")
65
+ private Simple8BitZipEncoding encoding;
66
+ </source>
67
+ </p>
68
+ </section>
69
+
70
+ </body>
71
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/download_compress.xml ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+ Licensed to the Apache Software Foundation (ASF) under one or more
4
+ contributor license agreements. See the NOTICE file distributed with
5
+ this work for additional information regarding copyright ownership.
6
+ The ASF licenses this file to You under the Apache License, Version 2.0
7
+ (the "License"); you may not use this file except in compliance with
8
+ the License. You may obtain a copy of the License at
9
+
10
+ https://www.apache.org/licenses/LICENSE-2.0
11
+
12
+ Unless required by applicable law or agreed to in writing, software
13
+ distributed under the License is distributed on an "AS IS" BASIS,
14
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ See the License for the specific language governing permissions and
16
+ limitations under the License.
17
+ -->
18
+ <!--
19
+ +======================================================================+
20
+ |**** ****|
21
+ |**** THIS FILE IS GENERATED BY THE COMMONS BUILD PLUGIN ****|
22
+ |**** DO NOT EDIT DIRECTLY ****|
23
+ |**** ****|
24
+ +======================================================================+
25
+ | TEMPLATE FILE: download-page-template.xml |
26
+ | commons-build-plugin/trunk/src/main/resources/commons-xdoc-templates |
27
+ +======================================================================+
28
+ | |
29
+ | 1) Re-generate using: mvn commons-build:download-page |
30
+ | |
31
+ | 2) Set the following properties in the component's pom: |
32
+ | - commons.componentid (required, alphabetic, lower case) |
33
+ | - commons.release.version (required) |
34
+ | - commons.release.name (required) |
35
+ | - commons.binary.suffix (optional) |
36
+ | (defaults to "-bin", set to "" for pre-maven2 releases) |
37
+ | - commons.release.desc (optional) |
38
+ | - commons.release.subdir (optional) |
39
+ | - commons.release.hash (optional, lowercase, default sha512) |
40
+ | |
41
+ | - commons.release.[234].version (conditional) |
42
+ | - commons.release.[234].name (conditional) |
43
+ | - commons.release.[234].binary.suffix (optional) |
44
+ | - commons.release.[234].desc (optional) |
45
+ | - commons.release.[234].subdir (optional) |
46
+ | - commons.release.[234].hash (optional, lowercase, [sha512])|
47
+ | |
48
+ | 3) Example Properties |
49
+ | (commons.release.name inherited by parent: |
50
+ | ${project.artifactId}-${commons.release.version} |
51
+ | |
52
+ | <properties> |
53
+ | <commons.componentid>math</commons.componentid> |
54
+ | <commons.release.version>1.2</commons.release.version> |
55
+ | </properties> |
56
+ | |
57
+ +======================================================================+
58
+ -->
59
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
60
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
61
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
62
+ <properties>
63
+ <title>Download Apache Commons Compress</title>
64
+ <author email="dev@commons.apache.org">Apache Commons Team</author>
65
+ </properties>
66
+ <body>
67
+ <section name="Download Apache Commons Compress">
68
+ <subsection name="Using a Mirror">
69
+ <p>
70
+ We recommend you use a mirror to download our release
71
+ builds, but you <strong>must</strong> <a href="https://www.apache.org/info/verification.html">verify the integrity</a> of
72
+ the downloaded files using signatures downloaded from our main
73
+ distribution directories. Recent releases (48 hours) may not yet
74
+ be available from all the mirrors.
75
+ </p>
76
+
77
+ <p>
78
+ You are currently using <b>[preferred]</b>. If you
79
+ encounter a problem with this mirror, please select another
80
+ mirror. If all mirrors are failing, there are <i>backup</i>
81
+ mirrors (at the end of the mirrors list) that should be
82
+ available.
83
+ <br></br>
84
+ [if-any logo]<a href="[link]"><img align="right" src="[logo]" border="0" alt="Logo"></img></a>[end]
85
+ </p>
86
+
87
+ <form action="[location]" method="get" id="SelectMirror">
88
+ <p>
89
+ Other mirrors:
90
+ <select name="Preferred">
91
+ [if-any http]
92
+ [for http]<option value="[http]">[http]</option>[end]
93
+ [end]
94
+ [if-any ftp]
95
+ [for ftp]<option value="[ftp]">[ftp]</option>[end]
96
+ [end]
97
+ [if-any backup]
98
+ [for backup]<option value="[backup]">[backup] (backup)</option>[end]
99
+ [end]
100
+ </select>
101
+ <input type="submit" value="Change"></input>
102
+ </p>
103
+ </form>
104
+
105
+ <p>
106
+ It is essential that you
107
+ <a href="https://www.apache.org/info/verification.html">verify the integrity</a>
108
+ of downloaded files, preferably using the <code>PGP</code> signature (<code>*.asc</code> files);
109
+ failing that using the <code>SHA512</code> hash (<code>*.sha512</code> checksum files).
110
+ </p>
111
+ <p>
112
+ The <a href="https://downloads.apache.org/commons/KEYS">KEYS</a>
113
+ file contains the public PGP keys used by Apache Commons developers
114
+ to sign releases.
115
+ </p>
116
+ </subsection>
117
+ </section>
118
+ <section name="Apache Commons Compress 1.28.0 ">
119
+ <subsection name="Binaries">
120
+ <table>
121
+ <tr>
122
+ <td><a href="[preferred]/commons/compress/binaries/commons-compress-1.28.0-bin.tar.gz">commons-compress-1.28.0-bin.tar.gz</a></td>
123
+ <td><a href="https://downloads.apache.org/commons/compress/binaries/commons-compress-1.28.0-bin.tar.gz.sha512">sha512</a></td>
124
+ <td><a href="https://downloads.apache.org/commons/compress/binaries/commons-compress-1.28.0-bin.tar.gz.asc">pgp</a></td>
125
+ </tr>
126
+ <tr>
127
+ <td><a href="[preferred]/commons/compress/binaries/commons-compress-1.28.0-bin.zip">commons-compress-1.28.0-bin.zip</a></td>
128
+ <td><a href="https://downloads.apache.org/commons/compress/binaries/commons-compress-1.28.0-bin.zip.sha512">sha512</a></td>
129
+ <td><a href="https://downloads.apache.org/commons/compress/binaries/commons-compress-1.28.0-bin.zip.asc">pgp</a></td>
130
+ </tr>
131
+ </table>
132
+ </subsection>
133
+ <subsection name="Source">
134
+ <table>
135
+ <tr>
136
+ <td><a href="[preferred]/commons/compress/source/commons-compress-1.28.0-src.tar.gz">commons-compress-1.28.0-src.tar.gz</a></td>
137
+ <td><a href="https://downloads.apache.org/commons/compress/source/commons-compress-1.28.0-src.tar.gz.sha512">sha512</a></td>
138
+ <td><a href="https://downloads.apache.org/commons/compress/source/commons-compress-1.28.0-src.tar.gz.asc">pgp</a></td>
139
+ </tr>
140
+ <tr>
141
+ <td><a href="[preferred]/commons/compress/source/commons-compress-1.28.0-src.zip">commons-compress-1.28.0-src.zip</a></td>
142
+ <td><a href="https://downloads.apache.org/commons/compress/source/commons-compress-1.28.0-src.zip.sha512">sha512</a></td>
143
+ <td><a href="https://downloads.apache.org/commons/compress/source/commons-compress-1.28.0-src.zip.asc">pgp</a></td>
144
+ </tr>
145
+ </table>
146
+ </subsection>
147
+ </section>
148
+ <section name="Archives">
149
+ <p>
150
+ Older releases can be obtained from the archives.
151
+ </p>
152
+ <ul>
153
+ <li class="download"><a href="[preferred]/commons/compress/">browse download area</a></li>
154
+ <li><a href="https://archive.apache.org/dist/commons/compress/">archives...</a></li>
155
+ </ul>
156
+ </section>
157
+ </body>
158
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/examples.xml ADDED
@@ -0,0 +1,1295 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+
4
+ Licensed to the Apache Software Foundation (ASF) under one or more
5
+ contributor license agreements. See the NOTICE file distributed with
6
+ this work for additional information regarding copyright ownership.
7
+ The ASF licenses this file to You under the Apache License, Version 2.0
8
+ (the "License"); you may not use this file except in compliance with
9
+ the License. You may obtain a copy of the License at
10
+
11
+ http://www.apache.org/licenses/LICENSE-2.0
12
+
13
+ Unless required by applicable law or agreed to in writing, software
14
+ distributed under the License is distributed on an "AS IS" BASIS,
15
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ See the License for the specific language governing permissions and
17
+ limitations under the License.
18
+
19
+ -->
20
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
21
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
22
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
23
+ <properties>
24
+ <title>Commons Compress User Guide</title>
25
+ <author email="dev@commons.apache.org">Apache Commons Team</author>
26
+ </properties>
27
+ <body>
28
+ <section name="General Notes">
29
+
30
+ <subsection name="Archivers and Compressors">
31
+ <p>Commons Compress calls all formats that compress a single
32
+ stream of data compressor formats while all formats that
33
+ collect multiple entries inside a single (potentially
34
+ compressed) archive are archiver formats.</p>
35
+
36
+ <p>The compressor formats supported are gzip, bzip2, XZ, LZMA,
37
+ Pack200, DEFLATE, Brotli, DEFLATE64, ZStandard and Z, the archiver formats are 7z, ar, arj,
38
+ cpio, dump, tar and zip. Pack200 is a special case as it can
39
+ only compress JAR files.</p>
40
+
41
+ <p>We currently only provide read support for arj,
42
+ dump, Brotli, DEFLATE64 and Z. arj can only read uncompressed archives, 7z can read
43
+ archives with many compression and encryption algorithms
44
+ supported by 7z but doesn't support encryption when writing
45
+ archives.</p>
46
+ </subsection>
47
+
48
+ <subsection name="Buffering">
49
+ <p>The stream classes all wrap around streams provided by the
50
+ calling code and they work on them directly without any
51
+ additional buffering. On the other hand most of them will
52
+ benefit from buffering so it is highly recommended that
53
+ users wrap their stream
54
+ in <code>Buffered<em>(In|Out)</em>putStream</code>s before
55
+ using the Commons Compress API.</p>
56
+
57
+ </subsection>
58
+
59
+ <subsection name="Factories">
60
+
61
+ <p>Compress provides factory methods to create input/output
62
+ streams based on the names of the compressor or archiver
63
+ format as well as factory methods that try to guess the
64
+ format of an input stream.</p>
65
+
66
+ <p>To create a compressor writing to a given output by using
67
+ the algorithm name:</p>
68
+ <source><![CDATA[
69
+ CompressorOutputStream gzippedOut = new CompressorStreamFactory()
70
+ .createCompressorOutputStream(CompressorStreamFactory.GZIP, myOutputStream);
71
+ ]]></source>
72
+
73
+ <p>Make the factory guess the input format for a given
74
+ archiver stream:</p>
75
+ <source><![CDATA[
76
+ ArchiveInputStream input = new ArchiveStreamFactory()
77
+ .createArchiveInputStream(originalInput);
78
+ ]]></source>
79
+
80
+ <p>Make the factory guess the input format for a given
81
+ compressor stream:</p>
82
+ <source><![CDATA[
83
+ CompressorInputStream input = new CompressorStreamFactory()
84
+ .createCompressorInputStream(originalInput);
85
+ ]]></source>
86
+
87
+ <p>Note that there is no way to detect the LZMA or Brotli formats so only
88
+ the two-arg version of
89
+ <code>createCompressorInputStream</code> can be used. Prior
90
+ to Compress 1.9 the .Z format hasn't been auto-detected
91
+ either.</p>
92
+
93
+ </subsection>
94
+
95
+ <subsection name="Restricting Memory Usage">
96
+ <p>Starting with Compress 1.14
97
+ <code>CompressorStreamFactory</code> has an optional
98
+ constructor argument that can be used to set an upper limit of
99
+ memory that may be used while decompressing or compressing a
100
+ stream. As of 1.14 this setting only affects decompressing Z,
101
+ XZ and LZMA compressed streams.</p>
102
+ <p>Since Compress 1.19 <code>SevenZFile</code> also has an
103
+ optional constructor to pass an upper memory limit which is supported
104
+ in LZMA compressed streams. Since Compress 1.21 this setting
105
+ also is taken into account when reading the metadata of an archive.</p>
106
+ <p>For the Snappy and LZ4 formats the amount of memory used
107
+ during compression is directly proportional to the window
108
+ size.</p>
109
+ </subsection>
110
+
111
+ <subsection name="Statistics">
112
+ <p>Starting with Compress 1.17 most of the
113
+ <code>CompressorInputStream</code> implementations as well as
114
+ <code>ZipArchiveInputStream</code> and all streams returned by
115
+ <code>ZipFile.getInputStream</code> implement the
116
+ <code>InputStreamStatistics</code>
117
+ interface. <code>SevenZFile</code> provides statistics for the
118
+ current entry via the
119
+ <code>getStatisticsForCurrentEntry</code> method. This
120
+ interface can be used to track progress while extracting a
121
+ stream or to detect potential <a
122
+ href="https://en.wikipedia.org/wiki/Zip_bomb">zip bombs</a>
123
+ when the compression ratio becomes suspiciously large.</p>
124
+ </subsection>
125
+
126
+ </section>
127
+ <section name="Archivers">
128
+
129
+ <subsection name="Unsupported Features">
130
+ <p>Many of the supported formats have developed different
131
+ dialects and extensions and some formats allow for features
132
+ (not yet) supported by Commons Compress.</p>
133
+
134
+ <p>The <code>ArchiveInputStream</code> class provides a method
135
+ <code>canReadEntryData</code> that will return false if
136
+ Commons Compress can detect that an archive uses a feature
137
+ that is not supported by the current implementation. If it
138
+ returns false you should not try to read the entry but skip
139
+ over it.</p>
140
+
141
+ </subsection>
142
+
143
+ <subsection name="Entry Names">
144
+ <p>All archive formats provide meta data about the individual
145
+ archive entries via instances of <code>ArchiveEntry</code> (or
146
+ rather subclasses of it). When reading from an archive the
147
+ information provided the <code>getName</code> method is the
148
+ raw name as stored inside of the archive. There is no
149
+ guarantee the name represents a relative file name or even a
150
+ valid file name on your target operating system at all. You
151
+ should double check the outcome when you try to create file
152
+ names from entry names.</p>
153
+ </subsection>
154
+
155
+ <subsection name="Common Extraction Logic">
156
+ <p>Apart from 7z all formats provide a subclass of
157
+ <code>ArchiveInputStream</code> that can be used to create an
158
+ archive. For 7z <code>SevenZFile</code> provides a similar API
159
+ that does not represent a stream as our implementation
160
+ requires random access to the input and cannot be used for
161
+ general streams. The ZIP implementation can benefit a lot from
162
+ random access as well, see the <a
163
+ href="zip.html#ZipArchiveInputStream_vs_ZipFile">zip
164
+ page</a> for details.</p>
165
+
166
+ <p>Assuming you want to extract an archive to a target
167
+ directory you'd call <code>getNextEntry</code>, verify the
168
+ entry can be read, construct a sane file name from the entry's
169
+ name, create a <code>File</code> and write all contents to
170
+ it - here <code>IOUtils.copy</code> may come handy. You do so
171
+ for every entry until <code>getNextEntry</code> returns
172
+ <code>null</code>.</p>
173
+
174
+ <p>A skeleton might look like:</p>
175
+
176
+ <source><![CDATA[
177
+ File targetDir = ...
178
+ try (ArchiveInputStream i = ... create the stream for your format, use buffering...) {
179
+ ArchiveEntry entry = null;
180
+ while ((entry = i.getNextEntry()) != null) {
181
+ if (!i.canReadEntryData(entry)) {
182
+ // log something?
183
+ continue;
184
+ }
185
+ String name = fileName(targetDir, entry);
186
+ File f = new File(name);
187
+ if (entry.isDirectory()) {
188
+ if (!f.isDirectory() && !f.mkdirs()) {
189
+ throw new IOException("failed to create directory " + f);
190
+ }
191
+ } else {
192
+ File parent = f.getParentFile();
193
+ if (!parent.isDirectory() && !parent.mkdirs()) {
194
+ throw new IOException("failed to create directory " + parent);
195
+ }
196
+ try (OutputStream o = Files.newOutputStream(f.toPath())) {
197
+ IOUtils.copy(i, o);
198
+ }
199
+ }
200
+ }
201
+ }
202
+ ]]></source>
203
+
204
+ <p>where the hypothetical <code>fileName</code> method is
205
+ written by you and provides the absolute name for the file
206
+ that is going to be written on disk. Here you should perform
207
+ checks that ensure the resulting file name actually is a valid
208
+ file name on your operating system or belongs to a file inside
209
+ of <code>targetDir</code> when using the entry's name as
210
+ input.</p>
211
+
212
+ <p>If you want to combine an archive format with a compression
213
+ format - like when reading a "tar.gz" file - you wrap the
214
+ <code>ArchiveInputStream</code> around
215
+ <code>CompressorInputStream</code> for example:</p>
216
+
217
+ <source><![CDATA[
218
+ try (InputStream fi = Files.newInputStream(Paths.get("my.tar.gz"));
219
+ InputStream bi = new BufferedInputStream(fi);
220
+ InputStream gzi = new GzipCompressorInputStream(bi);
221
+ ArchiveInputStream o = new TarArchiveInputStream(gzi)) {
222
+ }
223
+ ]]></source>
224
+
225
+ </subsection>
226
+
227
+ <subsection name="Common Archival Logic">
228
+ <p>Apart from 7z all formats that support writing provide a
229
+ subclass of <code>ArchiveOutputStream</code> that can be used
230
+ to create an archive. For 7z <code>SevenZOutputFile</code>
231
+ provides a similar API that does not represent a stream as our
232
+ implementation requires random access to the output and cannot
233
+ be used for general streams. The
234
+ <code>ZipArchiveOutputStream</code> class will benefit from
235
+ random access as well but can be used for non-seekable streams
236
+ - but not all features will be available and the archive size
237
+ might be slightly bigger, see <a
238
+ href="zip.html#ZipArchiveOutputStream">the zip page</a> for
239
+ details.</p>
240
+
241
+ <p>Assuming you want to add a collection of files to an
242
+ archive, you can first use <code>createArchiveEntry</code> for
243
+ each file. In general this will set a few flags (usually the
244
+ last modified time, the size and the information whether this
245
+ is a file or directory) based on the <code>File</code> or <code>Path</code>
246
+ instance. Alternatively you can create the
247
+ <code>ArchiveEntry</code> subclass corresponding to your
248
+ format directly. Often you may want to set additional flags
249
+ like file permissions or owner information before adding the
250
+ entry to the archive.</p>
251
+
252
+ <p>Next you use <code>putArchiveEntry</code> in order to add
253
+ the entry and then start using <code>write</code> to add the
254
+ content of the entry - here <code>IOUtils.copy</code> may
255
+ come handy. Finally you invoke
256
+ <code>closeArchiveEntry</code> once you've written all content
257
+ and before you add the next entry.</p>
258
+
259
+ <p>Once all entries have been added you'd invoke
260
+ <code>finish</code> and finally <code>close</code> the
261
+ stream.</p>
262
+
263
+ <p>A skeleton might look like:</p>
264
+
265
+ <source><![CDATA[
266
+ Collection<File> filesToArchive = ...
267
+ try (ArchiveOutputStream o = ... create the stream for your format ...) {
268
+ for (File f : filesToArchive) {
269
+ // maybe skip directories for formats like AR that don't store directories
270
+ ArchiveEntry entry = o.createArchiveEntry(f, entryName(f));
271
+ // potentially add more flags to entry
272
+ o.putArchiveEntry(entry);
273
+ if (f.isFile()) {
274
+ try (InputStream i = Files.newInputStream(f.toPath())) {
275
+ IOUtils.copy(i, o);
276
+ }
277
+ }
278
+ o.closeArchiveEntry();
279
+ }
280
+ o.finish();
281
+ }
282
+ ]]></source>
283
+
284
+ <p>where the hypothetical <code>entryName</code> method is
285
+ written by you and provides the name for the entry as it is
286
+ going to be written to the archive.</p>
287
+
288
+ <p>If you want to combine an archive format with a compression
289
+ format - like when creating a "tar.gz" file - you wrap the
290
+ <code>ArchiveOutputStream</code> around a
291
+ <code>CompressorOutputStream</code> for example:</p>
292
+
293
+ <source><![CDATA[
294
+ try (OutputStream fo = Files.newOutputStream(Paths.get("my.tar.gz"));
295
+ OutputStream gzo = new GzipCompressorOutputStream(fo);
296
+ ArchiveOutputStream o = new TarArchiveOutputStream(gzo)) {
297
+ }
298
+ ]]></source>
299
+
300
+ </subsection>
301
+
302
+ <subsection name="7z">
303
+
304
+ <p>Note that Commons Compress currently only supports a subset
305
+ of compression and encryption algorithms used for 7z archives.
306
+ For writing only uncompressed entries, LZMA, LZMA2, BZIP2 and
307
+ Deflate are supported - in addition to those reading supports
308
+ AES-256/SHA-256 and DEFLATE64.</p>
309
+
310
+ <p>Writing multipart archives is not supported at
311
+ all. Multipart archives can be read by concatenating the parts
312
+ for example by using
313
+ <code>MultiReadOnlySeekableByteChannel</code>.</p>
314
+
315
+ <p>7z archives can use multiple compression and encryption
316
+ methods as well as filters combined as a pipeline of methods
317
+ for its entries. Prior to Compress 1.8 you could only specify
318
+ a single method when creating archives - reading archives
319
+ using more than one method has been possible before. Starting
320
+ with Compress 1.8 it is possible to configure the full
321
+ pipeline using the <code>setContentMethods</code> method of
322
+ <code>SevenZOutputFile</code>. Methods are specified in the
323
+ order they appear inside the pipeline when creating the
324
+ archive, you can also specify certain parameters for some of
325
+ the methods - see the Javadocs of
326
+ <code>SevenZMethodConfiguration</code> for details.</p>
327
+
328
+ <p>When reading entries from an archive the
329
+ <code>getContentMethods</code> method of
330
+ <code>SevenZArchiveEntry</code> will properly represent the
331
+ compression/encryption/filter methods but may fail to
332
+ determine the configuration options used. As of Compress 1.8
333
+ only the dictionary size used for LZMA2 can be read.</p>
334
+
335
+ <p>Currently solid compression - compressing multiple files
336
+ as a single block to benefit from patterns repeating across
337
+ files - is only supported when reading archives. This also
338
+ means compression ratio will likely be worse when using
339
+ Commons Compress compared to the native 7z executable.</p>
340
+
341
+ <p>Reading or writing requires a
342
+ <code>SeekableByteChannel</code> that will be obtained
343
+ transparently when reading from or writing to a file. The
344
+ class
345
+ <code>org.apache.commons.compress.utils.SeekableInMemoryByteChannel</code>
346
+ allows you to read from or write to an in-memory archive.</p>
347
+
348
+ <p>Some 7z archives don't contain any names for the archive
349
+ entries. The native 7zip tools derive a default name from the
350
+ name of the archive itself for such entries. Starting with
351
+ Compress 1.19 <code>SevenZFile</code> has an option to mimic
352
+ this behavior, but by default unnamed archive entries will
353
+ return <code>null</code> from
354
+ <code>SevenZArchiveEntry#getName</code>.</p>
355
+
356
+ <p>Adding an entry to a 7z archive:</p>
357
+ <source><![CDATA[
358
+ SevenZOutputFile sevenZOutput = new SevenZOutputFile(file);
359
+ SevenZArchiveEntry entry = sevenZOutput.createArchiveEntry(fileToArchive, name);
360
+ sevenZOutput.putArchiveEntry(entry);
361
+ sevenZOutput.write(contentOfEntry);
362
+ sevenZOutput.closeArchiveEntry();
363
+ ]]></source>
364
+
365
+ <p>Uncompressing a given 7z archive (you would
366
+ certainly add exception handling and make sure all streams
367
+ get closed properly):</p>
368
+ <source><![CDATA[
369
+ SevenZFile sevenZFile = new SevenZFile(new File("archive.7z"));
370
+ SevenZArchiveEntry entry = sevenZFile.getNextEntry();
371
+ byte[] content = new byte[entry.getSize()];
372
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
373
+ sevenZFile.read(content, offset, content.length - offset);
374
+ }
375
+ ]]></source>
376
+
377
+ <p>Uncompressing a given in-memory 7z archive:</p>
378
+ <source><![CDATA[
379
+ byte[] inputData; // 7z archive contents
380
+ SeekableInMemoryByteChannel inMemoryByteChannel = new SeekableInMemoryByteChannel(inputData);
381
+ SevenZFile sevenZFile = new SevenZFile(inMemoryByteChannel);
382
+ SevenZArchiveEntry entry = sevenZFile.getNextEntry();
383
+ sevenZFile.read(); // read current entry's data
384
+ ]]></source>
385
+
386
+ <h4><a name="Encrypted-7z-Archives"></a>Encrypted 7z Archives</h4>
387
+
388
+ <p>Currently Compress supports reading but not writing of
389
+ encrypted archives. When reading an encrypted archive a
390
+ password has to be provided to one of
391
+ <code>SevenZFile</code>'s constructors. If you try to read
392
+ an encrypted archive without specifying a password a
393
+ <code>PasswordRequiredException</code> (a subclass of
394
+ <code>IOException</code>) will be thrown.</p>
395
+
396
+ <p>When specifying the password as a <code>byte[]</code> one
397
+ common mistake is to use the wrong encoding when creating
398
+ the <code>byte[]</code> from a <code>String</code>. The
399
+ <code>SevenZFile</code> class expects the bytes to
400
+ correspond to the UTF16-LE encoding of the password. An
401
+ example of reading an encrypted archive is</p>
402
+
403
+ <source><![CDATA[
404
+ SevenZFile sevenZFile = new SevenZFile(new File("archive.7z"), "secret".getBytes(StandardCharsets.UTF_16LE));
405
+ SevenZArchiveEntry entry = sevenZFile.getNextEntry();
406
+ byte[] content = new byte[entry.getSize()];
407
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
408
+ sevenZFile.read(content, offset, content.length - offset);
409
+ }
410
+ ]]></source>
411
+
412
+ <p>Starting with Compress 1.17 new constructors have been
413
+ added that accept the password as <code>char[]</code> rather
414
+ than a <code>byte[]</code>. We recommend you use these in
415
+ order to avoid the problem above.</p>
416
+
417
+ <source><![CDATA[
418
+ SevenZFile sevenZFile = new SevenZFile(new File("archive.7z"), "secret".toCharArray());
419
+ SevenZArchiveEntry entry = sevenZFile.getNextEntry();
420
+ byte[] content = new byte[entry.getSize()];
421
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
422
+ sevenZFile.read(content, offset, content.length - offset);
423
+ }
424
+ ]]></source>
425
+
426
+ <h4><a name="Random-Access-to-7z-Archives"></a>Random-Access to 7z Archives</h4>
427
+
428
+ <p>Prior to Compress 1.20 7z archives could only be read
429
+ sequentially. The
430
+ <code>getInputStream(SevenZArchiveEntry)</code> method
431
+ introduced with Compress 1.20 now provides random access but
432
+ at least when the archive uses solid compression random access
433
+ will likely be significantly slower than sequential
434
+ access.</p>
435
+
436
+ <h4><a name="Recovering-from-Certain-Broken-7z-Archives"></a>Recovering from Certain Broken 7z Archives</h4>
437
+
438
+ <p><code>SevenZFile</code> tries
439
+ to recover archives that look as if they were part of a
440
+ multi-volume archive where the first volume has been removed
441
+ too early.</p>
442
+
443
+ <p>This option has to be enabled
444
+ explicitly in <code>SevenZFile.Builder</code>. The way recovery
445
+ works is by Compress scanning an archive from the end for
446
+ something that might look like valid 7z metadata and use that,
447
+ if it can successfully parse the block of data. When doing so
448
+ Compress may encounter blocks of metadata that look like the
449
+ metadata of very large archives which in turn may make
450
+ Compress allocate a lot of memory. Therefore we strongly
451
+ recommend you also set a memory limit inside the
452
+ <code>SevenZFile.Builder</code> if you enable recovery.</p>
453
+ </subsection>
454
+
455
+ <subsection name="ar">
456
+
457
+ <p>In addition to the information stored
458
+ in <code>ArchiveEntry</code> a <code>ArArchiveEntry</code>
459
+ stores information about the owner user and group as well as
460
+ Unix permissions.</p>
461
+
462
+ <p>Adding an entry to an ar archive:</p>
463
+ <source><![CDATA[
464
+ ArArchiveEntry entry = new ArArchiveEntry(name, size);
465
+ arOutput.putArchiveEntry(entry);
466
+ arOutput.write(contentOfEntry);
467
+ arOutput.closeArchiveEntry();
468
+ ]]></source>
469
+
470
+ <p>Reading entries from an ar archive:</p>
471
+ <source><![CDATA[
472
+ ArArchiveEntry entry = (ArArchiveEntry) arInput.getNextEntry();
473
+ byte[] content = new byte[entry.getSize()];
474
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
475
+ arInput.read(content, offset, content.length - offset);
476
+ }
477
+ ]]></source>
478
+
479
+ <p>Traditionally the AR format doesn't allow file names longer
480
+ than 16 characters. There are two variants that circumvent
481
+ this limitation in different ways, the GNU/SVR4 and the BSD
482
+ variant. Commons Compress 1.0 to 1.2 can only read archives
483
+ using the GNU/SVR4 variant, support for the BSD variant has
484
+ been added in Commons Compress 1.3. Commons Compress 1.3
485
+ also optionally supports writing archives with file names
486
+ longer than 16 characters using the BSD dialect, writing
487
+ the SVR4/GNU dialect is not supported.</p>
488
+
489
+ <table>
490
+ <thead>
491
+ <tr>
492
+ <th>Version of Apache Commons Compress</th>
493
+ <th>Support for Traditional AR Format</th>
494
+ <th>Support for GNU/SVR4 Dialect</th>
495
+ <th>Support for BSD Dialect</th>
496
+ </tr>
497
+ </thead>
498
+ <tbody>
499
+ <tr>
500
+ <td>1.0 to 1.2</td>
501
+ <td>read/write</td>
502
+ <td>read</td>
503
+ <td>-</td>
504
+ </tr>
505
+ <tr>
506
+ <td>1.3 and later</td>
507
+ <td>read/write</td>
508
+ <td>read</td>
509
+ <td>read/write</td>
510
+ </tr>
511
+ </tbody>
512
+ </table>
513
+
514
+ <p>It is not possible to detect the end of an AR archive in a
515
+ reliable way so <code>ArArchiveInputStream</code> will read
516
+ until it reaches the end of the stream or fails to parse the
517
+ stream's content as AR entries.</p>
518
+
519
+ </subsection>
520
+
521
+ <subsection name="arj">
522
+
523
+ <p>Note that Commons Compress doesn't support compressed,
524
+ encrypted or multi-volume ARJ archives, yet.</p>
525
+
526
+ <p>Uncompressing a given arj archive (you would
527
+ certainly add exception handling and make sure all streams
528
+ get closed properly):</p>
529
+ <source><![CDATA[
530
+ ArjArchiveEntry entry = arjInput.getNextEntry();
531
+ byte[] content = new byte[entry.getSize()];
532
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
533
+ arjInput.read(content, offset, content.length - offset);
534
+ }
535
+ ]]></source>
536
+ </subsection>
537
+
538
+ <subsection name="cpio">
539
+
540
+ <p>In addition to the information stored
541
+ in <code>ArchiveEntry</code> a <code>CpioArchiveEntry</code>
542
+ stores various attributes including information about the
543
+ original owner and permissions.</p>
544
+
545
+ <p>The cpio package supports the "new portable" as well as the
546
+ "old" format of CPIO archives in their binary, ASCII and
547
+ "with CRC" variants.</p>
548
+
549
+ <p>Adding an entry to a cpio archive:</p>
550
+ <source><![CDATA[
551
+ CpioArchiveEntry entry = new CpioArchiveEntry(name, size);
552
+ cpioOutput.putArchiveEntry(entry);
553
+ cpioOutput.write(contentOfEntry);
554
+ cpioOutput.closeArchiveEntry();
555
+ ]]></source>
556
+
557
+ <p>Reading entries from a cpio archive:</p>
558
+ <source><![CDATA[
559
+ CpioArchiveEntry entry = cpioInput.getNextCPIOEntry();
560
+ byte[] content = new byte[entry.getSize()];
561
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
562
+ cpioInput.read(content, offset, content.length - offset);
563
+ }
564
+ ]]></source>
565
+
566
+ <p>Traditionally CPIO archives are written in blocks of 512
567
+ bytes - the block size is a configuration parameter of the
568
+ <code>Cpio*Stream</code>'s constructors. Starting with version
569
+ 1.5 <code>CpioArchiveInputStream</code> will consume the
570
+ padding written to fill the current block when the end of the
571
+ archive is reached. Unfortunately many CPIO implementations
572
+ use larger block sizes so there may be more zero-byte padding
573
+ left inside the original input stream after the archive has
574
+ been consumed completely.</p>
575
+
576
+ </subsection>
577
+
578
+ <subsection name="jar">
579
+ <p>In general, JAR archives are ZIP files, so the JAR package
580
+ supports all options provided by the <a href="#zip">ZIP</a> package.</p>
581
+
582
+ <p>To be interoperable JAR archives should always be created
583
+ using the UTF-8 encoding for file names (which is the
584
+ default).</p>
585
+
586
+ <p>Archives created using <code>JarArchiveOutputStream</code>
587
+ will implicitly add a <code>JarMarker</code> extra field to
588
+ the very first archive entry of the archive which will make
589
+ Solaris recognize them as Java archives and allows them to
590
+ be used as executables.</p>
591
+
592
+ <p>Note that <code>ArchiveStreamFactory</code> doesn't
593
+ distinguish ZIP archives from JAR archives, so if you use
594
+ the one-argument <code>createArchiveInputStream</code>
595
+ method on a JAR archive, it will still return the more
596
+ generic <code>ZipArchiveInputStream</code>.</p>
597
+
598
+ <p>The <code>JarArchiveEntry</code> class contains fields for
599
+ certificates and attributes that are planned to be supported
600
+ in the future but are not supported as of Compress 1.0.</p>
601
+
602
+ <p>Adding an entry to a jar archive:</p>
603
+ <source><![CDATA[
604
+ JarArchiveEntry entry = new JarArchiveEntry(name);
605
+ entry.setSize(size);
606
+ jarOutput.putArchiveEntry(entry);
607
+ jarOutput.write(contentOfEntry);
608
+ jarOutput.closeArchiveEntry();
609
+ ]]></source>
610
+
611
+ <p>Reading entries from a jar archive:</p>
612
+ <source><![CDATA[
613
+ JarArchiveEntry entry = jarInput.getNextJarEntry();
614
+ byte[] content = new byte[entry.getSize()];
615
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
616
+ jarInput.read(content, offset, content.length - offset);
617
+ }
618
+ ]]></source>
619
+ </subsection>
620
+
621
+ <subsection name="dump">
622
+
623
+ <p>In addition to the information stored
624
+ in <code>ArchiveEntry</code> a <code>DumpArchiveEntry</code>
625
+ stores various attributes including information about the
626
+ original owner and permissions.</p>
627
+
628
+ <p>As of Commons Compress 1.3 only dump archives using the
629
+ new-fs format - this is the most common variant - are
630
+ supported. Right now this library supports uncompressed and
631
+ ZLIB compressed archives and can not write archives at
632
+ all.</p>
633
+
634
+ <p>Reading entries from a dump archive:</p>
635
+ <source><![CDATA[
636
+ DumpArchiveEntry entry = dumpInput.getNextDumpEntry();
637
+ byte[] content = new byte[entry.getSize()];
638
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
639
+ dumpInput.read(content, offset, content.length - offset);
640
+ }
641
+ ]]></source>
642
+
643
+ <p>Prior to version 1.5 <code>DumpArchiveInputStream</code>
644
+ would close the original input once it had read the last
645
+ record. Starting with version 1.5 it will not close the
646
+ stream implicitly.</p>
647
+
648
+ </subsection>
649
+
650
+ <subsection name="tar">
651
+
652
+ <p>The TAR package has a <a href="tar.html">dedicated
653
+ documentation page</a>.</p>
654
+
655
+ <p>Adding an entry to a tar archive:</p>
656
+ <source><![CDATA[
657
+ TarArchiveEntry entry = new TarArchiveEntry(name);
658
+ entry.setSize(size);
659
+ tarOutput.putArchiveEntry(entry);
660
+ tarOutput.write(contentOfEntry);
661
+ tarOutput.closeArchiveEntry();
662
+ ]]></source>
663
+
664
+ <p>Reading entries from a tar archive:</p>
665
+ <source><![CDATA[
666
+ TarArchiveEntry entry = tarInput.getNextTarEntry();
667
+ byte[] content = new byte[entry.getSize()];
668
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
669
+ tarInput.read(content, offset, content.length - offset);
670
+ }
671
+ ]]></source>
672
+ </subsection>
673
+
674
+ <subsection name="zip">
675
+ <p>The ZIP package has a <a href="zip.html">dedicated
676
+ documentation page</a>.</p>
677
+
678
+ <p>Adding an entry to a zip archive:</p>
679
+ <source><![CDATA[
680
+ ZipArchiveEntry entry = new ZipArchiveEntry(name);
681
+ entry.setSize(size);
682
+ zipOutput.putArchiveEntry(entry);
683
+ zipOutput.write(contentOfEntry);
684
+ zipOutput.closeArchiveEntry();
685
+ ]]></source>
686
+
687
+ <p><code>ZipArchiveOutputStream</code> can use some internal
688
+ optimizations exploiting <code>SeekableByteChannel</code> if it
689
+ knows it is writing to a seekable output rather than a non-seekable
690
+ stream. If you are writing to a file, you should use the
691
+ constructor that accepts a <code>File</code> or
692
+ <code>SeekableByteChannel</code> argument rather
693
+ than the one using an <code>OutputStream</code> or the
694
+ factory method in <code>ArchiveStreamFactory</code>.</p>
695
+
696
+ <p>Reading entries from a zip archive:</p>
697
+ <source><![CDATA[
698
+ ZipArchiveEntry entry = zipInput.getNextZipEntry();
699
+ byte[] content = new byte[entry.getSize()];
700
+ LOOP UNTIL entry.getSize() HAS BEEN READ {
701
+ zipInput.read(content, offset, content.length - offset);
702
+ }
703
+ ]]></source>
704
+
705
+ <p>Reading entries from a zip archive using the
706
+ recommended <code>ZipFile</code> class:</p>
707
+ <source><![CDATA[
708
+ ZipArchiveEntry entry = zipFile.getEntry(name);
709
+ InputStream content = zipFile.getInputStream(entry);
710
+ try {
711
+ READ UNTIL content IS EXHAUSTED
712
+ } finally {
713
+ content.close();
714
+ }
715
+ ]]></source>
716
+
717
+ <p>Reading entries from an in-memory zip archive using
718
+ <code>SeekableInMemoryByteChannel</code> and <code>ZipFile</code> class:</p>
719
+ <source><![CDATA[
720
+ byte[] inputData; // zip archive contents
721
+ SeekableInMemoryByteChannel inMemoryByteChannel = new SeekableInMemoryByteChannel(inputData);
722
+ ZipFile zipFile = new ZipFile(inMemoryByteChannel);
723
+ ZipArchiveEntry archiveEntry = zipFile.getEntry("entryName");
724
+ InputStream inputStream = zipFile.getInputStream(archiveEntry);
725
+ inputStream.read() // read data from the input stream
726
+ ]]></source>
727
+
728
+ <p>Creating a zip file with multiple threads:</p>
729
+ <p>
730
+ A simple implementation to create a zip file might look like this:
731
+ </p>
732
+
733
+ <source><![CDATA[
734
+ public class ScatterSample {
735
+
736
+ ParallelScatterZipCreator scatterZipCreator = new ParallelScatterZipCreator();
737
+ ScatterZipOutputStream dirs = ScatterZipOutputStream.fileBased(File.createTempFile("scatter-dirs", "tmp"));
738
+
739
+ public ScatterSample() throws IOException {
740
+ }
741
+
742
+ public void addEntry(ZipArchiveEntry zipArchiveEntry, InputStreamSupplier streamSupplier) throws IOException {
743
+ if (zipArchiveEntry.isDirectory() && !zipArchiveEntry.isUnixSymlink())
744
+ dirs.addArchiveEntry(ZipArchiveEntryRequest.createZipArchiveEntryRequest(zipArchiveEntry, streamSupplier));
745
+ else
746
+ scatterZipCreator.addArchiveEntry( zipArchiveEntry, streamSupplier);
747
+ }
748
+
749
+ public void writeTo(ZipArchiveOutputStream zipArchiveOutputStream)
750
+ throws IOException, ExecutionException, InterruptedException {
751
+ dirs.writeTo(zipArchiveOutputStream);
752
+ dirs.close();
753
+ scatterZipCreator.writeTo(zipArchiveOutputStream);
754
+ }
755
+ }
756
+ ]]></source>
757
+ </subsection>
758
+
759
+ </section>
760
+ <section name="Compressors">
761
+
762
+ <subsection name="Concatenated Streams">
763
+ <p>For the bzip2, gzip and XZ formats as well as the framed
764
+ lz4 format a single compressed file
765
+ may actually consist of several streams that will be
766
+ concatenated by the command line utilities when decompressing
767
+ them. Starting with Commons Compress 1.4 the
768
+ <code>*CompressorInputStream</code>s for these formats support
769
+ concatenating streams as well, but they won't do so by
770
+ default. You must use the two-arg constructor and explicitly
771
+ enable the support.</p>
772
+ </subsection>
773
+
774
+ <subsection name="Brotli">
775
+
776
+ <p>The implementation of this package is provided by the
777
+ <a href="https://github.com/google/brotli">Google Brotli dec</a> library.</p>
778
+
779
+ <p>Uncompressing a given Brotli compressed file (you would
780
+ certainly add exception handling and make sure all streams
781
+ get closed properly):</p>
782
+ <source><![CDATA[
783
+ InputStream fin = Files.newInputStream(Paths.get("archive.tar.br"));
784
+ BufferedInputStream in = new BufferedInputStream(fin);
785
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
786
+ BrotliCompressorInputStream brIn = new BrotliCompressorInputStream(in);
787
+ final byte[] buffer = new byte[buffersize];
788
+ int n = 0;
789
+ while (-1 != (n = brIn.read(buffer))) {
790
+ out.write(buffer, 0, n);
791
+ }
792
+ out.close();
793
+ brIn.close();
794
+ ]]></source>
795
+ </subsection>
796
+
797
+ <subsection name="bzip2">
798
+
799
+ <p>Note that <code>BZip2CompressorOutputStream</code> keeps
800
+ hold of some big data structures in memory. While it is
801
+ recommended for <em>any</em> stream that you close it as soon as
802
+ you no longer need it, this is even more important
803
+ for <code>BZip2CompressorOutputStream</code>.</p>
804
+
805
+ <p>Uncompressing a given bzip2 compressed file (you would
806
+ certainly add exception handling and make sure all streams
807
+ get closed properly):</p>
808
+ <source><![CDATA[
809
+ InputStream fin = Files.newInputStream(Paths.get("archive.tar.bz2"));
810
+ BufferedInputStream in = new BufferedInputStream(fin);
811
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
812
+ BZip2CompressorInputStream bzIn = new BZip2CompressorInputStream(in);
813
+ final byte[] buffer = new byte[buffersize];
814
+ int n = 0;
815
+ while (-1 != (n = bzIn.read(buffer))) {
816
+ out.write(buffer, 0, n);
817
+ }
818
+ out.close();
819
+ bzIn.close();
820
+ ]]></source>
821
+
822
+ <p>Compressing a given file using bzip2 (you would
823
+ certainly add exception handling and make sure all streams
824
+ get closed properly):</p>
825
+ <source><![CDATA[
826
+ InputStream in = Files.newInputStream(Paths.get("archive.tar"));
827
+ OutputStream fout = Files.newOutputStream(Paths.get("archive.tar.bz2"));
828
+ BufferedOutputStream out = new BufferedOutputStream(fout);
829
+ BZip2CompressorOutputStream bzOut = new BZip2CompressorOutputStream(out);
830
+ final byte[] buffer = new byte[buffersize];
831
+ int n = 0;
832
+ while (-1 != (n = in.read(buffer))) {
833
+ bzOut.write(buffer, 0, n);
834
+ }
835
+ bzOut.close();
836
+ in.close();
837
+ ]]></source>
838
+
839
+ </subsection>
840
+
841
+ <subsection name="DEFLATE">
842
+
843
+ <p>The implementation of the DEFLATE/INFLATE code used by this
844
+ package is provided by the <code>java.util.zip</code> package
845
+ of the Java class library.</p>
846
+
847
+ <p>Uncompressing a given DEFLATE compressed file (you would
848
+ certainly add exception handling and make sure all streams
849
+ get closed properly):</p>
850
+ <source><![CDATA[
851
+ InputStream fin = Files.newInputStream(Paths.get("some-file"));
852
+ BufferedInputStream in = new BufferedInputStream(fin);
853
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
854
+ DeflateCompressorInputStream defIn = new DeflateCompressorInputStream(in);
855
+ final byte[] buffer = new byte[buffersize];
856
+ int n = 0;
857
+ while (-1 != (n = defIn.read(buffer))) {
858
+ out.write(buffer, 0, n);
859
+ }
860
+ out.close();
861
+ defIn.close();
862
+ ]]></source>
863
+
864
+ <p>Compressing a given file using DEFLATE (you would
865
+ certainly add exception handling and make sure all streams
866
+ get closed properly):</p>
867
+ <source><![CDATA[
868
+ InputStream in = Files.newInputStream(Paths.get("archive.tar"));
869
+ OutputStream fout = Files.newOutputStream(Paths.get("some-file"));
870
+ BufferedOutputStream out = new BufferedOutputStream(fout);
871
+ DeflateCompressorOutputStream defOut = new DeflateCompressorOutputStream(out);
872
+ final byte[] buffer = new byte[buffersize];
873
+ int n = 0;
874
+ while (-1 != (n = in.read(buffer))) {
875
+ defOut.write(buffer, 0, n);
876
+ }
877
+ defOut.close();
878
+ in.close();
879
+ ]]></source>
880
+
881
+ </subsection>
882
+
883
+ <subsection name="DEFLATE64">
884
+
885
+ <p>Uncompressing a given DEFLATE64 compressed file (you would
886
+ certainly add exception handling and make sure all streams
887
+ get closed properly):</p>
888
+ <source><![CDATA[
889
+ InputStream fin = Files.newInputStream(Paths.get("some-file"));
890
+ BufferedInputStream in = new BufferedInputStream(fin);
891
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
892
+ Deflate64CompressorInputStream defIn = new Deflate64CompressorInputStream(in);
893
+ final byte[] buffer = new byte[buffersize];
894
+ int n = 0;
895
+ while (-1 != (n = defIn.read(buffer))) {
896
+ out.write(buffer, 0, n);
897
+ }
898
+ out.close();
899
+ defIn.close();
900
+ ]]></source>
901
+
902
+ </subsection>
903
+
904
+ <subsection name="gzip">
905
+
906
+ <p>The implementation of the DEFLATE/INFLATE code used by this
907
+ package is provided by the <code>java.util.zip</code> package
908
+ of the Java class library.</p>
909
+
910
+ <p>Uncompressing a given gzip compressed file (you would
911
+ certainly add exception handling and make sure all streams
912
+ get closed properly):</p>
913
+ <source><![CDATA[
914
+ InputStream fin = Files.newInputStream(Paths.get("archive.tar.gz"));
915
+ BufferedInputStream in = new BufferedInputStream(fin);
916
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
917
+ GzipCompressorInputStream gzIn = new GzipCompressorInputStream(in);
918
+ final byte[] buffer = new byte[buffersize];
919
+ int n = 0;
920
+ while (-1 != (n = gzIn.read(buffer))) {
921
+ out.write(buffer, 0, n);
922
+ }
923
+ out.close();
924
+ gzIn.close();
925
+ ]]></source>
926
+
927
+ <p>Compressing a given file using gzip (you would
928
+ certainly add exception handling and make sure all streams
929
+ get closed properly):</p>
930
+ <source><![CDATA[
931
+ InputStream in = Files.newInputStream(Paths.get("archive.tar"));
932
+ OutputStream fout = Files.newOutputStream(Paths.get("archive.tar.gz"));
933
+ BufferedOutputStream out = new BufferedOutputStream(fout);
934
+ GzipCompressorOutputStream gzOut = new GzipCompressorOutputStream(out);
935
+ final byte[] buffer = new byte[buffersize];
936
+ int n = 0;
937
+ while (-1 != (n = in.read(buffer))) {
938
+ gzOut.write(buffer, 0, n);
939
+ }
940
+ gzOut.close();
941
+ in.close();
942
+ ]]></source>
943
+
944
+ </subsection>
945
+
946
+ <subsection name="LZ4">
947
+
948
+ <p>There are two different "formats" used for <a
949
+ href="http://lz4.github.io/lz4/">lz4</a>. The format called
950
+ "block format" only contains the raw compressed data while the
951
+ other provides a higher level "frame format" - Commons
952
+ Compress offers two different stream classes for reading or
953
+ writing either format.</p>
954
+
955
+ <p>Uncompressing a given framed LZ4 file (you would
956
+ certainly add exception handling and make sure all streams
957
+ get closed properly):</p>
958
+ <source><![CDATA[
959
+ InputStream fin = Files.newInputStream(Paths.get("archive.tar.lz4"));
960
+ BufferedInputStream in = new BufferedInputStream(fin);
961
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
962
+ FramedLZ4CompressorInputStream zIn = new FramedLZ4CompressorInputStream(in);
963
+ final byte[] buffer = new byte[buffersize];
964
+ int n = 0;
965
+ while (-1 != (n = zIn.read(buffer))) {
966
+ out.write(buffer, 0, n);
967
+ }
968
+ out.close();
969
+ zIn.close();
970
+ ]]></source>
971
+
972
+ <p>Compressing a given file using the LZ4 frame format (you would
973
+ certainly add exception handling and make sure all streams
974
+ get closed properly):</p>
975
+ <source><![CDATA[
976
+ InputStream in = Files.newInputStream(Paths.get("archive.tar"));
977
+ OutputStream fout = Files.newOutputStream(Paths.get("archive.tar.lz4"));
978
+ BufferedOutputStream out = new BufferedOutputStream(fout);
979
+ FramedLZ4CompressorOutputStream lzOut = new FramedLZ4CompressorOutputStream(out);
980
+ final byte[] buffer = new byte[buffersize];
981
+ int n = 0;
982
+ while (-1 != (n = in.read(buffer))) {
983
+ lzOut.write(buffer, 0, n);
984
+ }
985
+ lzOut.close();
986
+ in.close();
987
+ ]]></source>
988
+
989
+ </subsection>
990
+
991
+ <subsection name="lzma">
992
+
993
+ <p>The implementation of this package is provided by the
994
+ public domain <a href="https://tukaani.org/xz/java.html">XZ
995
+ for Java</a> library.</p>
996
+
997
+ <p>Uncompressing a given LZMA compressed file (you would
998
+ certainly add exception handling and make sure all streams
999
+ get closed properly):</p>
1000
+ <source><![CDATA[
1001
+ InputStream fin = Files.newInputStream(Paths.get("archive.tar.lzma"));
1002
+ BufferedInputStream in = new BufferedInputStream(fin);
1003
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
1004
+ LZMACompressorInputStream lzmaIn = new LZMACompressorInputStream(in);
1005
+ final byte[] buffer = new byte[buffersize];
1006
+ int n = 0;
1007
+ while (-1 != (n = lzmaIn.read(buffer))) {
1008
+ out.write(buffer, 0, n);
1009
+ }
1010
+ out.close();
1011
+ lzmaIn.close();
1012
+ ]]></source>
1013
+
1014
+ <p>Compressing a given file using LZMA (you would
1015
+ certainly add exception handling and make sure all streams
1016
+ get closed properly):</p>
1017
+ <source><![CDATA[
1018
+ InputStream in = Files.newInputStream(Paths.get("archive.tar"));
1019
+ OutputStream fout = Files.newOutputStream(Paths.get("archive.tar.lzma"));
1020
+ BufferedOutputStream out = new BufferedOutputStream(fout);
1021
+ LZMACompressorOutputStream lzOut = new LZMACompressorOutputStream(out);
1022
+ final byte[] buffer = new byte[buffersize];
1023
+ int n = 0;
1024
+ while (-1 != (n = in.read(buffer))) {
1025
+ lzOut.write(buffer, 0, n);
1026
+ }
1027
+ lzOut.close();
1028
+ in.close();
1029
+ ]]></source>
1030
+
1031
+ </subsection>
1032
+
1033
+ <subsection name="Pack200">
1034
+
1035
+ <p>The Pack200 package has a <a href="pack200.html">dedicated
1036
+ documentation page</a>.</p>
1037
+
1038
+ <p>The implementation of this package used to be provided by
1039
+ the <code>java.util.zip</code> package of the Java class
1040
+ library. Starting with Compress 1.21 the implementation uses
1041
+ a copy of the pack200 code of the now retired Apache
1042
+ Harmony&#x2122; project that ships with Compress itself.</p>
1043
+
1044
+ <p>Uncompressing a given pack200 compressed file (you would
1045
+ certainly add exception handling and make sure all streams
1046
+ get closed properly):</p>
1047
+ <source><![CDATA[
1048
+ InputStream fin = Files.newInputStream(Paths.get("archive.pack"));
1049
+ BufferedInputStream in = new BufferedInputStream(fin);
1050
+ OutputStream out = Files.newOutputStream(Paths.get("archive.jar"));
1051
+ Pack200CompressorInputStream pIn = new Pack200CompressorInputStream(in);
1052
+ final byte[] buffer = new byte[buffersize];
1053
+ int n = 0;
1054
+ while (-1 != (n = pIn.read(buffer))) {
1055
+ out.write(buffer, 0, n);
1056
+ }
1057
+ out.close();
1058
+ pIn.close();
1059
+ ]]></source>
1060
+
1061
+ <p>Compressing a given jar using pack200 (you would
1062
+ certainly add exception handling and make sure all streams
1063
+ get closed properly):</p>
1064
+ <source><![CDATA[
1065
+ InputStream in = Files.newInputStream(Paths.get("archive.jar"));
1066
+ OutputStream fout = Files.newOutputStream(Paths.get("archive.pack"));
1067
+ BufferedOutputStream out = new BufferedOutputStream(fout);
1068
+ Pack200CompressorOutputStream pOut = new Pack200CompressorOutputStream(out);
1069
+ final byte[] buffer = new byte[buffersize];
1070
+ int n = 0;
1071
+ while (-1 != (n = in.read(buffer))) {
1072
+ pOut.write(buffer, 0, n);
1073
+ }
1074
+ pOut.close();
1075
+ in.close();
1076
+ ]]></source>
1077
+
1078
+ </subsection>
1079
+
1080
+ <subsection name="Snappy">
1081
+
1082
+ <p>There are two different "formats" used for <a
1083
+ href="https://github.com/google/snappy/">Snappy</a>, one only
1084
+ contains the raw compressed data while the other provides a
1085
+ higher level "framing format" - Commons Compress offers two
1086
+ different stream classes for reading either format.</p>
1087
+
1088
+ <p>Starting with 1.12 we've added support for different
1089
+ dialects of the framing format that can be specified when
1090
+ constructing the stream. The <code>STANDARD</code> dialect
1091
+ follows the "framing format" specification while the
1092
+ <code>IWORK_ARCHIVE</code> dialect can be used to parse IWA
1093
+ files that are part of Apple's iWork 13 format. If no dialect
1094
+ has been specified, <code>STANDARD</code> is used. Only the
1095
+ <code>STANDARD</code> format can be detected by
1096
+ <code>CompressorStreamFactory</code>.</p>
1097
+
1098
+ <p>Uncompressing a given framed Snappy file (you would
1099
+ certainly add exception handling and make sure all streams
1100
+ get closed properly):</p>
1101
+ <source><![CDATA[
1102
+ InputStream fin = Files.newInputStream(Paths.get("archive.tar.sz"));
1103
+ BufferedInputStream in = new BufferedInputStream(fin);
1104
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
1105
+ FramedSnappyCompressorInputStream zIn = new FramedSnappyCompressorInputStream(in);
1106
+ final byte[] buffer = new byte[buffersize];
1107
+ int n = 0;
1108
+ while (-1 != (n = zIn.read(buffer))) {
1109
+ out.write(buffer, 0, n);
1110
+ }
1111
+ out.close();
1112
+ zIn.close();
1113
+ ]]></source>
1114
+
1115
+ <p>Compressing a given file using framed Snappy (you would
1116
+ certainly add exception handling and make sure all streams
1117
+ get closed properly):</p>
1118
+ <source><![CDATA[
1119
+ InputStream in = Files.newInputStream(Paths.get("archive.tar"));
1120
+ OutputStream fout = Files.newOutputStream(Paths.get("archive.tar.sz"));
1121
+ BufferedOutputStream out = new BufferedOutputStream(fout);
1122
+ FramedSnappyCompressorOutputStream snOut = new FramedSnappyCompressorOutputStream(out);
1123
+ final byte[] buffer = new byte[buffersize];
1124
+ int n = 0;
1125
+ while (-1 != (n = in.read(buffer))) {
1126
+ snOut.write(buffer, 0, n);
1127
+ }
1128
+ snOut.close();
1129
+ in.close();
1130
+ ]]></source>
1131
+
1132
+ </subsection>
1133
+
1134
+ <subsection name="XZ">
1135
+
1136
+ <p>The implementation of this package is provided by the
1137
+ public domain <a href="https://tukaani.org/xz/java.html">XZ
1138
+ for Java</a> library.</p>
1139
+
1140
+ <p>When you try to open an XZ stream for reading using
1141
+ <code>CompressorStreamFactory</code>, Commons Compress will
1142
+ check whether the XZ for Java library is available. Starting
1143
+ with Compress 1.9 the result of this check will be cached
1144
+ unless Compress finds OSGi classes in its classpath. You can
1145
+ use <code>XZUtils#setCacheXZAvailability</code> to override
1146
+ this default behavior.</p>
1147
+
1148
+ <p>Uncompressing a given XZ compressed file (you would
1149
+ certainly add exception handling and make sure all streams
1150
+ get closed properly):</p>
1151
+ <source><![CDATA[
1152
+ InputStream fin = Files.newInputStream(Paths.get("archive.tar.xz"));
1153
+ BufferedInputStream in = new BufferedInputStream(fin);
1154
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
1155
+ XZCompressorInputStream xzIn = new XZCompressorInputStream(in);
1156
+ final byte[] buffer = new byte[buffersize];
1157
+ int n = 0;
1158
+ while (-1 != (n = xzIn.read(buffer))) {
1159
+ out.write(buffer, 0, n);
1160
+ }
1161
+ out.close();
1162
+ xzIn.close();
1163
+ ]]></source>
1164
+
1165
+ <p>Compressing a given file using XZ (you would
1166
+ certainly add exception handling and make sure all streams
1167
+ get closed properly):</p>
1168
+ <source><![CDATA[
1169
+ InputStream in = Files.newInputStream(Paths.get("archive.tar"));
1170
+ OutputStream fout = Files.newOutputStream(Paths.get("archive.tar.xz"));
1171
+ BufferedOutputStream out = new BufferedOutputStream(fout);
1172
+ XZCompressorOutputStream xzOut = new XZCompressorOutputStream(out);
1173
+ final byte[] buffer = new byte[buffersize];
1174
+ int n = 0;
1175
+ while (-1 != (n = in.read(buffer))) {
1176
+ xzOut.write(buffer, 0, n);
1177
+ }
1178
+ xzOut.close();
1179
+ in.close();
1180
+ ]]></source>
1181
+
1182
+ </subsection>
1183
+
1184
+ <subsection name="Z">
1185
+
1186
+ <p>Uncompressing a given Z compressed file (you would
1187
+ certainly add exception handling and make sure all streams
1188
+ get closed properly):</p>
1189
+ <source><![CDATA[
1190
+ InputStream fin = Files.newInputStream(Paths.get("archive.tar.Z"));
1191
+ BufferedInputStream in = new BufferedInputStream(fin);
1192
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
1193
+ ZCompressorInputStream zIn = new ZCompressorInputStream(in);
1194
+ final byte[] buffer = new byte[buffersize];
1195
+ int n = 0;
1196
+ while (-1 != (n = zIn.read(buffer))) {
1197
+ out.write(buffer, 0, n);
1198
+ }
1199
+ out.close();
1200
+ zIn.close();
1201
+ ]]></source>
1202
+
1203
+ </subsection>
1204
+
1205
+ <subsection name="Zstandard">
1206
+
1207
+ <p>The implementation of this package is provided by the
1208
+ <a href="https://github.com/luben/zstd-jni">Zstandard JNI</a> library.</p>
1209
+
1210
+ <p>Uncompressing a given Zstandard compressed file (you would
1211
+ certainly add exception handling and make sure all streams
1212
+ get closed properly):</p>
1213
+ <source><![CDATA[
1214
+ InputStream fin = Files.newInputStream(Paths.get("archive.tar.zstd"));
1215
+ BufferedInputStream in = new BufferedInputStream(fin);
1216
+ OutputStream out = Files.newOutputStream(Paths.get("archive.tar"));
1217
+ ZstdCompressorInputStream zsIn = new ZstdCompressorInputStream(in);
1218
+ final byte[] buffer = new byte[buffersize];
1219
+ int n = 0;
1220
+ while (-1 != (n = zsIn.read(buffer))) {
1221
+ out.write(buffer, 0, n);
1222
+ }
1223
+ out.close();
1224
+ zsIn.close();
1225
+ ]]></source>
1226
+
1227
+ <p>Compressing a given file using the Zstandard format (you
1228
+ would certainly add exception handling and make sure all
1229
+ streams get closed properly):</p>
1230
+ <source><![CDATA[
1231
+ InputStream in = Files.newInputStream(Paths.get("archive.tar"));
1232
+ OutputStream fout = Files.newOutputStream(Paths.get("archive.tar.zstd"));
1233
+ BufferedOutputStream out = new BufferedOutputStream(fout);
1234
+ ZstdCompressorOutputStream zOut = new ZstdCompressorOutputStream(out);
1235
+ final byte[] buffer = new byte[buffersize];
1236
+ int n = 0;
1237
+ while (-1 != (n = in.read(buffer))) {
1238
+ zOut.write(buffer, 0, n);
1239
+ }
1240
+ zOut.close();
1241
+ in.close();
1242
+ ]]></source>
1243
+
1244
+ </subsection>
1245
+ </section>
1246
+
1247
+ <section name="Extending Commons Compress">
1248
+
1249
+ <p>
1250
+ Starting in release 1.13, it is now possible to add Compressor- and ArchiverStream implementations using the
1251
+ Java's <a href="https://docs.oracle.com/javase/7/docs/api/java/util/ServiceLoader.html">ServiceLoader</a>
1252
+ mechanism.
1253
+ </p>
1254
+
1255
+ <subsection name="Extending Commons Compress Compressors">
1256
+
1257
+ <p>
1258
+ To provide your own compressor, you must make available on the classpath a file called
1259
+ <code>META-INF/services/org.apache.commons.compress.compressors.CompressorStreamProvider</code>.
1260
+ </p>
1261
+ <p>
1262
+ This file MUST contain one fully-qualified class name per line.
1263
+ </p>
1264
+ <p>
1265
+ For example:
1266
+ </p>
1267
+ <pre>org.apache.commons.compress.compressors.TestCompressorStreamProvider</pre>
1268
+ <p>
1269
+ This class MUST implement the Commons Compress interface
1270
+ <a href="apidocs/org/apache/commons/compress/compressors/CompressorStreamProvider.html">org.apache.commons.compress.compressors.CompressorStreamProvider</a>.
1271
+ </p>
1272
+ </subsection>
1273
+
1274
+ <subsection name="Extending Commons Compress Archivers">
1275
+
1276
+ <p>
1277
+ To provide your own archiver, you must make available on the classpath a file called
1278
+ <code>META-INF/services/org.apache.commons.compress.archivers.ArchiveStreamProvider</code>.
1279
+ </p>
1280
+ <p>
1281
+ This file MUST contain one fully-qualified class name per line.
1282
+ </p>
1283
+ <p>
1284
+ For example:
1285
+ </p>
1286
+ <pre>org.apache.commons.compress.archivers.TestArchiveStreamProvider</pre>
1287
+ <p>
1288
+ This class MUST implement the Commons Compress interface
1289
+ <a href="apidocs/org/apache/commons/compress/archivers/ArchiveStreamProvider.html">org.apache.commons.compress.archivers.ArchiveStreamProvider</a>.
1290
+ </p>
1291
+ </subsection>
1292
+
1293
+ </section>
1294
+ </body>
1295
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/index.xml ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+
4
+ Licensed to the Apache Software Foundation (ASF) under one or more
5
+ contributor license agreements. See the NOTICE file distributed with
6
+ this work for additional information regarding copyright ownership.
7
+ The ASF licenses this file to You under the Apache License, Version 2.0
8
+ (the "License"); you may not use this file except in compliance with
9
+ the License. You may obtain a copy of the License at
10
+
11
+ http://www.apache.org/licenses/LICENSE-2.0
12
+
13
+ Unless required by applicable law or agreed to in writing, software
14
+ distributed under the License is distributed on an "AS IS" BASIS,
15
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ See the License for the specific language governing permissions and
17
+ limitations under the License.
18
+
19
+ -->
20
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
21
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
22
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
23
+ <properties>
24
+ <title>Overview</title>
25
+ <author email="dev@commons.apache.org">Apache Commons Team</author>
26
+ </properties>
27
+ <body>
28
+ <section name="Apache Commons Compress&#x2122;">
29
+ <p>
30
+ The Apache Commons Compress library defines an API for
31
+ working with ar, cpio, Unix dump, tar, zip, gzip, XZ,
32
+ Pack200, bzip2, 7z, arj, LZMA, snappy, DEFLATE, lz4,
33
+ Brotli, Zstandard, DEFLATE64 and Z files.
34
+ </p>
35
+ <p>
36
+ The code in this component has many origins:
37
+ </p>
38
+ <ul>
39
+ <li>The bzip2, tar and zip support came from Avalon's
40
+ Excalibur, but originally from Ant, as far as life in
41
+ Apache goes. The tar package is originally Tim Endres'
42
+ public domain package. The bzip2 package is based on
43
+ the work done by Keiron Liddle as well as Julian Seward's
44
+ <a href="http://www.bzip.org/index.html">libbzip2</a>.
45
+ It has migrated
46
+ via:<br/> Ant -&gt; Avalon-Excalibur -&gt; Commons-IO
47
+ -&gt; Commons-Compress.</li>
48
+ <li>The cpio package has been contributed by Michael Kuss
49
+ and
50
+ the <a href="http://jrpm.sourceforge.net/">jRPM</a>
51
+ project.</li>
52
+ <li>The pack200 code has originally been part of the now
53
+ retired <a href="https://harmony.apache.org/">Apache
54
+ Harmony&#x2122;</a> project.</li>
55
+ </ul>
56
+
57
+ </section>
58
+
59
+ <section name="Status">
60
+ <p>The current release requires Java 8 or above.</p>
61
+
62
+ <p>For a list of changes see the <a href="changes.html">Changes Report</a>.</p>
63
+
64
+ </section>
65
+
66
+ <section name="Documentation">
67
+ <p>The compress component is split into <em>compressors</em> and
68
+ <em>archivers</em>. While <em>compressors</em>
69
+ (un)compress streams that usually store a single
70
+ entry, <em>archivers</em> deal with archives that contain
71
+ structured content represented
72
+ by <code>ArchiveEntry</code> instances which in turn
73
+ usually correspond to single files or directories.</p>
74
+
75
+ <p>Currently the bzip2, Pack200, XZ, gzip, LZMA, brotli,
76
+ Zstandard and Z formats are
77
+ supported as compressors where gzip support is mostly provided by
78
+ the <code>java.util.zip</code> package of the Java
79
+ class library. XZ and LZMA support is provided by the public
80
+ domain <a href="https://tukaani.org/xz/java.html">XZ for
81
+ Java</a> library. Brotli support is provided by the MIT
82
+ licensed <a href="https://github.com/google/brotli">Google
83
+ Brotli decoder</a>. Zstandard support is provided by the BSD
84
+ licensed <a href="https://github.com/luben/zstd-jni">Zstd-jni</a>.
85
+ As of Commons Compress 1.21 support for the DEFLATE64, Z and Brotli
86
+ formats is read-only.</p>
87
+
88
+ <p>The ar, arj, cpio, dump, tar, 7z and zip formats are supported as
89
+ archivers where the <a href="zip.html">zip</a>
90
+ implementation provides capabilities that go beyond the
91
+ features found in java.util.zip. As of Commons Compress
92
+ 1.21 support for the dump and arj formats is
93
+ read-only - 7z can read most compressed and encrypted
94
+ archives but only write unencrypted ones. LZMA(2) support
95
+ in 7z requires <a href="https://tukaani.org/xz/java.html">XZ for
96
+ Java</a> as well.</p>
97
+
98
+ <p>The compress component provides abstract base classes for
99
+ compressors and archivers together with factories that can
100
+ be used to choose implementations by algorithm name. In
101
+ the case of input streams the factories can also be used
102
+ to guess the format and provide the matching
103
+ implementation.</p>
104
+
105
+ <ul>
106
+ <li>The <a href="examples.html">user guide</a> contains
107
+ more detailed information and some examples.</li>
108
+ <li>The <a href="limitations.html">known limitations and
109
+ problems</a> page lists the currently known problems
110
+ grouped by the format they apply to.</li>
111
+ <li>The <a href="apidocs/index.html">Javadoc</a> of the latest GIT</li>
112
+ <li>The <a href="https://gitbox.apache.org/repos/asf?p=commons-compress.git">GIT
113
+ repository</a> can be browsed.</li>
114
+ </ul>
115
+ </section>
116
+ <section name="Releases">
117
+ <p>
118
+ <a href="https://commons.apache.org/compress/download_compress.cgi">Download now!</a>
119
+ </p>
120
+ </section>
121
+ </body>
122
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/issue-tracking.xml ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+ Licensed to the Apache Software Foundation (ASF) under one or more
4
+ contributor license agreements. See the NOTICE file distributed with
5
+ this work for additional information regarding copyright ownership.
6
+ The ASF licenses this file to You under the Apache License, Version 2.0
7
+ (the "License"); you may not use this file except in compliance with
8
+ the License. You may obtain a copy of the License at
9
+
10
+ https://www.apache.org/licenses/LICENSE-2.0
11
+
12
+ Unless required by applicable law or agreed to in writing, software
13
+ distributed under the License is distributed on an "AS IS" BASIS,
14
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ See the License for the specific language governing permissions and
16
+ limitations under the License.
17
+ -->
18
+ <!--
19
+ +======================================================================+
20
+ |**** ****|
21
+ |**** THIS FILE IS GENERATED BY THE COMMONS BUILD PLUGIN ****|
22
+ |**** DO NOT EDIT DIRECTLY ****|
23
+ |**** ****|
24
+ +======================================================================+
25
+ | TEMPLATE FILE: issue-tracking-template.xml |
26
+ | commons-build-plugin/trunk/src/main/resources/commons-xdoc-templates |
27
+ +======================================================================+
28
+ | |
29
+ | 1) Re-generate using: mvn commons-build:jira-page |
30
+ | |
31
+ | 2) Set the following properties in the component's pom: |
32
+ | - commons.jira.id (required, alphabetic, upper case) |
33
+ | - commons.jira.pid (required, numeric) |
34
+ | |
35
+ | 3) Example Properties |
36
+ | |
37
+ | <properties> |
38
+ | <commons.jira.id>MATH</commons.jira.id> |
39
+ | <commons.jira.pid>12310485</commons.jira.pid> |
40
+ | </properties> |
41
+ | |
42
+ +======================================================================+
43
+ -->
44
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
45
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
46
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
47
+ <properties>
48
+ <title>Apache Commons Compress Issue tracking</title>
49
+ <author email="dev@commons.apache.org">Apache Commons Team</author>
50
+ </properties>
51
+ <body>
52
+
53
+ <section name="Apache Commons Compress Issue tracking">
54
+ <p>
55
+ Apache Commons Compress uses <a href="https://issues.apache.org/jira/">ASF JIRA</a> for tracking issues.
56
+ See the <a href="https://issues.apache.org/jira/browse/COMPRESS">Apache Commons Compress JIRA project page</a>.
57
+ </p>
58
+
59
+ <p>
60
+ To use JIRA you may need to <a href="https://issues.apache.org/jira/secure/Signup!default.jspa">create an account</a>
61
+ (if you have previously created/updated Commons issues using Bugzilla an account will have been automatically
62
+ created and you can use the <a href="https://issues.apache.org/jira/secure/ForgotPassword!default.jspa">Forgot Password</a>
63
+ page to get a new password).
64
+ </p>
65
+
66
+ <p>
67
+ If you would like to report a bug, or raise an enhancement request with
68
+ Apache Commons Compress please do the following:
69
+ </p>
70
+ <ol>
71
+ <li><a href="https://issues.apache.org/jira/secure/IssueNavigator.jspa?reset=true&amp;pid=12310904&amp;sorter/field=issuekey&amp;sorter/order=DESC&amp;status=1&amp;status=3&amp;status=4">Search existing open bugs</a>.
72
+ If you find your issue listed then please add a comment with your details.</li>
73
+ <li><a href="mail-lists.html">Search the mailing list archive(s)</a>.
74
+ You may find your issue or idea has already been discussed.</li>
75
+ <li>Decide if your issue is a bug or an enhancement.</li>
76
+ <li>Submit either a <a href="https://issues.apache.org/jira/secure/CreateIssueDetails!init.jspa?pid=12310904&amp;issuetype=1&amp;priority=4&amp;assignee=-1">bug report</a>
77
+ or <a href="https://issues.apache.org/jira/secure/CreateIssueDetails!init.jspa?pid=12310904&amp;issuetype=4&amp;priority=4&amp;assignee=-1">enhancement request</a>.</li>
78
+ </ol>
79
+
80
+ <p>
81
+ Please also remember these points:
82
+ </p>
83
+ <ul>
84
+ <li>the more information you provide, the better we can help you</li>
85
+ <li>test cases are vital, particularly for any proposed enhancements</li>
86
+ <li>the developers of Apache Commons Compress are all unpaid volunteers</li>
87
+ </ul>
88
+
89
+ <p>
90
+ For more information on creating patches see the
91
+ <a href="https://www.apache.org/dev/contributors.html">Apache Contributors Guide</a>.
92
+ </p>
93
+
94
+ <p>
95
+ You may also find these links useful:
96
+ </p>
97
+ <ul>
98
+ <li><a href="https://issues.apache.org/jira/secure/IssueNavigator.jspa?reset=true&amp;pid=12310904&amp;sorter/field=issuekey&amp;sorter/order=DESC&amp;status=1&amp;status=3&amp;status=4">All Open Apache Commons Compress bugs</a></li>
99
+ <li><a href="https://issues.apache.org/jira/secure/IssueNavigator.jspa?reset=true&amp;pid=12310904&amp;sorter/field=issuekey&amp;sorter/order=DESC&amp;status=5&amp;status=6">All Resolved Apache Commons Compress bugs</a></li>
100
+ <li><a href="https://issues.apache.org/jira/secure/IssueNavigator.jspa?reset=true&amp;pid=12310904&amp;sorter/field=issuekey&amp;sorter/order=DESC">All Apache Commons Compress bugs</a></li>
101
+ </ul>
102
+ </section>
103
+ </body>
104
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/limitations.xml ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+ Licensed to the Apache Software Foundation (ASF) under one or more
4
+ contributor license agreements. See the NOTICE file distributed with
5
+ this work for additional information regarding copyright ownership.
6
+ The ASF licenses this file to You under the Apache License, Version 2.0
7
+ (the "License"); you may not use this file except in compliance with
8
+ the License. You may obtain a copy of the License at
9
+
10
+ http://www.apache.org/licenses/LICENSE-2.0
11
+
12
+ Unless required by applicable law or agreed to in writing, software
13
+ distributed under the License is distributed on an "AS IS" BASIS,
14
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ See the License for the specific language governing permissions and
16
+ limitations under the License.
17
+ -->
18
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
19
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
20
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
21
+ <properties>
22
+ <title>Known Limitations and Problems</title>
23
+ </properties>
24
+
25
+ <body>
26
+ <section name="General">
27
+ <p>
28
+ This page lists the known limitations and problems of Apache
29
+ Commons Compress&#x2122; grouped by the archiving/compression
30
+ format they apply to.
31
+ </p>
32
+ <ul>
33
+ <li>Several implementations of decompressors and unarchivers will
34
+ invoke <a
35
+ href="https://docs.oracle.com/javase/10/docs/api/java/io/InputStream.html#skip(long)"><code>skip</code></a>
36
+ on the underlying <code>InputStream</code> which may throw an
37
+ <code>IOException</code> in some stream implementations. One
38
+ known case where this happens is when using
39
+ <code>System.in</code> as input. If you encounter an
40
+ exception with a message like "Illegal seek" we recommend you
41
+ wrap your stream in a <code>SkipShieldingInputStream</code>
42
+ from our utils package before passing it to Compress.</li>
43
+ <li>Commons Compress prior to 1.21 cannot be built on JDK 14 or newer.</li>
44
+ </ul>
45
+ </section>
46
+
47
+ <section name="7Z">
48
+ <ul>
49
+ <li>the format requires the otherwise optional <a
50
+ href="https://tukaani.org/xz/java.html">XZ for Java</a>
51
+ library.</li>
52
+ <li>only <code>File</code>s are supported as input/output,
53
+ not streams. Starting with Compress 1.13
54
+ <code>SeekableByteChannel</code> is supported as well.</li>
55
+ <li>In Compress 1.7
56
+ <code>ArchiveStreamFactory</code> will not auto-detect 7z
57
+ archives, starting with 1.8 it will throw a
58
+ <code>StreamingNotSupportedException</code> when reading from
59
+ a 7z archive.</li>
60
+ <li>Encryption, solid compression and header compression
61
+ are only supported when reading archives</li>
62
+ <li>Commons Compress 1.12 and earlier didn't support writing
63
+ LZMA.</li>
64
+ <li>Several of the "methods" supported by 7z are not
65
+ implemented in Compress.</li>
66
+ <li>No support for writing multi-volume archives. Such
67
+ archives can be read by simply concatenating the parts, for
68
+ example by using
69
+ <code>MultiReadOnlySeekableByteChannel</code>.</li>
70
+ <li>Support for some BCJ filters and the DELTA filter has
71
+ been added with Compress 1.8. Because of a known bug in
72
+ version 1.4 of the <a
73
+ href="https://tukaani.org/xz/java.html">XZ for Java</a>
74
+ library, archives using BCJ filters will cause an
75
+ <code>AssertionError</code> when read. If you need support
76
+ for BCJ filters you must use XZ for Java 1.5 or later.</li>
77
+ </ul>
78
+ </section>
79
+ <section name="AR">
80
+ <ul>
81
+ <li>AR archives can not contain directories - this is a
82
+ limitation of the format rather than one of Compress'
83
+ implementation.</li>
84
+ <li>file names longer than 16 characters are only fully
85
+ supported using the BSD dialect, the GNU/SRV4 dialect is only
86
+ supported when reading archives.</li>
87
+ </ul>
88
+ </section>
89
+ <section name="ARJ">
90
+ <ul>
91
+ <li>read-only support</li>
92
+ <li>no support for compression, encryption or multi-volume
93
+ archives</li>
94
+ </ul>
95
+ </section>
96
+ <section name="Brotli">
97
+ <ul>
98
+ <li>the format requires the otherwise optional <a
99
+ href="https://github.com/google/brotli">Google Brotli dec</a>
100
+ library.</li>
101
+ <li>read-only support</li>
102
+ <li><code>CompressorStreamFactory</code> is not able to auto-detect
103
+ streams using Brotli compression.</li>
104
+ </ul>
105
+ </section>
106
+ <section name="BZIP2">
107
+ <p>Versions of Compress prior to 1.4.1 are vulnerable to a
108
+ possible denial of service attack, see the <a
109
+ href="security.html">Security Reports</a> page for details.</p>
110
+ </section>
111
+ <section name="CPIO">
112
+ <p>We are not aware of any problems.</p>
113
+ </section>
114
+ <section name="DEFLATE">
115
+ <ul>
116
+ <li><code>CompressorStreamFactory</code> is not able to auto-detect
117
+ streams using DEFLATE compression.</li>
118
+ </ul>
119
+ </section>
120
+ <section name="DEFLATE64">
121
+ <ul>
122
+ <li><code>CompressorStreamFactory</code> is not able to auto-detect
123
+ streams using DEFLATE64 compression.</li>
124
+ <li>read-only support</li>
125
+ </ul>
126
+ </section>
127
+ <section name="DUMP">
128
+ <ul>
129
+ <li>read-only support</li>
130
+ <li>only the new-fs format is supported</li>
131
+ <li>the only compression algorithm supported is zlib</li>
132
+ </ul>
133
+ </section>
134
+ <section name="GZIP">
135
+ <p>We are not aware of any problems.</p>
136
+ </section>
137
+ <section name="JAR">
138
+ <p>JAR archives are special ZIP archives, all limitations of <a
139
+ href="#ZIP">ZIP</a> apply to JAR as well.</p>
140
+ <ul>
141
+ <li><code>ArchiveStreamFactory</code> cannot tell JAR
142
+ archives from ZIP archives and will not auto-detect
143
+ JARs.</li>
144
+ <li>Compress doesn't provide special access to the archive's
145
+ MANIFEST</li>
146
+ </ul>
147
+ </section>
148
+ <section name="LZ4">
149
+ <ul>
150
+ <li>In theory LZ4 compressed streams can contain literals and
151
+ copies of arbitrary length while Commons Compress only
152
+ supports sizes up to 2<sup>63</sup> - 1 (i.e. &#x2248; 9.2
153
+ EB).</li>
154
+ </ul>
155
+ </section>
156
+ <section name="LZMA">
157
+ <ul>
158
+ <li>the format requires the otherwise optional <a
159
+ href="https://tukaani.org/xz/java.html">XZ for Java</a>
160
+ library.</li>
161
+ <li>Commons Compress 1.12 and earlier only support reading
162
+ the format</li>
163
+ </ul>
164
+ </section>
165
+ <section name="PACK200">
166
+ <ul>
167
+ <li><p>Pack200 support in Commons Compress prior to 1.21 relies on the
168
+ <code>Pack200</code> class of the Java classlib. Java 14
169
+ removed support and thus Pack200 will not work at all when
170
+ running on Java 14 or later.</p>
171
+ <p>Starting with Commons Compress 1.21 the classlib
172
+ implementation is no longer used at all, instead Commons
173
+ Compress contains the pack200 code of the retired Apache
174
+ Harmony&#x2122; project.</p></li>
175
+ </ul>
176
+ </section>
177
+ <section name="SNAPPY">
178
+ <ul>
179
+ <li>Commons Compress 1.13 and earlier only support reading
180
+ the format</li>
181
+ </ul>
182
+ </section>
183
+ <section name="TAR">
184
+ <ul>
185
+ <li>sparse files could not be read in version prior to
186
+ Compress 1.20</li>
187
+ <li>sparse files can not be written</li>
188
+ <li>only a subset of the GNU and POSIX extensions are
189
+ supported</li>
190
+ <li>In Compress 1.6 <code>TarArchiveInputStream</code> could
191
+ fail to read the full contents of an entry unless the stream
192
+ was wrapped in a buffering stream.</li>
193
+ </ul>
194
+ </section>
195
+ <section name="XZ">
196
+ <ul>
197
+ <li>the format requires the otherwise optional <a
198
+ href="https://tukaani.org/xz/java.html">XZ for Java</a>
199
+ library.</li>
200
+ </ul>
201
+ </section>
202
+ <section name="Z">
203
+ <ul>
204
+ <li>Prior to Compress 1.8.1
205
+ <code>CompressorStreamFactory</code> was not able to
206
+ auto-detect streams using .Z compression.</li>
207
+ <li>read-only support</li>
208
+ </ul>
209
+ </section>
210
+ <section name="ZIP">
211
+ <ul>
212
+ <li><code>ZipArchiveInputStream</code> is limited and may
213
+ even return false contents in some cases, use
214
+ <code>ZipFile</code> whenever possible. See <a
215
+ href="zip.html#ZipArchiveInputStream_vs_ZipFile">the ZIP
216
+ documentation page</a> for details. This limitation is a
217
+ result of streaming data vs using random access and not a
218
+ limitation of Compress' specific implementation.</li>
219
+ <li>only a subset of compression methods are supported,
220
+ including the most common STORED and DEFLATEd. IMPLODE,
221
+ SHRINK, DEFLATE64 and BZIP2 support is read-only.</li>
222
+ <li>no support for encryption</li>
223
+ <li>no support for multi-volume archives prior to Compress 1.20</li>
224
+ <li>It is currently not possible to write split archives with
225
+ more than 64k segments. When creating split archives with more
226
+ than 100 segments you will need to adjust the file names as
227
+ <code>ZipArchiveOutputStream</code> assumes extensions will be
228
+ three characters long.</li>
229
+ <li>In versions prior to Compress 1.6
230
+ <code>ZipArchiveEntries</code> read from an archive will
231
+ contain non-zero millisecond values when using Java 8 or later rather
232
+ than the expected two-second granularity.</li>
233
+ <li>Compress 1.7 has a known bug where the very first entry
234
+ of an archive will not be read correctly by
235
+ <code>ZipArchiveInputStream</code> if it used the STORED
236
+ method.</li>
237
+ <li><code>ZipArchiveEntry#getLastModifiedDate</code> uses
238
+ <code>ZipEntry#getTime</code> under the covers which may
239
+ return different times for the same archive when using
240
+ different versions of Java.</li>
241
+ <li>In versions of Compress prior to 1.16 a specially crafted
242
+ ZIP archive can be used to cause an infinite loop inside of
243
+ Compress' extra field parser used by the <code>ZipFile</code>
244
+ and <code>ZipArchiveInputStream</code> classes. This can be
245
+ used to mount a denial of service attack against services
246
+ that use Compress' zip package. See the <a
247
+ href="security.html">Security Reports</a> page for
248
+ details.</li>
249
+ </ul>
250
+ </section>
251
+ <section name="Zstandard">
252
+ <ul>
253
+ <li>the format requires the otherwise optional <a
254
+ href="https://github.com/luben/zstd-jni">Zstandard JNI</a>
255
+ library.</li>
256
+ </ul>
257
+ </section>
258
+ </body>
259
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/mail-lists.xml ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+ Licensed to the Apache Software Foundation (ASF) under one or more
4
+ contributor license agreements. See the NOTICE file distributed with
5
+ this work for additional information regarding copyright ownership.
6
+ The ASF licenses this file to You under the Apache License, Version 2.0
7
+ (the "License"); you may not use this file except in compliance with
8
+ the License. You may obtain a copy of the License at
9
+
10
+ https://www.apache.org/licenses/LICENSE-2.0
11
+
12
+ Unless required by applicable law or agreed to in writing, software
13
+ distributed under the License is distributed on an "AS IS" BASIS,
14
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ See the License for the specific language governing permissions and
16
+ limitations under the License.
17
+ -->
18
+ <!--
19
+ +======================================================================+
20
+ |**** ****|
21
+ |**** THIS FILE IS GENERATED BY THE COMMONS BUILD PLUGIN ****|
22
+ |**** DO NOT EDIT DIRECTLY ****|
23
+ |**** ****|
24
+ +======================================================================+
25
+ | TEMPLATE FILE: mail-lists-template.xml |
26
+ | commons-build-plugin/trunk/src/main/resources/commons-xdoc-templates |
27
+ +======================================================================+
28
+ | |
29
+ | 1) Re-generate using: mvn commons-build:mail-page |
30
+ | |
31
+ | 2) Set the following properties in the component's pom: |
32
+ | - commons.componentid (required, alphabetic, lower case) |
33
+ | |
34
+ | 3) Example Properties |
35
+ | |
36
+ | <properties> |
37
+ | <commons.componentid>math</commons.componentid> |
38
+ | </properties> |
39
+ | |
40
+ +======================================================================+
41
+ -->
42
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
43
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
44
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
45
+ <properties>
46
+ <title>Apache Commons Compress Mailing Lists</title>
47
+ <author email="dev@commons.apache.org">Apache Commons Team</author>
48
+ </properties>
49
+ <body>
50
+
51
+ <section name="Overview">
52
+ <p>
53
+ <a href="index.html">Apache Commons Compress</a> shares mailing lists with all the other
54
+ <a href="https://commons.apache.org/components.html">Commons Components</a>.
55
+ To make it easier for people to only read messages related to components they are interested in,
56
+ the convention in Commons is to prefix the subject line of messages with the component's name,
57
+ for example:
58
+ </p>
59
+ <ul>
60
+ <li>[compress] Problem with the ...</li>
61
+ </ul>
62
+ <p>
63
+ Questions related to the usage of Apache Commons Compress should be posted to the
64
+ <a href="https://lists.apache.org/list.html?user@commons.apache.org">User List</a>.
65
+ <br />
66
+ The <a href="https://lists.apache.org/list.html?dev@commons.apache.org">Developer List</a>
67
+ is for questions and discussion related to the development of Apache Commons Compress.
68
+ <br />
69
+ Please do not cross-post; developers are also subscribed to the user list.
70
+ <br />
71
+ You must be subscribed to post to the mailing lists. Follow the Subscribe links below
72
+ to subscribe.
73
+ </p>
74
+ <p>
75
+ <strong>Note:</strong> please don't send patches or attachments to any of the mailing lists;
76
+ most of the lists are set up to drop attachments.
77
+ Patches are best handled via the <a href="issue-tracking.html">Issue Tracking</a> system.
78
+ If you have a GitHub account, most components also accept PRs (pull requests).
79
+ Otherwise, please upload the file to a public server and include the URL in the mail.
80
+ </p>
81
+ </section>
82
+
83
+ <section name="Apache Commons Compress Mailing Lists">
84
+ <p>
85
+ <strong>Please prefix the subject line of any messages for <a href="index.html">Apache Commons Compress</a>
86
+ with <i>[compress]</i></strong> - <i>thanks!</i>
87
+ <br />
88
+ <br />
89
+ </p>
90
+
91
+ <table>
92
+ <tr>
93
+ <th>Name</th>
94
+ <th>Subscribe</th>
95
+ <th>Unsubscribe</th>
96
+ <th>Post</th>
97
+ <th>Archive</th>
98
+ <th>Other Archives</th>
99
+ </tr>
100
+
101
+
102
+ <tr>
103
+ <td>
104
+ <strong>Commons User List</strong>
105
+ <br /><br />
106
+ Questions on using Apache Commons Compress.
107
+ <br /><br />
108
+ </td>
109
+ <td><a href="mailto:user-subscribe@commons.apache.org">Subscribe</a></td>
110
+ <td><a href="mailto:user-unsubscribe@commons.apache.org">Unsubscribe</a></td>
111
+ <td><a href="mailto:user@commons.apache.org?subject=[compress]">Post</a></td>
112
+ <td>
113
+ <a href="https://lists.apache.org/list.html?user@commons.apache.org">lists.apache.org</a>
114
+ </td>
115
+ <td>
116
+ <a href="https://www.mail-archive.com/user@commons.apache.org/">www.mail-archive.com</a>
117
+ </td>
118
+ </tr>
119
+
120
+
121
+ <tr>
122
+ <td>
123
+ <strong>Commons Developer List</strong>
124
+ <br /><br />
125
+ Discussion of development of Apache Commons Compress.
126
+ <br /><br />
127
+ </td>
128
+ <td><a href="mailto:dev-subscribe@commons.apache.org">Subscribe</a></td>
129
+ <td><a href="mailto:dev-unsubscribe@commons.apache.org">Unsubscribe</a></td>
130
+ <td><a href="mailto:dev@commons.apache.org?subject=[compress]">Post</a></td>
131
+ <td>
132
+ <a href="https://lists.apache.org/list.html?dev@commons.apache.org">lists.apache.org</a>
133
+ </td>
134
+ <td>
135
+ <a href="https://www.mail-archive.com/dev@commons.apache.org/">www.mail-archive.com</a>
136
+ </td>
137
+ </tr>
138
+
139
+
140
+ <tr>
141
+ <td>
142
+ <strong>Commons Issues List</strong>
143
+ <br /><br />
144
+ Only for e-mails automatically generated by the <a href="issue-tracking.html">issue tracking</a> system.
145
+ <br /><br />
146
+ </td>
147
+ <td><a href="mailto:issues-subscribe@commons.apache.org">Subscribe</a></td>
148
+ <td><a href="mailto:issues-unsubscribe@commons.apache.org">Unsubscribe</a></td>
149
+ <td><i>read only</i></td>
150
+ <td>
151
+ <a href="https://lists.apache.org/list.html?issues@commons.apache.org">lists.apache.org</a>
152
+ </td>
153
+ <td>
154
+ <a href="https://www.mail-archive.com/issues@commons.apache.org/">www.mail-archive.com</a>
155
+ </td>
156
+ </tr>
157
+
158
+
159
+ <tr>
160
+ <td>
161
+ <strong>Commons Commits List</strong>
162
+ <br /><br />
163
+ Only for e-mails automatically generated by the <a href="scm.html">source control</a> system.
164
+ <br /><br />
165
+ </td>
166
+ <td><a href="mailto:commits-subscribe@commons.apache.org">Subscribe</a></td>
167
+ <td><a href="mailto:commits-unsubscribe@commons.apache.org">Unsubscribe</a></td>
168
+ <td><i>read only</i></td>
169
+ <td>
170
+ <a href="https://lists.apache.org/list.html?commits@commons.apache.org">lists.apache.org</a>
171
+ </td>
172
+ <td>
173
+ <a href="https://www.mail-archive.com/commits@commons.apache.org/">www.mail-archive.com</a>
174
+ </td>
175
+ </tr>
176
+
177
+ </table>
178
+
179
+ </section>
180
+ <section name="Apache Mailing Lists">
181
+ <p>
182
+ Other mailing lists which you may find useful include:
183
+ </p>
184
+
185
+ <table>
186
+ <tr>
187
+ <th>Name</th>
188
+ <th>Subscribe</th>
189
+ <th>Unsubscribe</th>
190
+ <th>Post</th>
191
+ <th>Archive</th>
192
+ <th>Other Archives</th>
193
+ </tr>
194
+ <tr>
195
+ <td>
196
+ <strong>Apache Announce List</strong>
197
+ <br /><br />
198
+ General announcements of Apache project releases.
199
+ <br /><br />
200
+ </td>
201
+ <td><a class="externalLink" href="mailto:announce-subscribe@apache.org">Subscribe</a></td>
202
+ <td><a class="externalLink" href="mailto:announce-unsubscribe@apache.org">Unsubscribe</a></td>
203
+ <td><i>read only</i></td>
204
+ <td>
205
+ <a class="externalLink" href="https://lists.apache.org/list.html?announce@apache.org">lists.apache.org</a>
206
+ </td>
207
+ <td>
208
+ <a class="externalLink" href="https://www.mail-archive.com/announce@apache.org/">www.mail-archive.com</a>
209
+ </td>
210
+ </tr>
211
+ </table>
212
+
213
+ </section>
214
+ </body>
215
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/pack200.xml ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+
4
+ Licensed to the Apache Software Foundation (ASF) under one or more
5
+ contributor license agreements. See the NOTICE file distributed with
6
+ this work for additional information regarding copyright ownership.
7
+ The ASF licenses this file to You under the Apache License, Version 2.0
8
+ (the "License"); you may not use this file except in compliance with
9
+ the License. You may obtain a copy of the License at
10
+
11
+ http://www.apache.org/licenses/LICENSE-2.0
12
+
13
+ Unless required by applicable law or agreed to in writing, software
14
+ distributed under the License is distributed on an "AS IS" BASIS,
15
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ See the License for the specific language governing permissions and
17
+ limitations under the License.
18
+
19
+ -->
20
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
21
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
22
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
23
+ <properties>
24
+ <title>Commons Compress Pack200 Package</title>
25
+ <author email="dev@commons.apache.org">Apache Commons Team</author>
26
+ </properties>
27
+ <body>
28
+ <section name="The Pack200 package">
29
+
30
+ <p>The Pack200 algorithm is not a general purpose compression
31
+ algorithm but one specialized for compressing JAR archives. JAR
32
+ archives compressed with Pack200 will in general be different
33
+ from the original archive when decompressed again. More
34
+ information can be found in the Javadocs of the <a
35
+ href="https://docs.oracle.com/javase/7/docs/api/java/util/jar/Pack200.Packer.html">Pack200.Packer
36
+ class</a>.</p>
37
+
38
+ <p>While the <code>pack200</code> command line utility of the
39
+ JDK creates GZip compressed archives (<code>.pack.gz</code>) by
40
+ default, the streams provided by the Pack200 package only
41
+ perform the actual Pack200 operation. Wrap them in an
42
+ additional <code>GzipCompressor(In|Out)putStream</code> in order to deal
43
+ with deflated streams.</p>
44
+
45
+ <subsection name="Pack200Strategy">
46
+
47
+ <p>The Pack200-API provided by the java class library is not
48
+ streaming friendly as it wants to consume its input completely
49
+ in a single operation. Because of this
50
+ <code>Pack200CompressorInputStream</code>'s constructor will immediately
51
+ unpack the stream, cache the results and provide an input
52
+ stream to the cache.</p>
53
+
54
+ <p><code>Pack200CompressorOutputStream</code> will cache all data that
55
+ is written to it and then pack it once the <code>finish</code>
56
+ or <code>close</code> method is called.</p>
57
+
58
+ <p>Two different caching modes are available - "in memory",
59
+ which is the default, and "temporary file". By default data
60
+ is cached in memory but you should switch to the temporary
61
+ file option if your archives are really big.</p>
62
+
63
+ <p>Given there always is an intermediate result
64
+ the <code>getBytesRead</code> and <code>getCount</code>
65
+ methods of <code>Pack200CompressorInputStream</code> are
66
+ meaningless (read from the real stream or from the
67
+ intermediate result?) and always return 0.</p>
68
+
69
+ </subsection>
70
+
71
+ <subsection name="Normalization">
72
+
73
+ <p>As a pack/unpack cycle may create a JAR archive that is
74
+ different from the original, digital signatures created for
75
+ the initial JAR will be broken by the process. There is a way
76
+ to "normalize" JAR archives prior to packing them that ensures
77
+ signatures applied to the "normalized" JAR will still be valid
78
 + after a pack/unpack cycle - see <a
79
+ href="https://download.oracle.com/javase/7/docs/api/java/util/jar/Pack200.Packer.html">Pack200.Packer</a>'s
80
+ javadocs.</p>
81
+
82
+ <p>The <code>Pack200Utils</code> class in the
83
+ <code>pack200</code> package provides several overloads of a
84
+ <code>normalize</code> method that can be used to prepare a
85
+ JAR archive in place or to a separate file.</p>
86
+
87
+ </subsection>
88
+
89
+ </section>
90
+ </body>
91
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/security.xml ADDED
@@ -0,0 +1,301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+
4
+ Licensed to the Apache Software Foundation (ASF) under one or more
5
+ contributor license agreements. See the NOTICE file distributed with
6
+ this work for additional information regarding copyright ownership.
7
+ The ASF licenses this file to You under the Apache License, Version 2.0
8
+ (the "License"); you may not use this file except in compliance with
9
+ the License. You may obtain a copy of the License at
10
+
11
+ http://www.apache.org/licenses/LICENSE-2.0
12
+
13
+ Unless required by applicable law or agreed to in writing, software
14
+ distributed under the License is distributed on an "AS IS" BASIS,
15
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ See the License for the specific language governing permissions and
17
+ limitations under the License.
18
+ -->
19
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
20
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
21
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
22
+ <properties>
23
+ <title>Commons Compress Security Reports</title>
24
+ <author email="dev@commons.apache.org">Commons Team</author>
25
+ </properties>
26
+ <body>
27
+ <section name="General Information">
28
+ <p>For information about reporting or asking questions about
29
+ security problems, please see the <a
30
+ href="https://commons.apache.org/security.html">security page
31
+ of the Commons project</a>.</p>
32
+ </section>
33
+
34
+ <section name="Apache Commons Compress Security Vulnerabilities">
35
+ <p>This page lists all security vulnerabilities fixed in
36
+ released versions of Apache Commons Compress. Each
37
+ vulnerability is given a security impact rating by the
38
+ development team - please note that this rating may vary from
39
+ platform to platform. We also list the versions of Commons
40
+ Compress the flaw is known to affect, and where a flaw has not
41
+ been verified list the version with a question mark.</p>
42
+
43
+ <p>Please note that binary patches are never provided. If you
44
+ need to apply a source code patch, use the building
45
+ instructions for the Commons Compress version that you are
46
+ using.</p>
47
+
48
+ <p>If you need help on building Commons Compress or other help
49
+ on following the instructions to mitigate the known
50
+ vulnerabilities listed here, please send your questions to the
51
+ public <a href="mail-lists.html">Compress Users mailing
52
+ list</a>.</p>
53
+
54
+ <p>If you have encountered an unlisted security vulnerability
55
+ or other unexpected behavior that has security impact, or if
56
+ the descriptions here are incomplete, please report them
57
+ privately to the Apache Security Team. Thank you.</p>
58
+
59
+ <subsection name="Fixed in Apache Commons Compress 1.26.0">
60
+ <p><b>Important: Denial of Service</b> <a
61
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-25710">CVE-2024-25710</a></p>
62
+ <p>This affects version 1.3 through 1.25.0.</p>
63
+ <p>This denial of service is caused by an infinite loop reading a corrupted DUMP file.</p>
64
+ <p>Users are recommended to upgrade to version 1.26.0 which fixes the issue.</p>
65
+ <p>Credit to Yakov Shafranovich, Amazon Web Services (reporter).</p>
66
+
67
+ <p><b>Moderate: Denial of Service</b> <a
68
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-26308">CVE-2024-26308</a></p>
69
+ <p>You can get an OutOfMemoryError unpacking a broken Pack200 file.</p>
70
+ <p>This issue affects Commons Compress 1.21 before 1.26.0.</p>
71
+ <p>Users are recommended to upgrade to version 1.26.0 which fixes the issue.</p>
72
+ <p>Credit to Yakov Shafranovich, Amazon Web Services (reporter).</p>
73
+ </subsection>
74
+
75
+ <subsection name="Fixed in Apache Commons Compress 1.24.0">
76
+ <p><b>Moderate: Denial of Service</b> <a
77
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-42503">CVE-2023-42503</a></p>
78
+
79
+ <p>Improper Input Validation, Uncontrolled Resource Consumption vulnerability in Apache Commons Compress in TAR parsing.</p>
80
+ <p>This issue affects Apache Commons Compress: from 1.22 before 1.24.0.</p>
81
+ <p>Users are recommended to upgrade to version 1.24.0, which fixes the issue.</p>
82
+ <p>A third party can create a malformed TAR file by manipulating file modification times headers,
83
+ which when parsed with Apache Commons Compress, will cause a denial of service issue via CPU consumption.</p>
84
+ <p>In version 1.22 of Apache Commons Compress, support was added for file modification times with higher precision
85
+ (issue # COMPRESS-612<sup><a href="#Ref-1-24-1">[1]</a></sup>).
86
+ The format for the PAX extended headers carrying this data consists of two numbers separated by a period<sup><a href="#Ref-1-24-2">[2]</a></sup>,
87
+ indicating seconds and subsecond precision (for example “1647221103.5998539”). The impacted fields are “atime”, “ctime”, “mtime” and
88
+ “LIBARCHIVE.creationtime”. No input validation is performed prior to the parsing of header values.</p>
89
+ <p>Parsing of these numbers uses the BigDecimal<sup><a href="#Ref-1-24-3">[3]</a></sup> class from the JDK which has a publicly known algorithmic complexity issue when doing
90
+ operations on large numbers, causing denial of service (see issue # JDK-6560193<sup><a href="#Ref-1-24-4">[4]</a></sup>). A third party can manipulate file time headers
91
+ in a TAR file by placing a number with a very long fraction (300,000 digits) or a number with exponent notation (such as “9e9999999”)
92
+ within a file modification time header, and the parsing of files with these headers will take hours instead of seconds, leading to a
93
+ denial of service via exhaustion of CPU resources. This issue is similar to CVE-2012-2098<sup><a href="#Ref-1-24-5">[5]</a></sup>.</p>
94
+ <ul>
95
+ <li id="Ref-1-24-1">[1]: <a href="https://issues.apache.org/jira/browse/COMPRESS-612">COMPRESS-612</a></li>
96
+ <li id="Ref-1-24-2">[2]: <a href="https://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_05">PAX extended headers</a></li>
97
+ <li id="Ref-1-24-3">[3]: <a href="https://docs.oracle.com/javase/8/docs/api/java/math/BigDecimal.html">BigDecimal</a></li>
98
+ <li id="Ref-1-24-4">[4]: <a href="https://bugs.openjdk.org/browse/JDK-6560193">JDK-6560193</a></li>
99
+ <li id="Ref-1-24-5">[5]: <a href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-2098">CVE-2012-2098</a></li>
100
+ </ul>
101
+ <p>Only applications using CompressorStreamFactory class (with auto-detection of file types), TarArchiveInputStream and TarFile
102
+ classes to parse TAR files are impacted. Since this code was introduced in v1.22, only that version and later versions are impacted.</p>
103
+ </subsection>
104
+
105
+ <subsection name="Fixed in Apache Commons Compress 1.21">
106
+ <p><b>Low: Denial of Service</b> <a
107
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-35515">CVE-2021-35515</a></p>
108
+
109
+ <p>When reading a specially crafted 7Z archive, the construction of the
110
+ list of codecs that decompress an entry can result in an infinite
111
+ loop. This could be used to mount a denial of service attack against
112
+ services that use Compress' sevenz package.</p>
113
+
114
+ <p>This was fixed in revision <a
115
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=3fe6b42110dc56d0d6fe0aaf80cfecb8feea5321">3fe6b42</a>.</p>
116
+
117
+ <p>This issue was discovered by OSS Fuzz.</p>
118
+
119
+ <p>Affects: 1.6 - 1.20</p>
120
+
121
+ <p><b>Low: Denial of Service</b> <a
122
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-35516">CVE-2021-35516</a></p>
123
+
124
+ <p>When reading a specially crafted 7Z archive, Compress can be made to
125
+ allocate large amounts of memory that finally leads to an out of memory
126
+ error even for very small inputs. This could be used to mount a denial
127
+ of service attack against services that use Compress' sevenz package.</p>
128
+
129
+ <p>This was fixed in revisions
130
+ <a
131
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=26924e96c7730db014c310757e11c9359db07f3e">26924e9</a>,
132
+ <a
133
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=c51de6cfaec75b21566374158f25e1734c3a94cb">c51de6c</a>,
134
+ <a
135
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=0aba8b8fd8053ae323f15d736d1762b2161c76a6">0aba8b8</a>,
136
+ <a
137
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=60d551a748236d7f4651a4ae88d5a351f7c5754b">60d551a</a>,
138
+ <a
139
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=bf5a5346ae04b9d2a5b0356ca75f11dcc8d94789">bf5a534</a>,
140
+ <a
141
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=5761493cbaf7a7d608a3b68f4d61aaa822dbeb4f">5761493</a>,
142
+ and <a
143
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=ae2b27cc011f47f0289cb24a11f2d4f1db711f8a">ae2b27c</a>
144
+ .</p>
145
+
146
+ <p>This issue was first reported to the project's issue tracker as
147
+ <a href="https://issues.apache.org/jira/browse/COMPRESS-542">COMPRESS-542</a>
148
+ by Robin Schimpf.
149
+ Later OSS Fuzz detected ways to exploit this issue which managed to
150
+ escape the initial attempt to fix it.</p>
151
+
152
+ <p>Affects: 1.6 - 1.20</p>
153
+
154
+ <p><b>Low: Denial of Service</b> <a
155
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-35517">CVE-2021-35517</a></p>
156
+
157
+ <p>When reading a specially crafted TAR archive, Compress
158
+ can be made to allocate large amounts of memory that finally
159
+ leads to an out of memory error even for very small
160
+ inputs. This could be used to mount a denial of service
161
+ attack against services that use Compress' tar package.</p>
162
+
163
+ <p>This was fixed in revisions
164
+ <a
165
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=d0af873e77d16f41edfef7b69da5c8c35c96a650">d0af873</a>,
166
+ <a
167
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=7ce1b0796d6cbe1f41b969583bd49f33ae0efef0">7ce1b07</a>
168
+ and <a
169
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=80124dd9fe4b0a0b2e203ca19aacac8cd0afc96f">80124dd</a>.</p>
170
+
171
+ <p>This issue was discovered by OSS Fuzz.</p>
172
+
173
+ <p>Affects: 1.1 - 1.20</p>
174
+
175
+ <p><b>Low: Denial of Service</b> <a
176
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-36090">CVE-2021-36090</a></p>
177
+
178
+ <p>When reading a specially crafted ZIP archive, Compress
179
+ can be made to allocate large amounts of memory that finally
180
+ leads to an out of memory error even for very small
181
+ inputs. This could be used to mount a denial of service
182
+ attack against services that use Compress' zip package.</p>
183
+
184
+ <p>This was fixed in revisions
185
+ <a
186
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=ef5d70b625000e38404194aaab311b771c44efda">ef5d70b</a>
187
+ and <a
188
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commit;h=80124dd9fe4b0a0b2e203ca19aacac8cd0afc96f">80124dd</a>.</p>
189
+
190
+ <p>This issue was discovered by OSS Fuzz.</p>
191
+
192
+ <p>Affects: 1.0 - 1.20</p>
193
+
194
+ </subsection>
195
+
196
+ <subsection name="Fixed in Apache Commons Compress 1.19">
197
+ <p><b>Low: Denial of Service</b> <a
198
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-12402">CVE-2019-12402</a></p>
199
+
200
+ <p>The file name encoding algorithm used internally in Apache Commons
201
+ Compress can get into an infinite loop when faced with specially
202
+ crafted inputs. This can lead to a denial of service attack if an
203
+ attacker can choose the file names inside of an archive created by
204
+ Compress.</p>
205
+
206
+ <p>This was fixed in revision <a
207
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=commitdiff;h=4ad5d80a6272e007f64a6ac66829ca189a8093b9;hp=16a0c84e84b93cc8c107b7ff3080bd11317ab581">4ad5d80a</a>.</p>
208
+
209
+ <p>This was first reported to the Commons Security Team on 22 August
210
+ 2019 and made public on 27 August 2019.</p>
211
+
212
+ <p>Affects: 1.15 - 1.18</p>
213
+
214
+ </subsection>
215
+
216
+ <subsection name="Fixed in Apache Commons Compress 1.18">
217
+ <p><b>Low: Denial of Service</b> <a
218
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-11771">CVE-2018-11771</a></p>
219
+
220
+ <p>When reading a specially crafted ZIP archive, the read
221
+ method of <code>ZipArchiveInputStream</code> can fail to
222
+ return the correct EOF indication after the end of the
223
+ stream has been reached. When combined with a
224
+ <code>java.io.InputStreamReader</code> this can lead to an
225
+ infinite stream, which can be used to mount a denial of
226
+ service attack against services that use Compress' zip
227
 + package.</p>
228
+
229
+ <p>This was fixed in revision <a
230
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=blobdiff;f=src/main/java/org/apache/commons/compress/archivers/zip/ZipArchiveInputStream.java;h=e1995d7aa51dfac6ae933987fb0b7760c607582b;hp=0a2c1aa0063c620c867715119eae2013c87b5e70;hb=a41ce6892cb0590b2e658704434ac0dbcb6834c8;hpb=64ed6dde03afbef6715fdfdeab5fc04be6192899">a41ce68</a>.</p>
231
+
232
+ <p>This was first reported to the Security Team on 14 June
233
+ 2018 and made public on 16 August 2018.</p>
234
+
235
+ <p>Affects: 1.7 - 1.17</p>
236
+
237
+ </subsection>
238
+
239
+ <subsection name="Fixed in Apache Commons Compress 1.16">
240
+ <p><b>Low: Denial of Service</b> <a
241
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1324">CVE-2018-1324</a></p>
242
+
243
+ <p>A specially crafted ZIP archive can be used to cause an
244
+ infinite loop inside of Compress' extra field parser used by
245
+ the <code>ZipFile</code> and
246
+ <code>ZipArchiveInputStream</code> classes. This can be
247
+ used to mount a denial of service attack against services
248
+ that use Compress' zip package.</p>
249
+
250
+ <p>This was fixed in revision <a
251
+ href="https://gitbox.apache.org/repos/asf?p=commons-compress.git;a=blobdiff;f=src/main/java/org/apache/commons/compress/archivers/zip/X0017_StrongEncryptionHeader.java;h=acc3b22346b49845e85b5ef27a5814b69e834139;hp=0feb9c98cc622cde1defa3bbd268ef82b4ae5c18;hb=2a2f1dc48e22a34ddb72321a4db211da91aa933b;hpb=dcb0486fb4cb2b6592c04d6ec2edbd3f690df5f2">2a2f1dc4</a>.</p>
252
+
253
+ <p>This was first reported to the project's JIRA on <a
254
+ href="https://issues.apache.org/jira/browse/COMPRESS-432">19
255
+ December 2017</a>.</p>
256
+
257
+ <p>Affects: 1.11 - 1.15</p>
258
+
259
+ </subsection>
260
+
261
+ <subsection name="Fixed in Apache Commons Compress 1.4.1">
262
+ <p><b>Low: Denial of Service</b> <a
263
+ href="https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-2098">CVE-2012-2098</a></p>
264
+
265
+ <p>The bzip2 compressing streams in Apache Commons Compress
266
+ internally use sorting algorithms with unacceptable
267
+ worst-case performance on very repetitive inputs. A
268
+ specially crafted input to Compress'
269
+ <code>BZip2CompressorOutputStream</code> can be used to make
270
+ the process spend a very long time while using up all
271
+ available processing time effectively leading to a denial of
272
+ service.</p>
273
+
274
+ <p>This was fixed in revisions
275
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1332540">1332540</a>,
276
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1332552">1332552</a>,
277
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1333522">1333522</a>,
278
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1337444">1337444</a>,
279
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1340715">1340715</a>,
280
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1340723">1340723</a>,
281
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1340757">1340757</a>,
282
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1340786">1340786</a>,
283
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1340787">1340787</a>,
284
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1340790">1340790</a>,
285
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1340795">1340795</a> and
286
+ <a href="https://svn.apache.org/viewvc?view=revision&amp;revision=1340799">1340799</a>.</p>
287
+
288
+ <p>This was first reported to the Security Team on 12 April
289
+ 2012 and made public on 23 May 2012.</p>
290
+
291
+ <p>Affects: 1.0 - 1.4</p>
292
+
293
+ </subsection>
294
+ </section>
295
+
296
+ <section name="Errors and Omissions">
297
+ <p>Please report any errors or omissions to <a
298
+ href="mail-lists.html">the dev mailing list</a>.</p>
299
+ </section>
300
+ </body>
301
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/tar.xml ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+
4
+ Licensed to the Apache Software Foundation (ASF) under one or more
5
+ contributor license agreements. See the NOTICE file distributed with
6
+ this work for additional information regarding copyright ownership.
7
+ The ASF licenses this file to You under the Apache License, Version 2.0
8
+ (the "License"); you may not use this file except in compliance with
9
+ the License. You may obtain a copy of the License at
10
+
11
+ http://www.apache.org/licenses/LICENSE-2.0
12
+
13
+ Unless required by applicable law or agreed to in writing, software
14
+ distributed under the License is distributed on an "AS IS" BASIS,
15
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ See the License for the specific language governing permissions and
17
+ limitations under the License.
18
+
19
+ -->
20
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
21
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
22
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
23
+ <properties>
24
+ <title>Commons Compress TAR package</title>
25
+ <author email="dev@commons.apache.org">Apache Commons Team</author>
26
+ </properties>
27
+ <body>
28
+ <section name="The TAR package">
29
+
30
+ <p>In addition to the information stored
31
+ in <code>ArchiveEntry</code> a <code>TarArchiveEntry</code>
32
+ stores various attributes including information about the
33
+ original owner and permissions.</p>
34
+
35
+ <p>There are several different dialects of the TAR format, maybe
36
+ even different TAR formats. The tar package contains special
37
+ cases in order to read many of the existing dialects and will by
38
+ default try to create archives in the original format (often
39
+ called "ustar"). This original format didn't support file names
40
+ longer than 100 characters or bigger than 8 GiB and the tar
41
+ package will by default fail if you try to write an entry that
42
+ goes beyond those limits. "ustar" is the common denominator of
43
+ all the existing tar dialects and is understood by most of the
44
+ existing tools.</p>
45
+
46
+ <p>The tar package does not support the full POSIX tar standard
47
+ nor more modern GNU extension of said standard.</p>
48
+
49
+ <subsection name="Long File Names">
50
+
51
+ <p>The <code>longFileMode</code> option of
52
+ <code>TarArchiveOutputStream</code> controls how files with
53
+ names longer than 100 characters are handled. The possible
54
+ choices are:</p>
55
+
56
+ <ul>
57
+ <li><code>LONGFILE_ERROR</code>: throw an exception if such a
58
+ file is added. This is the default.</li>
59
+ <li><code>LONGFILE_TRUNCATE</code>: truncate such names.</li>
60
+ <li><code>LONGFILE_GNU</code>: use a GNU tar variant now
61
+ referred to as "oldgnu" of storing such names. If you choose
62
+ the GNU tar option, the archive can not be extracted using
63
+ many other tar implementations like the ones of OpenBSD,
64
+ Solaris or MacOS X.</li>
65
+ <li><code>LONGFILE_POSIX</code>: use a PAX <a
66
+ href="http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html#tag_04_100_13_03">extended
67
+ header</a> as defined by POSIX 1003.1. Most modern tar
68
+ implementations are able to extract such archives. <em>since
69
+ Commons Compress 1.4</em></li>
70
+ </ul>
71
+
72
+ <p><code>TarArchiveInputStream</code> will recognize the GNU
73
+ tar as well as the POSIX extensions (starting with Commons
74
+ Compress 1.2) for long file names and reads the longer names
75
+ transparently.</p>
76
+ </subsection>
77
+
78
+ <subsection name="Big Numeric Values">
79
+
80
+ <p>The <code>bigNumberMode</code> option of
81
+ <code>TarArchiveOutputStream</code> controls how files larger
82
+ than 8GiB or with other big numeric values that can't be
83
+ encoded in traditional header fields are handled. The
84
+ possible choices are:</p>
85
+
86
+ <ul>
87
+ <li><code>BIGNUMBER_ERROR</code>: throw an exception if such an
88
+ entry is added. This is the default.</li>
89
+ <li><code>BIGNUMBER_STAR</code>: use a variant first
90
+ introduced by J&#xf6;rg Schilling's <a
91
+ href="http://developer.berlios.de/projects/star">star</a>
92
+ and later adopted by GNU and BSD tar. This method is not
93
+ supported by all implementations.</li>
94
+ <li><code>BIGNUMBER_POSIX</code>: use a PAX <a
95
+ href="http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html#tag_04_100_13_03">extended
96
+ header</a> as defined by POSIX 1003.1. Most modern tar
97
+ implementations are able to extract such archives.</li>
98
+ </ul>
99
+
100
+ <p>Starting with Commons Compress 1.4
101
+ <code>TarArchiveInputStream</code> will recognize the star as
102
+ well as the POSIX extensions for big numeric values and reads them
103
+ transparently.</p>
104
+ </subsection>
105
+
106
+ <subsection name="File Name Encoding">
107
+ <p>The original ustar format only supports 7-Bit ASCII file
108
+ names, later implementations use the platform's default
109
+ encoding to encode file names. The POSIX standard recommends
110
+ using PAX extension headers for non-ASCII file names
111
+ instead.</p>
112
+
113
+ <p>Commons Compress 1.1 to 1.3 assumed file names would be
114
+ encoded using ISO-8859-1. Starting with Commons Compress 1.4
115
+ you can specify the encoding to expect (to use when writing)
116
+ as a parameter to <code>TarArchiveInputStream</code>
117
+ (<code>TarArchiveOutputStream</code>), it now defaults to the
118
+ platform's default encoding.</p>
119
+
120
+ <p>Since Commons Compress 1.4 another optional parameter -
121
+ <code>addPaxHeadersForNonAsciiNames</code> - of
122
+ <code>TarArchiveOutputStream</code> controls whether PAX
123
+ extension headers will be written for non-ASCII file names.
124
+ By default they will not be written to preserve space.
125
+ <code>TarArchiveInputStream</code> will read them
126
+ transparently if present.</p>
127
+ </subsection>
128
+
129
+ <subsection name="Sparse files">
130
+
131
+ <p>Prior to Commons Compress 1.20 <code>TarArchiveInputStream</code> would recognize sparse
132
+ file entries stored using the "oldgnu" format
133
+ (<code>-&#x2d;sparse-version=0.0</code> in GNU tar) but not
134
+ able to extract them correctly. Starting with Commons Compress 1.20
135
+ all GNU and POSIX variants of sparse files are recognized and
136
+ can be read.</p>
137
+ </subsection>
138
+
139
+ <subsection name="Consuming Archives Completely">
140
+
141
+ <p>The end of a tar archive is signaled by two consecutive
142
+ records of all zeros. Unfortunately not all tar
143
+ implementations adhere to this and some only write one record
144
+ to end the archive. Commons Compress will always write two
145
+ records but stop reading an archive as soon as finds one
146
+ record of all zeros.</p>
147
+
148
+ <p>Prior to version 1.5 this could leave the second EOF record
149
+ inside the stream when <code>getNextEntry</code> or
150
+ <code>getNextTarEntry</code> returned <code>null</code>
151
+ Starting with version 1.5 <code>TarArchiveInputStream</code>
152
+ will try to read a second record as well if present,
153
+ effectively consuming the archive completely.</p>
154
+
155
+ </subsection>
156
+
157
+ <subsection name="PAX Extended Header">
158
+ <p>The tar package has supported reading PAX extended headers
159
+ since 1.3 for local headers and 1.11 for global headers. The
160
+ following entries of PAX headers are applied when reading:</p>
161
+
162
+ <dl>
163
+ <dt>path</dt>
164
+ <dd>set the entry's name</dd>
165
+
166
+ <dt>linkpath</dt>
167
+ <dd>set the entry's link name</dd>
168
+
169
+ <dt>gid</dt>
170
+ <dd>set the entry's group id</dd>
171
+
172
+ <dt>gname</dt>
173
+ <dd>set the entry's group name</dd>
174
+
175
+ <dt>uid</dt>
176
+ <dd>set the entry's user id</dd>
177
+
178
+ <dt>uname</dt>
179
+ <dd>set the entry's user name</dd>
180
+
181
+ <dt>size</dt>
182
+ <dd>set the entry's size</dd>
183
+
184
+ <dt>mtime</dt>
185
+ <dd>set the entry's modification time</dd>
186
+
187
+ <dt>SCHILY.devminor</dt>
188
+ <dd>set the entry's minor device number</dd>
189
+
190
+ <dt>SCHILY.devmajor</dt>
191
+ <dd>set the entry's major device number</dd>
192
+ </dl>
193
+
194
+ <p>in addition some fields used by GNU tar and star used to
195
+ signal sparse entries are supported and are used for the
196
+ <code>is*GNUSparse</code> and <code>isStarSparse</code>
197
+ methods.</p>
198
+
199
+ <p>Some PAX extra headers may be set when writing archives,
200
+ for example for non-ASCII names or big numeric values. This
201
+ depends on various setting of the output stream - see the
202
+ previous sections.</p>
203
+
204
+ <p>Since 1.15 you can directly access all PAX extension
205
+ headers that have been found when reading an entry or specify
206
+ extra headers to be written to a (local) PAX extended header
207
+ entry.</p>
208
+
209
+ <p>Some hints if you try to set extended headers:</p>
210
+
211
+ <ul>
212
+ <li>pax header keywords should be ascii. star/gnutar
213
+ (SCHILY.xattr.* ) do not check for this. libarchive/bsdtar
214
+ (LIBARCHIVE.xattr.*) uses URL-Encoding.</li>
215
+ <li>pax header values should be encoded as UTF-8 characters
216
+ (including trailing <code>\0</code>). star/gnutar
217
+ (SCHILY.xattr.*) do not check for this. libarchive/bsdtar
218
+ (LIBARCHIVE.xattr.*) encode values using Base64.</li>
219
+ <li>libarchive/bsdtar will read SCHILY.xattr headers, but
220
+ will not generate them.</li>
221
+ <li>gnutar will complain about LIBARCHIVE.xattr (and any
222
+ other unknown) headers and will neither encode nor decode
223
+ them.</li>
224
+ </ul>
225
+ </subsection>
226
+
227
+ <subsection name="Random Access">
228
+ <p>Starting with Commons Compress 1.21 the tar package
229
+ contains a <code>TarFile</code> class that provides random
230
+ access to archives. Except for the ability to access entries
231
+ out of order <code>TarFile</code> is not superior to
232
+ <code>TarArchiveInputStream</code>.</p>
233
+ </subsection>
234
+ </section>
235
+ </body>
236
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/site/xdoc/zip.xml ADDED
@@ -0,0 +1,645 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <!--
3
+
4
+ Licensed to the Apache Software Foundation (ASF) under one or more
5
+ contributor license agreements. See the NOTICE file distributed with
6
+ this work for additional information regarding copyright ownership.
7
+ The ASF licenses this file to You under the Apache License, Version 2.0
8
+ (the "License"); you may not use this file except in compliance with
9
+ the License. You may obtain a copy of the License at
10
+
11
+ http://www.apache.org/licenses/LICENSE-2.0
12
+
13
+ Unless required by applicable law or agreed to in writing, software
14
+ distributed under the License is distributed on an "AS IS" BASIS,
15
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16
+ See the License for the specific language governing permissions and
17
+ limitations under the License.
18
+
19
+ -->
20
+ <document xmlns="http://maven.apache.org/XDOC/2.0"
21
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
22
+ xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 https://maven.apache.org/xsd/xdoc-2.0.xsd">
23
+ <properties>
24
+ <title>Commons Compress ZIP package</title>
25
+ <author email="dev@commons.apache.org">Apache Commons Team</author>
26
+ </properties>
27
+ <body>
28
+ <section name="The ZIP package">
29
+
30
+ <p>The ZIP package provides features not found
31
+ in <code>java.util.zip</code>:</p>
32
+
33
+ <ul>
34
+ <li>Support for encodings other than UTF-8 for filenames and
35
+ comments. Starting with Java7 this is supported
36
+ by <code>java.util.zip</code> as well.</li>
37
+ <li>Access to internal and external attributes (which are used
38
+ to store Unix permission by some zip implementations).</li>
39
+ <li>Structured support for extra fields.</li>
40
+ </ul>
41
+
42
+ <p>In addition to the information stored
43
+ in <code>ArchiveEntry</code> a <code>ZipArchiveEntry</code>
44
+ stores internal and external attributes as well as extra
45
+ fields which may contain information like Unix permissions,
46
+ information about the platform they've been created on, their
47
+ last modification time and an optional comment.</p>
48
+
49
+ <subsection name="ZipArchiveInputStream vs ZipFile">
50
+
51
+ <p>ZIP archives store a archive entries in sequence and
52
+ contain a registry of all entries at the very end of the
53
+ archive. It is acceptable for an archive to contain several
54
+ entries of the same name and have the registry (called the
55
+ central directory) decide which entry is actually to be used
56
+ (if any).</p>
57
+
58
+ <p>In addition the ZIP format stores certain information only
59
+ inside the central directory but not together with the entry
60
+ itself, this is:</p>
61
+
62
+ <ul>
63
+ <li>internal and external attributes</li>
64
+ <li>different or additional extra fields</li>
65
+ </ul>
66
+
67
+ <p>This means the ZIP format cannot really be parsed
68
+ correctly while reading a non-seekable stream, which is what
69
+ <code>ZipArchiveInputStream</code> is forced to do. As a
70
+ result <code>ZipArchiveInputStream</code></p>
71
+ <ul>
72
+ <li>may return entries that are not part of the central
73
+ directory at all and shouldn't be considered part of the
74
+ archive.</li>
75
+ <li>may return several entries with the same name.</li>
76
+ <li>will not return internal or external attributes.</li>
77
+ <li>may return incomplete extra field data.</li>
78
+ <li>may return unknown sizes and CRC values for entries
79
+ until the next entry has been reached if the archive uses
80
+ the data descriptor feature (see below).</li>
81
+ <li>can not skip over bytes that occur before the real zip
82
+ stream. This means self-extracting zips as they are created
83
+ by some tools can not be read using
84
+ <code>ZipArchiveInputStream</code> at all. This also applies
85
+ to Chrome extension archives, for example.</li>
86
+ </ul>
87
+
88
+ <p><code>ZipArchiveInputStream</code> shares these limitations
89
+ with <code>java.util.zip.ZipInputStream</code>.</p>
90
+
91
+ <p><code>ZipFile</code> is able to read the central directory
92
+ first and provide correct and complete information on any
93
+ ZIP archive.</p>
94
+
95
+ <p>ZIP archives know a feature called the data descriptor
96
+ which is a way to store an entry's length after the entry's
97
+ data. This can only work reliably if the size information
98
+ can be taken from the central directory or the data itself
99
+ can signal it is complete, which is true for data that is
100
+ compressed using the DEFLATED compression algorithm.</p>
101
+
102
+ <p><code>ZipFile</code> has access to the central directory
103
+ and can extract entries using the data descriptor reliably.
104
+ The same is true for <code>ZipArchiveInputStream</code> as
105
+ long as the entry is DEFLATED. For STORED
106
+ entries <code>ZipArchiveInputStream</code> can try to read
107
+ ahead until it finds the next entry, but this approach is
108
+ not safe and has to be enabled by a constructor argument
109
+ explicitly. For example it will completely fail if the
110
+ stored entry is a ZIP archive itself. Starting with Compress 1.19
111
+ <code>ZipArchiveInputStream</code> will perform a few sanity
112
+ checks for STORED entries with data descriptors and throw an
113
+ exception if they fail.</p>
114
+
115
+ <p>If possible, you should <strong>always</strong> prefer <code>ZipFile</code>
116
+ over <code>ZipArchiveInputStream</code>.</p>
117
+
118
+ <p><code>ZipFile</code> requires a
119
+ <code>SeekableByteChannel</code> that will be obtained
120
+ transparently when reading from a file. The class
121
+ <code>org.apache.commons.compress.utils.SeekableInMemoryByteChannel</code>
122
+ allows you to read from an in-memory archive.</p>
123
+
124
+ </subsection>
125
+
126
+ <subsection name="ZipArchiveOutputStream" id="ZipArchiveOutputStream">
127
+ <p><code>ZipArchiveOutputStream</code> has four constructors,
128
+ two of them uses a <code>File</code> argument, one a
129
+ <code>SeekableByteChannel</code> and the last uses an
130
+ <code>OutputStream</code>.</p>
131
+
132
+ <p>The constructor accepting a <code>File</code> and a size is
133
+ used exclusively for creating a split ZIP archive and is
134
+ described in the next section. For the remainder of this
135
+ section this constructor is equivalent to the one using the
136
+ <code>OutputStream</code> argument and thus it is not possible
137
+ to add uncompressed entries of unknown size.</p>
138
+
139
+ <p>Of the remaining three constructors the <code>File</code> version will
140
+ try to use <code>SeekableByteChannel</code> and fall back to
141
+ using a <code>FileOutputStream</code> internally if that
142
+ fails.</p>
143
+
144
+ <p>If <code>ZipArchiveOutputStream</code> can
145
+ use <code>SeekableByteChannel</code> it can employ some
146
+ optimizations that lead to smaller archives. It also makes
147
+ it possible to add uncompressed (<code>setMethod</code> used
148
+ with <code>STORED</code>) entries of unknown size when
149
+ calling <code>putArchiveEntry</code> - this is not allowed
150
+ if <code>ZipArchiveOutputStream</code> has to use
151
+ an <code>OutputStream</code>.</p>
152
+
153
+ <p>If you know you are writing to a file, you should always
154
+ prefer the <code>File</code>- or
155
+ <code>SeekableByteChannel</code>-arg constructors. The class
156
+ <code>org.apache.commons.compress.utils.SeekableInMemoryByteChannel</code>
157
+ allows you to write to an in-memory archive.</p>
158
+
159
+ </subsection>
160
+
161
+ <subsection name="Multi Volume Archives">
162
+ <p>The ZIP format knows so called split and spanned
163
+ archives. Spanned archives cross several removable media and
164
+ are not supported by Commons Compress.</p>
165
+
166
+ <p>Split archives consist of multiple files that reside in the
167
+ same directory with the same base name (the file name without
168
+ the file extension). The last file of the archive has the
169
+ extension <code>zip</code> the remaining files conventionally
170
+ use extensions <code>z01</code>, <code>z02</code> and so
171
+ on. Support for splitted archives has been added with Compress
172
+ 1.20.</p>
173
+
174
+ <p>If you want to create a split ZIP archive you use the
175
+ constructor of <code>ZipArchiveOutputStream</code> that
176
+ accepts a <code>File</code> argument and a size. The size
177
+ determines the maximum size of a split segment - the size must
178
+ be between 64kB and 4GB. While creating the archive, this will
179
+ create several files following the naming convention described
180
+ above. The name of the <code>File</code> argument used inside
181
+ of the constructor must use the extension
182
+ <code>zip</code>.</p>
183
+
184
+ <p>It is currently not possible to write split archives with
185
+ more than 64k segments. When creating split archives with more
186
+ than 100 segments you will need to adjust the file names as
187
+ <code>ZipArchiveOutputStream</code> assumes extensions will be
188
+ three characters long.</p>
189
+
190
+ <p>If you want to read a split archive you must create a
191
+ <code>ZipSplitReadOnlySeekableByteChannel</code> from the
192
+ parts. Both <code>ZipFile</code> and
193
+ <code>ZipArchiveInputStream</code> support reading streams of
194
+ this type, in the case of <code>ZipArchiveInputStream</code>
195
+ you need to use a constructor where you can set
196
+ <code>skipSplitSig</code> to true.</p>
197
+ </subsection>
198
+
199
+ <subsection name="Extra Fields">
200
+
201
+ <p>Inside a ZIP archive, additional data can be attached to
202
+ each entry. The <code>java.util.zip.ZipEntry</code> class
203
+ provides access to this via the <code>get/setExtra</code>
204
+ methods as arrays of <code>byte</code>s.</p>
205
+
206
+ <p>Actually the extra data is supposed to be more structured
207
+ than that and Compress' ZIP package provides access to the
208
+ structured data as <code>ZipExtraField</code> instances. Only
209
+ a subset of all defined extra field formats is supported by
210
+ the package, any other extra field will be stored
211
+ as <code>UnrecognizedExtraField</code>.</p>
212
+
213
+ <p>Prior to version 1.1 of this library trying to read an
214
+ archive with extra fields that didn't follow the recommended
215
+ structure for those fields would cause Compress to throw an
216
+ exception. Starting with version 1.1 these extra fields
217
+ will now be read
218
+ as <code>UnparseableExtraFieldData</code>.</p>
219
+
220
+ <p>Prior to version 1.19 of this library trying to read an
221
+ archive with extra fields that Compress expects to
222
+ understand but that used a different content than expected
223
+ would cause Compress to throw an exception. Starting with
224
+ version 1.19 these extra fields will now be read as
225
+ <code>UnrecognizedExtraField</code>. Using
226
+ <code>ZipArchiveEntry.getExtraFields(ExtraFieldParsingBehavior)</code>
227
+ you have a more fine grained control over the parser.</p>
228
+
229
+ </subsection>
230
+
231
+ <subsection name="Encoding" id="encoding">
232
+
233
+ <p>Traditionally the ZIP archive format uses CodePage 437 as
234
+ encoding for file name, which is not sufficient for many
235
+ international character sets.</p>
236
+
237
+ <p>Over time different archivers have chosen different ways to
238
+ work around the limitation - the <code>java.util.zip</code>
239
+ packages simply uses UTF-8 as its encoding for example.</p>
240
+
241
+ <p>Ant has been offering the encoding attribute of the zip and
242
+ unzip task as a way to explicitly specify the encoding to
243
+ use (or expect) since Ant 1.4. It defaults to the
244
+ platform's default encoding for zip and UTF-8 for jar and
245
+ other jar-like tasks (war, ear, ...) as well as the unzip
246
+ family of tasks.</p>
247
+
248
+ <p>More recent versions of the ZIP specification introduce
249
+ something called the &quot;language encoding flag&quot;
250
+ which can be used to signal that a file name has been
251
+ encoded using UTF-8. All ZIP-archives written by Compress
252
+ will set this flag, if the encoding has been set to UTF-8.
253
+ Our interoperability tests with existing archivers didn't
254
+ show any ill effects (in fact, most archivers ignore the
255
+ flag to date), but you can turn off the "language encoding
256
+ flag" by setting the attribute
257
+ <code>useLanguageEncodingFlag</code> to <code>false</code> on the
258
+ <code>ZipArchiveOutputStream</code> if you should encounter
259
+ problems.</p>
260
+
261
+ <p>The <code>ZipFile</code>
262
+ and <code>ZipArchiveInputStream</code> classes will
263
+ recognize the language encoding flag and ignore the encoding
264
+ set in the constructor if it has been found.</p>
265
+
266
+ <p>The InfoZIP developers have introduced new ZIP extra fields
267
+ that can be used to add an additional UTF-8 encoded file
268
+ name to the entry's metadata. Most archivers ignore these
269
+ extra fields. <code>ZipArchiveOutputStream</code> supports
270
+ an option <code>createUnicodeExtraFields</code> which makes
271
+ it write these extra fields either for all entries
272
+ ("always") or only those whose name cannot be encoded using
273
+ the specified encoding (not-encodable), it defaults to
274
+ "never" since the extra fields create bigger archives.</p>
275
+
276
+ <p>The fallbackToUTF8 attribute
277
+ of <code>ZipArchiveOutputStream</code> can be used to create
278
+ archives that use the specified encoding in the majority of
279
+ cases but UTF-8 and the language encoding flag for filenames
280
+ that cannot be encoded using the specified encoding.</p>
281
+
282
+ <p>The <code>ZipFile</code>
283
+ and <code>ZipArchiveInputStream</code> classes recognize the
284
+ Unicode extra fields by default and read the file name
285
+ information from them, unless you set the constructor parameter
286
+ <code>scanForUnicodeExtraFields</code> to false.</p>
287
+
288
+ <h4>Recommendations for Interoperability</h4>
289
+
290
+ <p>The optimal setting of flags depends on the archivers you
291
+ expect as consumers/producers of the ZIP archives. Below
292
+ are some test results which may be superseded with later
293
+ versions of each tool.</p>
294
+
295
+ <ul>
296
+ <li>The java.util.zip package used by the jar executable or
297
+ to read jars from your CLASSPATH reads and writes UTF-8
298
+ names, it doesn't set or recognize any flags or Unicode
299
+ extra fields.</li>
300
+
301
+ <li>Starting with Java7 <code>java.util.zip</code> writes
302
+ UTF-8 by default and uses the language encoding flag. It
303
+ is possible to specify a different encoding when
304
+ reading/writing ZIPs via new constructors. The package
305
+ now recognizes the language encoding flag when reading and
306
+ ignores the Unicode extra fields.</li>
307
+
308
+ <li>7Zip writes CodePage 437 by default but uses UTF-8 and
309
+ the language encoding flag when writing entries that
310
+ cannot be encoded as CodePage 437 (similar to the zip task
311
+ with fallbacktoUTF8 set to true). It recognizes the
312
+ language encoding flag when reading and ignores the
313
+ Unicode extra fields.</li>
314
+
315
+ <li>WinZIP writes CodePage 437 and uses Unicode extra fields
316
+ by default. It recognizes the Unicode extra field and the
317
+ language encoding flag when reading.</li>
318
+
319
+ <li>Windows' "compressed folder" feature doesn't recognize
320
+ any flag or extra field and creates archives using the
321
+ platforms default encoding - and expects archives to be in
322
+ that encoding when reading them.</li>
323
+
324
+ <li>InfoZIP based tools can recognize and write both, it is
325
+ a compile time option and depends on the platform so your
326
+ mileage may vary.</li>
327
+
328
+ <li>PKWARE zip tools recognize both and prefer the language
329
+ encoding flag. They create archives using CodePage 437 if
330
+ possible and UTF-8 plus the language encoding flag for
331
+ file names that cannot be encoded as CodePage 437.</li>
332
+ </ul>
333
+
334
+ <p>So, what to do?</p>
335
+
336
+ <p>If you are creating jars, then java.util.zip is your main
337
+ consumer. We recommend you set the encoding to UTF-8 and
338
+ keep the language encoding flag enabled. The flag won't
339
+ help or hurt java.util.zip prior to Java7 but archivers that
340
+ support it will show the correct file names.</p>
341
+
342
+ <p>For maximum interop it is probably best to set the encoding
343
+ to UTF-8, enable the language encoding flag and create
344
+ Unicode extra fields when writing ZIPs. Such archives
345
+ should be extracted correctly by java.util.zip, 7Zip,
346
+ WinZIP, PKWARE tools and most likely InfoZIP tools. They
347
+ will be unusable with Windows' "compressed folders" feature
348
+ and bigger than archives without the Unicode extra fields,
349
+ though.</p>
350
+
351
+ <p>If Windows' "compressed folders" is your primary consumer,
352
+ then your best option is to explicitly set the encoding to
353
+ the target platform. You may want to enable creation of
354
+ Unicode extra fields so the tools that support them will
355
+ extract the file names correctly.</p>
356
+ </subsection>
357
+
358
+ <subsection name="Encryption and Alternative Compression Algorithms"
359
+ id="encryption">
360
+
361
+ <p>In most cases entries of an archive are not encrypted and
362
+ are either not compressed at all or use the DEFLATE
363
+ algorithm, Commons Compress' ZIP archiver will handle them
364
+ just fine. As of version 1.7, Commons Compress can also
365
+ decompress entries compressed with the legacy SHRINK and
366
+ IMPLODE algorithms of PKZIP 1.x. Version 1.11 of Commons
367
+ Compress adds read-only support for BZIP2. Version 1.16 adds
368
+ read-only support for DEFLATE64 - also known as "enhanced DEFLATE".</p>
369
+
370
+ <p>The ZIP specification allows for various other compression
371
+ algorithms and also supports several different ways of
372
+ encrypting archive contents. Neither of those methods is
373
+ currently supported by Commons Compress and any such entry can
374
+ not be extracted by the archiving code.</p>
375
+
376
+ <p><code>ZipFile</code>'s and
377
+ <code>ZipArchiveInputStream</code>'s
378
+ <code>canReadEntryData</code> methods will return false for
379
+ encrypted entries or entries using an unsupported encryption
380
+ mechanism. Using this method it is possible to at least
381
+ detect and skip the entries that can not be extracted.</p>
382
+
383
+ <table>
384
+ <thead>
385
+ <tr>
386
+ <th>Version of Apache Commons Compress</th>
387
+ <th>Supported Compression Methods</th>
388
+ <th>Supported Encryption Methods</th>
389
+ </tr>
390
+ </thead>
391
+ <tbody>
392
+ <tr>
393
+ <td>1.0 to 1.6</td>
394
+ <td>STORED, DEFLATE</td>
395
+ <td>-</td>
396
+ </tr>
397
+ <tr>
398
+ <td>1.7 to 1.10</td>
399
+ <td>STORED, DEFLATE, SHRINK, IMPLODE</td>
400
+ <td>-</td>
401
+ </tr>
402
+ <tr>
403
+ <td>1.11 to 1.15</td>
404
+ <td>STORED, DEFLATE, SHRINK, IMPLODE, BZIP2</td>
405
+ <td>-</td>
406
+ </tr>
407
+ <tr>
408
+ <td>1.16 and later</td>
409
+ <td>STORED, DEFLATE, SHRINK, IMPLODE, BZIP2, DEFLATE64
410
+ (enhanced deflate)</td>
411
+ <td>-</td>
412
+ </tr>
413
+ </tbody>
414
+ </table>
415
+
416
+ </subsection>
417
+
418
+ <subsection name="Zip64 Support" id="zip64">
419
+ <p>The traditional ZIP format is limited to archive sizes of
420
+ four gibibyte (actually 2<sup>32</sup> - 1 bytes &#x2248;
421
+ 4.3 GB) and 65635 entries, where each individual entry is
422
+ limited to four gibibyte as well. These limits seemed
423
+ excessive in the 1980s.</p>
424
+
425
+ <p>Version 4.5 of the ZIP specification introduced the so
426
+ called "Zip64 extensions" to push those limitations for
427
+ compressed or uncompressed sizes of up to 16 exbibyte
428
+ (actually 2<sup>64</sup> - 1 bytes &#x2248; 18.5 EB, i.e
429
+ 18.5 x 10<sup>18</sup> bytes) in archives that themselves
430
+ can take up to 16 exbibyte containing more than
431
+ 18 x 10<sup>18</sup> entries.</p>
432
+
433
+ <p>Apache Commons Compress 1.2 and earlier do not support
434
+ Zip64 extensions at all.</p>
435
+
436
+ <p>Starting with Apache Commons Compress
437
+ 1.3 <code>ZipArchiveInputStream</code>
438
+ and <code>ZipFile</code> transparently support Zip64
439
+ extensions. By default <code>ZipArchiveOutputStream</code>
440
+ supports them transparently as well (i.e. it adds Zip64
441
+ extensions if needed and doesn't use them for
442
+ entries/archives that don't need them) if the compressed and
443
+ uncompressed sizes of the entry are known
444
+ when <code>putArchiveEntry</code> is called
445
+ or <code>ZipArchiveOutputStream</code>
446
+ uses <code>SeekableByteChannel</code>
447
+ (see <a href="#ZipArchiveOutputStream">above</a>). If only
448
+ the uncompressed size is
449
+ known <code>ZipArchiveOutputStream</code> will assume the
450
+ compressed size will not be bigger than the uncompressed
451
+ size.</p>
452
+
453
+ <p><code>ZipArchiveOutputStream</code>'s
454
+ <code>setUseZip64</code> can be used to control the behavior.
455
+ <code>Zip64Mode.AsNeeded</code> is the default behavior
456
+ described in the previous paragraph.</p>
457
+
458
+ <p>If <code>ZipArchiveOutputStream</code> is writing to a
459
+ non-seekable stream it has to decide whether to use Zip64
460
+ extensions or not before it starts writing the entry data.
461
+ This means that if the size of the entry is unknown
462
+ when <code>putArchiveEntry</code> is called it doesn't have
463
+ anything to base the decision on. By default it will not
464
+ use Zip64 extensions in order to create archives that can be
465
+ extracted by older archivers (it will later throw an
466
+ exception in <code>closeEntry</code> if it detects Zip64
467
+ extensions had been needed). It is possible to
468
+ instruct <code>ZipArchiveOutputStream</code> to always
469
+ create Zip64 extensions by using
470
+ the <code>setUseZip64</code> with an argument
471
+ of <code>Zip64Mode.Always</code>; use this if you are
472
+ writing entries of unknown size to a stream and expect some
473
+ of them to be too big to fit into the traditional
474
+ limits.</p>
475
+
476
+ <p><code>Zip64Mode.Always</code> creates archives that use
477
+ Zip64 extensions for all entries, even those that don't
478
+ require them. Such archives will be slightly bigger than
479
+ archives created with one of the other modes and not be
480
+ readable by unarchivers that don't support Zip64
481
+ extensions.</p>
482
+
483
+ <p><code>Zip64Mode.Never</code> will not use any Zip64
484
+ extensions at all and may lead to
485
+ a <code>Zip64RequiredException</code> to be thrown
486
+ if <code>ZipArchiveOutputStream</code> detects that one of
487
+ the format's limits is exceeded. Archives created in this
488
+ mode will be readable by all unarchivers; they may be
489
+ slightly smaller than archives created
490
+ with <code>SeekableByteChannel</code>
491
+ in <code>Zip64Mode.AsNeeded</code> mode if some of the
492
+ entries had unknown sizes.</p>
493
+
494
+ <p>The <code>java.util.zip</code> package and the
495
+ <code>jar</code> command of Java5 and earlier can not read
496
+ Zip64 extensions and will fail if the archive contains any.
497
+ So if you intend to create archives that Java5 can consume
498
+ you must set the mode to <code>Zip64Mode.Never</code>.</p>
499
+
500
+ <h4>Known Limitations</h4>
501
+
502
+ <p>Some of the theoretical limits of the format are not
503
+ reached because of Apache Commons Compress' own API
504
+ (<code>ArchiveEntry</code>'s size information uses
505
+ a <code>long</code>) or its internal usage of Java collections
506
+ or <code>SeekableByteChannel</code>. The table
507
+ below shows the theoretical limits supported by Apache
508
+ Commons Compress. In practice it is very likely that you'd
509
+ run out of memory or your file system won't allow files that
510
+ big long before you reach either limit.</p>
511
+
512
+ <table>
513
+ <thead>
514
+ <tr>
515
+ <th/>
516
+ <th>Max. Size of Archive</th>
517
+ <th>Max. Compressed/Uncompressed Size of Entry</th>
518
+ <th>Max. Number of Entries</th>
519
+ </tr>
520
+ </thead>
521
+ <tbody>
522
+ <tr>
523
+ <td>ZIP Format Without Zip 64 Extensions</td>
524
+ <td>2<sup>32</sup> - 1 bytes &#x2248; 4.3 GB</td>
525
+ <td>2<sup>32</sup> - 1 bytes &#x2248; 4.3 GB</td>
526
+ <td>65535</td>
527
+ </tr>
528
+ <tr>
529
+ <td>ZIP Format using Zip 64 Extensions</td>
530
+ <td>2<sup>64</sup> - 1 bytes &#x2248; 18.5 EB</td>
531
+ <td>2<sup>64</sup> - 1 bytes &#x2248; 18.5 EB</td>
532
+ <td>2<sup>64</sup> - 1 &#x2248; 18.5 x 10<sup>18</sup></td>
533
+ </tr>
534
+ <tr>
535
+ <td>Commons Compress 1.2 and earlier</td>
536
+ <td>unlimited in <code>ZipArchiveInputStream</code>
537
+ and <code>ZipArchiveOutputStream</code> and
538
+ 2<sup>32</sup> - 1 bytes &#x2248; 4.3 GB
539
+ in <code>ZipFile</code>.</td>
540
+ <td>2<sup>32</sup> - 1 bytes &#x2248; 4.3 GB</td>
541
+ <td>unlimited in <code>ZipArchiveInputStream</code>,
542
+ 65535 in <code>ZipArchiveOutputStream</code>
543
+ and <code>ZipFile</code>.</td>
544
+ </tr>
545
+ <tr>
546
+ <td>Commons Compress 1.3 and later</td>
547
+ <td>unlimited in <code>ZipArchiveInputStream</code>
548
+ and <code>ZipArchiveOutputStream</code> and
549
+ 2<sup>63</sup> - 1 bytes &#x2248; 9.2 EB
550
+ in <code>ZipFile</code>.</td>
551
+ <td>2<sup>63</sup> - 1 bytes &#x2248; 9.2 EB</td>
552
+ <td>unlimited in <code>ZipArchiveInputStream</code>,
553
+ 2<sup>31</sup> - 1 &#x2248; 2.1 billion
554
+ in <code>ZipArchiveOutputStream</code>
555
+ and <code>ZipFile</code>.</td>
556
+ </tr>
557
+ </tbody>
558
+ </table>
559
+
560
+ <h4>Known Interoperability Problems</h4>
561
+
562
+ <p>The <code>java.util.zip</code> package of OpenJDK7 supports
563
+ Zip 64 extensions but its <code>ZipInputStream</code> and
564
+ <code>ZipFile</code> classes will be unable to extract
565
+ archives created with Commons Compress 1.3's
566
+ <code>ZipArchiveOutputStream</code> if the archive contains
567
+ entries that use the data descriptor, are smaller than 4 GiB
568
+ and have Zip 64 extensions enabled. I.e. the classes in
569
+ OpenJDK currently support archives that use Zip 64
570
+ extensions only when they are actually needed. These classes
571
+ are used to load JAR files and are the base for the
572
+ <code>jar</code> command line utility as well.</p>
573
+ </subsection>
574
+
575
+ <subsection name="Consuming Archives Completely">
576
+
577
+ <p>Prior to version 1.5 <code>ZipArchiveInputStream</code>
578
+ would return null from <code>getNextEntry</code> or
579
+ <code>getNextZipEntry</code> as soon as the first central
580
+ directory header of the archive was found, leaving the whole
581
+ central directory itself unread inside the stream. Starting
582
+ with version 1.5 <code>ZipArchiveInputStream</code> will try
583
+ to read the archive up to and including the "end of central
584
+ directory" record effectively consuming the archive
585
+ completely.</p>
586
+
587
+ </subsection>
588
+
589
+ <subsection name="Symbolic Links" id="symlinks">
590
+
591
+ <p>Starting with Compress 1.5 <code>ZipArchiveEntry</code>
592
+ recognizes Unix Symbolic Link entries written by InfoZIP's
593
+ zip.</p>
594
+
595
+ <p>The <code>ZipFile</code> class contains a convenience
596
+ method to read the link name of an entry. Basically all it
597
+ does is read the contents of the entry and convert it to
598
+ a string using the given file name encoding of the
599
+ archive.</p>
600
+
601
+ </subsection>
602
+
603
+ <subsection name="Parallel zip creation" id="parallel">
604
+
605
+ <p>Starting with Compress 1.10 there is now built-in support for
606
+ parallel creation of zip archives</p>
607
+
608
+ <p>Multiple threads can write
609
+ to their own <code>ScatterZipOutputStream</code>
610
+ instance that is backed by a file or by some user-implemented form of
611
+ storage (implementing <code>ScatterGatherBackingStore</code>).</p>
612
+
613
+ <p>When the threads finish, they can join these streams together
614
+ to a complete zip file using the <code>writeTo</code> method
615
+ that will write a single <code>ScatterOutputStream</code> to a target
616
+ <code>ZipArchiveOutputStream</code>.</p>
617
+
618
+ <p>To assist this process, clients can use
619
+ <code>ParallelScatterZipCreator</code> that will handle thread
620
+ pools and ensure correct memory model consistency so the client
621
+ can avoid these issues.</p>
622
+
623
+ <p>Until version 1.18, there was no guarantee of order of the entries when writing a Zip
624
+ file with <code>ParallelScatterZipCreator</code>. In consequence, when writing well-formed
625
+ Zip files this way, it was usually necessary to keep a
626
+ separate <code>ScatterZipOutputStream</code> that received all directories
627
+ and wrote this to the target <code>ZipArchiveOutputStream</code> before
628
+ the ones created through <code>ParallelScatterZipCreator</code>. This was the responsibility of the client.</p>
629
+
630
+ <p>Starting with version 1.19, the order of entries is preserved, so this specific handling of directories is no
631
+ longer necessary.</p>
632
+
633
+ <p>
634
+ See the examples section for a code sample demonstrating how to make a zip file.
635
+ </p>
636
+ </subsection>
637
+ <subsection name="Zstandard Support" id="zstd">
638
+ <p>
639
+ Starting with Compress 1.28.0, <code>org.apache.commons.compress.archivers.zip</code> supports reading and writing using the Zstandard method.
640
+ Zstandard method <code>93</code> and the deprecated <code>20</code> are supported.
641
+ </p>
642
+ </subsection>
643
+ </section>
644
+ </body>
645
+ </document>
local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/COMPRESS-379.jar ADDED
Binary file (222 Bytes). View file
 
local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/COMPRESS-382 ADDED
Binary file (19 Bytes). View file
 
local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/COMPRESS-386 ADDED
@@ -0,0 +1 @@
 
 
1
+ �B
local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/bla-multi.7z.001 ADDED
Binary file (512 Bytes). View file
 
local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/test1.xml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ <?xml version = '1.0'?>
2
+ <!DOCTYPE connections>
3
+ <connections>
4
+ </connections>
local-test-commons-compress-delta-03/afc-commons-compress/src/test/resources/test3.xml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version = '1.0'?>
2
+ <!DOCTYPE connections>
3
+ <text>
4
+ Lorem ipsum dolor sit amet, consetetur sadipscing elitr,
5
+ sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat,
6
+ sed diam voluptua.
7
+ At vero eos et accusam et justo duo dolores et ea rebum.
8
+ Stet clita kasd gubergren, no sea takimata sanctus est
9
+ Lorem ipsum dolor sit amet.
10
+ </text>