index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/io/LegacyBinaryEncoder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.io;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import org.apache.avro.util.Utf8;
/**
* Low-level support for serializing Avro values.
*
* This class has two types of methods. One type of methods support the writing
* of leaf values (for example, {@link #writeLong} and {@link #writeString}).
* These methods have analogs in {@link Decoder}.
*
* The other type of methods support the writing of maps and arrays. These
* methods are {@link #writeArrayStart}, {@link #startItem}, and
* {@link #writeArrayEnd} (and similar methods for maps). Some implementations
* of {@link Encoder} handle the buffering required to break large maps and
* arrays into blocks, which is necessary for applications that want to do
* streaming. (See {@link #writeArrayStart} for details on these methods.)
*
* @see Decoder
*/
/**
 * Low-level serializer for Avro values, kept as a legacy reference
 * implementation for tests.
 *
 * Leaf values (e.g. {@link #writeLong} and {@link #writeString}) are written
 * directly to the underlying stream using Avro binary encoding: zig-zag
 * varints for integral types and little-endian IEEE-754 for float/double.
 * Arrays and maps are written as a single block terminated by a zero count
 * (see {@link #setItemCount} and {@link #writeArrayEnd}).
 *
 * @see Decoder
 */
public class LegacyBinaryEncoder extends Encoder {
  protected OutputStream out;

  /** Strategy for writing a length-prefixed byte buffer. */
  private interface ByteWriter {
    void write(ByteBuffer bytes) throws IOException;
  }

  /** Writes the buffer's remaining bytes, preceded by their varint length. */
  private static final class SimpleByteWriter implements ByteWriter {
    private final OutputStream target;

    public SimpleByteWriter(OutputStream target) {
      this.target = target;
    }

    @Override
    public void write(ByteBuffer bytes) throws IOException {
      final int remaining = bytes.remaining();
      encodeLong(remaining, target);
      target.write(bytes.array(), bytes.position(), remaining);
    }
  }

  private final ByteWriter byteWriter;

  /**
   * Create a writer that sends its output to the underlying stream
   * <code>out</code>.
   */
  public LegacyBinaryEncoder(OutputStream out) {
    this.out = out;
    this.byteWriter = new SimpleByteWriter(out);
  }

  @Override
  public void flush() throws IOException {
    if (out != null) {
      out.flush();
    }
  }

  /** Null occupies zero bytes on the wire. */
  @Override
  public void writeNull() throws IOException {
  }

  @Override
  public void writeBoolean(boolean b) throws IOException {
    // booleans are a single byte: 1 for true, 0 for false
    out.write(b ? 1 : 0);
  }

  @Override
  public void writeInt(int n) throws IOException {
    encodeLong(n, out);
  }

  @Override
  public void writeLong(long n) throws IOException {
    encodeLong(n, out);
  }

  @Override
  public void writeFloat(float f) throws IOException {
    encodeFloat(f, out);
  }

  @Override
  public void writeDouble(double d) throws IOException {
    encodeDouble(d, out);
  }

  @Override
  public void writeString(Utf8 utf8) throws IOException {
    final int byteLength = utf8.getByteLength();
    encodeString(utf8.getBytes(), 0, byteLength);
  }

  @Override
  public void writeString(String string) throws IOException {
    final byte[] encoded = Utf8.getBytesFor(string);
    encodeString(encoded, 0, encoded.length);
  }

  /** Writes a varint length followed by the raw UTF-8 bytes. */
  private void encodeString(byte[] bytes, int offset, int length) throws IOException {
    encodeLong(length, out);
    out.write(bytes, offset, length);
  }

  @Override
  public void writeBytes(ByteBuffer bytes) throws IOException {
    byteWriter.write(bytes);
  }

  @Override
  public void writeBytes(byte[] bytes, int start, int len) throws IOException {
    encodeLong(len, out);
    out.write(bytes, start, len);
  }

  /** Fixed fields carry no length prefix; the schema fixes their size. */
  @Override
  public void writeFixed(byte[] bytes, int start, int len) throws IOException {
    out.write(bytes, start, len);
  }

  @Override
  public void writeEnum(int e) throws IOException {
    encodeLong(e, out);
  }

  @Override
  public void writeArrayStart() throws IOException {
  }

  @Override
  public void setItemCount(long itemCount) throws IOException {
    // a zero count is never written here: the terminating zero is emitted
    // by writeArrayEnd / writeMapEnd
    if (itemCount > 0) {
      writeLong(itemCount);
    }
  }

  @Override
  public void startItem() throws IOException {
  }

  @Override
  public void writeArrayEnd() throws IOException {
    encodeLong(0, out);
  }

  @Override
  public void writeMapStart() throws IOException {
  }

  @Override
  public void writeMapEnd() throws IOException {
    encodeLong(0, out);
  }

  @Override
  public void writeIndex(int unionIndex) throws IOException {
    encodeLong(unionIndex, out);
  }

  /** Zig-zag varint encoding: 7 data bits per byte, high bit = continuation. */
  protected static void encodeLong(long n, OutputStream o) throws IOException {
    long zigzag = (n << 1) ^ (n >> 63); // move sign to low-order bit
    while ((zigzag & ~0x7FL) != 0L) {
      o.write((byte) ((zigzag & 0x7f) | 0x80));
      zigzag >>>= 7;
    }
    o.write((byte) zigzag);
  }

  /** IEEE-754 single precision, little-endian (4 bytes). */
  protected static void encodeFloat(float f, OutputStream o) throws IOException {
    final int bits = Float.floatToRawIntBits(f);
    for (int shift = 0; shift < 32; shift += 8) {
      o.write((bits >> shift) & 0xFF);
    }
  }

  /** IEEE-754 double precision, little-endian (8 bytes). */
  protected static void encodeDouble(double d, OutputStream o) throws IOException {
    final long bits = Double.doubleToRawLongBits(d);
    for (int shift = 0; shift < 64; shift += 8) {
      o.write((int) (bits >> shift) & 0xFF);
    }
  }
}
| 7,200 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/io/TestBinaryDecoder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.io;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.SystemLimitException;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.util.ByteBufferInputStream;
import org.apache.avro.util.ByteBufferOutputStream;
import org.apache.avro.util.RandomData;
import org.apache.avro.util.Utf8;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import static org.apache.avro.TestSystemLimitException.*;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class TestBinaryDecoder {
// prime number buffer size so that looping tests hit the buffer edge
// at different points in the loop.
DecoderFactory factory = new DecoderFactory().configureDecoderBufferSize(521);
static EncoderFactory e_factory = EncoderFactory.get();
private Decoder newDecoderWithNoData(boolean useDirect) {
return newDecoder(new byte[0], useDirect);
}
private BinaryDecoder newDecoder(byte[] bytes, int start, int len, boolean useDirect) {
return this.newDecoder(bytes, start, len, null, useDirect);
}
private BinaryDecoder newDecoder(byte[] bytes, int start, int len, BinaryDecoder reuse, boolean useDirect) {
if (useDirect) {
final ByteArrayInputStream input = new ByteArrayInputStream(bytes, start, len);
return factory.directBinaryDecoder(input, reuse);
} else {
return factory.binaryDecoder(bytes, start, len, reuse);
}
}
private BinaryDecoder newDecoder(InputStream in, boolean useDirect) {
return this.newDecoder(in, null, useDirect);
}
private BinaryDecoder newDecoder(InputStream in, BinaryDecoder reuse, boolean useDirect) {
if (useDirect) {
return factory.directBinaryDecoder(in, reuse);
} else {
return factory.binaryDecoder(in, reuse);
}
}
private BinaryDecoder newDecoder(byte[] bytes, BinaryDecoder reuse, boolean useDirect) {
if (useDirect) {
return this.factory.directBinaryDecoder(new ByteArrayInputStream(bytes), reuse);
} else {
return factory.binaryDecoder(bytes, reuse);
}
}
private BinaryDecoder newDecoder(byte[] bytes, boolean useDirect) {
return this.newDecoder(bytes, null, useDirect);
}
/**
* Create a decoder for simulating reading corrupt, unexpected or out-of-bounds
* data.
*
* @return a {@link org.apache.avro.io.BinaryDecoder that has been initialized
* on a byte array containing the sequence of encoded longs in order.
*/
private BinaryDecoder newDecoder(boolean useDirect, long... values) throws IOException {
try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(baos, null);
for (long v : values)
encoder.writeLong(v);
encoder.flush();
return newDecoder(baos.toByteArray(), useDirect);
}
}
/** Verify EOFException throw at EOF */
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eofBoolean(boolean useDirect) {
Assertions.assertThrows(EOFException.class, () -> newDecoderWithNoData(useDirect).readBoolean());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eofInt(boolean useDirect) {
Assertions.assertThrows(EOFException.class, () -> newDecoderWithNoData(useDirect).readInt());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eofLong(boolean useDirect) {
Assertions.assertThrows(EOFException.class, () -> newDecoderWithNoData(useDirect).readLong());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eofFloat(boolean useDirect) {
Assertions.assertThrows(EOFException.class, () -> newDecoderWithNoData(useDirect).readFloat());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eofDouble(boolean useDirect) {
Assertions.assertThrows(EOFException.class, () -> newDecoderWithNoData(useDirect).readDouble());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eofBytes(boolean useDirect) {
Assertions.assertThrows(EOFException.class, () -> newDecoderWithNoData(useDirect).readBytes(null));
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eofString(boolean useDirect) {
Assertions.assertThrows(EOFException.class, () -> newDecoderWithNoData(useDirect).readString(new Utf8("a")));
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eofFixed(boolean useDirect) {
Assertions.assertThrows(EOFException.class, () -> newDecoderWithNoData(useDirect).readFixed(new byte[1]));
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eofEnum(boolean useDirect) {
Assertions.assertThrows(EOFException.class, () -> newDecoderWithNoData(useDirect).readEnum());
}
@Test
void reuse() throws IOException {
ByteBufferOutputStream bbo1 = new ByteBufferOutputStream();
ByteBufferOutputStream bbo2 = new ByteBufferOutputStream();
byte[] b1 = new byte[] { 1, 2 };
BinaryEncoder e1 = e_factory.binaryEncoder(bbo1, null);
e1.writeBytes(b1);
e1.flush();
BinaryEncoder e2 = e_factory.binaryEncoder(bbo2, null);
e2.writeBytes(b1);
e2.flush();
DirectBinaryDecoder d = new DirectBinaryDecoder(new ByteBufferInputStream(bbo1.getBufferList()));
ByteBuffer bb1 = d.readBytes(null);
Assertions.assertEquals(b1.length, bb1.limit() - bb1.position());
d.configure(new ByteBufferInputStream(bbo2.getBufferList()));
ByteBuffer bb2 = d.readBytes(null);
Assertions.assertEquals(b1.length, bb2.limit() - bb2.position());
}
private static byte[] data = null;
private static Schema schema = null;
private static final int count = 200;
private static final ArrayList<Object> records = new ArrayList<>(count);
@BeforeAll
public static void generateData() throws IOException {
int seed = (int) System.currentTimeMillis();
// note some tests (testSkipping) rely on this explicitly
String jsonSchema = "{\"type\": \"record\", \"name\": \"Test\", \"fields\": ["
+ "{\"name\":\"intField\", \"type\":\"int\"}," + "{\"name\":\"bytesField\", \"type\":\"bytes\"},"
+ "{\"name\":\"booleanField\", \"type\":\"boolean\"}," + "{\"name\":\"stringField\", \"type\":\"string\"},"
+ "{\"name\":\"floatField\", \"type\":\"float\"}," + "{\"name\":\"doubleField\", \"type\":\"double\"},"
+ "{\"name\":\"arrayField\", \"type\": " + "{\"type\":\"array\", \"items\":\"boolean\"}},"
+ "{\"name\":\"longField\", \"type\":\"long\"}]}";
schema = new Schema.Parser().parse(jsonSchema);
GenericDatumWriter<Object> writer = new GenericDatumWriter<>();
writer.setSchema(schema);
ByteArrayOutputStream baos = new ByteArrayOutputStream(8192);
BinaryEncoder encoder = e_factory.binaryEncoder(baos, null);
for (Object datum : new RandomData(schema, count, seed)) {
writer.write(datum, encoder);
records.add(datum);
}
encoder.flush();
data = baos.toByteArray();
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void decodeFromSources(boolean useDirect) throws IOException {
GenericDatumReader<Object> reader = new GenericDatumReader<>();
reader.setSchema(schema);
ByteArrayInputStream is = new ByteArrayInputStream(data);
ByteArrayInputStream is2 = new ByteArrayInputStream(data);
ByteArrayInputStream is3 = new ByteArrayInputStream(data);
Decoder fromInputStream = newDecoder(is, useDirect);
Decoder fromArray = newDecoder(data, useDirect);
byte[] data2 = new byte[data.length + 30];
Arrays.fill(data2, (byte) 0xff);
System.arraycopy(data, 0, data2, 15, data.length);
Decoder fromOffsetArray = newDecoder(data2, 15, data.length, useDirect);
BinaryDecoder initOnInputStream = newDecoder(new byte[50], 0, 30, useDirect);
initOnInputStream = newDecoder(is2, initOnInputStream, useDirect);
BinaryDecoder initOnArray = this.newDecoder(is3, null, useDirect);
initOnArray = this.newDecoder(data, initOnArray, useDirect);
for (Object datum : records) {
Assertions.assertEquals(datum, reader.read(null, fromInputStream),
"InputStream based BinaryDecoder result does not match");
Assertions.assertEquals(datum, reader.read(null, fromArray), "Array based BinaryDecoder result does not match");
Assertions.assertEquals(datum, reader.read(null, fromOffsetArray),
"offset Array based BinaryDecoder result does not match");
Assertions.assertEquals(datum, reader.read(null, initOnInputStream),
"InputStream initialized BinaryDecoder result does not match");
Assertions.assertEquals(datum, reader.read(null, initOnArray),
"Array initialized BinaryDecoder result does not match");
}
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void inputStreamProxy(boolean useDirect) throws IOException {
BinaryDecoder d = newDecoder(data, useDirect);
if (d != null) {
BinaryDecoder bd = d;
InputStream test = bd.inputStream();
InputStream check = new ByteArrayInputStream(data);
validateInputStreamReads(test, check);
bd = this.newDecoder(data, bd, useDirect);
test = bd.inputStream();
check = new ByteArrayInputStream(data);
validateInputStreamSkips(test, check);
// with input stream sources
bd = newDecoder(new ByteArrayInputStream(data), bd, useDirect);
test = bd.inputStream();
check = new ByteArrayInputStream(data);
validateInputStreamReads(test, check);
bd = newDecoder(new ByteArrayInputStream(data), bd, useDirect);
test = bd.inputStream();
check = new ByteArrayInputStream(data);
validateInputStreamSkips(test, check);
}
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void inputStreamProxyDetached(boolean useDirect) throws IOException {
BinaryDecoder bd = newDecoder(data, useDirect);
InputStream test = bd.inputStream();
InputStream check = new ByteArrayInputStream(data);
// detach input stream and decoder from old source
this.newDecoder(new byte[56], useDirect);
try (InputStream bad = bd.inputStream(); InputStream check2 = new ByteArrayInputStream(data)) {
validateInputStreamReads(test, check);
Assertions.assertNotEquals(bad.read(), check2.read());
}
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void inputStreamPartiallyUsed(boolean useDirect) throws IOException {
BinaryDecoder bd = this.newDecoder(new ByteArrayInputStream(data), useDirect);
InputStream test = bd.inputStream();
InputStream check = new ByteArrayInputStream(data);
// triggers buffer fill if unused and tests isEnd()
try {
Assertions.assertFalse(bd.isEnd());
} catch (UnsupportedOperationException e) {
// this is ok if its a DirectBinaryDecoder.
if (bd.getClass() != DirectBinaryDecoder.class) {
throw e;
}
}
bd.readFloat(); // use data, and otherwise trigger buffer fill
check.skip(4); // skip the same # of bytes here
validateInputStreamReads(test, check);
}
private void validateInputStreamReads(InputStream test, InputStream check) throws IOException {
byte[] bt = new byte[7];
byte[] bc = new byte[7];
while (true) {
int t = test.read();
int c = check.read();
Assertions.assertEquals(c, t);
if (-1 == t) {
break;
}
t = test.read(bt);
c = check.read(bc);
Assertions.assertEquals(c, t);
Assertions.assertArrayEquals(bt, bc);
if (-1 == t) {
break;
}
t = test.read(bt, 1, 4);
c = check.read(bc, 1, 4);
Assertions.assertEquals(c, t);
Assertions.assertArrayEquals(bt, bc);
if (-1 == t) {
break;
}
}
Assertions.assertEquals(0, test.skip(5));
Assertions.assertEquals(0, test.available());
Assertions.assertFalse(test.getClass() != ByteArrayInputStream.class && test.markSupported());
test.close();
}
private void validateInputStreamSkips(InputStream test, InputStream check) throws IOException {
while (true) {
long t2 = test.skip(19);
long c2 = check.skip(19);
Assertions.assertEquals(c2, t2);
if (0 == t2) {
break;
}
}
Assertions.assertEquals(-1, test.read());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void badIntEncoding(boolean useDirect) throws IOException {
byte[] badint = new byte[5];
Arrays.fill(badint, (byte) 0xff);
Decoder bd = this.newDecoder(badint, useDirect);
String message = "";
try {
bd.readInt();
} catch (IOException ioe) {
message = ioe.getMessage();
}
Assertions.assertEquals("Invalid int encoding", message);
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void badLongEncoding(boolean useDirect) throws IOException {
byte[] badint = new byte[10];
Arrays.fill(badint, (byte) 0xff);
Decoder bd = this.newDecoder(badint, useDirect);
String message = "";
try {
bd.readLong();
} catch (IOException ioe) {
message = ioe.getMessage();
}
Assertions.assertEquals("Invalid long encoding", message);
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testStringNegativeLength(boolean useDirect) throws IOException {
Exception ex = Assertions.assertThrows(AvroRuntimeException.class, this.newDecoder(useDirect, -1L)::readString);
Assertions.assertEquals(ERROR_NEGATIVE, ex.getMessage());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testStringVmMaxSize(boolean useDirect) throws IOException {
Exception ex = Assertions.assertThrows(UnsupportedOperationException.class,
newDecoder(useDirect, MAX_ARRAY_VM_LIMIT + 1L)::readString);
Assertions.assertEquals(ERROR_VM_LIMIT_STRING, ex.getMessage());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testStringMaxCustom(boolean useDirect) throws IOException {
try {
System.setProperty(SystemLimitException.MAX_STRING_LENGTH_PROPERTY, Long.toString(128));
resetLimits();
Exception ex = Assertions.assertThrows(SystemLimitException.class, newDecoder(useDirect, 129)::readString);
Assertions.assertEquals("String length 129 exceeds maximum allowed", ex.getMessage());
} finally {
System.clearProperty(SystemLimitException.MAX_STRING_LENGTH_PROPERTY);
resetLimits();
}
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testBytesNegativeLength(boolean useDirect) throws IOException {
Exception ex = Assertions.assertThrows(AvroRuntimeException.class,
() -> this.newDecoder(useDirect, -1).readBytes(null));
Assertions.assertEquals(ERROR_NEGATIVE, ex.getMessage());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testBytesVmMaxSize(boolean useDirect) throws IOException {
Exception ex = Assertions.assertThrows(UnsupportedOperationException.class,
() -> this.newDecoder(useDirect, MAX_ARRAY_VM_LIMIT + 1).readBytes(null));
Assertions.assertEquals(ERROR_VM_LIMIT_BYTES, ex.getMessage());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testBytesMaxCustom(boolean useDirect) throws IOException {
try {
System.setProperty(SystemLimitException.MAX_BYTES_LENGTH_PROPERTY, Long.toString(128));
resetLimits();
Exception ex = Assertions.assertThrows(SystemLimitException.class,
() -> newDecoder(useDirect, 129).readBytes(null));
Assertions.assertEquals("Bytes length 129 exceeds maximum allowed", ex.getMessage());
} finally {
System.clearProperty(SystemLimitException.MAX_BYTES_LENGTH_PROPERTY);
resetLimits();
}
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testArrayVmMaxSize(boolean useDirect) throws IOException {
// At start
Exception ex = Assertions.assertThrows(UnsupportedOperationException.class,
() -> this.newDecoder(useDirect, MAX_ARRAY_VM_LIMIT + 1).readArrayStart());
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
// Next
ex = Assertions.assertThrows(UnsupportedOperationException.class,
() -> this.newDecoder(useDirect, MAX_ARRAY_VM_LIMIT + 1).arrayNext());
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
// An OK reads followed by an overflow
Decoder bd = newDecoder(useDirect, MAX_ARRAY_VM_LIMIT - 100, Long.MAX_VALUE);
Assertions.assertEquals(MAX_ARRAY_VM_LIMIT - 100, bd.readArrayStart());
ex = Assertions.assertThrows(UnsupportedOperationException.class, bd::arrayNext);
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
// Two OK reads followed by going over the VM limit.
bd = newDecoder(useDirect, MAX_ARRAY_VM_LIMIT - 100, 100, 1);
Assertions.assertEquals(MAX_ARRAY_VM_LIMIT - 100, bd.readArrayStart());
Assertions.assertEquals(100, bd.arrayNext());
ex = Assertions.assertThrows(UnsupportedOperationException.class, bd::arrayNext);
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
// Two OK reads followed by going over the VM limit, where negative numbers are
// followed by the byte length of the items. For testing, the 999 values are
// read but ignored.
bd = newDecoder(useDirect, 100 - MAX_ARRAY_VM_LIMIT, 999, -100, 999, 1);
Assertions.assertEquals(MAX_ARRAY_VM_LIMIT - 100, bd.readArrayStart());
Assertions.assertEquals(100, bd.arrayNext());
ex = Assertions.assertThrows(UnsupportedOperationException.class, bd::arrayNext);
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testArrayMaxCustom(boolean useDirect) throws IOException {
try {
System.setProperty(SystemLimitException.MAX_COLLECTION_LENGTH_PROPERTY, Long.toString(128));
resetLimits();
Exception ex = Assertions.assertThrows(UnsupportedOperationException.class,
() -> newDecoder(useDirect, MAX_ARRAY_VM_LIMIT + 1).readArrayStart());
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
// Two OK reads followed by going over the custom limit.
Decoder bd = newDecoder(useDirect, 118, 10, 1);
Assertions.assertEquals(118, bd.readArrayStart());
Assertions.assertEquals(10, bd.arrayNext());
ex = Assertions.assertThrows(SystemLimitException.class, bd::arrayNext);
Assertions.assertEquals("Collection length 129 exceeds maximum allowed", ex.getMessage());
// Two OK reads followed by going over the VM limit, where negative numbers are
// followed by the byte length of the items. For testing, the 999 values are
// read but ignored.
bd = newDecoder(useDirect, -118, 999, -10, 999, 1);
Assertions.assertEquals(118, bd.readArrayStart());
Assertions.assertEquals(10, bd.arrayNext());
ex = Assertions.assertThrows(SystemLimitException.class, bd::arrayNext);
Assertions.assertEquals("Collection length 129 exceeds maximum allowed", ex.getMessage());
} finally {
System.clearProperty(SystemLimitException.MAX_COLLECTION_LENGTH_PROPERTY);
resetLimits();
}
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testMapVmMaxSize(boolean useDirect) throws IOException {
// At start
Exception ex = Assertions.assertThrows(UnsupportedOperationException.class,
() -> this.newDecoder(useDirect, MAX_ARRAY_VM_LIMIT + 1).readMapStart());
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
// Next
ex = Assertions.assertThrows(UnsupportedOperationException.class,
() -> this.newDecoder(useDirect, MAX_ARRAY_VM_LIMIT + 1).mapNext());
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
// Two OK reads followed by going over the VM limit.
Decoder bd = newDecoder(useDirect, MAX_ARRAY_VM_LIMIT - 100, 100, 1);
Assertions.assertEquals(MAX_ARRAY_VM_LIMIT - 100, bd.readMapStart());
Assertions.assertEquals(100, bd.mapNext());
ex = Assertions.assertThrows(UnsupportedOperationException.class, bd::mapNext);
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
// Two OK reads followed by going over the VM limit, where negative numbers are
// followed by the byte length of the items. For testing, the 999 values are
// read but ignored.
bd = newDecoder(useDirect, 100 - MAX_ARRAY_VM_LIMIT, 999, -100, 999, 1);
Assertions.assertEquals(MAX_ARRAY_VM_LIMIT - 100, bd.readMapStart());
Assertions.assertEquals(100, bd.mapNext());
ex = Assertions.assertThrows(UnsupportedOperationException.class, bd::mapNext);
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testMapMaxCustom(boolean useDirect) throws IOException {
try {
System.setProperty(SystemLimitException.MAX_COLLECTION_LENGTH_PROPERTY, Long.toString(128));
resetLimits();
Exception ex = Assertions.assertThrows(UnsupportedOperationException.class,
() -> newDecoder(useDirect, MAX_ARRAY_VM_LIMIT + 1).readMapStart());
Assertions.assertEquals(ERROR_VM_LIMIT_COLLECTION, ex.getMessage());
// Two OK reads followed by going over the custom limit.
Decoder bd = newDecoder(useDirect, 118, 10, 1);
Assertions.assertEquals(118, bd.readMapStart());
Assertions.assertEquals(10, bd.mapNext());
ex = Assertions.assertThrows(SystemLimitException.class, bd::mapNext);
Assertions.assertEquals("Collection length 129 exceeds maximum allowed", ex.getMessage());
// Two OK reads followed by going over the VM limit, where negative numbers are
// followed by the byte length of the items. For testing, the 999 values are
// read but ignored.
bd = newDecoder(useDirect, -118, 999, -10, 999, 1);
Assertions.assertEquals(118, bd.readMapStart());
Assertions.assertEquals(10, bd.mapNext());
ex = Assertions.assertThrows(SystemLimitException.class, bd::mapNext);
Assertions.assertEquals("Collection length 129 exceeds maximum allowed", ex.getMessage());
} finally {
System.clearProperty(SystemLimitException.MAX_COLLECTION_LENGTH_PROPERTY);
resetLimits();
}
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void longLengthEncoding(boolean useDirect) {
// Size equivalent to Integer.MAX_VALUE + 1
byte[] bad = new byte[] { (byte) -128, (byte) -128, (byte) -128, (byte) -128, (byte) 16 };
Decoder bd = this.newDecoder(bad, useDirect);
Assertions.assertThrows(UnsupportedOperationException.class, bd::readString);
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void intTooShort(boolean useDirect) {
byte[] badint = new byte[4];
Arrays.fill(badint, (byte) 0xff);
Assertions.assertThrows(EOFException.class, () -> newDecoder(badint, useDirect).readInt());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void longTooShort(boolean useDirect) {
byte[] badint = new byte[9];
Arrays.fill(badint, (byte) 0xff);
Assertions.assertThrows(EOFException.class, () -> newDecoder(badint, useDirect).readLong());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void floatTooShort(boolean useDirect) {
byte[] badint = new byte[3];
Arrays.fill(badint, (byte) 0xff);
Assertions.assertThrows(EOFException.class, () -> newDecoder(badint, useDirect).readInt());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void doubleTooShort(boolean useDirect) {
byte[] badint = new byte[7];
Arrays.fill(badint, (byte) 0xff);
Assertions.assertThrows(EOFException.class, () -> newDecoder(badint, useDirect).readLong());
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void skipping(boolean useDirect) throws IOException {
BinaryDecoder bd = newDecoder(data, useDirect);
skipGenerated(bd);
try {
Assertions.assertTrue(bd.isEnd());
} catch (UnsupportedOperationException e) {
// this is ok if its a DirectBinaryDecoder.
if (bd.getClass() != DirectBinaryDecoder.class) {
throw e;
}
}
bd = this.newDecoder(new ByteArrayInputStream(data), bd, useDirect);
skipGenerated(bd);
try {
Assertions.assertTrue(bd.isEnd());
} catch (UnsupportedOperationException e) {
// this is ok if its a DirectBinaryDecoder.
if (bd.getClass() != DirectBinaryDecoder.class) {
throw e;
}
}
}
private void skipGenerated(Decoder bd) throws IOException {
for (int i = 0; i < records.size(); i++) {
bd.readInt();
bd.skipBytes();
bd.skipFixed(1);
bd.skipString();
bd.skipFixed(4);
bd.skipFixed(8);
long leftover = bd.skipArray();
// booleans are one byte, array trailer is one byte
bd.skipFixed((int) leftover + 1);
bd.skipFixed(0);
bd.readLong();
}
EOFException eof = null;
try {
bd.skipFixed(4);
} catch (EOFException e) {
eof = e;
}
Assertions.assertNotNull(eof);
}
@ParameterizedTest
@ValueSource(booleans = { true, false })
void eof(boolean useDirect) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
Encoder e = EncoderFactory.get().binaryEncoder(baos, null);
e.writeLong(0x10000000000000L);
e.flush();
Decoder d = newDecoder(new ByteArrayInputStream(baos.toByteArray()), useDirect);
Assertions.assertEquals(0x10000000000000L, d.readLong());
Assertions.assertThrows(EOFException.class, () -> d.readInt());
}
@Test
void testFloatPrecision() throws Exception {
String def = "{\"type\":\"record\",\"name\":\"X\",\"fields\":" + "[{\"type\":\"float\",\"name\":\"n\"}]}";
Schema schema = new Schema.Parser().parse(def);
DatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
float value = 33.33000183105469f;
GenericData.Record record = new GenericData.Record(schema);
record.put(0, value);
ByteArrayOutputStream out = new ByteArrayOutputStream();
Encoder encoder = EncoderFactory.get().directBinaryEncoder(out, null);
DatumWriter<GenericRecord> writer = new GenericDatumWriter<>(schema);
writer.write(record, encoder);
encoder.flush();
Decoder decoder = DecoderFactory.get().directBinaryDecoder(new ByteArrayInputStream(out.toByteArray()), null);
GenericRecord r = reader.read(null, decoder);
assertEquals(value + 0d, ((float) r.get("n")) + 0d);
}
}
| 7,201 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/io/TestBlockingDirectBinaryEncoder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.io;
import org.apache.avro.Schema;
import org.apache.avro.SchemaNormalization;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.message.BinaryMessageDecoder;
import org.apache.avro.specific.TestRecordWithMapsAndArrays;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.Map;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.*;
public class TestBlockingDirectBinaryEncoder {
  /**
   * Hand-writes a single-object-encoded message (header, array of strings, map
   * of string to long) with the blocking direct binary encoder and checks that
   * the generated specific-record decoder reads it back intact.
   */
  @Test
  void blockingDirectBinaryEncoder() throws IOException, NoSuchAlgorithmException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    BinaryEncoder encoder = EncoderFactory.get().blockingDirectBinaryEncoder(baos, null);
    // This is needed because there is no BlockingDirectBinaryEncoder
    // BinaryMessageWriter
    // available out of the box: emit the single-object encoding header
    // (marker bytes followed by the schema fingerprint) manually.
    encoder.writeFixed(new byte[] { (byte) 0xC3, (byte) 0x01 });
    encoder.writeFixed(SchemaNormalization.parsingFingerprint("CRC-64-AVRO", TestRecordWithMapsAndArrays.SCHEMA$));
    int len = 5;
    encoder.writeArrayStart();
    encoder.setItemCount(len);
    for (int i = 0; i < len; i++) {
      encoder.startItem();
      encoder.writeString(Integer.toString(i));
    }
    encoder.writeArrayEnd();
    encoder.writeMapStart();
    encoder.setItemCount(len);
    for (long i = 0; i < len; i++) {
      encoder.startItem();
      encoder.writeString(Long.toString(i));
      encoder.writeLong(i);
    }
    encoder.writeMapEnd();
    encoder.flush();
    BinaryMessageDecoder<TestRecordWithMapsAndArrays> decoder = TestRecordWithMapsAndArrays.getDecoder();
    TestRecordWithMapsAndArrays r = decoder.decode(baos.toByteArray());
    assertThat(r.getArr(), is(Arrays.asList("0", "1", "2", "3", "4")));
    Map<String, Long> map = r.getMap();
    assertThat(map.size(), is(len));
    for (long i = 0; i < len; i++) {
      assertThat(map.get(Long.toString(i)), is(i));
    }
  }

  /**
   * Reading with an empty record schema must skip the writer's map and array
   * wholesale (via skipMap/skipArray) rather than reading each entry.
   */
  @Test
  void testSkippingUsingBlocks() throws IOException, NoSuchAlgorithmException {
    // Create an empty schema for read, so we skip over all the fields
    Schema emptySchema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"TestRecordWithMapsAndArrays\",\"namespace\":\"org.apache.avro.specific\",\"fields\":[]}");
    GenericDatumReader<?> in = new GenericDatumReader<>(TestRecordWithMapsAndArrays.SCHEMA$, emptySchema);
    Decoder mockDecoder = mock(BinaryDecoder.class);
    in.read(null, mockDecoder);
    verify(mockDecoder, times(1)).skipMap();
    verify(mockDecoder, times(1)).skipArray();
    verify(mockDecoder, never()).readString();
    verify(mockDecoder, never()).readLong();
  }
}
| 7,202 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/io | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/io/parsing/SymbolTest.java | /*
* Copyright 2016 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.io.parsing;
import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.avro.Schema;
import org.junit.jupiter.api.Test;
/**
* Unit test to verify that recursive schemas are flattened correctly. See
* AVRO-1667.
*/
public class SymbolTest {
  // Recursive schema: SampleNode contains an array of SamplePair, which in
  // turn references SampleNode again (the AVRO-1667 reproduction case).
  private static final String SCHEMA = "{\"type\":\"record\",\"name\":\"SampleNode\","
      + "\"namespace\":\"org.spf4j.ssdump2.avro\",\n" + " \"fields\":[\n"
      + " {\"name\":\"count\",\"type\":\"int\",\"default\":0},\n" + " {\"name\":\"subNodes\",\"type\":\n"
      + " {\"type\":\"array\",\"items\":{\n" + " \"type\":\"record\",\"name\":\"SamplePair\",\n"
      + " \"fields\":[\n" + " {\"name\":\"method\",\"type\":\n"
      + " {\"type\":\"record\",\"name\":\"Method\",\n" + " \"fields\":[\n"
      + " {\"name\":\"declaringClass\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}},\n"
      + " {\"name\":\"methodName\",\"type\":{\"type\":\"string\",\"avro.java.string\":\"String\"}}\n"
      + " ]}},\n" + " {\"name\":\"node\",\"type\":\"SampleNode\"}]}}}]}";

  /** Generates the resolving grammar for the recursive schema and walks it. */
  @Test
  void someMethod() throws IOException {
    Schema schema = new Schema.Parser().parse(SCHEMA);
    Symbol root = new ResolvingGrammarGenerator().generate(schema, schema);
    validateNonNull(root, new HashSet<>());
  }

  /**
   * Recursively asserts that no symbol in the production tree is null. The
   * {@code seen} set guards against infinite recursion, since the grammar for
   * a recursive schema is cyclic.
   */
  private static void validateNonNull(final Symbol symb, Set<Symbol> seen) {
    // Set.add returns false when the element was already present, so a single
    // call both checks and records the visit.
    if (!seen.add(symb)) {
      return;
    }
    if (symb.production != null) {
      for (Symbol s : symb.production) {
        if (s == null) {
          fail("invalid parsing tree should not contain nulls");
        } else if (s.kind != Symbol.Kind.ROOT) {
          validateNonNull(s, seen);
        }
      }
    }
  }
}
| 7,203 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/io | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/io/parsing/TestResolvingGrammarGenerator2.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.io.parsing;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.SchemaValidationException;
import org.apache.avro.SchemaValidatorBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.junit.jupiter.api.Test;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.not;
import static org.junit.jupiter.api.Assertions.*;
/** ResolvingGrammarGenerator tests that are not Parameterized. */
public class TestResolvingGrammarGenerator2 {
@Test
void fixed() throws java.io.IOException {
new ResolvingGrammarGenerator().generate(Schema.createFixed("MyFixed", null, null, 10),
Schema.create(Schema.Type.BYTES));
new ResolvingGrammarGenerator().generate(Schema.create(Schema.Type.BYTES),
Schema.createFixed("MyFixed", null, null, 10));
}
Schema point2dFullname = SchemaBuilder.record("Point").namespace("written").fields().requiredDouble("x")
.requiredDouble("y").endRecord();
Schema point3dNoDefault = SchemaBuilder.record("Point").fields().requiredDouble("x").requiredDouble("y")
.requiredDouble("z").endRecord();
Schema point2d = SchemaBuilder.record("Point2D").fields().requiredDouble("x").requiredDouble("y").endRecord();
Schema point3d = SchemaBuilder.record("Point3D").fields().requiredDouble("x").requiredDouble("y").name("z").type()
.doubleType().doubleDefault(0.0).endRecord();
Schema point3dMatchName = SchemaBuilder.record("Point").fields().requiredDouble("x").requiredDouble("y").name("z")
.type().doubleType().doubleDefault(0.0).endRecord();
@Test
void unionResolutionNoStructureMatch() throws Exception {
assertThrows(SchemaValidationException.class, () -> {
// there is a short name match, but the structure does not match
Schema read = Schema.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), point3dNoDefault));
new SchemaValidatorBuilder().canBeReadStrategy().validateAll().validate(point2dFullname,
Collections.singletonList(read));
});
}
@Test
void unionResolutionFirstStructureMatch2d() throws Exception {
// multiple structure matches with no short or full name matches
Schema read = Schema
.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), point3dNoDefault, point2d, point3d));
Symbol grammar = new ResolvingGrammarGenerator().generate(point2dFullname, read);
assertTrue(grammar.production[1] instanceof Symbol.UnionAdjustAction);
Symbol.UnionAdjustAction action = (Symbol.UnionAdjustAction) grammar.production[1];
assertEquals(2, action.rindex);
}
@Test
void unionResolutionFirstStructureMatch3d() throws Exception {
// multiple structure matches with no short or full name matches
Schema read = Schema
.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), point3dNoDefault, point3d, point2d));
Symbol grammar = new ResolvingGrammarGenerator().generate(point2dFullname, read);
assertTrue(grammar.production[1] instanceof Symbol.UnionAdjustAction);
Symbol.UnionAdjustAction action = (Symbol.UnionAdjustAction) grammar.production[1];
assertEquals(2, action.rindex);
}
@Test
void unionResolutionNamedStructureMatch() throws Exception {
// multiple structure matches with a short name match
Schema read = Schema
.createUnion(Arrays.asList(Schema.create(Schema.Type.NULL), point2d, point3dMatchName, point3d));
Symbol grammar = new ResolvingGrammarGenerator().generate(point2dFullname, read);
assertTrue(grammar.production[1] instanceof Symbol.UnionAdjustAction);
Symbol.UnionAdjustAction action = (Symbol.UnionAdjustAction) grammar.production[1];
assertEquals(2, action.rindex);
}
@Test
void unionResolutionFullNameMatch() throws Exception {
// there is a full name match, so it should be chosen
Schema read = Schema.createUnion(
Arrays.asList(Schema.create(Schema.Type.NULL), point2d, point3dMatchName, point3d, point2dFullname));
Symbol grammar = new ResolvingGrammarGenerator().generate(point2dFullname, read);
assertTrue(grammar.production[1] instanceof Symbol.UnionAdjustAction);
Symbol.UnionAdjustAction action = (Symbol.UnionAdjustAction) grammar.production[1];
assertEquals(4, action.rindex);
}
@Test
void avro2702StringProperties() throws IOException {
// Create a nested record schema with string fields at two levels.
Schema inner = SchemaBuilder.builder().record("B").fields().requiredString("b1").endRecord();
Schema outer = SchemaBuilder.builder().record("A").fields().requiredString("a1").name("inner").type().unionOf()
.nullType().and().type(inner).endUnion().noDefault().endRecord();
// Make a copy with the two string fields annotated.
Schema outer2 = new Schema.Parser().parse(outer.toString());
outer2.getField("a1").schema().addProp(GenericData.STRING_PROP, "String");
Schema inner2 = outer2.getField("inner").schema().getTypes().get(1);
inner2.getField("b1").schema().addProp(GenericData.STRING_PROP, "String");
// The two schemas are not the same, but they serialize to the same.
assertThat(outer, not(outer2));
// This is a serialized record.
byte[] serialized = { 2, 'a', // a1 is a one character string
2, // Pick the non-null UNION branch and
2, 'b' // Another one character string
};
GenericRecord out = null;
try (ByteArrayInputStream bais = new ByteArrayInputStream(serialized)) {
Decoder decoder = DecoderFactory.get().binaryDecoder(bais, null);
DatumReader<GenericRecord> r = new GenericDatumReader<>(outer, outer2, GenericData.get());
out = r.read(null, decoder);
}
// Assert that the two fields were read and are of type String.
assertThat(out.get("a1"), instanceOf(String.class));
assertThat(((GenericRecord) out.get("inner")).get("b1"), instanceOf(String.class));
}
}
| 7,204 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/io | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/io/parsing/TestResolvingGrammarGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.io.parsing;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.StringReader;
import java.io.UncheckedIOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.stream.Stream;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.avro.AvroTypeException;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import static org.apache.avro.TestSchemas.ENUM1_AB_SCHEMA_NAMESPACE_1;
import static org.apache.avro.TestSchemas.ENUM1_AB_SCHEMA_NAMESPACE_2;
public class TestResolvingGrammarGenerator {
  /**
   * Encodes each (schema, JSON datum) pair supplied by {@link #data()} through
   * a validating encoder; passing means ResolvingGrammarGenerator.encode
   * accepts the datum for that schema without error.
   */
  @ParameterizedTest
  @MethodSource("data")
  void test(Schema schema, JsonNode data) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    EncoderFactory factory = EncoderFactory.get();
    Encoder e = factory.validatingEncoder(schema, factory.binaryEncoder(baos, null));
    ResolvingGrammarGenerator.encode(e, schema, data);
    e.flush();
  }

  /**
   * Reading data written without "field2" against a schema that requires it
   * (and provides no default) must fail with a descriptive AvroTypeException.
   */
  @Test
  void recordMissingRequiredFieldError() throws Exception {
    Schema schemaWithoutField = SchemaBuilder.record("MyRecord").namespace("ns").fields().name("field1").type()
        .stringType().noDefault().endRecord();
    Schema schemaWithField = SchemaBuilder.record("MyRecord").namespace("ns").fields().name("field1").type()
        .stringType().noDefault().name("field2").type().stringType().noDefault().endRecord();
    GenericData.Record record = new GenericRecordBuilder(schemaWithoutField).set("field1", "someValue").build();
    byte[] data = writeRecord(schemaWithoutField, record);
    try {
      readRecord(schemaWithField, data);
      Assertions.fail("Expected exception not thrown");
    } catch (AvroTypeException typeException) {
      Assertions.assertEquals("Found ns.MyRecord, expecting ns.MyRecord, missing required field field2",
          typeException.getMessage(), "Incorrect exception message");
    }
  }

  /**
   * An enum value written under one namespace must still resolve when read
   * with a schema whose enum carries a different namespace.
   */
  @Test
  void differingEnumNamespaces() throws Exception {
    Schema schema1 = SchemaBuilder.record("MyRecord").fields().name("field").type(ENUM1_AB_SCHEMA_NAMESPACE_1)
        .noDefault().endRecord();
    Schema schema2 = SchemaBuilder.record("MyRecord").fields().name("field").type(ENUM1_AB_SCHEMA_NAMESPACE_2)
        .noDefault().endRecord();
    GenericData.EnumSymbol genericEnumSymbol = new GenericData.EnumSymbol(ENUM1_AB_SCHEMA_NAMESPACE_1, "A");
    GenericData.Record record = new GenericRecordBuilder(schema1).set("field", genericEnumSymbol).build();
    byte[] data = writeRecord(schema1, record);
    Assertions.assertEquals(genericEnumSymbol, readRecord(schema1, data).get("field"));
    Assertions.assertEquals(genericEnumSymbol, readRecord(schema2, data).get("field"));
  }

  /**
   * Parameterized fixtures: each entry pairs a schema JSON string with a datum
   * JSON string covering records, enums, fixed, arrays, maps, unions and every
   * primitive type.
   */
  public static Stream<Arguments> data() {
    Collection<String[]> ret = Arrays.asList(new String[][] {
        { "{ \"type\": \"record\", \"name\": \"r\", \"fields\": [ " + " { \"name\" : \"f1\", \"type\": \"int\" }, "
            + " { \"name\" : \"f2\", \"type\": \"float\" } " + "] }", "{ \"f2\": 10.4, \"f1\": 10 } " },
        { "{ \"type\": \"enum\", \"name\": \"e\", \"symbols\": " + "[ \"s1\", \"s2\"] }", " \"s1\" " },
        { "{ \"type\": \"enum\", \"name\": \"e\", \"symbols\": " + "[ \"s1\", \"s2\"] }", " \"s2\" " },
        { "{ \"type\": \"fixed\", \"name\": \"f\", \"size\": 10 }", "\"hello\"" },
        { "{ \"type\": \"array\", \"items\": \"int\" }", "[ 10, 20, 30 ]" },
        { "{ \"type\": \"map\", \"values\": \"int\" }", "{ \"k1\": 10, \"k3\": 20, \"k3\": 30 }" },
        { "[ \"int\", \"long\" ]", "10" }, { "\"string\"", "\"hello\"" }, { "\"bytes\"", "\"hello\"" },
        { "\"int\"", "10" }, { "\"long\"", "10" }, { "\"float\"", "10.0" }, { "\"double\"", "10.0" },
        { "\"boolean\"", "true" }, { "\"boolean\"", "false" }, { "\"null\"", "null" }, });
    final JsonFactory factory = new JsonFactory();
    final ObjectMapper mapper = new ObjectMapper(factory);
    return ret.stream().map((String[] args) -> {
      Schema schema = new Schema.Parser().parse(args[0]);
      try {
        JsonNode data = mapper.readTree(new StringReader(args[1]));
        return Arguments.of(schema, data);
      } catch (IOException ex) {
        // Streams cannot throw checked exceptions; rewrap for the pipeline.
        throw new UncheckedIOException(ex);
      }
    });
  }

  /** Serializes {@code record} to an in-memory Avro data file and returns its bytes. */
  private byte[] writeRecord(Schema schema, GenericData.Record record) throws Exception {
    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
    GenericDatumWriter<GenericData.Record> datumWriter = new GenericDatumWriter<>(schema);
    try (DataFileWriter<GenericData.Record> writer = new DataFileWriter<>(datumWriter)) {
      writer.create(schema, byteStream);
      writer.append(record);
    }
    return byteStream.toByteArray();
  }

  /** Reads the first record of an in-memory Avro data file with {@code schema}. */
  private GenericData.Record readRecord(Schema schema, byte[] data) throws Exception {
    ByteArrayInputStream byteStream = new ByteArrayInputStream(data);
    GenericDatumReader<GenericData.Record> datumReader = new GenericDatumReader<>(schema);
    try (DataFileStream<GenericData.Record> reader = new DataFileStream<>(byteStream, datumReader)) {
      return reader.next();
    }
  }
}
| 7,205 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/message/TestInteropSingleObjectEncoding.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.avro.message;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecordBuilder;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.io.File;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;
import java.util.Arrays;
/**
* Tests that <code>test_message.bin</code> is properly encoded <a href=
* "https://avro.apache.org/docs/current/spec.html#single_object_encoding">single
* object</a>
*/
public class TestInteropSingleObjectEncoding {
  private static final String RESOURCES_FOLDER = System.getProperty("share.dir", "../../../share")
      + "/test/data/messageV1";
  private static final File SCHEMA_FILE = new File(RESOURCES_FOLDER + "/test_schema.avsc");
  private static final File MESSAGE_FILE = new File(RESOURCES_FOLDER + "/test_message.bin");
  private static Schema SCHEMA;
  private static GenericRecordBuilder BUILDER;

  /** Loads the interop schema from disk and prepares a record builder for it. */
  @BeforeAll
  public static void setup() throws IOException {
    try (FileInputStream schemaStream = new FileInputStream(SCHEMA_FILE)) {
      SCHEMA = new Schema.Parser().parse(schemaStream);
      BUILDER = new GenericRecordBuilder(SCHEMA);
    }
  }

  /**
   * Encodes a known record as a single-object message and compares the result
   * byte-for-byte with the checked-in reference file.
   */
  @Test
  void checkSingleObjectEncoding() throws IOException {
    MessageEncoder<GenericData.Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA);
    GenericData.Record record = BUILDER.set("id", 42L).set("name", "Bill")
        .set("tags", Arrays.asList("dog_lover", "cat_hater")).build();
    ByteBuffer encoded = encoder.encode(record);
    byte[] expected = Files.readAllBytes(MESSAGE_FILE.toPath());
    assertArrayEquals(expected, encoded.array());
  }
}
| 7,206 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/message/TestGenerateInteropSingleObjectEncoding.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.avro.message;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecordBuilder;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
* Generates <code>test_message.bin</code> - a <a href=
* "https://avro.apache.org/docs/current/spec.html#single_object_encoding">single
* object encoded</a> Avro message.
*/
public class TestGenerateInteropSingleObjectEncoding {
  private static final String RESOURCES_FOLDER = System.getProperty("share.dir", "../../../share")
      + "/test/data/messageV1";
  private static final File SCHEMA_FILE = new File(RESOURCES_FOLDER + "/test_schema.avsc");
  private static final File MESSAGE_FILE = new File(RESOURCES_FOLDER + "/test_message.bin");
  private static Schema SCHEMA;
  private static GenericRecordBuilder BUILDER;

  /** Loads the interop schema from disk and prepares a record builder for it. */
  @BeforeAll
  public static void setup() throws IOException {
    try (FileInputStream fileInputStream = new FileInputStream(SCHEMA_FILE)) {
      SCHEMA = new Schema.Parser().parse(fileInputStream);
      BUILDER = new GenericRecordBuilder(SCHEMA);
    }
  }

  /**
   * Encodes a fixed record as a single-object message and (re)writes the
   * reference file consumed by the interop decoding test.
   */
  @Test
  void generateData() throws IOException {
    MessageEncoder<GenericData.Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA);
    // Build the record once; the previous implementation built an identical
    // record twice and discarded the first.
    ByteBuffer buffer = encoder.encode(
        BUILDER.set("id", 42L).set("name", "Bill").set("tags", Arrays.asList("dog_lover", "cat_hater")).build());
    // try-with-resources closes (and flushes) the stream; the previous
    // implementation leaked the FileOutputStream.
    try (FileOutputStream out = new FileOutputStream(MESSAGE_FILE)) {
      out.write(buffer.array());
    }
  }
}
| 7,207 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/message/TestBinaryMessageEncoding.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.avro.message;
import static org.junit.jupiter.api.Assertions.*;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.generic.GenericRecordBuilder;
import org.junit.jupiter.api.Test;
public class TestBinaryMessageEncoding {
private static final Schema SCHEMA_V1 = SchemaBuilder.record("TestRecord").fields().requiredInt("id")
.optionalString("msg").endRecord();
private static final GenericRecordBuilder V1_BUILDER = new GenericRecordBuilder(SCHEMA_V1);
private static final List<Record> V1_RECORDS = Arrays.asList(V1_BUILDER.set("id", 1).set("msg", "m-1").build(),
V1_BUILDER.set("id", 2).set("msg", "m-2").build(), V1_BUILDER.set("id", 4).set("msg", "m-4").build(),
V1_BUILDER.set("id", 6).set("msg", "m-6").build());
private static final Schema SCHEMA_V2 = SchemaBuilder.record("TestRecord").fields().requiredLong("id").name("message")
.aliases("msg").type().optional().stringType().optionalDouble("data").endRecord();
private static final GenericRecordBuilder V2_BUILDER = new GenericRecordBuilder(SCHEMA_V2);
private static final List<Record> V2_RECORDS = Arrays.asList(
V2_BUILDER.set("id", 3L).set("message", "m-3").set("data", 12.3).build(),
V2_BUILDER.set("id", 5L).set("message", "m-5").set("data", 23.4).build(),
V2_BUILDER.set("id", 7L).set("message", "m-7").set("data", 34.5).build(),
V2_BUILDER.set("id", 8L).set("message", "m-8").set("data", 35.6).build());
@Test
void byteBufferRoundTrip() throws Exception {
MessageEncoder<Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V2);
MessageDecoder<Record> decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2);
Record copy = decoder.decode(encoder.encode(V2_RECORDS.get(0)));
assertNotSame(copy, V2_RECORDS.get(0), "Copy should not be the same object");
assertEquals(V2_RECORDS.get(0), copy, "Record should be identical after round-trip");
}
@Test
void schemaEvolution() throws Exception {
List<ByteBuffer> buffers = new ArrayList<>();
List<Record> records = new ArrayList<>();
records.addAll(V1_RECORDS);
records.addAll(V2_RECORDS);
MessageEncoder<Record> v1Encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V1);
MessageEncoder<Record> v2Encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V2);
for (Record record : records) {
if (record.getSchema().equals(SCHEMA_V1)) {
buffers.add(v1Encoder.encode(record));
} else {
buffers.add(v2Encoder.encode(record));
}
}
Set<Record> allAsV2 = new HashSet<>(V2_RECORDS);
allAsV2.add(V2_BUILDER.set("id", 1L).set("message", "m-1").clear("data").build());
allAsV2.add(V2_BUILDER.set("id", 2L).set("message", "m-2").clear("data").build());
allAsV2.add(V2_BUILDER.set("id", 4L).set("message", "m-4").clear("data").build());
allAsV2.add(V2_BUILDER.set("id", 6L).set("message", "m-6").clear("data").build());
BinaryMessageDecoder<Record> v2Decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2);
v2Decoder.addSchema(SCHEMA_V1);
Set<Record> decodedUsingV2 = new HashSet<>();
for (ByteBuffer buffer : buffers) {
decodedUsingV2.add(v2Decoder.decode(buffer));
}
assertEquals(allAsV2, decodedUsingV2);
}
@Test
void compatibleReadFailsWithoutSchema() throws Exception {
assertThrows(MissingSchemaException.class, () -> {
MessageEncoder<Record> v1Encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V1);
BinaryMessageDecoder<Record> v2Decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2);
ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(3));
v2Decoder.decode(v1Buffer);
});
}
@Test
void compatibleReadWithSchema() throws Exception {
MessageEncoder<Record> v1Encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V1);
BinaryMessageDecoder<Record> v2Decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2);
v2Decoder.addSchema(SCHEMA_V1);
ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(3));
Record record = v2Decoder.decode(v1Buffer);
assertEquals(V2_BUILDER.set("id", 6L).set("message", "m-6").clear("data").build(), record);
}
@Test
void compatibleReadWithSchemaFromLookup() throws Exception {
MessageEncoder<Record> v1Encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V1);
SchemaStore.Cache schemaCache = new SchemaStore.Cache();
schemaCache.addSchema(SCHEMA_V1);
BinaryMessageDecoder<Record> v2Decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2, schemaCache);
ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(2));
Record record = v2Decoder.decode(v1Buffer);
assertEquals(V2_BUILDER.set("id", 4L).set("message", "m-4").clear("data").build(), record);
}
@Test
void identicalReadWithSchemaFromLookup() throws Exception {
MessageEncoder<Record> v1Encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V1);
SchemaStore.Cache schemaCache = new SchemaStore.Cache();
schemaCache.addSchema(SCHEMA_V1);
// The null readSchema should not throw an NPE, but trigger the
// BinaryMessageEncoder to use the write schema as read schema
BinaryMessageDecoder<Record> genericDecoder = new BinaryMessageDecoder<>(GenericData.get(), null, schemaCache);
ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(2));
Record record = genericDecoder.decode(v1Buffer);
assertEquals(V1_RECORDS.get(2), record);
}
@Test
void bufferReuse() throws Exception {
// This test depends on the serialized version of record 1 being smaller or
// the same size as record 0 so that the reused ByteArrayOutputStream won't
// expand its internal buffer.
MessageEncoder<Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V1, false);
ByteBuffer b0 = encoder.encode(V1_RECORDS.get(0));
ByteBuffer b1 = encoder.encode(V1_RECORDS.get(1));
assertEquals(b0.array(), b1.array());
MessageDecoder<Record> decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V1);
assertEquals(V1_RECORDS.get(1), decoder.decode(b0), "Buffer was reused, decode(b0) should be record 1");
}
@Test
void bufferCopy() throws Exception {
MessageEncoder<Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V1);
ByteBuffer b0 = encoder.encode(V1_RECORDS.get(0));
ByteBuffer b1 = encoder.encode(V1_RECORDS.get(1));
assertNotEquals(b0.array(), b1.array());
MessageDecoder<Record> decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V1);
// bytes are not changed by reusing the encoder
assertEquals(V1_RECORDS.get(0), decoder.decode(b0), "Buffer was copied, decode(b0) should be record 0");
}
@Test
void byteBufferMissingPayload() throws Exception {
assertThrows(AvroRuntimeException.class, () -> {
MessageEncoder<Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V2);
MessageDecoder<Record> decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2);
ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));
buffer.limit(12);
decoder.decode(buffer);
});
}
@Test
void byteBufferMissingFullHeader() throws Exception {
  // A buffer that ends before the header is complete must be rejected as a
  // bad header, not as a payload problem.
  MessageEncoder<Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V2);
  MessageDecoder<Record> decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2);
  ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));
  // Truncate inside the header (marker bytes + schema fingerprint).
  buffer.limit(8);
  // Only the decode call belongs inside assertThrows; a failure during setup
  // should surface as a test error rather than a false pass.
  assertThrows(BadHeaderException.class, () -> decoder.decode(buffer));
}
@Test
void byteBufferBadMarkerByte() throws Exception {
  // Corrupting the first marker byte of the header must trigger
  // BadHeaderException.
  MessageEncoder<Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V2);
  MessageDecoder<Record> decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2);
  ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));
  buffer.array()[0] = 0x00;
  // Only the decode call belongs inside assertThrows; a failure during setup
  // should surface as a test error rather than a false pass.
  assertThrows(BadHeaderException.class, () -> decoder.decode(buffer));
}
@Test
void byteBufferBadVersionByte() throws Exception {
  // Corrupting the version byte (second header byte) must trigger
  // BadHeaderException.
  MessageEncoder<Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V2);
  MessageDecoder<Record> decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2);
  ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));
  buffer.array()[1] = 0x00;
  // Only the decode call belongs inside assertThrows; a failure during setup
  // should surface as a test error rather than a false pass.
  assertThrows(BadHeaderException.class, () -> decoder.decode(buffer));
}
@Test
void byteBufferUnknownSchema() throws Exception {
  // Corrupting a byte inside the schema fingerprint makes the fingerprint
  // unresolvable, so decoding must report a missing schema.
  MessageEncoder<Record> encoder = new BinaryMessageEncoder<>(GenericData.get(), SCHEMA_V2);
  MessageDecoder<Record> decoder = new BinaryMessageDecoder<>(GenericData.get(), SCHEMA_V2);
  ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));
  buffer.array()[4] = 0x00;
  // Only the decode call belongs inside assertThrows; a failure during setup
  // should surface as a test error rather than a false pass.
  assertThrows(MissingSchemaException.class, () -> decoder.decode(buffer));
}
}
| 7,208 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/generic/TestGenericConcreteEnum.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.generic;
import org.apache.avro.FooBarSpecificRecord;
import org.apache.avro.TypeEnum;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.specific.SpecificDatumReader;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.util.Collections;
/**
* See AVRO-1810: GenericDatumWriter broken with Enum
*/
public class TestGenericConcreteEnum {
private static byte[] serializeRecord(FooBarSpecificRecord fooBarSpecificRecord) throws IOException {
GenericDatumWriter<FooBarSpecificRecord> datumWriter = new GenericDatumWriter<>(FooBarSpecificRecord.SCHEMA$);
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
Encoder encoder = EncoderFactory.get().binaryEncoder(byteArrayOutputStream, null);
datumWriter.write(fooBarSpecificRecord, encoder);
encoder.flush();
return byteArrayOutputStream.toByteArray();
}
@Test
void genericWriteAndRead() throws IOException {
FooBarSpecificRecord specificRecord = getRecord();
byte[] bytes = serializeRecord(specificRecord);
Decoder decoder = DecoderFactory.get().binaryDecoder(bytes, null);
GenericDatumReader<IndexedRecord> genericDatumReader = new GenericDatumReader<>(FooBarSpecificRecord.SCHEMA$);
IndexedRecord deserialized = new GenericData.Record(FooBarSpecificRecord.SCHEMA$);
genericDatumReader.read(deserialized, decoder);
assertEquals(0, GenericData.get().compare(specificRecord, deserialized, FooBarSpecificRecord.SCHEMA$));
}
@Test
void genericWriteSpecificRead() throws IOException {
FooBarSpecificRecord specificRecord = getRecord();
byte[] bytes = serializeRecord(specificRecord);
Decoder decoder = DecoderFactory.get().binaryDecoder(bytes, null);
SpecificDatumReader<FooBarSpecificRecord> specificDatumReader = new SpecificDatumReader<>(
FooBarSpecificRecord.SCHEMA$);
FooBarSpecificRecord deserialized = new FooBarSpecificRecord();
specificDatumReader.read(deserialized, decoder);
assertEquals(specificRecord, deserialized);
}
private FooBarSpecificRecord getRecord() {
return FooBarSpecificRecord.newBuilder().setId(42).setName("foo").setNicknames(Collections.singletonList("bar"))
.setRelatedids(Collections.singletonList(3)).setTypeEnum(TypeEnum.a).build();
}
}
| 7,209 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/generic/TestGenericRecordBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.generic;
import static org.junit.jupiter.api.Assertions.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Type;
import org.apache.avro.generic.GenericData.Record;
import org.junit.jupiter.api.Test;
/**
* Unit test for the GenericRecordBuilder class.
*/
/**
 * Unit test for the GenericRecordBuilder class: field get/set/clear semantics,
 * schema defaults on build(), copy constructors, and validation failures.
 */
public class TestGenericRecordBuilder {
  @Test
  void genericBuilder() {
    Schema schema = recordSchema();
    GenericRecordBuilder builder = new GenericRecordBuilder(schema);
    // Verify that builder has no fields set after initialization:
    for (Field field : schema.getFields()) {
      assertFalse(builder.has(field.name()), "RecordBuilder should not have field " + field.name());
      assertNull(builder.get(field.name()), "Field " + field.name() + " should be null");
    }
    // Set fields in the builder:
    builder.set("intField", 1);
    List<String> anArray = Arrays.asList("one", "two", "three");
    builder.set("anArray", anArray);
    assertTrue(builder.has("anArray"), "anArray should be set");
    assertEquals(anArray, builder.get("anArray"));
    assertFalse(builder.has("id"), "id should not be set");
    assertNull(builder.get("id"));
    // Build the record; the unset "id" field falls back to its schema default "0".
    Record record = builder.build();
    assertEquals(1, record.get("intField"));
    assertEquals(anArray, record.get("anArray"));
    assertNotNull(record.get("id"));
    assertEquals("0", record.get("id").toString());
    // Test copy constructors:
    assertEquals(builder, new GenericRecordBuilder(builder));
    assertEquals(record, new GenericRecordBuilder(record).build());
    // Test clear:
    builder.clear("intField");
    assertFalse(builder.has("intField"));
    assertNull(builder.get("intField"));
  }

  @Test
  void attemptToSetNonNullableFieldToNull() {
    // "intField" is a non-nullable int, so setting it to null must fail fast.
    assertThrows(AvroRuntimeException.class, () -> new GenericRecordBuilder(recordSchema()).set("intField", null));
  }

  @Test
  void buildWithoutSettingRequiredFields1() {
    // No required field is set, so build() must fail.
    assertThrows(AvroRuntimeException.class, () -> new GenericRecordBuilder(recordSchema()).build());
  }

  @Test
  void buildWithoutSettingRequiredFields2() {
    // The failure message should name the missing required field.
    AvroRuntimeException e = assertThrows(AvroRuntimeException.class,
        () -> new GenericRecordBuilder(recordSchema()).set("anArray", Collections.singletonList("one")).build());
    assertTrue(e.getMessage().contains("intField"));
  }

  /** Creates a test record schema with required, defaulted, and optional fields. */
  private static Schema recordSchema() {
    List<Field> fields = new ArrayList<>();
    fields.add(new Field("id", Schema.create(Type.STRING), null, "0"));
    fields.add(new Field("intField", Schema.create(Type.INT), null, null));
    fields.add(new Field("anArray", Schema.createArray(Schema.create(Type.STRING)), null, null));
    fields.add(new Field("optionalInt",
        Schema.createUnion(Arrays.asList(Schema.create(Type.NULL), Schema.create(Type.INT))), null, Schema.NULL_VALUE));
    Schema schema = Schema.createRecord("Foo", "test", "mytest", false);
    schema.setFields(fields);
    return schema;
  }
}
| 7,210 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/generic/TestGenericLogicalTypes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.generic;
import org.apache.avro.Conversion;
import org.apache.avro.Conversions;
import org.apache.avro.CustomType;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.data.TimeConversions;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.file.FileReader;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.util.TimePeriod;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.UUID;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotSame;
/**
 * Tests reading and writing logical-type values (uuid, decimal, duration,
 * local timestamps, and a custom registered type) through GenericData models,
 * both with the matching Conversions registered (GENERIC) and without them
 * (GenericData.get(), which sees the raw underlying representation).
 */
public class TestGenericLogicalTypes {
@TempDir
public File temp;
// Model with all conversions under test registered in addLogicalTypes();
// GenericData.get() serves as the conversion-free counterpart in each test.
public static final GenericData GENERIC = new GenericData();
@BeforeAll
public static void addLogicalTypes() {
GENERIC.addLogicalTypeConversion(new Conversions.DecimalConversion());
GENERIC.addLogicalTypeConversion(new Conversions.UUIDConversion());
GENERIC.addLogicalTypeConversion(new Conversions.DurationConversion());
GENERIC.addLogicalTypeConversion(new TimeConversions.LocalTimestampMicrosConversion());
GENERIC.addLogicalTypeConversion(new TimeConversions.LocalTimestampMillisConversion());
}
@Test
public void readUUID() throws IOException {
Schema uuidSchema = Schema.create(Schema.Type.STRING);
LogicalTypes.uuid().addToSchema(uuidSchema);
UUID u1 = UUID.randomUUID();
UUID u2 = UUID.randomUUID();
List<UUID> expected = Arrays.asList(u1, u2);
// Write plain strings, then read them back through the uuid logical type.
File test = write(Schema.create(Schema.Type.STRING), u1.toString(), u2.toString());
assertEquals(expected, read(GENERIC.createDatumReader(uuidSchema), test), "Should convert Strings to UUIDs");
}
@Test
public void writeUUID() throws IOException {
Schema stringSchema = Schema.create(Schema.Type.STRING);
stringSchema.addProp(GenericData.STRING_PROP, "String");
Schema uuidSchema = Schema.create(Schema.Type.STRING);
LogicalTypes.uuid().addToSchema(uuidSchema);
UUID u1 = UUID.randomUUID();
UUID u2 = UUID.randomUUID();
List<String> expected = Arrays.asList(u1.toString(), u2.toString());
// Write UUID objects through the conversion, read back as plain strings.
File test = write(GENERIC, uuidSchema, u1, u2);
assertEquals(expected, read(GenericData.get().createDatumReader(stringSchema), test),
"Should read UUIDs as Strings");
}
@Test
public void writeNullableUUID() throws IOException {
Schema stringSchema = Schema.create(Schema.Type.STRING);
stringSchema.addProp(GenericData.STRING_PROP, "String");
Schema nullableStringSchema = Schema.createUnion(Schema.create(Schema.Type.NULL), stringSchema);
Schema uuidSchema = Schema.create(Schema.Type.STRING);
LogicalTypes.uuid().addToSchema(uuidSchema);
// The conversion must also apply inside a [null, string] union.
Schema nullableUuidSchema = Schema.createUnion(Schema.create(Schema.Type.NULL), uuidSchema);
UUID u1 = UUID.randomUUID();
UUID u2 = UUID.randomUUID();
List<String> expected = Arrays.asList(u1.toString(), u2.toString());
File test = write(GENERIC, nullableUuidSchema, u1, u2);
assertEquals(expected, read(GenericData.get().createDatumReader(nullableStringSchema), test),
"Should read UUIDs as Strings");
}
@Test
public void readWriteDuration() throws IOException {
Schema fixedSchema = Schema.createFixed("bare.Fixed", null, null, 12);
Schema durationSchema = Schema.createFixed("time.Duration", null, null, 12);
LogicalTypes.duration().addToSchema(durationSchema);
// These two are necessary for schema evolution!
fixedSchema.addAlias(durationSchema.getFullName());
durationSchema.addAlias(fixedSchema.getFullName());
Random rng = new Random();
// A duration is encoded as three little-endian unsigned ints:
// months, days, millis.
TimePeriod d1 = TimePeriod.of(rng.nextInt(1000), rng.nextInt(1000), rng.nextInt(1000));
ByteBuffer b1 = ByteBuffer.allocate(12).order(ByteOrder.LITTLE_ENDIAN).putInt((int) d1.getMonths())
.putInt((int) d1.getDays()).putInt((int) d1.getMillis());
GenericFixed f1 = new GenericData.Fixed(fixedSchema, b1.array());
TimePeriod d2 = TimePeriod.of(rng.nextInt(1000), rng.nextInt(1000), rng.nextInt(1000));
ByteBuffer b2 = ByteBuffer.allocate(12).order(ByteOrder.LITTLE_ENDIAN).putInt((int) d2.getMonths())
.putInt((int) d2.getDays()).putInt((int) d2.getMillis());
GenericFixed f2 = new GenericData.Fixed(fixedSchema, b2.array());
File test = write(fixedSchema, f1, f2);
assertEquals(Arrays.asList(d1, d2), read(GENERIC.createDatumReader(durationSchema), test),
"Should convert fixed bytes to durations");
test = write(GENERIC, durationSchema, d2, d1);
assertEquals(Arrays.asList(f2, f1), read(GenericData.get().createDatumReader(fixedSchema), test),
"Should convert durations to fixed bytes");
}
@Test
public void readDecimalFixed() throws IOException {
LogicalType decimal = LogicalTypes.decimal(9, 2);
Schema fixedSchema = Schema.createFixed("aFixed", null, null, 4);
Schema decimalSchema = decimal.addToSchema(Schema.createFixed("aFixed", null, null, 4));
BigDecimal d1 = new BigDecimal("-34.34");
BigDecimal d2 = new BigDecimal("117230.00");
List<BigDecimal> expected = Arrays.asList(d1, d2);
Conversion<BigDecimal> conversion = new Conversions.DecimalConversion();
// use the conversion directly instead of relying on the write side
GenericFixed d1fixed = conversion.toFixed(d1, fixedSchema, decimal);
GenericFixed d2fixed = conversion.toFixed(d2, fixedSchema, decimal);
File test = write(fixedSchema, d1fixed, d2fixed);
assertEquals(expected, read(GENERIC.createDatumReader(decimalSchema), test), "Should convert fixed to BigDecimals");
}
@Test
public void writeDecimalFixed() throws IOException {
LogicalType decimal = LogicalTypes.decimal(9, 2);
Schema fixedSchema = Schema.createFixed("aFixed", null, null, 4);
Schema decimalSchema = decimal.addToSchema(Schema.createFixed("aFixed", null, null, 4));
BigDecimal d1 = new BigDecimal("-34.34");
BigDecimal d2 = new BigDecimal("117230.00");
Conversion<BigDecimal> conversion = new Conversions.DecimalConversion();
GenericFixed d1fixed = conversion.toFixed(d1, fixedSchema, decimal);
GenericFixed d2fixed = conversion.toFixed(d2, fixedSchema, decimal);
List<GenericFixed> expected = Arrays.asList(d1fixed, d2fixed);
File test = write(GENERIC, decimalSchema, d1, d2);
assertEquals(expected, read(GenericData.get().createDatumReader(fixedSchema), test),
"Should read BigDecimals as fixed");
}
@Test
public void decimalToFromBytes() {
LogicalType decimal = LogicalTypes.decimal(9, 2);
Schema bytesSchema = Schema.create(Schema.Type.BYTES);
// Check the round trip to and from bytes
BigDecimal d1 = new BigDecimal("-34.34");
BigDecimal d2 = new BigDecimal("117230.00");
Conversion<BigDecimal> conversion = new Conversions.DecimalConversion();
ByteBuffer d1bytes = conversion.toBytes(d1, bytesSchema, decimal);
ByteBuffer d2bytes = conversion.toBytes(d2, bytesSchema, decimal);
assertThat(conversion.fromBytes(d1bytes, bytesSchema, decimal), is(d1));
assertThat(conversion.fromBytes(d2bytes, bytesSchema, decimal), is(d2));
// Converting the same buffer twice must work, i.e. fromBytes must not
// advance the ByteBuffer's position.
assertThat("Ensure ByteBuffer not consumed by conversion", conversion.fromBytes(d1bytes, bytesSchema, decimal),
is(d1));
}
@Test
public void decimalToFromFixed() {
LogicalType decimal = LogicalTypes.decimal(9, 2);
Schema fixedSchema = Schema.createFixed("aFixed", null, null, 4);
// Check the round trip to and from fixed data.
BigDecimal d1 = new BigDecimal("-34.34");
BigDecimal d2 = new BigDecimal("117230.00");
Conversion<BigDecimal> conversion = new Conversions.DecimalConversion();
GenericFixed d1fixed = conversion.toFixed(d1, fixedSchema, decimal);
GenericFixed d2fixed = conversion.toFixed(d2, fixedSchema, decimal);
assertThat(conversion.fromFixed(d1fixed, fixedSchema, decimal), is(d1));
assertThat(conversion.fromFixed(d2fixed, fixedSchema, decimal), is(d2));
}
@Test
public void readDecimalBytes() throws IOException {
LogicalType decimal = LogicalTypes.decimal(9, 2);
Schema bytesSchema = Schema.create(Schema.Type.BYTES);
Schema decimalSchema = decimal.addToSchema(Schema.create(Schema.Type.BYTES));
BigDecimal d1 = new BigDecimal("-34.34");
BigDecimal d2 = new BigDecimal("117230.00");
List<BigDecimal> expected = Arrays.asList(d1, d2);
Conversion<BigDecimal> conversion = new Conversions.DecimalConversion();
// use the conversion directly instead of relying on the write side
ByteBuffer d1bytes = conversion.toBytes(d1, bytesSchema, decimal);
ByteBuffer d2bytes = conversion.toBytes(d2, bytesSchema, decimal);
File test = write(bytesSchema, d1bytes, d2bytes);
assertEquals(expected, read(GENERIC.createDatumReader(decimalSchema), test), "Should convert bytes to BigDecimals");
}
@Test
public void writeDecimalBytes() throws IOException {
LogicalType decimal = LogicalTypes.decimal(9, 2);
Schema bytesSchema = Schema.create(Schema.Type.BYTES);
Schema decimalSchema = decimal.addToSchema(Schema.create(Schema.Type.BYTES));
BigDecimal d1 = new BigDecimal("-34.34");
BigDecimal d2 = new BigDecimal("117230.00");
Conversion<BigDecimal> conversion = new Conversions.DecimalConversion();
// use the conversion directly instead of relying on the write side
ByteBuffer d1bytes = conversion.toBytes(d1, bytesSchema, decimal);
ByteBuffer d2bytes = conversion.toBytes(d2, bytesSchema, decimal);
List<ByteBuffer> expected = Arrays.asList(d1bytes, d2bytes);
File test = write(GENERIC, decimalSchema, d1bytes, d2bytes);
assertEquals(expected, read(GenericData.get().createDatumReader(bytesSchema), test),
"Should read BigDecimals as bytes");
}
/** Reads all records from an Avro data file with the given reader. */
private <D> List<D> read(DatumReader<D> reader, File file) throws IOException {
List<D> data = new ArrayList<>();
try (FileReader<D> fileReader = new DataFileReader<>(file, reader)) {
for (D datum : fileReader) {
data.add(datum);
}
}
return data;
}
/** Writes data to a temp Avro file with the raw (conversion-free) model. */
@SafeVarargs
private final <D> File write(Schema schema, D... data) throws IOException {
return write(GenericData.get(), schema, data);
}
/** Writes data to a temp Avro file using the given model's conversions. */
@SuppressWarnings("unchecked")
private <D> File write(GenericData model, Schema schema, D... data) throws IOException {
File file = new File(temp, "out.avro");
DatumWriter<D> writer = model.createDatumWriter(schema);
try (DataFileWriter<D> fileWriter = new DataFileWriter<>(writer)) {
fileWriter.create(schema, file);
for (D datum : data) {
fileWriter.append(datum);
}
}
return file;
}
@Test
public void copyUuid() {
testCopy(LogicalTypes.uuid().addToSchema(Schema.create(Schema.Type.STRING)), UUID.randomUUID(), GENERIC);
}
@Test
public void copyUuidRaw() {
testCopy(LogicalTypes.uuid().addToSchema(Schema.create(Schema.Type.STRING)), UUID.randomUUID().toString(), // use
// raw
// type
GenericData.get()); // with no conversions
}
@Test
public void copyDecimal() {
testCopy(LogicalTypes.decimal(9, 2).addToSchema(Schema.create(Schema.Type.BYTES)), new BigDecimal("-34.34"),
GENERIC);
}
@Test
public void copyDecimalRaw() {
testCopy(LogicalTypes.decimal(9, 2).addToSchema(Schema.create(Schema.Type.BYTES)),
ByteBuffer.wrap(new BigDecimal("-34.34").unscaledValue().toByteArray()), GenericData.get()); // no conversions
}
/**
 * Checks deepCopy of a logical-type value directly and nested in a record,
 * an array, and an array of records.
 */
private void testCopy(Schema schema, Object value, GenericData model) {
// test direct copy of instance
checkCopy(value, model.deepCopy(schema, value), false);
// test nested in a record
Schema recordSchema = Schema.createRecord("X", "", "test", false);
List<Schema.Field> fields = new ArrayList<>();
fields.add(new Schema.Field("x", schema, "", null));
recordSchema.setFields(fields);
GenericRecordBuilder builder = new GenericRecordBuilder(recordSchema);
builder.set("x", value);
GenericData.Record record = builder.build();
checkCopy(record, model.deepCopy(recordSchema, record), true);
// test nested in array
Schema arraySchema = Schema.createArray(schema);
ArrayList<Object> array = new ArrayList<>(Collections.singletonList(value));
checkCopy(array, model.deepCopy(arraySchema, array), true);
// test record nested in array
Schema recordArraySchema = Schema.createArray(recordSchema);
ArrayList<GenericRecord> recordArray = new ArrayList<>(Collections.singletonList(record));
checkCopy(recordArray, model.deepCopy(recordArraySchema, recordArray), true);
}
/** Asserts copy equals original; when notSame, also asserts a new instance. */
private void checkCopy(Object original, Object copy, boolean notSame) {
if (notSame)
assertNotSame(original, copy);
assertEquals(original, copy);
}
@Test
public void readLocalTimestampMillis() throws IOException {
LogicalType timestamp = LogicalTypes.localTimestampMillis();
Schema longSchema = Schema.create(Schema.Type.LONG);
Schema timestampSchema = timestamp.addToSchema(Schema.create(Schema.Type.LONG));
LocalDateTime i1 = LocalDateTime.of(1986, 6, 26, 12, 7, 11, 42000000);
LocalDateTime i2 = LocalDateTime.ofInstant(Instant.ofEpochMilli(0), ZoneOffset.UTC);
List<LocalDateTime> expected = Arrays.asList(i1, i2);
Conversion<LocalDateTime> conversion = new TimeConversions.LocalTimestampMillisConversion();
// use the conversion directly instead of relying on the write side
Long i1long = conversion.toLong(i1, longSchema, timestamp);
Long i2long = 0L;
File test = write(longSchema, i1long, i2long);
assertEquals(expected, read(GENERIC.createDatumReader(timestampSchema), test),
"Should convert long to LocalDateTime");
}
@Test
public void writeLocalTimestampMillis() throws IOException {
LogicalType timestamp = LogicalTypes.localTimestampMillis();
Schema longSchema = Schema.create(Schema.Type.LONG);
Schema timestampSchema = timestamp.addToSchema(Schema.create(Schema.Type.LONG));
LocalDateTime i1 = LocalDateTime.of(1986, 6, 26, 12, 7, 11, 42000000);
LocalDateTime i2 = LocalDateTime.ofInstant(Instant.ofEpochMilli(0), ZoneOffset.UTC);
Conversion<LocalDateTime> conversion = new TimeConversions.LocalTimestampMillisConversion();
Long d1long = conversion.toLong(i1, longSchema, timestamp);
Long d2long = 0L;
List<Long> expected = Arrays.asList(d1long, d2long);
File test = write(GENERIC, timestampSchema, i1, i2);
assertEquals(expected, read(GenericData.get().createDatumReader(timestampSchema), test),
"Should read LocalDateTime as longs");
}
@Test
public void readLocalTimestampMicros() throws IOException {
LogicalType timestamp = LogicalTypes.localTimestampMicros();
Schema longSchema = Schema.create(Schema.Type.LONG);
Schema timestampSchema = timestamp.addToSchema(Schema.create(Schema.Type.LONG));
LocalDateTime i1 = LocalDateTime.of(1986, 6, 26, 12, 7, 11, 420000);
LocalDateTime i2 = LocalDateTime.ofInstant(Instant.ofEpochSecond(0, 4000), ZoneOffset.UTC);
List<LocalDateTime> expected = Arrays.asList(i1, i2);
Conversion<LocalDateTime> conversion = new TimeConversions.LocalTimestampMicrosConversion();
// use the conversion directly instead of relying on the write side
Long i1long = conversion.toLong(i1, longSchema, timestamp);
Long i2long = conversion.toLong(i2, longSchema, timestamp);
File test = write(longSchema, i1long, i2long);
assertEquals(expected, read(GENERIC.createDatumReader(timestampSchema), test),
"Should convert long to LocalDateTime");
}
@Test
public void writeLocalTimestampMicros() throws IOException {
LogicalType timestamp = LogicalTypes.localTimestampMicros();
Schema longSchema = Schema.create(Schema.Type.LONG);
Schema timestampSchema = timestamp.addToSchema(Schema.create(Schema.Type.LONG));
LocalDateTime i1 = LocalDateTime.of(1986, 6, 26, 12, 7, 11, 420000);
LocalDateTime i2 = LocalDateTime.ofInstant(Instant.ofEpochSecond(0, 4000), ZoneOffset.UTC);
Conversion<LocalDateTime> conversion = new TimeConversions.LocalTimestampMicrosConversion();
Long d1long = conversion.toLong(i1, longSchema, timestamp);
Long d2long = conversion.toLong(i2, longSchema, timestamp);
List<Long> expected = Arrays.asList(d1long, d2long);
File test = write(GENERIC, timestampSchema, i1, i2);
assertEquals(expected, read(GenericData.get().createDatumReader(timestampSchema), test),
"Should read LocalDateTime as longs");
}
@Test
public void testReadAutomaticallyRegisteredUri() throws IOException {
Schema stringSchema = Schema.create(Schema.Type.STRING);
GenericData.setStringType(stringSchema, GenericData.StringType.String);
// "custom" is discovered via LogicalTypes' service-loader registration.
LogicalType customType = LogicalTypes.getCustomRegisteredTypes().get("custom").fromSchema(stringSchema);
Schema customTypeSchema = customType.addToSchema(Schema.create(Schema.Type.STRING));
CustomType ct1 = new CustomType("foo");
CustomType ct2 = new CustomType("bar");
List<CustomType> expected = Arrays.asList(ct1, ct2);
Conversion<CustomType> conversion = GENERIC.getConversionFor(customType);
// use the conversion directly instead of relying on the write side
CharSequence ct1String = conversion.toCharSequence(ct1, stringSchema, customType);
CharSequence ct2String = conversion.toCharSequence(ct2, stringSchema, customType);
File test = write(stringSchema, ct1String, ct2String);
assertEquals(expected, read(GENERIC.createDatumReader(customTypeSchema), test),
"Should convert string to CustomType");
}
@Test
public void testWriteAutomaticallyRegisteredUri() throws IOException {
Schema stringSchema = Schema.create(Schema.Type.STRING);
GenericData.setStringType(stringSchema, GenericData.StringType.String);
LogicalType customType = LogicalTypes.getCustomRegisteredTypes().get("custom").fromSchema(stringSchema);
Schema customTypeSchema = customType.addToSchema(Schema.create(Schema.Type.STRING));
CustomType ct1 = new CustomType("foo");
CustomType ct2 = new CustomType("bar");
Conversion<CustomType> conversion = GENERIC.getConversionFor(customType);
// use the conversion directly instead of relying on the write side
CharSequence ct1String = conversion.toCharSequence(ct1, stringSchema, customType);
CharSequence ct2String = conversion.toCharSequence(ct2, stringSchema, customType);
List<CharSequence> expected = Arrays.asList(ct1String, ct2String);
File test = write(GENERIC, customTypeSchema, ct1, ct2);
// Note that this test still cannot read strings using the logical type
// schema, as all GenericData instances have the logical type and the
// conversions loaded. That's why this final assert is slightly different.
assertEquals(expected, read(GenericData.get().createDatumReader(stringSchema), test),
"Should read CustomType as strings");
}
}
| 7,211 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/generic/PrimitivesArraysTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.generic;
import org.apache.avro.Schema;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
class PrimitivesArraysTest {
@Test
void booleanArray() {
// The fixture marks element i-1 true exactly when i is divisible by 3 or 5
// (a FizzBuzz-like pattern), so expected membership is recomputed inline.
PrimitivesArrays.BooleanArray ba = new PrimitivesArrays.BooleanArray(4,
Schema.createArray(Schema.create(Schema.Type.BOOLEAN)));
Assertions.assertEquals(0, ba.size());
for (int i = 1; i < 100; i++) {
if (i % 3 == 0 || i % 5 == 0) {
ba.add(true);
} else {
ba.add(false);
}
}
Assertions.assertEquals(99, ba.size());
for (int i = 1; i < 100; i++) {
if (i % 3 == 0 || i % 5 == 0) {
Assertions.assertTrue(ba.get(i - 1), "Error for " + i);
} else {
Assertions.assertFalse(ba.get(i - 1), "Error for " + i);
}
}
// remove(12) deletes index 12 — the value for i == 13, which is false —
// and returns the removed element; the tail shifts left by one.
Assertions.assertFalse(ba.remove(12));
Assertions.assertEquals(98, ba.size());
for (int i = 13; i < 99; i++) {
if ((i + 1) % 3 == 0 || (i + 1) % 5 == 0) {
Assertions.assertTrue(ba.get(i - 1), "After delete, Error for " + i);
} else {
Assertions.assertFalse(ba.get(i - 1), "After delete, Error for " + i);
}
}
// Re-insert false at index 12 to restore the original pattern.
ba.add(12, false);
Assertions.assertEquals(99, ba.size());
for (int i = 1; i < 100; i++) {
if (i % 3 == 0 || i % 5 == 0) {
Assertions.assertTrue(ba.get(i - 1), "Error for " + i);
} else {
Assertions.assertFalse(ba.get(i - 1), "Error for " + i);
}
}
// Replace index 12 with true: remove it, then insert true in its place,
// so i == 13 becomes the one extra true in the pattern below.
Assertions.assertFalse(ba.remove(12));
ba.add(12, true);
for (int i = 1; i < 100; i++) {
if (i % 3 == 0 || i % 5 == 0 || i == 13) {
Assertions.assertTrue(ba.get(i - 1), "Error for " + i);
} else {
Assertions.assertFalse(ba.get(i - 1), "Error for " + i);
}
}
// Inserting at index == size() appends; verify, then undo the append.
ba.add(99, true);
Assertions.assertTrue(ba.get(99), "Error for 99");
ba.remove(99);
// After reversal, element i now lives at mirrored index 99 - i.
ba.reverse();
for (int i = 1; i < 100; i++) {
if (i % 3 == 0 || i % 5 == 0 || i == 13) {
Assertions.assertTrue(ba.get(99 - i), "Error for " + i);
} else {
Assertions.assertFalse(ba.get(99 - i), "Error for " + i);
}
}
}
@Test
void booleanArrayIterator() {
  // The iterator must yield the stored values in insertion order.
  boolean[] expected = { true, false, false, true, true, true, false, false, true, false, false };
  PrimitivesArrays.BooleanArray array = new PrimitivesArrays.BooleanArray(4,
      Schema.createArray(Schema.create(Schema.Type.BOOLEAN)));
  for (int i = 0; i < expected.length; i++) {
    array.add(expected[i]);
  }
  Assertions.assertEquals(expected.length, array.size());
  int pos = 0;
  for (boolean value : array) {
    Assertions.assertEquals(expected[pos], value);
    pos++;
  }
}
@Test
void intArray() {
  final PrimitivesArrays.IntArray array = new PrimitivesArrays.IntArray(4,
      Schema.createArray(Schema.create(Schema.Type.INT)));
  // Fill with 1..100, then verify random access and iteration order.
  for (int value = 1; value <= 100; value++) {
    array.add(value);
  }
  Assertions.assertEquals(100, array.size());
  for (int index = 0; index < 100; index++) {
    Assertions.assertEquals(index + 1, array.get(index));
  }
  int next = 1;
  for (int value : array) {
    Assertions.assertEquals(next, value);
    next++;
  }
  // Removing index 40 drops the value 41 and shifts the tail left.
  array.remove(40);
  Assertions.assertEquals(99, array.size());
  for (int index = 0; index < 99; index++) {
    Assertions.assertEquals(index < 40 ? index + 1 : index + 2, array.get(index));
  }
  // Re-inserting 41 at index 40 restores the original sequence.
  array.add(40, 41);
  Assertions.assertEquals(100, array.size());
  for (int index = 0; index < 100; index++) {
    Assertions.assertEquals(index + 1, array.get(index));
  }
  // set() overwrites in place; peek() returns 0 here.
  array.set(40, 25);
  Assertions.assertEquals(25, array.get(40));
  Assertions.assertEquals(0, array.peek());
  array.set(40, 41);
  // After reversal the array reads 100 down to 1.
  array.reverse();
  Assertions.assertEquals(100, array.size());
  for (int index = 0; index < 100; index++) {
    Assertions.assertEquals(100 - index, array.get(index));
  }
}
/**
 * Exercises {@code PrimitivesArrays.LongArray}: bulk append, indexed access,
 * iteration, remove/insert in the middle, set/peek and in-place reverse.
 */
@Test
void longArray() {
  final PrimitivesArrays.LongArray longArray = new PrimitivesArrays.LongArray(4,
      Schema.createArray(Schema.create(Schema.Type.LONG)));
  for (long i = 1; i <= 100; i++) {
    longArray.add(i);
  }
  // Fixed literal: was "100l" — a lowercase 'l' suffix is easily misread as the digit 1.
  Assertions.assertEquals(100L, longArray.size());
  for (int i = 1; i <= 100; i++) {
    Assertions.assertEquals(i, longArray.get(i - 1));
  }
  // The iterator must replay the values in insertion order.
  int expectedValue = 1;
  for (Long value : longArray) {
    Assertions.assertEquals(expectedValue, value);
    expectedValue++;
  }
  // Removing index 40 drops the value 41 and shifts the tail down by one.
  longArray.remove(40);
  Assertions.assertEquals(99, longArray.size());
  for (int i = 1; i <= 99; i++) {
    if (i <= 40) {
      Assertions.assertEquals(i, longArray.get(i - 1));
    } else {
      Assertions.assertEquals(i + 1, longArray.get(i - 1));
    }
  }
  // Re-inserting 41 at index 40 restores the original 1..100 sequence.
  longArray.add(40, 41);
  Assertions.assertEquals(100, longArray.size());
  for (int i = 1; i <= 100; i++) {
    Assertions.assertEquals(i, longArray.get(i - 1));
  }
  // set() overwrites in place; peek() reports 0 here — TODO confirm intended peek() semantics.
  longArray.set(40, 25);
  Assertions.assertEquals(25, longArray.get(40));
  Assertions.assertEquals(0, longArray.peek());
  longArray.set(40, 41);
  // reverse() flips the element order in place.
  longArray.reverse();
  Assertions.assertEquals(100, longArray.size());
  for (int i = 1; i <= 100; i++) {
    Assertions.assertEquals(101 - i, longArray.get(i - 1));
  }
}
@Test
void floatArray() {
  final PrimitivesArrays.FloatArray array = new PrimitivesArrays.FloatArray(4,
      Schema.createArray(Schema.create(Schema.Type.FLOAT)));
  // Fill with i * 3.3f; the same expression is used in the assertions below,
  // so exact float comparison is safe.
  for (int v = 1; v <= 100; v++) {
    array.add(v * 3.3f);
  }
  Assertions.assertEquals(100, array.size());
  for (int idx = 0; idx < 100; idx++) {
    Assertions.assertEquals((idx + 1) * 3.3f, array.get(idx));
  }
  // The iterator must replay the values in insertion order.
  float next = 1.0f;
  for (Float value : array) {
    Assertions.assertEquals(next * 3.3f, value);
    next++;
  }
  // Removing index 40 drops 41 * 3.3f and shifts the tail down by one.
  array.remove(40);
  Assertions.assertEquals(99, array.size());
  for (int idx = 0; idx < 99; idx++) {
    float expected = (idx < 40 ? idx + 1 : idx + 2) * 3.3f;
    Assertions.assertEquals(expected, array.get(idx));
  }
  // Re-inserting the dropped value restores the original sequence.
  array.add(40, 41 * 3.3f);
  Assertions.assertEquals(100, array.size());
  for (int idx = 0; idx < 100; idx++) {
    Assertions.assertEquals((idx + 1) * 3.3f, array.get(idx));
  }
  // set() overwrites in place; peek() reports 0 here — TODO confirm intended peek() semantics.
  array.set(40, 25.2f);
  Assertions.assertEquals(25.2f, array.get(40));
  Assertions.assertEquals(0.0f, array.peek());
  array.set(40, 41 * 3.3f);
  // reverse() flips the element order in place.
  array.reverse();
  Assertions.assertEquals(100, array.size());
  for (int idx = 0; idx < 100; idx++) {
    Assertions.assertEquals((100 - idx) * 3.3f, array.get(idx));
  }
}
/**
 * Exercises {@code PrimitivesArrays.DoubleArray}: bulk append, indexed access,
 * iteration, remove/insert in the middle, set/peek and in-place reverse.
 */
@Test
void doubleArray() {
  final PrimitivesArrays.DoubleArray doubleArray = new PrimitivesArrays.DoubleArray(4,
      Schema.createArray(Schema.create(Schema.Type.DOUBLE)));
  for (int i = 1; i <= 100; i++) {
    doubleArray.add(i * 3.0d);
  }
  Assertions.assertEquals(100, doubleArray.size());
  for (int i = 1; i <= 100; i++) {
    Assertions.assertEquals(i * 3.0d, doubleArray.get(i - 1));
  }
  // Fixed literal: was "1.0f" — use a double literal for a double variable.
  double expectedValue = 1.0d;
  for (Double value : doubleArray) {
    Assertions.assertEquals(expectedValue * 3.0d, value);
    expectedValue++;
  }
  // Removing index 40 drops 41 * 3.0d and shifts the tail down by one.
  doubleArray.remove(40);
  Assertions.assertEquals(99, doubleArray.size());
  for (int i = 1; i <= 99; i++) {
    if (i <= 40) {
      Assertions.assertEquals(i * 3.0d, doubleArray.get(i - 1));
    } else {
      Assertions.assertEquals((i + 1) * 3.0d, doubleArray.get(i - 1));
    }
  }
  // Re-inserting the dropped value restores the original sequence.
  doubleArray.add(40, 41 * 3.0d);
  Assertions.assertEquals(100, doubleArray.size());
  for (int i = 1; i <= 100; i++) {
    Assertions.assertEquals(i * 3.0d, doubleArray.get(i - 1));
  }
  // set() overwrites in place; peek() reports 0 here — TODO confirm intended peek() semantics.
  doubleArray.set(40, 25.2d);
  Assertions.assertEquals(25.2d, doubleArray.get(40));
  Assertions.assertEquals(0.0d, doubleArray.peek());
  doubleArray.set(40, 41 * 3.0d);
  // reverse() flips the element order in place.
  doubleArray.reverse();
  Assertions.assertEquals(100, doubleArray.size());
  for (int i = 1; i <= 100; i++) {
    Assertions.assertEquals((101 - i) * 3.0d, doubleArray.get(i - 1));
  }
}
}
| 7,212 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/generic/GenericDataArrayTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.generic;
import org.apache.avro.Schema;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
/** Checks positional add/remove behaviour of {@link GenericData.Array}. */
class GenericDataArrayTest {
  @Test
  void test() {
    final Schema arraySchema = Schema.createArray(Schema.create(Schema.Type.STRING));
    final GenericData.Array<String> array = new GenericData.Array<>(10, arraySchema);
    for (String value : new String[] { "One", "Two", "Two", "Three" }) {
      array.add(value);
    }
    array.add(4, "Four");
    // Dropping the first duplicate "Two" leaves four distinct elements in order.
    array.remove(1);
    Assertions.assertEquals(4, array.size());
    Assertions.assertEquals("One", array.get(0));
    Assertions.assertEquals("Two", array.get(1));
    Assertions.assertEquals("Three", array.get(2));
    Assertions.assertEquals("Four", array.get(3));
  }
}
| 7,213 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/generic/TestSkipEnumSchema.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.generic;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData.EnumSymbol;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
/**
* See AVRO-2908
*/
public class TestSkipEnumSchema {
  /**
   * Regression test for AVRO-2908: skipping an enum value with
   * {@link GenericDatumReader#skip} must succeed against a validating decoder.
   * The validating encoder/decoder pair checks each call against the schema,
   * so a skip that reads the wrong bytes would throw.
   */
  @Test
  void skipEnum() throws IOException {
    Schema enumSchema = SchemaBuilder.builder().enumeration("enum").symbols("en1", "en2");
    EnumSymbol enumSymbol = new EnumSymbol(enumSchema, "en1");
    GenericDatumWriter<EnumSymbol> datumWriter = new GenericDatumWriter<>(enumSchema);
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    Encoder encoder = EncoderFactory.get().validatingEncoder(enumSchema,
        EncoderFactory.get().binaryEncoder(byteArrayOutputStream, null));
    datumWriter.write(enumSymbol, encoder);
    encoder.flush();
    Decoder decoder = DecoderFactory.get().validatingDecoder(enumSchema,
        DecoderFactory.get().binaryDecoder(byteArrayOutputStream.toByteArray(), null));
    // skip() rather than read(): the test passes if no exception is thrown.
    GenericDatumReader.skip(enumSchema, decoder);
  }
}
| 7,214 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/generic/TestGenericDatumReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.generic;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.avro.Schema;
import org.junit.jupiter.api.Test;
public class TestGenericDatumReader {
  /** Shared RNG used to add jitter to findStringClass lookups. */
  private static final Random r = new Random(System.currentTimeMillis());

  /**
   * Hammers {@code GenericDatumReader.ReaderCache} from 200 concurrent threads
   * sharing 50 distinct schemas; findStringClass sleeps randomly to widen the
   * race window. The test passes if every thread completes without error.
   */
  @Test
  void readerCache() {
    final GenericDatumReader.ReaderCache cache = new GenericDatumReader.ReaderCache(this::findStringClass);
    List<Thread> threads = IntStream.rangeClosed(1, 200).mapToObj((int index) -> {
      final Schema schema = TestGenericDatumReader.this.build(index);
      final WithSchema s = new WithSchema(schema, cache);
      return (Runnable) s::test;
    }).map(Thread::new).collect(Collectors.toList());
    threads.forEach(Thread::start);
    threads.forEach((Thread t) -> {
      try {
        t.join();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag before failing
        throw new RuntimeException(e);
      }
    });
  }

  /** newInstanceFromString must build the instance via the class's String constructor. */
  @Test
  void newInstanceFromString() {
    final GenericDatumReader.ReaderCache cache = new GenericDatumReader.ReaderCache(this::findStringClass);
    Object object = cache.newInstanceFromString(StringBuilder.class, "Hello");
    assertEquals(StringBuilder.class, object.getClass());
    StringBuilder builder = (StringBuilder) object;
    assertEquals("Hello", builder.toString());
  }

  /** Pairs a schema with the cache so each worker thread performs one lookup. */
  static class WithSchema {
    private final Schema schema;
    private final GenericDatumReader.ReaderCache cache;

    public WithSchema(Schema schema, GenericDatumReader.ReaderCache cache) {
      this.schema = schema;
      this.cache = cache;
    }

    public void test() {
      this.cache.getStringClass(schema);
    }
  }

  // Schemas created by build(); populated sequentially before the threads start.
  private final List<Schema> list = new ArrayList<>();

  /**
   * Returns one of 50 record schemas. The first 50 calls (made on the test
   * thread while the Runnables are being built) create and store them; later
   * calls reuse them so several threads share each schema.
   */
  private Schema build(int index) {
    int schemaNum = (index - 1) % 50;
    if (index <= 50) {
      Schema schema = Schema.createRecord("record_" + schemaNum, "doc", "namespace", false,
          Arrays.asList(new Schema.Field("field" + schemaNum, Schema.create(Schema.Type.STRING))));
      list.add(schema);
    }
    return list.get(schemaNum);
  }

  /**
   * Slow, deterministic schema-to-class mapping used as the cache's loader.
   * Fixed to return the parameterized {@code Class<?>} instead of a raw type.
   */
  private Class<?> findStringClass(Schema schema) {
    this.sleep();
    if (schema.getType() == Schema.Type.INT) {
      return Integer.class;
    }
    if (schema.getType() == Schema.Type.STRING) {
      return String.class;
    }
    if (schema.getType() == Schema.Type.LONG) {
      return Long.class;
    }
    if (schema.getType() == Schema.Type.FLOAT) {
      return Float.class;
    }
    return String.class;
  }

  /** Sleeps 26-39 ms when the random draw exceeds 25 (roughly half the time). */
  private void sleep() {
    long timeToSleep = r.nextInt(30) + 10L;
    if (timeToSleep > 25) {
      try {
        Thread.sleep(timeToSleep);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve interrupt status
        throw new RuntimeException(e);
      }
    }
  }
}
| 7,215 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/generic/TestGenericDatumWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.generic;
import static org.junit.jupiter.api.Assertions.*;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.avro.AvroTypeException;
import org.apache.avro.Schema;
import org.apache.avro.UnresolvedUnionException;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.util.Utf8;
import org.junit.jupiter.api.Test;
public class TestGenericDatumWriter {
/**
 * A union of ["null","string"] cannot hold an int; the error message must name
 * the offending field. Converted from try/fail/catch to assertThrows for
 * consistency with the other tests in this class.
 */
@Test
void unionUnresolvedExceptionExplicitWhichField() throws IOException {
  Schema s = schemaWithExplicitNullDefault();
  GenericRecord r = new GenericData.Record(s);
  r.put("f", 100);
  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  UnresolvedUnionException uue = assertThrows(UnresolvedUnionException.class,
      () -> new GenericDatumWriter<>(s).write(r, EncoderFactory.get().jsonEncoder(s, bao)));
  assertEquals("Not in union [\"null\",\"string\"]: 100 (field=f)", uue.getMessage());
}
/** Round-trips a single-field record through the JSON encoder and decoder. */
@Test
void write() throws IOException {
  String schemaJson = "{\"type\": \"record\", \"name\": \"r\", \"fields\": ["
      + "{ \"name\": \"f1\", \"type\": \"long\" }" + "]}";
  Schema schema = new Schema.Parser().parse(schemaJson);
  GenericRecord record = new GenericData.Record(schema);
  record.put("f1", 100L);
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<>(schema);
  Encoder encoder = EncoderFactory.get().jsonEncoder(schema, out);
  writer.write(record, encoder);
  encoder.flush();
  // Reading the bytes back must reproduce an equal record.
  Object roundTripped = new GenericDatumReader<GenericRecord>(schema).read(null,
      DecoderFactory.get().jsonDecoder(schema, new ByteArrayInputStream(out.toByteArray())));
  assertEquals(record, roundTripped);
}
/**
 * Mutating an array after its size has been written must surface as a
 * ConcurrentModificationException from the writer. TestEncoder's latches pause
 * the writer thread between writing the size and writing the elements.
 */
@Test
void arrayConcurrentModification() throws Exception {
  String json = "{\"type\": \"array\", \"items\": \"int\" }";
  Schema s = new Schema.Parser().parse(json);
  final GenericArray<Integer> a = new GenericData.Array<>(1, s);
  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  final GenericDatumWriter<GenericArray<Integer>> w = new GenericDatumWriter<>(s);
  CountDownLatch sizeWrittenSignal = new CountDownLatch(1);
  CountDownLatch eltAddedSignal = new CountDownLatch(1);
  final TestEncoder e = new TestEncoder(EncoderFactory.get().directBinaryEncoder(bao, null), sizeWrittenSignal,
      eltAddedSignal);
  // call write in another thread
  ExecutorService executor = Executors.newSingleThreadExecutor();
  try {
    Future<Void> result = executor.submit(() -> {
      w.write(a, e);
      return null;
    });
    sizeWrittenSignal.await();
    // size has been written so now add an element to the array
    a.add(7);
    // and signal for the element to be written
    eltAddedSignal.countDown();
    ExecutionException ex = assertThrows(ExecutionException.class, result::get);
    assertTrue(ex.getCause() instanceof ConcurrentModificationException);
  } finally {
    // Fix: the executor was previously never shut down, leaking its worker thread.
    executor.shutdown();
  }
}
/**
 * Same as arrayConcurrentModification, but for a map: adding an entry after
 * the map size has been written must surface as a
 * ConcurrentModificationException from the writer.
 */
@Test
void mapConcurrentModification() throws Exception {
  String json = "{\"type\": \"map\", \"values\": \"int\" }";
  Schema s = new Schema.Parser().parse(json);
  final Map<String, Integer> m = new HashMap<>();
  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  final GenericDatumWriter<Map<String, Integer>> w = new GenericDatumWriter<>(s);
  CountDownLatch sizeWrittenSignal = new CountDownLatch(1);
  CountDownLatch eltAddedSignal = new CountDownLatch(1);
  final TestEncoder e = new TestEncoder(EncoderFactory.get().directBinaryEncoder(bao, null), sizeWrittenSignal,
      eltAddedSignal);
  // call write in another thread
  ExecutorService executor = Executors.newSingleThreadExecutor();
  try {
    Future<Void> result = executor.submit(() -> {
      w.write(m, e);
      return null;
    });
    sizeWrittenSignal.await();
    // size has been written so now add an entry to the map
    m.put("a", 7);
    // and signal for the entry to be written
    eltAddedSignal.countDown();
    ExecutionException ex = assertThrows(ExecutionException.class, result::get);
    assertTrue(ex.getCause() instanceof ConcurrentModificationException);
  } finally {
    // Fix: the executor was previously never shut down, leaking its worker thread.
    executor.shutdown();
  }
}
/** A double field must accept double, long and int values on write. */
@Test
void allowWritingPrimitives() throws IOException {
  Schema doubleType = Schema.create(Schema.Type.DOUBLE);
  Schema.Field field = new Schema.Field("double", doubleType);
  Schema schema = Schema.createRecord("test", "doc", "", false, Collections.singletonList(field));
  GenericRecord record = new GenericData.Record(schema);
  // Each put overwrites the previous value; all three widths must be accepted.
  record.put("double", 456.4);
  record.put("double", 100000L);
  record.put("double", 444);
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<>(schema);
  writer.write(record, EncoderFactory.get().jsonEncoder(schema, out));
}
/**
 * Delegating {@link Encoder} used by the concurrent-modification tests.
 * writeArrayStart/writeMapStart first delegate (which writes the collection
 * size), then signal the test thread via {@code sizeWrittenSignal} and block
 * on {@code eltAddedSignal} until the test thread has mutated the collection.
 * Every other method is plain delegation to the wrapped encoder.
 */
static class TestEncoder extends Encoder {
  Encoder e; // the wrapped encoder all calls are forwarded to
  CountDownLatch sizeWrittenSignal; // counted down once the size has been written
  CountDownLatch eltAddedSignal; // awaited until the test thread mutates the collection

  TestEncoder(Encoder encoder, CountDownLatch sizeWrittenSignal, CountDownLatch eltAddedSignal) {
    this.e = encoder;
    this.sizeWrittenSignal = sizeWrittenSignal;
    this.eltAddedSignal = eltAddedSignal;
  }

  @Override
  public void writeArrayStart() throws IOException {
    e.writeArrayStart();
    // Tell the test thread the array size is out, then wait for its mutation.
    sizeWrittenSignal.countDown();
    try {
      eltAddedSignal.await();
    } catch (InterruptedException e) {
      // ignore
    }
  }

  @Override
  public void writeMapStart() throws IOException {
    e.writeMapStart();
    // Tell the test thread the map size is out, then wait for its mutation.
    sizeWrittenSignal.countDown();
    try {
      eltAddedSignal.await();
    } catch (InterruptedException e) {
      // ignore
    }
  }

  // ---- everything below is plain delegation to the wrapped encoder ----

  @Override
  public void flush() throws IOException {
    e.flush();
  }

  @Override
  public void writeNull() throws IOException {
    e.writeNull();
  }

  @Override
  public void writeBoolean(boolean b) throws IOException {
    e.writeBoolean(b);
  }

  @Override
  public void writeInt(int n) throws IOException {
    e.writeInt(n);
  }

  @Override
  public void writeLong(long n) throws IOException {
    e.writeLong(n);
  }

  @Override
  public void writeFloat(float f) throws IOException {
    e.writeFloat(f);
  }

  @Override
  public void writeDouble(double d) throws IOException {
    e.writeDouble(d);
  }

  @Override
  public void writeString(Utf8 utf8) throws IOException {
    e.writeString(utf8);
  }

  @Override
  public void writeBytes(ByteBuffer bytes) throws IOException {
    e.writeBytes(bytes);
  }

  @Override
  public void writeBytes(byte[] bytes, int start, int len) throws IOException {
    e.writeBytes(bytes, start, len);
  }

  @Override
  public void writeFixed(byte[] bytes, int start, int len) throws IOException {
    e.writeFixed(bytes, start, len);
  }

  @Override
  public void writeEnum(int en) throws IOException {
    e.writeEnum(en);
  }

  @Override
  public void setItemCount(long itemCount) throws IOException {
    e.setItemCount(itemCount);
  }

  @Override
  public void startItem() throws IOException {
    e.startItem();
  }

  @Override
  public void writeArrayEnd() throws IOException {
    e.writeArrayEnd();
  }

  @Override
  public void writeMapEnd() throws IOException {
    e.writeMapEnd();
  }

  @Override
  public void writeIndex(int unionIndex) throws IOException {
    e.writeIndex(unionIndex);
  }
}
/** A bare String is not accepted where a generic enum symbol is expected. */
@Test
void writeDoesNotAllowStringForGenericEnum() throws IOException {
  assertThrows(AvroTypeException.class, () -> {
    final String json = "{\"type\": \"record\", \"name\": \"recordWithEnum\"," + "\"fields\": [ "
        + "{\"name\": \"field\", \"type\": " + "{\"type\": \"enum\", \"name\": \"enum\", \"symbols\": "
        + "[\"ONE\",\"TWO\",\"THREE\"] " + "}" + "}" + "]}";
    Schema enumRecordSchema = new Schema.Parser().parse(json);
    GenericRecord rec = new GenericData.Record(enumRecordSchema);
    rec.put("field", "ONE");
    GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(enumRecordSchema);
    Encoder enc = EncoderFactory.get().jsonEncoder(enumRecordSchema, new ByteArrayOutputStream());
    datumWriter.write(rec, enc);
  });
}

/** Plain Java enum used to show that it is also rejected for a generic enum field. */
private enum AnEnum {
  ONE, TWO, THREE
}

/** A java.lang.Enum constant is likewise rejected for a generic enum field. */
@Test
void writeDoesNotAllowJavaEnumForGenericEnum() throws IOException {
  assertThrows(AvroTypeException.class, () -> {
    final String json = "{\"type\": \"record\", \"name\": \"recordWithEnum\"," + "\"fields\": [ "
        + "{\"name\": \"field\", \"type\": " + "{\"type\": \"enum\", \"name\": \"enum\", \"symbols\": "
        + "[\"ONE\",\"TWO\",\"THREE\"] " + "}" + "}" + "]}";
    Schema enumRecordSchema = new Schema.Parser().parse(json);
    GenericRecord rec = new GenericData.Record(enumRecordSchema);
    rec.put("field", AnEnum.ONE);
    GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(enumRecordSchema);
    Encoder enc = EncoderFactory.get().jsonEncoder(enumRecordSchema, new ByteArrayOutputStream());
    datumWriter.write(rec, enc);
  });
}
/** Writing the schema-declared default must succeed when "default": null is explicit. */
@Test
void writeFieldWithDefaultWithExplicitNullDefaultInSchema() throws Exception {
  writeObject(createRecordWithDefaultField(schemaWithExplicitNullDefault()));
}

/** Same as above, but the schema omits the explicit "default": null entry. */
@Test
void writeFieldWithDefaultWithoutExplicitNullDefaultInSchema() throws Exception {
  writeObject(createRecordWithDefaultField(schemaWithoutExplicitNullDefault()));
}
/**
 * Corrupting a deeply nested string field with null must produce an NPE whose
 * message spells out the full path to the field. Converted to assertThrows
 * for consistency with the rest of this class.
 */
@Test
void nestedNPEErrorClarity() throws Exception {
  GenericData.Record topLevelRecord = buildComplexRecord();
  @SuppressWarnings("unchecked")
  Map<String, GenericData.Record> map = (Map<String, GenericData.Record>) ((List<GenericData.Record>) ((GenericData.Record) topLevelRecord
      .get("unionField")).get("arrayField")).get(0).get("mapField");
  map.get("a").put("strField", null);
  NullPointerException expected = assertThrows(NullPointerException.class, () -> writeObject(topLevelRecord));
  assertTrue(
      expected.getMessage()
          .contains("RecordWithRequiredFields.unionField[UnionRecord].arrayField[0].mapField[\"a\"].strField"),
      "unexpected message " + expected.getMessage());
}
/**
 * A null map key must produce an NPE whose message names the map's full path.
 * Converted to assertThrows for consistency with the rest of this class.
 */
@Test
void nPEForMapKeyErrorClarity() throws Exception {
  GenericData.Record topLevelRecord = buildComplexRecord();
  @SuppressWarnings("unchecked")
  Map<String, GenericData.Record> map = (Map<String, GenericData.Record>) ((List<GenericData.Record>) ((GenericData.Record) topLevelRecord
      .get("unionField")).get("arrayField")).get(0).get("mapField");
  map.put(null, map.get("a")); // value is valid, but key is null
  NullPointerException expected = assertThrows(NullPointerException.class, () -> writeObject(topLevelRecord));
  assertTrue(
      expected.getMessage()
          .contains("null key in map at RecordWithRequiredFields.unionField[UnionRecord].arrayField[0].mapField"),
      "unexpected message " + expected.getMessage());
}
/**
 * A null datum for a bare non-nullable string must produce an NPE with a clear
 * message. Converted to assertThrows for consistency with this class.
 */
@Test
void shortPathNPEErrorClarity() throws Exception {
  NullPointerException expected = assertThrows(NullPointerException.class,
      () -> writeObject(Schema.create(Schema.Type.STRING), null));
  assertTrue(expected.getMessage().contains("null value for (non-nullable) string"),
      "unexpected message " + expected.getMessage());
}
/**
 * Putting an Integer into a deeply nested string field must produce a CCE
 * whose message spells out the full path. Converted to assertThrows for
 * consistency with the rest of this class.
 */
@Test
void nestedCCEErrorClarity() throws Exception {
  GenericData.Record topLevelRecord = buildComplexRecord();
  @SuppressWarnings("unchecked")
  Map<String, GenericData.Record> map = (Map<String, GenericData.Record>) ((List<GenericData.Record>) ((GenericData.Record) topLevelRecord
      .get("unionField")).get("arrayField")).get(0).get("mapField");
  map.get("a").put("strField", 42); // not a string
  ClassCastException expected = assertThrows(ClassCastException.class, () -> writeObject(topLevelRecord));
  assertTrue(
      expected.getMessage()
          .contains("RecordWithRequiredFields.unionField[UnionRecord].arrayField[0].mapField[\"a\"].strField"),
      "unexpected message " + expected.getMessage());
}
/**
 * A wrongly typed datum for a bare string schema must produce a CCE naming the
 * value, its class and the expected type. Converted to assertThrows.
 */
@Test
void shortPathCCEErrorClarity() throws Exception {
  ClassCastException expected = assertThrows(ClassCastException.class,
      () -> writeObject(Schema.create(Schema.Type.STRING), 42));
  assertTrue(expected.getMessage().contains("value 42 (a java.lang.Integer) cannot be cast to expected type string"),
      "unexpected message " + expected.getMessage());
}
/**
 * Putting a non-enum value into a deeply nested enum field must produce an
 * AvroTypeException naming both the full path and the offending value.
 * Converted to assertThrows for consistency with the rest of this class.
 */
@Test
void nestedATEErrorClarity() throws Exception {
  GenericData.Record topLevelRecord = buildComplexRecord();
  @SuppressWarnings("unchecked")
  Map<String, GenericData.Record> map = (Map<String, GenericData.Record>) ((List<GenericData.Record>) ((GenericData.Record) topLevelRecord
      .get("unionField")).get("arrayField")).get(0).get("mapField");
  map.get("a").put("enumField", 42); // not an enum
  AvroTypeException expected = assertThrows(AvroTypeException.class, () -> writeObject(topLevelRecord));
  assertTrue(
      expected.getMessage()
          .contains("RecordWithRequiredFields.unionField[UnionRecord].arrayField[0].mapField[\"a\"].enumField"),
      "unexpected message " + expected.getMessage());
  assertTrue(expected.getMessage().contains("42 (a java.lang.Integer) is not a MapRecordEnum"),
      "unexpected message " + expected.getMessage());
}
/**
 * Builds a fully populated record tree (record -> union -> array -> map ->
 * record) from RecordWithRequiredFields.avsc. The error-clarity tests corrupt
 * one leaf of this tree and assert that the resulting exception message names
 * the full path to it.
 */
private GenericData.Record buildComplexRecord() throws IOException {
  Schema schema = new Schema.Parser().parse(new File("../../../share/test/schemas/RecordWithRequiredFields.avsc"));
  GenericData.Record topLevelRecord = new GenericData.Record(schema);
  // Index 1 of the union's branches selects the record branch (UnionRecord).
  GenericData.Record unionRecord = new GenericData.Record(schema.getField("unionField").schema().getTypes().get(1));
  Schema arraySchema = unionRecord.getSchema().getField("arrayField").schema();
  GenericData.Record arrayRecord1 = new GenericData.Record(arraySchema.getElementType());
  GenericData.Record arrayRecord2 = new GenericData.Record(arraySchema.getElementType());
  GenericData.Array<GenericData.Record> array = new GenericData.Array<>(arraySchema,
      Arrays.asList(arrayRecord1, arrayRecord2));
  Schema mapRecordSchema = arraySchema.getElementType().getField("mapField").schema().getValueType();
  GenericData.Record mapRecordA = new GenericData.Record(mapRecordSchema);
  Schema mapRecordEnumSchema = mapRecordSchema.getField("enumField").schema();
  mapRecordA.put("enumField", new GenericData.EnumSymbol(mapRecordEnumSchema, "B"));
  mapRecordA.put("strField", "4");
  arrayRecord1.put("strField", "2");
  HashMap<String, GenericData.Record> map1 = new HashMap<>();
  // Both array elements share the same value record (mapRecordA), so
  // corrupting it through one path affects both entries.
  map1.put("a", mapRecordA);
  arrayRecord1.put("mapField", map1);
  arrayRecord2.put("strField", "2");
  HashMap<String, GenericData.Record> map2 = new HashMap<>();
  map2.put("a", mapRecordA);
  arrayRecord2.put("mapField", map2);
  unionRecord.put(unionRecord.getSchema().getField("strField").pos(), "1");
  unionRecord.put(unionRecord.getSchema().getField("arrayField").pos(), array); // the corruptible subtree
  topLevelRecord.put(topLevelRecord.getSchema().getField("strField").pos(), "0");
  topLevelRecord.put(topLevelRecord.getSchema().getField("unionField").pos(), unionRecord);
  return topLevelRecord;
}
/** Record schema with a nullable field "f" whose null default is spelled out. */
private Schema schemaWithExplicitNullDefault() {
  final String json = "{\"type\":\"record\",\"name\":\"my_record\",\"namespace\":\"mytest.namespace\",\"doc\":\"doc\","
      + "\"fields\":[{\"name\":\"f\",\"type\":[\"null\",\"string\"],\"doc\":\"field doc doc\", "
      + "\"default\":null}]}";
  return new Schema.Parser().parse(json);
}

/** Same record schema, but without the explicit "default": null entry. */
private Schema schemaWithoutExplicitNullDefault() {
  final String json = "{\"type\":\"record\",\"name\":\"my_record\",\"namespace\":\"mytest.namespace\",\"doc\":\"doc\","
      + "\"fields\":[{\"name\":\"f\",\"type\":[\"null\",\"string\"],\"doc\":\"field doc doc\"}]}";
  return new Schema.Parser().parse(json);
}
/** Serializes {@code datum} with its own schema; used by the error-clarity tests. */
private void writeObject(GenericRecord datum) throws Exception {
  writeObject(datum.getSchema(), datum);
}

/** Serializes {@code datum} as {@code schema} into a throwaway buffer. */
private void writeObject(Schema schema, Object datum) throws Exception {
  BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(new ByteArrayOutputStream(), null);
  GenericDatumWriter<Object> writer = new GenericDatumWriter<>(schema);
  writer.write(datum, encoder);
  encoder.flush();
}

/** Builds a record whose field "f" is set to the schema-declared default value. */
private GenericRecord createRecordWithDefaultField(Schema schema) {
  GenericRecord record = new GenericData.Record(schema);
  record.put("f", schema.getField("f").defaultVal());
  return record;
}
}
| 7,216 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/generic/TestGenericData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.generic;
import static org.apache.avro.TestCircularReferences.Reference;
import static org.apache.avro.TestCircularReferences.Referenceable;
import static org.junit.jupiter.api.Assertions.*;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Type;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.TestCircularReferences.ReferenceManager;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.io.BinaryData;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.util.Utf8;
import org.junit.jupiter.api.Test;
public class TestGenericData {
/** A record cannot be created without a schema. */
@Test
void recordConstructorNullSchema() throws Exception {
  assertThrows(AvroRuntimeException.class, () -> new GenericData.Record(null));
}

/** The schema handed to a record must be of type RECORD. */
@Test
void recordConstructorWrongSchema() throws Exception {
  assertThrows(AvroRuntimeException.class, () -> new GenericData.Record(Schema.create(Schema.Type.INT)));
}

/** An array cannot be created without a schema. */
@Test
void arrayConstructorNullSchema() throws Exception {
  assertThrows(AvroRuntimeException.class, () -> new GenericData.Array<>(1, null));
}

/** The schema handed to an array must be of type ARRAY. */
@Test
void arrayConstructorWrongSchema() throws Exception {
  assertThrows(AvroRuntimeException.class, () -> new GenericData.Array<>(1, Schema.create(Schema.Type.INT)));
}
/** A record schema whose fields were never set cannot back a record instance. */
@Test
void recordCreateEmptySchema() throws Exception {
  assertThrows(AvroRuntimeException.class,
      () -> new GenericData.Record(Schema.createRecord("schemaName", "schemaDoc", "namespace", false)));
}

/** getFields() on a record schema with unset fields must fail. */
@Test
void getEmptySchemaFields() throws Exception {
  assertThrows(AvroRuntimeException.class,
      () -> Schema.createRecord("schemaName", "schemaDoc", "namespace", false).getFields());
}

/** getField() on a record schema with unset fields must fail. */
@Test
void getEmptySchemaField() throws Exception {
  assertThrows(AvroRuntimeException.class,
      () -> Schema.createRecord("schemaName", "schemaDoc", "namespace", false).getField("foo"));
}
@Test
void recordPutInvalidField() throws Exception {
assertThrows(AvroRuntimeException.class, () -> {
Schema s = Schema.createRecord("schemaName", "schemaDoc", "namespace", false);
List<Schema.Field> fields = new ArrayList<>();
fields.add(new Schema.Field("someFieldName", s, "docs", null));
s.setFields(fields);
Record r = new GenericData.Record(s);
r.put("invalidFieldName", "someValue");
});
}
/** Make sure that even with nulls, hashCode() doesn't throw NPE. */
@Test
void testHashCode() {
GenericData.get().hashCode(null, Schema.create(Type.NULL));
GenericData.get().hashCode(null,
Schema.createUnion(Arrays.asList(Schema.create(Type.BOOLEAN), Schema.create(Type.STRING))));
List<CharSequence> stuff = new ArrayList<>();
stuff.add("string");
Schema schema = recordSchema();
GenericRecord r = new GenericData.Record(schema);
r.put(0, stuff);
GenericData.get().hashCode(r, schema);
}
@Test
void testEquals() {
Schema s = recordSchema();
GenericRecord r0 = new GenericData.Record(s);
GenericRecord r1 = new GenericData.Record(s);
GenericRecord r2 = new GenericData.Record(s);
Collection<CharSequence> l0 = new ArrayDeque<>();
List<CharSequence> l1 = new ArrayList<>();
GenericArray<CharSequence> l2 = new GenericData.Array<>(1, s.getFields().get(0).schema());
String foo = "foo";
l0.add(new StringBuilder(foo));
l1.add(foo);
l2.add(new Utf8(foo));
r0.put(0, l0);
r1.put(0, l1);
r2.put(0, l2);
assertEquals(r0, r1);
assertEquals(r0, r2);
assertEquals(r1, r2);
}
@Test
public void testMapKeyEqualsStringAndUtf8Compatibility() {
Field myMapField = new Field("my_map", Schema.createMap(Schema.create(Schema.Type.STRING)), null, null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(myMapField));
GenericRecord r0 = new GenericData.Record(schema);
GenericRecord r1 = new GenericData.Record(schema);
HashMap<CharSequence, String> pair1 = new HashMap<>();
pair1.put("keyOne", "valueOne");
r0.put("my_map", pair1);
HashMap<CharSequence, String> pair2 = new HashMap<>();
pair2.put(new Utf8("keyOne"), "valueOne");
r1.put("my_map", pair2);
assertEquals(r0, r1);
assertEquals(r1, r0);
}
@Test
public void testMapValuesEqualsStringAndUtf8Compatibility() {
Field myMapField = new Field("my_map", Schema.createMap(Schema.create(Schema.Type.STRING)), null, null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(myMapField));
GenericRecord r0 = new GenericData.Record(schema);
GenericRecord r1 = new GenericData.Record(schema);
HashMap<CharSequence, CharSequence> pair1 = new HashMap<>();
pair1.put("keyOne", "valueOne");
r0.put("my_map", pair1);
HashMap<CharSequence, CharSequence> pair2 = new HashMap<>();
pair2.put("keyOne", new Utf8("valueOne"));
r1.put("my_map", pair2);
assertEquals(r0, r1);
assertEquals(r1, r0);
}
@Test
public void testEqualsEmptyMaps() {
Field myMapField = new Field("my_map", Schema.createMap(Schema.create(Schema.Type.STRING)), null, null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(myMapField));
GenericRecord r0 = new GenericData.Record(schema);
r0.put("my_map", new HashMap<>());
GenericRecord r1 = new GenericData.Record(schema);
r1.put("my_map", new HashMap<>());
assertEquals(r0, r1);
assertEquals(r1, r0);
}
@Test
public void testEqualsEmptyMapAndNonEmptyMap() {
Field myMapField = new Field("my_map", Schema.createMap(Schema.create(Schema.Type.STRING)), null, null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(myMapField));
GenericRecord r0 = new GenericData.Record(schema);
r0.put("my_map", new HashMap<>());
GenericRecord r1 = new GenericData.Record(schema);
HashMap<CharSequence, CharSequence> pair1 = new HashMap<>();
pair1.put("keyOne", "valueOne");
r1.put("my_map", pair1);
assertNotEquals(r0, r1);
assertNotEquals(r1, r0);
}
@Test
public void testEqualsMapAndSubset() {
Field myMapField = new Field("my_map", Schema.createMap(Schema.create(Schema.Type.STRING)), null, null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(myMapField));
GenericRecord r0 = new GenericData.Record(schema);
HashMap<CharSequence, String> m1 = new HashMap<>();
m1.put("keyOne", "valueOne");
m1.put("keyTwo", "valueTwo");
r0.put("my_map", m1);
GenericRecord r1 = new GenericData.Record(schema);
HashMap<CharSequence, String> m2 = new HashMap<>();
m2.put("keyOne", "valueOne");
r1.put("my_map", m2);
assertNotEquals(r0, r1);
assertNotEquals(r1, r0);
}
@Test
public void testEqualsMapAndSameSizeMapWithDifferentKeys() {
Field myMapField = new Field("my_map", Schema.createMap(Schema.create(Schema.Type.STRING)), null, null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(myMapField));
GenericRecord r0 = new GenericData.Record(schema);
HashMap<CharSequence, String> m1 = new HashMap<>();
m1.put("keyOne", "valueOne");
r0.put("my_map", m1);
GenericRecord r1 = new GenericData.Record(schema);
HashMap<CharSequence, String> m2 = new HashMap<>();
m2.put("keyTwo", "valueTwo");
r1.put("my_map", m2);
assertNotEquals(r0, r1);
assertNotEquals(r1, r0);
}
@Test
public void testEqualsMapAndSameSizeMapWithDifferentValues() {
Field myMapField = new Field("my_map", Schema.createMap(Schema.create(Schema.Type.STRING)), null, null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(myMapField));
GenericRecord r0 = new GenericData.Record(schema);
HashMap<CharSequence, String> m1 = new HashMap<>();
m1.put("keyOne", "valueOne");
r0.put("my_map", m1);
GenericRecord r1 = new GenericData.Record(schema);
HashMap<CharSequence, String> m2 = new HashMap<>();
m2.put("keyOne", "valueTwo");
r1.put("my_map", m2);
assertNotEquals(r0, r1);
assertNotEquals(r1, r0);
}
@Test
public void testArrayValuesEqualsStringAndUtf8Compatibility() {
Field myArrayField = new Field("my_array", Schema.createArray(Schema.create(Schema.Type.STRING)), null, null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(myArrayField));
GenericRecord r0 = new GenericData.Record(schema);
GenericRecord r1 = new GenericData.Record(schema);
List<CharSequence> array1 = Arrays.asList("valueOne");
r0.put("my_array", array1);
List<CharSequence> array2 = Arrays.asList(new Utf8("valueOne"));
r1.put("my_array", array2);
assertEquals(r0, r1);
assertEquals(r1, r0);
}
private Schema recordSchema() {
List<Field> fields = new ArrayList<>();
fields.add(new Field("anArray", Schema.createArray(Schema.create(Type.STRING)), null, null));
Schema schema = Schema.createRecord("arrayFoo", "test", "mytest", false);
schema.setFields(fields);
return schema;
}
@Test
void equals2() {
Schema schema1 = Schema.createRecord("r", null, "x", false);
List<Field> fields1 = new ArrayList<>();
fields1.add(new Field("a", Schema.create(Schema.Type.STRING), null, null, Field.Order.IGNORE));
schema1.setFields(fields1);
// only differs in field order
Schema schema2 = Schema.createRecord("r", null, "x", false);
List<Field> fields2 = new ArrayList<>();
fields2.add(new Field("a", Schema.create(Schema.Type.STRING), null, null, Field.Order.ASCENDING));
schema2.setFields(fields2);
GenericRecord record1 = new GenericData.Record(schema1);
record1.put("a", "1");
GenericRecord record2 = new GenericData.Record(schema2);
record2.put("a", "2");
assertNotEquals(record2, record1);
assertNotEquals(record1, record2);
}
@Test
void recordGetFieldDoesntExist() throws Exception {
assertThrows(AvroRuntimeException.class, () -> {
Schema schema = Schema.createRecord("test", "doc", "test", false, Collections.EMPTY_LIST);
GenericData.Record record = new GenericData.Record(schema);
record.get("does not exist");
});
}
@Test
void arrayReversal() {
Schema schema = Schema.createArray(Schema.create(Schema.Type.INT));
GenericArray<Integer> forward = new GenericData.Array<>(10, schema);
GenericArray<Integer> backward = new GenericData.Array<>(10, schema);
for (int i = 0; i <= 9; i++) {
forward.add(i);
}
for (int i = 9; i >= 0; i--) {
backward.add(i);
}
forward.reverse();
assertEquals(forward, backward);
}
@Test
void arrayListInterface() {
Schema schema = Schema.createArray(Schema.create(Schema.Type.INT));
GenericArray<Integer> array = new GenericData.Array<>(1, schema);
array.add(99);
assertEquals(Integer.valueOf(99), array.get(0));
List<Integer> list = new ArrayList<>();
list.add(99);
assertEquals(array, list);
assertEquals(list, array);
assertEquals(list.hashCode(), array.hashCode());
try {
array.get(2);
fail("Expected IndexOutOfBoundsException getting index 2");
} catch (IndexOutOfBoundsException e) {
}
array.clear();
assertEquals(0, array.size());
try {
array.get(0);
fail("Expected IndexOutOfBoundsException getting index 0 after clear()");
} catch (IndexOutOfBoundsException e) {
}
}
@Test
void arrayAddAtLocation() {
Schema schema = Schema.createArray(Schema.create(Schema.Type.INT));
GenericArray<Integer> array = new GenericData.Array<>(6, schema);
array.clear();
for (int i = 0; i < 5; ++i)
array.add(i);
assertEquals(5, array.size());
array.add(0, 6);
assertEquals(Integer.valueOf(6), array.get(0));
assertEquals(6, array.size());
assertEquals(Integer.valueOf(0), array.get(1));
assertEquals(Integer.valueOf(4), array.get(5));
array.add(6, 7);
assertEquals(Integer.valueOf(7), array.get(6));
assertEquals(7, array.size());
assertEquals(Integer.valueOf(6), array.get(0));
assertEquals(Integer.valueOf(4), array.get(5));
array.add(1, 8);
assertEquals(Integer.valueOf(8), array.get(1));
assertEquals(Integer.valueOf(0), array.get(2));
assertEquals(Integer.valueOf(6), array.get(0));
assertEquals(8, array.size());
try {
array.get(9);
fail("Expected IndexOutOfBoundsException after adding elements");
} catch (IndexOutOfBoundsException e) {
}
}
@Test
void arrayRemove() {
Schema schema = Schema.createArray(Schema.create(Schema.Type.INT));
GenericArray<Integer> array = new GenericData.Array<>(10, schema);
array.clear();
for (int i = 0; i < 10; ++i)
array.add(i);
assertEquals(10, array.size());
assertEquals(Integer.valueOf(0), array.get(0));
assertEquals(Integer.valueOf(9), array.get(9));
array.remove(0);
assertEquals(9, array.size());
assertEquals(Integer.valueOf(1), array.get(0));
assertEquals(Integer.valueOf(2), array.get(1));
assertEquals(Integer.valueOf(9), array.get(8));
// Test boundary errors.
try {
array.get(9);
fail("Expected IndexOutOfBoundsException after removing an element");
} catch (IndexOutOfBoundsException e) {
}
try {
array.set(9, 99);
fail("Expected IndexOutOfBoundsException after removing an element");
} catch (IndexOutOfBoundsException e) {
}
try {
array.remove(9);
fail("Expected IndexOutOfBoundsException after removing an element");
} catch (IndexOutOfBoundsException e) {
}
// Test that we can still remove for properly sized arrays, and the rval
assertEquals(Integer.valueOf(9), array.remove(8));
assertEquals(8, array.size());
// Test insertion after remove
array.add(88);
assertEquals(Integer.valueOf(88), array.get(8));
}
@Test
void arraySet() {
Schema schema = Schema.createArray(Schema.create(Schema.Type.INT));
GenericArray<Integer> array = new GenericData.Array<>(10, schema);
array.clear();
for (int i = 0; i < 10; ++i)
array.add(i);
assertEquals(10, array.size());
assertEquals(Integer.valueOf(0), array.get(0));
assertEquals(Integer.valueOf(5), array.get(5));
assertEquals(Integer.valueOf(5), array.set(5, 55));
assertEquals(10, array.size());
assertEquals(Integer.valueOf(55), array.get(5));
}
@Test
void toStringIsJson() throws JsonParseException, IOException {
Field stringField = new Field("string", Schema.create(Type.STRING), null, null);
Field enumField = new Field("enum", Schema.createEnum("my_enum", "doc", null, Arrays.asList("a", "b", "c")), null,
null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(stringField, enumField));
GenericRecord r = new GenericData.Record(schema);
// \u2013 is EN DASH
r.put(stringField.name(), "hello\nthere\"\tyou\u2013}");
r.put(enumField.name(), new GenericData.EnumSymbol(enumField.schema(), "a"));
String json = r.toString();
JsonFactory factory = new JsonFactory();
JsonParser parser = factory.createParser(json);
ObjectMapper mapper = new ObjectMapper();
// will throw exception if string is not parsable json
mapper.readTree(parser);
}
@Test
void mapWithNonStringKeyToStringIsJson() throws Exception {
Schema intMapSchema = new Schema.Parser()
.parse("{\"type\": \"map\", \"values\": \"string\", \"java-key-class\" : \"java.lang.Integer\"}");
Field intMapField = new Field("intMap", Schema.createMap(intMapSchema), null, null);
Schema decMapSchema = new Schema.Parser()
.parse("{\"type\": \"map\", \"values\": \"string\", \"java-key-class\" : \"java.math.BigDecimal\"}");
Field decMapField = new Field("decMap", Schema.createMap(decMapSchema), null, null);
Schema boolMapSchema = new Schema.Parser()
.parse("{\"type\": \"map\", \"values\": \"string\", \"java-key-class\" : \"java.lang.Boolean\"}");
Field boolMapField = new Field("boolMap", Schema.createMap(boolMapSchema), null, null);
Schema fileMapSchema = new Schema.Parser()
.parse("{\"type\": \"map\", \"values\": \"string\", \"java-key-class\" : \"java.io.File\"}");
Field fileMapField = new Field("fileMap", Schema.createMap(fileMapSchema), null, null);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
schema.setFields(Arrays.asList(intMapField, decMapField, boolMapField, fileMapField));
HashMap<Integer, String> intPair = new HashMap<>();
intPair.put(1, "one");
intPair.put(2, "two");
HashMap<java.math.BigDecimal, String> decPair = new HashMap<>();
decPair.put(java.math.BigDecimal.valueOf(1), "one");
decPair.put(java.math.BigDecimal.valueOf(2), "two");
HashMap<Boolean, String> boolPair = new HashMap<>();
boolPair.put(true, "isTrue");
boolPair.put(false, "isFalse");
boolPair.put(null, null);
HashMap<java.io.File, String> filePair = new HashMap<>();
java.io.File f = new java.io.File(getClass().getResource("/SchemaBuilder.avsc").toURI());
filePair.put(f, "File");
GenericRecord r = new GenericData.Record(schema);
r.put(intMapField.name(), intPair);
r.put(decMapField.name(), decPair);
r.put(boolMapField.name(), boolPair);
r.put(fileMapField.name(), filePair);
String json = r.toString();
JsonFactory factory = new JsonFactory();
JsonParser parser = factory.createParser(json);
ObjectMapper mapper = new ObjectMapper();
// will throw exception if string is not parsable json
mapper.readTree(parser);
}
@Test
void toStringEscapesControlCharsInBytes() throws Exception {
GenericData data = GenericData.get();
ByteBuffer bytes = ByteBuffer.wrap(new byte[] { 'a', '\n', 'b' });
assertEquals("\"a\\nb\"", data.toString(bytes));
assertEquals("\"a\\nb\"", data.toString(bytes));
}
@Test
void toStringEscapesControlCharsInMap() {
GenericData data = GenericData.get();
Map<String, String> m = new HashMap<>();
m.put("a\n\\b", "a\n\\b");
assertEquals("{\"a\\n\\\\b\": \"a\\n\\\\b\"}", data.toString(m));
}
@Test
void toStringFixed() throws Exception {
GenericData data = GenericData.get();
assertEquals("[97, 10, 98]",
data.toString(new GenericData.Fixed(Schema.createFixed("test", null, null, 3), new byte[] { 'a', '\n', 'b' })));
}
@Test
void toStringDoesNotEscapeForwardSlash() throws Exception {
GenericData data = GenericData.get();
assertEquals("\"/\"", data.toString("/"));
}
@Test
void toStringNanInfinity() throws Exception {
GenericData data = GenericData.get();
assertEquals("\"Infinity\"", data.toString(Float.POSITIVE_INFINITY));
assertEquals("\"-Infinity\"", data.toString(Float.NEGATIVE_INFINITY));
assertEquals("\"NaN\"", data.toString(Float.NaN));
assertEquals("\"Infinity\"", data.toString(Double.POSITIVE_INFINITY));
assertEquals("\"-Infinity\"", data.toString(Double.NEGATIVE_INFINITY));
assertEquals("\"NaN\"", data.toString(Double.NaN));
}
@Test
void toStringConvertsDatesAsStrings() throws Exception {
GenericData data = GenericData.get();
assertEquals("\"1961-04-12T06:07:10Z\"", data.toString(Instant.parse("1961-04-12T06:07:10Z")));
assertEquals("\"1961-04-12\"", data.toString(LocalDate.parse("1961-04-12")));
assertEquals("\"1961-04-12T06:07:10\"", data.toString(LocalDateTime.parse("1961-04-12T06:07:10")));
assertEquals("\"10:10:10\"", data.toString(LocalTime.parse("10:10:10")));
}
@Test
void ToStringConvertsUuidsAsStrings() throws Exception {
GenericData data = GenericData.get();
assertEquals("\"abf2f1e8-cece-4fdc-290a-babaca09ec74\"",
data.toString(UUID.fromString("abf2f1e8-cece-4fdc-290a-babaca09ec74")));
}
@Test
void compare() {
// Prepare a schema for testing.
Field integerField = new Field("test", Schema.create(Type.INT), null, null);
List<Field> fields = new ArrayList<>();
fields.add(integerField);
Schema record = Schema.createRecord("test", null, null, false);
record.setFields(fields);
ByteArrayOutputStream b1 = new ByteArrayOutputStream(5);
ByteArrayOutputStream b2 = new ByteArrayOutputStream(5);
BinaryEncoder b1Enc = EncoderFactory.get().binaryEncoder(b1, null);
BinaryEncoder b2Enc = EncoderFactory.get().binaryEncoder(b2, null);
// Prepare two different datums
Record testDatum1 = new Record(record);
testDatum1.put(0, 1);
Record testDatum2 = new Record(record);
testDatum2.put(0, 2);
GenericDatumWriter<Record> gWriter = new GenericDatumWriter<>(record);
Integer start1 = 0, start2 = 0;
try {
// Write two datums in each stream
// and get the offset length after the first write in each.
gWriter.write(testDatum1, b1Enc);
b1Enc.flush();
start1 = b1.size();
gWriter.write(testDatum1, b1Enc);
b1Enc.flush();
b1.close();
gWriter.write(testDatum2, b2Enc);
b2Enc.flush();
start2 = b2.size();
gWriter.write(testDatum2, b2Enc);
b2Enc.flush();
b2.close();
// Compare to check if offset-based compare works right.
assertEquals(-1, BinaryData.compare(b1.toByteArray(), start1, b2.toByteArray(), start2, record));
} catch (IOException e) {
fail("IOException while writing records to output stream.");
}
}
@Test
void enumCompare() {
Schema s = Schema.createEnum("Kind", null, null, Arrays.asList("Z", "Y", "X"));
GenericEnumSymbol z = new GenericData.EnumSymbol(s, "Z");
GenericEnumSymbol z2 = new GenericData.EnumSymbol(s, "Z");
assertEquals(0, z.compareTo(z2));
GenericEnumSymbol y = new GenericData.EnumSymbol(s, "Y");
assertTrue(y.compareTo(z) > 0);
assertTrue(z.compareTo(y) < 0);
}
@Test
void byteBufferDeepCopy() {
// Test that a deep copy of a byte buffer respects the byte buffer
// limits and capacity.
byte[] buffer_value = { 0, 1, 2, 3, 0, 0, 0 };
ByteBuffer buffer = ByteBuffer.wrap(buffer_value, 1, 4);
Schema schema = Schema.createRecord("my_record", "doc", "mytest", false);
Field byte_field = new Field("bytes", Schema.create(Type.BYTES), null, null);
schema.setFields(Collections.singletonList(byte_field));
GenericRecord record = new GenericData.Record(schema);
record.put(byte_field.name(), buffer);
GenericRecord copy = GenericData.get().deepCopy(schema, record);
ByteBuffer buffer_copy = (ByteBuffer) copy.get(byte_field.name());
assertEquals(buffer, buffer_copy);
}
@Test
void validateNullableEnum() {
List<Schema> unionTypes = new ArrayList<>();
Schema schema;
Schema nullSchema = Schema.create(Type.NULL);
Schema enumSchema = Schema.createEnum("AnEnum", null, null, Arrays.asList("X", "Y", "Z"));
GenericEnumSymbol w = new GenericData.EnumSymbol(enumSchema, "W");
GenericEnumSymbol x = new GenericData.EnumSymbol(enumSchema, "X");
GenericEnumSymbol y = new GenericData.EnumSymbol(enumSchema, "Y");
GenericEnumSymbol z = new GenericData.EnumSymbol(enumSchema, "Z");
// null is first
unionTypes.clear();
unionTypes.add(nullSchema);
unionTypes.add(enumSchema);
schema = Schema.createUnion(unionTypes);
assertTrue(GenericData.get().validate(schema, z));
assertTrue(GenericData.get().validate(schema, y));
assertTrue(GenericData.get().validate(schema, x));
assertFalse(GenericData.get().validate(schema, w));
assertTrue(GenericData.get().validate(schema, null));
// null is last
unionTypes.clear();
unionTypes.add(enumSchema);
unionTypes.add(nullSchema);
schema = Schema.createUnion(unionTypes);
assertTrue(GenericData.get().validate(schema, z));
assertTrue(GenericData.get().validate(schema, y));
assertTrue(GenericData.get().validate(schema, x));
assertFalse(GenericData.get().validate(schema, w));
assertTrue(GenericData.get().validate(schema, null));
}
private enum anEnum {
ONE, TWO, THREE
}
@Test
void validateRequiresGenericSymbolForEnumSchema() {
final Schema schema = Schema.createEnum("my_enum", "doc", "namespace", Arrays.asList("ONE", "TWO", "THREE"));
final GenericData gd = GenericData.get();
/* positive cases */
assertTrue(gd.validate(schema, new GenericData.EnumSymbol(schema, "ONE")));
assertTrue(gd.validate(schema, new GenericData.EnumSymbol(schema, anEnum.ONE)));
/* negative cases */
assertFalse(gd.validate(schema, "ONE"), "We don't expect GenericData to allow a String datum for an enum schema");
assertFalse(gd.validate(schema, anEnum.ONE), "We don't expect GenericData to allow a Java Enum for an enum schema");
}
@Test
void validateUnion() {
Schema type1Schema = SchemaBuilder.record("Type1").fields().requiredString("myString").requiredInt("myInt")
.endRecord();
Schema type2Schema = SchemaBuilder.record("Type2").fields().requiredString("myString").endRecord();
Schema unionSchema = SchemaBuilder.unionOf().type(type1Schema).and().type(type2Schema).endUnion();
GenericRecord record = new GenericData.Record(type2Schema);
record.put("myString", "myValue");
assertTrue(GenericData.get().validate(unionSchema, record));
}
/*
* The toString has a detection for circular references to abort. This detection
* has the risk of detecting that same value as being a circular reference. For
* Record, Map and Array this is correct, for the rest is is not.
*/
@Test
void toStringSameValues() throws IOException {
List<Field> fields = new ArrayList<>();
fields.add(new Field("nullstring1", Schema.create(Type.STRING), null, null));
fields.add(new Field("nullstring2", Schema.create(Type.STRING), null, null));
fields.add(new Field("string1", Schema.create(Type.STRING), null, null));
fields.add(new Field("string2", Schema.create(Type.STRING), null, null));
fields.add(new Field("bytes1", Schema.create(Type.BYTES), null, null));
fields.add(new Field("bytes2", Schema.create(Type.BYTES), null, null));
fields.add(new Field("int1", Schema.create(Type.INT), null, null));
fields.add(new Field("int2", Schema.create(Type.INT), null, null));
fields.add(new Field("long1", Schema.create(Type.LONG), null, null));
fields.add(new Field("long2", Schema.create(Type.LONG), null, null));
fields.add(new Field("float1", Schema.create(Type.FLOAT), null, null));
fields.add(new Field("float2", Schema.create(Type.FLOAT), null, null));
fields.add(new Field("double1", Schema.create(Type.DOUBLE), null, null));
fields.add(new Field("double2", Schema.create(Type.DOUBLE), null, null));
fields.add(new Field("boolean1", Schema.create(Type.BOOLEAN), null, null));
fields.add(new Field("boolean2", Schema.create(Type.BOOLEAN), null, null));
List<String> enumValues = new ArrayList<>();
enumValues.add("One");
enumValues.add("Two");
Schema enumSchema = Schema.createEnum("myEnum", null, null, enumValues);
fields.add(new Field("enum1", enumSchema, null, null));
fields.add(new Field("enum2", enumSchema, null, null));
Schema recordSchema = SchemaBuilder.record("aRecord").fields().requiredString("myString").endRecord();
fields.add(new Field("record1", recordSchema, null, null));
fields.add(new Field("record2", recordSchema, null, null));
Schema arraySchema = Schema.createArray(Schema.create(Type.STRING));
fields.add(new Field("array1", arraySchema, null, null));
fields.add(new Field("array2", arraySchema, null, null));
Schema mapSchema = Schema.createMap(Schema.create(Type.STRING));
fields.add(new Field("map1", mapSchema, null, null));
fields.add(new Field("map2", mapSchema, null, null));
Schema schema = Schema.createRecord("Foo", "test", "mytest", false);
schema.setFields(fields);
Record testRecord = new Record(schema);
testRecord.put("nullstring1", null);
testRecord.put("nullstring2", null);
String fortyTwo = "42";
testRecord.put("string1", fortyTwo);
testRecord.put("string2", fortyTwo);
testRecord.put("bytes1", 0x42);
testRecord.put("bytes2", 0x42);
testRecord.put("int1", 42);
testRecord.put("int2", 42);
testRecord.put("long1", 42L);
testRecord.put("long2", 42L);
testRecord.put("float1", 42F);
testRecord.put("float2", 42F);
testRecord.put("double1", 42D);
testRecord.put("double2", 42D);
testRecord.put("boolean1", true);
testRecord.put("boolean2", true);
testRecord.put("enum1", "One");
testRecord.put("enum2", "One");
GenericRecord record = new GenericData.Record(recordSchema);
record.put("myString", "42");
testRecord.put("record1", record);
testRecord.put("record2", record);
GenericArray<String> array = new GenericData.Array<>(1, arraySchema);
array.clear();
array.add("42");
testRecord.put("array1", array);
testRecord.put("array2", array);
Map<String, String> map = new HashMap<>();
map.put("42", "42");
testRecord.put("map1", map);
testRecord.put("map2", map);
String testString = testRecord.toString();
assertFalse(testString.contains("CIRCULAR REFERENCE"),
"Record with duplicated values results in wrong 'toString()'");
}
// Test copied from Apache Parquet:
// org.apache.parquet.avro.TestCircularReferences
@Test
void toStringRecursive() throws IOException {
ReferenceManager manager = new ReferenceManager();
GenericData model = new GenericData();
model.addLogicalTypeConversion(manager.getTracker());
model.addLogicalTypeConversion(manager.getHandler());
Schema parentSchema = Schema.createRecord("Parent", null, null, false);
Schema placeholderSchema = Schema.createRecord("Placeholder", null, null, false);
List<Schema.Field> placeholderFields = new ArrayList<>();
placeholderFields.add( // at least one field is needed to be a valid schema
new Schema.Field("id", Schema.create(Schema.Type.LONG), null, null));
placeholderSchema.setFields(placeholderFields);
Referenceable idRef = new Referenceable("id");
Schema parentRefSchema = Schema.createUnion(Schema.create(Schema.Type.NULL), Schema.create(Schema.Type.LONG),
idRef.addToSchema(placeholderSchema));
Reference parentRef = new Reference("parent");
List<Schema.Field> childFields = new ArrayList<>();
childFields.add(new Schema.Field("c", Schema.create(Schema.Type.STRING), null, null));
childFields.add(new Schema.Field("parent", parentRefSchema, null, null));
Schema childSchema = parentRef.addToSchema(Schema.createRecord("Child", null, null, false, childFields));
List<Schema.Field> parentFields = new ArrayList<>();
parentFields.add(new Schema.Field("id", Schema.create(Schema.Type.LONG), null, null));
parentFields.add(new Schema.Field("p", Schema.create(Schema.Type.STRING), null, null));
parentFields.add(new Schema.Field("child", childSchema, null, null));
parentSchema.setFields(parentFields);
Schema schema = idRef.addToSchema(parentSchema);
Record parent = new Record(schema);
parent.put("id", 1L);
parent.put("p", "parent data!");
Record child = new Record(childSchema);
child.put("c", "child data!");
child.put("parent", parent);
parent.put("child", child);
try {
assertNotNull(parent.toString()); // This should not fail with an infinite recursion (StackOverflowError)
} catch (StackOverflowError e) {
fail("StackOverflowError occurred");
}
}
/**
* check that GenericArray.reset() retains reusable elements and that
* GenericArray.prune() cleans them up properly.
*/
@Test
void genericArrayPeek() {
Schema elementSchema = SchemaBuilder.record("element").fields().requiredString("value").endRecord();
Schema arraySchema = Schema.createArray(elementSchema);
GenericRecord record = new GenericData.Record(elementSchema);
record.put("value", "string");
GenericArray<GenericRecord> list = new GenericData.Array<>(1, arraySchema);
list.add(record);
list.reset();
assertTrue(record == list.peek());
list.prune();
assertNull(list.peek());
}
}
| 7,217 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/data/TestTimeConversions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.data;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import org.apache.avro.Conversion;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.data.TimeConversions.DateConversion;
import org.apache.avro.data.TimeConversions.TimeMicrosConversion;
import org.apache.avro.data.TimeConversions.TimeMillisConversion;
import org.apache.avro.data.TimeConversions.TimestampMicrosConversion;
import org.apache.avro.data.TimeConversions.TimestampMillisConversion;
import org.apache.avro.reflect.ReflectData;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
public class TestTimeConversions {

  public static Schema DATE_SCHEMA;
  public static Schema TIME_MILLIS_SCHEMA;
  public static Schema TIME_MICROS_SCHEMA;
  public static Schema TIMESTAMP_MILLIS_SCHEMA;
  public static Schema TIMESTAMP_MICROS_SCHEMA;

  /** Builds one schema per time-related logical type exercised by the tests below. */
  @BeforeAll
  public static void createSchemas() {
    TestTimeConversions.DATE_SCHEMA = LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT));
    TestTimeConversions.TIME_MILLIS_SCHEMA = LogicalTypes.timeMillis().addToSchema(Schema.create(Schema.Type.INT));
    TestTimeConversions.TIME_MICROS_SCHEMA = LogicalTypes.timeMicros().addToSchema(Schema.create(Schema.Type.LONG));
    TestTimeConversions.TIMESTAMP_MILLIS_SCHEMA = LogicalTypes.timestampMillis()
        .addToSchema(Schema.create(Schema.Type.LONG));
    TestTimeConversions.TIMESTAMP_MICROS_SCHEMA = LogicalTypes.timestampMicros()
        .addToSchema(Schema.create(Schema.Type.LONG));
  }

  @Test
  void dateConversion() throws Exception {
    DateConversion conversion = new DateConversion();
    LocalDate Jan_6_1970 = LocalDate.of(1970, 1, 6); // 5
    LocalDate Jan_1_1970 = LocalDate.of(1970, 1, 1); // 0
    LocalDate Dec_27_1969 = LocalDate.of(1969, 12, 27); // -5
    assertEquals(5, (int) conversion.toInt(Jan_6_1970, DATE_SCHEMA, LogicalTypes.date()), "6 Jan 1970 should be 5");
    assertEquals(0, (int) conversion.toInt(Jan_1_1970, DATE_SCHEMA, LogicalTypes.date()), "1 Jan 1970 should be 0");
    assertEquals(-5, (int) conversion.toInt(Dec_27_1969, DATE_SCHEMA, LogicalTypes.date()), "27 Dec 1969 should be -5");
    assertEquals(conversion.fromInt(5, DATE_SCHEMA, LogicalTypes.date()), Jan_6_1970, "6 Jan 1970 should be 5");
    assertEquals(conversion.fromInt(0, DATE_SCHEMA, LogicalTypes.date()), Jan_1_1970, "1 Jan 1970 should be 0");
    assertEquals(conversion.fromInt(-5, DATE_SCHEMA, LogicalTypes.date()), Dec_27_1969, "27 Dec 1969 should be -5");
  }

  @Test
  void timeMillisConversion() {
    TimeMillisConversion conversion = new TimeMillisConversion();
    LocalTime oneAM = LocalTime.of(1, 0);
    LocalTime afternoon = LocalTime.of(15, 14, 15, 926_000_000);
    int afternoonMillis = ((15 * 60 + 14) * 60 + 15) * 1000 + 926;
    assertEquals(0, (int) conversion.toInt(LocalTime.MIDNIGHT, TIME_MILLIS_SCHEMA, LogicalTypes.timeMillis()),
        "Midnight should be 0");
    assertEquals(3_600_000, (int) conversion.toInt(oneAM, TIME_MILLIS_SCHEMA, LogicalTypes.timeMillis()),
        "01:00 should be 3,600,000");
    assertEquals(afternoonMillis, (int) conversion.toInt(afternoon, TIME_MILLIS_SCHEMA, LogicalTypes.timeMillis()),
        "15:14:15.926 should be " + afternoonMillis);
    assertEquals(LocalTime.MIDNIGHT, conversion.fromInt(0, TIME_MILLIS_SCHEMA, LogicalTypes.timeMillis()),
        "Midnight should be 0");
    assertEquals(oneAM, conversion.fromInt(3600000, TIME_MILLIS_SCHEMA, LogicalTypes.timeMillis()),
        "01:00 should be 3,600,000");
    assertEquals(afternoon, conversion.fromInt(afternoonMillis, TIME_MILLIS_SCHEMA, LogicalTypes.timeMillis()),
        "15:14:15.926 should be " + afternoonMillis);
  }

  @Test
  void timeMicrosConversion() throws Exception {
    TimeMicrosConversion conversion = new TimeMicrosConversion();
    LocalTime oneAM = LocalTime.of(1, 0);
    LocalTime afternoon = LocalTime.of(15, 14, 15, 926_551_000);
    long afternoonMicros = ((long) (15 * 60 + 14) * 60 + 15) * 1_000_000 + 926_551;
    assertEquals(LocalTime.MIDNIGHT, conversion.fromLong(0L, TIME_MICROS_SCHEMA, LogicalTypes.timeMicros()),
        "Midnight should be 0");
    assertEquals(oneAM, conversion.fromLong(3_600_000_000L, TIME_MICROS_SCHEMA, LogicalTypes.timeMicros()),
        "01:00 should be 3,600,000,000");
    assertEquals(afternoon, conversion.fromLong(afternoonMicros, TIME_MICROS_SCHEMA, LogicalTypes.timeMicros()),
        "15:14:15.926551 should be " + afternoonMicros);
    assertEquals(0, (long) conversion.toLong(LocalTime.MIDNIGHT, TIME_MICROS_SCHEMA, LogicalTypes.timeMicros()),
        "Midnight should be 0");
    assertEquals(3_600_000_000L, (long) conversion.toLong(oneAM, TIME_MICROS_SCHEMA, LogicalTypes.timeMicros()),
        "01:00 should be 3,600,000,000");
    assertEquals(afternoonMicros, (long) conversion.toLong(afternoon, TIME_MICROS_SCHEMA, LogicalTypes.timeMicros()),
        "15:14:15.926551 should be " + afternoonMicros);
  }

  @Test
  void timestampMillisConversion() throws Exception {
    TimestampMillisConversion conversion = new TimestampMillisConversion();
    long nowInstant = Instant.now().toEpochMilli(); // ms precision
    // round trip
    Instant now = conversion.fromLong(nowInstant, TIMESTAMP_MILLIS_SCHEMA, LogicalTypes.timestampMillis());
    long roundTrip = conversion.toLong(now, TIMESTAMP_MILLIS_SCHEMA, LogicalTypes.timestampMillis());
    assertEquals(nowInstant, roundTrip, "Round-trip conversion should work");
    long May_28_2015_21_46_53_221_instant = 1432849613221L;
    Instant May_28_2015_21_46_53_221 = ZonedDateTime.of(2015, 5, 28, 21, 46, 53, 221_000_000, ZoneOffset.UTC)
        .toInstant();
    // known dates from https://www.epochconverter.com/
    // > Epoch
    assertEquals(May_28_2015_21_46_53_221,
        conversion.fromLong(May_28_2015_21_46_53_221_instant, TIMESTAMP_MILLIS_SCHEMA, LogicalTypes.timestampMillis()),
        "Known date should be correct");
    assertEquals(May_28_2015_21_46_53_221_instant,
        (long) conversion.toLong(May_28_2015_21_46_53_221, TIMESTAMP_MILLIS_SCHEMA, LogicalTypes.timestampMillis()),
        "Known date should be correct");
    // Epoch
    assertEquals(Instant.EPOCH, conversion.fromLong(0L, TIMESTAMP_MILLIS_SCHEMA, LogicalTypes.timestampMillis()),
        "1970-01-01 should be 0");
    assertEquals(0L, (long) conversion.toLong(ZonedDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC).toInstant(),
        TIMESTAMP_MILLIS_SCHEMA, LogicalTypes.timestampMillis()), "1970-01-01 should be 0");
    // < Epoch
    long Jul_01_1969_12_00_00_123_instant = -15854400000L + 123;
    Instant Jul_01_1969_12_00_00_123 = ZonedDateTime.of(1969, 7, 1, 12, 0, 0, 123_000_000, ZoneOffset.UTC).toInstant();
    assertEquals(Jul_01_1969_12_00_00_123,
        conversion.fromLong(Jul_01_1969_12_00_00_123_instant, TIMESTAMP_MILLIS_SCHEMA, LogicalTypes.timestampMillis()),
        "Pre 1970 date should be correct");
    assertEquals(Jul_01_1969_12_00_00_123_instant,
        (long) conversion.toLong(Jul_01_1969_12_00_00_123, TIMESTAMP_MILLIS_SCHEMA, LogicalTypes.timestampMillis()),
        "Pre 1970 date should be correct");
  }

  @Test
  void timestampMicrosConversion() throws Exception {
    TimestampMicrosConversion conversion = new TimestampMicrosConversion();
    // FIX: this test previously passed TIMESTAMP_MILLIS_SCHEMA and
    // LogicalTypes.timestampMillis() to the micros conversion (a copy-paste
    // left over from timestampMillisConversion). The conversion ignores those
    // arguments today, so the assertions passed, but the test documented the
    // wrong contract. Use the micros schema/logical type throughout.
    // known dates from https://www.epochconverter.com/
    // > Epoch
    long May_28_2015_21_46_53_221_843_instant = 1432849613221L * 1000 + 843;
    Instant May_28_2015_21_46_53_221_843 = ZonedDateTime.of(2015, 5, 28, 21, 46, 53, 221_843_000, ZoneOffset.UTC)
        .toInstant();
    assertEquals(May_28_2015_21_46_53_221_843, conversion.fromLong(May_28_2015_21_46_53_221_843_instant,
        TIMESTAMP_MICROS_SCHEMA, LogicalTypes.timestampMicros()), "Known date should be correct");
    assertEquals(May_28_2015_21_46_53_221_843_instant,
        (long) conversion.toLong(May_28_2015_21_46_53_221_843, TIMESTAMP_MICROS_SCHEMA, LogicalTypes.timestampMicros()),
        "Known date should be correct");
    // Epoch
    assertEquals(Instant.EPOCH, conversion.fromLong(0L, TIMESTAMP_MICROS_SCHEMA, LogicalTypes.timestampMicros()),
        "1970-01-01 should be 0");
    assertEquals(0L, (long) conversion.toLong(ZonedDateTime.ofInstant(Instant.EPOCH, ZoneOffset.UTC).toInstant(),
        TIMESTAMP_MICROS_SCHEMA, LogicalTypes.timestampMicros()), "1970-01-01 should be 0");
    // < Epoch
    long Jul_01_1969_12_00_00_000_123_instant = -15854400000L * 1000 + 123;
    Instant Jul_01_1969_12_00_00_000_123 = ZonedDateTime.of(1969, 7, 1, 12, 0, 0, 123_000, ZoneOffset.UTC).toInstant();
    assertEquals(Jul_01_1969_12_00_00_000_123, conversion.fromLong(Jul_01_1969_12_00_00_000_123_instant,
        TIMESTAMP_MICROS_SCHEMA, LogicalTypes.timestampMicros()), "Pre 1970 date should be correct");
    assertEquals(Jul_01_1969_12_00_00_000_123_instant,
        (long) conversion.toLong(Jul_01_1969_12_00_00_000_123, TIMESTAMP_MICROS_SCHEMA, LogicalTypes.timestampMicros()),
        "Pre 1970 date should be correct");
  }

  @Test
  void dynamicSchemaWithDateConversion() throws ClassNotFoundException {
    Schema schema = getReflectedSchemaByName("java.time.LocalDate", new TimeConversions.DateConversion());
    assertEquals(DATE_SCHEMA, schema, "Reflected schema should be logicalType date");
  }

  @Test
  void dynamicSchemaWithTimeConversion() throws ClassNotFoundException {
    Schema schema = getReflectedSchemaByName("java.time.LocalTime", new TimeConversions.TimeMillisConversion());
    assertEquals(TIME_MILLIS_SCHEMA, schema, "Reflected schema should be logicalType timeMillis");
  }

  @Test
  void dynamicSchemaWithTimeMicrosConversion() throws ClassNotFoundException {
    Schema schema = getReflectedSchemaByName("java.time.LocalTime", new TimeConversions.TimeMicrosConversion());
    assertEquals(TIME_MICROS_SCHEMA, schema, "Reflected schema should be logicalType timeMicros");
  }

  @Test
  void dynamicSchemaWithDateTimeConversion() throws ClassNotFoundException {
    Schema schema = getReflectedSchemaByName("java.time.Instant", new TimeConversions.TimestampMillisConversion());
    assertEquals(TIMESTAMP_MILLIS_SCHEMA, schema, "Reflected schema should be logicalType timestampMillis");
  }

  @Test
  void dynamicSchemaWithDateTimeMicrosConversion() throws ClassNotFoundException {
    Schema schema = getReflectedSchemaByName("java.time.Instant", new TimeConversions.TimestampMicrosConversion());
    assertEquals(TIMESTAMP_MICROS_SCHEMA, schema, "Reflected schema should be logicalType timestampMicros");
  }

  /**
   * Returns the ReflectData-derived schema for the named class after registering
   * the given logical-type conversion on a fresh model.
   */
  private Schema getReflectedSchemaByName(String className, Conversion<?> conversion) throws ClassNotFoundException {
    // one argument: a fully qualified class name
    Class<?> cls = Class.forName(className);
    // get the reflected schema for the given class
    ReflectData model = new ReflectData();
    model.addLogicalTypeConversion(conversion);
    return model.getSchema(cls);
  }
}
| 7,218 |
0 | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/test/java/org/apache/avro/data/RecordBuilderBaseTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.data;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Type;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
/**
* Unit test for RecordBuilderBase.
*/
public class RecordBuilderBaseTest {
  /** All primitive Avro types: Type.values() minus the named/complex types. */
  private static Set<Type> primitives;
  /** Primitive types other than NULL; these must reject null values. */
  private static Set<Type> nonNullPrimitives;

  // FIX: "@BeforeAll()" with redundant empty parentheses is legal but
  // unidiomatic; the marker-annotation form is standard.
  @BeforeAll
  public static void setUpBeforeClass() {
    primitives = new HashSet<>(Arrays.asList(Type.values()));
    primitives.removeAll(Arrays.asList(Type.RECORD, Type.ENUM, Type.ARRAY, Type.MAP, Type.UNION, Type.FIXED));
    nonNullPrimitives = new HashSet<>(primitives);
    nonNullPrimitives.remove(Type.NULL);
  }

  @Test
  void isValidValueWithPrimitives() {
    // Verify that a non-null value is valid for all primitives:
    for (Type type : primitives) {
      Field f = new Field("f", Schema.create(type), null, null);
      assertTrue(RecordBuilderBase.isValidValue(f, new Object()));
    }
    // Verify that null is not valid for all non-null primitives:
    for (Type type : nonNullPrimitives) {
      Field f = new Field("f", Schema.create(type), null, null);
      assertFalse(RecordBuilderBase.isValidValue(f, null));
    }
  }

  @Test
  void isValidValueWithNullField() {
    // Verify that null is a valid value for null fields:
    assertTrue(RecordBuilderBase.isValidValue(new Field("f", Schema.create(Type.NULL), null, null), null));
  }

  @Test
  void isValidValueWithUnion() {
    // Verify that null values are not valid for a union with no null type:
    Schema unionWithoutNull = Schema
        .createUnion(Arrays.asList(Schema.create(Type.STRING), Schema.create(Type.BOOLEAN)));
    assertTrue(RecordBuilderBase.isValidValue(new Field("f", unionWithoutNull, null, null), new Object()));
    assertFalse(RecordBuilderBase.isValidValue(new Field("f", unionWithoutNull, null, null), null));
    // Verify that null values are valid for a union with a null type:
    Schema unionWithNull = Schema.createUnion(Arrays.asList(Schema.create(Type.STRING), Schema.create(Type.NULL)));
    assertTrue(RecordBuilderBase.isValidValue(new Field("f", unionWithNull, null, null), new Object()));
    assertTrue(RecordBuilderBase.isValidValue(new Field("f", unionWithNull, null, null), null));
  }
}
| 7,219 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/Conversion.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Map;
import java.util.ServiceLoader;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericEnumSymbol;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.generic.IndexedRecord;
/**
* Conversion between generic and logical type instances.
* <p>
* Instances of this class can be added to GenericData to convert a logical type
* to a particular representation. This can be done manually, using
* {@link GenericData#addLogicalTypeConversion(Conversion)}, or automatically.
* This last option uses the Java {@link ServiceLoader}, and requires the
* implementation to be a public class with a public no-arg constructor, be
* named in a file called {@code /META-INF/services/org.apache.avro.Conversion},
 * and both must be available in the classpath.
* <p>
* Implementations must provide:
* <ul>
* <li>{@link #getConvertedType()}: get the Java class used for the logical
* type</li>
* <li>{@link #getLogicalTypeName()}: get the logical type this implements</li>
* </ul>
* <p>
* Subclasses must also override the conversion methods for Avro's base types
* that are valid for the logical type, or else risk causing
* {@code UnsupportedOperationException} at runtime.
* <p>
* Optionally, use {@link #getRecommendedSchema()} to provide a Schema that will
* be used when generating a Schema for the class. This is useful when using
* {@code ReflectData} or {@code ProtobufData}, for example.
*
* @param <T> a Java type that can represent the named logical type
* @see ServiceLoader
*/
@SuppressWarnings("unused")
public abstract class Conversion<T> {

  /**
   * Return the Java class representing the logical type.
   *
   * @return a Java class returned by from methods and accepted by to methods
   */
  public abstract Class<T> getConvertedType();

  /**
   * Return the logical type this class converts.
   *
   * @return a String logical type name
   */
  public abstract String getLogicalTypeName();

  /**
   * Certain logical types may require adjusting the code within the "setter"
   * methods to make sure the data that is set is properly formatted. This method
   * allows the Conversion to generate custom setter code if required.
   *
   * @param varName      the name of the variable holding the converted value
   * @param valParamName the name of the parameter with the new converted value
   * @return a String for the body of the setter method
   */
  public String adjustAndSetValue(String varName, String valParamName) {
    // Default: plain assignment; subclasses may emit normalization code.
    return varName + " = " + valParamName + ";";
  }

  /**
   * Return the Avro schema to use when generating a schema for the converted
   * class (for example with ReflectData or ProtobufData). The default throws
   * {@link UnsupportedOperationException}; subclasses opt in by overriding.
   */
  public Schema getRecommendedSchema() {
    throw new UnsupportedOperationException("No recommended schema for " + getLogicalTypeName());
  }

  // ---------------------------------------------------------------------
  // fromXyz: convert a value of an Avro base type into the logical type T.
  // Each default throws UnsupportedOperationException; subclasses override
  // only the overloads that are valid for their logical type.
  // ---------------------------------------------------------------------

  public T fromBoolean(Boolean value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromBoolean is not supported for " + type.getName());
  }

  public T fromInt(Integer value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromInt is not supported for " + type.getName());
  }

  public T fromLong(Long value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromLong is not supported for " + type.getName());
  }

  public T fromFloat(Float value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromFloat is not supported for " + type.getName());
  }

  public T fromDouble(Double value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromDouble is not supported for " + type.getName());
  }

  public T fromCharSequence(CharSequence value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromCharSequence is not supported for " + type.getName());
  }

  public T fromEnumSymbol(GenericEnumSymbol<?> value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromEnumSymbol is not supported for " + type.getName());
  }

  public T fromFixed(GenericFixed value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromFixed is not supported for " + type.getName());
  }

  public T fromBytes(ByteBuffer value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromBytes is not supported for " + type.getName());
  }

  public T fromArray(Collection<?> value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromArray is not supported for " + type.getName());
  }

  public T fromMap(Map<?, ?> value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromMap is not supported for " + type.getName());
  }

  public T fromRecord(IndexedRecord value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("fromRecord is not supported for " + type.getName());
  }

  // ---------------------------------------------------------------------
  // toXyz: convert a logical-type value T back into an Avro base type. As
  // above, subclasses override only the overloads valid for their type.
  // ---------------------------------------------------------------------

  public Boolean toBoolean(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toBoolean is not supported for " + type.getName());
  }

  public Integer toInt(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toInt is not supported for " + type.getName());
  }

  public Long toLong(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toLong is not supported for " + type.getName());
  }

  public Float toFloat(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toFloat is not supported for " + type.getName());
  }

  public Double toDouble(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toDouble is not supported for " + type.getName());
  }

  public CharSequence toCharSequence(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toCharSequence is not supported for " + type.getName());
  }

  public GenericEnumSymbol<?> toEnumSymbol(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toEnumSymbol is not supported for " + type.getName());
  }

  public GenericFixed toFixed(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toFixed is not supported for " + type.getName());
  }

  public ByteBuffer toBytes(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toBytes is not supported for " + type.getName());
  }

  public Collection<?> toArray(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toArray is not supported for " + type.getName());
  }

  public Map<?, ?> toMap(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toMap is not supported for " + type.getName());
  }

  public IndexedRecord toRecord(T value, Schema schema, LogicalType type) {
    throw new UnsupportedOperationException("toRecord is not supported for " + type.getName());
  }
}
| 7,220 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/UnresolvedUnionException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
/** Thrown when the expected contents of a union cannot be resolved. */
public class UnresolvedUnionException extends AvroRuntimeException {
  // FIX: both fields are assigned only in constructors and never mutated;
  // mark them final to make the immutability explicit.
  /** The datum that did not match any branch of the union. */
  private final Object unresolvedDatum;
  /** The union schema the datum was matched against. */
  private final Schema unionSchema;

  /**
   * @param unionSchema     the union schema that could not be resolved
   * @param unresolvedDatum the datum that matched none of the union's branches
   */
  public UnresolvedUnionException(Schema unionSchema, Object unresolvedDatum) {
    super("Not in union " + unionSchema + ": " + unresolvedDatum);
    this.unionSchema = unionSchema;
    this.unresolvedDatum = unresolvedDatum;
  }

  /**
   * Variant that names the record field being written, for better diagnostics.
   *
   * @param unionSchema     the union schema that could not be resolved
   * @param field           the record field whose value failed to resolve
   * @param unresolvedDatum the datum that matched none of the union's branches
   */
  public UnresolvedUnionException(Schema unionSchema, Schema.Field field, Object unresolvedDatum) {
    super("Not in union " + unionSchema + ": " + unresolvedDatum + " (field=" + field.name() + ")");
    this.unionSchema = unionSchema;
    this.unresolvedDatum = unresolvedDatum;
  }

  /** Returns the datum that could not be resolved against the union. */
  public Object getUnresolvedDatum() {
    return unresolvedDatum;
  }

  /** Returns the union schema involved in the failure. */
  public Schema getUnionSchema() {
    return unionSchema;
  }
}
| 7,221 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/ValidateMutualRead.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro;
import java.io.IOException;
import org.apache.avro.io.parsing.ResolvingGrammarGenerator;
import org.apache.avro.io.parsing.Symbol;
/**
* A {@link SchemaValidationStrategy} that checks that the {@link Schema} to
* validate and the existing schema can mutually read each other according to
* the default Avro schema resolution rules.
*
*/
class ValidateMutualRead implements SchemaValidationStrategy {

  /**
   * Validate that the schemas provided can mutually read data written by each
   * other according to the default Avro schema resolution rules.
   *
   * @throws SchemaValidationException if the schemas are not mutually compatible.
   */
  @Override
  public void validate(Schema toValidate, Schema existing) throws SchemaValidationException {
    // Mutual compatibility: each schema must be able to read data written
    // with the other.
    canRead(toValidate, existing);
    canRead(existing, toValidate);
  }

  /**
   * Validates that data written with one schema can be read using another, based
   * on the default Avro schema resolution rules.
   *
   * @param writtenWith The "writer's" schema, representing data to be read.
   * @param readUsing   The "reader's" schema, representing how the reader will
   *                    interpret data.
   * @throws SchemaValidationException if the schema <b>readUsing</b> cannot be
   *                                   used to read data written with
   *                                   <b>writtenWith</b>
   */
  static void canRead(Schema writtenWith, Schema readUsing) throws SchemaValidationException {
    boolean error;
    try {
      // The resolving grammar contains error symbols wherever the reader
      // cannot be resolved against the writer.
      error = Symbol.hasErrors(new ResolvingGrammarGenerator().generate(writtenWith, readUsing));
    } catch (IOException e) {
      throw new SchemaValidationException(readUsing, writtenWith, e);
    }
    if (error) {
      throw new SchemaValidationException(readUsing, writtenWith);
    }
  }
}
| 7,222 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/ValidateCanRead.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro;
/**
* A {@link SchemaValidationStrategy} that checks that the {@link Schema} to
* validate can read the existing schema according to the default Avro schema
* resolution rules.
*
*/
class ValidateCanRead implements SchemaValidationStrategy {

  /**
   * Validate that the first schema provided can be used to read data written with
   * the second schema, according to the default Avro schema resolution rules.
   *
   * @throws SchemaValidationException if the first schema cannot read data
   *                                   written by the second.
   */
  @Override
  public void validate(Schema toValidate, Schema existing) throws SchemaValidationException {
    // Note the argument order: 'existing' acts as the writer's schema and
    // 'toValidate' as the reader's.
    ValidateMutualRead.canRead(existing, toValidate);
  }
}
| 7,223 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/SchemaParseException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
/** Thrown for errors parsing schemas and protocols. */
public class SchemaParseException extends AvroRuntimeException {
  /** Wraps the underlying cause of a schema/protocol parse failure. */
  public SchemaParseException(Throwable cause) {
    super(cause);
  }

  /** Creates an exception with a description of the parse error. */
  public SchemaParseException(String message) {
    super(message);
  }
}
| 7,224 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/AvroMissingFieldException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import org.apache.avro.Schema.Field;
import java.util.ArrayList;
import java.util.List;
/** Avro exception in case of missing fields. */
public class AvroMissingFieldException extends AvroRuntimeException {
  // FIX: the list reference never changes after construction; mark it final.
  /**
   * Fields from the missing field up to the record root: the constructor adds
   * the missing field, then each {@link #addParentField(Field)} call appends
   * the next enclosing field as the error propagates outward.
   */
  private final List<Field> chainOfFields = new ArrayList<>(8);

  /**
   * @param message description of the missing field
   * @param field   the field that was missing
   */
  public AvroMissingFieldException(String message, Field field) {
    super(message);
    chainOfFields.add(field);
  }

  /** Records the enclosing field while the exception propagates up the schema. */
  public void addParentField(Field field) {
    chainOfFields.add(field);
  }

  /** Renders the path from the schema root down to the missing field. */
  @Override
  public String toString() {
    // FIX: build by appending in reverse order instead of repeated
    // StringBuilder.insert(0, ...), which is quadratic; output is identical
    // (root-most field first, missing field last).
    StringBuilder path = new StringBuilder();
    for (int i = chainOfFields.size() - 1; i >= 0; i--) {
      path.append(" --> ").append(chainOfFields.get(i).name());
    }
    return "Path in schema:" + path;
  }
}
| 7,225 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/SchemaCompatibility.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Evaluate the compatibility between a reader schema and a writer schema. A
* reader and a writer schema are declared compatible if all datum instances of
* the writer schema can be successfully decoded using the specified reader
* schema.
*/
public class SchemaCompatibility {
private static final Logger LOG = LoggerFactory.getLogger(SchemaCompatibility.class);
/** Utility class cannot be instantiated; use the static methods only. */
private SchemaCompatibility() {
}
/** Message to annotate reader/writer schema pairs that are compatible. */
public static final String READER_WRITER_COMPATIBLE_MESSAGE = "Reader schema can always successfully decode data written using the writer schema.";
/**
* Validates that the provided reader schema can be used to decode avro data
* written with the provided writer schema.
*
* @param reader schema to check.
* @param writer schema to check.
* @return a result object identifying any compatibility errors.
*/
/**
 * Walks the reader/writer schema pair and reports whether all data written
 * with the writer schema can be decoded with the reader schema.
 */
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader, final Schema writer) {
  final SchemaCompatibilityResult result = new ReaderWriterCompatibilityChecker().getCompatibility(reader, writer);

  final String description;
  switch (result.getCompatibility()) {
  case COMPATIBLE:
    description = READER_WRITER_COMPATIBLE_MESSAGE;
    break;
  case INCOMPATIBLE:
    description = String.format(
        "Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
        writer.toString(true), reader.toString(true));
    break;
  default:
    // Defensive: the compatibility enum should only ever be one of the above.
    throw new AvroRuntimeException("Unknown compatibility: " + result);
  }
  return new SchemaPairCompatibility(result, reader, writer, description);
}
// -----------------------------------------------------------------------------------------------
/**
* Tests the equality of two Avro named schemas.
*
* <p>
* Matching includes reader name aliases.
* </p>
*
* @param reader Named reader schema.
* @param writer Named writer schema.
* @return whether the names of the named schemas match or not.
*/
/**
 * Returns whether the reader and writer named schemas match by name, either
 * directly or through one of the reader's aliases.
 */
public static boolean schemaNameEquals(final Schema reader, final Schema writer) {
  // FIX: use java.util.Objects.equals (already imported in this file) instead
  // of the hand-rolled objectsEqual helper; behavior is identical.
  if (Objects.equals(reader.getName(), writer.getName())) {
    return true;
  }
  // Apply reader aliases:
  return reader.getAliases().contains(writer.getFullName());
}
/**
* Identifies the writer field that corresponds to the specified reader field.
*
* <p>
* Matching includes reader name aliases.
* </p>
*
* @param writerSchema Schema of the record where to look for the writer field.
* @param readerField Reader field to identify the corresponding writer field
* of.
* @return the writer field, if any does correspond, or None.
*/
/**
 * Finds the writer-record field that corresponds to the given reader field,
 * matching by name and then by the reader field's aliases.
 *
 * @return the single matching writer field, or null when none matches
 * @throws AvroRuntimeException when more than one writer field matches
 */
public static Field lookupWriterField(final Schema writerSchema, final Field readerField) {
  assert (writerSchema.getType() == Type.RECORD);
  final List<Field> candidates = new ArrayList<>();
  final Field direct = writerSchema.getField(readerField.name());
  if (direct != null) {
    candidates.add(direct);
  }
  // A reader alias may also name the writer's field.
  for (final String alias : readerField.aliases()) {
    final Field aliased = writerSchema.getField(alias);
    if (aliased != null) {
      candidates.add(aliased);
    }
  }
  if (candidates.isEmpty()) {
    return null;
  }
  if (candidates.size() == 1) {
    return candidates.get(0);
  }
  throw new AvroRuntimeException(String.format(
      "Reader record field %s matches multiple fields in writer record schema %s", readerField, writerSchema));
}
/**
 * Reader/writer schema pair that can be used as a key in a hash map.
 *
 * This reader/writer pair differentiates Schema objects based on their system
 * hash code.
 */
private static final class ReaderWriter {
  private final Schema mReader;
  private final Schema mWriter;

  /**
   * Initializes a new reader/writer pair.
   *
   * @param reader Reader schema.
   * @param writer Writer schema.
   */
  public ReaderWriter(final Schema reader, final Schema writer) {
    mReader = reader;
    mWriter = writer;
  }

  /** {@inheritDoc} */
  @Override
  public int hashCode() {
    // Identity-based hash so that distinct-but-equal Schema instances are
    // still treated as different keys:
    return System.identityHashCode(mReader) ^ System.identityHashCode(mWriter);
  }

  /** {@inheritDoc} */
  @Override
  public boolean equals(Object obj) {
    if (obj instanceof ReaderWriter) {
      final ReaderWriter other = (ReaderWriter) obj;
      // Reference comparison, consistent with the identity-based hashCode:
      return other.mReader == this.mReader && other.mWriter == this.mWriter;
    }
    return false;
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return String.format("ReaderWriter{reader:%s, writer:%s}", mReader, mWriter);
  }
}
/**
 * Determines the compatibility of a reader/writer schema pair.
 *
 * <p>
 * Provides memoization to handle recursive schemas.
 * </p>
 */
private static final class ReaderWriterCompatibilityChecker {
  /** JSON pointer reference token used for the schema root. */
  private static final String ROOT_REFERENCE_TOKEN = "";
  // Memoization cache, keyed on reader/writer object identity (see
  // ReaderWriter). Doubles as a recursion detector: a pair is marked
  // RECURSION_IN_PROGRESS while its compatibility is being computed.
  private final Map<ReaderWriter, SchemaCompatibilityResult> mMemoizeMap = new HashMap<>();

  /**
   * Reports the compatibility of a reader/writer schema pair.
   *
   * <p>
   * Memoizes the compatibility results.
   * </p>
   *
   * @param reader Reader schema to test.
   * @param writer Writer schema to test.
   * @return the compatibility of the reader/writer schema pair.
   */
  public SchemaCompatibilityResult getCompatibility(final Schema reader, final Schema writer) {
    Deque<String> location = new ArrayDeque<>();
    return getCompatibility(ROOT_REFERENCE_TOKEN, reader, writer, location);
  }

  /**
   * Reports the compatibility of a reader/writer schema pair.
   * <p>
   * Memoizes the compatibility results.
   * </p>
   *
   * @param referenceToken The equivalent JSON pointer reference token
   *                       representation of the schema node being visited.
   * @param reader         Reader schema to test.
   * @param writer         Writer schema to test.
   * @param location       Stack with which to track the location within the
   *                       schema.
   * @return the compatibility of the reader/writer schema pair.
   */
  private SchemaCompatibilityResult getCompatibility(String referenceToken, final Schema reader, final Schema writer,
      final Deque<String> location) {
    location.addFirst(referenceToken);
    LOG.debug("Checking compatibility of reader {} with writer {}", reader, writer);
    final ReaderWriter pair = new ReaderWriter(reader, writer);
    SchemaCompatibilityResult result = mMemoizeMap.get(pair);
    if (result != null) {
      if (result.getCompatibility() == SchemaCompatibilityType.RECURSION_IN_PROGRESS) {
        // Break the recursion here.
        // schemas are compatible unless proven incompatible:
        result = SchemaCompatibilityResult.compatible();
      }
    } else {
      // Mark this reader/writer pair as "in progress":
      mMemoizeMap.put(pair, SchemaCompatibilityResult.recursionInProgress());
      result = calculateCompatibility(reader, writer, location);
      // Replace the in-progress marker with the final result:
      mMemoizeMap.put(pair, result);
    }
    location.removeFirst();
    return result;
  }

  /**
   * Calculates the compatibility of a reader/writer schema pair.
   *
   * <p>
   * Relies on external memoization performed by
   * {@link #getCompatibility(Schema, Schema)}.
   * </p>
   *
   * @param reader   Reader schema to test.
   * @param writer   Writer schema to test.
   * @param location Stack with which to track the location within the schema.
   * @return the compatibility of the reader/writer schema pair.
   */
  private SchemaCompatibilityResult calculateCompatibility(final Schema reader, final Schema writer,
      final Deque<String> location) {
    assert (reader != null);
    assert (writer != null);
    SchemaCompatibilityResult result = SchemaCompatibilityResult.compatible();
    if (reader.getType() == writer.getType()) {
      // Same type on both sides: primitives match trivially; container and
      // named types recurse into their components.
      switch (reader.getType()) {
      case NULL:
      case BOOLEAN:
      case INT:
      case LONG:
      case FLOAT:
      case DOUBLE:
      case BYTES:
      case STRING: {
        return result;
      }
      case ARRAY: {
        return result
            .mergedWith(getCompatibility("items", reader.getElementType(), writer.getElementType(), location));
      }
      case MAP: {
        return result.mergedWith(getCompatibility("values", reader.getValueType(), writer.getValueType(), location));
      }
      case FIXED: {
        result = result.mergedWith(checkSchemaNames(reader, writer, location));
        return result.mergedWith(checkFixedSize(reader, writer, location));
      }
      case ENUM: {
        result = result.mergedWith(checkSchemaNames(reader, writer, location));
        return result.mergedWith(checkReaderEnumContainsAllWriterEnumSymbols(reader, writer, location));
      }
      case RECORD: {
        result = result.mergedWith(checkSchemaNames(reader, writer, location));
        return result.mergedWith(checkReaderWriterRecordFields(reader, writer, location));
      }
      case UNION: {
        // Check that each individual branch of the writer union can be decoded:
        int i = 0;
        for (final Schema writerBranch : writer.getTypes()) {
          location.addFirst(Integer.toString(i));
          SchemaCompatibilityResult compatibility = getCompatibility(reader, writerBranch);
          if (compatibility.getCompatibility() == SchemaCompatibilityType.INCOMPATIBLE) {
            String message = String.format("reader union lacking writer type: %s", writerBranch.getType());
            result = result.mergedWith(SchemaCompatibilityResult.incompatible(
                SchemaIncompatibilityType.MISSING_UNION_BRANCH, reader, writer, message, asList(location)));
          }
          location.removeFirst();
          i++;
        }
        // Each schema in the writer union can be decoded with the reader:
        return result;
      }
      default: {
        throw new AvroRuntimeException("Unknown schema type: " + reader.getType());
      }
      }
    } else {
      // Reader and writer have different schema types:
      // Reader compatible with all branches of a writer union is compatible
      if (writer.getType() == Schema.Type.UNION) {
        int index = 0;
        for (Schema s : writer.getTypes()) {
          result = result.mergedWith(getCompatibility(Integer.toString(index), reader, s, location));
          index++;
        }
        return result;
      }
      // Apply the Avro schema-resolution promotion rules (e.g. a long reader
      // can read an int writer); anything else is a type mismatch:
      switch (reader.getType()) {
      case NULL:
        return result.mergedWith(typeMismatch(reader, writer, location));
      case BOOLEAN:
        return result.mergedWith(typeMismatch(reader, writer, location));
      case INT:
        return result.mergedWith(typeMismatch(reader, writer, location));
      case LONG: {
        return (writer.getType() == Type.INT) ? result : result.mergedWith(typeMismatch(reader, writer, location));
      }
      case FLOAT: {
        return ((writer.getType() == Type.INT) || (writer.getType() == Type.LONG)) ? result
            : result.mergedWith(typeMismatch(reader, writer, location));
      }
      case DOUBLE: {
        return ((writer.getType() == Type.INT) || (writer.getType() == Type.LONG) || (writer.getType() == Type.FLOAT))
            ? result
            : result.mergedWith(typeMismatch(reader, writer, location));
      }
      case BYTES: {
        return (writer.getType() == Type.STRING) ? result : result.mergedWith(typeMismatch(reader, writer, location));
      }
      case STRING: {
        return (writer.getType() == Type.BYTES) ? result : result.mergedWith(typeMismatch(reader, writer, location));
      }
      case ARRAY:
        return result.mergedWith(typeMismatch(reader, writer, location));
      case MAP:
        return result.mergedWith(typeMismatch(reader, writer, location));
      case FIXED:
        return result.mergedWith(typeMismatch(reader, writer, location));
      case ENUM:
        return result.mergedWith(typeMismatch(reader, writer, location));
      case RECORD:
        return result.mergedWith(typeMismatch(reader, writer, location));
      case UNION: {
        // A reader union is compatible if at least one of its branches can
        // decode the writer schema:
        for (final Schema readerBranch : reader.getTypes()) {
          SchemaCompatibilityResult compatibility = getCompatibility(readerBranch, writer);
          if (compatibility.getCompatibility() == SchemaCompatibilityType.COMPATIBLE) {
            return result;
          }
        }
        // No branch in the reader union has been found compatible with the writer
        // schema:
        String message = String.format("reader union lacking writer type: %s", writer.getType());
        return result.mergedWith(SchemaCompatibilityResult
            .incompatible(SchemaIncompatibilityType.MISSING_UNION_BRANCH, reader, writer, message, asList(location)));
      }
      default: {
        throw new AvroRuntimeException("Unknown schema type: " + reader.getType());
      }
      }
    }
  }

  /**
   * Checks that every field of the reader record can be populated from the
   * writer record, either from a matching writer field or from a default value.
   */
  private SchemaCompatibilityResult checkReaderWriterRecordFields(final Schema reader, final Schema writer,
      final Deque<String> location) {
    SchemaCompatibilityResult result = SchemaCompatibilityResult.compatible();
    location.addFirst("fields");
    // Check that each field in the reader record can be populated from the writer
    // record:
    for (final Field readerField : reader.getFields()) {
      location.addFirst(Integer.toString(readerField.pos()));
      final Field writerField = lookupWriterField(writer, readerField);
      if (writerField == null) {
        // Reader field does not correspond to any field in the writer record schema, so
        // the
        // reader field must have a default value.
        if (!readerField.hasDefaultValue()) {
          // reader field has no default value. Check for the enum default value
          if (readerField.schema().getType() == Type.ENUM && readerField.schema().getEnumDefault() != null) {
            result = result.mergedWith(getCompatibility("type", readerField.schema(), writer, location));
          } else {
            result = result.mergedWith(
                SchemaCompatibilityResult.incompatible(SchemaIncompatibilityType.READER_FIELD_MISSING_DEFAULT_VALUE,
                    reader, writer, readerField.name(), asList(location)));
          }
        }
      } else {
        result = result.mergedWith(getCompatibility("type", readerField.schema(), writerField.schema(), location));
      }
      // POP field index
      location.removeFirst();
    }
    // All fields in the reader record can be populated from the writer record:
    // POP "fields" literal
    location.removeFirst();
    return result;
  }

  /**
   * Checks that every writer enum symbol is known to the reader, or that the
   * reader declares an enum default to absorb unknown symbols.
   */
  private SchemaCompatibilityResult checkReaderEnumContainsAllWriterEnumSymbols(final Schema reader,
      final Schema writer, final Deque<String> location) {
    SchemaCompatibilityResult result = SchemaCompatibilityResult.compatible();
    location.addFirst("symbols");
    final Set<String> symbols = new TreeSet<>(writer.getEnumSymbols());
    symbols.removeAll(reader.getEnumSymbols());
    if (!symbols.isEmpty()) {
      // Unknown writer symbols are acceptable if the reader has a valid enum
      // default to fall back to:
      if (reader.getEnumDefault() != null && reader.getEnumSymbols().contains(reader.getEnumDefault())) {
        symbols.clear();
        result = SchemaCompatibilityResult.compatible();
      } else {
        result = SchemaCompatibilityResult.incompatible(SchemaIncompatibilityType.MISSING_ENUM_SYMBOLS, reader,
            writer, symbols.toString(), asList(location));
      }
    }
    // POP "symbols" literal
    location.removeFirst();
    return result;
  }

  /** Checks that two fixed schemas declare the same size. */
  private SchemaCompatibilityResult checkFixedSize(final Schema reader, final Schema writer,
      final Deque<String> location) {
    SchemaCompatibilityResult result = SchemaCompatibilityResult.compatible();
    location.addFirst("size");
    int actual = reader.getFixedSize();
    int expected = writer.getFixedSize();
    if (actual != expected) {
      String message = String.format("expected: %d, found: %d", expected, actual);
      result = SchemaCompatibilityResult.incompatible(SchemaIncompatibilityType.FIXED_SIZE_MISMATCH, reader, writer,
          message, asList(location));
    }
    // POP "size" literal
    location.removeFirst();
    return result;
  }

  /** Checks that two named schemas match by name (including reader aliases). */
  private SchemaCompatibilityResult checkSchemaNames(final Schema reader, final Schema writer,
      final Deque<String> location) {
    SchemaCompatibilityResult result = SchemaCompatibilityResult.compatible();
    location.addFirst("name");
    if (!schemaNameEquals(reader, writer)) {
      String message = String.format("expected: %s", writer.getFullName());
      result = SchemaCompatibilityResult.incompatible(SchemaIncompatibilityType.NAME_MISMATCH, reader, writer,
          message, asList(location));
    }
    // POP "name" literal
    location.removeFirst();
    return result;
  }

  /** Builds the TYPE_MISMATCH incompatibility for a reader/writer pair. */
  private SchemaCompatibilityResult typeMismatch(final Schema reader, final Schema writer,
      final Deque<String> location) {
    String message = String.format("reader type: %s not compatible with writer type: %s", reader.getType(),
        writer.getType());
    return SchemaCompatibilityResult.incompatible(SchemaIncompatibilityType.TYPE_MISMATCH, reader, writer, message,
        asList(location));
  }
}
/**
 * Identifies the type of a schema compatibility result.
 */
public enum SchemaCompatibilityType {
  /** The reader schema can read data written with the writer schema. */
  COMPATIBLE,
  /** At least one incompatibility prevents the reader from reading the data. */
  INCOMPATIBLE,
  /** Used internally to tag a reader/writer schema pair and prevent recursion. */
  RECURSION_IN_PROGRESS;
}
/**
 * Identifies the kind of incompatibility found between a reader and a writer
 * schema fragment.
 */
public enum SchemaIncompatibilityType {
  /** Named schemas (record, enum, fixed) do not match by name or alias. */
  NAME_MISMATCH,
  /** Two fixed schemas declare different sizes. */
  FIXED_SIZE_MISMATCH,
  /** The writer enum has symbols the reader does not know (and no default). */
  MISSING_ENUM_SYMBOLS,
  /** A reader record field has no writer counterpart and no default value. */
  READER_FIELD_MISSING_DEFAULT_VALUE,
  /** The schema types differ and no promotion rule applies. */
  TYPE_MISMATCH,
  /** No branch of the reader union can decode the writer schema. */
  MISSING_UNION_BRANCH;
}
/**
 * Immutable class representing details about a particular schema pair
 * compatibility check.
 */
public static final class SchemaCompatibilityResult {
  private final SchemaCompatibilityType mCompatibilityType;
  // the below fields are only valid if INCOMPATIBLE
  private final List<Incompatibility> mIncompatibilities;

  // cached objects for stateless details
  private static final SchemaCompatibilityResult COMPATIBLE = new SchemaCompatibilityResult(
      SchemaCompatibilityType.COMPATIBLE, Collections.emptyList());
  private static final SchemaCompatibilityResult RECURSION_IN_PROGRESS = new SchemaCompatibilityResult(
      SchemaCompatibilityType.RECURSION_IN_PROGRESS, Collections.emptyList());

  private SchemaCompatibilityResult(SchemaCompatibilityType compatibilityType,
      List<Incompatibility> incompatibilities) {
    this.mCompatibilityType = compatibilityType;
    this.mIncompatibilities = incompatibilities;
  }

  /**
   * Returns a details object representing a compatible schema pair.
   *
   * @return a SchemaCompatibilityDetails object with COMPATIBLE
   *         SchemaCompatibilityType, and no other state.
   */
  public static SchemaCompatibilityResult compatible() {
    return COMPATIBLE;
  }

  /**
   * Returns a details object representing a state indicating that recursion is in
   * progress.
   *
   * @return a SchemaCompatibilityDetails object with RECURSION_IN_PROGRESS
   *         SchemaCompatibilityType, and no other state.
   */
  public static SchemaCompatibilityResult recursionInProgress() {
    return RECURSION_IN_PROGRESS;
  }

  /**
   * Returns a details object representing an incompatible schema pair, including
   * error details.
   *
   * @return a SchemaCompatibilityDetails object with INCOMPATIBLE
   *         SchemaCompatibilityType, and state representing the violating part.
   */
  public static SchemaCompatibilityResult incompatible(SchemaIncompatibilityType incompatibilityType,
      Schema readerFragment, Schema writerFragment, String message, List<String> location) {
    final Incompatibility incompatibility = new Incompatibility(incompatibilityType, readerFragment, writerFragment,
        message, location);
    return new SchemaCompatibilityResult(SchemaCompatibilityType.INCOMPATIBLE,
        Collections.singletonList(incompatibility));
  }

  /**
   * Merges the current {@code SchemaCompatibilityResult} with the supplied result
   * into a new instance, combining the list of
   * {@code Incompatibility Incompatibilities} and regressing to the
   * {@code SchemaCompatibilityType#INCOMPATIBLE INCOMPATIBLE} state if any
   * incompatibilities are encountered.
   *
   * @param toMerge The {@code SchemaCompatibilityResult} to merge with the
   *                current instance.
   * @return A {@code SchemaCompatibilityResult} that combines the state of the
   *         current and supplied instances.
   */
  public SchemaCompatibilityResult mergedWith(SchemaCompatibilityResult toMerge) {
    final List<Incompatibility> combined = new ArrayList<>(mIncompatibilities);
    combined.addAll(toMerge.getIncompatibilities());
    final SchemaCompatibilityType mergedType;
    if (mCompatibilityType == SchemaCompatibilityType.COMPATIBLE) {
      // A compatible state adopts whatever the other side reports:
      mergedType = toMerge.mCompatibilityType;
    } else {
      mergedType = SchemaCompatibilityType.INCOMPATIBLE;
    }
    return new SchemaCompatibilityResult(mergedType, combined);
  }

  /**
   * Returns the SchemaCompatibilityType, always non-null.
   *
   * @return a SchemaCompatibilityType instance, always non-null
   */
  public SchemaCompatibilityType getCompatibility() {
    return mCompatibilityType;
  }

  /**
   * If the compatibility is INCOMPATIBLE, returns {@link Incompatibility
   * Incompatibilities} found, otherwise an empty list.
   *
   * @return a list of {@link Incompatibility Incompatibilities}, may be empty,
   *         never null.
   */
  public List<Incompatibility> getIncompatibilities() {
    return mIncompatibilities;
  }

  /** {@inheritDoc} */
  @Override
  public int hashCode() {
    // Objects.hash produces the same 31-based chain as the hand-rolled version.
    return Objects.hash(mCompatibilityType, mIncompatibilities);
  }

  /** {@inheritDoc} */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    final SchemaCompatibilityResult that = (SchemaCompatibilityResult) obj;
    return mCompatibilityType == that.mCompatibilityType
        && Objects.equals(mIncompatibilities, that.mIncompatibilities);
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return String.format("SchemaCompatibilityResult{compatibility:%s, incompatibilities:%s}", mCompatibilityType,
        mIncompatibilities);
  }
}
// -----------------------------------------------------------------------------------------------
/** A single incompatibility found between a reader and a writer schema fragment. */
public static final class Incompatibility {
  private final SchemaIncompatibilityType mType;
  private final Schema mReaderFragment;
  private final Schema mWriterFragment;
  private final String mMessage;
  private final List<String> mLocation;

  Incompatibility(SchemaIncompatibilityType type, Schema readerFragment, Schema writerFragment, String message,
      List<String> location) {
    this.mType = type;
    this.mReaderFragment = readerFragment;
    this.mWriterFragment = writerFragment;
    this.mMessage = message;
    this.mLocation = location;
  }

  /**
   * Returns the SchemaIncompatibilityType.
   *
   * @return a SchemaIncompatibilityType instance.
   */
  public SchemaIncompatibilityType getType() {
    return mType;
  }

  /**
   * Returns the fragment of the reader schema that failed compatibility check.
   *
   * @return a Schema instance (fragment of the reader schema).
   */
  public Schema getReaderFragment() {
    return mReaderFragment;
  }

  /**
   * Returns the fragment of the writer schema that failed compatibility check.
   *
   * @return a Schema instance (fragment of the writer schema).
   */
  public Schema getWriterFragment() {
    return mWriterFragment;
  }

  /**
   * Returns a human-readable message with more details about what failed. Syntax
   * depends on the SchemaIncompatibilityType.
   *
   * @see #getType()
   * @return a String with details about the incompatibility.
   */
  public String getMessage() {
    return mMessage;
  }

  /**
   * Returns a
   * <a href="https://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-08">JSON
   * Pointer</a> describing the node location within the schema's JSON document
   * tree where the incompatibility was encountered.
   *
   * @return JSON Pointer encoded as a string.
   */
  public String getLocation() {
    final List<String> escapedTokens = new ArrayList<>();
    // Skip the root element at index 0; escape each remaining reference token
    // per the JSON Pointer spec ("~" -> "~0", "/" -> "~1"):
    for (final String token : mLocation.subList(1, mLocation.size())) {
      escapedTokens.add(token.replace("~", "~0").replace("/", "~1"));
    }
    return "/" + String.join("/", escapedTokens);
  }

  /** {@inheritDoc} */
  @Override
  public int hashCode() {
    // Objects.hash produces the same 31-based chain as the hand-rolled version.
    return Objects.hash(mType, mReaderFragment, mWriterFragment, mMessage, mLocation);
  }

  /** {@inheritDoc} */
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    final Incompatibility that = (Incompatibility) obj;
    return mType == that.mType && Objects.equals(mReaderFragment, that.mReaderFragment)
        && Objects.equals(mWriterFragment, that.mWriterFragment) && Objects.equals(mMessage, that.mMessage)
        && Objects.equals(mLocation, that.mLocation);
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return String.format("Incompatibility{type:%s, location:%s, message:%s, reader:%s, writer:%s}", mType,
        getLocation(), mMessage, mReaderFragment, mWriterFragment);
  }
}
// -----------------------------------------------------------------------------------------------
/**
 * Provides information about the compatibility of a single reader and writer
 * schema pair.
 *
 * Note: This class represents a one-way relationship from the reader to the
 * writer schema.
 */
public static final class SchemaPairCompatibility {
  /** The details of this result. */
  private final SchemaCompatibilityResult mResult;
  /** Validated reader schema. */
  private final Schema mReader;
  /** Validated writer schema. */
  private final Schema mWriter;
  /** Human readable description of this result. */
  private final String mDescription;

  /**
   * Constructs a new instance.
   *
   * @param result      The result of the compatibility check.
   * @param reader      schema that was validated.
   * @param writer      schema that was validated.
   * @param description of this compatibility result.
   */
  public SchemaPairCompatibility(SchemaCompatibilityResult result, Schema reader, Schema writer, String description) {
    mResult = result;
    mReader = reader;
    mWriter = writer;
    mDescription = description;
  }

  /**
   * Gets the type of this result.
   *
   * @return the type of this result.
   */
  public SchemaCompatibilityType getType() {
    return mResult.getCompatibility();
  }

  /**
   * Gets more details about the compatibility, in particular if getType() is
   * INCOMPATIBLE.
   *
   * @return the details of this compatibility check.
   */
  public SchemaCompatibilityResult getResult() {
    return mResult;
  }

  /**
   * Gets the reader schema that was validated.
   *
   * @return reader schema that was validated.
   */
  public Schema getReader() {
    return mReader;
  }

  /**
   * Gets the writer schema that was validated.
   *
   * @return writer schema that was validated.
   */
  public Schema getWriter() {
    return mWriter;
  }

  /**
   * Gets a human readable description of this validation result.
   *
   * @return a human readable description of this validation result.
   */
  public String getDescription() {
    return mDescription;
  }

  /** {@inheritDoc} */
  @Override
  public String toString() {
    return String.format("SchemaPairCompatibility{result:%s, readerSchema:%s, writerSchema:%s, description:%s}",
        mResult, mReader, mWriter, mDescription);
  }

  /** {@inheritDoc} */
  @Override
  public boolean equals(Object other) {
    if (!(other instanceof SchemaPairCompatibility)) {
      return false;
    }
    final SchemaPairCompatibility that = (SchemaPairCompatibility) other;
    return Objects.equals(that.mResult, mResult) && Objects.equals(that.mReader, mReader)
        && Objects.equals(that.mWriter, mWriter) && Objects.equals(that.mDescription, mDescription);
  }

  /** {@inheritDoc} */
  @Override
  public int hashCode() {
    // Same value as Arrays.hashCode over the four fields.
    return Objects.hash(mResult, mReader, mWriter, mDescription);
  }
}
/**
 * Borrowed from Guava's Objects.equal(a, b): null-safe equality, semantically
 * identical to {@link java.util.Objects#equals(Object, Object)}.
 */
private static boolean objectsEqual(Object obj1, Object obj2) {
  return (obj1 == obj2) || (obj1 != null && obj1.equals(obj2));
}
/**
 * Snapshots the location stack as an unmodifiable list in root-first order.
 * The deque is pushed with addFirst, so its descending iteration order is
 * exactly the reversed copy the original produced.
 */
private static List<String> asList(Deque<String> deque) {
  final List<String> reversed = new ArrayList<>(deque.size());
  deque.descendingIterator().forEachRemaining(reversed::add);
  return Collections.unmodifiableList(reversed);
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import com.fasterxml.jackson.core.io.JsonStringEncoder;
import org.apache.avro.Schema.Field;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.util.internal.JacksonUtils;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.TextNode;
/**
* <p>
* A fluent interface for building {@link Schema} instances. The flow of the API
* is designed to mimic the
* <a href="https://avro.apache.org/docs/current/spec.html#schemas">Avro Schema
* Specification</a>
* </p>
* For example, the below JSON schema and the fluent builder code to create it
* are very similar:
*
* <pre>
* {
* "type": "record",
* "name": "HandshakeRequest", "namespace":"org.apache.avro.ipc",
* "fields": [
* {"name": "clientHash",
* "type": {"type": "fixed", "name": "MD5", "size": 16}},
* {"name": "clientProtocol", "type": ["null", "string"]},
* {"name": "serverHash", "type": "MD5"},
* {"name": "meta", "type": ["null", {"type": "map", "values": "bytes"}]}
* ]
* }
* </pre>
*
* <pre>
* Schema schema = SchemaBuilder.record("HandshakeRequest").namespace("org.apache.avro.ipc").fields().name("clientHash")
* .type().fixed("MD5").size(16).noDefault().name("clientProtocol").type().nullable().stringType().noDefault()
* .name("serverHash").type("MD5").noDefault().name("meta").type().nullable().map().values().bytesType().noDefault()
* .endRecord();
* </pre>
* <p/>
*
* <h5>Usage Guide</h5> SchemaBuilder chains together many smaller builders and
* maintains nested context in order to mimic the Avro Schema specification.
* Every Avro type in JSON has required and optional JSON properties, as well as
* user-defined properties.
* <p/>
* <h6>Selecting and Building an Avro Type</h6> The API analogy for the right
* hand side of the Avro Schema JSON
*
* <pre>
* "type":
* </pre>
*
* is a {@link TypeBuilder}, {@link FieldTypeBuilder}, or
* {@link UnionFieldTypeBuilder}, depending on the context. These types all
* share a similar API for selecting and building types.
* <p/>
* <h5>Primitive Types</h5> All Avro primitive types are trivial to configure. A
* primitive type in Avro JSON can be declared two ways, one that supports
* custom properties and one that does not:
*
* <pre>
* {"type":"int"}
* {"type":{"name":"int"}}
* {"type":{"name":"int", "customProp":"val"}}
* </pre>
*
* The analogous code form for the above three JSON lines are the below three
* lines:
*
* <pre>
* .intType()
* .intBuilder().endInt()
* .intBuilder().prop("customProp", "val").endInt()
* </pre>
*
* Every primitive type has a shortcut to create the trivial type, and a builder
* when custom properties are required. The first line above is a shortcut for
* the second, analogous to the JSON case.
* <h6>Named Types</h6> Avro named types have names, namespace, aliases, and
* doc. In this API these share a common parent, {@link NamespacedBuilder}. The
* builders for named types require a name to be constructed, and optional
* configuration via:
* <li>{@link NamespacedBuilder#doc()}</li>
* <li>{@link NamespacedBuilder#namespace(String)}</li>
* <li>{@link NamespacedBuilder#aliases(String...)}</li>
* <li>{@link PropBuilder#prop(String, String)}</li>
* <p/>
* Each named type completes configuration of the optional properties with its
* own method:
* <li>{@link FixedBuilder#size(int)}</li>
* <li>{@link EnumBuilder#symbols(String...)}</li>
* <li>{@link RecordBuilder#fields()}</li> Example use of a named type with all
* optional parameters:
*
* <pre>
* .enumeration("Suit").namespace("org.apache.test")
* .aliases("org.apache.test.OldSuit")
* .doc("CardSuits")
* .prop("customProp", "val")
* .symbols("SPADES", "HEARTS", "DIAMONDS", "CLUBS")
* </pre>
*
* Which is equivalent to the JSON:
*
* <pre>
* { "type":"enum",
* "name":"Suit", "namespace":"org.apache.test",
* "aliases":["org.apache.test.OldSuit"],
* "doc":"Card Suits",
* "customProp":"val",
* "symbols":["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
* }
* </pre>
*
* <h6>Nested Types</h6> The Avro nested types, map and array, can have custom
* properties like all avro types, are not named, and must specify a nested
* type. After configuration of optional properties, an array or map builds or
* selects its nested type with {@link ArrayBuilder#items()} and
* {@link MapBuilder#values()}, respectively.
*
* <h6>Fields</h6> {@link RecordBuilder#fields()} returns a
* {@link FieldAssembler} for defining the fields of the record and completing
* it. Each field must have a name, specified via
* {@link FieldAssembler#name(String)}, which returns a {@link FieldBuilder} for
* defining aliases, custom properties, and documentation of the field. After
* configuring these optional values for a field, the type is selected or built
* with {@link FieldBuilder#type()}.
* <p/>
* Fields have default values that must be specified to complete the field.
* {@link FieldDefault#noDefault()} is available for all field types, and a
* specific method is available for each type to use a default, for example
* {@link IntDefault#intDefault(int)}
* <p/>
* There are field shortcut methods on {@link FieldAssembler} for primitive
* types. These shortcuts create required, optional, and nullable fields, but do
* not support field aliases, doc, or custom properties.
*
* <h6>Unions</h6> Union types are built via {@link TypeBuilder#unionOf()} or
* {@link FieldTypeBuilder#unionOf()} in the context of type selection. This
* chains together multiple types, in union order. For example:
*
* <pre>
* .unionOf()
* .fixed("IPv4").size(4).and()
* .fixed("IPv6").size(16).and()
* .nullType().endUnion()
* </pre>
*
* is equivalent to the Avro schema JSON:
*
* <pre>
* [
* {"type":"fixed", "name":"IPv4", "size":4},
* {"type":"fixed", "name":"IPv6", "size":16},
* "null"
* ]
* </pre>
*
* In a field context, the first type of a union defines what default type is
* allowed.
* </p>
* Unions have two shortcuts for common cases. nullable() creates a union of a
* type and null. In a field type context, optional() is available and creates a
* union of null and a type, with a null default. The below two are equivalent:
*
* <pre>
* .unionOf().intType().and().nullType().endUnion()
* .nullable().intType()
* </pre>
*
* The below two field declarations are equivalent:
*
* <pre>
* .name("f").type().unionOf().nullType().and().longType().endUnion().nullDefault()
* .name("f").type().optional().longType()
* </pre>
*
* <h6>Explicit Types and Types by Name</h6> Types can also be specified
* explicitly by passing in a Schema, or by name:
*
* <pre>
* .type(Schema.create(Schema.Type.INT)) // explicitly specified
* .type("MD5") // reference by full name or short name
* .type("MD5", "org.apache.avro.test") // reference by name and namespace
* </pre>
*
* When a type is specified by name, and the namespace is absent or null, the
* namespace is inherited from the enclosing context. A namespace will propagate
* as a default to child fields, nested types, or later defined types in a
* union. To specify a name that has no namespace and ignore the inherited
* namespace, set the namespace to "".
* <p/>
* {@link SchemaBuilder#builder(String)} returns a type builder with a default
* namespace. {@link SchemaBuilder#builder()} returns a type builder with no
* default namespace.
*/
public class SchemaBuilder {
  // Not instantiable: SchemaBuilder is a static entry point; use the static
  // factory methods (builder(), record(), enumeration(), ...) instead.
  private SchemaBuilder() {
  }
/**
* Create a builder for Avro schemas.
*/
public static TypeBuilder<Schema> builder() {
return new TypeBuilder<>(new SchemaCompletion(), new NameContext());
}
/**
* Create a builder for Avro schemas with a default namespace. Types created
* without namespaces will inherit the namespace provided.
*/
public static TypeBuilder<Schema> builder(String namespace) {
return new TypeBuilder<>(new SchemaCompletion(), new NameContext().namespace(namespace));
}
/**
* Create a builder for an Avro record with the specified name. This is
* equivalent to:
*
* <pre>
* builder().record(name);
* </pre>
*
* @param name the record name
*/
public static RecordBuilder<Schema> record(String name) {
return builder().record(name);
}
/**
* Create a builder for an Avro enum with the specified name and symbols
* (values). This is equivalent to:
*
* <pre>
* builder().enumeration(name);
* </pre>
*
* @param name the enum name
*/
public static EnumBuilder<Schema> enumeration(String name) {
return builder().enumeration(name);
}
/**
* Create a builder for an Avro fixed type with the specified name and size.
* This is equivalent to:
*
* <pre>
* builder().fixed(name);
* </pre>
*
* @param name the fixed name
*/
public static FixedBuilder<Schema> fixed(String name) {
return builder().fixed(name);
}
/**
* Create a builder for an Avro array This is equivalent to:
*
* <pre>
* builder().array();
* </pre>
*/
public static ArrayBuilder<Schema> array() {
return builder().array();
}
/**
* Create a builder for an Avro map This is equivalent to:
*
* <pre>
* builder().map();
* </pre>
*/
public static MapBuilder<Schema> map() {
return builder().map();
}
/**
* Create a builder for an Avro union This is equivalent to:
*
* <pre>
* builder().unionOf();
* </pre>
*/
public static BaseTypeBuilder<UnionAccumulator<Schema>> unionOf() {
return builder().unionOf();
}
/**
* Create a builder for a union of a type and null. This is a shortcut for:
*
* <pre>
* builder().nullable();
* </pre>
*
* and the following two lines are equivalent:
*
* <pre>
* nullable().intType();
* </pre>
*
* <pre>
* unionOf().intType().and().nullType().endUnion();
* </pre>
*/
public static BaseTypeBuilder<Schema> nullable() {
return builder().nullable();
}
/**
* An abstract builder for all Avro types. All Avro types can have arbitrary
* string key-value properties.
*/
public static abstract class PropBuilder<S extends PropBuilder<S>> {
private Map<String, JsonNode> props = null;
protected PropBuilder() {
}
/**
* Set name-value pair properties for this type or field.
*/
public final S prop(String name, String val) {
return prop(name, TextNode.valueOf(val));
}
/**
* Set name-value pair properties for this type or field.
*/
public final S prop(String name, Object value) {
return prop(name, JacksonUtils.toJsonNode(value));
}
// for internal use by the Parser
final S prop(String name, JsonNode val) {
if (!hasProps()) {
props = new HashMap<>();
}
props.put(name, val);
return self();
}
private boolean hasProps() {
return (props != null);
}
final <T extends JsonProperties> T addPropsTo(T jsonable) {
if (hasProps()) {
for (Map.Entry<String, JsonNode> prop : props.entrySet()) {
jsonable.addProp(prop.getKey(), prop.getValue());
}
}
return jsonable;
}
/**
* a self-type for chaining builder subclasses. Concrete subclasses must return
* 'this'
**/
protected abstract S self();
}
/**
* An abstract type that provides builder methods for configuring the name, doc,
* and aliases of all Avro types that have names (fields, Fixed, Record, and
* Enum).
* <p/>
* All Avro named types and fields have 'doc', 'aliases', and 'name' components.
* 'name' is required, and provided to this builder. 'doc' and 'aliases' are
* optional.
*/
public static abstract class NamedBuilder<S extends NamedBuilder<S>> extends PropBuilder<S> {
private final String name;
private final NameContext names;
private String doc;
private String[] aliases;
protected NamedBuilder(NameContext names, String name) {
this.name = Objects.requireNonNull(name, "Type must have a name");
this.names = names;
}
/** configure this type's optional documentation string **/
public final S doc(String doc) {
this.doc = doc;
return self();
}
/** configure this type's optional name aliases **/
public final S aliases(String... aliases) {
this.aliases = aliases;
return self();
}
final String doc() {
return doc;
}
final String name() {
return name;
}
final NameContext names() {
return names;
}
final Schema addAliasesTo(Schema schema) {
if (null != aliases) {
for (String alias : aliases) {
schema.addAlias(alias);
}
}
return schema;
}
final Field addAliasesTo(Field field) {
if (null != aliases) {
for (String alias : aliases) {
field.addAlias(alias);
}
}
return field;
}
}
/**
* An abstract type that provides builder methods for configuring the namespace
* for all Avro types that have namespaces (Fixed, Record, and Enum).
*/
public static abstract class NamespacedBuilder<R, S extends NamespacedBuilder<R, S>> extends NamedBuilder<S> {
private final Completion<R> context;
private String namespace;
protected NamespacedBuilder(Completion<R> context, NameContext names, String name) {
super(names, name);
this.context = context;
}
/**
* Set the namespace of this type. To clear the namespace, set empty string.
* <p/>
* When the namespace is null or unset, the namespace of the type defaults to
* the namespace of the enclosing context.
**/
public final S namespace(String namespace) {
this.namespace = namespace;
return self();
}
final String space() {
if (null == namespace) {
return names().namespace;
}
return namespace;
}
final Schema completeSchema(Schema schema) {
addPropsTo(schema);
addAliasesTo(schema);
names().put(schema);
return schema;
}
final Completion<R> context() {
return context;
}
}
/**
* An abstraction for sharing code amongst all primitive type builders.
*/
private static abstract class PrimitiveBuilder<R, P extends PrimitiveBuilder<R, P>> extends PropBuilder<P> {
private final Completion<R> context;
private final Schema immutable;
protected PrimitiveBuilder(Completion<R> context, NameContext names, Schema.Type type) {
this.context = context;
this.immutable = names.getFullname(type.getName());
}
private R end() {
Schema schema = immutable;
if (super.hasProps()) {
schema = Schema.create(immutable.getType());
addPropsTo(schema);
}
return context.complete(schema);
}
}
/**
* Builds an Avro boolean type with optional properties. Set properties with
* {@link #prop(String, String)}, and finalize with {@link #endBoolean()}
**/
public static final class BooleanBuilder<R> extends PrimitiveBuilder<R, BooleanBuilder<R>> {
private BooleanBuilder(Completion<R> context, NameContext names) {
super(context, names, Schema.Type.BOOLEAN);
}
private static <R> BooleanBuilder<R> create(Completion<R> context, NameContext names) {
return new BooleanBuilder<>(context, names);
}
@Override
protected BooleanBuilder<R> self() {
return this;
}
/** complete building this type, return control to context **/
public R endBoolean() {
return super.end();
}
}
/**
* Builds an Avro int type with optional properties. Set properties with
* {@link #prop(String, String)}, and finalize with {@link #endInt()}
**/
public static final class IntBuilder<R> extends PrimitiveBuilder<R, IntBuilder<R>> {
private IntBuilder(Completion<R> context, NameContext names) {
super(context, names, Schema.Type.INT);
}
private static <R> IntBuilder<R> create(Completion<R> context, NameContext names) {
return new IntBuilder<>(context, names);
}
@Override
protected IntBuilder<R> self() {
return this;
}
/** complete building this type, return control to context **/
public R endInt() {
return super.end();
}
}
/**
* Builds an Avro long type with optional properties. Set properties with
* {@link #prop(String, String)}, and finalize with {@link #endLong()}
**/
public static final class LongBuilder<R> extends PrimitiveBuilder<R, LongBuilder<R>> {
private LongBuilder(Completion<R> context, NameContext names) {
super(context, names, Schema.Type.LONG);
}
private static <R> LongBuilder<R> create(Completion<R> context, NameContext names) {
return new LongBuilder<>(context, names);
}
@Override
protected LongBuilder<R> self() {
return this;
}
/** complete building this type, return control to context **/
public R endLong() {
return super.end();
}
}
/**
* Builds an Avro float type with optional properties. Set properties with
* {@link #prop(String, String)}, and finalize with {@link #endFloat()}
**/
public static final class FloatBuilder<R> extends PrimitiveBuilder<R, FloatBuilder<R>> {
private FloatBuilder(Completion<R> context, NameContext names) {
super(context, names, Schema.Type.FLOAT);
}
private static <R> FloatBuilder<R> create(Completion<R> context, NameContext names) {
return new FloatBuilder<>(context, names);
}
@Override
protected FloatBuilder<R> self() {
return this;
}
/** complete building this type, return control to context **/
public R endFloat() {
return super.end();
}
}
/**
* Builds an Avro double type with optional properties. Set properties with
* {@link #prop(String, String)}, and finalize with {@link #endDouble()}
**/
public static final class DoubleBuilder<R> extends PrimitiveBuilder<R, DoubleBuilder<R>> {
private DoubleBuilder(Completion<R> context, NameContext names) {
super(context, names, Schema.Type.DOUBLE);
}
private static <R> DoubleBuilder<R> create(Completion<R> context, NameContext names) {
return new DoubleBuilder<>(context, names);
}
@Override
protected DoubleBuilder<R> self() {
return this;
}
/** complete building this type, return control to context **/
public R endDouble() {
return super.end();
}
}
/**
* Builds an Avro string type with optional properties. Set properties with
* {@link #prop(String, String)}, and finalize with {@link #endString()}
**/
public static final class StringBldr<R> extends PrimitiveBuilder<R, StringBldr<R>> {
private StringBldr(Completion<R> context, NameContext names) {
super(context, names, Schema.Type.STRING);
}
private static <R> StringBldr<R> create(Completion<R> context, NameContext names) {
return new StringBldr<>(context, names);
}
@Override
protected StringBldr<R> self() {
return this;
}
/** complete building this type, return control to context **/
public R endString() {
return super.end();
}
}
/**
* Builds an Avro bytes type with optional properties. Set properties with
* {@link #prop(String, String)}, and finalize with {@link #endBytes()}
**/
public static final class BytesBuilder<R> extends PrimitiveBuilder<R, BytesBuilder<R>> {
private BytesBuilder(Completion<R> context, NameContext names) {
super(context, names, Schema.Type.BYTES);
}
private static <R> BytesBuilder<R> create(Completion<R> context, NameContext names) {
return new BytesBuilder<>(context, names);
}
@Override
protected BytesBuilder<R> self() {
return this;
}
/** complete building this type, return control to context **/
public R endBytes() {
return super.end();
}
}
/**
* Builds an Avro null type with optional properties. Set properties with
* {@link #prop(String, String)}, and finalize with {@link #endNull()}
**/
public static final class NullBuilder<R> extends PrimitiveBuilder<R, NullBuilder<R>> {
private NullBuilder(Completion<R> context, NameContext names) {
super(context, names, Schema.Type.NULL);
}
private static <R> NullBuilder<R> create(Completion<R> context, NameContext names) {
return new NullBuilder<>(context, names);
}
@Override
protected NullBuilder<R> self() {
return this;
}
/** complete building this type, return control to context **/
public R endNull() {
return super.end();
}
}
/**
* Builds an Avro Fixed type with optional properties, namespace, doc, and
* aliases.
* <p/>
* Set properties with {@link #prop(String, String)}, namespace with
* {@link #namespace(String)}, doc with {@link #doc(String)}, and aliases with
* {@link #aliases(String[])}.
* <p/>
* The Fixed schema is finalized when its required size is set via
* {@link #size(int)}.
**/
public static final class FixedBuilder<R> extends NamespacedBuilder<R, FixedBuilder<R>> {
private FixedBuilder(Completion<R> context, NameContext names, String name) {
super(context, names, name);
}
private static <R> FixedBuilder<R> create(Completion<R> context, NameContext names, String name) {
return new FixedBuilder<>(context, names, name);
}
@Override
protected FixedBuilder<R> self() {
return this;
}
/** Configure this fixed type's size, and end its configuration. **/
public R size(int size) {
Schema schema = Schema.createFixed(name(), super.doc(), space(), size);
completeSchema(schema);
return context().complete(schema);
}
}
/**
* Builds an Avro Enum type with optional properties, namespace, doc, and
* aliases.
* <p/>
* Set properties with {@link #prop(String, String)}, namespace with
* {@link #namespace(String)}, doc with {@link #doc(String)}, and aliases with
* {@link #aliases(String[])}.
* <p/>
* The Enum schema is finalized when its required symbols are set via
* {@link #symbols(String[])}.
**/
public static final class EnumBuilder<R> extends NamespacedBuilder<R, EnumBuilder<R>> {
private EnumBuilder(Completion<R> context, NameContext names, String name) {
super(context, names, name);
}
private String enumDefault = null;
private static <R> EnumBuilder<R> create(Completion<R> context, NameContext names, String name) {
return new EnumBuilder<>(context, names, name);
}
@Override
protected EnumBuilder<R> self() {
return this;
}
/**
* Configure this enum type's symbols, and end its configuration. Populates the
* default if it was set.
**/
public R symbols(String... symbols) {
Schema schema = Schema.createEnum(name(), doc(), space(), Arrays.asList(symbols), this.enumDefault);
completeSchema(schema);
return context().complete(schema);
}
/** Set the default value of the enum. */
public EnumBuilder<R> defaultSymbol(String enumDefault) {
this.enumDefault = enumDefault;
return self();
}
}
/**
* Builds an Avro Map type with optional properties.
* <p/>
* Set properties with {@link #prop(String, String)}.
* <p/>
* The Map schema's properties are finalized when {@link #values()} or
* {@link #values(Schema)} is called.
**/
public static final class MapBuilder<R> extends PropBuilder<MapBuilder<R>> {
private final Completion<R> context;
private final NameContext names;
private MapBuilder(Completion<R> context, NameContext names) {
this.context = context;
this.names = names;
}
private static <R> MapBuilder<R> create(Completion<R> context, NameContext names) {
return new MapBuilder<>(context, names);
}
@Override
protected MapBuilder<R> self() {
return this;
}
/**
* Return a type builder for configuring the map's nested values schema. This
* builder will return control to the map's enclosing context when complete.
**/
public TypeBuilder<R> values() {
return new TypeBuilder<>(new MapCompletion<>(this, context), names);
}
/**
* Complete configuration of this map, setting the schema of the map values to
* the schema provided. Returns control to the enclosing context.
**/
public R values(Schema valueSchema) {
return new MapCompletion<>(this, context).complete(valueSchema);
}
}
/**
* Builds an Avro Array type with optional properties.
* <p/>
* Set properties with {@link #prop(String, String)}.
* <p/>
* The Array schema's properties are finalized when {@link #items()} or
* {@link #items(Schema)} is called.
**/
public static final class ArrayBuilder<R> extends PropBuilder<ArrayBuilder<R>> {
private final Completion<R> context;
private final NameContext names;
public ArrayBuilder(Completion<R> context, NameContext names) {
this.context = context;
this.names = names;
}
private static <R> ArrayBuilder<R> create(Completion<R> context, NameContext names) {
return new ArrayBuilder<>(context, names);
}
@Override
protected ArrayBuilder<R> self() {
return this;
}
/**
* Return a type builder for configuring the array's nested items schema. This
* builder will return control to the array's enclosing context when complete.
**/
public TypeBuilder<R> items() {
return new TypeBuilder<>(new ArrayCompletion<>(this, context), names);
}
/**
* Complete configuration of this array, setting the schema of the array items
* to the schema provided. Returns control to the enclosing context.
**/
public R items(Schema itemsSchema) {
return new ArrayCompletion<>(this, context).complete(itemsSchema);
}
}
/**
* internal class for passing the naming context around. This allows for the
* following:
* <li>Cache and re-use primitive schemas when they do not set properties.</li>
* <li>Provide a default namespace for nested contexts (as the JSON Schema spec
* does).</li>
* <li>Allow previously defined named types or primitive types to be referenced
* by name.</li>
**/
private static class NameContext {
private static final Set<String> PRIMITIVES = new HashSet<>();
static {
PRIMITIVES.add("null");
PRIMITIVES.add("boolean");
PRIMITIVES.add("int");
PRIMITIVES.add("long");
PRIMITIVES.add("float");
PRIMITIVES.add("double");
PRIMITIVES.add("bytes");
PRIMITIVES.add("string");
}
private final HashMap<String, Schema> schemas;
private final String namespace;
private NameContext() {
this.schemas = new HashMap<>();
this.namespace = null;
schemas.put("null", Schema.create(Schema.Type.NULL));
schemas.put("boolean", Schema.create(Schema.Type.BOOLEAN));
schemas.put("int", Schema.create(Schema.Type.INT));
schemas.put("long", Schema.create(Schema.Type.LONG));
schemas.put("float", Schema.create(Schema.Type.FLOAT));
schemas.put("double", Schema.create(Schema.Type.DOUBLE));
schemas.put("bytes", Schema.create(Schema.Type.BYTES));
schemas.put("string", Schema.create(Schema.Type.STRING));
}
private NameContext(HashMap<String, Schema> schemas, String namespace) {
this.schemas = schemas;
this.namespace = "".equals(namespace) ? null : namespace;
}
private NameContext namespace(String namespace) {
return new NameContext(schemas, namespace);
}
private Schema get(String name, String namespace) {
return getFullname(resolveName(name, namespace));
}
private Schema getFullname(String fullName) {
Schema schema = schemas.get(fullName);
if (schema == null) {
throw new SchemaParseException("Undefined name: " + fullName);
}
return schema;
}
private void put(Schema schema) {
String fullName = schema.getFullName();
if (schemas.containsKey(fullName)) {
throw new SchemaParseException("Can't redefine: " + fullName);
}
schemas.put(fullName, schema);
}
private String resolveName(String name, String space) {
if (PRIMITIVES.contains(name) && space == null) {
return name;
}
int lastDot = name.lastIndexOf('.');
if (lastDot < 0) { // short name
if (space == null) {
space = namespace;
}
if (space != null && !"".equals(space)) {
return space + "." + name;
}
}
return name;
}
}
/**
* A common API for building types within a context. BaseTypeBuilder can build
* all types other than Unions. {@link TypeBuilder} can additionally build
* Unions.
* <p/>
* The builder has two contexts:
* <li>A naming context provides a default namespace and allows for previously
* defined named types to be referenced from {@link #type(String)}</li>
* <li>A completion context representing the scope that the builder was created
* in. A builder created in a nested context (for example,
* {@link MapBuilder#values()} will have a completion context assigned by the
* {@link MapBuilder}</li>
**/
public static class BaseTypeBuilder<R> {
private final Completion<R> context;
private final NameContext names;
private BaseTypeBuilder(Completion<R> context, NameContext names) {
this.context = context;
this.names = names;
}
/** Use the schema provided as the type. **/
public final R type(Schema schema) {
return context.complete(schema);
}
/**
* Look up the type by name. This type must be previously defined in the context
* of this builder.
* <p/>
* The name may be fully qualified or a short name. If it is a short name, the
* default namespace of the current context will additionally be searched.
**/
public final R type(String name) {
return type(name, null);
}
/**
* Look up the type by name and namespace. This type must be previously defined
* in the context of this builder.
* <p/>
* The name may be fully qualified or a short name. If it is a fully qualified
* name, the namespace provided is ignored. If it is a short name, the namespace
* provided is used if not null, else the default namespace of the current
* context will be used.
**/
public final R type(String name, String namespace) {
return type(names.get(name, namespace));
}
/**
* A plain boolean type without custom properties. This is equivalent to:
*
* <pre>
* booleanBuilder().endBoolean();
* </pre>
*/
public final R booleanType() {
return booleanBuilder().endBoolean();
}
/**
* Build a boolean type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #booleanType()}.
*/
public final BooleanBuilder<R> booleanBuilder() {
return BooleanBuilder.create(context, names);
}
/**
* A plain int type without custom properties. This is equivalent to:
*
* <pre>
* intBuilder().endInt();
* </pre>
*/
public final R intType() {
return intBuilder().endInt();
}
/**
* Build an int type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #intType()}.
*/
public final IntBuilder<R> intBuilder() {
return IntBuilder.create(context, names);
}
/**
* A plain long type without custom properties. This is equivalent to:
*
* <pre>
* longBuilder().endLong();
* </pre>
*/
public final R longType() {
return longBuilder().endLong();
}
/**
* Build a long type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #longType()}.
*/
public final LongBuilder<R> longBuilder() {
return LongBuilder.create(context, names);
}
/**
* A plain float type without custom properties. This is equivalent to:
*
* <pre>
* floatBuilder().endFloat();
* </pre>
*/
public final R floatType() {
return floatBuilder().endFloat();
}
/**
* Build a float type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #floatType()}.
*/
public final FloatBuilder<R> floatBuilder() {
return FloatBuilder.create(context, names);
}
/**
* A plain double type without custom properties. This is equivalent to:
*
* <pre>
* doubleBuilder().endDouble();
* </pre>
*/
public final R doubleType() {
return doubleBuilder().endDouble();
}
/**
* Build a double type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #doubleType()}.
*/
public final DoubleBuilder<R> doubleBuilder() {
return DoubleBuilder.create(context, names);
}
/**
* A plain string type without custom properties. This is equivalent to:
*
* <pre>
* stringBuilder().endString();
* </pre>
*/
public final R stringType() {
return stringBuilder().endString();
}
/**
* Build a string type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #stringType()}.
*/
public final StringBldr<R> stringBuilder() {
return StringBldr.create(context, names);
}
/**
* A plain bytes type without custom properties. This is equivalent to:
*
* <pre>
* bytesBuilder().endBytes();
* </pre>
*/
public final R bytesType() {
return bytesBuilder().endBytes();
}
/**
* Build a bytes type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #bytesType()}.
*/
public final BytesBuilder<R> bytesBuilder() {
return BytesBuilder.create(context, names);
}
/**
* A plain null type without custom properties. This is equivalent to:
*
* <pre>
* nullBuilder().endNull();
* </pre>
*/
public final R nullType() {
return nullBuilder().endNull();
}
/**
* Build a null type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #nullType()}.
*/
public final NullBuilder<R> nullBuilder() {
return NullBuilder.create(context, names);
}
/**
* Build an Avro map type Example usage:
*
* <pre>
* map().values().intType()
* </pre>
*
* Equivalent to Avro JSON Schema:
*
* <pre>
* {"type":"map", "values":"int"}
* </pre>
**/
public final MapBuilder<R> map() {
return MapBuilder.create(context, names);
}
/**
* Build an Avro array type Example usage:
*
* <pre>
* array().items().longType()
* </pre>
*
* Equivalent to Avro JSON Schema:
*
* <pre>
* {"type":"array", "values":"long"}
* </pre>
**/
public final ArrayBuilder<R> array() {
return ArrayBuilder.create(context, names);
}
/**
* Build an Avro fixed type. Example usage:
*
* <pre>
* fixed("com.foo.IPv4").size(4)
* </pre>
*
* Equivalent to Avro JSON Schema:
*
* <pre>
* {"type":"fixed", "name":"com.foo.IPv4", "size":4}
* </pre>
**/
public final FixedBuilder<R> fixed(String name) {
return FixedBuilder.create(context, names, name);
}
/**
* Build an Avro enum type. Example usage:
*
* <pre>
* enumeration("Suits").namespace("org.cards").doc("card suit names").defaultSymbol("HEART").symbols("HEART", "SPADE",
* "DIAMOND", "CLUB")
* </pre>
*
* Equivalent to Avro JSON Schema:
*
* <pre>
* {"type":"enum", "name":"Suits", "namespace":"org.cards",
* "doc":"card suit names", "symbols":[
* "HEART", "SPADE", "DIAMOND", "CLUB"], "default":"HEART"}
* </pre>
**/
public final EnumBuilder<R> enumeration(String name) {
return EnumBuilder.create(context, names, name);
}
/**
* Build an Avro record type. Example usage:
*
* <pre>
* record("com.foo.Foo").fields().name("field1").typeInt().intDefault(1).name("field2").typeString().noDefault()
* .name("field3").optional().typeFixed("FooFixed").size(4).endRecord()
* </pre>
*
* Equivalent to Avro JSON Schema:
*
* <pre>
* {"type":"record", "name":"com.foo.Foo", "fields": [
* {"name":"field1", "type":"int", "default":1},
* {"name":"field2", "type":"string"},
* {"name":"field3", "type":[
* null, {"type":"fixed", "name":"FooFixed", "size":4}
* ]}
* ]}
* </pre>
**/
public final RecordBuilder<R> record(String name) {
return RecordBuilder.create(context, names, name);
}
/**
* Build an Avro union schema type. Example usage:
*
* <pre>
* unionOf().stringType().and().bytesType().endUnion()
* </pre>
**/
protected BaseTypeBuilder<UnionAccumulator<R>> unionOf() {
return UnionBuilder.create(context, names);
}
/**
* A shortcut for building a union of a type and null.
* <p/>
* For example, the code snippets below are equivalent:
*
* <pre>
* nullable().booleanType()
* </pre>
*
* <pre>
* unionOf().booleanType().and().nullType().endUnion()
* </pre>
**/
protected BaseTypeBuilder<R> nullable() {
return new BaseTypeBuilder<>(new NullableCompletion<>(context), names);
}
}
/**
* A Builder for creating any Avro schema type.
**/
public static final class TypeBuilder<R> extends BaseTypeBuilder<R> {
private TypeBuilder(Completion<R> context, NameContext names) {
super(context, names);
}
@Override
public BaseTypeBuilder<UnionAccumulator<R>> unionOf() {
return super.unionOf();
}
@Override
public BaseTypeBuilder<R> nullable() {
return super.nullable();
}
}
/** A special builder for unions. Unions cannot nest unions directly **/
private static final class UnionBuilder<R> extends BaseTypeBuilder<UnionAccumulator<R>> {
private UnionBuilder(Completion<R> context, NameContext names) {
this(context, names, Collections.emptyList());
}
private static <R> UnionBuilder<R> create(Completion<R> context, NameContext names) {
return new UnionBuilder<>(context, names);
}
private UnionBuilder(Completion<R> context, NameContext names, List<Schema> schemas) {
super(new UnionCompletion<>(context, names, schemas), names);
}
}
/**
* A special Builder for Record fields. The API is very similar to
* {@link BaseTypeBuilder}. However, fields have their own names, properties,
* and default values.
* <p/>
* The methods on this class create builder instances that return their control
* to the {@link FieldAssembler} of the enclosing record context after
* configuring a default for the field.
* <p/>
* For example, an int field with default value 1:
*
* <pre>
* intSimple().withDefault(1);
* </pre>
*
* or an array with items that are optional int types:
*
* <pre>
* array().items().optional().intType();
* </pre>
*/
public static class BaseFieldTypeBuilder<R> {
protected final FieldBuilder<R> bldr;
protected final NameContext names;
private final CompletionWrapper wrapper;
protected BaseFieldTypeBuilder(FieldBuilder<R> bldr, CompletionWrapper wrapper) {
this.bldr = bldr;
this.names = bldr.names();
this.wrapper = wrapper;
}
/**
* A plain boolean type without custom properties. This is equivalent to:
*
* <pre>
* booleanBuilder().endBoolean();
* </pre>
*/
public final BooleanDefault<R> booleanType() {
return booleanBuilder().endBoolean();
}
/**
* Build a boolean type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #booleanType()}.
*/
public final BooleanBuilder<BooleanDefault<R>> booleanBuilder() {
return BooleanBuilder.create(wrap(new BooleanDefault<>(bldr)), names);
}
/**
* A plain int type without custom properties. This is equivalent to:
*
* <pre>
* intBuilder().endInt();
* </pre>
*/
public final IntDefault<R> intType() {
return intBuilder().endInt();
}
/**
* Build an int type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #intType()}.
*/
public final IntBuilder<IntDefault<R>> intBuilder() {
return IntBuilder.create(wrap(new IntDefault<>(bldr)), names);
}
/**
* A plain long type without custom properties. This is equivalent to:
*
* <pre>
* longBuilder().endLong();
* </pre>
*/
public final LongDefault<R> longType() {
return longBuilder().endLong();
}
/**
* Build a long type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #longType()}.
*/
public final LongBuilder<LongDefault<R>> longBuilder() {
return LongBuilder.create(wrap(new LongDefault<>(bldr)), names);
}
/**
* A plain float type without custom properties. This is equivalent to:
*
* <pre>
* floatBuilder().endFloat();
* </pre>
*/
public final FloatDefault<R> floatType() {
return floatBuilder().endFloat();
}
/**
* Build a float type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #floatType()}.
*/
public final FloatBuilder<FloatDefault<R>> floatBuilder() {
return FloatBuilder.create(wrap(new FloatDefault<>(bldr)), names);
}
/**
* A plain double type without custom properties. This is equivalent to:
*
* <pre>
* doubleBuilder().endDouble();
* </pre>
*/
public final DoubleDefault<R> doubleType() {
return doubleBuilder().endDouble();
}
/**
* Build a double type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #doubleType()}.
*/
public final DoubleBuilder<DoubleDefault<R>> doubleBuilder() {
return DoubleBuilder.create(wrap(new DoubleDefault<>(bldr)), names);
}
/**
* A plain string type without custom properties. This is equivalent to:
*
* <pre>
* stringBuilder().endString();
* </pre>
*/
public final StringDefault<R> stringType() {
return stringBuilder().endString();
}
/**
* Build a string type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #stringType()}.
*/
public final StringBldr<StringDefault<R>> stringBuilder() {
return StringBldr.create(wrap(new StringDefault<>(bldr)), names);
}
/**
* A plain bytes type without custom properties. This is equivalent to:
*
* <pre>
* bytesBuilder().endBytes();
* </pre>
*/
public final BytesDefault<R> bytesType() {
return bytesBuilder().endBytes();
}
/**
* Build a bytes type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #bytesType()}.
*/
public final BytesBuilder<BytesDefault<R>> bytesBuilder() {
return BytesBuilder.create(wrap(new BytesDefault<>(bldr)), names);
}
/**
* A plain null type without custom properties. This is equivalent to:
*
* <pre>
* nullBuilder().endNull();
* </pre>
*/
public final NullDefault<R> nullType() {
return nullBuilder().endNull();
}
/**
* Build a null type that can set custom properties. If custom properties are
* not needed it is simpler to use {@link #nullType()}.
*/
public final NullBuilder<NullDefault<R>> nullBuilder() {
return NullBuilder.create(wrap(new NullDefault<>(bldr)), names);
}
/**
 * Build an Avro map type.
 *
 * @return a builder for the map's value type
 **/
public final MapBuilder<MapDefault<R>> map() {
  return MapBuilder.create(wrap(new MapDefault<>(bldr)), names);
}
/**
 * Build an Avro array type.
 *
 * @return a builder for the array's item type
 **/
public final ArrayBuilder<ArrayDefault<R>> array() {
  return ArrayBuilder.create(wrap(new ArrayDefault<>(bldr)), names);
}
/**
 * Build an Avro fixed type.
 *
 * @param name the name of the fixed type (registered in the name context)
 **/
public final FixedBuilder<FixedDefault<R>> fixed(String name) {
  return FixedBuilder.create(wrap(new FixedDefault<>(bldr)), names, name);
}
/**
 * Build an Avro enum type.
 *
 * @param name the name of the enum type (registered in the name context)
 **/
public final EnumBuilder<EnumDefault<R>> enumeration(String name) {
  return EnumBuilder.create(wrap(new EnumDefault<>(bldr)), names, name);
}
/**
 * Build an Avro record type.
 *
 * @param name the name of the record type (registered in the name context)
 **/
public final RecordBuilder<RecordDefault<R>> record(String name) {
  return RecordBuilder.create(wrap(new RecordDefault<>(bldr)), names, name);
}
/**
 * Applies this builder's {@link CompletionWrapper} (when one was configured,
 * e.g. for nullable fields) to the given completion; otherwise the completion
 * is returned unchanged.
 */
private <C> Completion<C> wrap(Completion<C> completion) {
  return (wrapper == null) ? completion : wrapper.wrap(completion);
}
}
/**
 * FieldTypeBuilder adds {@link #unionOf()}, {@link #nullable()}, and
 * {@link #optional()} to BaseFieldTypeBuilder.
 **/
public static final class FieldTypeBuilder<R> extends BaseFieldTypeBuilder<R> {
  private FieldTypeBuilder(FieldBuilder<R> bldr) {
    super(bldr, null); // no wrapper: plain types complete the field as-is
  }

  /** Build an Avro union schema type. **/
  public UnionFieldTypeBuilder<R> unionOf() {
    return new UnionFieldTypeBuilder<>(bldr);
  }

  /**
   * A shortcut for building a union of a type and null, with an optional default
   * value of the non-null type.
   * <p/>
   * For example, the two code snippets below are equivalent:
   *
   * <pre>
   * nullable().booleanType().booleanDefault(true)
   * </pre>
   *
   * <pre>
   * unionOf().booleanType().and().nullType().endUnion().booleanDefault(true)
   * </pre>
   **/
  public BaseFieldTypeBuilder<R> nullable() {
    // the wrapper turns every completed schema into union(schema, null)
    return new BaseFieldTypeBuilder<>(bldr, new NullableCompletionWrapper());
  }

  /**
   * A shortcut for building a union of null and a type, with a null default.
   * <p/>
   * For example, the two code snippets below are equivalent:
   *
   * <pre>
   * optional().booleanType()
   * </pre>
   *
   * <pre>
   * unionOf().nullType().and().booleanType().endUnion().nullDefault()
   * </pre>
   */
  public BaseTypeBuilder<FieldAssembler<R>> optional() {
    return new BaseTypeBuilder<>(new OptionalCompletion<>(bldr), names);
  }
}
/**
 * Builder for a union field. The first type in the union corresponds to the
 * possible default value type.
 */
public static final class UnionFieldTypeBuilder<R> {
  private final FieldBuilder<R> bldr; // the field completed when the union ends
  private final NameContext names; // lookup/registration context for named schemas

  private UnionFieldTypeBuilder(FieldBuilder<R> bldr) {
    this.bldr = bldr;
    this.names = bldr.names();
  }

  /**
   * A plain boolean type without custom properties. This is equivalent to:
   *
   * <pre>
   * booleanBuilder().endBoolean();
   * </pre>
   */
  public UnionAccumulator<BooleanDefault<R>> booleanType() {
    return booleanBuilder().endBoolean();
  }

  /**
   * Build a boolean type that can set custom properties. If custom properties are
   * not needed it is simpler to use {@link #booleanType()}.
   */
  public BooleanBuilder<UnionAccumulator<BooleanDefault<R>>> booleanBuilder() {
    return BooleanBuilder.create(completion(new BooleanDefault<>(bldr)), names);
  }

  /**
   * A plain int type without custom properties. This is equivalent to:
   *
   * <pre>
   * intBuilder().endInt();
   * </pre>
   */
  public UnionAccumulator<IntDefault<R>> intType() {
    return intBuilder().endInt();
  }

  /**
   * Build an int type that can set custom properties. If custom properties are
   * not needed it is simpler to use {@link #intType()}.
   */
  public IntBuilder<UnionAccumulator<IntDefault<R>>> intBuilder() {
    return IntBuilder.create(completion(new IntDefault<>(bldr)), names);
  }

  /**
   * A plain long type without custom properties. This is equivalent to:
   *
   * <pre>
   * longBuilder().endLong();
   * </pre>
   */
  public UnionAccumulator<LongDefault<R>> longType() {
    return longBuilder().endLong();
  }

  /**
   * Build a long type that can set custom properties. If custom properties are
   * not needed it is simpler to use {@link #longType()}.
   */
  public LongBuilder<UnionAccumulator<LongDefault<R>>> longBuilder() {
    return LongBuilder.create(completion(new LongDefault<>(bldr)), names);
  }

  /**
   * A plain float type without custom properties. This is equivalent to:
   *
   * <pre>
   * floatBuilder().endFloat();
   * </pre>
   */
  public UnionAccumulator<FloatDefault<R>> floatType() {
    return floatBuilder().endFloat();
  }

  /**
   * Build a float type that can set custom properties. If custom properties are
   * not needed it is simpler to use {@link #floatType()}.
   */
  public FloatBuilder<UnionAccumulator<FloatDefault<R>>> floatBuilder() {
    return FloatBuilder.create(completion(new FloatDefault<>(bldr)), names);
  }

  /**
   * A plain double type without custom properties. This is equivalent to:
   *
   * <pre>
   * doubleBuilder().endDouble();
   * </pre>
   */
  public UnionAccumulator<DoubleDefault<R>> doubleType() {
    return doubleBuilder().endDouble();
  }

  /**
   * Build a double type that can set custom properties. If custom properties are
   * not needed it is simpler to use {@link #doubleType()}.
   */
  public DoubleBuilder<UnionAccumulator<DoubleDefault<R>>> doubleBuilder() {
    return DoubleBuilder.create(completion(new DoubleDefault<>(bldr)), names);
  }

  /**
   * A plain string type without custom properties. This is equivalent to:
   *
   * <pre>
   * stringBuilder().endString();
   * </pre>
   */
  public UnionAccumulator<StringDefault<R>> stringType() {
    return stringBuilder().endString();
  }

  /**
   * Build a string type that can set custom properties. If custom properties are
   * not needed it is simpler to use {@link #stringType()}.
   */
  public StringBldr<UnionAccumulator<StringDefault<R>>> stringBuilder() {
    return StringBldr.create(completion(new StringDefault<>(bldr)), names);
  }

  /**
   * A plain bytes type without custom properties. This is equivalent to:
   *
   * <pre>
   * bytesBuilder().endBytes();
   * </pre>
   */
  public UnionAccumulator<BytesDefault<R>> bytesType() {
    return bytesBuilder().endBytes();
  }

  /**
   * Build a bytes type that can set custom properties. If custom properties are
   * not needed it is simpler to use {@link #bytesType()}.
   */
  public BytesBuilder<UnionAccumulator<BytesDefault<R>>> bytesBuilder() {
    return BytesBuilder.create(completion(new BytesDefault<>(bldr)), names);
  }

  /**
   * A plain null type without custom properties. This is equivalent to:
   *
   * <pre>
   * nullBuilder().endNull();
   * </pre>
   */
  public UnionAccumulator<NullDefault<R>> nullType() {
    return nullBuilder().endNull();
  }

  /**
   * Build a null type that can set custom properties. If custom properties are
   * not needed it is simpler to use {@link #nullType()}.
   */
  public NullBuilder<UnionAccumulator<NullDefault<R>>> nullBuilder() {
    return NullBuilder.create(completion(new NullDefault<>(bldr)), names);
  }

  /** Build an Avro map type **/
  public MapBuilder<UnionAccumulator<MapDefault<R>>> map() {
    return MapBuilder.create(completion(new MapDefault<>(bldr)), names);
  }

  /** Build an Avro array type **/
  public ArrayBuilder<UnionAccumulator<ArrayDefault<R>>> array() {
    return ArrayBuilder.create(completion(new ArrayDefault<>(bldr)), names);
  }

  /** Build an Avro fixed type. **/
  public FixedBuilder<UnionAccumulator<FixedDefault<R>>> fixed(String name) {
    return FixedBuilder.create(completion(new FixedDefault<>(bldr)), names, name);
  }

  /** Build an Avro enum type. **/
  public EnumBuilder<UnionAccumulator<EnumDefault<R>>> enumeration(String name) {
    return EnumBuilder.create(completion(new EnumDefault<>(bldr)), names, name);
  }

  /** Build an Avro record type. **/
  public RecordBuilder<UnionAccumulator<RecordDefault<R>>> record(String name) {
    return RecordBuilder.create(completion(new RecordDefault<>(bldr)), names, name);
  }

  // Wraps the context so that each completed branch starts accumulating
  // the union's member schemas (beginning with an empty list).
  private <C> UnionCompletion<C> completion(Completion<C> context) {
    return new UnionCompletion<>(context, names, Collections.emptyList());
  }
}
/** Builds an Avro record type with optional doc, namespace, and aliases. **/
public final static class RecordBuilder<R> extends NamespacedBuilder<R, RecordBuilder<R>> {
  private RecordBuilder(Completion<R> context, NameContext names, String name) {
    super(context, names, name);
  }

  private static <R> RecordBuilder<R> create(Completion<R> context, NameContext names, String name) {
    return new RecordBuilder<>(context, names, name);
  }

  @Override
  protected RecordBuilder<R> self() {
    return this;
  }

  /**
   * Finalize the record's name, namespace, and doc, and start assembling its
   * fields. The record schema is registered before its fields are set so that
   * fields may refer to it recursively.
   */
  public FieldAssembler<R> fields() {
    Schema record = Schema.createRecord(name(), doc(), space(), false);
    // place the record in the name context, fields yet to be set.
    completeSchema(record);
    return new FieldAssembler<>(context(), names().namespace(record.getNamespace()), record);
  }
}
/**
 * Accumulates the fields of a record under construction. Fields are added one
 * at a time via {@link #name(String)} or the required/optional/nullable
 * shortcuts; {@link #endRecord()} attaches them to the record schema.
 **/
public final static class FieldAssembler<R> {
  private final List<Field> fields = new ArrayList<>(); // fields added so far, in declaration order
  private final Completion<R> context;
  private final NameContext names;
  private final Schema record; // record schema; fields are attached in endRecord()

  private FieldAssembler(Completion<R> context, NameContext names, Schema record) {
    this.context = context;
    this.names = names;
    this.record = record;
  }

  /**
   * Add a field with the given name.
   *
   * @return A {@link FieldBuilder} for the given name.
   */
  public FieldBuilder<R> name(String fieldName) {
    return new FieldBuilder<>(this, names, fieldName);
  }

  /**
   * Shortcut for creating a boolean field with the given name and no default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().booleanType().noDefault()
   * </pre>
   */
  public FieldAssembler<R> requiredBoolean(String fieldName) {
    return name(fieldName).type().booleanType().noDefault();
  }

  /**
   * Shortcut for creating an optional boolean field: a union of null and boolean
   * with null default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().optional().booleanType()
   * </pre>
   */
  public FieldAssembler<R> optionalBoolean(String fieldName) {
    return name(fieldName).type().optional().booleanType();
  }

  /**
   * Shortcut for creating a nullable boolean field: a union of boolean and null
   * with a boolean default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().nullable().booleanType().booleanDefault(defaultVal)
   * </pre>
   */
  public FieldAssembler<R> nullableBoolean(String fieldName, boolean defaultVal) {
    return name(fieldName).type().nullable().booleanType().booleanDefault(defaultVal);
  }

  /**
   * Shortcut for creating an int field with the given name and no default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().intType().noDefault()
   * </pre>
   */
  public FieldAssembler<R> requiredInt(String fieldName) {
    return name(fieldName).type().intType().noDefault();
  }

  /**
   * Shortcut for creating an optional int field: a union of null and int with
   * null default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().optional().intType()
   * </pre>
   */
  public FieldAssembler<R> optionalInt(String fieldName) {
    return name(fieldName).type().optional().intType();
  }

  /**
   * Shortcut for creating a nullable int field: a union of int and null with an
   * int default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().nullable().intType().intDefault(defaultVal)
   * </pre>
   */
  public FieldAssembler<R> nullableInt(String fieldName, int defaultVal) {
    return name(fieldName).type().nullable().intType().intDefault(defaultVal);
  }

  /**
   * Shortcut for creating a long field with the given name and no default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().longType().noDefault()
   * </pre>
   */
  public FieldAssembler<R> requiredLong(String fieldName) {
    return name(fieldName).type().longType().noDefault();
  }

  /**
   * Shortcut for creating an optional long field: a union of null and long with
   * null default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().optional().longType()
   * </pre>
   */
  public FieldAssembler<R> optionalLong(String fieldName) {
    return name(fieldName).type().optional().longType();
  }

  /**
   * Shortcut for creating a nullable long field: a union of long and null with a
   * long default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().nullable().longType().longDefault(defaultVal)
   * </pre>
   */
  public FieldAssembler<R> nullableLong(String fieldName, long defaultVal) {
    return name(fieldName).type().nullable().longType().longDefault(defaultVal);
  }

  /**
   * Shortcut for creating a float field with the given name and no default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().floatType().noDefault()
   * </pre>
   */
  public FieldAssembler<R> requiredFloat(String fieldName) {
    return name(fieldName).type().floatType().noDefault();
  }

  /**
   * Shortcut for creating an optional float field: a union of null and float with
   * null default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().optional().floatType()
   * </pre>
   */
  public FieldAssembler<R> optionalFloat(String fieldName) {
    return name(fieldName).type().optional().floatType();
  }

  /**
   * Shortcut for creating a nullable float field: a union of float and null with
   * a float default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().nullable().floatType().floatDefault(defaultVal)
   * </pre>
   */
  public FieldAssembler<R> nullableFloat(String fieldName, float defaultVal) {
    return name(fieldName).type().nullable().floatType().floatDefault(defaultVal);
  }

  /**
   * Shortcut for creating a double field with the given name and no default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().doubleType().noDefault()
   * </pre>
   */
  public FieldAssembler<R> requiredDouble(String fieldName) {
    return name(fieldName).type().doubleType().noDefault();
  }

  /**
   * Shortcut for creating an optional double field: a union of null and double
   * with null default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().optional().doubleType()
   * </pre>
   */
  public FieldAssembler<R> optionalDouble(String fieldName) {
    return name(fieldName).type().optional().doubleType();
  }

  /**
   * Shortcut for creating a nullable double field: a union of double and null
   * with a double default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().nullable().doubleType().doubleDefault(defaultVal)
   * </pre>
   */
  public FieldAssembler<R> nullableDouble(String fieldName, double defaultVal) {
    return name(fieldName).type().nullable().doubleType().doubleDefault(defaultVal);
  }

  /**
   * Shortcut for creating a string field with the given name and no default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().stringType().noDefault()
   * </pre>
   */
  public FieldAssembler<R> requiredString(String fieldName) {
    return name(fieldName).type().stringType().noDefault();
  }

  /**
   * Shortcut for creating an optional string field: a union of null and string
   * with null default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().optional().stringType()
   * </pre>
   */
  public FieldAssembler<R> optionalString(String fieldName) {
    return name(fieldName).type().optional().stringType();
  }

  /**
   * Shortcut for creating a nullable string field: a union of string and null
   * with a string default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().nullable().stringType().stringDefault(defaultVal)
   * </pre>
   */
  public FieldAssembler<R> nullableString(String fieldName, String defaultVal) {
    return name(fieldName).type().nullable().stringType().stringDefault(defaultVal);
  }

  /**
   * Shortcut for creating a bytes field with the given name and no default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().bytesType().noDefault()
   * </pre>
   */
  public FieldAssembler<R> requiredBytes(String fieldName) {
    return name(fieldName).type().bytesType().noDefault();
  }

  /**
   * Shortcut for creating an optional bytes field: a union of null and bytes with
   * null default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().optional().bytesType()
   * </pre>
   */
  public FieldAssembler<R> optionalBytes(String fieldName) {
    return name(fieldName).type().optional().bytesType();
  }

  /**
   * Shortcut for creating a nullable bytes field: a union of bytes and null with
   * a bytes default.
   * <p/>
   * This is equivalent to:
   *
   * <pre>
   * name(fieldName).type().nullable().bytesType().bytesDefault(defaultVal)
   * </pre>
   */
  public FieldAssembler<R> nullableBytes(String fieldName, byte[] defaultVal) {
    return name(fieldName).type().nullable().bytesType().bytesDefault(defaultVal);
  }

  /**
   * End adding fields to this record, returning control to the context that this
   * record builder was created in.
   */
  public R endRecord() {
    record.setFields(fields);
    return context.complete(record);
  }

  // Called by FieldBuilder.completeField() once a field is fully configured.
  private FieldAssembler<R> addField(Field field) {
    fields.add(field);
    return this;
  }
}
/**
 * Builds a Field in the context of a {@link FieldAssembler}.
 *
 * Usage is to first configure any of the optional parameters and then to call
 * one of the type methods to complete the field. For example
 *
 * <pre>
 * .namespace("org.apache.example").orderDescending().type()
 * </pre>
 *
 * Optional parameters for a field are namespace, doc, order, and aliases.
 */
public final static class FieldBuilder<R> extends NamedBuilder<FieldBuilder<R>> {
  private final FieldAssembler<R> fields;
  private Schema.Field.Order order = Schema.Field.Order.ASCENDING; // sort order; ascending unless overridden
  private boolean validatingDefaults = true; // whether default values are validated on field completion

  private FieldBuilder(FieldAssembler<R> fields, NameContext names, String name) {
    super(names, name);
    this.fields = fields;
  }

  /** Set this field to have ascending order. Ascending is the default **/
  public FieldBuilder<R> orderAscending() {
    order = Schema.Field.Order.ASCENDING;
    return self();
  }

  /** Set this field to have descending order. (Ascending is the default.) **/
  public FieldBuilder<R> orderDescending() {
    order = Schema.Field.Order.DESCENDING;
    return self();
  }

  /** Set this field to ignore order. **/
  public FieldBuilder<R> orderIgnore() {
    order = Schema.Field.Order.IGNORE;
    return self();
  }

  /**
   * Validate field default value during {@link #completeField(Schema, JsonNode)}.
   * This is the default behavior.
   **/
  public FieldBuilder<R> validatingDefaults() {
    validatingDefaults = true;
    return self();
  }

  /**
   * Skip field default value validation during
   * {@link #completeField(Schema, JsonNode)}}
   **/
  public FieldBuilder<R> notValidatingDefaults() {
    validatingDefaults = false;
    return self();
  }

  /**
   * Final step in configuring this field, finalizing name, namespace, alias, and
   * order.
   *
   * @return A builder for the field's type and default value.
   */
  public FieldTypeBuilder<R> type() {
    return new FieldTypeBuilder<>(this);
  }

  /**
   * Final step in configuring this field, finalizing name, namespace, alias, and
   * order. Sets the field's type to the provided schema, returns a
   * {@link GenericDefault}.
   */
  public GenericDefault<R> type(Schema type) {
    return new GenericDefault<>(this, type);
  }

  /**
   * Final step in configuring this field, finalizing name, namespace, alias, and
   * order. Sets the field's type to the schema by name reference.
   * <p/>
   * The name must correspond with a named schema that has already been created in
   * the context of this builder. The name may be a fully qualified name, or a
   * short name. If it is a short name, the namespace context of this builder will
   * be used.
   * <p/>
   * The name and namespace context rules are the same as the Avro schema JSON
   * specification.
   */
  public GenericDefault<R> type(String name) {
    return type(name, null);
  }

  /**
   * Final step in configuring this field, finalizing name, namespace, alias, and
   * order. Sets the field's type to the schema by name reference.
   * <p/>
   * The name must correspond with a named schema that has already been created in
   * the context of this builder. The name may be a fully qualified name, or a
   * short name. If it is a full name, the namespace is ignored. If it is a short
   * name, the namespace provided is used. If the namespace provided is null, the
   * namespace context of this builder will be used.
   * <p/>
   * The name and namespace context rules are the same as the Avro schema JSON
   * specification.
   */
  public GenericDefault<R> type(String name, String namespace) {
    Schema schema = names().get(name, namespace);
    return type(schema);
  }

  // Converts an arbitrary default value object to its JSON representation;
  // a null defaultVal means an explicit JSON null default.
  private FieldAssembler<R> completeField(Schema schema, Object defaultVal) {
    JsonNode defaultNode = defaultVal == null ? NullNode.getInstance() : toJsonNode(defaultVal);
    return completeField(schema, defaultNode);
  }

  // Completes the field with no default value at all.
  private FieldAssembler<R> completeField(Schema schema) {
    return completeField(schema, (JsonNode) null);
  }

  // Creates the Field, attaches props and aliases, and hands it back to the
  // FieldAssembler this builder was created from.
  private FieldAssembler<R> completeField(Schema schema, JsonNode defaultVal) {
    Field field = new Field(name(), schema, doc(), defaultVal, validatingDefaults, order);
    addPropsTo(field);
    addAliasesTo(field);
    return fields.addField(field);
  }

  @Override
  protected FieldBuilder<R> self() {
    return this;
  }
}
/** Abstract base class for field defaults. **/
public static abstract class FieldDefault<R, S extends FieldDefault<R, S>> extends Completion<S> {
  private final FieldBuilder<R> field;
  private Schema schema; // the field's completed type; set by complete()

  FieldDefault(FieldBuilder<R> field) {
    this.field = field;
  }

  /** Completes this field with no default value **/
  public final FieldAssembler<R> noDefault() {
    return field.completeField(schema);
  }

  // Invoked by subclasses' type-specific xxxDefault(...) methods.
  private FieldAssembler<R> usingDefault(Object defaultVal) {
    return field.completeField(schema, defaultVal);
  }

  // Captures the completed schema and returns the concrete subtype, allowing
  // the caller to chain a type-specific default method or noDefault().
  @Override
  final S complete(Schema schema) {
    this.schema = schema;
    return self();
  }

  abstract S self();
}
/** Choose whether to use a default value for the field or not. **/
public static class BooleanDefault<R> extends FieldDefault<R, BooleanDefault<R>> {
  private BooleanDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided **/
  public final FieldAssembler<R> booleanDefault(boolean defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final BooleanDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class IntDefault<R> extends FieldDefault<R, IntDefault<R>> {
  private IntDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided **/
  public final FieldAssembler<R> intDefault(int defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final IntDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class LongDefault<R> extends FieldDefault<R, LongDefault<R>> {
  private LongDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided **/
  public final FieldAssembler<R> longDefault(long defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final LongDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class FloatDefault<R> extends FieldDefault<R, FloatDefault<R>> {
  private FloatDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided **/
  public final FieldAssembler<R> floatDefault(float defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final FloatDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class DoubleDefault<R> extends FieldDefault<R, DoubleDefault<R>> {
  private DoubleDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided **/
  public final FieldAssembler<R> doubleDefault(double defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final DoubleDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class StringDefault<R> extends FieldDefault<R, StringDefault<R>> {
  private StringDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided. Cannot be null. **/
  public final FieldAssembler<R> stringDefault(String defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final StringDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class BytesDefault<R> extends FieldDefault<R, BytesDefault<R>> {
  private BytesDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided, cannot be null **/
  public final FieldAssembler<R> bytesDefault(byte[] defaultVal) {
    return super.usingDefault(ByteBuffer.wrap(defaultVal));
  }

  /** Completes this field with the default value provided, cannot be null **/
  public final FieldAssembler<R> bytesDefault(ByteBuffer defaultVal) {
    return super.usingDefault(defaultVal);
  }

  /**
   * Completes this field with the default value provided, cannot be null. The
   * string is interpreted as a byte[], with each character code point value
   * equalling the byte value, as in the Avro spec JSON default.
   **/
  public final FieldAssembler<R> bytesDefault(String defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final BytesDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class NullDefault<R> extends FieldDefault<R, NullDefault<R>> {
  private NullDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with a default value of null **/
  public final FieldAssembler<R> nullDefault() {
    return super.usingDefault(null);
  }

  @Override
  final NullDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class MapDefault<R> extends FieldDefault<R, MapDefault<R>> {
  private MapDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided, cannot be null **/
  public final <K, V> FieldAssembler<R> mapDefault(Map<K, V> defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final MapDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class ArrayDefault<R> extends FieldDefault<R, ArrayDefault<R>> {
  private ArrayDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided, cannot be null **/
  public final <V> FieldAssembler<R> arrayDefault(List<V> defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final ArrayDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class FixedDefault<R> extends FieldDefault<R, FixedDefault<R>> {
  private FixedDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided, cannot be null **/
  public final FieldAssembler<R> fixedDefault(byte[] defaultVal) {
    return super.usingDefault(ByteBuffer.wrap(defaultVal));
  }

  /** Completes this field with the default value provided, cannot be null **/
  public final FieldAssembler<R> fixedDefault(ByteBuffer defaultVal) {
    return super.usingDefault(defaultVal);
  }

  /**
   * Completes this field with the default value provided, cannot be null. The
   * string is interpreted as a byte[], with each character code point value
   * equalling the byte value, as in the Avro spec JSON default.
   **/
  public final FieldAssembler<R> fixedDefault(String defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final FixedDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class EnumDefault<R> extends FieldDefault<R, EnumDefault<R>> {
  private EnumDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided, cannot be null **/
  public final FieldAssembler<R> enumDefault(String defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final EnumDefault<R> self() {
    return this;
  }
}
/** Choose whether to use a default value for the field or not. **/
public static class RecordDefault<R> extends FieldDefault<R, RecordDefault<R>> {
  private RecordDefault(FieldBuilder<R> field) {
    super(field);
  }

  /** Completes this field with the default value provided, cannot be null **/
  public final FieldAssembler<R> recordDefault(GenericRecord defaultVal) {
    return super.usingDefault(defaultVal);
  }

  @Override
  final RecordDefault<R> self() {
    return this;
  }
}
/**
 * Default-value chooser for a field whose type was supplied directly as a
 * {@link Schema} (rather than built through the fluent type methods).
 **/
public final static class GenericDefault<R> {
  private final FieldBuilder<R> field;
  private final Schema schema;

  private GenericDefault(FieldBuilder<R> field, Schema schema) {
    this.field = field;
    this.schema = schema;
  }

  /** Do not use a default value for this field. **/
  public FieldAssembler<R> noDefault() {
    return field.completeField(schema);
  }

  /**
   * Completes this field with the default value provided. The value must conform
   * to the schema of the field.
   **/
  public FieldAssembler<R> withDefault(Object defaultVal) {
    return field.completeField(schema, defaultVal);
  }
}
/**
 * Completion<R> is for internal builder use, all subclasses are private.
 *
 * Completion is an object that takes a Schema and returns some result.
 */
private abstract static class Completion<R> {
  /** Consumes the finished schema and produces the next builder state. */
  abstract R complete(Schema schema);
}
/** Terminal completion: returns the finished schema itself. */
private static class SchemaCompletion extends Completion<Schema> {
  @Override
  protected Schema complete(Schema schema) {
    return schema;
  }
}
// Shared singleton null schema used when wrapping types into unions.
private static final Schema NULL_SCHEMA = Schema.create(Schema.Type.NULL);

/** Wraps the completed schema as union(schema, null) — see nullable(). */
private static class NullableCompletion<R> extends Completion<R> {
  private final Completion<R> context;

  private NullableCompletion(Completion<R> context) {
    this.context = context;
  }

  @Override
  protected R complete(Schema schema) {
    // wrap the schema as a union of the schema and null
    Schema nullable = Schema.createUnion(Arrays.asList(schema, NULL_SCHEMA));
    return context.complete(nullable);
  }
}
/**
 * Wraps the completed schema as union(null, schema) with a null default —
 * see optional(). Note the order: null first, so null is the default type.
 */
private static class OptionalCompletion<R> extends Completion<FieldAssembler<R>> {
  private final FieldBuilder<R> bldr;

  public OptionalCompletion(FieldBuilder<R> bldr) {
    this.bldr = bldr;
  }

  @Override
  protected FieldAssembler<R> complete(Schema schema) {
    // wrap the schema as a union of null and the schema
    Schema optional = Schema.createUnion(Arrays.asList(NULL_SCHEMA, schema));
    return bldr.completeField(optional, (Object) null);
  }
}
/** Decorates a Completion, e.g. to transform its schema before completion. */
private abstract static class CompletionWrapper {
  abstract <R> Completion<R> wrap(Completion<R> completion);
}
/** CompletionWrapper that makes every completed schema nullable. */
private static final class NullableCompletionWrapper extends CompletionWrapper {
  @Override
  <R> Completion<R> wrap(Completion<R> completion) {
    return new NullableCompletion<>(completion);
  }
}
/**
 * Completion for container types (map/array): wraps the inner element schema
 * in an outer schema, applies the builder's custom props to the outer schema,
 * then delegates to the enclosing context.
 */
private static abstract class NestedCompletion<R> extends Completion<R> {
  private final Completion<R> context;
  private final PropBuilder<?> assembler; // holds custom props for the outer schema

  private NestedCompletion(PropBuilder<?> assembler, Completion<R> context) {
    this.context = context;
    this.assembler = assembler;
  }

  @Override
  protected final R complete(Schema schema) {
    Schema outer = outerSchema(schema);
    assembler.addPropsTo(outer);
    return context.complete(outer);
  }

  /** Wraps the inner (element/value) schema in the container schema. */
  protected abstract Schema outerSchema(Schema inner);
}
/** NestedCompletion producing a map schema around the completed value type. */
private static class MapCompletion<R> extends NestedCompletion<R> {
  private MapCompletion(MapBuilder<R> assembler, Completion<R> context) {
    super(assembler, context);
  }

  @Override
  protected Schema outerSchema(Schema inner) {
    return Schema.createMap(inner);
  }
}
/** NestedCompletion producing an array schema around the completed item type. */
private static class ArrayCompletion<R> extends NestedCompletion<R> {
  private ArrayCompletion(ArrayBuilder<R> assembler, Completion<R> context) {
    super(assembler, context);
  }

  @Override
  protected Schema outerSchema(Schema inner) {
    return Schema.createArray(inner);
  }
}
/**
 * Completion that appends the completed schema to the union's accumulated
 * member list and returns a UnionAccumulator so more branches may be added.
 */
private static class UnionCompletion<R> extends Completion<UnionAccumulator<R>> {
  private final Completion<R> context;
  private final NameContext names;
  private final List<Schema> schemas; // union members accumulated so far

  private UnionCompletion(Completion<R> context, NameContext names, List<Schema> schemas) {
    this.context = context;
    this.names = names;
    this.schemas = schemas;
  }

  @Override
  protected UnionAccumulator<R> complete(Schema schema) {
    // copy-on-write: each branch gets its own member list
    List<Schema> updated = new ArrayList<>(this.schemas);
    updated.add(schema);
    return new UnionAccumulator<>(context, names, updated);
  }
}
/**
 * Accumulates all of the types in a union. Add an additional type with
 * {@link #and()}. Complete the union with {@link #endUnion()}
 */
public static final class UnionAccumulator<R> {
  private final Completion<R> context;
  private final NameContext names;
  private final List<Schema> schemas; // union members in declaration order

  private UnionAccumulator(Completion<R> context, NameContext names, List<Schema> schemas) {
    this.context = context;
    this.names = names;
    this.schemas = schemas;
  }

  /** Add an additional type to this union **/
  public BaseTypeBuilder<UnionAccumulator<R>> and() {
    return new UnionBuilder<>(context, names, schemas);
  }

  /** Complete this union **/
  public R endUnion() {
    Schema schema = Schema.createUnion(schemas);
    return context.complete(schema);
  }
}
// create default value JsonNodes from objects
private static JsonNode toJsonNode(Object o) {
  try {
    String s;
    if (o instanceof ByteBuffer) {
      // special case since GenericData.toString() is incorrect for bytes
      // note that this does not handle the case of a default value with nested bytes
      // Read through a duplicate so the caller's position AND mark are left
      // untouched (the previous mark()/reset() clobbered any pre-existing mark).
      ByteBuffer bytes = ((ByteBuffer) o).duplicate();
      byte[] data = new byte[bytes.remaining()];
      bytes.get(data);
      s = quoteBytes(data);
    } else if (o instanceof byte[]) {
      s = quoteBytes((byte[]) o);
    } else {
      s = GenericData.get().toString(o);
    }
    return new ObjectMapper().readTree(s);
  } catch (IOException e) {
    throw new SchemaBuilderException(e);
  }
}

/**
 * Renders raw bytes as a quoted, JSON-escaped string literal. Bytes map
 * one-to-one to chars via ISO-8859-1, matching Avro's encoding of byte
 * defaults in schema JSON.
 */
private static String quoteBytes(byte[] data) {
  String s = new String(data, StandardCharsets.ISO_8859_1);
  char[] quoted = JsonStringEncoder.getInstance().quoteAsString(s);
  return '"' + new String(quoted) + '"';
}
}
// ==== extraction artifact removed; next file: org/apache/avro/LogicalType.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import org.apache.avro.generic.GenericData;
import org.apache.avro.specific.SpecificData;
/**
* Logical types provides an opt-in way to extend Avro's types. Logical types
* specify a way of representing a high-level type as a base Avro type. For
* example, a date is specified as the number of days after the unix epoch (or
* before using a negative value). This enables extensions to Avro's type system
* without breaking binary compatibility. Older versions see the base type and
* ignore the logical type.
*/
public class LogicalType {

  public static final String LOGICAL_TYPE_PROP = "logicalType";

  // Schema properties carrying Java-specific type mappings; a schema may not
  // combine any of these with a logical type.
  private static final String[] INCOMPATIBLE_PROPS = new String[] { GenericData.STRING_PROP, SpecificData.CLASS_PROP,
      SpecificData.KEY_CLASS_PROP, SpecificData.ELEMENT_PROP };

  private final String name;

  public LogicalType(String logicalTypeName) {
    this.name = logicalTypeName.intern();
  }

  /**
   * Returns the name of this logical type, as stored in the Schema property
   * "logicalType".
   *
   * @return the String name of the logical type
   */
  public String getName() {
    return name;
  }

  /**
   * Attaches this logical type to the given Schema. The schema is validated
   * first; on success the "logicalType" property is written and the schema's
   * logical-type reference is set.
   *
   * @param schema a Schema
   * @return the same Schema instance, now carrying this logical type
   * @throws IllegalArgumentException if the type and schema are incompatible
   */
  public Schema addToSchema(Schema schema) {
    validate(schema);
    schema.addProp(LOGICAL_TYPE_PROP, name);
    schema.setLogicalType(this);
    return schema;
  }

  /**
   * Checks that the given Schema may carry this logical type, throwing if any
   * incompatible Java-specific property is already present on it.
   *
   * @param schema a Schema
   * @throws IllegalArgumentException if the type and schema are incompatible
   */
  public void validate(Schema schema) {
    for (int i = 0; i < INCOMPATIBLE_PROPS.length; i++) {
      String incompatible = INCOMPATIBLE_PROPS[i];
      if (schema.getProp(incompatible) != null) {
        throw new IllegalArgumentException(LOGICAL_TYPE_PROP + " cannot be used with " + incompatible);
      }
    }
  }
}
// ==== extraction artifact removed; next file: org/apache/avro/SchemaValidator.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
/**
 * <p>
 * A SchemaValidator has one method, which validates that a {@link Schema} is
 * <b>compatible</b> with the other schemas provided.
 * </p>
 * <p>
 * What makes one Schema compatible with another is not part of the interface
 * contract.
 * </p>
 */
public interface SchemaValidator {

  /**
   * Validate one schema against others. The order of the schemas to validate
   * against is chronological from most recent to oldest, if there is a natural
   * chronological order. This allows some validators to identify which schemas
   * are the most "recent" in order to validate only against the most recent
   * schema(s).
   *
   * @param toValidate The schema to validate
   * @param existing   The schemas to validate against, in order from most recent
   *                   to oldest if applicable
   * @throws SchemaValidationException if the schema fails to validate.
   */
  void validate(Schema toValidate, Iterable<Schema> existing) throws SchemaValidationException;
}
// ==== extraction artifact removed; next file: org/apache/avro/UnknownAvroCodecException.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import java.io.IOException;
/**
 * An {@link IOException} signalling that an Avro codec name could not be
 * recognized.
 */
public class UnknownAvroCodecException extends IOException {

  // Exceptions are Serializable; pin the serialized form explicitly rather
  // than relying on a compiler-generated id.
  private static final long serialVersionUID = 1L;

  /**
   * @param message description of the unrecognized codec
   */
  public UnknownAvroCodecException(String message) {
    super(message);
  }
}
// ==== extraction artifact removed; next file: org/apache/avro/Resolver.java ====
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.avro.generic.GenericData;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.SeenPair;
import org.apache.avro.Resolver.ErrorAction.ErrorType;
/**
* Encapsulate schema-resolution logic in an easy-to-consume representation. See
* {@link #resolve} and also the separate document entitled
* <tt>refactoring-resolution</tt> for more information. It might also be
* helpful to study {@link org.apache.avro.io.parsing.ResolvingGrammarGenerator}
* as an example of how to use this class.
*/
public class Resolver {
/**
 * Returns a {@link Resolver.Action} tree for resolving the writer schema
 * <tt>writer</tt> and the reader schema <tt>reader</tt>.
 *
 * This method walks the reader's and writer's schemas together, generating an
 * appropriate subclass of {@link Action} to encapsulate the information needed
 * to resolve the corresponding parts of each schema tree. For convenience,
 * every {@link Action} object has a pointer to the corresponding parts of the
 * reader's and writer's trees being resolved by the action. Each subclass of
 * {@link Action} has additional information needed for different types of
 * schema, e.g., the {@link EnumAdjust} subclass has information about
 * re-ordering and deletion of enumeration symbols, while {@link RecordAdjust}
 * has information about re-ordering and deletion of record fields.
 *
 * Note that aliases are applied to the writer's schema before resolution
 * actually takes place. This means that the <tt>writer</tt> field of the
 * resulting {@link Action} objects will not be the same schema as provided to
 * this method. However, the <tt>reader</tt> field will be.
 *
 * @param writer The schema used by the writer
 * @param reader The schema used by the reader
 * @param data   Used for <tt>getDefaultValue</tt> and getting conversions
 * @return Nested actions for resolving the two
 */
public static Action resolve(Schema writer, Schema reader, GenericData data) {
  // A fresh memoization map per top-level call lets mutually recursive record
  // schemas resolve without infinite recursion.
  return resolve(Schema.applyAliases(writer, reader), reader, data, new HashMap<>());
}
/**
 * Uses <tt>GenericData.get()</tt> for the <tt>data</tt> param.
 *
 * @see #resolve(Schema, Schema, GenericData)
 */
public static Action resolve(Schema writer, Schema reader) {
  return resolve(writer, reader, GenericData.get());
}
/**
 * Core dispatch for resolution. The order of the checks is significant: a
 * writer union is handled first (whatever the reader is), then same-type
 * pairs, then a reader union, and finally promotion between two differing
 * non-union types. <tt>seen</tt> memoizes record resolutions so recursive
 * schemas terminate.
 */
private static Action resolve(Schema w, Schema r, GenericData d, Map<SeenPair, Action> seen) {
  final Schema.Type wType = w.getType();
  final Schema.Type rType = r.getType();
  if (wType == Schema.Type.UNION) {
    return WriterUnion.resolve(w, r, d, seen);
  }
  if (wType == rType) {
    switch (wType) {
    case NULL:
    case BOOLEAN:
    case INT:
    case LONG:
    case FLOAT:
    case DOUBLE:
    case STRING:
    case BYTES:
      // Identical primitive types need no resolution work at all.
      return new DoNothing(w, r, d);
    case FIXED:
      // Fixed types must agree on both name and size.
      if (w.getName() != null && !w.getName().equals(r.getName())) {
        return new ErrorAction(w, r, d, ErrorType.NAMES_DONT_MATCH);
      } else if (w.getFixedSize() != r.getFixedSize()) {
        return new ErrorAction(w, r, d, ErrorType.SIZES_DONT_MATCH);
      } else {
        return new DoNothing(w, r, d);
      }
    case ARRAY:
      Action et = resolve(w.getElementType(), r.getElementType(), d, seen);
      return new Container(w, r, d, et);
    case MAP:
      Action vt = resolve(w.getValueType(), r.getValueType(), d, seen);
      return new Container(w, r, d, vt);
    case ENUM:
      return EnumAdjust.resolve(w, r, d);
    case RECORD:
      return RecordAdjust.resolve(w, r, d, seen);
    default:
      throw new IllegalArgumentException("Unknown type for schema: " + wType);
    }
  } else if (rType == Schema.Type.UNION) {
    return ReaderUnion.resolve(w, r, d, seen);
  } else {
    return Promote.resolve(w, r, d);
  }
}
/**
 * An abstract class for an action to be taken to resolve a writer's schema
 * (found in public instance variable <tt>writer</tt>) against a reader's schema
 * (in <tt>reader</tt>). Ordinarily, neither field can be <tt>null</tt>, except
 * that the <tt>reader</tt> field can be <tt>null</tt> in a {@link Skip}, which
 * is used to skip a field in a writer's record that doesn't exist in the
 * reader's (and thus there is no reader schema to resolve to).
 */
public static abstract class Action {
  /** Helps us traverse faster. */
  public enum Type {
    DO_NOTHING, ERROR, PROMOTE, CONTAINER, ENUM, SKIP, RECORD, WRITER_UNION, READER_UNION
  }

  public final Schema writer, reader;
  /** Discriminator matching the concrete subclass, for switch-based traversal. */
  public final Type type;

  /**
   * If the reader has a logical type, it's stored here for fast access, otherwise
   * this will be null.
   */
  public final LogicalType logicalType;

  /**
   * If the reader has a conversion that needs to be applied, it's stored here for
   * fast access, otherwise this will be null.
   */
  public final Conversion<?> conversion;

  protected Action(Schema w, Schema r, GenericData data, Type t) {
    this.writer = w;
    this.reader = r;
    this.type = t;
    // A null reader only occurs for Skip actions; there is then no logical
    // type or conversion to cache.
    if (r == null) {
      this.logicalType = null;
      this.conversion = null;
    } else {
      this.logicalType = r.getLogicalType();
      this.conversion = data.getConversionFor(logicalType);
    }
  }
}
/**
 * In this case, there's nothing to be done for resolution: the two schemas are
 * effectively the same. This action will be generated <em>only</em> for
 * primitive types and fixed types, and not for any other kind of schema.
 */
public static class DoNothing extends Action {
  // Carries no extra state: the Action base class already records the
  // writer/reader pair and any logical-type conversion.
  public DoNothing(Schema w, Schema r, GenericData d) {
    super(w, r, d, Action.Type.DO_NOTHING);
  }
}
/**
 * In this case there is an error. We put error Actions into trees because Avro
 * reports these errors in a lazy fashion: if a particular input doesn't
 * "tickle" the error (typically because it's in a branch of a union that isn't
 * found in the data being read), then it's safe to ignore it.
 */
public static class ErrorAction extends Action {
  public enum ErrorType {
    /**
     * Use when Schema types don't match and can't be converted. For example,
     * resolving "int" and "enum".
     */
    INCOMPATIBLE_SCHEMA_TYPES,

    /**
     * Use when Schema types match but, in the case of record, enum, or fixed, the
     * names don't match.
     */
    NAMES_DONT_MATCH,

    /**
     * Use when two fixed types match and their names match by their sizes don't.
     */
    SIZES_DONT_MATCH,

    /**
     * Use when matching two records and the reader has a field with no default
     * value and that field is missing in the writer..
     */
    MISSING_REQUIRED_FIELD,

    /**
     * Use when matching a reader's union against a non-union and can't find a
     * branch that matches.
     */
    NO_MATCHING_BRANCH
  }

  public final ErrorType error;

  public ErrorAction(Schema w, Schema r, GenericData d, ErrorType e) {
    super(w, r, d, Action.Type.ERROR);
    this.error = e;
  }

  @Override
  public String toString() {
    switch (this.error) {
    case INCOMPATIBLE_SCHEMA_TYPES:
    case NAMES_DONT_MATCH:
    case SIZES_DONT_MATCH:
    case NO_MATCHING_BRANCH:
      return "Found " + writer.getFullName() + ", expecting " + reader.getFullName();
    case MISSING_REQUIRED_FIELD: {
      // Find a reader field that is absent from the writer and has no default.
      // NOTE(review): fname is overwritten on each match with no break, so when
      // several fields qualify the *last* one is the one reported.
      final List<Field> rfields = reader.getFields();
      String fname = "<oops>";
      for (Field rf : rfields) {
        if (writer.getField(rf.name()) == null && rf.defaultValue() == null) {
          fname = rf.name();
        }
      }
      return ("Found " + writer.getFullName() + ", expecting " + reader.getFullName() + ", missing required field "
          + fname);
    }
    default:
      throw new IllegalArgumentException("Unknown error.");
    }
  }
}
/**
 * In this case, the writer's type needs to be promoted to the reader's. These
 * are constructed by {@link Promote#resolve}, which will only construct one
 * when the writer's and reader's schemas are different (ie, no "self
 * promotion"), and when the promotion is one allowed by the Avro spec.
 */
public static class Promote extends Action {
  private Promote(Schema w, Schema r, GenericData d) {
    super(w, r, d, Action.Type.PROMOTE);
  }

  /**
   * Return a promotion.
   *
   * @param w Writer's schema
   * @param r Reader's schema
   * @return a {@link Promote} schema if the two schemas are compatible, or
   *         {@link ErrorType#INCOMPATIBLE_SCHEMA_TYPES} if they are not.
   * @throws IllegalArgumentException if <em>getType()</em> of the two schemas are
   *                                  not different.
   */
  public static Action resolve(Schema w, Schema r, GenericData d) {
    if (isValid(w, r)) {
      return new Promote(w, r, d);
    } else {
      return new ErrorAction(w, r, d, ErrorType.INCOMPATIBLE_SCHEMA_TYPES);
    }
  }

  /**
   * Returns true iff <tt>w</tt> is promotable to <tt>r</tt> under the rules
   * encoded below: int to long/float/double, long to float/double, float to
   * double, and string/bytes interchangeably. The two types must differ;
   * calling this with identical types throws.
   */
  public static boolean isValid(Schema w, Schema r) {
    if (w.getType() == r.getType())
      throw new IllegalArgumentException("Only use when reader and writer are different.");
    Schema.Type wt = w.getType();
    // Each outer case falls to 'break' (and ultimately false) unless the
    // writer's type is explicitly listed as promotable to the reader's.
    switch (r.getType()) {
    case LONG:
      switch (wt) {
      case INT:
        return true;
      }
      break;
    case FLOAT:
      switch (wt) {
      case INT:
      case LONG:
        return true;
      }
      break;
    case DOUBLE:
      switch (wt) {
      case INT:
      case LONG:
      case FLOAT:
        return true;
      }
      break;
    case BYTES:
    case STRING:
      switch (wt) {
      case STRING:
      case BYTES:
        return true;
      }
      break;
    }
    return false;
  }
}
/**
 * Used for array and map schemas: the public instance variable
 * <tt>elementAction</tt> contains the resolving action needed for the element
 * type of an array or the value type of a map.
 */
public static class Container extends Action {
  /** Resolution for the array-element or map-value schemas. */
  public final Action elementAction;

  public Container(Schema w, Schema r, GenericData d, Action e) {
    super(w, r, d, Action.Type.CONTAINER);
    this.elementAction = e;
  }
}
/**
 * Contains information needed to resolve enumerations. When resolving enums,
 * adjustments need to be made in two scenarios: the index for an enum symbol
 * might be different in the reader or writer, or the reader might not have a
 * symbol that was written out for the writer (which is an error, but one we can
 * only detect when decoding data).
 *
 * These adjustments are reflected in the instance variable
 * <tt>adjustments</tt>. For the symbol with index <tt>i</tt> in the writer's
 * enum definition, <tt>adjustments[i]</tt> -- an integer -- contains the
 * adjustment for that symbol. If the integer is positive, then reader also has
 * the symbol and the integer is its index in the reader's schema. If
 * <tt>adjustment[i]</tt> is negative, then the reader does <em>not</em> have
 * the corresponding symbol (which is the error case).
 *
 * Sometimes there's no adjustments needed: all symbols in the reader have the
 * same index in the reader's and writer's schema. This is a common case, and it
 * allows for some optimization. To signal that this is the case,
 * <tt>noAdjustmentsNeeded</tt> is set to true.
 */
public static class EnumAdjust extends Action {
  public final int[] adjustments;
  /** Pre-built reader-side enum values, indexed by writer symbol position. */
  public final Object[] values;
  public final boolean noAdjustmentsNeeded;

  private EnumAdjust(Schema w, Schema r, GenericData d, int[] adj, Object[] values) {
    super(w, r, d, Action.Type.ENUM);
    this.adjustments = adj;
    // No adjustment is needed only when the writer has no extra symbols and
    // every writer symbol maps to the same index on the reader side.
    boolean noAdj;
    int rsymCount = r.getEnumSymbols().size();
    int count = Math.min(rsymCount, adj.length);
    noAdj = (adj.length <= rsymCount);
    for (int i = 0; noAdj && i < count; i++) {
      noAdj &= (i == adj[i]);
    }
    this.noAdjustmentsNeeded = noAdj;
    this.values = values;
  }

  /**
   * If writer and reader don't have same name, a
   * {@link ErrorAction.ErrorType#NAMES_DONT_MATCH} is returned, otherwise an
   * appropriate {@link EnumAdjust} is.
   */
  public static Action resolve(Schema w, Schema r, GenericData d) {
    if (w.getName() != null && !w.getName().equals(r.getName()))
      return new ErrorAction(w, r, d, ErrorType.NAMES_DONT_MATCH);

    final List<String> wsymbols = w.getEnumSymbols();
    final List<String> rsymbols = r.getEnumSymbols();
    // Writer symbols missing from the reader fall back to the reader's enum
    // default; with no declared default the index stays -1 (the error case).
    final int defaultIndex = (r.getEnumDefault() == null ? -1 : rsymbols.indexOf(r.getEnumDefault()));

    int[] adjustments = new int[wsymbols.size()];
    Object[] values = new Object[wsymbols.size()];
    Object defaultValue = (defaultIndex == -1) ? null : d.createEnum(r.getEnumDefault(), r);
    for (int i = 0; i < adjustments.length; i++) {
      int j = rsymbols.indexOf(wsymbols.get(i));
      if (j < 0) {
        j = defaultIndex;
      }
      adjustments[i] = j;
      values[i] = (j == defaultIndex) ? defaultValue : d.createEnum(rsymbols.get(j), r);
    }
    return new EnumAdjust(w, r, d, adjustments, values);
  }
}
/**
 * This only appears inside {@link RecordAdjust#fieldActions}, i.e., the actions
 * for adjusting the fields of a record. This action indicates that the writer's
 * schema has a field that the reader's does <em>not</em> have, and thus the
 * field should be skipped. Since there is no corresponding reader's schema for
 * the writer's in this case, the {@link Action#reader} field is <tt>null</tt>
 * for this subclass.
 */
public static class Skip extends Action {
  public Skip(Schema w, GenericData d) {
    // null reader: there is nothing on the reader side to resolve against.
    super(w, null, d, Action.Type.SKIP);
  }
}
/**
 * Instructions for resolving two record schemas. Includes instructions on how
 * to recursively resolve each field, an indication of when to skip (writer
 * fields), plus information about which reader fields should be populated by
 * defaults (because the writer doesn't have corresponding fields).
 */
public static class RecordAdjust extends Action {
  /**
   * An action for each field of the writer. If the corresponding field is to be
   * skipped during reading, then this will contain a {@link Skip}. For fields to
   * be read into the reading datum, will contain a regular action for resolving
   * the writer/reader schemas of the matching fields.
   */
  public final Action[] fieldActions;

  /**
   * Contains (all of) the reader's fields. The first <i>n</i> of these are the
   * fields that will be read from the writer: these <i>n</i> are in the order
   * dictated by writer's schema. The remaining <i>m</i> fields will be read from
   * default values (actions for these default values are found in
   * {@link RecordAdjust#defaults}.
   */
  public final Field[] readerOrder;

  /**
   * Pointer into {@link RecordAdjust#readerOrder} of the first reader field whose
   * value comes from a default value. Set to length of
   * {@link RecordAdjust#readerOrder} if there are none.
   */
  public final int firstDefault;

  /**
   * Contains the default values to be used for the last
   * <tt>readerOrder.length-firstDefault</tt> fields in readerOrder. The
   * <tt>i</tt>th element of <tt>defaults</tt> is the default value for the
   * <tt>i+firstDefault</tt> member of <tt>readerOrder</tt>.
   */
  public final Object[] defaults;

  /**
   * Supplier that offers an optimized alternative to data.newRecord()
   */
  public final GenericData.InstanceSupplier instanceSupplier;

  /**
   * Returns true iff <code>i == readerOrder[i].pos()</code> for all
   * indices <code>i</code>. Which is to say: the order of the reader's fields is
   * the same in both the reader's and writer's schema.
   */
  public boolean noReorder() {
    boolean result = true;
    for (int i = 0; result && i < readerOrder.length; i++) {
      result &= (i == readerOrder[i].pos());
    }
    return result;
  }

  private RecordAdjust(Schema w, Schema r, GenericData d, Action[] fa, Field[] ro, int firstD, Object[] defaults) {
    super(w, r, d, Action.Type.RECORD);
    this.fieldActions = fa;
    this.readerOrder = ro;
    this.firstDefault = firstD;
    this.defaults = defaults;
    this.instanceSupplier = d.getNewRecordSupplier(r);
  }

  /**
   * Returns a {@link RecordAdjust} for the two schemas, or an {@link ErrorAction}
   * if there was a problem resolving. An {@link ErrorAction} is returned when
   * either the two record-schemas don't have the same name, or if the writer is
   * missing a field for which the reader does not have a default value.
   *
   * @throws RuntimeException if writer and reader schemas are not both records
   */
  static Action resolve(Schema writeSchema, Schema readSchema, GenericData data, Map<SeenPair, Action> seen) {
    final SeenPair writeReadPair = new SeenPair(writeSchema, readSchema);
    Action result = seen.get(writeReadPair);
    if (result != null) {
      return result;
    }

    /*
     * Current implementation doesn't do this check. To pass regressions tests, we
     * can't either. if (w.getFullName() != null && !
     * w.getFullName().equals(r.getFullName())) { result = new ErrorAction(w, r, d,
     * ErrorType.NAMES_DONT_MATCH); seen.put(wr, result); return result; }
     */
    final List<Field> writeFields = writeSchema.getFields();
    final List<Field> readFields = readSchema.getFields();

    // firstDefault = number of writer fields also present in the reader; those
    // occupy the front of readerOrder, defaulted reader fields follow them.
    int firstDefault = 0;
    for (Schema.Field writeField : writeFields) {
      // The writeFields that are also in the readschema
      if (readSchema.getField(writeField.name()) != null) {
        ++firstDefault;
      }
    }
    final Action[] actions = new Action[writeFields.size()];
    final Field[] reordered = new Field[readFields.size()];
    final Object[] defaults = new Object[reordered.length - firstDefault];
    result = new RecordAdjust(writeSchema, readSchema, data, actions, reordered, firstDefault, defaults);
    seen.put(writeReadPair, result); // Insert early to handle recursion

    int i = 0;
    int ridx = 0;
    for (Field writeField : writeFields) {
      final Field readField = readSchema.getField(writeField.name());
      if (readField != null) {
        reordered[ridx++] = readField;
        actions[i++] = Resolver.resolve(writeField.schema(), readField.schema(), data, seen);
      } else {
        // Reader doesn't have this writer field: skip it during decoding.
        actions[i++] = new Skip(writeField.schema(), data);
      }
    }
    for (Field readField : readFields) {
      // The field is not in the writeSchema, so we can never read it
      // Use the default value, or throw an error otherwise
      final Field writeField = writeSchema.getField(readField.name());
      if (writeField == null) {
        if (readField.defaultValue() == null) {
          result = new ErrorAction(writeSchema, readSchema, data, ErrorType.MISSING_REQUIRED_FIELD);
          seen.put(writeReadPair, result);
          return result;
        } else {
          // ridx has advanced past all matched fields, so this indexes
          // defaults[] from 0 upward.
          defaults[ridx - firstDefault] = data.getDefaultValue(readField);
          reordered[ridx++] = readField;
        }
      }
    }
    return result;
  }
}
/**
 * In this case, the writer was a union. There are two subcases here:
 *
 * If the reader and writer are the same union, then the <tt>unionEquiv</tt>
 * variable is set to true and the <tt>actions</tt> list holds the resolutions
 * of each branch of the writer against the corresponding branch of the reader
 * (which will result in no material resolution work, because the branches will
 * be equivalent). If the reader is not a union or is a different union, then
 * <tt>unionEquiv</tt> is false and the <tt>actions</tt> list holds the
 * resolution of each of the writer's branches against the entire schema of the
 * reader (if the reader is a union, that will result in ReaderUnion actions).
 */
public static class WriterUnion extends Action {
  public final Action[] actions;
  public final boolean unionEquiv;

  private WriterUnion(Schema w, Schema r, GenericData d, boolean ue, Action[] a) {
    super(w, r, d, Action.Type.WRITER_UNION);
    unionEquiv = ue;
    actions = a;
  }

  public static Action resolve(Schema writeSchema, Schema readSchema, GenericData data, Map<SeenPair, Action> seen) {
    boolean unionEquivalent = unionEquiv(writeSchema, readSchema, new HashMap<>());
    final List<Schema> writeTypes = writeSchema.getTypes();
    // Equivalent unions resolve branch i against reader branch i; otherwise
    // every writer branch resolves against the whole reader schema.
    final List<Schema> readTypes = (unionEquivalent ? readSchema.getTypes() : null);
    int writeTypeLength = writeTypes.size();
    final Action[] actions = new Action[writeTypeLength];
    for (int i = 0; i < writeTypeLength; i++) {
      actions[i] = Resolver.resolve(writeTypes.get(i), (unionEquivalent ? readTypes.get(i) : readSchema), data, seen);
    }
    return new WriterUnion(writeSchema, readSchema, data, unionEquivalent, actions);
  }
}
/**
 * In this case, the reader is a union and the writer is not. For this case, we
 * need to pick the first branch of the reader that matches the writer and
 * pretend to the reader that the index of this branch was found in the writer's
 * data stream.
 *
 * To support this case, the {@link ReaderUnion} object has two (public) fields:
 * <tt>firstMatch</tt> gives the index of the first matching branch in the
 * reader's schema, and <tt>actualAction</tt> is the {@link Action} that
 * resolves the writer's schema with the schema found in the <tt>firstMatch</tt>
 * branch of the reader's schema.
 */
public static class ReaderUnion extends Action {
  public final int firstMatch;
  public final Action actualAction;

  public ReaderUnion(Schema w, Schema r, GenericData d, int firstMatch, Action actual) {
    super(w, r, d, Action.Type.READER_UNION);
    this.firstMatch = firstMatch;
    this.actualAction = actual;
  }

  /**
   * Returns a {@link ReaderUnion} action for resolving <tt>w</tt> and <tt>r</tt>,
   * or an {@link ErrorAction} if there is no branch in the reader that matches
   * the writer.
   *
   * @throws RuntimeException if <tt>r</tt> is not a union schema or <tt>w</tt>
   *                          <em>is</em> a union schema
   */
  public static Action resolve(Schema w, Schema r, GenericData d, Map<SeenPair, Action> seen) {
    if (w.getType() == Schema.Type.UNION) {
      throw new IllegalArgumentException("Writer schema is union.");
    }
    int i = firstMatchingBranch(w, r, d, seen);
    if (0 <= i) {
      return new ReaderUnion(w, r, d, i, Resolver.resolve(w, r.getTypes().get(i), d, seen));
    }
    return new ErrorAction(w, r, d, ErrorType.NO_MATCHING_BRANCH);
  }

  // Note: This code was taken verbatim from the 1.8.x branch of Avro. It
  // implements
  // a "soft match" algorithm that seems to disagree with the spec. However, in
  // the
  // interest of "bug-for-bug" compatibility, we imported the old algorithm.
  private static int firstMatchingBranch(Schema w, Schema r, GenericData d, Map<SeenPair, Action> seen) {
    final Schema.Type vt = w.getType();
    // first scan for exact match
    int j = 0;
    // Index of a reader record branch that matched structurally (but not by
    // full name); used only when no exact name match exists.
    int structureMatch = -1;
    for (Schema b : r.getTypes()) {
      if (vt == b.getType()) {
        if (vt == Schema.Type.RECORD || vt == Schema.Type.ENUM || vt == Schema.Type.FIXED) {
          final String vname = w.getFullName();
          final String bname = b.getFullName();
          // return immediately if the name matches exactly according to spec
          if (vname != null && vname.equals(bname))
            return j;
          if (vt == Schema.Type.RECORD && !hasMatchError(RecordAdjust.resolve(w, b, d, seen))) {
            final String vShortName = w.getName();
            final String bShortName = b.getName();
            // use the first structure match or one where the name matches
            if ((structureMatch < 0) || (vShortName != null && vShortName.equals(bShortName))) {
              structureMatch = j;
            }
          }
        } else {
          return j;
        }
      }
      j++;
    }
    // if there is a record structure match, return it
    if (structureMatch >= 0) {
      return structureMatch;
    }
    // then scan match via numeric promotion
    j = 0;
    for (Schema b : r.getTypes()) {
      switch (vt) {
      case INT:
        switch (b.getType()) {
        case LONG:
        case DOUBLE:
        case FLOAT:
          return j;
        }
        break;
      case LONG:
        switch (b.getType()) {
        case DOUBLE:
        case FLOAT:
          return j;
        }
        break;
      case FLOAT:
        switch (b.getType()) {
        case DOUBLE:
          return j;
        }
        break;
      case STRING:
        switch (b.getType()) {
        case BYTES:
          return j;
        }
        break;
      case BYTES:
        switch (b.getType()) {
        case STRING:
          return j;
        }
        break;
      }
      j++;
    }
    return -1;
  }

  /**
   * True when the record resolution failed outright, or when any of its
   * per-field actions is an error; used to reject a candidate structural match.
   */
  private static boolean hasMatchError(Action action) {
    if (action instanceof ErrorAction)
      return true;
    else
      for (Action a : ((RecordAdjust) action).fieldActions) {
        if (a instanceof ErrorAction) {
          return true;
        }
      }
    return false;
  }
}
/**
 * Structural-equivalence test used by {@link WriterUnion#resolve}: true when
 * the two schemas have identical shape (same types, short names, enum symbols,
 * fixed sizes, and field names) so that branch <tt>i</tt> of the writer can be
 * resolved directly against branch <tt>i</tt> of the reader. The <tt>seen</tt>
 * map caches record pairs already under comparison so recursive schemas
 * terminate.
 */
private static boolean unionEquiv(Schema write, Schema read, Map<SeenPair, Boolean> seen) {
  final Schema.Type wt = write.getType();
  if (wt != read.getType()) {
    return false;
  }

  // Previously, the spec was somewhat ambiguous as to whether getFullName or
  // getName should be used here. Using name rather than fully qualified name
  // maintains backwards compatibility.
  if ((wt == Schema.Type.RECORD || wt == Schema.Type.FIXED || wt == Schema.Type.ENUM)
      && !(write.getName() == null || write.getName().equals(read.getName()))) {
    return false;
  }

  switch (wt) {
  case NULL:
  case BOOLEAN:
  case INT:
  case LONG:
  case FLOAT:
  case DOUBLE:
  case STRING:
  case BYTES:
    return true;
  case ARRAY:
    return unionEquiv(write.getElementType(), read.getElementType(), seen);
  case MAP:
    return unionEquiv(write.getValueType(), read.getValueType(), seen);
  case FIXED:
    return write.getFixedSize() == read.getFixedSize();
  case ENUM: {
    // Enum equivalence requires the symbol lists to match exactly, in order.
    final List<String> ws = write.getEnumSymbols();
    final List<String> rs = read.getEnumSymbols();
    return ws.equals(rs);
  }
  case UNION: {
    // Nested unions must match branch-for-branch, in order.
    final List<Schema> wb = write.getTypes();
    final List<Schema> rb = read.getTypes();
    if (wb.size() != rb.size()) {
      return false;
    }
    for (int i = 0; i < wb.size(); i++) {
      if (!unionEquiv(wb.get(i), rb.get(i), seen)) {
        return false;
      }
    }
    return true;
  }
  case RECORD: {
    final SeenPair wsc = new SeenPair(write, read);
    if (!seen.containsKey(wsc)) {
      seen.put(wsc, true); // Be optimistic, but we may change our minds
      final List<Field> wb = write.getFields();
      final List<Field> rb = read.getFields();
      if (wb.size() != rb.size()) {
        seen.put(wsc, false);
      } else {
        for (int i = 0; i < wb.size(); i++) {
          // Loop through each of the elements, and check if they are equal
          if (!wb.get(i).name().equals(rb.get(i).name())
              || !unionEquiv(wb.get(i).schema(), rb.get(i).schema(), seen)) {
            seen.put(wsc, false);
            break;
          }
        }
      }
    }
    return seen.get(wsc);
  }
  default:
    throw new IllegalArgumentException("Unknown schema type: " + write.getType());
  }
}
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.DoubleNode;
import com.fasterxml.jackson.databind.node.NullNode;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.Serializable;
import java.io.StringWriter;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.avro.util.internal.Accessor;
import org.apache.avro.util.internal.Accessor.FieldAccessor;
import org.apache.avro.util.internal.JacksonUtils;
import org.apache.avro.util.internal.ThreadLocalWithInitial;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.avro.LogicalType.LOGICAL_TYPE_PROP;
/**
* An abstract data type.
* <p>
* A schema may be one of:
* <ul>
* <li>A <i>record</i>, mapping field names to field value data;
* <li>An <i>enum</i>, containing one of a small set of symbols;
* <li>An <i>array</i> of values, all of the same schema;
* <li>A <i>map</i>, containing string/value pairs, of a declared schema;
* <li>A <i>union</i> of other schemas;
* <li>A <i>fixed</i> sized binary object;
* <li>A unicode <i>string</i>;
* <li>A sequence of <i>bytes</i>;
* <li>A 32-bit signed <i>int</i>;
* <li>A 64-bit signed <i>long</i>;
* <li>A 32-bit IEEE single-<i>float</i>; or
* <li>A 64-bit IEEE <i>double</i>-float; or
* <li>A <i>boolean</i>; or
* <li><i>null</i>.
* </ul>
*
* A schema can be constructed using one of its static <tt>createXXX</tt>
* methods, or more conveniently using {@link SchemaBuilder}. The schema objects
* are <i>logically</i> immutable. There are only two mutating methods -
* {@link #setFields(List)} and {@link #addProp(String, String)}. The following
* restrictions apply on these two methods.
* <ul>
* <li>{@link #setFields(List)}, can be called at most once. This method exists
* in order to enable clients to build recursive schemas.
* <li>{@link #addProp(String, String)} can be called with property names that
* are not present already. It is not possible to change or delete an existing
* property.
* </ul>
*/
public abstract class Schema extends JsonProperties implements Serializable {
private static final long serialVersionUID = 1L;
protected Object writeReplace() {
SerializableSchema ss = new SerializableSchema();
ss.schemaString = toString();
return ss;
}
  /**
   * Serialization proxy: stores the schema's JSON form and re-parses it on
   * deserialization (see {@link Schema#writeReplace()}).
   */
  private static final class SerializableSchema implements Serializable {
    private static final long serialVersionUID = 1L;
    // JSON form of the schema, produced by Schema.toString().
    private String schemaString;

    // Replaces the proxy with a freshly parsed Schema on deserialization.
    private Object readResolve() {
      return new Schema.Parser().parse(schemaString);
    }
  }
  // Shared Jackson factory and mapper used for all schema parsing and printing.
  static final JsonFactory FACTORY = new JsonFactory();
  static final Logger LOG = LoggerFactory.getLogger(Schema.class);
  static final ObjectMapper MAPPER = new ObjectMapper(FACTORY);
  // Sentinel meaning "hash not yet computed" for the cached hashCode field.
  private static final int NO_HASHCODE = Integer.MIN_VALUE;
  static {
    FACTORY.enable(JsonParser.Feature.ALLOW_COMMENTS); // schema JSON may contain comments
    FACTORY.setCodec(MAPPER);
  }
/** The type of a schema. */
public enum Type {
RECORD, ENUM, ARRAY, MAP, UNION, FIXED, STRING, BYTES, INT, LONG, FLOAT, DOUBLE, BOOLEAN, NULL;
private final String name;
private Type() {
this.name = this.name().toLowerCase(Locale.ENGLISH);
}
public String getName() {
return name;
}
};
  // The concrete schema type; fixed at construction time.
  private final Type type;
  // Optional logical type annotation (e.g. decimal, date); null when absent.
  private LogicalType logicalType = null;

  Schema(Type type) {
    // Enums reserve "default" in addition to the common reserved properties.
    super(type == Type.ENUM ? ENUM_RESERVED : SCHEMA_RESERVED);
    this.type = type;
  }
/** Create a schema for a primitive type. */
public static Schema create(Type type) {
switch (type) {
case STRING:
return new StringSchema();
case BYTES:
return new BytesSchema();
case INT:
return new IntSchema();
case LONG:
return new LongSchema();
case FLOAT:
return new FloatSchema();
case DOUBLE:
return new DoubleSchema();
case BOOLEAN:
return new BooleanSchema();
case NULL:
return new NullSchema();
default:
throw new AvroRuntimeException("Can't create a: " + type);
}
}
  // Property names that may not be used as user properties on schemas.
  private static final Set<String> SCHEMA_RESERVED = new HashSet<>(
      Arrays.asList("doc", "fields", "items", "name", "namespace", "size", "symbols", "values", "type", "aliases"));

  // Enums additionally reserve "default" (the enum default symbol).
  private static final Set<String> ENUM_RESERVED = new HashSet<>(SCHEMA_RESERVED);
  static {
    ENUM_RESERVED.add("default");
  }

  // Cached hash code; NO_HASHCODE means "not yet computed" (see hashCode()).
  int hashCode = NO_HASHCODE;
  @Override
  public void addProp(String name, String value) {
    super.addProp(name, value);
    hashCode = NO_HASHCODE; // props participate in the hash, so invalidate the cache
  }

  @Override
  public void addProp(String name, Object value) {
    super.addProp(name, value);
    hashCode = NO_HASHCODE; // props participate in the hash, so invalidate the cache
  }
  /** Returns this schema's logical type annotation, or null if none is set. */
  public LogicalType getLogicalType() {
    return logicalType;
  }

  // Package-private: logical types are attached by the LogicalType machinery.
  void setLogicalType(LogicalType logicalType) {
    this.logicalType = logicalType;
  }
  /**
   * Create an anonymous record schema.
   *
   * @deprecated This method allows to create Schema objects that cannot be parsed
   *             by {@link Schema.Parser#parse(String)}. It will be removed in a
   *             future version of Avro. Better use
   *             {@link #createRecord(String, String, String, boolean, List)} to
   *             produce a fully qualified Schema.
   */
  @Deprecated
  public static Schema createRecord(List<Field> fields) {
    Schema result = createRecord(null, null, null, false);
    result.setFields(fields);
    return result;
  }

  /** Create a named record schema; fields must be set later via setFields(). */
  public static Schema createRecord(String name, String doc, String namespace, boolean isError) {
    return new RecordSchema(new Name(name, namespace), doc, isError);
  }

  /** Create a named record schema with fields already set. */
  public static Schema createRecord(String name, String doc, String namespace, boolean isError, List<Field> fields) {
    return new RecordSchema(new Name(name, namespace), doc, isError, fields);
  }

  /** Create an enum schema with no default symbol. */
  public static Schema createEnum(String name, String doc, String namespace, List<String> values) {
    return new EnumSchema(new Name(name, namespace), doc, new LockableArrayList<>(values), null);
  }

  /** Create an enum schema with the given default symbol. */
  public static Schema createEnum(String name, String doc, String namespace, List<String> values, String enumDefault) {
    return new EnumSchema(new Name(name, namespace), doc, new LockableArrayList<>(values), enumDefault);
  }

  /** Create an array schema. */
  public static Schema createArray(Schema elementType) {
    return new ArraySchema(elementType);
  }

  /** Create a map schema (map keys are always strings in Avro). */
  public static Schema createMap(Schema valueType) {
    return new MapSchema(valueType);
  }

  /** Create a union schema from the given list of branches. */
  public static Schema createUnion(List<Schema> types) {
    return new UnionSchema(new LockableArrayList<>(types));
  }

  /** Create a union schema from the given branches. */
  public static Schema createUnion(Schema... types) {
    return createUnion(new LockableArrayList<>(types));
  }

  /** Create a fixed schema. */
  public static Schema createFixed(String name, String doc, String space, int size) {
    return new FixedSchema(new Name(name, space), doc, size);
  }
  /** Return the type of this schema. */
  public Type getType() {
    return type;
  }

  /**
   * If this is a record, returns the Field with the given name
   * <tt>fieldName</tt>. If there is no field by that name, a <tt>null</tt> is
   * returned.
   *
   * @throws AvroRuntimeException if this is not a record schema
   */
  public Field getField(String fieldname) {
    throw new AvroRuntimeException("Not a record: " + this);
  }

  /**
   * If this is a record, returns the fields in it. The returned list is in the
   * order of their positions.
   *
   * @throws AvroRuntimeException if this is not a record schema
   */
  public List<Field> getFields() {
    throw new AvroRuntimeException("Not a record: " + this);
  }

  /**
   * If this is a record, returns whether the fields have been set.
   *
   * @throws AvroRuntimeException if this is not a record schema
   */
  public boolean hasFields() {
    throw new AvroRuntimeException("Not a record: " + this);
  }

  /**
   * If this is a record, set its fields. The fields can be set only once in a
   * schema.
   *
   * @throws AvroRuntimeException if this is not a record schema
   */
  public void setFields(List<Field> fields) {
    throw new AvroRuntimeException("Not a record: " + this);
  }

  /**
   * If this is an enum, return its symbols.
   *
   * @throws AvroRuntimeException if this is not an enum schema
   */
  public List<String> getEnumSymbols() {
    throw new AvroRuntimeException("Not an enum: " + this);
  }

  /**
   * If this is an enum, return its default value.
   *
   * @throws AvroRuntimeException if this is not an enum schema
   */
  public String getEnumDefault() {
    throw new AvroRuntimeException("Not an enum: " + this);
  }

  /**
   * If this is an enum, return a symbol's ordinal value.
   *
   * @throws AvroRuntimeException if this is not an enum schema
   */
  public int getEnumOrdinal(String symbol) {
    throw new AvroRuntimeException("Not an enum: " + this);
  }

  /**
   * If this is an enum, returns true if it contains given symbol.
   *
   * @throws AvroRuntimeException if this is not an enum schema
   */
  public boolean hasEnumSymbol(String symbol) {
    throw new AvroRuntimeException("Not an enum: " + this);
  }

  /**
   * If this is a record, enum or fixed, returns its name, otherwise the name of
   * the primitive type.
   */
  public String getName() {
    return type.name;
  }

  /**
   * If this is a record, enum, or fixed, returns its docstring, if available.
   * Otherwise, returns null.
   */
  public String getDoc() {
    return null;
  }

  /**
   * If this is a record, enum or fixed, returns its namespace, if any.
   *
   * @throws AvroRuntimeException if this is not a named schema
   */
  public String getNamespace() {
    throw new AvroRuntimeException("Not a named type: " + this);
  }

  /**
   * If this is a record, enum or fixed, returns its namespace-qualified name,
   * otherwise returns the name of the primitive type.
   */
  public String getFullName() {
    return getName();
  }

  /**
   * If this is a record, enum or fixed, add an alias.
   *
   * @throws AvroRuntimeException if this is not a named schema
   */
  public void addAlias(String alias) {
    throw new AvroRuntimeException("Not a named type: " + this);
  }

  /**
   * If this is a record, enum or fixed, add an alias.
   *
   * @throws AvroRuntimeException if this is not a named schema
   */
  public void addAlias(String alias, String space) {
    throw new AvroRuntimeException("Not a named type: " + this);
  }

  /**
   * If this is a record, enum or fixed, return its aliases, if any.
   *
   * @throws AvroRuntimeException if this is not a named schema
   */
  public Set<String> getAliases() {
    throw new AvroRuntimeException("Not a named type: " + this);
  }

  /**
   * Returns true if this record is an error type.
   *
   * @throws AvroRuntimeException if this is not a record schema
   */
  public boolean isError() {
    throw new AvroRuntimeException("Not a record: " + this);
  }

  /**
   * If this is an array, returns its element type.
   *
   * @throws AvroRuntimeException if this is not an array schema
   */
  public Schema getElementType() {
    throw new AvroRuntimeException("Not an array: " + this);
  }

  /**
   * If this is a map, returns its value type.
   *
   * @throws AvroRuntimeException if this is not a map schema
   */
  public Schema getValueType() {
    throw new AvroRuntimeException("Not a map: " + this);
  }

  /**
   * If this is a union, returns its types.
   *
   * @throws AvroRuntimeException if this is not a union schema
   */
  public List<Schema> getTypes() {
    throw new AvroRuntimeException("Not a union: " + this);
  }

  /**
   * If this is a union, return the branch with the provided full name.
   *
   * @throws AvroRuntimeException if this is not a union schema
   */
  public Integer getIndexNamed(String name) {
    throw new AvroRuntimeException("Not a union: " + this);
  }

  /**
   * If this is fixed, returns its size.
   *
   * @throws AvroRuntimeException if this is not a fixed schema
   */
  public int getFixedSize() {
    throw new AvroRuntimeException("Not fixed: " + this);
  }
  /** Render this as <a href="https://json.org/">JSON</a>. */
  @Override
  public String toString() {
    return toString(false);
  }

  /**
   * Render this as <a href="https://json.org/">JSON</a>.
   *
   * @param pretty if true, pretty-print JSON.
   */
  public String toString(boolean pretty) {
    // A fresh Names context means nested named schemas are inlined on first use.
    return toString(new Names(), pretty);
  }
  /**
   * Render this as <a href="https://json.org/">JSON</a>, but without inlining the
   * referenced schemas.
   *
   * @param referencedSchemas referenced schemas; these are pre-registered so
   *                          they are emitted as name references rather than
   *                          inline definitions
   * @param pretty            if true, pretty-print JSON.
   */
  // Use at your own risk. This method should be removed with AVRO-2832.
  @Deprecated
  public String toString(Collection<Schema> referencedSchemas, boolean pretty) {
    Schema.Names names = new Schema.Names();
    if (referencedSchemas != null) {
      for (Schema s : referencedSchemas) {
        names.add(s);
      }
    }
    return toString(names, pretty);
  }
String toString(Names names, boolean pretty) {
try {
StringWriter writer = new StringWriter();
JsonGenerator gen = FACTORY.createGenerator(writer);
if (pretty)
gen.useDefaultPrettyPrinter();
toJson(names, gen);
gen.flush();
return writer.toString();
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
  // Write this schema as JSON. A primitive with no extra properties is written
  // as a bare type-name string; otherwise as a {"type": ..., <props>} object.
  void toJson(Names names, JsonGenerator gen) throws IOException {
    if (!hasProps()) { // no props defined
      gen.writeString(getName()); // just write name
    } else {
      gen.writeStartObject();
      gen.writeStringField("type", getName());
      writeProps(gen);
      gen.writeEndObject();
    }
  }

  // Overridden by RecordSchema; every other schema type has no fields.
  void fieldsToJson(Names names, JsonGenerator gen) throws IOException {
    throw new AvroRuntimeException("Not a record: " + this);
  }
  @Override
  public boolean equals(Object o) {
    if (o == this)
      return true;
    if (!(o instanceof Schema))
      return false;
    Schema that = (Schema) o;
    if (!(this.type == that.type))
      return false;
    // Cheap rejection via cached hashes before comparing properties.
    return equalCachedHash(that) && propsEqual(that);
  }

  @Override
  public final int hashCode() {
    // Lazily computed and cached; the cache is invalidated by addProp() and
    // RecordSchema.setFields().
    if (hashCode == NO_HASHCODE)
      hashCode = computeHash();
    return hashCode;
  }

  int computeHash() {
    return getType().hashCode() + propsHashCode();
  }

  // Two schemas whose hashes are both cached and differ cannot be equal; if
  // either hash is not yet computed this check is inconclusive and returns true.
  final boolean equalCachedHash(Schema other) {
    return (hashCode == other.hashCode) || (hashCode == NO_HASHCODE) || (other.hashCode == NO_HASHCODE);
  }
  // Property names that may not be used as user properties on fields.
  private static final Set<String> FIELD_RESERVED = Collections
      .unmodifiableSet(new HashSet<>(Arrays.asList("default", "doc", "name", "order", "type", "aliases")));

  /** Returns true if this schema is a union type. */
  public boolean isUnion() {
    return this instanceof UnionSchema;
  }
/** Returns true if this record is an union type containing null. */
public boolean isNullable() {
if (!isUnion()) {
return getType().equals(Schema.Type.NULL);
}
for (Schema schema : getTypes()) {
if (schema.isNullable()) {
return true;
}
}
return false;
}
  /** A field within a record. */
  public static class Field extends JsonProperties {

    static {
      // Bridge that lets other org.apache.avro packages construct fields and
      // read raw JSON defaults without widening this class's public API.
      Accessor.setAccessor(new FieldAccessor() {
        @Override
        protected JsonNode defaultValue(Field field) {
          return field.defaultValue();
        }

        @Override
        protected Field createField(String name, Schema schema, String doc, JsonNode defaultValue) {
          return new Field(name, schema, doc, defaultValue, true, Order.ASCENDING);
        }

        @Override
        protected Field createField(String name, Schema schema, String doc, JsonNode defaultValue, boolean validate,
            Order order) {
          return new Field(name, schema, doc, defaultValue, validate, order);
        }
      });
    }

    /** How values of this field should be ordered when sorting records. */
    public enum Order {
      ASCENDING, DESCENDING, IGNORE;

      // Lower-case form of the constant, as written in schema JSON.
      private final String name;

      Order() {
        this.name = this.name().toLowerCase(Locale.ENGLISH);
      }
    }

    /**
     * For Schema unions with a "null" type as the first entry, this can be used to
     * specify that the default for the union is null.
     */
    public static final Object NULL_DEFAULT_VALUE = new Object();

    private final String name; // name of the field.
    private int position = -1; // index within the record; assigned by setFields()
    private final Schema schema;
    private final String doc;
    private final JsonNode defaultValue; // raw JSON default; null when unset
    private final Order order;
    private Set<String> aliases; // lazily created; null when no aliases

    Field(String name, Schema schema, String doc, JsonNode defaultValue, boolean validateDefault, Order order) {
      super(FIELD_RESERVED);
      this.name = validateName(name);
      this.schema = Objects.requireNonNull(schema, "schema is required and cannot be null");
      this.doc = doc;
      this.defaultValue = validateDefault ? validateDefault(name, schema, defaultValue) : defaultValue;
      this.order = Objects.requireNonNull(order, "Order cannot be null");
    }

    /**
     * Constructs a new Field instance with the same {@code name}, {@code doc},
     * {@code defaultValue}, and {@code order} as {@code field} has with changing
     * the schema to the specified one. It also copies all the {@code props} and
     * {@code aliases}.
     */
    public Field(Field field, Schema schema) {
      this(field.name, schema, field.doc, field.defaultValue, true, field.order);
      putAll(field);
      if (field.aliases != null)
        aliases = new LinkedHashSet<>(field.aliases);
    }

    /**
     * Constructs a field with no documentation, no default value, and ascending
     * order.
     */
    public Field(String name, Schema schema) {
      this(name, schema, (String) null, (JsonNode) null, true, Order.ASCENDING);
    }

    /**
     * Constructs a field with documentation, no default value, and ascending
     * order.
     */
    public Field(String name, Schema schema, String doc) {
      this(name, schema, doc, (JsonNode) null, true, Order.ASCENDING);
    }

    /**
     * @param defaultValue the default value for this field specified using the
     *                     mapping in {@link JsonProperties}
     */
    public Field(String name, Schema schema, String doc, Object defaultValue) {
      this(name, schema, doc,
          defaultValue == NULL_DEFAULT_VALUE ? NullNode.getInstance() : JacksonUtils.toJsonNode(defaultValue), true,
          Order.ASCENDING);
    }

    /**
     * @param defaultValue the default value for this field specified using the
     *                     mapping in {@link JsonProperties}
     */
    public Field(String name, Schema schema, String doc, Object defaultValue, Order order) {
      this(name, schema, doc,
          defaultValue == NULL_DEFAULT_VALUE ? NullNode.getInstance() : JacksonUtils.toJsonNode(defaultValue), true,
          Objects.requireNonNull(order));
    }

    /** Returns the name of this field. */
    public String name() {
      return name;
    };

    /** The position of this field within the record. */
    public int pos() {
      return position;
    }

    /** This field's {@link Schema}. */
    public Schema schema() {
      return schema;
    }

    /** Field's documentation within the record, if set. May return null. */
    public String doc() {
      return doc;
    }

    /**
     * @return true if this Field has a default value set. Can be used to determine
     *         if a "null" return from defaultVal() is due to that being the default
     *         value or just not set.
     */
    public boolean hasDefaultValue() {
      return defaultValue != null;
    }

    // Raw Jackson representation of the default; exposed via the Accessor bridge.
    JsonNode defaultValue() {
      return defaultValue;
    }

    /**
     * @return the default value for this field specified using the mapping in
     *         {@link JsonProperties}
     */
    public Object defaultVal() {
      return JacksonUtils.toObject(defaultValue, schema);
    }

    /** Returns the sort order of this field. */
    public Order order() {
      return order;
    }

    /** Adds an alias (an alternate name) for this field. */
    public void addAlias(String alias) {
      if (aliases == null)
        this.aliases = new LinkedHashSet<>();
      aliases.add(alias);
    }

    /** Return the defined aliases as an unmodifiable Set. */
    public Set<String> aliases() {
      if (aliases == null)
        return Collections.emptySet();
      return Collections.unmodifiableSet(aliases);
    }

    @Override
    public boolean equals(Object other) {
      if (other == this)
        return true;
      if (!(other instanceof Field))
        return false;
      Field that = (Field) other;
      return (name.equals(that.name)) && (schema.equals(that.schema)) && defaultValueEquals(that.defaultValue)
          && (order == that.order) && propsEqual(that);
    }

    @Override
    public int hashCode() {
      return name.hashCode() + schema.computeHash();
    }

    private boolean defaultValueEquals(JsonNode thatDefaultValue) {
      if (defaultValue == null)
        return thatDefaultValue == null;
      if (thatDefaultValue == null)
        return false;
      // NaN != NaN under JsonNode.equals, but two NaN defaults should match.
      if (Double.isNaN(defaultValue.doubleValue()))
        return Double.isNaN(thatDefaultValue.doubleValue());
      return defaultValue.equals(thatDefaultValue);
    }

    @Override
    public String toString() {
      return name + " type:" + schema.type + " pos:" + position;
    }

    /**
     * Parse a field from its JSON definition.
     *
     * @param field     : json field definition.
     * @param names     : names map of already-defined schemas.
     * @param namespace : current working namespace.
     * @return field.
     * @throws SchemaParseException if the definition is missing or malformed.
     */
    static Field parse(JsonNode field, Names names, String namespace) {
      String fieldName = getRequiredText(field, "name", "No field name");
      String fieldDoc = getOptionalText(field, "doc");
      JsonNode fieldTypeNode = field.get("type");
      if (fieldTypeNode == null) {
        throw new SchemaParseException("No field type: " + field);
      }

      Schema fieldSchema = null;
      if (fieldTypeNode.isTextual()) {
        // Type given by name: look it up as-is, then relative to the namespace.
        Schema schemaField = names.get(fieldTypeNode.textValue());
        if (schemaField == null) {
          schemaField = names.get(namespace + "." + fieldTypeNode.textValue());
        }
        if (schemaField == null) {
          throw new SchemaParseException(fieldTypeNode + " is not a defined name." + " The type of the \"" + fieldName
              + "\" field must be a defined name or a {\"type\": ...} expression.");
        }
        fieldSchema = schemaField;
      } else if (fieldTypeNode.isObject()) {
        fieldSchema = resolveSchema(fieldTypeNode, names, namespace);
        if (fieldSchema == null) {
          fieldSchema = Schema.parseCompleteSchema(fieldTypeNode, names, namespace);
        }
      } else if (fieldTypeNode.isArray()) {
        // A JSON array denotes a union of the listed types.
        List<Schema> unionTypes = new ArrayList<>();

        fieldTypeNode.forEach((JsonNode node) -> {
          Schema subSchema = null;
          if (node.isTextual()) {
            subSchema = names.get(node.asText());
            if (subSchema == null) {
              subSchema = names.get(namespace + "." + node.asText());
            }
          } else if (node.isObject()) {
            subSchema = Schema.parseCompleteSchema(node, names, namespace);
          } else {
            throw new SchemaParseException("Illegal type in union : " + node);
          }
          if (subSchema == null) {
            throw new SchemaParseException("Null element in union : " + node);
          }
          unionTypes.add(subSchema);
        });

        fieldSchema = Schema.createUnion(unionTypes);
      }

      if (fieldSchema == null) {
        throw new SchemaParseException("Can't find type for field " + fieldName);
      }
      Field.Order order = Field.Order.ASCENDING;
      JsonNode orderNode = field.get("order");
      if (orderNode != null)
        order = Field.Order.valueOf(orderNode.textValue().toUpperCase(Locale.ENGLISH));
      JsonNode defaultValue = field.get("default");
      // Float/double defaults may be written as strings; coerce them to numbers.
      if (defaultValue != null
          && (Type.FLOAT.equals(fieldSchema.getType()) || Type.DOUBLE.equals(fieldSchema.getType()))
          && defaultValue.isTextual()) {
        try {
          defaultValue = new DoubleNode(Double.valueOf(defaultValue.textValue()));
        } catch (NumberFormatException ex) {
          throw new SchemaParseException(
              "Can't parse number '" + defaultValue.textValue() + "' for field '" + fieldName);
        }
      }

      Field f = new Field(fieldName, fieldSchema, fieldDoc, defaultValue, true, order);
      Iterator<String> i = field.fieldNames();
      while (i.hasNext()) { // add field props
        String prop = i.next();
        if (!FIELD_RESERVED.contains(prop))
          f.addProp(prop, field.get(prop));
      }
      f.aliases = parseAliases(field);
      return f;
    }
  }
  /**
   * A schema name: a simple name plus an optional namespace, with the full
   * (dotted) form precomputed. Instances are immutable.
   */
  static class Name {
    private final String name; // simple name; null for anonymous schemas
    private final String space; // namespace; null when unqualified
    private final String full; // space + "." + name, or just name

    public Name(String name, String space) {
      if (name == null) { // anonymous
        this.name = this.space = this.full = null;
        return;
      }
      int lastDot = name.lastIndexOf('.');
      if (lastDot < 0) { // unqualified name
        this.name = validateName(name);
      } else { // qualified name
        space = name.substring(0, lastDot); // get space from name
        this.name = validateName(name.substring(lastDot + 1));
      }
      if ("".equals(space))
        space = null;
      this.space = space;
      this.full = (this.space == null) ? this.name : this.space + "." + this.name;
    }

    @Override
    public boolean equals(Object o) {
      if (o == this)
        return true;
      if (!(o instanceof Name))
        return false;
      Name that = (Name) o;
      return Objects.equals(full, that.full);
    }

    @Override
    public int hashCode() {
      return full == null ? 0 : full.hashCode();
    }

    @Override
    public String toString() {
      return full;
    }

    // Write "name"/"namespace" JSON fields. The namespace is omitted when it
    // matches the enclosing default; an explicit "" namespace marks the null
    // namespace inside a non-null default namespace.
    public void writeName(Names names, JsonGenerator gen) throws IOException {
      if (name != null)
        gen.writeStringField("name", name);
      if (space != null) {
        if (!space.equals(names.space()))
          gen.writeStringField("namespace", space);
      } else if (names.space() != null) { // null within non-null
        gen.writeStringField("namespace", "");
      }
    }

    // Returns the full name when required (see shouldWriteFull), else the
    // simple name.
    public String getQualified(String defaultSpace) {
      return this.shouldWriteFull(defaultSpace) ? full : name;
    }

    /**
     * Determine if full name must be written. There are 2 cases for true :
     * defaultSpace != from this.space or name is already a Schema.Type (int, array
     * ...)
     *
     * @param defaultSpace : default name space.
     * @return true if full name must be written.
     */
    private boolean shouldWriteFull(String defaultSpace) {
      if (space != null && space.equals(defaultSpace)) {
        for (Type schemaType : Type.values()) {
          if (schemaType.name.equals(name)) {
            // name is a 'Type', so namespace must be written
            return true;
          }
        }
        // this.space == defaultSpace
        return false;
      }
      // this.space != defaultSpace, so namespace must be written.
      return true;
    }
  }
  /** Base class for schemas that carry a name: records, enums and fixeds. */
  private static abstract class NamedSchema extends Schema {
    final Name name;
    final String doc;
    Set<Name> aliases; // lazily created; null when no aliases

    public NamedSchema(Type type, Name name, String doc) {
      super(type);
      this.name = name;
      this.doc = doc;
      if (PRIMITIVES.containsKey(name.full)) {
        throw new AvroTypeException("Schemas may not be named after primitives: " + name.full);
      }
    }

    @Override
    public String getName() {
      return name.name;
    }

    @Override
    public String getDoc() {
      return doc;
    }

    @Override
    public String getNamespace() {
      return name.space;
    }

    @Override
    public String getFullName() {
      return name.full;
    }

    @Override
    public void addAlias(String alias) {
      addAlias(alias, null);
    }

    @Override
    public void addAlias(String name, String space) {
      if (aliases == null)
        this.aliases = new LinkedHashSet<>();
      if (space == null)
        space = this.name.space;
      aliases.add(new Name(name, space));
    }

    @Override
    public Set<String> getAliases() {
      Set<String> result = new LinkedHashSet<>();
      if (aliases != null)
        for (Name alias : aliases)
          result.add(alias.full);
      return result;
    }

    // If this schema was already written under its name, emit just a name
    // reference and return true; otherwise register it in `names` and return
    // false so the caller writes the full definition.
    public boolean writeNameRef(Names names, JsonGenerator gen) throws IOException {
      if (this.equals(names.get(name))) {
        gen.writeString(name.getQualified(names.space()));
        return true;
      } else if (name.name != null) {
        names.put(name, this);
      }
      return false;
    }

    public void writeName(Names names, JsonGenerator gen) throws IOException {
      name.writeName(names, gen);
    }

    public boolean equalNames(NamedSchema that) {
      return this.name.equals(that.name);
    }

    @Override
    int computeHash() {
      return super.computeHash() + name.hashCode();
    }

    // Emit the "aliases" JSON array, if any aliases are defined.
    public void aliasesToJson(JsonGenerator gen) throws IOException {
      if (aliases == null || aliases.isEmpty())
        return;
      gen.writeFieldName("aliases");
      gen.writeStartArray();
      for (Name alias : aliases)
        gen.writeString(alias.getQualified(name.space));
      gen.writeEndArray();
    }
  }
/**
* Useful as key of {@link Map}s when traversing two schemas at the same time
* and need to watch for recursion.
*/
public static class SeenPair {
private Object s1;
private Object s2;
public SeenPair(Object s1, Object s2) {
this.s1 = s1;
this.s2 = s2;
}
public boolean equals(Object o) {
if (!(o instanceof SeenPair))
return false;
return this.s1 == ((SeenPair) o).s1 && this.s2 == ((SeenPair) o).s2;
}
@Override
public int hashCode() {
return System.identityHashCode(s1) + System.identityHashCode(s2);
}
}
  // Thread-local scratch state used to terminate recursion when comparing or
  // hashing recursive record schemas (see RecordSchema.equals / computeHash).
  private static final ThreadLocal<Set<SeenPair>> SEEN_EQUALS = ThreadLocalWithInitial.of(HashSet::new);
  private static final ThreadLocal<Map<Schema, Schema>> SEEN_HASHCODE = ThreadLocalWithInitial.of(IdentityHashMap::new);
  @SuppressWarnings(value = "unchecked")
  private static class RecordSchema extends NamedSchema {
    private List<Field> fields; // null until setFields() is called
    private Map<String, Field> fieldMap; // field name -> field, for getField()
    private final boolean isError;

    public RecordSchema(Name name, String doc, boolean isError) {
      super(Type.RECORD, name, doc);
      this.isError = isError;
    }

    public RecordSchema(Name name, String doc, boolean isError, List<Field> fields) {
      super(Type.RECORD, name, doc);
      this.isError = isError;
      setFields(fields);
    }

    @Override
    public boolean isError() {
      return isError;
    }

    @Override
    public Field getField(String fieldname) {
      if (fieldMap == null)
        throw new AvroRuntimeException("Schema fields not set yet");
      return fieldMap.get(fieldname);
    }

    @Override
    public List<Field> getFields() {
      if (fields == null)
        throw new AvroRuntimeException("Schema fields not set yet");
      return fields;
    }

    @Override
    public boolean hasFields() {
      return fields != null;
    }

    // Fields may be set only once; assigns each field its position and builds
    // the name lookup map, rejecting reused Field objects and duplicate names.
    @Override
    public void setFields(List<Field> fields) {
      if (this.fields != null) {
        throw new AvroRuntimeException("Fields are already set");
      }
      int i = 0;
      fieldMap = new HashMap<>(Math.multiplyExact(2, fields.size()));
      LockableArrayList<Field> ff = new LockableArrayList<>(fields.size());
      for (Field f : fields) {
        if (f.position != -1) {
          throw new AvroRuntimeException("Field already used: " + f);
        }
        f.position = i++;
        final Field existingField = fieldMap.put(f.name(), f);
        if (existingField != null) {
          throw new AvroRuntimeException(
              String.format("Duplicate field %s in record %s: %s and %s.", f.name(), name, f, existingField));
        }
        ff.add(f);
      }
      this.fields = ff.lock();
      this.hashCode = NO_HASHCODE; // fields participate in the hash
    }

    @Override
    public boolean equals(Object o) {
      if (o == this)
        return true;
      if (!(o instanceof RecordSchema))
        return false;
      RecordSchema that = (RecordSchema) o;
      if (!equalCachedHash(that))
        return false;
      if (!equalNames(that))
        return false;
      if (!propsEqual(that))
        return false;
      // Track in-progress comparisons so recursive records terminate.
      Set seen = SEEN_EQUALS.get();
      SeenPair here = new SeenPair(this, o);
      if (seen.contains(here))
        return true; // prevent stack overflow
      boolean first = seen.isEmpty();
      try {
        seen.add(here);
        return Objects.equals(fields, that.fields);
      } finally {
        if (first)
          seen.clear(); // outermost call cleans up the thread-local state
      }
    }

    @Override
    int computeHash() {
      // Track in-progress hashing so recursive records terminate.
      Map<Schema, Schema> seen = SEEN_HASHCODE.get();
      if (seen.containsKey(this))
        return 0; // prevent stack overflow
      boolean first = seen.isEmpty();
      try {
        seen.put(this, this);
        return super.computeHash() + fields.hashCode();
      } finally {
        if (first)
          seen.clear(); // outermost call cleans up the thread-local state
      }
    }

    @Override
    void toJson(Names names, JsonGenerator gen) throws IOException {
      if (writeNameRef(names, gen))
        return;
      String savedSpace = names.space; // save namespace
      gen.writeStartObject();
      gen.writeStringField("type", isError ? "error" : "record");
      writeName(names, gen);
      names.space = name.space; // set default namespace
      if (this.getDoc() != null)
        gen.writeStringField("doc", this.getDoc());
      if (fields != null) {
        gen.writeFieldName("fields");
        fieldsToJson(names, gen);
      }
      writeProps(gen);
      aliasesToJson(gen);
      gen.writeEndObject();
      names.space = savedSpace; // restore namespace
    }

    @Override
    void fieldsToJson(Names names, JsonGenerator gen) throws IOException {
      gen.writeStartArray();
      for (Field f : fields) {
        gen.writeStartObject();
        gen.writeStringField("name", f.name());
        gen.writeFieldName("type");
        f.schema().toJson(names, gen);
        if (f.doc() != null)
          gen.writeStringField("doc", f.doc());
        if (f.hasDefaultValue()) {
          gen.writeFieldName("default");
          gen.writeTree(f.defaultValue());
        }
        if (f.order() != Field.Order.ASCENDING)
          gen.writeStringField("order", f.order().name);
        if (f.aliases != null && f.aliases.size() != 0) {
          gen.writeFieldName("aliases");
          gen.writeStartArray();
          for (String alias : f.aliases)
            gen.writeString(alias);
          gen.writeEndArray();
        }
        f.writeProps(gen);
        gen.writeEndObject();
      }
      gen.writeEndArray();
    }
  }
  private static class EnumSchema extends NamedSchema {
    private final List<String> symbols; // immutable, in declaration order
    private final Map<String, Integer> ordinals; // symbol -> ordinal index
    private final String enumDefault; // null when no default symbol

    public EnumSchema(Name name, String doc, LockableArrayList<String> symbols, String enumDefault) {
      super(Type.ENUM, name, doc);
      this.symbols = symbols.lock();
      this.ordinals = new HashMap<>(Math.multiplyExact(2, symbols.size()));
      this.enumDefault = enumDefault;
      int i = 0;
      for (String symbol : symbols) {
        // Symbols must be valid names and unique.
        if (ordinals.put(validateName(symbol), i++) != null) {
          throw new SchemaParseException("Duplicate enum symbol: " + symbol);
        }
      }
      // The default, when present, must be one of the declared symbols.
      if (enumDefault != null && !symbols.contains(enumDefault)) {
        throw new SchemaParseException(
            "The Enum Default: " + enumDefault + " is not in the enum symbol set: " + symbols);
      }
    }

    @Override
    public List<String> getEnumSymbols() {
      return symbols;
    }

    @Override
    public boolean hasEnumSymbol(String symbol) {
      return ordinals.containsKey(symbol);
    }

    @Override
    public int getEnumOrdinal(String symbol) {
      return ordinals.get(symbol);
    }

    @Override
    public boolean equals(Object o) {
      if (o == this)
        return true;
      if (!(o instanceof EnumSchema))
        return false;
      EnumSchema that = (EnumSchema) o;
      return equalCachedHash(that) && equalNames(that) && symbols.equals(that.symbols) && propsEqual(that);
    }

    @Override
    public String getEnumDefault() {
      return enumDefault;
    }

    @Override
    int computeHash() {
      return super.computeHash() + symbols.hashCode();
    }

    @Override
    void toJson(Names names, JsonGenerator gen) throws IOException {
      if (writeNameRef(names, gen))
        return;
      gen.writeStartObject();
      gen.writeStringField("type", "enum");
      writeName(names, gen);
      if (getDoc() != null)
        gen.writeStringField("doc", getDoc());
      gen.writeArrayFieldStart("symbols");
      for (String symbol : symbols)
        gen.writeString(symbol);
      gen.writeEndArray();
      if (getEnumDefault() != null)
        gen.writeStringField("default", getEnumDefault());
      writeProps(gen);
      aliasesToJson(gen);
      gen.writeEndObject();
    }
  }
/** Schema for an Avro array: a sequence of values sharing one element schema. */
private static class ArraySchema extends Schema {
  /** Schema of every element held by the array. */
  private final Schema elementType;

  public ArraySchema(Schema elementType) {
    super(Type.ARRAY);
    this.elementType = elementType;
  }

  @Override
  public Schema getElementType() {
    return elementType;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof ArraySchema)) {
      return false;
    }
    ArraySchema other = (ArraySchema) o;
    return equalCachedHash(other) && elementType.equals(other.elementType) && propsEqual(other);
  }

  @Override
  int computeHash() {
    return super.computeHash() + elementType.computeHash();
  }

  @Override
  void toJson(Names names, JsonGenerator gen) throws IOException {
    // {"type":"array","items":<element schema>, ...props}
    gen.writeStartObject();
    gen.writeStringField("type", "array");
    gen.writeFieldName("items");
    elementType.toJson(names, gen);
    writeProps(gen);
    gen.writeEndObject();
  }
}
/** Schema for an Avro map: string keys mapped to values of one value schema. */
private static class MapSchema extends Schema {
  /** Schema of every value held by the map. */
  private final Schema valueType;

  public MapSchema(Schema valueType) {
    super(Type.MAP);
    this.valueType = valueType;
  }

  @Override
  public Schema getValueType() {
    return valueType;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof MapSchema)) {
      return false;
    }
    MapSchema other = (MapSchema) o;
    return equalCachedHash(other) && valueType.equals(other.valueType) && propsEqual(other);
  }

  @Override
  int computeHash() {
    return super.computeHash() + valueType.computeHash();
  }

  @Override
  void toJson(Names names, JsonGenerator gen) throws IOException {
    // {"type":"map","values":<value schema>, ...props}
    gen.writeStartObject();
    gen.writeStringField("type", "map");
    gen.writeFieldName("values");
    valueType.toJson(names, gen);
    writeProps(gen);
    gen.writeEndObject();
  }
}
/** Schema for a union: a choice among several alternative branch schemas. */
private static class UnionSchema extends Schema {
  private final List<Schema> types; // branches in declaration order (locked against mutation)
  private final Map<String, Integer> indexByName; // branch full name -> branch index

  /**
   * @param types the union's branches; locked so they cannot change afterwards
   * @throws AvroRuntimeException on a nested union, a nameless branch, or two
   *                              branches sharing a full name
   */
  public UnionSchema(LockableArrayList<Schema> types) {
    super(Type.UNION);
    this.indexByName = new HashMap<>(Math.multiplyExact(2, types.size()));
    this.types = types.lock();
    int index = 0;
    for (Schema type : types) {
      // unions may not immediately contain other unions
      if (type.getType() == Type.UNION) {
        throw new AvroRuntimeException("Nested union: " + this);
      }
      String name = type.getFullName();
      if (name == null) {
        throw new AvroRuntimeException("Nameless in union:" + this);
      }
      // branches must be unique by full name
      if (indexByName.put(name, index++) != null) {
        throw new AvroRuntimeException("Duplicate in union:" + name);
      }
    }
  }

  /**
   * Checks if a JSON value matches the schema.
   *
   * @param jsonValue a value to check against the schema
   * @return true if the value is valid according to this schema
   */
  public boolean isValidDefault(JsonNode jsonValue) {
    // valid as soon as any branch accepts the value
    return this.types.stream().anyMatch((Schema s) -> s.isValidDefault(jsonValue));
  }

  @Override
  public List<Schema> getTypes() {
    return types;
  }

  @Override
  public Integer getIndexNamed(String name) {
    return indexByName.get(name);
  }

  @Override
  public boolean equals(Object o) {
    if (o == this)
      return true;
    if (!(o instanceof UnionSchema))
      return false;
    UnionSchema that = (UnionSchema) o;
    return equalCachedHash(that) && types.equals(that.types) && propsEqual(that);
  }

  @Override
  int computeHash() {
    int hash = super.computeHash();
    for (Schema type : types)
      hash += type.computeHash();
    return hash;
  }

  @Override
  public void addProp(String name, String value) {
    // unions carry no properties of their own
    throw new AvroRuntimeException("Can't set properties on a union: " + this);
  }

  @Override
  void toJson(Names names, JsonGenerator gen) throws IOException {
    // a union serializes as a JSON array of its branch schemas
    gen.writeStartArray();
    for (Schema type : types)
      type.toJson(names, gen);
    gen.writeEndArray();
  }

  @Override
  public String getName() {
    // e.g. union of int and string renders its branch names as "[int, string]"
    return super.getName()
        + this.getTypes().stream().map(Schema::getName).collect(Collectors.joining(", ", "[", "]"));
  }
}
/** Schema for an Avro fixed type: a byte array of a fixed, declared size. */
private static class FixedSchema extends NamedSchema {
  /** Number of bytes in a value of this fixed type. */
  private final int size;

  public FixedSchema(Name name, String doc, int size) {
    super(Type.FIXED, name, doc);
    SystemLimitException.checkMaxBytesLength(size);
    this.size = size;
  }

  @Override
  public int getFixedSize() {
    return size;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof FixedSchema)) {
      return false;
    }
    FixedSchema other = (FixedSchema) o;
    return equalCachedHash(other) && equalNames(other) && size == other.size && propsEqual(other);
  }

  @Override
  int computeHash() {
    return super.computeHash() + size;
  }

  @Override
  void toJson(Names names, JsonGenerator gen) throws IOException {
    // already-written named schemas are emitted as a bare name reference
    if (writeNameRef(names, gen)) {
      return;
    }
    gen.writeStartObject();
    gen.writeStringField("type", "fixed");
    writeName(names, gen);
    if (getDoc() != null) {
      gen.writeStringField("doc", getDoc());
    }
    gen.writeNumberField("size", size);
    writeProps(gen);
    aliasesToJson(gen);
    gen.writeEndObject();
  }
}
/** Schema for the "string" primitive type. */
private static class StringSchema extends Schema {
  public StringSchema() {
    super(Type.STRING);
  }
}

/** Schema for the "bytes" primitive type. */
private static class BytesSchema extends Schema {
  public BytesSchema() {
    super(Type.BYTES);
  }
}

/** Schema for the "int" primitive type. */
private static class IntSchema extends Schema {
  public IntSchema() {
    super(Type.INT);
  }
}

/** Schema for the "long" primitive type. */
private static class LongSchema extends Schema {
  public LongSchema() {
    super(Type.LONG);
  }
}

/** Schema for the "float" primitive type. */
private static class FloatSchema extends Schema {
  public FloatSchema() {
    super(Type.FLOAT);
  }
}

/** Schema for the "double" primitive type. */
private static class DoubleSchema extends Schema {
  public DoubleSchema() {
    super(Type.DOUBLE);
  }
}

/** Schema for the "boolean" primitive type. */
private static class BooleanSchema extends Schema {
  public BooleanSchema() {
    super(Type.BOOLEAN);
  }
}

/** Schema for the "null" primitive type. */
private static class NullSchema extends Schema {
  public NullSchema() {
    super(Type.NULL);
  }
}
/**
 * A parser for JSON-format schemas. Each named schema parsed with a parser is
 * added to the names known to the parser so that subsequently parsed schemas
 * may refer to it by name.
 */
public static class Parser {
  private Names names = new Names(); // named schemas seen so far by this parser
  private final Schema.NameValidator validate; // name validation strategy
  private boolean validateDefaults = true; // whether field defaults are checked

  public Parser() {
    this(NameValidator.UTF_VALIDATOR);
  }

  public Parser(final NameValidator validate) {
    this.validate = validate;
  }

  /**
   * Adds the provided types to the set of defined, named types known to this
   * parser.
   *
   * @deprecated use {@link #addTypes(Iterable)} instead
   */
  @Deprecated
  public Parser addTypes(Map<String, Schema> types) {
    return this.addTypes(types.values());
  }

  /**
   * Adds the provided types to the set of defined, named types known to this
   * parser.
   */
  public Parser addTypes(Iterable<Schema> types) {
    for (Schema s : types)
      names.add(s);
    return this;
  }

  /** Returns the set of defined, named types known to this parser. */
  public Map<String, Schema> getTypes() {
    Map<String, Schema> result = new LinkedHashMap<>();
    for (Schema s : names.values())
      result.put(s.getFullName(), s);
    return result;
  }

  /** Enable or disable default value validation. */
  public Parser setValidateDefaults(boolean validateDefaults) {
    this.validateDefaults = validateDefaults;
    return this;
  }

  /** True iff default values are validated. True by default. */
  public boolean getValidateDefaults() {
    return this.validateDefaults;
  }

  /**
   * Parse a schema from the provided file. If named, the schema is added to the
   * names known to this parser.
   */
  public Schema parse(File file) throws IOException {
    return parse(FACTORY.createParser(file), false);
  }

  /**
   * Parse several schema files that may reference each other's named types.
   * First pass registers all declared names; second pass fills in record
   * fields, so cross-file references resolve regardless of file order.
   */
  public List<Schema> parse(Iterable<File> sources) throws IOException {
    final List<Schema> schemas = new ArrayList<>();
    for (File source : sources) {
      final Schema emptySchema = parseNamesDeclared(FACTORY.createParser(source));
      schemas.add(emptySchema);
    }
    for (File source : sources) {
      parseFieldsOnly(FACTORY.createParser(source));
    }
    return schemas;
  }

  /**
   * Parse a schema from the provided stream. If named, the schema is added to the
   * names known to this parser. The input stream stays open after the parsing.
   */
  public Schema parse(InputStream in) throws IOException {
    return parse(FACTORY.createParser(in).disable(JsonParser.Feature.AUTO_CLOSE_SOURCE), true);
  }

  /** Read a schema from one or more json strings */
  public Schema parse(String s, String... more) {
    StringBuilder b = new StringBuilder(s);
    for (String part : more)
      b.append(part);
    return parse(b.toString());
  }

  /**
   * Parse a schema from the provided string. If named, the schema is added to the
   * names known to this parser.
   */
  public Schema parse(String s) {
    try {
      return parse(FACTORY.createParser(s), false);
    } catch (IOException e) {
      throw new SchemaParseException(e);
    }
  }

  /** Internal parse step applied to the JSON tree read from the input. */
  private static interface ParseFunction {
    Schema parse(JsonNode node) throws IOException;
  }

  /**
   * Reads the JSON tree and applies {@code f} with this parser's validator and
   * default-validation settings installed as the thread-local configuration;
   * the previous thread-local values are restored afterwards.
   */
  private Schema runParser(JsonParser parser, ParseFunction f) throws IOException {
    NameValidator saved = validateNames.get();
    boolean savedValidateDefaults = VALIDATE_DEFAULTS.get();
    try {
      validateNames.set(validate);
      VALIDATE_DEFAULTS.set(validateDefaults);
      JsonNode jsonNode = MAPPER.readTree(parser);
      return f.parse(jsonNode);
    } catch (JsonParseException e) {
      throw new SchemaParseException(e);
    } finally {
      parser.close();
      validateNames.set(saved);
      VALIDATE_DEFAULTS.set(savedValidateDefaults);
    }
  }

  /**
   * Parses a single schema; unless {@code allowDanglingContent}, any non-blank
   * input remaining after the schema is rejected.
   */
  private Schema parse(JsonParser parser, final boolean allowDanglingContent) throws IOException {
    return this.runParser(parser, (JsonNode jsonNode) -> {
      Schema schema = Schema.parse(jsonNode, names);
      if (!allowDanglingContent) {
        String dangling;
        StringWriter danglingWriter = new StringWriter();
        int numCharsReleased = parser.releaseBuffered(danglingWriter);
        if (numCharsReleased == -1) {
          ByteArrayOutputStream danglingOutputStream = new ByteArrayOutputStream();
          parser.releaseBuffered(danglingOutputStream); // if input isn't chars above it must be bytes
          dangling = new String(danglingOutputStream.toByteArray(), StandardCharsets.UTF_8).trim();
        } else {
          dangling = danglingWriter.toString().trim();
        }
        if (!dangling.isEmpty()) {
          throw new SchemaParseException("dangling content after end of schema: " + dangling);
        }
      }
      return schema;
    });
  }

  /** First pass of multi-file parsing: register declared names only. */
  private Schema parseNamesDeclared(JsonParser parser) throws IOException {
    return this.runParser(parser, (JsonNode jsonNode) -> Schema.parseNamesDeclared(jsonNode, names, names.space));
  }

  /** Second pass of multi-file parsing: fill in record fields. */
  private Schema parseFieldsOnly(JsonParser parser) throws IOException {
    return this.runParser(parser, (JsonNode jsonNode) -> Schema.parseCompleteSchema(jsonNode, names, names.space));
  }
}
/**
 * Constructs a Schema object from JSON schema file <tt>file</tt>. The contents
 * of <tt>file</tt> is expected to be in UTF-8 format.
 *
 * @param file The file to read the schema from.
 * @return The freshly built Schema.
 * @throws IOException if there was trouble reading the contents or they are
 *                     invalid
 * @deprecated use {@link Schema.Parser} instead.
 */
@Deprecated
public static Schema parse(File file) throws IOException {
  return new Parser().parse(file);
}

/**
 * Constructs a Schema object from JSON schema stream <tt>in</tt>. The contents
 * of <tt>in</tt> is expected to be in UTF-8 format.
 *
 * @param in The input stream to read the schema from.
 * @return The freshly built Schema.
 * @throws IOException if there was trouble reading the contents or they are
 *                     invalid
 * @deprecated use {@link Schema.Parser} instead.
 */
@Deprecated
public static Schema parse(InputStream in) throws IOException {
  return new Parser().parse(in);
}

/**
 * Construct a schema from <a href="https://json.org/">JSON</a> text.
 *
 * @param jsonSchema the schema as a JSON string
 * @deprecated use {@link Schema.Parser} instead.
 */
@Deprecated
public static Schema parse(String jsonSchema) {
  return new Parser().parse(jsonSchema);
}

/**
 * Construct a schema from <a href="https://json.org/">JSON</a> text.
 *
 * @param validate true if names should be validated, false if not.
 * @deprecated use {@link Schema.Parser} instead.
 */
@Deprecated
public static Schema parse(String jsonSchema, boolean validate) {
  final NameValidator validator = validate ? NameValidator.UTF_VALIDATOR : NameValidator.NO_VALIDATION;
  return new Parser(validator).parse(jsonSchema);
}
/** Maps each primitive type name, as spelled in schema JSON, to its Type. */
static final Map<String, Type> PRIMITIVES = new HashMap<>();
static {
  PRIMITIVES.put("string", Type.STRING);
  PRIMITIVES.put("bytes", Type.BYTES);
  PRIMITIVES.put("int", Type.INT);
  PRIMITIVES.put("long", Type.LONG);
  PRIMITIVES.put("float", Type.FLOAT);
  PRIMITIVES.put("double", Type.DOUBLE);
  PRIMITIVES.put("boolean", Type.BOOLEAN);
  PRIMITIVES.put("null", Type.NULL);
}
/**
 * A registry of named schemas keyed by {@link Name}, with a default namespace
 * applied when looking up unqualified names. Insertion order is preserved.
 */
static class Names extends LinkedHashMap<Name, Schema> {
  private static final long serialVersionUID = 1L;
  private String space; // default namespace

  public Names() {
  }

  public Names(String space) {
    this.space = space;
  }

  /** Returns the default namespace, possibly null. */
  public String space() {
    return space;
  }

  /** Sets the default namespace. */
  public void space(String space) {
    this.space = space;
  }

  /**
   * Looks up a schema by name. Primitive type names yield a fresh primitive
   * schema; other names are resolved in the default namespace first, then in
   * the anonymous (empty) namespace.
   */
  public Schema get(String o) {
    Type primitive = PRIMITIVES.get(o);
    if (primitive != null) {
      return Schema.create(primitive);
    }
    Name name = new Name(o, space);
    if (!containsKey(name)) {
      // if not in default try anonymous
      name = new Name(o, "");
    }
    return super.get(name);
  }

  /** True iff a schema with the same name as {@code schema} is registered. */
  public boolean contains(Schema schema) {
    return get(((NamedSchema) schema).name) != null;
  }

  /** Registers {@code schema} under its own name. */
  public void add(Schema schema) {
    put(((NamedSchema) schema).name, schema);
  }

  @Override
  public Schema put(Name name, Schema schema) {
    // a name may only be redefined with an equal schema
    if (containsKey(name)) {
      final Schema other = super.get(name);
      if (!Objects.equals(other, schema)) {
        throw new SchemaParseException("Can't redefine: " + name);
      } else {
        return schema;
      }
    }
    return super.put(name, schema);
  }
}
// Per-thread name validator applied while parsing; defaults to UTF validation.
private static ThreadLocal<Schema.NameValidator> validateNames = ThreadLocalWithInitial
    .of(() -> NameValidator.UTF_VALIDATOR);

/**
 * Validates {@code name} with the current thread's validator.
 *
 * @return the name, unchanged, when it is valid
 * @throws SchemaParseException when validation fails
 */
private static String validateName(String name) {
  NameValidator.Result result = validateNames.get().validate(name);
  if (!result.isOK()) {
    throw new SchemaParseException(result.errors);
  }
  return name;
}

/** Installs {@code validator} as the current thread's name validator. */
public static void setNameValidator(final Schema.NameValidator validator) {
  Schema.validateNames.set(validator);
}

// Per-thread toggle controlling whether field defaults are validated.
private static final ThreadLocal<Boolean> VALIDATE_DEFAULTS = ThreadLocalWithInitial.of(() -> true);

/**
 * Returns {@code defaultValue} after checking it against {@code schema}, when
 * default validation is enabled on this thread.
 *
 * @throws AvroTypeException when the default does not match the schema
 */
private static JsonNode validateDefault(String fieldName, Schema schema, JsonNode defaultValue) {
  if (VALIDATE_DEFAULTS.get() && (defaultValue != null) && !schema.isValidDefault(defaultValue)) { // invalid default
    String message = "Invalid default for field " + fieldName + ": " + defaultValue + " not a " + schema;
    throw new AvroTypeException(message); // throw exception
  }
  return defaultValue;
}
/**
 * Checks if a JSON value matches the schema.
 *
 * @param jsonValue a value to check against the schema
 * @return true if the value is valid according to this schema
 */
public boolean isValidDefault(JsonNode jsonValue) {
  return isValidDefault(this, jsonValue);
}

/**
 * Recursively checks whether {@code defaultValue} is an acceptable default for
 * {@code schema}. A null node is never valid.
 */
private static boolean isValidDefault(Schema schema, JsonNode defaultValue) {
  if (defaultValue == null)
    return false;
  switch (schema.getType()) {
  case STRING:
  case BYTES:
  case ENUM:
  case FIXED:
    // all four are represented as JSON strings in defaults
    return defaultValue.isTextual();
  case INT:
    return defaultValue.isIntegralNumber() && defaultValue.canConvertToInt();
  case LONG:
    return defaultValue.isIntegralNumber() && defaultValue.canConvertToLong();
  case FLOAT:
  case DOUBLE:
    return defaultValue.isNumber();
  case BOOLEAN:
    return defaultValue.isBoolean();
  case NULL:
    return defaultValue.isNull();
  case ARRAY:
    // every element must match the element schema
    if (!defaultValue.isArray())
      return false;
    for (JsonNode element : defaultValue)
      if (!isValidDefault(schema.getElementType(), element))
        return false;
    return true;
  case MAP:
    // iterating an object node yields its values; each must match the value schema
    if (!defaultValue.isObject())
      return false;
    for (JsonNode value : defaultValue)
      if (!isValidDefault(schema.getValueType(), value))
        return false;
    return true;
  case UNION: // union default: any branch
    return schema.getTypes().stream().anyMatch((Schema s) -> isValidValue(s, defaultValue));
  case RECORD:
    // each field must match either the supplied value or the field's own default
    if (!defaultValue.isObject())
      return false;
    for (Field field : schema.getFields())
      if (!isValidValue(field.schema(),
          defaultValue.has(field.name()) ? defaultValue.get(field.name()) : field.defaultValue()))
        return false;
    return true;
  default:
    return false;
  }
}
/**
 * Validate a value against the schema.
 *
 * @param schema : schema for value.
 * @param value : value to validate.
 * @return true if ok.
 */
private static boolean isValidValue(Schema schema, JsonNode value) {
  if (value == null) {
    return false;
  }
  if (!schema.isUnion()) {
    // non-union: same check as for a default value
    return Schema.isValidDefault(schema, value);
  }
  // union: the value is acceptable as soon as any branch accepts it
  for (Schema branch : schema.getTypes()) {
    if (Schema.isValidDefault(branch, value)) {
      return true;
    }
  }
  return false;
}
/**
 * Parse named schema in order to fill names map. This method does not parse
 * field of record/error schema.
 *
 * @param schema           : json schema representation.
 * @param names            : map of named schema.
 * @param currentNameSpace : current working name space.
 * @return schema, or null when the node declares nothing parseable here.
 */
static Schema parseNamesDeclared(JsonNode schema, Names names, String currentNameSpace) {
  if (schema == null) {
    return null;
  }
  if (schema.isObject()) {
    String type = Schema.getOptionalText(schema, "type"); // may be null when "type" is absent
    Name name = null;
    String doc = null;
    Schema result = null;
    final boolean isTypeError = "error".equals(type);
    final boolean isTypeRecord = "record".equals(type);
    final boolean isTypeEnum = "enum".equals(type);
    final boolean isTypeFixed = "fixed".equals(type);
    if (isTypeRecord || isTypeError || isTypeEnum || isTypeFixed) {
      // named schemas: resolve the namespace and build the full name
      String space = getOptionalText(schema, "namespace");
      doc = getOptionalText(schema, "doc");
      if (space == null)
        space = currentNameSpace;
      name = new Name(getRequiredText(schema, "name", "No name in schema"), space);
    }
    if (isTypeRecord || isTypeError) { // record
      result = new RecordSchema(name, doc, isTypeError);
      names.add(result);
      JsonNode fieldsNode = schema.get("fields");
      if (fieldsNode == null || !fieldsNode.isArray())
        throw new SchemaParseException("Record has no fields: " + schema);
      // fields are parsed later (parseCompleteSchema); here we only collect
      // named types declared inside them
      exploreFields(fieldsNode, names, name != null ? name.space : null);
    } else if (isTypeEnum) { // enum
      JsonNode symbolsNode = schema.get("symbols");
      if (symbolsNode == null || !symbolsNode.isArray())
        throw new SchemaParseException("Enum has no symbols: " + schema);
      LockableArrayList<String> symbols = new LockableArrayList<>(symbolsNode.size());
      for (JsonNode n : symbolsNode)
        symbols.add(n.textValue());
      JsonNode enumDefault = schema.get("default");
      String defaultSymbol = null;
      if (enumDefault != null)
        defaultSymbol = enumDefault.textValue();
      result = new EnumSchema(name, doc, symbols, defaultSymbol);
      names.add(result);
    } else if ("array".equals(type)) { // array
      // FIX: constant-first comparison; type may be null when the object has
      // no "type" attribute, and type.equals(...) would then throw NPE
      JsonNode itemsNode = schema.get("items");
      if (itemsNode == null)
        throw new SchemaParseException("Array has no items type: " + schema);
      final Schema items = Schema.parseNamesDeclared(itemsNode, names, currentNameSpace);
      result = Schema.createArray(items);
    } else if ("map".equals(type)) { // map
      // FIX: constant-first comparison, same NPE hazard as "array" above
      JsonNode valuesNode = schema.get("values");
      if (valuesNode == null)
        throw new SchemaParseException("Map has no values type: " + schema);
      final Schema values = Schema.parseNamesDeclared(valuesNode, names, currentNameSpace);
      result = Schema.createMap(values);
    } else if (isTypeFixed) { // fixed
      JsonNode sizeNode = schema.get("size");
      if (sizeNode == null || !sizeNode.isInt())
        throw new SchemaParseException("Invalid or no size: " + schema);
      result = new FixedSchema(name, doc, sizeNode.intValue());
      if (name != null)
        names.add(result);
    } else if (PRIMITIVES.containsKey(type)) {
      result = Schema.create(PRIMITIVES.get(type));
    }
    if (result != null) {
      // copy over user properties (and aliases for named schemas)
      Set<String> reserved = SCHEMA_RESERVED;
      if (isTypeEnum) {
        reserved = ENUM_RESERVED;
      }
      Schema.addProperties(schema, reserved, result);
    }
    return result;
  } else if (schema.isArray()) {
    // a JSON array denotes a union of its (non-null-parsed) members
    List<Schema> subs = new ArrayList<>(schema.size());
    schema.forEach((JsonNode item) -> {
      Schema sub = Schema.parseNamesDeclared(item, names, currentNameSpace);
      if (sub != null) {
        subs.add(sub);
      }
    });
    return Schema.createUnion(subs);
  } else if (schema.isTextual()) {
    // a bare string is a reference to a previously declared (or primitive) name
    String value = schema.asText();
    return names.get(value);
  }
  return null;
}
/**
 * Copies the non-reserved JSON attributes of {@code schema} onto
 * {@code avroSchema} as properties, resolves any logical type declared by
 * those properties, and registers aliases for named schemas.
 */
private static void addProperties(JsonNode schema, Set<String> reserved, Schema avroSchema) {
  Iterator<String> i = schema.fieldNames();
  while (i.hasNext()) { // add properties
    String prop = i.next();
    if (!reserved.contains(prop)) // ignore reserved
      avroSchema.addProp(prop, schema.get(prop));
  }
  // parse logical type if present
  avroSchema.logicalType = LogicalTypes.fromSchemaIgnoreInvalid(avroSchema);
  // names.space(savedSpace); // restore space
  if (avroSchema instanceof NamedSchema) {
    Set<String> aliases = parseAliases(schema);
    if (aliases != null) // add aliases
      for (String alias : aliases)
        avroSchema.addAlias(alias);
  }
}
/**
 * Explore record fields in order to fill names map with inner defined named
 * types.
 *
 * @param fieldsNode : json node for field.
 * @param names      : names map.
 * @param nameSpace  : current working namespace.
 */
private static void exploreFields(JsonNode fieldsNode, Names names, String nameSpace) {
  for (JsonNode field : fieldsNode) {
    final JsonNode declaredType = field.get("type");
    if (declaredType == null) {
      continue; // field without a type: nothing to collect
    }
    if (declaredType.isObject()) {
      // inline schema declaration
      parseNamesDeclared(declaredType, names, nameSpace);
    } else if (declaredType.isArray()) {
      // union: recurse over the branches
      exploreFields(declaredType, names, nameSpace);
    } else if (declaredType.isTextual() && field.isObject()) {
      parseNamesDeclared(field, names, nameSpace);
    }
  }
}
/**
 * In complement of parseNamesDeclared, this method parses the schema in
 * detail, filling in the fields of the record schemas that the first pass
 * registered as empty shells.
 *
 * @param schema       : json schema.
 * @param names        : names map.
 * @param currentSpace : current working name space.
 * @return complete schema.
 */
static Schema parseCompleteSchema(JsonNode schema, Names names, String currentSpace) {
  if (schema == null) {
    throw new SchemaParseException("Cannot parse <null> schema");
  }
  if (schema.isTextual()) {
    // bare name reference: try as-is, then qualified by the current namespace
    String type = schema.asText();
    Schema avroSchema = names.get(type);
    if (avroSchema == null) {
      avroSchema = names.get(currentSpace + "." + type);
    }
    return avroSchema;
  }
  if (schema.isArray()) {
    // JSON array denotes a union of its members
    List<Schema> schemas = StreamSupport.stream(schema.spliterator(), false)
        .map((JsonNode sub) -> parseCompleteSchema(sub, names, currentSpace)).collect(Collectors.toList());
    return Schema.createUnion(schemas);
  }
  if (schema.isObject()) {
    Schema result = null;
    String type = getRequiredText(schema, "type", "No type");
    Name name = null;
    final boolean isTypeError = "error".equals(type);
    final boolean isTypeRecord = "record".equals(type);
    final boolean isTypeArray = "array".equals(type);
    if (isTypeRecord || isTypeError || "enum".equals(type) || "fixed".equals(type)) {
      // named schema: it must have been registered by parseNamesDeclared
      String space = getOptionalText(schema, "namespace");
      if (space == null)
        space = currentSpace;
      name = new Name(getRequiredText(schema, "name", "No name in schema"), space);
      result = names.get(name);
      if (result == null) {
        throw new SchemaParseException("Unparsed field type " + name);
      }
    }
    if (isTypeRecord || isTypeError) {
      // fill in fields only once (the shell from pass one has none yet)
      if (result != null && !result.hasFields()) {
        final List<Field> fields = new ArrayList<>();
        JsonNode fieldsNode = schema.get("fields");
        if (fieldsNode == null || !fieldsNode.isArray())
          throw new SchemaParseException("Record has no fields: " + schema);
        for (JsonNode field : fieldsNode) {
          Field f = Field.parse(field, names, name.space);
          fields.add(f);
          // a logicalType on the field (rather than its type) is ignored; warn
          if (f.schema.getLogicalType() == null && getOptionalText(field, LOGICAL_TYPE_PROP) != null)
            LOG.warn(
                "Ignored the {}.{}.logicalType property (\"{}\"). It should probably be nested inside the \"type\" for the field.",
                name, f.name, getOptionalText(field, "logicalType"));
        }
        result.setFields(fields);
      }
    } else if (isTypeArray) {
      JsonNode items = schema.get("items");
      Schema schemaItems = parseCompleteSchema(items, names, currentSpace);
      result = Schema.createArray(schemaItems);
    } else if ("map".equals(type)) {
      JsonNode values = schema.get("values");
      Schema mapItems = parseCompleteSchema(values, names, currentSpace);
      result = Schema.createMap(mapItems);
    } else if (result == null) {
      // primitive or previously-registered name; qualified lookup first
      result = names.get(currentSpace + "." + type);
      if (result == null) {
        result = names.get(type);
      }
    }
    Set<String> reserved = SCHEMA_RESERVED;
    if ("enum".equals(type)) {
      reserved = ENUM_RESERVED;
    }
    Schema.addProperties(schema, reserved, result);
    return result;
  }
  return null;
}
/**
 * Parses a schema from its JSON tree: a first pass registers all named types,
 * a second pass fills in record fields.
 */
static Schema parse(JsonNode schema, Names names) {
  if (schema == null) {
    throw new SchemaParseException("Cannot parse <null> schema");
  }
  final Schema result = Schema.parseNamesDeclared(schema, names, names.space);
  Schema.parseCompleteSchema(schema, names, names.space);
  return result;
}
/**
 * Looks up a previously-registered named schema for the given JSON node,
 * completing its fields first when it is still an empty record shell.
 *
 * @return the registered schema, or null when the node has no "name"
 *         attribute or the name is unknown
 */
static Schema resolveSchema(JsonNode schema, Names names, String currentNameSpace) {
  String np = currentNameSpace;
  String nodeName = getOptionalText(schema, "name");
  if (nodeName != null) {
    // build the full name from an explicit namespace, when present
    final JsonNode nameSpace = schema.get("namespace");
    StringBuilder fullName = new StringBuilder();
    if (nameSpace != null && nameSpace.isTextual()) {
      fullName.append(nameSpace.asText()).append(".");
      np = nameSpace.asText();
    }
    fullName.append(nodeName);
    Schema schema1 = names.get(fullName.toString());
    // a registered record without fields is a first-pass shell: complete it
    if (schema1 != null && schema1.getType() == Type.RECORD && !schema1.hasFields()) {
      Schema.parseCompleteSchema(schema, names, np);
    }
    return schema1;
  }
  return null;
}
/**
 * Reads the "aliases" attribute of a schema node.
 *
 * @return the aliases in declaration order, or null when none are declared
 * @throws SchemaParseException when "aliases" is not an array of strings
 */
static Set<String> parseAliases(JsonNode node) {
  JsonNode aliasesNode = node.get("aliases");
  if (aliasesNode == null) {
    return null;
  }
  if (!aliasesNode.isArray()) {
    throw new SchemaParseException("aliases not an array: " + node);
  }
  Set<String> result = new LinkedHashSet<>();
  for (JsonNode aliasNode : aliasesNode) {
    if (!aliasNode.isTextual()) {
      throw new SchemaParseException("alias not a string: " + aliasNode);
    }
    result.add(aliasNode.textValue());
  }
  return result;
}
/**
 * Extracts text value associated to key from the container JsonNode, and throws
 * {@link SchemaParseException} if it doesn't exist.
 *
 * @param container Container where to find key.
 * @param key       Key to look for in container.
 * @param error     String to prepend to the SchemaParseException.
 */
private static String getRequiredText(JsonNode container, String key, String error) {
  final String value = getOptionalText(container, key);
  if (value == null) {
    throw new SchemaParseException(error + ": " + container);
  }
  return value;
}
/**
 * Extracts the text value associated with {@code key} from {@code container},
 * or null when the key is absent or its value is not textual.
 */
private static String getOptionalText(JsonNode container, String key) {
  JsonNode value = container.get(key);
  if (value == null) {
    return null;
  }
  return value.textValue();
}
/**
 * Parses {@code s} into a Jackson JSON tree.
 *
 * @throws RuntimeException wrapping any IOException from the parser
 */
static JsonNode parseJson(String s) {
  try {
    return MAPPER.readTree(FACTORY.createParser(s));
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}

/**
 * Parses the specified json string to an object.
 */
public static Object parseJsonToObject(String s) {
  return JacksonUtils.toObject(parseJson(s));
}
/**
 * Rewrite a writer's schema using the aliases from a reader's schema. This
 * permits reading records, enums and fixed schemas whose names have changed,
 * and records whose field names have changed. The returned schema always
 * contains the same data elements in the same order, but with possibly
 * different names.
 */
public static Schema applyAliases(Schema writer, Schema reader) {
  if (writer.equals(reader))
    return writer; // same schema
  // create indexes of names
  Map<Schema, Schema> seen = new IdentityHashMap<>(1); // identity map: breaks cycles in recursive schemas
  Map<Name, Name> aliases = new HashMap<>(1); // reader alias -> reader name
  Map<Name, Map<String, String>> fieldAliases = new HashMap<>(1); // record name -> (field alias -> field name)
  getAliases(reader, seen, aliases, fieldAliases);
  if (aliases.size() == 0 && fieldAliases.size() == 0)
    return writer; // no aliases
  seen.clear(); // reuse the identity map for the rewrite pass
  return applyAliases(writer, seen, aliases, fieldAliases);
}
/**
 * Recursively rewrites {@code s}, renaming schemas and record fields per the
 * alias maps collected from the reader. Returns {@code s} itself when nothing
 * changes; otherwise a new schema with the original's properties copied over.
 */
private static Schema applyAliases(Schema s, Map<Schema, Schema> seen, Map<Name, Name> aliases,
    Map<Name, Map<String, String>> fieldAliases) {
  Name name = s instanceof NamedSchema ? ((NamedSchema) s).name : null;
  Schema result = s;
  switch (s.getType()) {
  case RECORD:
    if (seen.containsKey(s))
      return seen.get(s); // break loops
    if (aliases.containsKey(name))
      name = aliases.get(name);
    result = Schema.createRecord(name.full, s.getDoc(), null, s.isError());
    // register before recursing so self-referential fields resolve to result
    seen.put(s, result);
    List<Field> newFields = new ArrayList<>();
    for (Field f : s.getFields()) {
      Schema fSchema = applyAliases(f.schema, seen, aliases, fieldAliases);
      String fName = getFieldAlias(name, f.name, fieldAliases);
      Field newF = new Field(fName, fSchema, f.doc, f.defaultValue, true, f.order);
      newF.putAll(f); // copy props
      newFields.add(newF);
    }
    result.setFields(newFields);
    break;
  case ENUM:
    if (aliases.containsKey(name))
      result = Schema.createEnum(aliases.get(name).full, s.getDoc(), null, s.getEnumSymbols(), s.getEnumDefault());
    break;
  case ARRAY:
    Schema e = applyAliases(s.getElementType(), seen, aliases, fieldAliases);
    if (!e.equals(s.getElementType()))
      result = Schema.createArray(e);
    break;
  case MAP:
    Schema v = applyAliases(s.getValueType(), seen, aliases, fieldAliases);
    if (!v.equals(s.getValueType()))
      result = Schema.createMap(v);
    break;
  case UNION:
    List<Schema> types = new ArrayList<>();
    for (Schema branch : s.getTypes())
      types.add(applyAliases(branch, seen, aliases, fieldAliases));
    result = Schema.createUnion(types);
    break;
  case FIXED:
    if (aliases.containsKey(name))
      result = Schema.createFixed(aliases.get(name).full, s.getDoc(), null, s.getFixedSize());
    break;
  default:
    // NO-OP
  }
  if (!result.equals(s))
    result.putAll(s); // copy props
  return result;
}
/**
 * Recursively collects, from a reader schema, the name aliases and per-record
 * field aliases used by {@link #applyAliases(Schema, Schema)}.
 *
 * @param seen         identity map used to break cycles in recursive records
 * @param aliases      out: schema alias -> actual schema name
 * @param fieldAliases out: record name -> (field alias -> actual field name)
 */
private static void getAliases(Schema schema, Map<Schema, Schema> seen, Map<Name, Name> aliases,
    Map<Name, Map<String, String>> fieldAliases) {
  if (schema instanceof NamedSchema) {
    NamedSchema namedSchema = (NamedSchema) schema;
    if (namedSchema.aliases != null)
      for (Name alias : namedSchema.aliases)
        aliases.put(alias, namedSchema.name);
  }
  switch (schema.getType()) {
  case RECORD:
    if (seen.containsKey(schema))
      return; // break loops
    seen.put(schema, schema);
    RecordSchema record = (RecordSchema) schema;
    for (Field field : schema.getFields()) {
      if (field.aliases != null)
        for (String fieldAlias : field.aliases) {
          Map<String, String> recordAliases = fieldAliases.computeIfAbsent(record.name, k -> new HashMap<>());
          recordAliases.put(fieldAlias, field.name);
        }
      getAliases(field.schema, seen, aliases, fieldAliases);
    }
    // field aliases apply under the record's own aliases too
    if (record.aliases != null && fieldAliases.containsKey(record.name))
      for (Name recordAlias : record.aliases)
        fieldAliases.put(recordAlias, fieldAliases.get(record.name));
    break;
  case ARRAY:
    getAliases(schema.getElementType(), seen, aliases, fieldAliases);
    break;
  case MAP:
    getAliases(schema.getValueType(), seen, aliases, fieldAliases);
    break;
  case UNION:
    for (Schema s : schema.getTypes())
      getAliases(s, seen, aliases, fieldAliases);
    break;
  }
}
/**
 * Resolves a writer field name through the reader's field aliases for the
 * given record; returns the original name when no alias applies.
 */
private static String getFieldAlias(Name record, String field, Map<Name, Map<String, String>> fieldAliases) {
  Map<String, String> recordAliases = fieldAliases.get(record);
  if (recordAliases == null) {
    return field;
  }
  return recordAliases.getOrDefault(field, field);
}
/** Pluggable strategy for validating schema, field and symbol names. */
public interface NameValidator {
  /** Outcome of a validation: either {@link #OK} or an error description. */
  class Result {
    private final String errors;

    public Result(final String errors) {
      this.errors = errors;
    }

    public boolean isOK() {
      // identity comparison: only the shared OK constant counts as success
      return this == NameValidator.OK;
    }

    public String getErrors() {
      return errors;
    }
  }

  /** Shared success result. */
  Result OK = new Result(null);

  /** Validates {@code name}; the default implementation accepts everything. */
  default Result validate(String name) {
    return OK;
  }

  /** Accepts any name. */
  NameValidator NO_VALIDATION = new NameValidator() {
  };

  /**
   * Accepts names of Unicode letters, digits and '_', starting with a letter
   * or '_'.
   */
  NameValidator UTF_VALIDATOR = new NameValidator() {
    @Override
    public Result validate(final String name) {
      if (name == null)
        return new Result("Null name");
      int length = name.length();
      if (length == 0)
        return new Result("Empty name");
      char first = name.charAt(0);
      if (!(Character.isLetter(first) || first == '_'))
        return new Result("Illegal initial character: " + name);
      for (int i = 1; i < length; i++) {
        char c = name.charAt(i);
        if (!(Character.isLetterOrDigit(c) || c == '_'))
          return new Result("Illegal character in: " + name);
      }
      return OK;
    }
  };

  /**
   * Accepts only ASCII letters, digits and '_', starting with an ASCII letter
   * or '_' (the Avro specification's name rule).
   */
  NameValidator STRICT_VALIDATOR = new NameValidator() {
    @Override
    public Result validate(final String name) {
      if (name == null)
        return new Result("Null name");
      int length = name.length();
      if (length == 0)
        return new Result("Empty name");
      char first = name.charAt(0);
      if (!(isLetter(first) || first == '_'))
        return new Result("Illegal initial character: " + name);
      for (int i = 1; i < length; i++) {
        char c = name.charAt(i);
        if (!(isLetter(c) || isDigit(c) || c == '_'))
          return new Result("Illegal character in: " + name);
      }
      return OK;
    }

    // ASCII-only letter check, unlike Character.isLetter
    private boolean isLetter(char c) {
      return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
    }

    private boolean isDigit(char c) {
      return c >= '0' && c <= '9';
    }
  };
}
/**
 * No change is permitted on LockableArrayList once lock() has been called on
 * it.
 *
 * @param <E> the element type held by the list
 */
/*
* This class keeps a boolean variable <tt>locked</tt> which is set to
* <tt>true</tt> in the lock() method. It's legal to call lock() any number of
* times. Any lock() other than the first one is a no-op.
*
* This class throws <tt>IllegalStateException</tt> if a mutating operation is
* performed after being locked. Since modifications through iterator also use
* the list's mutating operations, this effectively blocks all modifications.
*/
static class LockableArrayList<E> extends ArrayList<E> {
  private static final long serialVersionUID = 1L;

  /** Once true, every mutating operation throws {@link IllegalStateException}. */
  private boolean locked = false;

  public LockableArrayList() {
  }

  public LockableArrayList(int size) {
    super(size);
  }

  public LockableArrayList(List<E> types) {
    super(types);
  }

  @SafeVarargs
  public LockableArrayList(E... types) {
    super(types.length);
    Collections.addAll(this, types);
  }

  /**
   * Permanently locks this list against modification. Calling lock() more than
   * once is a no-op.
   *
   * @return this list, for call chaining
   */
  public List<E> lock() {
    locked = true;
    return this;
  }

  private void ensureUnlocked() {
    if (locked) {
      throw new IllegalStateException();
    }
  }

  @Override
  public boolean add(E e) {
    ensureUnlocked();
    return super.add(e);
  }

  @Override
  public boolean remove(Object o) {
    ensureUnlocked();
    return super.remove(o);
  }

  @Override
  public E remove(int index) {
    ensureUnlocked();
    return super.remove(index);
  }

  // ArrayList.set() mutates in place without going through add/remove, so
  // without this override a locked list could still be modified.
  @Override
  public E set(int index, E element) {
    ensureUnlocked();
    return super.set(index, element);
  }

  @Override
  public boolean addAll(Collection<? extends E> c) {
    ensureUnlocked();
    return super.addAll(c);
  }

  @Override
  public boolean addAll(int index, Collection<? extends E> c) {
    ensureUnlocked();
    return super.addAll(index, c);
  }

  @Override
  public boolean removeAll(Collection<?> c) {
    ensureUnlocked();
    return super.removeAll(c);
  }

  @Override
  public boolean retainAll(Collection<?> c) {
    ensureUnlocked();
    return super.retainAll(c);
  }

  // removeIf/replaceAll/sort have optimized ArrayList implementations that
  // bypass the overridden add/remove methods; guard them explicitly. Types
  // are fully qualified so no new imports are needed in this file.
  @Override
  public boolean removeIf(java.util.function.Predicate<? super E> filter) {
    ensureUnlocked();
    return super.removeIf(filter);
  }

  @Override
  public void replaceAll(java.util.function.UnaryOperator<E> operator) {
    ensureUnlocked();
    super.replaceAll(operator);
  }

  @Override
  public void sort(java.util.Comparator<? super E> c) {
    ensureUnlocked();
    super.sort(c);
  }

  @Override
  public void clear() {
    ensureUnlocked();
    super.clear();
  }
}
}
| 7,232 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/SchemaValidationStrategy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
/**
 * An interface for validating the compatibility of a single schema against
 * another.
 * <p>
 * What makes one schema compatible with another is not defined by this
 * contract; each implementation defines its own compatibility rules.
 */
public interface SchemaValidationStrategy {
  /**
   * Validates that one schema is compatible with another.
   *
   * @param toValidate the schema being checked
   * @param existing   the schema it is checked against
   * @throws SchemaValidationException if the schemas are not compatible.
   */
  void validate(Schema toValidate, Schema existing) throws SchemaValidationException;
}
| 7,233 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/Protocol.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Field.Order;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringWriter;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* A set of messages forming an application protocol.
* <p>
* A protocol consists of:
* <ul>
* <li>a <i>name</i> for the protocol;
* <li>an optional <i>namespace</i>, further qualifying the name;
* <li>a list of <i>types</i>, or named {@link Schema schemas};
* <li>a list of <i>errors</i>, or named {@link Schema schemas} for exceptions;
* <li>a list of named <i>messages</i>, each of which specifies,
* <ul>
* <li><i>request</i>, the parameter schemas;
* <li>one of either;
* <ul>
* <li>one-way</li>
* </ul>
* or
* <ul>
* <li><i>response</i>, the response schema;
* <li><i>errors</i>, an optional list of potential error schema names.
* </ul>
* </ul>
* </ul>
*/
public class Protocol extends JsonProperties {
  /** The version of the protocol specification implemented here. */
  public static final long VERSION = 1;

  // Support properties for both Protocol and Message objects
  // Attribute names that belong to the message/field JSON syntax and must not
  // be treated as user-defined properties.
  private static final Set<String> MESSAGE_RESERVED = Collections
      .unmodifiableSet(new HashSet<>(Arrays.asList("doc", "response", "request", "errors", "one-way")));
  private static final Set<String> FIELD_RESERVED = Collections
      .unmodifiableSet(new HashSet<>(Arrays.asList("name", "type", "doc", "default", "aliases")));
/** A protocol message. */
public class Message extends JsonProperties {
  // Immutable after construction.
  private final String name;
  private final String doc;
  private final Schema request;

  /** Construct a message, copying any properties from {@code propMap}. */
  private Message(String name, String doc, JsonProperties propMap, Schema request) {
    super(MESSAGE_RESERVED);
    this.name = name;
    this.doc = doc;
    this.request = request;
    if (propMap != null)
      // copy props
      addAllProps(propMap);
  }

  private Message(String name, String doc, Map<String, ?> propMap, Schema request) {
    super(MESSAGE_RESERVED, propMap);
    this.name = name;
    this.doc = doc;
    this.request = request;
  }

  /** The name of this message. */
  public String getName() {
    return name;
  }

  /** The parameters of this message. */
  public Schema getRequest() {
    return request;
  }

  /** The returned data. A one-way message always responds with null. */
  public Schema getResponse() {
    return Schema.create(Schema.Type.NULL);
  }

  /** Errors that might be thrown. A one-way message declares none. */
  public Schema getErrors() {
    return Schema.createUnion(Collections.emptyList());
  }

  /** Returns true if this is a one-way message, with no response or errors. */
  public boolean isOneWay() {
    return true;
  }

  @Override
  public String toString() {
    try {
      StringWriter writer = new StringWriter();
      JsonGenerator gen = Schema.FACTORY.createGenerator(writer);
      toJson(gen);
      gen.flush();
      return writer.toString();
    } catch (IOException e) {
      throw new AvroRuntimeException(e);
    }
  }

  /** Writes this message as JSON; subclasses extend the form via {@link #toJson1}. */
  void toJson(JsonGenerator gen) throws IOException {
    gen.writeStartObject();
    if (doc != null)
      gen.writeStringField("doc", doc);
    writeProps(gen); // write out properties
    gen.writeFieldName("request");
    request.fieldsToJson(types, gen);
    toJson1(gen);
    gen.writeEndObject();
  }

  /** Writes the response/one-way portion of the JSON form. */
  void toJson1(JsonGenerator gen) throws IOException {
    gen.writeStringField("response", "null");
    gen.writeBooleanField("one-way", true);
  }

  @Override
  public boolean equals(Object o) {
    if (o == this)
      return true;
    if (!(o instanceof Message))
      return false;
    Message that = (Message) o;
    return this.name.equals(that.name) && this.request.equals(that.request) && propsEqual(that);
  }

  @Override
  public int hashCode() {
    return name.hashCode() + request.hashCode() + propsHashCode();
  }

  public String getDoc() {
    return doc;
  }
}
/** A message with a response schema and a declared error union (not one-way). */
private class TwoWayMessage extends Message {
  // Immutable after construction.
  private final Schema response;
  private final Schema errors;

  /** Construct a message. */
  private TwoWayMessage(String name, String doc, Map<String, ?> propMap, Schema request, Schema response,
      Schema errors) {
    super(name, doc, propMap, request);
    this.response = response;
    this.errors = errors;
  }

  private TwoWayMessage(String name, String doc, JsonProperties propMap, Schema request, Schema response,
      Schema errors) {
    super(name, doc, propMap, request);
    this.response = response;
    this.errors = errors;
  }

  @Override
  public Schema getResponse() {
    return response;
  }

  @Override
  public Schema getErrors() {
    return errors;
  }

  @Override
  public boolean isOneWay() {
    return false;
  }

  @Override
  public boolean equals(Object o) {
    if (!super.equals(o))
      return false;
    if (!(o instanceof TwoWayMessage))
      return false;
    TwoWayMessage that = (TwoWayMessage) o;
    return this.response.equals(that.response) && this.errors.equals(that.errors);
  }

  @Override
  public int hashCode() {
    return super.hashCode() + response.hashCode() + errors.hashCode();
  }

  @Override
  void toJson1(JsonGenerator gen) throws IOException {
    gen.writeFieldName("response");
    response.toJson(types, gen);
    List<Schema> errs = errors.getTypes(); // elide system error
    if (errs.size() > 1) {
      // The first element is the implicit SYSTEM_ERROR (see parseMessage);
      // only user-declared errors appear in the JSON form.
      Schema union = Schema.createUnion(errs.subList(1, errs.size()));
      gen.writeFieldName("errors");
      union.toJson(types, gen);
    }
  }
}
/** Simple name of the protocol (without namespace). */
private String name;
/** Namespace qualifying {@link #name}; may be null. */
private String namespace;
/** Documentation string, or null. */
private String doc;
/** Named types declared by this protocol. */
private Schema.Names types = new Schema.Names();
/** Messages by name, in declaration order. */
private final Map<String, Message> messages = new LinkedHashMap<>();
/** Lazily computed MD5 of the JSON text; see {@link #getMD5()}. */
private byte[] md5;

/** An error that can be thrown by any message. */
public static final Schema SYSTEM_ERROR = Schema.create(Schema.Type.STRING);

/** Union type for generating system errors. */
public static final Schema SYSTEM_ERRORS = Schema.createUnion(Collections.singletonList(SYSTEM_ERROR));

private static final Set<String> PROTOCOL_RESERVED = Collections
    .unmodifiableSet(new HashSet<>(Arrays.asList("namespace", "protocol", "doc", "messages", "types", "errors")));

// Used only by parse(); the fields are filled in afterwards.
private Protocol() {
  super(PROTOCOL_RESERVED);
}
/**
 * Constructs a similar Protocol instance with the same {@code name},
 * {@code doc}, and {@code namespace} as {@code p} has. It also copies all the
 * {@code props}.
 */
public Protocol(Protocol p) {
  this(p.getName(), p.getDoc(), p.getNamespace());
  putAll(p);
}

/**
 * @param name      protocol name; a dotted name also determines the namespace
 * @param doc       documentation string, or null
 * @param namespace namespace, or null; superseded when {@code name} is dotted
 */
public Protocol(String name, String doc, String namespace) {
  super(PROTOCOL_RESERVED);
  setName(name, namespace);
  this.doc = doc;
}

/** Equivalent to {@code Protocol(name, null, namespace)}. */
public Protocol(String name, String namespace) {
  this(name, null, namespace);
}
/** Splits a possibly-dotted name into name and namespace, then re-anchors the type names. */
private void setName(String name, String namespace) {
  // A dotted name carries its own namespace, which takes precedence over the
  // explicit one.
  final int dot = name.lastIndexOf('.');
  if (dot >= 0) {
    this.namespace = name.substring(0, dot);
    this.name = name.substring(dot + 1);
  } else {
    this.namespace = namespace;
    this.name = name;
  }
  // An empty namespace is normalized to null.
  if ("".equals(this.namespace)) {
    this.namespace = null;
  }
  types.space(this.namespace);
}
/** The name of this protocol. */
public String getName() {
  return name;
}

/** The namespace of this protocol. Qualifies its name. May be null. */
public String getNamespace() {
  return namespace;
}

/** Doc string for this protocol. */
public String getDoc() {
  return doc;
}

/** The types of this protocol. */
public Collection<Schema> getTypes() {
  return types.values();
}

/** Returns the named type. */
public Schema getType(String name) {
  return types.get(name);
}

/** Set the types of this protocol. Replaces any previously declared types. */
public void setTypes(Collection<Schema> newTypes) {
  types = new Schema.Names();
  for (Schema s : newTypes)
    types.add(s);
}

/** The messages of this protocol. */
public Map<String, Message> getMessages() {
  return messages;
}
/**
 * Create a one-way message.
 *
 * @deprecated prefer {@link #createMessage(String, String, Map, Schema)}
 */
@Deprecated
public Message createMessage(String name, String doc, Schema request) {
  return new Message(name, doc, Collections.emptyMap(), request);
}

/**
 * Create a one-way message using the {@code name}, {@code doc}, and
 * {@code props} of {@code m}.
 */
public Message createMessage(Message m, Schema request) {
  return new Message(m.name, m.doc, m, request);
}

/** Create a one-way message. */
public <T> Message createMessage(String name, String doc, JsonProperties propMap, Schema request) {
  return new Message(name, doc, propMap, request);
}

/** Create a one-way message. */
public <T> Message createMessage(String name, String doc, Map<String, ?> propMap, Schema request) {
  return new Message(name, doc, propMap, request);
}

/**
 * Create a two-way message.
 *
 * @deprecated prefer {@link #createMessage(String, String, Map, Schema, Schema, Schema)}
 */
@Deprecated
public Message createMessage(String name, String doc, Schema request, Schema response, Schema errors) {
  return new TwoWayMessage(name, doc, new LinkedHashMap<String, String>(), request, response, errors);
}

/**
 * Create a two-way message using the {@code name}, {@code doc}, and
 * {@code props} of {@code m}.
 */
public Message createMessage(Message m, Schema request, Schema response, Schema errors) {
  return new TwoWayMessage(m.getName(), m.getDoc(), m, request, response, errors);
}

/** Create a two-way message. */
public <T> Message createMessage(String name, String doc, JsonProperties propMap, Schema request, Schema response,
    Schema errors) {
  return new TwoWayMessage(name, doc, propMap, request, response, errors);
}

/** Create a two-way message. */
public <T> Message createMessage(String name, String doc, Map<String, ?> propMap, Schema request, Schema response,
    Schema errors) {
  return new TwoWayMessage(name, doc, propMap, request, response, errors);
}
@Override
public boolean equals(Object o) {
  if (o == this)
    return true;
  if (!(o instanceof Protocol))
    return false;
  Protocol that = (Protocol) o;
  // namespace may legitimately be null (see setName), so compare it null-safely
  // instead of dereferencing it directly.
  return this.name.equals(that.name)
      && (this.namespace == null ? that.namespace == null : this.namespace.equals(that.namespace))
      && this.types.equals(that.types) && this.messages.equals(that.messages) && this.propsEqual(that);
}

@Override
public int hashCode() {
  // Consistent with equals(): a null namespace contributes 0.
  return name.hashCode() + (namespace == null ? 0 : namespace.hashCode()) + types.hashCode() + messages.hashCode()
      + propsHashCode();
}
/** Render this as <a href="https://json.org/">JSON</a>. */
@Override
public String toString() {
  return toString(false);
}

/**
 * Render this as <a href="https://json.org/">JSON</a>.
 *
 * @param pretty if true, pretty-print JSON.
 */
public String toString(boolean pretty) {
  try {
    StringWriter writer = new StringWriter();
    JsonGenerator gen = Schema.FACTORY.createGenerator(writer);
    if (pretty)
      gen.useDefaultPrettyPrinter();
    toJson(gen);
    gen.flush();
    return writer.toString();
  } catch (IOException e) {
    // Unlikely when writing to an in-memory StringWriter; rewrap as unchecked.
    throw new AvroRuntimeException(e);
  }
}
/** Writes this protocol's JSON form: protocol, namespace, doc, props, types, messages. */
void toJson(JsonGenerator gen) throws IOException {
  // Re-anchor the name context so nested type names render relative to this
  // protocol's namespace.
  types.space(namespace);
  gen.writeStartObject();
  gen.writeStringField("protocol", name);
  if (namespace != null) {
    gen.writeStringField("namespace", namespace);
  }
  if (doc != null)
    gen.writeStringField("doc", doc);
  writeProps(gen);
  gen.writeArrayFieldStart("types");
  Schema.Names resolved = new Schema.Names(namespace);
  for (Schema type : types.values())
    // Skip types that an earlier definition has already emitted.
    if (!resolved.contains(type))
      type.toJson(resolved, gen);
  gen.writeEndArray();
  gen.writeObjectFieldStart("messages");
  for (Map.Entry<String, Message> e : messages.entrySet()) {
    gen.writeFieldName(e.getKey());
    e.getValue().toJson(gen);
  }
  gen.writeEndObject();
  gen.writeEndObject();
}
/**
 * Return the MD5 hash of the text of this protocol. The digest is computed
 * lazily on first call and cached; note the cached array is shared, not copied.
 */
public byte[] getMD5() {
  if (md5 == null)
    try {
      md5 = MessageDigest.getInstance("MD5").digest(this.toString().getBytes(StandardCharsets.UTF_8));
    } catch (NoSuchAlgorithmException e) {
      // MD5 is a mandatory JDK algorithm, so this indicates a broken JRE.
      throw new AvroRuntimeException(e);
    }
  return md5;
}
/** Read a protocol from a Json file. */
public static Protocol parse(File file) throws IOException {
  try (JsonParser jsonParser = Schema.FACTORY.createParser(file)) {
    return parse(jsonParser);
  }
}

/**
 * Read a protocol from a Json stream.
 *
 * NOTE(review): unlike {@link #parse(File)}, the created parser is not closed
 * here -- presumably so the caller retains ownership of the stream; confirm
 * before changing.
 */
public static Protocol parse(InputStream stream) throws IOException {
  return parse(Schema.FACTORY.createParser(stream));
}

/** Read a protocol from one or more json strings */
public static Protocol parse(String string, String... more) {
  StringBuilder b = new StringBuilder(string);
  for (String part : more)
    b.append(part);
  return parse(b.toString());
}

/** Read a protocol from a Json string. */
public static Protocol parse(String string) {
  try {
    return parse(Schema.FACTORY.createParser(new ByteArrayInputStream(string.getBytes(StandardCharsets.UTF_8))));
  } catch (IOException e) {
    throw new AvroRuntimeException(e);
  }
}

private static Protocol parse(JsonParser parser) {
  try {
    Protocol protocol = new Protocol();
    protocol.parse((JsonNode) Schema.MAPPER.readTree(parser));
    return protocol;
  } catch (IOException e) {
    throw new SchemaParseException(e);
  }
}

// Populates this (empty) protocol from the parsed JSON document. Order
// matters: types must be registered before messages can reference them.
private void parse(JsonNode json) {
  parseNameAndNamespace(json);
  parseTypes(json);
  parseMessages(json);
  parseDoc(json);
  parseProps(json);
}
/** Reads the mandatory "protocol" and optional "namespace" attributes. */
private void parseNameAndNamespace(JsonNode json) {
  JsonNode nameNode = json.get("protocol");
  if (nameNode == null) {
    throw new SchemaParseException("No protocol name specified: " + json);
  }
  JsonNode namespaceNode = json.get("namespace");
  String namespace = namespaceNode == null ? null : namespaceNode.textValue();
  setName(nameNode.textValue(), namespace);
}

private void parseDoc(JsonNode json) {
  this.doc = parseDocNode(json);
}

/** Returns the "doc" attribute of {@code json}, or null when absent. */
private String parseDocNode(JsonNode json) {
  JsonNode nameNode = json.get("doc");
  if (nameNode == null)
    return null; // no doc defined
  return nameNode.textValue();
}
/** Parses the optional "types" array into {@link #types}. */
private void parseTypes(JsonNode json) {
  JsonNode defs = json.get("types");
  if (defs == null)
    return; // no types defined
  if (!defs.isArray())
    throw new SchemaParseException("Types not an array: " + defs);

  // Pass 1: register every declared name. Pass 2: parse each complete schema.
  for (JsonNode type : defs) {
    if (!type.isObject())
      throw new SchemaParseException("Type not an object: " + type);
    Schema.parseNamesDeclared(type, types, types.space());
  }
  for (JsonNode type : defs) {
    Schema.parseCompleteSchema(type, types, types.space());
  }
}

/** Copies every non-reserved attribute of {@code json} into this protocol's props. */
private void parseProps(JsonNode json) {
  for (Iterator<String> i = json.fieldNames(); i.hasNext();) {
    String p = i.next(); // add non-reserved as props
    if (!PROTOCOL_RESERVED.contains(p))
      this.addProp(p, json.get(p));
  }
}
/** Parses the optional "messages" object into {@link #messages}. */
private void parseMessages(JsonNode json) {
  // Messages are optional; a protocol may declare only types.
  final JsonNode declared = json.get("messages");
  if (declared == null) {
    return;
  }
  declared.fieldNames().forEachRemaining(
      messageName -> messages.put(messageName, parseMessage(messageName, declared.get(messageName))));
}
/**
 * Parses a single message declaration.
 *
 * @param messageName the message's key within the "messages" object
 * @param json        the message's JSON body
 * @return a one-way {@link Message}, or a {@link TwoWayMessage} otherwise
 * @throws SchemaParseException on any malformed declaration
 */
private Message parseMessage(String messageName, JsonNode json) {
  String doc = parseDocNode(json);

  // Collect non-reserved attributes as user properties.
  Map<String, JsonNode> mProps = new LinkedHashMap<>();
  for (Iterator<String> i = json.fieldNames(); i.hasNext();) {
    String p = i.next(); // add non-reserved as props
    if (!MESSAGE_RESERVED.contains(p))
      mProps.put(p, json.get(p));
  }

  JsonNode requestNode = json.get("request");
  if (requestNode == null || !requestNode.isArray())
    throw new SchemaParseException("No request specified: " + json);

  // Each request parameter becomes a field of an anonymous record schema.
  List<Field> fields = new ArrayList<>();
  for (JsonNode field : requestNode) {
    JsonNode fieldNameNode = field.get("name");
    if (fieldNameNode == null)
      throw new SchemaParseException("No param name: " + field);
    JsonNode fieldTypeNode = field.get("type");
    if (fieldTypeNode == null)
      throw new SchemaParseException("No param type: " + field);
    String name = fieldNameNode.textValue();
    String fieldDoc = null;
    JsonNode fieldDocNode = field.get("doc");
    if (fieldDocNode != null)
      fieldDoc = fieldDocNode.textValue();
    Field newField = new Field(name, Schema.parse(fieldTypeNode, types), fieldDoc, field.get("default"), true,
        Order.ASCENDING);
    Set<String> aliases = Schema.parseAliases(field);
    if (aliases != null) { // add aliases
      for (String alias : aliases)
        newField.addAlias(alias);
    }
    Iterator<String> i = field.fieldNames();
    while (i.hasNext()) { // add properties
      String prop = i.next();
      if (!FIELD_RESERVED.contains(prop)) // ignore reserved
        newField.addProp(prop, field.get(prop));
    }
    fields.add(newField);
  }
  Schema request = Schema.createRecord(fields);

  boolean oneWay = false;
  JsonNode oneWayNode = json.get("one-way");
  if (oneWayNode != null) {
    if (!oneWayNode.isBoolean())
      throw new SchemaParseException("one-way must be boolean: " + json);
    oneWay = oneWayNode.booleanValue();
  }
  JsonNode responseNode = json.get("response");
  if (!oneWay && responseNode == null)
    throw new SchemaParseException("No response specified: " + json);
  JsonNode decls = json.get("errors");
  if (oneWay) {
    // A one-way message may not declare errors and must respond with null.
    if (decls != null)
      throw new SchemaParseException("one-way can't have errors: " + json);
    if (responseNode != null && Schema.parse(responseNode, types).getType() != Schema.Type.NULL)
      throw new SchemaParseException("One way response must be null: " + json);
    return new Message(messageName, doc, mProps, request);
  }
  Schema response = Schema.parse(responseNode, types);

  // The error union always begins with the implicit system error.
  List<Schema> errs = new ArrayList<>();
  errs.add(SYSTEM_ERROR); // every method can throw
  if (decls != null) {
    if (!decls.isArray())
      throw new SchemaParseException("Errors not an array: " + json);
    for (JsonNode decl : decls) {
      String name = decl.textValue();
      Schema schema = this.types.get(name);
      if (schema == null)
        throw new SchemaParseException("Undefined error: " + name);
      if (!schema.isError())
        throw new SchemaParseException("Not an error: " + name);
      errs.add(schema);
    }
  }
  return new TwoWayMessage(messageName, doc, mProps, request, response, Schema.createUnion(errs));
}
/** Parses the protocol file named by the first argument and prints its JSON form. */
public static void main(String[] args) throws Exception {
  // Fail with a usage message rather than an ArrayIndexOutOfBoundsException.
  if (args.length != 1) {
    System.err.println("Usage: Protocol <protocol-file>");
    System.exit(1);
  }
  System.out.println(Protocol.parse(new File(args[0])));
}
}
| 7,234 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/SchemaValidationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro;
/**
 * Thrown when {@link SchemaValidator} fails to validate a schema.
 */
public class SchemaValidationException extends Exception {
  // Exception is Serializable; pin the serial form explicitly.
  private static final long serialVersionUID = 1L;

  /** @param reader the reader schema; @param writer the writer schema that failed validation */
  public SchemaValidationException(Schema reader, Schema writer) {
    super(getMessage(reader, writer));
  }

  public SchemaValidationException(Schema reader, Schema writer, Throwable cause) {
    super(getMessage(reader, writer), cause);
  }

  private static String getMessage(Schema reader, Schema writer) {
    return "Unable to read schema: \n" + writer.toString(true) + "\nusing schema:\n" + reader.toString(true);
  }
}
| 7,235 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/ValidateCanBeRead.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro;
/**
 * A {@link SchemaValidationStrategy} that checks that the data written with the
 * {@link Schema} to validate can be read by the existing schema according to
 * the default Avro schema resolution rules.
 */
class ValidateCanBeRead implements SchemaValidationStrategy {
  /**
   * Validate that data written with first schema provided can be read using the
   * second schema, according to the default Avro schema resolution rules.
   *
   * @throws SchemaValidationException if the second schema cannot read data
   *                                   written by the first.
   */
  @Override
  public void validate(Schema toValidate, Schema existing) throws SchemaValidationException {
    // Argument order: toValidate acts as the writer, existing as the reader.
    ValidateMutualRead.canRead(toValidate, existing);
  }
}
| 7,236 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/InvalidNumberEncodingException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import java.io.IOException;
/** Thrown when an invalid number encoding is encountered while reading data. */
public class InvalidNumberEncodingException extends IOException {
  // IOException is Serializable; pin the serial form explicitly.
  private static final long serialVersionUID = 1L;

  public InvalidNumberEncodingException(String message) {
    super(message);
  }
}
| 7,237 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/ValidateLatest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro;
import java.util.Iterator;
/**
* <p>
* A {@link SchemaValidator} for validating the provided schema against the
* first {@link Schema} in the iterable in {@link #validate(Schema, Iterable)}.
* </p>
* <p>
* Uses the {@link SchemaValidationStrategy} provided in the constructor to
* validate the schema against the first Schema in the iterable, if it exists,
* via {@link SchemaValidationStrategy#validate(Schema, Schema)}.
* </p>
*/
public final class ValidateLatest implements SchemaValidator {
  private final SchemaValidationStrategy strategy;

  /**
   * @param strategy The strategy to use for validation of pairwise schemas.
   */
  public ValidateLatest(SchemaValidationStrategy strategy) {
    this.strategy = strategy;
  }

  /**
   * Validates {@code toValidate} against only the first schema in
   * {@code schemasInOrder}; an empty iterable validates trivially.
   */
  @Override
  public void validate(Schema toValidate, Iterable<Schema> schemasInOrder) throws SchemaValidationException {
    Iterator<Schema> schemas = schemasInOrder.iterator();
    if (schemas.hasNext()) {
      Schema existing = schemas.next();
      strategy.validate(toValidate, existing);
    }
  }
}
| 7,238 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/SchemaValidatorBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
/**
* <p>
* A Builder for creating SchemaValidators.
* </p>
*/
public final class SchemaValidatorBuilder {
  // Must be set by one of the strategy methods before building.
  private SchemaValidationStrategy strategy;

  /** Use the given strategy for pairwise schema validation. */
  public SchemaValidatorBuilder strategy(SchemaValidationStrategy strategy) {
    this.strategy = strategy;
    return this;
  }

  /**
   * Use a strategy that validates that a schema can be used to read existing
   * schema(s) according to the Avro default schema resolution.
   */
  public SchemaValidatorBuilder canReadStrategy() {
    this.strategy = new ValidateCanRead();
    return this;
  }

  /**
   * Use a strategy that validates that a schema can be read by existing schema(s)
   * according to the Avro default schema resolution.
   */
  public SchemaValidatorBuilder canBeReadStrategy() {
    this.strategy = new ValidateCanBeRead();
    return this;
  }

  /**
   * Use a strategy that validates that a schema can read existing schema(s), and
   * vice-versa, according to the Avro default schema resolution.
   */
  public SchemaValidatorBuilder mutualReadStrategy() {
    this.strategy = new ValidateMutualRead();
    return this;
  }

  /** Builds a validator that checks only the most recent schema. */
  public SchemaValidator validateLatest() {
    valid();
    return new ValidateLatest(strategy);
  }

  /** Builds a validator that checks every schema in order. */
  public SchemaValidator validateAll() {
    valid();
    return new ValidateAll(strategy);
  }

  // Fails fast when no strategy was chosen before building.
  private void valid() {
    if (null == strategy) {
      throw new AvroRuntimeException("SchemaValidationStrategy not specified in builder");
    }
  }
}
| 7,239 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/SystemLimitException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import org.slf4j.LoggerFactory;
/**
 * Thrown to prevent making large allocations when reading potentially
 * pathological input data from an untrusted source.
 * <p>
 * The following system properties can be set to limit the size of bytes,
 * strings and collection types to be allocated:
 * <ul>
 * <li><tt>org.apache.avro.limits.bytes.maxLength</tt> limits the maximum size
 * of <tt>bytes</tt> types.</li>
 * <li><tt>org.apache.avro.limits.collectionItems.maxLength</tt> limits the
 * maximum number of <tt>map</tt> and <tt>list</tt> items that can be read in a
 * single sequence.</li>
 * <li><tt>org.apache.avro.limits.string.maxLength</tt> limits the maximum size
 * of <tt>string</tt> types.</li>
 * </ul>
 *
 * The default is to permit sizes up to {@link #MAX_ARRAY_VM_LIMIT}.
 */
public class SystemLimitException extends AvroRuntimeException {

  /**
   * The largest array this library will ever attempt to allocate. Some JVMs
   * reserve header words inside arrays, so requesting more than
   * {@code Integer.MAX_VALUE - 8} elements can fail with
   * {@code OutOfMemoryError: Requested array size exceeds VM limit}.
   *
   * @see <a href="https://bugs.openjdk.org/browse/JDK-8246725">JDK-8246725</a>
   */
  // VisibleForTesting
  static final int MAX_ARRAY_VM_LIMIT = Integer.MAX_VALUE - 8;

  /** System property limiting the size of {@code bytes} data. */
  public static final String MAX_BYTES_LENGTH_PROPERTY = "org.apache.avro.limits.bytes.maxLength";
  /** System property limiting the number of items read into a collection. */
  public static final String MAX_COLLECTION_LENGTH_PROPERTY = "org.apache.avro.limits.collectionItems.maxLength";
  /** System property limiting the size of {@code string} data. */
  public static final String MAX_STRING_LENGTH_PROPERTY = "org.apache.avro.limits.string.maxLength";

  // Effective limits; (re-)initialized from the system properties by
  // resetLimits() in the static initializer below.
  private static int maxBytesLength = MAX_ARRAY_VM_LIMIT;
  private static int maxCollectionLength = MAX_ARRAY_VM_LIMIT;
  private static int maxStringLength = MAX_ARRAY_VM_LIMIT;

  static {
    resetLimits();
  }

  public SystemLimitException(String message) {
    super(message);
  }

  /**
   * Reads an integer limit from a system property, used to configure decoder
   * behaviour. Falls back to the default when the property is absent or not
   * parsable as an unsigned int.
   *
   * @param property     the system property to consult
   * @param defaultValue the value used when the property is missing or invalid
   * @return the configured limit, or {@code defaultValue}
   */
  private static int getLimitFromProperty(String property, int defaultValue) {
    final String configured = System.getProperty(property);
    if (configured == null) {
      return defaultValue;
    }
    try {
      return Integer.parseUnsignedInt(configured);
    } catch (NumberFormatException nfe) {
      LoggerFactory.getLogger(SystemLimitException.class).warn("Could not parse property " + property + ": " + configured,
          nfe);
      return defaultValue;
    }
  }

  /**
   * Shared validation for a single proposed length against the VM ceiling and a
   * configured limit.
   *
   * @param length         the proposed length, in bytes or items
   * @param configuredimit the limit configured via system property
   * @param vmMessage      message when the VM array limit is exceeded
   * @param exceededPrefix prefix of the message when the configured limit is
   *                       exceeded
   * @return the length as an int if and only if it is non-negative and within
   *         both limits
   */
  private static int checkLength(long length, int configuredimit, String vmMessage, String exceededPrefix) {
    if (length < 0) {
      throw new AvroRuntimeException("Malformed data. Length is negative: " + length);
    }
    if (length > MAX_ARRAY_VM_LIMIT) {
      throw new UnsupportedOperationException(vmMessage);
    }
    if (length > configuredimit) {
      throw new SystemLimitException(exceededPrefix + length + " exceeds maximum allowed");
    }
    return (int) length;
  }

  /**
   * Check to ensure that reading the bytes is within the specified limits.
   *
   * @param length The proposed size of the bytes to read
   * @return The size of the bytes if and only if it is within the limit and
   *         non-negative.
   * @throws UnsupportedOperationException if reading the datum would allocate a
   *                                       collection that the Java VM would be
   *                                       unable to handle
   * @throws SystemLimitException          if the decoding should fail because it
   *                                       would otherwise result in an allocation
   *                                       exceeding the set limit
   * @throws AvroRuntimeException          if the length is negative
   */
  public static int checkMaxBytesLength(long length) {
    // The VM message is a compile-time constant expression, so no string is
    // built on the happy path.
    return checkLength(length, maxBytesLength,
        "Cannot read arrays longer than " + MAX_ARRAY_VM_LIMIT + " bytes in Java library", "Bytes length ");
  }

  /**
   * Check to ensure that reading the specified number of items remains within
   * the specified limits.
   *
   * @param existing The number of elements items read in the collection
   * @param items    The next number of items to read. In normal usage, this is
   *                 always a positive, permitted value. Negative and zero values
   *                 have a special meaning in Avro decoding.
   * @return The total number of items in the collection if and only if it is
   *         within the limit and non-negative.
   * @throws UnsupportedOperationException if reading the items would allocate a
   *                                       collection that the Java VM would be
   *                                       unable to handle
   * @throws SystemLimitException          if the decoding should fail because it
   *                                       would otherwise result in an allocation
   *                                       exceeding the set limit
   * @throws AvroRuntimeException          if the length is negative
   */
  public static int checkMaxCollectionLength(long existing, long items) {
    if (existing < 0) {
      throw new AvroRuntimeException("Malformed data. Length is negative: " + existing);
    }
    if (items < 0) {
      throw new AvroRuntimeException("Malformed data. Length is negative: " + items);
    }
    long length = existing + items;
    // length < existing detects signed overflow of the addition above
    if (length > MAX_ARRAY_VM_LIMIT || length < existing) {
      throw new UnsupportedOperationException(
          "Cannot read collections larger than " + MAX_ARRAY_VM_LIMIT + " items in Java library");
    }
    if (length > maxCollectionLength) {
      throw new SystemLimitException("Collection length " + length + " exceeds maximum allowed");
    }
    return (int) length;
  }

  /**
   * Check to ensure that reading the string size is within the specified limits.
   *
   * @param length The proposed size of the string to read
   * @return The size of the string if and only if it is within the limit and
   *         non-negative.
   * @throws UnsupportedOperationException if reading the items would allocate a
   *                                       collection that the Java VM would be
   *                                       unable to handle
   * @throws SystemLimitException          if the decoding should fail because it
   *                                       would otherwise result in an allocation
   *                                       exceeding the set limit
   * @throws AvroRuntimeException          if the length is negative
   */
  public static int checkMaxStringLength(long length) {
    return checkLength(length, maxStringLength,
        "Cannot read strings longer than " + MAX_ARRAY_VM_LIMIT + " bytes", "String length ");
  }

  /** Reread the limits from the system properties. */
  // VisibleForTesting
  static void resetLimits() {
    maxBytesLength = getLimitFromProperty(MAX_BYTES_LENGTH_PROPERTY, MAX_ARRAY_VM_LIMIT);
    maxCollectionLength = getLimitFromProperty(MAX_COLLECTION_LENGTH_PROPERTY, MAX_ARRAY_VM_LIMIT);
    maxStringLength = getLimitFromProperty(MAX_STRING_LENGTH_PROPERTY, MAX_ARRAY_VM_LIMIT);
  }
}
| 7,240 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/SchemaBuilderException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
/** Thrown for errors building schemas. */
public class SchemaBuilderException extends AvroRuntimeException {

  /** Creates an exception with a descriptive message. */
  public SchemaBuilderException(String message) {
    super(message);
  }

  /** Wraps an underlying cause encountered while building a schema. */
  public SchemaBuilderException(Throwable cause) {
    super(cause);
  }
}
| 7,241 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/AvroRemoteException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
/** Base class for exceptions thrown to client by server. */
public class AvroRemoteException extends Exception {

  /** The application-level error datum; may be {@code null}. */
  private Object value;

  protected AvroRemoteException() {
  }

  public AvroRemoteException(Throwable value) {
    // Delegates to the Object constructor with the throwable's string form
    // (so getValue() yields that string), then records the throwable as cause.
    this(value.toString());
    initCause(value);
  }

  public AvroRemoteException(Object value) {
    super(value == null ? null : value.toString());
    this.value = value;
  }

  public AvroRemoteException(Object value, Throwable cause) {
    super(value == null ? null : value.toString(), cause);
    this.value = value;
  }

  /** Returns the error datum supplied at construction, possibly {@code null}. */
  public Object getValue() {
    return value;
  }
}
| 7,242 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/JsonProperties.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import java.util.AbstractSet;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ConcurrentMap;
import java.io.IOException;
import java.util.function.BiConsumer;
import org.apache.avro.util.internal.Accessor;
import org.apache.avro.util.internal.Accessor.JsonPropertiesAccessor;
import org.apache.avro.util.MapEntry;
import org.apache.avro.util.internal.JacksonUtils;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.TextNode;
/**
* Base class for objects that have JSON-valued properties. Avro and JSON values
* are represented in Java using the following mapping:
*
* <table>
* <th>
* <td>Avro type</td>
* <td>JSON type</td>
* <td>Java type</td></th>
* <tr>
* <td><code>null</code></td>
* <td><code>null</code></td>
* <td>{@link #NULL_VALUE}</td>
* </tr>
* <tr>
* <td><code>boolean</code></td>
* <td>Boolean</td>
* <td><code>boolean</code></td>
* </tr>
* <tr>
* <td><code>int</code></td>
* <td>Number</td>
* <td><code>int</code></td>
* </tr>
* <tr>
* <td><code>long</code></td>
* <td>Number</td>
* <td><code>long</code></td>
* </tr>
* <tr>
* <td><code>float</code></td>
* <td>Number</td>
* <td><code>float</code></td>
* </tr>
* <tr>
* <td><code>double</code></td>
* <td>Number</td>
* <td><code>double</code></td>
* </tr>
* <tr>
* <td><code>bytes</code></td>
* <td>String</td>
* <td><code>byte[]</code></td>
* </tr>
* <tr>
* <td><code>string</code></td>
* <td>String</td>
* <td>{@link java.lang.String}</td>
* </tr>
* <tr>
* <td><code>record</code></td>
* <td>Object</td>
* <td>{@link java.util.Map}</td>
* </tr>
* <tr>
* <td><code>enum</code></td>
* <td>String</td>
* <td>{@link java.lang.String}</td>
* </tr>
* <tr>
* <td><code>array</code></td>
* <td>Array</td>
* <td>{@link java.util.Collection}</td>
* </tr>
* <tr>
* <td><code>map</code></td>
* <td>Object</td>
* <td>{@link java.util.Map}</td>
* </tr>
* <tr>
* <td><code>fixed</code></td>
* <td>String</td>
* <td><code>byte[]</code></td>
* </tr>
* </table>
*
* @see org.apache.avro.data.Json
*/
public abstract class JsonProperties {
  // Wires the package-private addProp(JsonNode) into the cross-package
  // Accessor mechanism so other Avro packages can add raw JsonNode properties
  // without widening this class's public API.
  static {
    Accessor.setAccessor(new JsonPropertiesAccessor() {
      @Override
      protected void addProp(JsonProperties props, String name, JsonNode value) {
        props.addProp(name, value);
      }
    });
  }

  /** Sentinel type for {@link #NULL_VALUE}; not instantiable elsewhere. */
  public static class Null {
    private Null() {
    }
  }

  /** A value representing a JSON <code>null</code>. */
  public static final Null NULL_VALUE = new Null();

  // use a ConcurrentHashMap for speed and thread safety, but keep a Queue of the
  // entries to maintain order
  // the queue is always updated after the main map and is thus potentially a
  // subset of the map.
  // By making props private, we can control access and only implement/override
  // the methods
  // we need. We don't ever remove anything so we don't need to implement the
  // clear/remove functionality.
  // Also, we only ever ADD to the collection, never changing a value, so
  // putWithAbsent is the
  // only modifier
  private ConcurrentMap<String, JsonNode> props = new ConcurrentHashMap<String, JsonNode>() {
    private static final long serialVersionUID = 1L;

    // Records insertion order; appended only after a successful putIfAbsent.
    private Queue<MapEntry<String, JsonNode>> propOrder = new ConcurrentLinkedQueue<>();

    @Override
    public JsonNode putIfAbsent(String key, JsonNode value) {
      JsonNode r = super.putIfAbsent(key, value);
      if (r == null) {
        propOrder.add(new MapEntry<>(key, value));
      }
      return r;
    }

    @Override
    public JsonNode put(String key, JsonNode value) {
      // put never overwrites: entries in this map are add-only
      return putIfAbsent(key, value);
    }

    @Override
    public Set<Map.Entry<String, JsonNode>> entrySet() {
      // Iterates in insertion order (via propOrder) rather than hash order.
      return new AbstractSet<Map.Entry<String, JsonNode>>() {
        @Override
        public Iterator<Map.Entry<String, JsonNode>> iterator() {
          return new Iterator<Map.Entry<String, JsonNode>>() {
            Iterator<MapEntry<String, JsonNode>> it = propOrder.iterator();

            @Override
            public boolean hasNext() {
              return it.hasNext();
            }

            @Override
            public java.util.Map.Entry<String, JsonNode> next() {
              return it.next();
            }
          };
        }

        @Override
        public int size() {
          return propOrder.size();
        }
      };
    }
  };

  // Property names reserved by the enclosing construct (e.g. schema keywords);
  // these may never be set as user properties.
  private Set<String> reserved;

  JsonProperties(Set<String> reserved) {
    this.reserved = reserved;
  }

  JsonProperties(Set<String> reserved, Map<String, ?> propMap) {
    this.reserved = reserved;
    for (Entry<String, ?> a : propMap.entrySet()) {
      Object v = a.getValue();
      JsonNode json = null;
      if (v instanceof String) {
        json = TextNode.valueOf((String) v);
      } else if (v instanceof JsonNode) {
        json = (JsonNode) v;
      } else {
        json = JacksonUtils.toJsonNode(v);
      }
      props.put(a.getKey(), json);
    }
  }

  /**
   * Returns the value of the named, string-valued property in this schema.
   * Returns <tt>null</tt> if there is no string-valued property with that name.
   */
  public String getProp(String name) {
    JsonNode value = getJsonProp(name);
    return value != null && value.isTextual() ? value.textValue() : null;
  }

  /**
   * Returns the value of the named property in this schema. Returns <tt>null</tt>
   * if there is no property with that name.
   */
  private JsonNode getJsonProp(String name) {
    return props.get(name);
  }

  /**
   * Returns the value of the named property in this schema. Returns <tt>null</tt>
   * if there is no property with that name.
   */
  public Object getObjectProp(String name) {
    return JacksonUtils.toObject(props.get(name));
  }

  /**
   * Returns the value of the named property in this schema, or
   * <tt>defaultValue</tt> if there is no property with that name.
   */
  public Object getObjectProp(String name, Object defaultValue) {
    final JsonNode json = props.get(name);
    return json != null ? JacksonUtils.toObject(json) : defaultValue;
  }

  /**
   * Adds a property with the given name <tt>name</tt> and value <tt>value</tt>.
   * Neither <tt>name</tt> nor <tt>value</tt> can be <tt>null</tt>. It is illegal
   * to add a property if another with the same name but different value already
   * exists in this schema.
   *
   * @param name  The name of the property to add
   * @param value The value for the property to add
   */
  public void addProp(String name, String value) {
    addProp(name, TextNode.valueOf(value));
  }

  /**
   * Adds a property, converting the value to a JsonNode unless it already is
   * one. See {@link #addProp(String, String)} for the constraints on names and
   * values.
   */
  public void addProp(String name, Object value) {
    if (value instanceof JsonNode) {
      addProp(name, (JsonNode) value);
    } else {
      addProp(name, JacksonUtils.toJsonNode(value));
    }
  }

  /**
   * Copies every property of <tt>np</tt> into this object, subject to the same
   * reserved-name and no-overwrite rules as {@link #addProp(String, Object)}.
   */
  public void putAll(JsonProperties np) {
    for (Map.Entry<? extends String, ? extends JsonNode> e : np.props.entrySet())
      addProp(e.getKey(), e.getValue());
  }

  /**
   * Adds a property with the given name <tt>name</tt> and value <tt>value</tt>.
   * Neither <tt>name</tt> nor <tt>value</tt> can be <tt>null</tt>. It is illegal
   * to add a property if another with the same name but different value already
   * exists in this schema.
   *
   * @param name  The name of the property to add
   * @param value The value for the property to add
   */
  private void addProp(String name, JsonNode value) {
    if (reserved.contains(name))
      throw new AvroRuntimeException("Can't set reserved property: " + name);
    if (value == null)
      throw new AvroRuntimeException("Can't set a property to null: " + name);
    // putIfAbsent keeps the first value; re-adding an equal value is a no-op
    JsonNode old = props.putIfAbsent(name, value);
    if (old != null && !old.equals(value)) {
      throw new AvroRuntimeException("Can't overwrite property: " + name);
    }
  }

  /**
   * Adds all the props from the specified json properties.
   *
   * @see #getObjectProps()
   */
  public void addAllProps(JsonProperties properties) {
    for (Entry<String, JsonNode> entry : properties.props.entrySet())
      addProp(entry.getKey(), entry.getValue());
  }

  /** Return the defined properties as an unmodifiable Map. */
  public Map<String, Object> getObjectProps() {
    Map<String, Object> result = new LinkedHashMap<>();
    for (Map.Entry<String, JsonNode> e : props.entrySet())
      result.put(e.getKey(), JacksonUtils.toObject(e.getValue()));
    return Collections.unmodifiableMap(result);
  }

  /** Returns true if a property with the given name has been set. */
  public boolean propsContainsKey(String key) {
    return this.props.containsKey(key);
  }

  /**
   * Invokes <tt>consumer</tt> for every property, in insertion order, with the
   * value converted from JsonNode to a plain Java object.
   */
  public void forEachProperty(BiConsumer<String, Object> consumer) {
    for (Map.Entry<String, JsonNode> entry : this.props.entrySet()) {
      final Object value = JacksonUtils.toObject(entry.getValue());
      consumer.accept(entry.getKey(), value);
    }
  }

  // Serializes all properties as fields of the JSON object currently being
  // written by gen, preserving insertion order.
  void writeProps(JsonGenerator gen) throws IOException {
    for (Map.Entry<String, JsonNode> e : props.entrySet())
      gen.writeObjectField(e.getKey(), e.getValue());
  }

  // Hash/equality over the property map only; used by subclasses.
  int propsHashCode() {
    return props.hashCode();
  }

  boolean propsEqual(JsonProperties np) {
    return props.equals(np.props);
  }

  /** Returns true if at least one property has been set. */
  public boolean hasProps() {
    return !props.isEmpty();
  }
}
| 7,243 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/SchemaNormalization.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import java.util.Map;
import java.util.HashMap;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/**
* Collection of static methods for generating the canonical form of schemas
* (see {@link #toParsingForm}) -- and fingerprints of canonical forms
* ({@link #fingerprint}).
*/
public class SchemaNormalization {
  // Static-only utility class; not instantiable.
  private SchemaNormalization() {
  }

  /**
   * Returns "Parsing Canonical Form" of a schema as defined by Avro spec.
   */
  public static String toParsingForm(Schema s) {
    try {
      // env maps each named type's full name to its quoted reference, so a
      // recursive or repeated named type is emitted as a name reference
      // instead of being expanded again.
      Map<String, String> env = new HashMap<>();
      return build(env, s, new StringBuilder()).toString();
    } catch (IOException e) {
      // Shouldn't happen, b/c StringBuilder can't throw IOException
      throw new RuntimeException(e);
    }
  }

  /**
   * Returns a fingerprint of a string of bytes. This string is presumed to
   * contain a canonical form of a schema. The algorithm used to compute the
   * fingerprint is selected by the argument <i>fpName</i>. If <i>fpName</i>
   * equals the string <code>"CRC-64-AVRO"</code>, then the result of
   * {@link #fingerprint64} is returned in little-endian format. Otherwise,
   * <i>fpName</i> is used as an algorithm name for
   * {@link MessageDigest#getInstance(String)}, which will throw
   * <code>NoSuchAlgorithmException</code> if it doesn't recognize the name.
   * <p>
   * Recommended Avro practice dictates that <code>"CRC-64-AVRO"</code> is used
   * for 64-bit fingerprints, <code>"MD5"</code> is used for 128-bit fingerprints,
   * and <code>"SHA-256"</code> is used for 256-bit fingerprints.
   */
  public static byte[] fingerprint(String fpName, byte[] data) throws NoSuchAlgorithmException {
    if (fpName.equals("CRC-64-AVRO")) {
      long fp = fingerprint64(data);
      byte[] result = new byte[8];
      // serialize the 64-bit fingerprint little-endian, as required by the spec
      for (int i = 0; i < 8; i++) {
        result[i] = (byte) fp;
        fp >>= 8;
      }
      return result;
    }
    MessageDigest md = MessageDigest.getInstance(fpName);
    return md.digest(data);
  }

  /**
   * Returns the 64-bit Rabin Fingerprint (as recommended in the Avro spec) of a
   * byte string.
   */
  public static long fingerprint64(byte[] data) {
    long result = EMPTY64;
    for (byte b : data)
      result = (result >>> 8) ^ FP64.FP_TABLE[(int) (result ^ b) & 0xff];
    return result;
  }

  /**
   * Returns {@link #fingerprint} applied to the parsing canonical form of the
   * supplied schema.
   */
  public static byte[] parsingFingerprint(String fpName, Schema s) throws NoSuchAlgorithmException {
    return fingerprint(fpName, toParsingForm(s).getBytes(StandardCharsets.UTF_8));
  }

  /**
   * Returns {@link #fingerprint64} applied to the parsing canonical form of the
   * supplied schema.
   */
  public static long parsingFingerprint64(Schema s) {
    return fingerprint64(toParsingForm(s).getBytes(StandardCharsets.UTF_8));
  }

  // Recursively appends the canonical JSON form of s to o. The exact byte
  // sequence produced here is fingerprinted, so any change to the output
  // would break existing fingerprints.
  private static Appendable build(Map<String, String> env, Schema s, Appendable o) throws IOException {
    // firstTime is the comma separator state; each switch arm that uses it
    // returns before another arm could see it.
    boolean firstTime = true;
    Schema.Type st = s.getType();
    switch (st) {
    default: // boolean, bytes, double, float, int, long, null, string
      return o.append('"').append(st.getName()).append('"');
    case UNION:
      o.append('[');
      for (Schema b : s.getTypes()) {
        if (!firstTime)
          o.append(',');
        else
          firstTime = false;
        build(env, b, o);
      }
      return o.append(']');
    case ARRAY:
    case MAP:
      o.append("{\"type\":\"").append(st.getName()).append("\"");
      if (st == Schema.Type.ARRAY)
        build(env, s.getElementType(), o.append(",\"items\":"));
      else
        build(env, s.getValueType(), o.append(",\"values\":"));
      return o.append("}");
    case ENUM:
    case FIXED:
    case RECORD:
      String name = s.getFullName();
      // already emitted: refer to the named type by its quoted full name
      if (env.get(name) != null)
        return o.append(env.get(name));
      String qname = "\"" + name + "\"";
      env.put(name, qname);
      o.append("{\"name\":").append(qname);
      o.append(",\"type\":\"").append(st.getName()).append("\"");
      if (st == Schema.Type.ENUM) {
        o.append(",\"symbols\":[");
        for (String enumSymbol : s.getEnumSymbols()) {
          if (!firstTime)
            o.append(',');
          else
            firstTime = false;
          o.append('"').append(enumSymbol).append('"');
        }
        o.append("]");
      } else if (st == Schema.Type.FIXED) {
        o.append(",\"size\":").append(Integer.toString(s.getFixedSize()));
      } else { // st == Schema.Type.RECORD
        o.append(",\"fields\":[");
        for (Schema.Field f : s.getFields()) {
          if (!firstTime)
            o.append(',');
          else
            firstTime = false;
          o.append("{\"name\":\"").append(f.name()).append("\"");
          build(env, f.schema(), o.append(",\"type\":")).append("}");
        }
        o.append("]");
      }
      return o.append("}");
    }
  }

  // "Empty" (seed) value of the CRC-64-AVRO Rabin fingerprint, per the spec.
  final static long EMPTY64 = 0xc15d213aa4d7a795L;

  /* An inner class ensures that FP_TABLE initialized only when needed. */
  private static class FP64 {
    // Precomputed byte-at-a-time lookup table for the Rabin fingerprint.
    private static final long[] FP_TABLE = new long[256];
    static {
      for (int i = 0; i < 256; i++) {
        long fp = i;
        for (int j = 0; j < 8; j++) {
          long mask = -(fp & 1L);
          fp = (fp >>> 1) ^ (EMPTY64 & mask);
        }
        FP_TABLE[i] = fp;
      }
    }
  }
}
| 7,244 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/Conversions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericEnumSymbol;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.util.TimePeriod;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.math.RoundingMode;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.IntBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.UUID;
public class Conversions {
/** Converts the Avro {@code uuid} logical type to and from {@link UUID}. */
public static class UUIDConversion extends Conversion<UUID> {

  @Override
  public Class<UUID> getConvertedType() {
    return UUID.class;
  }

  @Override
  public String getLogicalTypeName() {
    return "uuid";
  }

  @Override
  public Schema getRecommendedSchema() {
    // uuid is carried on a plain string schema
    return LogicalTypes.uuid().addToSchema(Schema.create(Schema.Type.STRING));
  }

  @Override
  public UUID fromCharSequence(CharSequence value, Schema schema, LogicalType type) {
    String text = value.toString();
    return UUID.fromString(text);
  }

  @Override
  public CharSequence toCharSequence(UUID value, Schema schema, LogicalType type) {
    return value.toString();
  }
}
public static class DecimalConversion extends Conversion<BigDecimal> {
  @Override
  public Class<BigDecimal> getConvertedType() {
    return BigDecimal.class;
  }

  @Override
  public Schema getRecommendedSchema() {
    // A decimal schema is only meaningful with a scale (and precision), so no
    // single schema can be recommended here.
    throw new UnsupportedOperationException("No recommended schema for decimal (scale is required)");
  }

  @Override
  public String getLogicalTypeName() {
    return "decimal";
  }

  /**
   * Decodes a two's-complement big-endian unscaled value, applying the scale
   * from the logical type.
   */
  @Override
  public BigDecimal fromBytes(ByteBuffer value, Schema schema, LogicalType type) {
    int scale = ((LogicalTypes.Decimal) type).getScale();
    // always copy the bytes out because BigInteger has no offset/length ctor
    byte[] bytes = new byte[value.remaining()];
    value.duplicate().get(bytes);
    return new BigDecimal(new BigInteger(bytes), scale);
  }

  /**
   * Encodes the unscaled value as two's-complement bytes, after verifying the
   * value fits the logical type's scale and precision.
   */
  @Override
  public ByteBuffer toBytes(BigDecimal value, Schema schema, LogicalType type) {
    value = validate((LogicalTypes.Decimal) type, value);
    return ByteBuffer.wrap(value.unscaledValue().toByteArray());
  }

  @Override
  public BigDecimal fromFixed(GenericFixed value, Schema schema, LogicalType type) {
    int scale = ((LogicalTypes.Decimal) type).getScale();
    return new BigDecimal(new BigInteger(value.bytes()), scale);
  }

  /**
   * Encodes the unscaled value into a fixed-size byte array, sign-extending on
   * the left to fill the schema's fixed size.
   */
  @Override
  public GenericFixed toFixed(BigDecimal value, Schema schema, LogicalType type) {
    value = validate((LogicalTypes.Decimal) type, value);
    // sign-extension filler: 0xFF for negative values, 0x00 otherwise
    byte fillByte = (byte) (value.signum() < 0 ? 0xFF : 0x00);
    byte[] unscaled = value.unscaledValue().toByteArray();
    byte[] bytes = new byte[schema.getFixedSize()];
    int unscaledLength = unscaled.length;
    int offset = bytes.length - unscaledLength;
    // Fill the front with the filler and copy the unscaled value into the remainder
    Arrays.fill(bytes, 0, offset, fillByte);
    System.arraycopy(unscaled, 0, bytes, offset, unscaledLength);
    return new GenericData.Fixed(schema, bytes);
  }

  /**
   * Verifies that {@code value} can be represented with the logical type's
   * scale and precision, rescaling (without rounding) when the scales differ.
   *
   * @throws AvroTypeException if rescaling would require rounding, or if the
   *                           (possibly rescaled) value exceeds the precision
   */
  private static BigDecimal validate(final LogicalTypes.Decimal decimal, BigDecimal value) {
    final int scale = decimal.getScale();
    final int valueScale = value.scale();
    boolean scaleAdjusted = false;
    if (valueScale != scale) {
      try {
        // UNNECESSARY rounding mode throws if any digit would be lost
        value = value.setScale(scale, RoundingMode.UNNECESSARY);
        scaleAdjusted = true;
      } catch (ArithmeticException aex) {
        throw new AvroTypeException(
            "Cannot encode decimal with scale " + valueScale + " as scale " + scale + " without rounding");
      }
    }
    int precision = decimal.getPrecision();
    int valuePrecision = value.precision();
    if (valuePrecision > precision) {
      if (scaleAdjusted) {
        throw new AvroTypeException("Cannot encode decimal with precision " + valuePrecision + " as max precision "
            + precision + ". This is after safely adjusting scale from " + valueScale + " to required " + scale);
      } else {
        throw new AvroTypeException(
            "Cannot encode decimal with precision " + valuePrecision + " as max precision " + precision);
      }
    }
    return value;
  }
}
public static class BigDecimalConversion extends Conversion<BigDecimal> {
  @Override
  public Class<BigDecimal> getConvertedType() {
    return BigDecimal.class;
  }

  @Override
  public String getLogicalTypeName() {
    return "big-decimal";
  }

  /**
   * Decodes a big-decimal encoded as an Avro-framed byte array (the unscaled
   * two's-complement value) followed by an int scale.
   */
  @Override
  public BigDecimal fromBytes(final ByteBuffer value, final Schema schema, final LogicalType type) {
    // NOTE(review): value.array() ignores the buffer's position and
    // arrayOffset — this assumes a full, non-sliced heap buffer; confirm
    // callers never pass a sliced or offset buffer.
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(value.array(), null);
    try {
      BigInteger bg = null;
      ByteBuffer buffer = decoder.readBytes(null);
      // NOTE(review): same array()/offset assumption applies to buffer here.
      byte[] array = buffer.array();
      if (array != null && array.length > 0) {
        bg = new BigInteger(array);
      }
      // NOTE(review): if the encoded unscaled value is empty, bg stays null
      // and the BigDecimal constructor below throws NPE — TODO confirm whether
      // an empty unscaled value (written by toBytes for a null unscaledValue)
      // can occur in practice.
      int scale = decoder.readInt();
      return new BigDecimal(bg, scale);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Encodes the value as an Avro-framed byte array (the unscaled
   * two's-complement value) followed by an int scale.
   */
  @Override
  public ByteBuffer toBytes(final BigDecimal value, final Schema schema, final LogicalType type) {
    try {
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
      BigInteger unscaledValue = value.unscaledValue();
      if (unscaledValue != null) {
        encoder.writeBytes(unscaledValue.toByteArray());
      } else {
        encoder.writeBytes(new byte[] {});
      }
      encoder.writeInt(value.scale());
      // binaryEncoder buffers; flush before wrapping the stream's contents
      encoder.flush();
      return ByteBuffer.wrap(out.toByteArray());
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public Schema getRecommendedSchema() {
    return LogicalTypes.bigDecimal().addToSchema(Schema.create(Schema.Type.BYTES));
  }
}
/**
 * Converts the Avro {@code duration} logical type (a 12-byte fixed holding
 * three unsigned little-endian 32-bit values: months, days, milliseconds) to
 * and from {@code TimePeriod}.
 */
public static class DurationConversion extends Conversion<TimePeriod> {

  @Override
  public Class<TimePeriod> getConvertedType() {
    return TimePeriod.class;
  }

  @Override
  public String getLogicalTypeName() {
    return "duration";
  }

  @Override
  public Schema getRecommendedSchema() {
    Schema fixed = Schema.createFixed("time.Duration",
        "A 12-byte byte array encoding a duration in months, days and milliseconds.", null, 12);
    return LogicalTypes.duration().addToSchema(fixed);
  }

  @Override
  public TimePeriod fromFixed(GenericFixed value, Schema schema, LogicalType type) {
    ByteBuffer raw = ByteBuffer.wrap(value.bytes()).order(ByteOrder.LITTLE_ENDIAN);
    // each field is stored as an unsigned 32-bit value
    long months = Integer.toUnsignedLong(raw.getInt());
    long days = Integer.toUnsignedLong(raw.getInt());
    long millis = Integer.toUnsignedLong(raw.getInt());
    return TimePeriod.of(months, days, millis);
  }

  @Override
  public GenericFixed toFixed(TimePeriod value, Schema schema, LogicalType type) {
    ByteBuffer packed = ByteBuffer.allocate(12).order(ByteOrder.LITTLE_ENDIAN);
    packed.putInt((int) value.getMonths());
    packed.putInt((int) value.getDays());
    packed.putInt((int) value.getMillis());
    return new GenericData.Fixed(schema, packed.array());
  }
}
/**
 * Convert an underlying representation of a logical type (such as a ByteBuffer)
 * to a higher level object (such as a BigDecimal).
 *
 * @param datum      The object to be converted.
 * @param schema     The schema of datum. Cannot be null if datum is not null.
 * @param type       The {@link org.apache.avro.LogicalType} of datum. Cannot be
 *                   null if datum is not null.
 * @param conversion The tool used to finish the conversion. Cannot be null if
 *                   datum is not null.
 * @return The result object, which is a high level object of the logical type.
 *         The null datum always converts to a null value.
 * @throws IllegalArgumentException if datum is not null, but schema, type or
 *                                  conversion is.
 */
public static Object convertToLogicalType(Object datum, Schema schema, LogicalType type, Conversion<?> conversion) {
  if (datum == null) {
    return null;
  }
  if (schema == null || type == null || conversion == null) {
    throw new IllegalArgumentException("Parameters cannot be null! Parameter values:"
        + Arrays.deepToString(new Object[] { datum, schema, type, conversion }));
  }
  try {
    // Dispatch on the underlying schema type, casting datum to the generic
    // representation the Conversion API expects for that type.
    switch (schema.getType()) {
    case RECORD:
      return conversion.fromRecord((IndexedRecord) datum, schema, type);
    case ENUM:
      return conversion.fromEnumSymbol((GenericEnumSymbol<?>) datum, schema, type);
    case ARRAY:
      return conversion.fromArray((Collection<?>) datum, schema, type);
    case MAP:
      return conversion.fromMap((Map<?, ?>) datum, schema, type);
    case FIXED:
      return conversion.fromFixed((GenericFixed) datum, schema, type);
    case STRING:
      return conversion.fromCharSequence((CharSequence) datum, schema, type);
    case BYTES:
      return conversion.fromBytes((ByteBuffer) datum, schema, type);
    case INT:
      return conversion.fromInt((Integer) datum, schema, type);
    case LONG:
      return conversion.fromLong((Long) datum, schema, type);
    case FLOAT:
      return conversion.fromFloat((Float) datum, schema, type);
    case DOUBLE:
      return conversion.fromDouble((Double) datum, schema, type);
    case BOOLEAN:
      return conversion.fromBoolean((Boolean) datum, schema, type);
    }
    // remaining schema types (e.g. NULL, UNION) pass through unchanged
    return datum;
  } catch (ClassCastException e) {
    // a cast above failed: datum did not use the expected generic representation
    throw new AvroRuntimeException(
        "Cannot convert " + datum + ':' + datum.getClass().getSimpleName() + ": expected generic type", e);
  }
}
/**
 * Convert a high level representation of a logical type (such as a BigDecimal)
 * to its underlying representation object (such as a ByteBuffer)
 *
 * @param datum      The object to be converted.
 * @param schema     The schema of datum. Cannot be null if datum is not null.
 * @param type       The {@link org.apache.avro.LogicalType} of datum. Cannot be
 *                   null if datum is not null.
 * @param conversion The tool used to finish the conversion. Cannot be null if
 *                   datum is not null.
 * @return The result object, which is an underlying representation object of
 *         the logical type. If the input param datum is null, a null value will
 *         be returned.
 * @throws IllegalArgumentException if datum is not null, but schema, type or
 *                                  conversion is.
 */
public static <T> Object convertToRawType(Object datum, Schema schema, LogicalType type, Conversion<T> conversion) {
  // A null datum always maps to null, regardless of the other arguments.
  if (datum == null) {
    return null;
  }
  if (schema == null || type == null || conversion == null) {
    throw new IllegalArgumentException("Parameters cannot be null! Parameter values:"
        + Arrays.deepToString(new Object[] { datum, schema, type, conversion }));
  }
  try {
    // Cast through the conversion's declared class so a wrong high-level type
    // surfaces as a ClassCastException handled below.
    Class<T> convertedType = conversion.getConvertedType();
    switch (schema.getType()) {
    case BOOLEAN:
      return conversion.toBoolean(convertedType.cast(datum), schema, type);
    case INT:
      return conversion.toInt(convertedType.cast(datum), schema, type);
    case LONG:
      return conversion.toLong(convertedType.cast(datum), schema, type);
    case FLOAT:
      return conversion.toFloat(convertedType.cast(datum), schema, type);
    case DOUBLE:
      return conversion.toDouble(convertedType.cast(datum), schema, type);
    case STRING:
      return conversion.toCharSequence(convertedType.cast(datum), schema, type);
    case BYTES:
      return conversion.toBytes(convertedType.cast(datum), schema, type);
    case FIXED:
      return conversion.toFixed(convertedType.cast(datum), schema, type);
    case ENUM:
      return conversion.toEnumSymbol(convertedType.cast(datum), schema, type);
    case ARRAY:
      return conversion.toArray(convertedType.cast(datum), schema, type);
    case MAP:
      return conversion.toMap(convertedType.cast(datum), schema, type);
    case RECORD:
      return conversion.toRecord(convertedType.cast(datum), schema, type);
    default:
      // NULL and UNION carriers have no conversion hook; pass through.
      return datum;
    }
  } catch (ClassCastException e) {
    throw new AvroRuntimeException(
        "Cannot convert " + datum + ':' + datum.getClass().getSimpleName() + ": expected logical type", e);
  }
}
}
| 7,245 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/InvalidAvroMagicException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import java.io.IOException;
/**
 * Thrown when a stream that is expected to be an Avro container file does not
 * begin with the Avro magic bytes.
 */
public class InvalidAvroMagicException extends IOException {
  // Exceptions are Serializable; declare the UID explicitly so the serialized
  // form does not depend on compiler-generated defaults (Effective Java #87).
  private static final long serialVersionUID = 1L;

  /**
   * @param message description of the invalid magic that was encountered
   */
  public InvalidAvroMagicException(String message) {
    super(message);
  }
}
| 7,246 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/AvroRuntimeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
/** Base Avro exception. */
public class AvroRuntimeException extends RuntimeException {
  // Exceptions are Serializable; declare the UID explicitly so the serialized
  // form does not depend on compiler-generated defaults (Effective Java #87).
  private static final long serialVersionUID = 1L;

  /**
   * @param cause the underlying cause of this exception
   */
  public AvroRuntimeException(Throwable cause) {
    super(cause);
  }

  /**
   * @param message description of the failure
   */
  public AvroRuntimeException(String message) {
    super(message);
  }

  /**
   * @param message description of the failure
   * @param cause   the underlying cause of this exception
   */
  public AvroRuntimeException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 7,247 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/AvroTypeException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
/** Thrown when an illegal type is used. */
public class AvroTypeException extends AvroRuntimeException {
  // Exceptions are Serializable; declare the UID explicitly so the serialized
  // form does not depend on compiler-generated defaults (Effective Java #87).
  private static final long serialVersionUID = 1L;

  /**
   * @param message description of the illegal type usage
   */
  public AvroTypeException(String message) {
    super(message);
  }

  /**
   * @param message description of the illegal type usage
   * @param cause   the underlying cause of this exception
   */
  public AvroTypeException(String message, Throwable cause) {
    super(message, cause);
  }
}
| 7,248 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/ValidateAll.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.avro;
/**
 * <p>
 * A {@link SchemaValidator} for validating the provided schema against all
 * schemas in the Iterable in {@link #validate(Schema, Iterable)}.
 * </p>
 * <p>
 * Uses the {@link SchemaValidationStrategy} provided in the constructor to
 * validate the {@link Schema} against each Schema in the Iterable, in Iterator
 * order, via {@link SchemaValidationStrategy#validate(Schema, Schema)}.
 * </p>
 */
public final class ValidateAll implements SchemaValidator {
  private final SchemaValidationStrategy strategy;

  /**
   * @param strategy The strategy to use for validation of pairwise schemas.
   * @throws NullPointerException if {@code strategy} is null
   */
  public ValidateAll(SchemaValidationStrategy strategy) {
    // Fail fast at construction instead of deferring the NPE to the first
    // validate(..) call, where the broken call site is harder to find.
    if (strategy == null) {
      throw new NullPointerException("strategy cannot be null");
    }
    this.strategy = strategy;
  }

  /**
   * Validates {@code toValidate} against every schema in {@code schemasInOrder},
   * in iteration order, stopping at the first failure.
   *
   * @throws SchemaValidationException if any pairwise validation fails
   */
  @Override
  public void validate(Schema toValidate, Iterable<Schema> schemasInOrder) throws SchemaValidationException {
    for (Schema existing : schemasInOrder) {
      strategy.validate(toValidate, existing);
    }
  }
}
| 7,249 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/LogicalTypes.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import java.util.ServiceLoader;
import java.util.concurrent.ConcurrentHashMap;
/**
 * Registry and factory methods for Avro's built-in and custom
 * {@link LogicalType}s. Built-in types are exposed via static factory methods
 * (e.g. {@link #decimal(int, int)}, {@link #date()}); custom types can be
 * registered manually or discovered through the {@link ServiceLoader} SPI.
 */
public class LogicalTypes {
  private static final Logger LOG = LoggerFactory.getLogger(LogicalTypes.class);

  /**
   * Factory interface and SPI for logical types. A {@code LogicalTypeFactory} can
   * be registered in two ways:
   *
   * <ol>
   * <li>Manually, via {@link #register(LogicalTypeFactory)} or
   * {@link #register(String, LogicalTypeFactory)}</li>
   *
   * <li>Automatically, when the {@code LogicalTypeFactory} implementation is a
   * public class with a public no-arg constructor, is named in a file called
   * {@code /META-INF/services/org.apache.avro.LogicalTypes$LogicalTypeFactory},
   * and both are available in the classpath</li>
   * </ol>
   *
   * @see ServiceLoader
   */
  public interface LogicalTypeFactory {
    /** Creates the {@link LogicalType} described by the given (annotated) schema. */
    LogicalType fromSchema(Schema schema);

    /**
     * The logical type name this factory produces. Optional: the default
     * implementation throws, and callers treat that as "name not provided".
     */
    default String getTypeName() {
      throw new UnsupportedOperationException("LogicalTypeFactory TypeName has not been provided");
    }
  }

  // Custom logical types, registered manually or via the ServiceLoader SPI.
  private static final Map<String, LogicalTypeFactory> REGISTERED_TYPES = new ConcurrentHashMap<>();

  // NOTE: this static initializer must stay after the REGISTERED_TYPES
  // declaration above — register(...) writes into that map.
  static {
    for (LogicalTypeFactory logicalTypeFactory : ServiceLoader.load(LogicalTypeFactory.class)) {
      register(logicalTypeFactory);
    }
  }

  /**
   * Register a logical type.
   *
   * @param factory The logical type factory
   *
   * @throws NullPointerException if {@code factory} or
   *                              {@code factory.getTypeName()} is {@code null}
   */
  public static void register(LogicalTypeFactory factory) {
    Objects.requireNonNull(factory, "Logical type factory cannot be null");
    register(factory.getTypeName(), factory);
  }

  /**
   * Register a logical type.
   *
   * @param logicalTypeName The logical type name
   * @param factory         The logical type factory
   *
   * @throws NullPointerException if {@code logicalTypeName} or {@code factory} is
   *                              {@code null}
   */
  public static void register(String logicalTypeName, LogicalTypeFactory factory) {
    Objects.requireNonNull(logicalTypeName, "Logical type name cannot be null");
    Objects.requireNonNull(factory, "Logical type factory cannot be null");
    try {
      // A mismatch between the explicit name and the factory's own name is
      // tolerated (the explicit name wins), but logged for diagnosis.
      String factoryTypeName = factory.getTypeName();
      if (!logicalTypeName.equals(factoryTypeName)) {
        LOG.debug("Provided logicalTypeName '{}' does not match factory typeName '{}'", logicalTypeName,
            factoryTypeName);
      }
    } catch (UnsupportedOperationException ignore) {
      // Ignore exception, as the default interface method throws
      // UnsupportedOperationException.
    }
    REGISTERED_TYPES.put(logicalTypeName, factory);
  }

  /**
   * Return an unmodifiable map of any registered custom {@link LogicalType}
   */
  public static Map<String, LogicalTypes.LogicalTypeFactory> getCustomRegisteredTypes() {
    // Unmodifiable *view*: later registrations remain visible through it.
    return Collections.unmodifiableMap(REGISTERED_TYPES);
  }

  /**
   * Returns the {@link LogicalType} from the schema, if one is present.
   * Throws if the annotation is present but invalid for the carrier schema.
   */
  public static LogicalType fromSchema(Schema schema) {
    return fromSchemaImpl(schema, true);
  }

  /**
   * Like {@link #fromSchema(Schema)}, but returns {@code null} instead of
   * throwing when the logical type annotation is invalid.
   */
  public static LogicalType fromSchemaIgnoreInvalid(Schema schema) {
    return fromSchemaImpl(schema, false);
  }

  /**
   * Shared implementation for {@link #fromSchema} and
   * {@link #fromSchemaIgnoreInvalid}.
   *
   * @param schema      the carrier schema, possibly annotated with a
   *                    "logicalType" property
   * @param throwErrors when true, validation failures propagate; when false,
   *                    they are logged and {@code null} is returned
   * @return the logical type, or {@code null} if the schema carries no (known,
   *         valid) logical type annotation
   */
  private static LogicalType fromSchemaImpl(Schema schema, boolean throwErrors) {
    final LogicalType logicalType;
    final String typeName = schema.getProp(LogicalType.LOGICAL_TYPE_PROP);
    if (typeName == null) {
      return null;
    }
    try {
      // NOTE(review): "duration" is absent from this switch, so it only
      // resolves if a factory was registered for it — TODO confirm intended.
      switch (typeName) {
      case TIMESTAMP_MILLIS:
        logicalType = TIMESTAMP_MILLIS_TYPE;
        break;
      case DECIMAL:
        // Decimal carries per-schema precision/scale, so it is not a singleton.
        logicalType = new Decimal(schema);
        break;
      case BIG_DECIMAL:
        logicalType = BIG_DECIMAL_TYPE;
        break;
      case UUID:
        logicalType = UUID_TYPE;
        break;
      case DATE:
        logicalType = DATE_TYPE;
        break;
      case TIMESTAMP_MICROS:
        logicalType = TIMESTAMP_MICROS_TYPE;
        break;
      case TIME_MILLIS:
        logicalType = TIME_MILLIS_TYPE;
        break;
      case TIME_MICROS:
        logicalType = TIME_MICROS_TYPE;
        break;
      case LOCAL_TIMESTAMP_MICROS:
        logicalType = LOCAL_TIMESTAMP_MICROS_TYPE;
        break;
      case LOCAL_TIMESTAMP_MILLIS:
        logicalType = LOCAL_TIMESTAMP_MILLIS_TYPE;
        break;
      default:
        // Unknown names fall back to the custom-type registry.
        final LogicalTypeFactory typeFactory = REGISTERED_TYPES.get(typeName);
        logicalType = (typeFactory == null) ? null : typeFactory.fromSchema(schema);
        break;
      }
      // make sure the type is valid before returning it
      if (logicalType != null) {
        logicalType.validate(schema);
      }
    } catch (RuntimeException e) {
      LOG.debug("Invalid logical type found", e);
      if (throwErrors) {
        throw e;
      }
      LOG.warn("Ignoring invalid logical type for name: {}", typeName);
      // ignore invalid types
      return null;
    }
    return logicalType;
  }

  // Logical type names as they appear in the "logicalType" schema property.
  private static final String DECIMAL = "decimal";
  private static final String BIG_DECIMAL = "big-decimal";
  private static final String DURATION = "duration";
  private static final String UUID = "uuid";
  private static final String DATE = "date";
  private static final String TIME_MILLIS = "time-millis";
  private static final String TIME_MICROS = "time-micros";
  private static final String TIMESTAMP_MILLIS = "timestamp-millis";
  private static final String TIMESTAMP_MICROS = "timestamp-micros";
  private static final String LOCAL_TIMESTAMP_MILLIS = "local-timestamp-millis";
  private static final String LOCAL_TIMESTAMP_MICROS = "local-timestamp-micros";

  /** Create a Decimal LogicalType with the given precision and scale 0 */
  public static Decimal decimal(int precision) {
    return decimal(precision, 0);
  }

  /** Create a Decimal LogicalType with the given precision and scale */
  public static Decimal decimal(int precision, int scale) {
    return new Decimal(precision, scale);
  }

  private static final BigDecimal BIG_DECIMAL_TYPE = new BigDecimal();

  /** Create a Big Decimal LogicalType that can accept any precision and scale */
  public static BigDecimal bigDecimal() {
    return BIG_DECIMAL_TYPE;
  }

  private static final LogicalType UUID_TYPE = new Uuid();

  /** Returns the singleton {@code uuid} logical type. */
  public static LogicalType uuid() {
    return UUID_TYPE;
  }

  private static final LogicalType DURATION_TYPE = new Duration();

  /** Returns the singleton {@code duration} logical type. */
  public static LogicalType duration() {
    return DURATION_TYPE;
  }

  private static final Date DATE_TYPE = new Date();

  /** Returns the singleton {@code date} logical type. */
  public static Date date() {
    return DATE_TYPE;
  }

  private static final TimeMillis TIME_MILLIS_TYPE = new TimeMillis();

  /** Returns the singleton {@code time-millis} logical type. */
  public static TimeMillis timeMillis() {
    return TIME_MILLIS_TYPE;
  }

  private static final TimeMicros TIME_MICROS_TYPE = new TimeMicros();

  /** Returns the singleton {@code time-micros} logical type. */
  public static TimeMicros timeMicros() {
    return TIME_MICROS_TYPE;
  }

  private static final TimestampMillis TIMESTAMP_MILLIS_TYPE = new TimestampMillis();

  /** Returns the singleton {@code timestamp-millis} logical type. */
  public static TimestampMillis timestampMillis() {
    return TIMESTAMP_MILLIS_TYPE;
  }

  private static final TimestampMicros TIMESTAMP_MICROS_TYPE = new TimestampMicros();

  /** Returns the singleton {@code timestamp-micros} logical type. */
  public static TimestampMicros timestampMicros() {
    return TIMESTAMP_MICROS_TYPE;
  }

  private static final LocalTimestampMillis LOCAL_TIMESTAMP_MILLIS_TYPE = new LocalTimestampMillis();

  /** Returns the singleton {@code local-timestamp-millis} logical type. */
  public static LocalTimestampMillis localTimestampMillis() {
    return LOCAL_TIMESTAMP_MILLIS_TYPE;
  }

  private static final LocalTimestampMicros LOCAL_TIMESTAMP_MICROS_TYPE = new LocalTimestampMicros();

  /** Returns the singleton {@code local-timestamp-micros} logical type. */
  public static LocalTimestampMicros localTimestampMicros() {
    return LOCAL_TIMESTAMP_MICROS_TYPE;
  }

  /** Uuid represents a uuid without a time */
  public static class Uuid extends LogicalType {
    private Uuid() {
      super(UUID);
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.STRING) {
        throw new IllegalArgumentException("Uuid can only be used with an underlying string type");
      }
    }
  }

  /**
   * Duration represents a duration, consisting on months, days and milliseconds
   */
  public static class Duration extends LogicalType {
    private Duration() {
      super(DURATION);
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.FIXED || schema.getFixedSize() != 12) {
        throw new IllegalArgumentException("Duration can only be used with an underlying fixed type of size 12.");
      }
    }
  }

  /** Decimal represents arbitrary-precision fixed-scale decimal numbers */
  public static class Decimal extends LogicalType {
    private static final String PRECISION_PROP = "precision";
    private static final String SCALE_PROP = "scale";
    private final int precision;
    private final int scale;

    private Decimal(int precision, int scale) {
      super(DECIMAL);
      this.precision = precision;
      this.scale = scale;
    }

    /**
     * Builds a Decimal from a schema's "precision"/"scale" properties; scale
     * defaults to 0 when absent, precision is required.
     */
    private Decimal(Schema schema) {
      super("decimal"); // same value as the DECIMAL constant
      if (!hasProperty(schema, PRECISION_PROP)) {
        throw new IllegalArgumentException("Invalid decimal: missing precision");
      }
      this.precision = getInt(schema, PRECISION_PROP);
      if (hasProperty(schema, SCALE_PROP)) {
        this.scale = getInt(schema, SCALE_PROP);
      } else {
        this.scale = 0;
      }
    }

    @Override
    public Schema addToSchema(Schema schema) {
      super.addToSchema(schema);
      schema.addProp(PRECISION_PROP, precision);
      schema.addProp(SCALE_PROP, scale);
      return schema;
    }

    public int getPrecision() {
      return precision;
    }

    public int getScale() {
      return scale;
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      // validate the type
      if (schema.getType() != Schema.Type.FIXED && schema.getType() != Schema.Type.BYTES) {
        throw new IllegalArgumentException("Logical type decimal must be backed by fixed or bytes");
      }
      if (precision <= 0) {
        throw new IllegalArgumentException("Invalid decimal precision: " + precision + " (must be positive)");
      } else if (precision > maxPrecision(schema)) {
        throw new IllegalArgumentException("fixed(" + schema.getFixedSize() + ") cannot store " + precision
            + " digits (max " + maxPrecision(schema) + ")");
      }
      if (scale < 0) {
        throw new IllegalArgumentException("Invalid decimal scale: " + scale + " (must be positive)");
      } else if (scale > precision) {
        throw new IllegalArgumentException(
            "Invalid decimal scale: " + scale + " (greater than precision: " + precision + ")");
      }
    }

    /**
     * Maximum number of decimal digits the carrier can hold: unbounded for
     * BYTES; for FIXED of n bytes, the digits of a signed two's-complement
     * integer of 8n-1 value bits, i.e. floor(log10(2) * (8n - 1)).
     */
    private long maxPrecision(Schema schema) {
      if (schema.getType() == Schema.Type.BYTES) {
        // not bounded
        return Integer.MAX_VALUE;
      } else if (schema.getType() == Schema.Type.FIXED) {
        int size = schema.getFixedSize();
        return Math.round(Math.floor(Math.log10(2) * (8 * size - 1)));
      } else {
        // not valid for any other type
        return 0;
      }
    }

    private boolean hasProperty(Schema schema, String name) {
      return schema.propsContainsKey(name);
    }

    // Reads an int-valued schema property; rejects non-Integer values.
    private int getInt(Schema schema, String name) {
      Object obj = schema.getObjectProp(name);
      if (obj instanceof Integer) {
        return (Integer) obj;
      }
      throw new IllegalArgumentException(
          "Expected int " + name + ": " + (obj == null ? "null" : obj + ":" + obj.getClass().getSimpleName()));
    }

    @Override
    public boolean equals(Object o) {
      if (this == o)
        return true;
      if (o == null || getClass() != o.getClass())
        return false;
      Decimal decimal = (Decimal) o;
      if (precision != decimal.precision)
        return false;
      return scale == decimal.scale;
    }

    @Override
    public int hashCode() {
      int result = precision;
      result = 31 * result + scale;
      return result;
    }
  }

  /** BigDecimal represents unbounded-precision decimals carried as bytes. */
  public static class BigDecimal extends LogicalType {
    private BigDecimal() {
      super(BIG_DECIMAL);
    }

    @Override
    public void validate(final Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.BYTES) {
        throw new IllegalArgumentException("BigDecimal can only be used with an underlying bytes type");
      }
    }
  }

  /** Date represents a date without a time */
  public static class Date extends LogicalType {
    private Date() {
      super(DATE);
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.INT) {
        throw new IllegalArgumentException("Date can only be used with an underlying int type");
      }
    }
  }

  /** TimeMillis represents a time in milliseconds without a date */
  public static class TimeMillis extends LogicalType {
    private TimeMillis() {
      super(TIME_MILLIS);
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.INT) {
        throw new IllegalArgumentException("Time (millis) can only be used with an underlying int type");
      }
    }
  }

  /** TimeMicros represents a time in microseconds without a date */
  public static class TimeMicros extends LogicalType {
    private TimeMicros() {
      super(TIME_MICROS);
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.LONG) {
        throw new IllegalArgumentException("Time (micros) can only be used with an underlying long type");
      }
    }
  }

  /** TimestampMillis represents a date and time in milliseconds */
  public static class TimestampMillis extends LogicalType {
    private TimestampMillis() {
      super(TIMESTAMP_MILLIS);
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.LONG) {
        throw new IllegalArgumentException("Timestamp (millis) can only be used with an underlying long type");
      }
    }
  }

  /** TimestampMicros represents a date and time in microseconds */
  public static class TimestampMicros extends LogicalType {
    private TimestampMicros() {
      super(TIMESTAMP_MICROS);
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.LONG) {
        throw new IllegalArgumentException("Timestamp (micros) can only be used with an underlying long type");
      }
    }
  }

  /** LocalTimestampMillis represents a zone-less date and time in milliseconds. */
  public static class LocalTimestampMillis extends LogicalType {
    private LocalTimestampMillis() {
      super(LOCAL_TIMESTAMP_MILLIS);
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.LONG) {
        throw new IllegalArgumentException("Local timestamp (millis) can only be used with an underlying long type");
      }
    }
  }

  /** LocalTimestampMicros represents a zone-less date and time in microseconds. */
  public static class LocalTimestampMicros extends LogicalType {
    private LocalTimestampMicros() {
      super(LOCAL_TIMESTAMP_MICROS);
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      if (schema.getType() != Schema.Type.LONG) {
        throw new IllegalArgumentException("Local timestamp (micros) can only be used with an underlying long type");
      }
    }
  }
}
| 7,250 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/SpecificData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.AvroTypeException;
import org.apache.avro.Protocol;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Type;
import org.apache.avro.generic.GenericData;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.util.ClassUtils;
import org.apache.avro.util.MapUtil;
import org.apache.avro.util.SchemaUtil;
import org.apache.avro.util.internal.ClassValueCache;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.ParameterizedType;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.WeakHashMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
/** Utilities for generated Java classes and interfaces. */
public class SpecificData extends GenericData {
private static final SpecificData INSTANCE = new SpecificData();
private static final Class<?>[] NO_ARG = new Class[] {};
private static final Class<?>[] SCHEMA_ARG = new Class[] { Schema.class };
private static final Function<Class<?>, Constructor<?>> CTOR_CACHE = new ClassValueCache<>(c -> {
boolean useSchema = SchemaConstructable.class.isAssignableFrom(c);
try {
Constructor<?> meth = c.getDeclaredConstructor(useSchema ? SCHEMA_ARG : NO_ARG);
meth.setAccessible(true);
return meth;
} catch (Exception e) {
throw new RuntimeException(e);
}
});
private static final Function<Class<?>, SpecificData> MODEL_CACHE = new ClassValueCache<>(c -> {
Field specificDataField;
try {
specificDataField = c.getDeclaredField("MODEL$");
specificDataField.setAccessible(true);
return (SpecificData) specificDataField.get(null);
} catch (NoSuchFieldException e) {
// Return default instance
return SpecificData.get();
} catch (IllegalAccessException e) {
throw new AvroRuntimeException("while trying to access field MODEL$ on " + c.getCanonicalName(), e);
}
});
public static final String CLASS_PROP = "java-class";
public static final String KEY_CLASS_PROP = "java-key-class";
public static final String ELEMENT_PROP = "java-element-class";
public static final char RESERVED_WORD_ESCAPE_CHAR = '$';
/**
* Reserved words from
* https://docs.oracle.com/javase/specs/jls/se16/html/jls-3.html require
* mangling in order to be used in generated Java code.
*/
public static final Set<String> RESERVED_WORDS = new HashSet<>(Arrays.asList(
// Keywords from Section 3.9 can't be used as identifiers.
"_", "abstract", "assert", "boolean", "break", "byte", "case", "catch", "char", "class", "const", "continue",
"default", "do", "double", "else", "enum", "extends", "final", "finally", "float", "for", "goto", "if",
"implements", "import", "instanceof", "int", "interface", "long", "native", "new", "package", "private",
"protected", "public", "return", "short", "static", "strictfp", "super", "switch", "synchronized", "this",
"throw", "throws", "transient", "try", "void", "volatile", "while",
// Literals from Section 3.10 can't be used as identifiers.
"true", "false", "null",
// Note that module-related restricted keywords can still be used.
// Class names used internally by the avro code generator
"Builder"));
/**
* Read/write some common builtin classes as strings. Representing these as
* strings isn't always best, as they aren't always ordered ideally, but at
* least they're stored. Also note that, for compatibility, only classes that
* wouldn't be otherwise correctly readable or writable should be added here,
* e.g., those without a no-arg constructor or those whose fields are all
* transient.
*/
protected Set<Class> stringableClasses = new HashSet<>(Arrays.asList(java.math.BigDecimal.class,
java.math.BigInteger.class, java.net.URI.class, java.net.URL.class, java.io.File.class));
/** For subclasses. Applications normally use {@link SpecificData#get()}. */
public SpecificData() {
}
/** Construct with a specific classloader. */
public SpecificData(ClassLoader classLoader) {
super(classLoader);
}
@Override
public DatumReader createDatumReader(Schema schema) {
return createDatumReader(schema, schema);
}
@Override
public DatumReader createDatumReader(Schema writer, Schema reader) {
return new SpecificDatumReader(writer, reader, this);
}
@Override
public DatumWriter createDatumWriter(Schema schema) {
return new SpecificDatumWriter(schema, this);
}
/** Return the singleton instance. */
public static SpecificData get() {
return INSTANCE;
}
/**
* For RECORD and UNION type schemas, this method returns the SpecificData
* instance of the class associated with the schema, in order to get the right
* conversions for any logical types used.
*
* @param reader the reader schema
* @return the SpecificData associated with the schema's class, or the default
* instance.
*/
public static SpecificData getForSchema(Schema reader) {
if (reader != null && (reader.getType() == Type.RECORD || reader.getType() == Type.UNION)) {
final Class<?> clazz = SpecificData.get().getClass(reader);
if (clazz != null) {
return getForClass(clazz);
}
}
return SpecificData.get();
}
/**
* If the given class is assignable to {@link SpecificRecordBase}, this method
* returns the SpecificData instance from the field {@code MODEL$}, in order to
* get the correct {@link org.apache.avro.Conversion} instances for the class.
* Falls back to the default instance {@link SpecificData#get()} for other
* classes or if the field is not found.
*
* @param c A class
* @param <T> .
* @return The SpecificData from the SpecificRecordBase instance, or the default
* SpecificData instance.
*/
public static <T> SpecificData getForClass(Class<T> c) {
if (SpecificRecordBase.class.isAssignableFrom(c)) {
return MODEL_CACHE.apply(c);
}
return SpecificData.get();
}
private boolean useCustomCoderFlag = Boolean
.parseBoolean(System.getProperty("org.apache.avro.specific.use_custom_coders", "false"));
/**
* Retrieve the current value of the custom-coders feature flag. Defaults to
* <code>false</code>, but this default can be overridden using the system
* property <code>org.apache.avro.specific.use_custom_coders</code>, and can be
* set dynamically by {@link SpecificData#useCustomCoders()}. See <a
* href="https://avro.apache.org/docs/current/gettingstartedjava.html#Beta+feature:+Generating+faster+code"Getting
* started with Java</a> for more about this feature flag.
*/
public boolean useCustomCoders() {
return useCustomCoderFlag;
}
/**
* Dynamically set the value of the custom-coder feature flag. See
* {@link SpecificData#useCustomCoders()}.
*/
public void setCustomCoders(boolean flag) {
useCustomCoderFlag = flag;
}
@Override
protected boolean isEnum(Object datum) {
return datum instanceof Enum || super.isEnum(datum);
}
/** Creates an enum value, mapping mangled reserved-word symbols onto the generated constants. */
@Override
public Object createEnum(String symbol, Schema schema) {
  Class c = getClass(schema);
  if (c == null) {
    // No generated enum class on the classpath: use the generic representation.
    return super.createEnum(symbol, schema);
  }
  // Generated constants for reserved words carry a trailing '$'.
  String constantName = RESERVED_WORDS.contains(symbol) ? symbol + "$" : symbol;
  return Enum.valueOf(c, constantName);
}
/** Derives the schema from the enum's class when possible; otherwise goes generic. */
@Override
protected Schema getEnumSchema(Object datum) {
  if (datum instanceof Enum) {
    return getSchema(datum.getClass());
  }
  return super.getEnumSchema(datum);
}
// Maps a schema full name to its implementing class. Failed lookups are cached
// as NO_CLASS so repeated misses don't re-trigger ClassNotFoundException.
private final ConcurrentMap<String, Class> classCache = new ConcurrentHashMap<>();
// Sentinel for negative caching: an anonymous class that never matches a real one.
private static final Class NO_CLASS = new Object() {
}.getClass();
// Shared NULL schema, used when eliding nullable unions in getClass().
private static final Schema NULL_SCHEMA = Schema.create(Schema.Type.NULL);
/** Undoes mangling for reserved words: strips every trailing '$'. */
protected static String unmangle(String word) {
  int end = word.length();
  while (end > 0 && word.charAt(end - 1) == '$') {
    end--;
  }
  return word.substring(0, end);
}
/** Return the class that implements a schema, or null if none exists. */
public Class getClass(Schema schema) {
  switch (schema.getType()) {
  case FIXED:
  case RECORD:
  case ENUM:
    // Named types: resolve the generated class from the schema's full name.
    // Both hits and misses (as NO_CLASS) are memoized in classCache.
    String name = schema.getFullName();
    if (name == null)
      return null;
    Class<?> c = MapUtil.computeIfAbsent(classCache, name, n -> {
      try {
        return ClassUtils.forName(getClassLoader(), getClassName(schema));
      } catch (ClassNotFoundException e) {
        // This might be a nested namespace. Try using the last tokens in the
        // namespace as an enclosing class by progressively replacing period
        // delimiters with $
        StringBuilder nestedName = new StringBuilder(n);
        int lastDot = n.lastIndexOf('.');
        while (lastDot != -1) {
          nestedName.setCharAt(lastDot, '$');
          try {
            return ClassUtils.forName(getClassLoader(), nestedName.toString());
          } catch (ClassNotFoundException ignored) {
            // not found at this nesting level; keep walking left
          }
          lastDot = n.lastIndexOf('.', lastDot - 1);
        }
        return NO_CLASS; // negative-cache the failed lookup
      }
    });
    return c == NO_CLASS ? null : c;
  case ARRAY:
    return List.class;
  case MAP:
    return Map.class;
  case UNION:
    // A two-branch union containing null maps to the boxed form of the other branch.
    List<Schema> types = schema.getTypes(); // elide unions with null
    if ((types.size() == 2) && types.contains(NULL_SCHEMA))
      return getWrapper(types.get(types.get(0).equals(NULL_SCHEMA) ? 1 : 0));
    return Object.class;
  case STRING:
    // The STRING_PROP schema property selects String over the default CharSequence.
    if (STRING_TYPE_STRING.equals(schema.getProp(STRING_PROP)))
      return String.class;
    return CharSequence.class;
  case BYTES:
    return ByteBuffer.class;
  case INT:
    return Integer.TYPE;
  case LONG:
    return Long.TYPE;
  case FLOAT:
    return Float.TYPE;
  case DOUBLE:
    return Double.TYPE;
  case BOOLEAN:
    return Boolean.TYPE;
  case NULL:
    return Void.TYPE;
  default:
    throw new AvroRuntimeException("Unknown type: " + schema);
  }
}
/** Boxes a primitive schema type; any other type is resolved via getClass(). */
private Class getWrapper(Schema schema) {
  Schema.Type t = schema.getType();
  if (t == Schema.Type.INT)
    return Integer.class;
  if (t == Schema.Type.LONG)
    return Long.class;
  if (t == Schema.Type.FLOAT)
    return Float.class;
  if (t == Schema.Type.DOUBLE)
    return Double.class;
  if (t == Schema.Type.BOOLEAN)
    return Boolean.class;
  return getClass(schema);
}
/** Returns the Java class name indicated by a schema's name and namespace. */
public static String getClassName(Schema schema) {
  String namespace = schema.getNamespace();
  String name = schema.getName();
  if (namespace == null || "".equals(namespace))
    return name; // no namespace: unqualified class name
  StringBuilder classNameBuilder = new StringBuilder();
  String[] words = namespace.split("\\.");
  for (int i = 0; i < words.length; i++) {
    String word = words[i];
    classNameBuilder.append(word);
    // Namespace segments that collide with Java reserved words get escaped.
    if (RESERVED_WORDS.contains(word)) {
      classNameBuilder.append(RESERVED_WORD_ESCAPE_CHAR);
    }
    if (i != words.length - 1 || !word.endsWith("$")) { // back-compatibly handle $
      classNameBuilder.append(".");
    }
  }
  classNameBuilder.append(name);
  return classNameBuilder.toString();
}
// cache for schemas created from Class objects. Use ClassValue to avoid
// locking classloaders and is GC and thread safe.
private final ClassValueCache<Schema> schemaClassCache = new ClassValueCache<>(c -> createSchema(c, new HashMap<>()));
// for non-class objects (e.g. parameterized types), use a WeakHashMap; the
// synchronized wrapper is needed because WeakHashMap itself is not thread-safe
private final Map<java.lang.reflect.Type, Schema> schemaTypeCache = Collections.synchronizedMap(new WeakHashMap<>());
/** Find the schema for a Java type, consulting the class or type cache. */
public Schema getSchema(java.lang.reflect.Type type) {
  try {
    if (type instanceof Class) {
      return schemaClassCache.apply((Class<?>) type);
    }
    return schemaTypeCache.computeIfAbsent(type, t -> createSchema(t, new HashMap<>()));
  } catch (Exception e) {
    // Surface Avro failures as-is; wrap anything else.
    if (e instanceof AvroRuntimeException) {
      throw (AvroRuntimeException) e;
    }
    throw new AvroRuntimeException(e);
  }
}
/** Create the schema for a Java type. */
@SuppressWarnings(value = "unchecked")
protected Schema createSchema(java.lang.reflect.Type type, Map<String, Schema> names) {
  // Scalar mappings first: CharSequence -> string, ByteBuffer -> bytes, and
  // both boxed and primitive forms of the numeric/boolean/void types.
  if (type instanceof Class && CharSequence.class.isAssignableFrom((Class) type))
    return Schema.create(Type.STRING);
  else if (type == ByteBuffer.class)
    return Schema.create(Type.BYTES);
  else if ((type == Integer.class) || (type == Integer.TYPE))
    return Schema.create(Type.INT);
  else if ((type == Long.class) || (type == Long.TYPE))
    return Schema.create(Type.LONG);
  else if ((type == Float.class) || (type == Float.TYPE))
    return Schema.create(Type.FLOAT);
  else if ((type == Double.class) || (type == Double.TYPE))
    return Schema.create(Type.DOUBLE);
  else if ((type == Boolean.class) || (type == Boolean.TYPE))
    return Schema.create(Type.BOOLEAN);
  else if ((type == Void.class) || (type == Void.TYPE))
    return Schema.create(Type.NULL);
  else if (type instanceof ParameterizedType) {
    ParameterizedType ptype = (ParameterizedType) type;
    Class raw = (Class) ptype.getRawType();
    java.lang.reflect.Type[] params = ptype.getActualTypeArguments();
    if (Collection.class.isAssignableFrom(raw)) { // array
      if (params.length != 1)
        throw new AvroTypeException("No array type specified.");
      return Schema.createArray(createSchema(params[0], names));
    } else if (Map.class.isAssignableFrom(raw)) { // map
      java.lang.reflect.Type key = params[0];
      java.lang.reflect.Type value = params[1];
      // Avro map keys are strings, so the Java key type must be a CharSequence.
      if (!(key instanceof Class && CharSequence.class.isAssignableFrom((Class<?>) key)))
        throw new AvroTypeException("Map key class not CharSequence: " + SchemaUtil.describe(key));
      return Schema.createMap(createSchema(value, names));
    } else if (Optional.class.isAssignableFrom(raw)) {
      // Optional<T> maps to the nullable union ["null", T].
      return Schema.createUnion(Schema.create(Schema.Type.NULL), createSchema(params[0], names));
    } else {
      // Other parameterized types fall back to their raw class.
      return createSchema(raw, names);
    }
  } else if (type instanceof Class) { // class
    Class c = (Class) type;
    String fullName = c.getName();
    Schema schema = names.get(fullName);
    if (schema == null)
      try {
        // Generated classes expose their schema via the static SCHEMA$ field.
        schema = (Schema) (c.getDeclaredField("SCHEMA$").get(null));
        if (!fullName.equals(getClassName(schema)))
          // HACK: schema mismatches class. maven shade plugin? try replacing.
          schema = new Schema.Parser()
              .parse(schema.toString().replace(schema.getNamespace(), c.getPackage().getName()));
      } catch (NoSuchFieldException e) {
        throw new AvroRuntimeException("Not a Specific class: " + c);
      } catch (IllegalAccessException e) {
        throw new AvroRuntimeException(e);
      }
    names.put(fullName, schema);
    return schema;
  }
  throw new AvroTypeException("Unknown type: " + type);
}
/** Stringable types serialize via toString(), so they report the "string" schema name. */
@Override
protected String getSchemaName(Object datum) {
  if (datum != null && isStringable(datum.getClass())) {
    return Schema.Type.STRING.getName();
  }
  return super.getSchemaName(datum);
}
/** True if a class should be serialized with toString(). */
protected boolean isStringable(Class<?> c) {
  // Membership in the registered set of stringable classes.
  return stringableClasses.contains(c);
}

/** True if a class IS a string type. */
protected boolean isStringType(Class<?> c) {
  // this will return true for String, Utf8, CharSequence
  return CharSequence.class.isAssignableFrom(c);
}
/**
 * Return the protocol for a Java interface.
 *
 * @param iface a generated protocol interface exposing a static PROTOCOL field
 * @throws AvroRuntimeException if the field is missing or inaccessible
 */
public Protocol getProtocol(Class iface) {
  try {
    Protocol p = (Protocol) (iface.getDeclaredField("PROTOCOL").get(null));
    if (!p.getNamespace().equals(iface.getPackage().getName()))
      // HACK: protocol mismatches iface. maven shade plugin? try replacing.
      p = Protocol.parse(p.toString().replace(p.getNamespace(), iface.getPackage().getName()));
    return p;
  } catch (NoSuchFieldException e) {
    throw new AvroRuntimeException("Not a Specific protocol: " + iface);
  } catch (IllegalAccessException e) {
    throw new AvroRuntimeException(e);
  }
}
/**
 * Compares two datums for the given schema. Java enums are compared by
 * ordinal; everything else is delegated to the generic implementation.
 */
@Override
protected int compare(Object o1, Object o2, Schema s, boolean eq) {
  switch (s.getType()) {
  case ENUM:
    if (o1 instanceof Enum)
      // Integer.compare avoids the overflow risk of ordinal subtraction while
      // preserving the sign contract callers rely on.
      return Integer.compare(((Enum) o1).ordinal(), ((Enum) o2).ordinal());
    // non-Enum enum representations fall through to the generic comparison
  default:
    return super.compare(o1, o2, s, eq);
  }
}
/**
 * Create an instance of a class. If the class implements
 * {@link SchemaConstructable}, call a constructor with a
 * {@link org.apache.avro.Schema} parameter, otherwise use a no-arg constructor.
 *
 * @param c the class to instantiate
 * @param s the schema passed to a {@link SchemaConstructable} constructor
 * @return the new instance
 * @throws RuntimeException wrapping any reflective failure
 */
@SuppressWarnings("unchecked")
public static Object newInstance(Class c, Schema s) {
  boolean useSchema = SchemaConstructable.class.isAssignableFrom(c);
  Object result;
  try {
    // CTOR_CACHE memoizes the resolved constructor per class.
    Constructor<?> meth = CTOR_CACHE.apply(c);
    result = meth.newInstance(useSchema ? new Object[] { s } : null);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
  return result;
}
/** Reuses {@code old} when it already has the generated fixed type; else builds a new one. */
@Override
public Object createFixed(Object old, Schema schema) {
  Class fixedClass = getClass(schema);
  if (fixedClass != null) {
    return fixedClass.isInstance(old) ? old : newInstance(fixedClass, schema);
  }
  return super.createFixed(old, schema); // punt to generic
}
/** Reuses {@code old} when it already has the generated record type; else builds a new one. */
@Override
public Object newRecord(Object old, Schema schema) {
  Class recordClass = getClass(schema);
  if (recordClass != null) {
    return recordClass.isInstance(old) ? old : newInstance(recordClass, schema);
  }
  return super.newRecord(old, schema); // punt to generic
}
/**
 * Create an InstanceSupplier that caches all information required for the
 * creation of a schema record instance rather than having to look them up for
 * each call (as newRecord would).
 *
 * Note: the doc comment must precede the annotations — in the previous
 * placement (between the annotations and the method) the javadoc tool ignored
 * it entirely.
 */
@SuppressWarnings("rawtypes")
@Override
public InstanceSupplier getNewRecordSupplier(Schema schema) {
  Class c = getClass(schema);
  if (c == null) {
    return super.getNewRecordSupplier(schema);
  }
  // Resolve the constructor and its argument list once, outside the lambda.
  boolean useSchema = SchemaConstructable.class.isAssignableFrom(c);
  Constructor<?> meth = CTOR_CACHE.apply(c);
  Object[] params = useSchema ? new Object[] { schema } : (Object[]) null;
  return (old, sch) -> {
    try {
      return c.isInstance(old) ? old : meth.newInstance(params);
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException(e);
    }
  };
}
/**
 * Tag interface that indicates that a class has a one-argument constructor that
 * accepts a Schema. Instances of such classes are created reflectively with the
 * schema passed to that constructor.
 *
 * @see #newInstance
 */
public interface SchemaConstructable {
}
/** Runtime utility used by generated classes: adapts an ObjectInput to a direct binary decoder. */
public static BinaryDecoder getDecoder(ObjectInput in) {
  return DecoderFactory.get().directBinaryDecoder(new ExternalizableInput(in), null);
}

/** Runtime utility used by generated classes: adapts an ObjectOutput to a direct binary encoder. */
public static BinaryEncoder getEncoder(ObjectOutput out) {
  return EncoderFactory.get().directBinaryEncoder(new ExternalizableOutput(out), null);
}
/**
 * Fast path for string creation: plain Strings (the common case) and
 * stringable values are returned unchanged; everything else is delegated
 * to the generic implementation.
 */
@Override
public Object createString(Object value) {
  if (value instanceof String || isStringable(value.getClass())) {
    return value;
  }
  return super.createString(value);
}
}
| 7,251 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/FixedSize.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import java.lang.annotation.Documented;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares the size of implementations of
 * {@link org.apache.avro.generic.GenericFixed GenericFixed}. Retained at
 * runtime so the size can be read reflectively.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.TYPE })
@Documented
public @interface FixedSize {
  /** The declared size of instances of classes with this annotation. */
  int value();
}
| 7,252 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/SpecificErrorBuilderBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import java.lang.reflect.Constructor;
import org.apache.avro.Schema;
import org.apache.avro.data.ErrorBuilder;
import org.apache.avro.data.RecordBuilderBase;
/**
 * Abstract base class for specific ErrorBuilder implementations. Not
 * thread-safe.
 */
abstract public class SpecificErrorBuilderBase<T extends SpecificExceptionBase> extends RecordBuilderBase<T>
    implements ErrorBuilder<T> {
  private Constructor<T> errorConstructor;
  private Object value; // the error's value field
  private boolean hasValue; // true once setValue has been called
  private Throwable cause; // the error's cause
  private boolean hasCause; // true once setCause has been called

  /**
   * Creates a SpecificErrorBuilderBase for building errors of the given type.
   *
   * @param schema the schema associated with the error class.
   */
  protected SpecificErrorBuilderBase(Schema schema) {
    super(schema, SpecificData.get());
  }

  /**
   * Creates a SpecificErrorBuilderBase for building errors of the given type.
   *
   * @param schema the schema associated with the error class.
   * @param model  the SpecificData instance associated with the error class
   */
  protected SpecificErrorBuilderBase(Schema schema, SpecificData model) {
    super(schema, model);
  }

  /**
   * SpecificErrorBuilderBase copy constructor.
   *
   * @param other SpecificErrorBuilderBase instance to copy.
   */
  protected SpecificErrorBuilderBase(SpecificErrorBuilderBase<T> other) {
    // Preserve the source builder's data model (and thus its conversions),
    // consistent with SpecificRecordBuilderBase, instead of resetting to the
    // default SpecificData instance.
    super(other, other.data());
    this.errorConstructor = other.errorConstructor;
    this.value = other.value;
    this.hasValue = other.hasValue;
    this.cause = other.cause;
    this.hasCause = other.hasCause;
  }

  /**
   * Creates a SpecificErrorBuilderBase by copying an existing error instance.
   *
   * @param other the error instance to copy.
   */
  protected SpecificErrorBuilderBase(T other) {
    // Resolve the model from the error's schema (consistent with
    // SpecificRecordBuilderBase) so generated conversions are honored.
    super(other.getSchema(), SpecificData.getForSchema(other.getSchema()));
    Object otherValue = other.getValue();
    if (otherValue != null) {
      setValue(otherValue);
    }
    Throwable otherCause = other.getCause();
    if (otherCause != null) {
      setCause(otherCause);
    }
  }

  @Override
  public Object getValue() {
    return value;
  }

  @Override
  public SpecificErrorBuilderBase<T> setValue(Object value) {
    this.value = value;
    hasValue = true;
    return this;
  }

  @Override
  public boolean hasValue() {
    return hasValue;
  }

  @Override
  public SpecificErrorBuilderBase<T> clearValue() {
    value = null;
    hasValue = false;
    return this;
  }

  @Override
  public Throwable getCause() {
    return cause;
  }

  @Override
  public SpecificErrorBuilderBase<T> setCause(Throwable cause) {
    this.cause = cause;
    hasCause = true;
    return this;
  }

  @Override
  public boolean hasCause() {
    return hasCause;
  }

  @Override
  public SpecificErrorBuilderBase<T> clearCause() {
    cause = null;
    hasCause = false;
    return this;
  }
}
| 7,253 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/ExternalizableOutput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import java.io.OutputStream;
import java.io.IOException;
import java.io.ObjectOutput;
/**
 * Helper to permit Externalizable implementations that write to an
 * OutputStream: adapts an {@link ObjectOutput} to the OutputStream API by
 * delegating every operation.
 */
class ExternalizableOutput extends OutputStream {
  private final ObjectOutput target;

  public ExternalizableOutput(ObjectOutput out) {
    this.target = out;
  }

  @Override
  public void write(int c) throws IOException {
    target.write(c);
  }

  @Override
  public void write(byte[] b) throws IOException {
    target.write(b);
  }

  @Override
  public void write(byte[] b, int offset, int len) throws IOException {
    target.write(b, offset, len);
  }

  @Override
  public void flush() throws IOException {
    target.flush();
  }

  @Override
  public void close() throws IOException {
    target.close();
  }
}
| 7,254 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/AvroGenerated.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Indicates that an annotated class is an Avro generated class. All Avro
 * generated classes will be annotated with this annotation. Retained at
 * runtime so tools can detect generated types reflectively.
 */
@Target(ElementType.TYPE)
@Retention(RetentionPolicy.RUNTIME)
public @interface AvroGenerated {
}
| 7,255 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/SpecificFixed.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import java.io.Externalizable;
import java.io.ObjectOutput;
import java.io.ObjectInput;
import java.io.IOException;
import java.util.Arrays;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.io.BinaryData;
/** Base class for generated fixed-sized data classes. */
public abstract class SpecificFixed implements GenericFixed, Comparable<SpecificFixed>, Externalizable {
  // Backing storage; expected to be getSchema().getFixedSize() bytes long.
  private byte[] bytes;

  /**
   * Creates an instance backed by a zero-filled array of the schema's fixed
   * size. NOTE(review): this calls the overridable getSchema() from the
   * constructor, which generated subclasses are expected to support.
   */
  public SpecificFixed() {
    bytes(new byte[getSchema().getFixedSize()]);
  }

  /** Creates an instance backed by the given array (not copied). */
  public SpecificFixed(byte[] bytes) {
    bytes(bytes);
  }

  /** Sets the backing array (not copied). */
  public void bytes(byte[] bytes) {
    this.bytes = bytes;
  }

  @Override
  public byte[] bytes() {
    return bytes;
  }

  @Override
  public abstract Schema getSchema();

  @Override
  public boolean equals(Object o) {
    // Equal to any GenericFixed with identical bytes, regardless of concrete class.
    if (o == this)
      return true;
    return o instanceof GenericFixed && Arrays.equals(bytes, ((GenericFixed) o).bytes());
  }

  @Override
  public int hashCode() {
    return Arrays.hashCode(bytes);
  }

  @Override
  public String toString() {
    return Arrays.toString(bytes);
  }

  @Override
  public int compareTo(SpecificFixed that) {
    // Delegates byte-wise comparison to BinaryData.compareBytes.
    return BinaryData.compareBytes(this.bytes, 0, this.bytes.length, that.bytes, 0, that.bytes.length);
  }

  @Override
  public abstract void writeExternal(ObjectOutput out) throws IOException;

  @Override
  public abstract void readExternal(ObjectInput in) throws IOException;
}
| 7,256 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/ExternalizableInput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import java.io.InputStream;
import java.io.IOException;
import java.io.ObjectInput;
/**
 * Helper to permit Externalizable implementations that read from an
 * InputStream: adapts an {@link ObjectInput} to the InputStream API by
 * delegating every operation. Mark/reset is not supported.
 */
class ExternalizableInput extends InputStream {
  private final ObjectInput source;

  public ExternalizableInput(ObjectInput in) {
    this.source = in;
  }

  @Override
  public int read() throws IOException {
    return source.read();
  }

  @Override
  public int read(byte[] b) throws IOException {
    return source.read(b);
  }

  @Override
  public int read(byte[] b, int offset, int len) throws IOException {
    return source.read(b, offset, len);
  }

  @Override
  public long skip(long n) throws IOException {
    return source.skip(n);
  }

  @Override
  public int available() throws IOException {
    return source.available();
  }

  @Override
  public boolean markSupported() {
    return false;
  }

  @Override
  public void close() throws IOException {
    source.close();
  }
}
| 7,257 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/SpecificRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import org.apache.avro.generic.IndexedRecord;
/**
 * Implemented by generated record classes. Permits efficient access to fields
 * by position via the inherited {@link IndexedRecord} accessors.
 */
public interface SpecificRecord extends IndexedRecord {
}
| 7,258 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/SpecificRecordBuilderBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import org.apache.avro.Schema;
import org.apache.avro.data.RecordBuilderBase;
/**
 * Abstract base class for specific RecordBuilder implementations. Not
 * thread-safe.
 */
abstract public class SpecificRecordBuilderBase<T extends SpecificRecord> extends RecordBuilderBase<T> {
  /**
   * Creates a SpecificRecordBuilderBase for building records of the given type.
   *
   * @param schema the schema associated with the record class.
   */
  protected SpecificRecordBuilderBase(Schema schema) {
    // Resolve the model from the schema so generated conversions are honored.
    super(schema, SpecificData.getForSchema(schema));
  }

  /**
   * Creates a SpecificRecordBuilderBase for building records of the given type.
   *
   * @param schema the schema associated with the record class.
   * @param model  the SpecificData associated with the specific record class
   */
  protected SpecificRecordBuilderBase(Schema schema, SpecificData model) {
    super(schema, model);
  }

  /**
   * SpecificRecordBuilderBase copy constructor.
   *
   * @param other SpecificRecordBuilderBase instance to copy.
   */
  protected SpecificRecordBuilderBase(SpecificRecordBuilderBase<T> other) {
    // Preserve the source builder's data model.
    super(other, other.data());
  }

  /**
   * Creates a SpecificRecordBuilderBase by copying an existing record instance.
   *
   * @param other the record instance to copy.
   */
  protected SpecificRecordBuilderBase(T other) {
    super(other.getSchema(), SpecificData.getForSchema(other.getSchema()));
  }
}
| 7,259 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/SpecificRecordBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import java.io.Externalizable;
import java.io.ObjectOutput;
import java.io.ObjectInput;
import java.io.IOException;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Conversion;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.ResolvingDecoder;
import org.apache.avro.io.Encoder;
import org.apache.avro.message.MessageDecoder;
import org.apache.avro.message.MessageEncoder;
/** Base class for generated record classes. */
public abstract class SpecificRecordBase
    implements SpecificRecord, Comparable<SpecificRecord>, GenericRecord, Externalizable {

  /** Returns the data model for this record; generated code overrides this. */
  public SpecificData getSpecificData() {
    // Default implementation for backwards compatibility, overridden in generated
    // code
    return SpecificData.get();
  }

  /** Returns the logical-type conversion for a field position, or null if none. */
  public Conversion<?> getConversion(int field) {
    // for backward-compatibility. no older specific classes have conversions.
    return null;
  }

  @Override
  public void put(String fieldName, Object value) {
    Schema.Field field = getSchema().getField(fieldName);
    if (field == null) {
      throw new AvroRuntimeException("Not a valid schema field: " + fieldName);
    }
    // Delegate to the positional setter implemented by the generated class.
    put(field.pos(), value);
  }

  @Override
  public Object get(String fieldName) {
    Schema.Field field = getSchema().getField(fieldName);
    if (field == null) {
      throw new AvroRuntimeException("Not a valid schema field: " + fieldName);
    }
    return get(field.pos());
  }

  /** Looks up a conversion by field name; NPE if the field does not exist. */
  public Conversion<?> getConversion(String fieldName) {
    return getConversion(getSchema().getField(fieldName).pos());
  }

  @Override
  public boolean equals(Object that) {
    if (that == this)
      return true; // identical object
    if (!(that instanceof SpecificRecord))
      return false; // not a record
    if (this.getClass() != that.getClass())
      return false; // not same schema
    // Field-by-field comparison using the data model.
    return getSpecificData().compare(this, that, this.getSchema(), true) == 0;
  }

  @Override
  public int hashCode() {
    return getSpecificData().hashCode(this, this.getSchema());
  }

  @Override
  public int compareTo(SpecificRecord that) {
    return getSpecificData().compare(this, that, this.getSchema());
  }

  @Override
  public String toString() {
    return getSpecificData().toString(this);
  }

  @Override
  public void writeExternal(ObjectOutput out) throws IOException {
    // Java serialization is implemented in terms of Avro binary encoding.
    new SpecificDatumWriter<>(getSchema()).write(this, SpecificData.getEncoder(out));
  }

  @Override
  public void readExternal(ObjectInput in) throws IOException {
    new SpecificDatumReader<>(getSchema()).read(this, SpecificData.getDecoder(in));
  }

  /**
   * Returns true iff an instance supports the {@link MessageEncoder#encode} and
   * {@link MessageDecoder#decode} operations. Should only be used by
   * <code>SpecificDatumReader/Writer</code> to selectively use
   * {@link #customEncode} and {@link #customDecode} to optimize the
   * (de)serialization of values.
   */
  protected boolean hasCustomCoders() {
    return false;
  }

  /** Custom-coder encode hook; only meaningful when {@link #hasCustomCoders()} is true. */
  public void customEncode(Encoder out) throws IOException {
    throw new UnsupportedOperationException();
  }

  /** Custom-coder decode hook; only meaningful when {@link #hasCustomCoders()} is true. */
  public void customDecode(ResolvingDecoder in) throws IOException {
    throw new UnsupportedOperationException();
  }
}
| 7,260 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/SpecificDatumReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import org.apache.avro.Conversion;
import org.apache.avro.Schema;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.io.ResolvingDecoder;
import org.apache.avro.util.ClassUtils;
import java.io.IOException;
/**
* {@link org.apache.avro.io.DatumReader DatumReader} for generated Java
* classes.
*/
public class SpecificDatumReader<T> extends GenericDatumReader<T> {
public SpecificDatumReader() {
this(null, null, SpecificData.get());
}
/** Construct for reading instances of a class. */
public SpecificDatumReader(Class<T> c) {
this(SpecificData.getForClass(c));
setSchema(getSpecificData().getSchema(c));
}
/** Construct where the writer's and reader's schemas are the same. */
public SpecificDatumReader(Schema schema) {
this(schema, schema, SpecificData.getForSchema(schema));
}
/** Construct given writer's and reader's schema. */
public SpecificDatumReader(Schema writer, Schema reader) {
this(writer, reader, SpecificData.getForSchema(reader));
}
/**
* Construct given writer's schema, reader's schema, and a {@link SpecificData}.
*/
public SpecificDatumReader(Schema writer, Schema reader, SpecificData data) {
super(writer, reader, data);
}
/** Construct given a {@link SpecificData}. */
public SpecificDatumReader(SpecificData data) {
super(data);
}
/** Return the contained {@link SpecificData}. */
public SpecificData getSpecificData() {
return (SpecificData) getData();
}
@Override
public void setSchema(Schema actual) {
// if expected is unset and actual is a specific record,
// then default expected to schema of currently loaded class
if (getExpected() == null && actual != null && actual.getType() == Schema.Type.RECORD) {
SpecificData data = getSpecificData();
Class c = data.getClass(actual);
if (c != null && SpecificRecord.class.isAssignableFrom(c))
setExpected(data.getSchema(c));
}
super.setSchema(actual);
}
@Override
protected Class findStringClass(Schema schema) {
Class stringClass = null;
switch (schema.getType()) {
case STRING:
stringClass = getPropAsClass(schema, SpecificData.CLASS_PROP);
break;
case MAP:
stringClass = getPropAsClass(schema, SpecificData.KEY_CLASS_PROP);
break;
}
if (stringClass != null)
return stringClass;
return super.findStringClass(schema);
}
private Class getPropAsClass(Schema schema, String prop) {
String name = schema.getProp(prop);
if (name == null)
return null;
try {
return ClassUtils.forName(getData().getClassLoader(), name);
} catch (ClassNotFoundException e) {
throw new AvroRuntimeException(e);
}
}
@Override
protected Object readRecord(Object old, Schema expected, ResolvingDecoder in) throws IOException {
SpecificData data = getSpecificData();
if (data.useCustomCoders()) {
old = data.newRecord(old, expected);
if (old instanceof SpecificRecordBase) {
SpecificRecordBase d = (SpecificRecordBase) old;
if (d.hasCustomCoders()) {
d.customDecode(in);
return d;
}
}
}
return super.readRecord(old, expected, in);
}
@Override
protected void readField(Object record, Schema.Field field, Object oldDatum, ResolvingDecoder in, Object state)
throws IOException {
if (record instanceof SpecificRecordBase) {
Conversion<?> conversion = ((SpecificRecordBase) record).getConversion(field.pos());
Object datum;
if (conversion != null) {
datum = readWithConversion(oldDatum, field.schema(), field.schema().getLogicalType(), conversion, in);
} else {
datum = readWithoutConversion(oldDatum, field.schema(), in);
}
getData().setField(record, field.name(), field.pos(), datum);
} else {
super.readField(record, field, oldDatum, in, state);
}
}
}
| 7,261 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/SpecificDatumWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import java.io.IOException;
import org.apache.avro.AvroTypeException;
import org.apache.avro.Conversion;
import org.apache.avro.LogicalType;
import org.apache.avro.Schema;
import org.apache.avro.path.TracingAvroTypeException;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.path.TracingClassCastException;
import org.apache.avro.path.TracingNullPointException;
import org.apache.avro.io.Encoder;
import org.apache.avro.path.LocationStep;
/**
* {@link org.apache.avro.io.DatumWriter DatumWriter} for generated Java
* classes.
*/
public class SpecificDatumWriter<T> extends GenericDatumWriter<T> {
  public SpecificDatumWriter() {
    super(SpecificData.get());
  }

  public SpecificDatumWriter(Class<T> c) {
    super(SpecificData.get().getSchema(c), SpecificData.getForClass(c));
  }

  public SpecificDatumWriter(Schema schema) {
    super(schema, SpecificData.getForSchema(schema));
  }

  public SpecificDatumWriter(Schema root, SpecificData specificData) {
    super(root, specificData);
  }

  protected SpecificDatumWriter(SpecificData specificData) {
    super(specificData);
  }

  /** Returns the {@link SpecificData} implementation used by this writer. */
  public SpecificData getSpecificData() {
    return (SpecificData) getData();
  }

  @Override
  protected void writeEnum(Schema schema, Object datum, Encoder out) throws IOException {
    if (!(datum instanceof Enum))
      super.writeEnum(schema, datum, out); // punt to generic
    else
      out.writeEnum(((Enum<?>) datum).ordinal());
  }

  @Override
  protected void writeString(Schema schema, Object datum, Encoder out) throws IOException {
    // Non-CharSequence values whose class is registered as stringable are
    // serialized via their toString() representation.
    if (!(datum instanceof CharSequence) && getSpecificData().isStringable(datum.getClass())) {
      datum = datum.toString(); // convert to string
    }
    writeString(datum, out);
  }

  @Override
  protected void writeRecord(Schema schema, Object datum, Encoder out) throws IOException {
    // Fast path: when custom coders are enabled, let a generated record that
    // supports them encode itself instead of going through the generic path.
    if (datum instanceof SpecificRecordBase && this.getSpecificData().useCustomCoders()) {
      SpecificRecordBase d = (SpecificRecordBase) datum;
      if (d.hasCustomCoders()) {
        try {
          d.customEncode(out);
        } catch (NullPointerException e) {
          // wrap so the failing field's path can be traced into the message
          throw new TracingNullPointException(e, null, true);
        }
        return;
      }
    }
    super.writeRecord(schema, datum, out);
  }

  @Override
  protected void writeField(Object datum, Schema.Field f, Encoder out, Object state) throws IOException {
    if (datum instanceof SpecificRecordBase) {
      // Generated records may carry a per-field logical-type conversion.
      Conversion<?> conversion = ((SpecificRecordBase) datum).getConversion(f.pos());

      Schema fieldSchema = f.schema();
      LogicalType logicalType = fieldSchema.getLogicalType();
      Object value = getData().getField(datum, f.name(), f.pos());
      if (conversion != null && logicalType != null) {
        value = convert(fieldSchema, logicalType, conversion, value);
      }

      try {
        writeWithoutConversion(fieldSchema, value, out);
      } catch (TracingNullPointException | TracingClassCastException | TracingAvroTypeException e) {
        // record this field in the traced path before rethrowing
        e.tracePath(new LocationStep(".", f.name()));
        throw e;
      } catch (AvroTypeException ate) {
        throw addAvroTypeMsg(ate, " in field '" + f.name() + "'");
      }
    } else {
      super.writeField(datum, f, out, state);
    }
  }
}
| 7,262 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/specific/SpecificExceptionBase.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.specific;
import java.io.Externalizable;
import java.io.ObjectOutput;
import java.io.ObjectInput;
import java.io.IOException;
import org.apache.avro.AvroRemoteException;
import org.apache.avro.Schema;
/** Base class for specific exceptions. */
public abstract class SpecificExceptionBase extends AvroRemoteException implements SpecificRecord, Externalizable {

  public SpecificExceptionBase() {
    super();
  }

  public SpecificExceptionBase(Throwable value) {
    super(value);
  }

  public SpecificExceptionBase(Object value) {
    super(value);
  }

  public SpecificExceptionBase(Object value, Throwable cause) {
    super(value, cause);
  }

  @Override
  public abstract Schema getSchema();

  @Override
  public abstract Object get(int field);

  @Override
  public abstract void put(int field, Object value);

  @Override
  public boolean equals(Object that) {
    if (that == this)
      return true; // identical object
    if (!(that instanceof SpecificExceptionBase))
      return false; // not a record
    if (this.getClass() != that.getClass())
      return false; // not same schema
    return this.getSpecificData().compare(this, that, this.getSchema()) == 0;
  }

  @Override
  public int hashCode() {
    // Use getSpecificData() rather than SpecificData.get() so that hashCode()
    // and equals() are computed against the same SpecificData instance; this
    // preserves the equals/hashCode contract when generated subclasses
    // override getSpecificData().
    return getSpecificData().hashCode(this, this.getSchema());
  }

  @Override
  public abstract void writeExternal(ObjectOutput out) throws IOException;

  @Override
  public abstract void readExternal(ObjectInput in) throws IOException;

  /** Returns the {@link SpecificData} used for equality, hashing and comparison. */
  public SpecificData getSpecificData() {
    // Default implementation for backwards compatibility, overridden in generated
    // code
    return SpecificData.get();
  }
}
| 7,263 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/ByteBufferOutputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
/**
* Utility to collect data written to an {@link OutputStream} in
* {@link ByteBuffer}s.
*/
public class ByteBufferOutputStream extends OutputStream {
  public static final int BUFFER_SIZE = 8192;

  // chain of buffers in write order; the last one is the current write target
  private List<ByteBuffer> buffers;

  public ByteBufferOutputStream() {
    reset();
  }

  /** Returns all data written and resets the stream to be empty. */
  public List<ByteBuffer> getBufferList() {
    List<ByteBuffer> result = buffers;
    reset();
    for (Buffer buffer : result) {
      buffer.flip(); // switch each buffer from write mode to read mode
    }
    return result;
  }

  /** Prepend a list of ByteBuffers to this stream. */
  public void prepend(List<ByteBuffer> lists) {
    for (Buffer buffer : lists) {
      // mark as fully written so a later flip() exposes the whole content
      buffer.position(buffer.limit());
    }
    buffers.addAll(0, lists);
  }

  /** Append a list of ByteBuffers to this stream. */
  public void append(List<ByteBuffer> lists) {
    for (Buffer buffer : lists) {
      buffer.position(buffer.limit());
    }
    buffers.addAll(lists);
  }

  /** Discards all collected data and starts over with one empty buffer. */
  public void reset() {
    buffers = new ArrayList<>();
    buffers.add(ByteBuffer.allocate(BUFFER_SIZE));
  }

  /** Adds {@code buffer} to the chain as-is (no copy, no repositioning). */
  public void write(ByteBuffer buffer) {
    buffers.add(buffer);
  }

  @Override
  public void write(int b) {
    ByteBuffer buffer = buffers.get(buffers.size() - 1);
    if (buffer.remaining() < 1) {
      // current buffer is full; start a new one
      buffer = ByteBuffer.allocate(BUFFER_SIZE);
      buffers.add(buffer);
    }
    buffer.put((byte) b);
  }

  @Override
  public void write(byte[] b, int off, int len) {
    ByteBuffer buffer = buffers.get(buffers.size() - 1);
    int remaining = buffer.remaining();
    while (len > remaining) {
      // fill the current buffer, then continue in a fresh one
      buffer.put(b, off, remaining);
      len -= remaining;
      off += remaining;
      buffer = ByteBuffer.allocate(BUFFER_SIZE);
      buffers.add(buffer);
      remaining = buffer.remaining();
    }
    buffer.put(b, off, len);
  }

  /** Add a buffer to the output without copying, if possible. */
  public void writeBuffer(ByteBuffer buffer) throws IOException {
    if (buffer.remaining() < BUFFER_SIZE) {
      if (buffer.hasArray()) {
        // account for a possibly non-zero arrayOffset (e.g. sliced buffers);
        // using position() alone would copy the wrong bytes
        write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
      } else {
        // direct or read-only buffers expose no backing array; copy explicitly
        ByteBuffer dup = buffer.duplicate();
        byte[] copy = new byte[dup.remaining()];
        dup.get(copy);
        write(copy, 0, copy.length);
      }
    } else { // append w/o copying bytes
      ByteBuffer dup = buffer.duplicate();
      dup.position(buffer.limit()); // ready for flip
      buffers.add(dup);
    }
  }
}
| 7,264 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/WeakIdentityHashMap.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.avro.util;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
/**
* Implements a combination of WeakHashMap and IdentityHashMap. Useful for
* caches that need to key off of a == comparison instead of a .equals.
*
* <b> This class is not a general-purpose Map implementation! While this class
* implements the Map interface, it intentionally violates Map's general
* contract, which mandates the use of the equals method when comparing objects.
* This class is designed for use only in the rare cases wherein
* reference-equality semantics are required.
*
* Note that this implementation is not synchronized. </b>
*/
public class WeakIdentityHashMap<K, V> implements Map<K, V> {
private final ReferenceQueue<K> queue = new ReferenceQueue<>();
private Map<IdentityWeakReference, V> backingStore = new ConcurrentHashMap<>();
public WeakIdentityHashMap() {
}
@Override
public void clear() {
backingStore.clear();
reap();
}
@Override
public boolean containsKey(Object key) {
reap();
return backingStore.containsKey(new IdentityWeakReference(key));
}
@Override
public boolean containsValue(Object value) {
reap();
return backingStore.containsValue(value);
}
@Override
public Set<Map.Entry<K, V>> entrySet() {
reap();
Set<Map.Entry<K, V>> ret = new HashSet<>();
for (Map.Entry<IdentityWeakReference, V> ref : backingStore.entrySet()) {
final K key = ref.getKey().get();
final V value = ref.getValue();
Map.Entry<K, V> entry = new Map.Entry<K, V>() {
@Override
public K getKey() {
return key;
}
@Override
public V getValue() {
return value;
}
@Override
public V setValue(V value) {
throw new UnsupportedOperationException();
}
};
ret.add(entry);
}
return Collections.unmodifiableSet(ret);
}
@Override
public Set<K> keySet() {
reap();
Set<K> ret = new HashSet<>();
for (IdentityWeakReference ref : backingStore.keySet()) {
ret.add(ref.get());
}
return Collections.unmodifiableSet(ret);
}
@Override
public boolean equals(Object o) {
if (!(o instanceof WeakIdentityHashMap)) {
return false;
}
return backingStore.equals(((WeakIdentityHashMap) o).backingStore);
}
@Override
public V get(Object key) {
reap();
return backingStore.get(new IdentityWeakReference(key));
}
@Override
public V put(K key, V value) {
reap();
return backingStore.put(new IdentityWeakReference(key), value);
}
@Override
public int hashCode() {
reap();
return backingStore.hashCode();
}
@Override
public boolean isEmpty() {
reap();
return backingStore.isEmpty();
}
@Override
public void putAll(Map t) {
throw new UnsupportedOperationException();
}
@Override
public V remove(Object key) {
reap();
return backingStore.remove(new IdentityWeakReference(key));
}
@Override
public int size() {
reap();
return backingStore.size();
}
@Override
public Collection<V> values() {
reap();
return backingStore.values();
}
private synchronized void reap() {
Object zombie = queue.poll();
while (zombie != null) {
IdentityWeakReference victim = (IdentityWeakReference) zombie;
backingStore.remove(victim);
zombie = queue.poll();
}
}
class IdentityWeakReference extends WeakReference<K> {
int hash;
@SuppressWarnings("unchecked")
IdentityWeakReference(Object obj) {
super((K) obj, queue);
hash = System.identityHashCode(obj);
}
@Override
public int hashCode() {
return hash;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof WeakIdentityHashMap.IdentityWeakReference)) {
return false;
}
IdentityWeakReference ref = (IdentityWeakReference) o;
return this.get() == ref.get();
}
}
}
| 7,265 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/ClassUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
public class ClassUtils {

  private ClassUtils() {
    // utility class; not instantiable
  }

  /**
   * Loads a class using the class loader. 1. The class loader of the current
   * class is being used. 2. The thread context class loader is being used. If
   * both approaches fail, a ClassNotFoundException is thrown.
   *
   * @param className The name of the class to load.
   * @return The loaded class.
   * @throws ClassNotFoundException if no class loader could load the class.
   */
  public static Class<?> forName(String className) throws ClassNotFoundException {
    return ClassUtils.forName(ClassUtils.class, className);
  }

  /**
   * Loads a class using the class loader. 1. The class loader of the context
   * class is being used. 2. The thread context class loader is being used. If
   * both approaches fail, a ClassNotFoundException is thrown.
   *
   * @param contextClass The name of a context class to use.
   * @param className    The name of the class to load
   * @return The loaded class.
   * @throws ClassNotFoundException if no class loader could load the class.
   */
  public static Class<?> forName(Class<?> contextClass, String className) throws ClassNotFoundException {
    Class<?> c = null;
    if (contextClass.getClassLoader() != null) {
      c = forName(className, contextClass.getClassLoader());
    }
    if (c == null && Thread.currentThread().getContextClassLoader() != null) {
      c = forName(className, Thread.currentThread().getContextClassLoader());
    }
    if (c == null) {
      throw new ClassNotFoundException("Failed to load class " + className);
    }
    return c;
  }

  /**
   * Loads a class using the class loader. 1. The given class loader is being
   * used. 2. The thread context class loader is being used. If both approaches
   * fail, a ClassNotFoundException is thrown.
   *
   * @param classLoader The classloader to use.
   * @param className   The name of the class to load
   * @return The loaded class.
   * @throws ClassNotFoundException if no class loader could load the class.
   */
  public static Class<?> forName(ClassLoader classLoader, String className) throws ClassNotFoundException {
    Class<?> c = null;
    if (classLoader != null) {
      c = forName(className, classLoader);
    }
    if (c == null && Thread.currentThread().getContextClassLoader() != null) {
      c = forName(className, Thread.currentThread().getContextClassLoader());
    }
    if (c == null) {
      throw new ClassNotFoundException("Failed to load class " + className);
    }
    return c;
  }

  /**
   * Loads a {@link Class} from the specified {@link ClassLoader} without throwing
   * {@link ClassNotFoundException}.
   *
   * @param className   the name of the class to load, may be null
   * @param classLoader the loader to try, may be null
   * @return the class, or null if it could not be loaded
   */
  private static Class<?> forName(String className, ClassLoader classLoader) {
    Class<?> c = null;
    if (classLoader != null && className != null) {
      try {
        c = Class.forName(className, true, classLoader);
      } catch (ClassNotFoundException e) {
        // Ignore and return null
      }
    }
    return c;
  }
}
| 7,266 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/ReusableByteBufferInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.avro.util;
import java.io.IOException;
import java.io.InputStream;
import java.nio.Buffer;
import java.nio.ByteBuffer;
/**
 * An {@link InputStream} view over a {@link ByteBuffer} that can be re-pointed
 * at a new buffer via {@link #setByteBuffer(ByteBuffer)}, avoiding per-use
 * allocation. Supports mark/reset and skip.
 */
public class ReusableByteBufferInputStream extends InputStream {

  private static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0);

  // work around issues compiling with Java11 but running with 8
  // due to ByteBuffer overriding several methods: position()/limit()/etc. are
  // always invoked through the Buffer supertype reference
  private ByteBuffer byteBuffer = EMPTY_BUFFER;
  private Buffer buffer = byteBuffer;
  // position restored by reset(); initialized to the buffer's start position
  private int mark = 0;

  /** Points this stream at {@code buf}. The passed-in buffer is not modified. */
  public void setByteBuffer(ByteBuffer buf) {
    // do not modify the buffer that is passed in
    this.byteBuffer = buf.duplicate();
    this.buffer = byteBuffer;
    this.mark = buf.position();
  }

  @Override
  public int read() throws IOException {
    if (buffer.hasRemaining()) {
      return byteBuffer.get() & 0xff; // mask so the byte is returned as 0..255
    } else {
      return -1;
    }
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    if (len == 0) {
      // per the InputStream contract, a zero-length read returns 0 — even at
      // end of stream
      return 0;
    }
    if (buffer.remaining() <= 0) {
      return -1;
    }
    // allow IndexOutOfBoundsException to be thrown by ByteBuffer#get
    int bytesToRead = Math.min(len, buffer.remaining());
    byteBuffer.get(b, off, bytesToRead);
    return bytesToRead;
  }

  @Override
  public long skip(long n) throws IOException {
    if (n <= 0) {
      // n may be negative and results in skipping 0 bytes, according to javadoc
      return 0;
    }
    // this catches n > Integer.MAX_VALUE
    int bytesToSkip = n > buffer.remaining() ? buffer.remaining() : (int) n;
    buffer.position(buffer.position() + bytesToSkip);
    return bytesToSkip;
  }

  @Override
  public synchronized void mark(int readLimit) {
    // readLimit is ignored. there is no requirement to implement readLimit, it
    // is a way for implementations to avoid buffering too much. since all data
    // for this stream is held in memory, this has no need for such a limit.
    this.mark = buffer.position();
  }

  @Override
  public synchronized void reset() throws IOException {
    buffer.position(mark);
  }

  @Override
  public boolean markSupported() {
    return true;
  }
}
| 7,267 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/TimePeriod.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
import java.io.Serializable;
import java.time.DateTimeException;
import java.time.Duration;
import java.time.Period;
import java.time.chrono.ChronoPeriod;
import java.time.chrono.IsoChronology;
import java.time.temporal.ChronoUnit;
import java.time.temporal.Temporal;
import java.time.temporal.TemporalAmount;
import java.time.temporal.TemporalUnit;
import java.time.temporal.UnsupportedTemporalTypeException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import static java.time.temporal.ChronoUnit.DAYS;
import static java.time.temporal.ChronoUnit.MILLIS;
import static java.time.temporal.ChronoUnit.MONTHS;
import static java.util.Collections.unmodifiableList;
import static java.util.Objects.requireNonNull;
/**
* <p>
* A temporal amount to model an {@link org.apache.avro.LogicalTypes.Duration
* Avro duration} (the logical type).
* </p>
*
* <p>
* It consists of a number of months, days and milliseconds, all modelled as an
* unsigned integer.
* </p>
*
* <p>
* Compared to {@link Period java.time.Period}, this class has a smaller range
* ('only' supporting a little less than 358 million years), and cannot support
* negative time periods.
* </p>
*
* <p>
* Compared to {@link Duration java.time.Duration}, this class has less
* precision (milliseconds compared to nanoseconds), cannot support negative
* durations, and has a much smaller range. Where {@code java.time.Duration}
* supports fixed ranges up to about 68 years, {@code TimePeriod} can only
* handle about 49 days.
* </p>
*
* <p>
* Comparison with the regular {@code java.time} classes:
* </p>
*
* <table>
* <tr>
* <th></th>
* <th>TimePeriod</th>
* <th>{@link Period}</th>
* <th>{@link Duration}</th>
* </tr>
* <tr>
* <td>Precision</td>
* <td>milliseconds</td>
* <td>days</td>
* <td>nanoseconds</td>
* </tr>
* <tr>
* <td>Time range (approx.)</td>
* <td>0 - 49 days</td>
* <td>unsupported</td>
* <td>-68 - 68 years</td>
* </tr>
* <tr>
* <td>Date range (approx.)</td>
 * <td>0 to 358 million years</td>
* <td>-2.3 to 2.3 billion years</td>
* <td>unsupported</td>
* </tr>
* </table>
*
* @see <a href=
* "https://avro.apache.org/docs/1.11.1/specification/#duration">Avro 1.11
* specification on duration</a>
*/
public final class TimePeriod implements TemporalAmount, Serializable {
private static final long MAX_UNSIGNED_INT = 0xffffffffL;
private static final long MONTHS_PER_YEAR = 12;
private static final long MONTHS_PER_DECADE = MONTHS_PER_YEAR * 10;
private static final long MONTHS_PER_CENTURY = MONTHS_PER_DECADE * 10;
private static final long MONTHS_PER_MILLENNIUM = MONTHS_PER_CENTURY * 10;
private static final long MILLIS_PER_SECOND = 1_000;
private static final long MILLIS_PER_MINUTE = MILLIS_PER_SECOND * 60;
private static final long MILLIS_PER_HOUR = MILLIS_PER_MINUTE * 60;
private static final long MILLIS_IN_HALF_DAY = MILLIS_PER_HOUR * 12;
private static final long MICROS_PER_MILLI = 1_000;
private static final long NANOS_PER_MILLI = 1_000_000;
private final long months;
private final long days;
private final long millis;
  /**
   * Create a TimePeriod from another TemporalAmount, such as a {@link Period} or
   * a {@link Duration}.
   *
   * @param amount a temporal amount
   * @return the corresponding TimePeriod
   * @throws DateTimeException                if the amount uses a non-ISO
   *                                          chronology, or contains
   *                                          micro-/nanoseconds that are not a
   *                                          whole number of milliseconds
   * @throws UnsupportedTemporalTypeException if the amount contains a unit that
   *                                          is not one of the supported
   *                                          {@link ChronoUnit}s
   * @throws ArithmeticException              if a component overflows the
   *                                          unsigned int range
   */
  public static TimePeriod from(TemporalAmount amount) {
    if (requireNonNull(amount, "amount") instanceof TimePeriod) {
      return (TimePeriod) amount;
    }
    // Only the ISO chronology is supported: the fixed conversion factors below
    // assume ISO definitions of years, months, weeks, etc.
    if (amount instanceof ChronoPeriod) {
      if (!IsoChronology.INSTANCE.equals(((ChronoPeriod) amount).getChronology())) {
        throw new DateTimeException("TimePeriod requires ISO chronology: " + amount);
      }
    }
    // Fold every unit of the amount into the three components, re-validating
    // the unsigned int range after each addition.
    long months = 0;
    long days = 0;
    long millis = 0;
    for (TemporalUnit unit : amount.getUnits()) {
      if (unit instanceof ChronoUnit) {
        long unitAmount = amount.get(unit);
        switch ((ChronoUnit) unit) {
        case MILLENNIA:
          months = unsignedInt(months + unitAmount * MONTHS_PER_MILLENNIUM);
          break;
        case CENTURIES:
          months = unsignedInt(months + unitAmount * MONTHS_PER_CENTURY);
          break;
        case DECADES:
          months = unsignedInt(months + unitAmount * MONTHS_PER_DECADE);
          break;
        case YEARS:
          months = unsignedInt(months + unitAmount * MONTHS_PER_YEAR);
          break;
        case MONTHS:
          months = unsignedInt(months + unitAmount);
          break;
        case WEEKS:
          days = unsignedInt(days + unitAmount * 7);
          break;
        case DAYS:
          days = unsignedInt(days + unitAmount);
          break;
        case HALF_DAYS:
          days = unsignedInt(days + (unitAmount / 2)); // Truncates halves
          if (unitAmount % 2 != 0) {
            // an odd number of half-days contributes 12 hours of milliseconds
            millis = unsignedInt(millis + MILLIS_IN_HALF_DAY);
          }
          break;
        case HOURS:
          millis = unsignedInt(millis + unitAmount * MILLIS_PER_HOUR);
          break;
        case MINUTES:
          millis = unsignedInt(millis + unitAmount * MILLIS_PER_MINUTE);
          break;
        case SECONDS:
          millis = unsignedInt(millis + unitAmount * MILLIS_PER_SECOND);
          break;
        case MILLIS:
          millis = unsignedInt(millis + unitAmount);
          break;
        case MICROS:
          // sub-millisecond precision cannot be represented; accept only whole
          // milliseconds
          if (unitAmount % MICROS_PER_MILLI != 0) {
            throw new DateTimeException(
                "Cannot add " + unitAmount + " microseconds: not a whole number of milliseconds");
          }
          millis = unsignedInt(millis + unitAmount / MICROS_PER_MILLI);
          break;
        case NANOS:
          if (unitAmount % NANOS_PER_MILLI != 0) {
            throw new DateTimeException(
                "Cannot add " + unitAmount + " nanoseconds: not a whole number of milliseconds");
          }
          millis = unsignedInt(millis + unitAmount / NANOS_PER_MILLI);
          break;
        default:
          throw new UnsupportedTemporalTypeException("Unsupported unit: " + unit);
        }
      } else {
        throw new UnsupportedTemporalTypeException("Unsupported unit: " + unit);
      }
    }
    return new TimePeriod(months, days, millis);
  }
/**
* Create a TimePeriod from a number of months, days and milliseconds
*
* @param months a number of months
* @param days a number of days
* @param millis a number of milliseconds
* @return the corresponding TimePeriod
* @throws ArithmeticException if any of the parameters does not fit an unsigned
* long (0..4294967296)
*/
public static TimePeriod of(long months, long days, long millis) {
return new TimePeriod(unsignedInt(months), unsignedInt(days), unsignedInt(millis));
}
private static long unsignedInt(long number) {
if (number != (number & MAX_UNSIGNED_INT)) {
throw new ArithmeticException("Overflow/underflow of unsigned int");
}
return number;
}
  /**
   * Creates a TimePeriod. Callers must have already validated every component
   * via {@code unsignedInt} (see {@code from} and {@code of}).
   *
   * @param months months component (0..4294967295)
   * @param days   days component (0..4294967295)
   * @param millis milliseconds component (0..4294967295)
   */
  private TimePeriod(long months, long days, long millis) {
    this.months = months;
    this.days = days;
    this.millis = millis;
  }
  /**
   * Converts this TimePeriod to a {@link Duration} by delegating to
   * {@link Duration#from(java.time.temporal.TemporalAmount)}.
   * NOTE(review): Duration cannot represent estimated units, so this presumably
   * fails for periods with a months component — confirm against Duration#from.
   */
  public Duration toDuration() {
    return Duration.from(this);
  }
public Period toPeriod() {
if (isDateBased()) {
// We use unsigned ints, which have double the range of a signed int that
// Period uses. We can split months to years and months to ensure there's no
// overflow. But we cannot split days, as both days and months have varying
// lengths.
int yearsAsInt = (int) (months / MONTHS_PER_YEAR);
int monthsAsInt = (int) (months % MONTHS_PER_YEAR);
int daysAsInt = (int) days;
if (days != daysAsInt) {
throw new DateTimeException("Too many days: a Period can contain at most " + Integer.MAX_VALUE + " days.");
}
return Period.ofYears(yearsAsInt).withMonths(monthsAsInt).withDays(daysAsInt);
}
throw new DateTimeException("Cannot convert this TimePeriod to a Period: is not date based");
}
/**
 * Tells whether this TimePeriod is date based, that is, whether its
 * milliseconds component is zero.
 *
 * @return {@code true} iff the TimePeriod is date based
 */
public boolean isDateBased() {
  return 0 == millis;
}
/**
 * Tells whether this TimePeriod is time based, that is, whether both its
 * months and days components are zero.
 *
 * @return {@code true} iff the TimePeriod is time based
 */
public boolean isTimeBased() {
  return 0 == months && 0 == days;
}
/** @return the months component, an unsigned int carried in a long */
public long getMonths() {
return months;
}
/** @return the days component, an unsigned int carried in a long */
public long getDays() {
return days;
}
/** @return the milliseconds component, an unsigned int carried in a long */
public long getMillis() {
return millis;
}
/**
 * Returns the value of the requested unit; only MONTHS, DAYS and MILLIS are
 * supported.
 *
 * @throws UnsupportedTemporalTypeException for any other unit
 */
@Override
public long get(TemporalUnit unit) {
  if (unit == MONTHS) {
    return months;
  }
  if (unit == DAYS) {
    return days;
  }
  if (unit == MILLIS) {
    return millis;
  }
  throw new UnsupportedTemporalTypeException("Unsupported unit: " + unit);
}
/**
 * Lists the units with a non-zero value, in MONTHS, DAYS, MILLIS order.
 * Zero components are omitted to stay compatible with the Java Time classes
 * Period and Duration where possible.
 */
@Override
public List<TemporalUnit> getUnits() {
  final List<TemporalUnit> nonZeroUnits = new ArrayList<>();
  if (months != 0) {
    nonZeroUnits.add(MONTHS);
  }
  if (days != 0) {
    nonZeroUnits.add(DAYS);
  }
  if (millis != 0) {
    nonZeroUnits.add(MILLIS);
  }
  return unmodifiableList(nonZeroUnits);
}
/** Adds this period to the given temporal, component by component. */
@Override
public Temporal addTo(Temporal temporal) {
return addTo(temporal, months, days, millis);
}
/** Subtracts this period from the given temporal by adding negated components. */
@Override
public Temporal subtractFrom(Temporal temporal) {
return addTo(temporal, -months, -days, -millis);
}
// Applies the given component deltas to a temporal. Zero components are
// skipped so that temporals lacking a given unit still work, as long as the
// unsupported component is zero.
private Temporal addTo(Temporal temporal, long months, long days, long millis) {
  Temporal result = temporal;
  if (months != 0) {
    result = result.plus(months, MONTHS);
  }
  if (days != 0) {
    result = result.plus(days, DAYS);
  }
  if (millis != 0) {
    result = result.plus(millis, MILLIS);
  }
  return result;
}
@Override
public boolean equals(Object o) {
  if (o == this) {
    return true;
  }
  // Exact-class comparison (not instanceof): a subclass is never equal.
  if (o == null || o.getClass() != getClass()) {
    return false;
  }
  final TimePeriod other = (TimePeriod) o;
  return other.months == months && other.days == days && other.millis == millis;
}
@Override
public int hashCode() {
// Derived from the same three components as equals(), keeping the contract.
return Objects.hash(months, days, millis);
}
/**
 * Renders this period in ISO-8601 style, e.g. {@code P1Y2M3DT04:05:06.007}.
 * A zero period renders as {@code P0D}, matching {@link Period#toString()}.
 */
@Override
public String toString() {
  StringBuilder buffer = new StringBuilder();
  buffer.append("P");
  if (months != 0) {
    int years = (int) (months / MONTHS_PER_YEAR);
    int monthsLeft = (int) (months % MONTHS_PER_YEAR);
    if (years != 0) {
      buffer.append(years).append("Y");
    }
    if (monthsLeft != 0) {
      buffer.append(monthsLeft).append("M");
    }
  }
  if (days != 0 || (months == 0 && millis == 0)) {
    // Fix: the day count must carry its ISO-8601 "D" designator. Previously
    // the unit was omitted, producing invalid output such as "P5" for five
    // days and "P0" for the zero period.
    buffer.append(days).append("D");
  }
  if (millis != 0) {
    // Decompose the millisecond count into H/M/S/millis, coarse to fine.
    long millisLeft = millis;
    int hours = (int) (millisLeft / MILLIS_PER_HOUR);
    millisLeft -= MILLIS_PER_HOUR * hours;
    int minutes = (int) (millisLeft / MILLIS_PER_MINUTE);
    millisLeft -= MILLIS_PER_MINUTE * minutes;
    int seconds = (int) (millisLeft / MILLIS_PER_SECOND);
    millisLeft -= MILLIS_PER_SECOND * seconds;
    // Emit only as much of the time part as is non-zero.
    if (millisLeft != 0) {
      buffer.append(String.format("T%02d:%02d:%02d.%03d", hours, minutes, seconds, millisLeft));
    } else if (seconds != 0) {
      buffer.append(String.format("T%02d:%02d:%02d", hours, minutes, seconds));
    } else if (minutes != 0) {
      buffer.append(String.format("T%02d:%02d", hours, minutes));
    } else {
      buffer.append(String.format("T%02d", hours));
    }
  }
  return buffer.toString();
}
}
| 7,268 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/MapUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
public class MapUtil {
// Utility class: static methods only, never instantiated.
private MapUtil() {
  super();
}
/**
 * A temporary workaround for Java 8 specific performance issue JDK-8161372
 * .<br>
 * This class should be removed once we drop Java 8 support.
 *
 * @see <a href=
 *      "https://bugs.openjdk.java.net/browse/JDK-8161372">JDK-8161372</a>
 */
public static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key, Function<K, V> mappingFunction) {
  // Fast path: a plain get() avoids computeIfAbsent's locking when the key
  // is already present (the JDK-8161372 pessimization).
  final V existing = map.get(key);
  return existing != null ? existing : map.computeIfAbsent(key, mappingFunction::apply);
}
}
| 7,269 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/Utf8.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.avro.SystemLimitException;
import org.apache.avro.io.BinaryData;
/**
 * A Utf8 string. Unlike {@link String}, instances are mutable. This is more
 * efficient than {@link String} when reading or writing a sequence of values,
 * as a single instance may be reused.
 */
public class Utf8 implements Comparable<Utf8>, CharSequence, Externalizable {
private static final byte[] EMPTY = new byte[0];
// UTF-8 encoded content; only the first 'length' bytes are meaningful — the
// backing array may be larger than the logical content.
private byte[] bytes;
// Cached hash code; 0 means "not computed yet".
private int hash;
// Number of valid bytes in 'bytes'.
private int length;
// Lazily decoded String form; null until needed, cleared on mutation.
private String string;
/** Creates an empty Utf8. */
public Utf8() {
bytes = EMPTY;
}
/**
 * Creates a Utf8 holding the UTF-8 encoding of the given String.
 *
 * @throws org.apache.avro.SystemLimitException if the encoded length exceeds
 *                                              the configured maximum
 */
public Utf8(String string) {
byte[] bytes = getBytesFor(string);
int length = bytes.length;
SystemLimitException.checkMaxStringLength(length);
this.bytes = bytes;
this.length = length;
this.string = string;
}
/** Copy constructor; takes a private copy of the other instance's valid bytes. */
public Utf8(Utf8 other) {
this.length = other.length;
this.bytes = Arrays.copyOf(other.bytes, other.length);
this.string = other.string;
this.hash = other.hash;
}
/**
 * Wraps (does not copy) the given UTF-8 encoded bytes.
 *
 * @throws org.apache.avro.SystemLimitException if the length exceeds the
 *                                              configured maximum
 */
public Utf8(byte[] bytes) {
int length = bytes.length;
SystemLimitException.checkMaxStringLength(length);
this.bytes = bytes;
this.length = length;
}
/**
 * Return UTF-8 encoded bytes. Only valid through {@link #getByteLength()}.
 */
public byte[] getBytes() {
return bytes;
}
/**
 * Return length in bytes.
 *
 * @deprecated call {@link #getByteLength()} instead.
 */
@Deprecated
public int getLength() {
return length;
}
/** Return length in bytes. */
public int getByteLength() {
return length;
}
/**
 * Set length in bytes. Should called whenever byte content changes, even if the
 * length does not change, as this also clears the cached String.
 *
 * @deprecated call {@link #setByteLength(int)} instead.
 */
@Deprecated
public Utf8 setLength(int newLength) {
return setByteLength(newLength);
}
/**
 * Set length in bytes. Should called whenever byte content changes, even if the
 * length does not change, as this also clears the cached String.
 */
public Utf8 setByteLength(int newLength) {
SystemLimitException.checkMaxStringLength(newLength);
// Grow the backing array if needed; it is never shrunk, so bytes.length may
// exceed the logical length afterwards.
if (this.bytes.length < newLength) {
this.bytes = Arrays.copyOf(this.bytes, newLength);
}
this.length = newLength;
// Content is assumed changed: invalidate the cached String and hash.
this.string = null;
this.hash = 0;
return this;
}
/** Set to the contents of a String. */
public Utf8 set(String string) {
byte[] bytes = getBytesFor(string);
int length = bytes.length;
SystemLimitException.checkMaxStringLength(length);
this.bytes = bytes;
this.length = length;
this.string = string;
this.hash = 0;
return this;
}
/** Set to the contents of another Utf8, reusing this buffer when possible. */
public Utf8 set(Utf8 other) {
if (this.bytes.length < other.length) {
this.bytes = new byte[other.length];
}
this.length = other.length;
System.arraycopy(other.bytes, 0, bytes, 0, length);
this.string = other.string;
this.hash = other.hash;
return this;
}
@Override
public String toString() {
if (this.length == 0)
return "";
if (this.string == null) {
// Decode lazily and cache; cleared whenever the content mutates.
this.string = new String(bytes, 0, length, StandardCharsets.UTF_8);
}
return this.string;
}
@Override
public boolean equals(Object o) {
if (o == this)
return true;
if (!(o instanceof Utf8))
return false;
Utf8 that = (Utf8) o;
if (!(this.length == that.length))
return false;
// Compare only the valid prefix; bytes past 'length' are irrelevant.
byte[] thatBytes = that.bytes;
for (int i = 0; i < this.length; i++)
if (bytes[i] != thatBytes[i])
return false;
return true;
}
@Override
public int hashCode() {
// 0 doubles as the "not computed" sentinel; content hashing to 0 is simply
// recomputed on each call.
int h = hash;
if (h == 0) {
byte[] bytes = this.bytes;
int length = this.length;
for (int i = 0; i < length; i++) {
h = h * 31 + bytes[i];
}
this.hash = h;
}
return h;
}
@Override
public int compareTo(Utf8 that) {
return BinaryData.compareBytes(this.bytes, 0, this.length, that.bytes, 0, that.length);
}
// CharSequence implementation; delegates to the decoded String, so these
// operate on chars, not bytes.
@Override
public char charAt(int index) {
return toString().charAt(index);
}
@Override
public int length() {
return toString().length();
}
@Override
public CharSequence subSequence(int start, int end) {
return toString().subSequence(start, end);
}
/** Gets the UTF-8 bytes for a String */
public static byte[] getBytesFor(String str) {
return str.getBytes(StandardCharsets.UTF_8);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
// Fix: serialize only the valid region. Writing bytes.length and the whole
// array would include stale bytes past 'length' whenever the backing array
// is oversized (e.g. after setByteLength() shrank the logical length).
out.writeInt(length);
out.write(bytes, 0, length);
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
setByteLength(in.readInt());
// Fix: read exactly 'length' bytes. readFully(bytes) would over-read when a
// reused instance's backing array is larger than the serialized payload.
in.readFully(bytes, 0, length);
}
}
| 7,270 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/ReusableByteArrayInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.avro.util;
import java.io.ByteArrayInputStream;
/**
 * A {@link ByteArrayInputStream} whose backing buffer can be swapped, letting
 * one stream instance be reused across many byte arrays.
 */
public class ReusableByteArrayInputStream extends ByteArrayInputStream {
/** Creates a stream with no data; call {@link #setByteArray} before reading. */
public ReusableByteArrayInputStream() {
  super(new byte[0]);
}
/**
 * Points this stream at a region of a byte array and rewinds it.
 *
 * @param buf    the array to read from (not copied)
 * @param offset index of the first readable byte; also becomes the mark
 * @param length maximum number of readable bytes (clamped to the array end)
 */
public void setByteArray(final byte[] buf, final int offset, final int length) {
  final int limit = Math.min(offset + length, buf.length);
  this.buf = buf;
  this.mark = offset;
  this.pos = offset;
  this.count = limit;
}
}
| 7,271 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/MapEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
import java.util.Map;
/**
 * An implementation of {@link Map.Entry} with well-defined member names.
 * <p>
 * Using this class helps make Avro immune from the naming variations of
 * key/value fields among several {@link Map.Entry} implementations. If objects
 * of this class are used instead of the regular ones obtained by
 * {@link Map#entrySet()}, then we need not worry about the actual field names
 * or any changes to them in the future.
 * <p>
 * Example: {@code ConcurrentHashMap.MapEntry} does not name the fields as key/
 * value in Java 1.8 while it used to do so in Java 1.7
 *
 * @param <K> Key of the map-entry
 * @param <V> Value of the map-entry
 */
public class MapEntry<K, V> implements Map.Entry<K, V> {
K key;
V value;
public MapEntry(K key, V value) {
this.key = key;
this.value = value;
}
@Override
public K getKey() {
return key;
}
@Override
public V getValue() {
return value;
}
/** Replaces the value and returns the previous one, per the Map.Entry contract. */
@Override
public V setValue(V value) {
V oldValue = this.value;
this.value = value;
return oldValue;
}
// Fix: Map.Entry specifies equals/hashCode contracts that every entry
// implementation must honor; without them this class could never compare
// equal to a JDK entry (e.g. AbstractMap.SimpleEntry) with the same mapping.
@Override
public boolean equals(Object o) {
if (o == this)
return true;
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?, ?> that = (Map.Entry<?, ?>) o;
return (key == null ? that.getKey() == null : key.equals(that.getKey()))
&& (value == null ? that.getValue() == null : value.equals(that.getValue()));
}
// Exactly the hash mandated by the Map.Entry interface.
@Override
public int hashCode() {
return (key == null ? 0 : key.hashCode()) ^ (value == null ? 0 : value.hashCode());
}
@Override
public String toString() {
return key + "=" + value;
}
}
| 7,272 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/RandomData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericArray;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import java.io.File;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
/**
 * Generates schema data as Java objects with random values.
 *
 * NOTE: for a fixed seed the output is deterministic only as long as the exact
 * sequence of Random draws is preserved; do not reorder generation logic.
 */
public class RandomData implements Iterable<Object> {
// Field property: when set (to anything non-null) on a record field, the
// field's declared default value is emitted instead of a random one.
public static final String USE_DEFAULT = "use-default";
private final GenericData genericData;
private static final int MILLIS_IN_DAY = (int) Duration.ofDays(1).toMillis();
// Schema each generated datum conforms to.
private final Schema root;
// Seed for the per-iterator Random, making iteration repeatable.
private final long seed;
// Number of data to generate per iteration.
private final int count;
// When true, STRING values are produced as Utf8 instead of java.lang.String.
private final boolean utf8ForString;
public RandomData(Schema schema, int count) {
this(schema, count, false);
}
public RandomData(Schema schema, int count, long seed) {
this(schema, count, seed, false);
}
public RandomData(Schema schema, int count, boolean utf8ForString) {
this(schema, count, System.currentTimeMillis(), utf8ForString);
}
public RandomData(Schema schema, int count, long seed, boolean utf8ForString) {
this(GenericData.get(), schema, count, seed, utf8ForString);
}
public RandomData(GenericData genericData, Schema schema, int count) {
this(genericData, schema, count, false);
}
public RandomData(GenericData genericData, Schema schema, int count, long seed) {
this(genericData, schema, count, seed, false);
}
public RandomData(GenericData genericData, Schema schema, int count, boolean utf8ForString) {
this(genericData, schema, count, System.currentTimeMillis(), utf8ForString);
}
public RandomData(GenericData genericData, Schema schema, int count, long seed, boolean utf8ForString) {
this.genericData = genericData;
this.root = schema;
this.seed = seed;
this.count = count;
this.utf8ForString = utf8ForString;
}
/** Returns a fresh iterator producing {@code count} random data. */
@Override
public Iterator<Object> iterator() {
return new Iterator<Object>() {
private int n;
// Each iterator gets its own Random seeded identically, so repeated
// iterations yield the same sequence.
private final Random random = new Random(seed);
@Override
public boolean hasNext() {
return n < count;
}
@Override
public Object next() {
n++;
return generate(root, random, 0);
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
// Recursively generates a random datum for the schema; 'd' is the recursion
// depth, used to shrink collection sizes as nesting deepens.
@SuppressWarnings(value = "unchecked")
private Object generate(Schema schema, Random random, int d) {
switch (schema.getType()) {
case RECORD:
Object record = genericData.newRecord(null, schema);
for (Schema.Field field : schema.getFields()) {
// Fields flagged with USE_DEFAULT get their declared default instead
// of a random value.
Object value = (field.getObjectProp(USE_DEFAULT) == null) ? generate(field.schema(), random, d + 1)
: GenericData.get().getDefaultValue(field);
genericData.setField(record, field.name(), field.pos(), value);
}
return record;
case ENUM:
List<String> symbols = schema.getEnumSymbols();
return genericData.createEnum(symbols.get(random.nextInt(symbols.size())), schema);
case ARRAY:
// 2..6 elements at the top, shrinking with depth (possibly to 0).
int length = Math.max(0, (random.nextInt(5) + 2) - d);
GenericArray<Object> array = (GenericArray<Object>) genericData.newArray(null, length, schema);
for (int i = 0; i < length; i++)
array.add(generate(schema.getElementType(), random, d + 1));
return array;
case MAP:
length = Math.max(0, (random.nextInt(5) + 2) - d);
Map<Object, Object> map = (Map<Object, Object>) genericData.newMap(null, length);
for (int i = 0; i < length; i++) {
map.put(randomString(random, 40), generate(schema.getValueType(), random, d + 1));
}
return map;
case UNION:
// Pick one branch uniformly at random.
List<Schema> types = schema.getTypes();
return generate(types.get(random.nextInt(types.size())), random, d);
case FIXED:
byte[] bytes = new byte[schema.getFixedSize()];
random.nextBytes(bytes);
return genericData.createFixed(null, bytes, schema);
case STRING:
return randomString(random, 40);
case BYTES:
return randomBytes(random, 40);
case INT:
return this.randomInt(random, schema.getLogicalType());
case LONG:
return this.randomLong(random, schema.getLogicalType());
case FLOAT:
return random.nextFloat();
case DOUBLE:
return random.nextDouble();
case BOOLEAN:
return random.nextBoolean();
case NULL:
return null;
default:
throw new RuntimeException("Unknown type: " + schema);
}
}
private static final Charset UTF8 = StandardCharsets.UTF_8;
// Random int; time-millis values are kept within a single day.
private int randomInt(Random random, LogicalType type) {
if (type instanceof LogicalTypes.TimeMillis) {
return random.nextInt(RandomData.MILLIS_IN_DAY - 1);
}
// LogicalTypes.Date LocalDate.MAX.toEpochDay() > Integer.MAX;
return random.nextInt();
}
// Random long; time-micros values are kept within a single day. Note this
// branch draws from ThreadLocalRandom, not the seeded Random.
private long randomLong(Random random, LogicalType type) {
if (type instanceof LogicalTypes.TimeMicros) {
return ThreadLocalRandom.current().nextLong(RandomData.MILLIS_IN_DAY * 1000L);
}
// For LogicalTypes.TimestampMillis, every long would be OK,
// Instant.MAX.toEpochMilli() failed and would be > Long.MAX_VALUE.
return random.nextLong();
}
// Random lowercase-ASCII string of length 0..maxLength-1; note the letters
// drawn are 'a'..'y' ('z' is never produced by nextInt('z' - 'a')).
private Object randomString(Random random, int maxLength) {
int length = random.nextInt(maxLength);
byte[] bytes = new byte[length];
for (int i = 0; i < length; i++) {
bytes[i] = (byte) ('a' + random.nextInt('z' - 'a'));
}
return utf8ForString ? new Utf8(bytes) : new String(bytes, UTF8);
}
// Random byte blob of size 0..maxLength-1, fully readable (limit = capacity).
private static ByteBuffer randomBytes(Random rand, int maxLength) {
ByteBuffer bytes = ByteBuffer.allocate(rand.nextInt(maxLength));
bytes.limit(bytes.capacity());
rand.nextBytes(bytes.array());
return bytes;
}
// CLI: writes <count> random data conforming to <schemafile> into an Avro
// data file at <outputfile>, optionally compressed with [codec].
public static void main(String[] args) throws Exception {
if (args.length < 3 || args.length > 4) {
System.out.println("Usage: RandomData <schemafile> <outputfile> <count> [codec]");
System.exit(-1);
}
Schema sch = new Schema.Parser().parse(new File(args[0]));
try (DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>())) {
writer.setCodec(CodecFactory.fromString(args.length >= 4 ? args[3] : "null"));
writer.setMeta("user_metadata", "someByteArray".getBytes(StandardCharsets.UTF_8));
writer.create(sch, new File(args[1]));
for (Object datum : new RandomData(sch, Integer.parseInt(args[2]))) {
writer.append(datum);
}
}
}
}
| 7,273 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/NonCopyingByteArrayOutputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
/**
 * Utility to make data written to an {@link ByteArrayOutputStream} directly
 * available as a {@link ByteBuffer}.
 */
public class NonCopyingByteArrayOutputStream extends ByteArrayOutputStream {
/**
 * Creates a new byte array output stream with the given initial capacity.
 *
 * @param size the initial size, in bytes
 * @throws IllegalArgumentException if size is negative
 */
public NonCopyingByteArrayOutputStream(int size) {
  super(size);
}
/**
 * Exposes the bytes written so far as a ByteBuffer without copying. This is a
 * shallow view: later writes to this stream "write through" to (or may
 * invalidate) the returned buffer.
 *
 * @return the current contents wrapped as a ByteBuffer
 */
public ByteBuffer asByteBuffer() {
  return ByteBuffer.wrap(this.buf, 0, this.count);
}
}
| 7,274 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/ByteBufferInputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.List;
/** Utility to present {@link ByteBuffer} data as an {@link InputStream}. */
public class ByteBufferInputStream extends InputStream {
// Backing buffers, consumed in order; 'current' indexes the next unread one.
private List<ByteBuffer> buffers;
private int current;
public ByteBufferInputStream(List<ByteBuffer> buffers) {
this.buffers = buffers;
}
/**
 * Reads one byte, or returns -1 at end of stream.
 *
 * @see InputStream#read()
 */
@Override
public int read() throws IOException {
ByteBuffer buffer = getBuffer();
if (buffer == null) {
return -1;
}
return buffer.get() & 0xff;
}
/**
 * Reads up to {@code len} bytes. May return fewer than requested when the
 * current backing buffer runs out; returns -1 at end of stream.
 *
 * @see InputStream#read(byte[], int, int)
 */
@Override
public int read(byte[] b, int off, int len) throws IOException {
if (len == 0)
return 0;
ByteBuffer buffer = getBuffer();
if (buffer == null) {
return -1;
}
int remaining = buffer.remaining();
if (len > remaining) {
buffer.get(b, off, remaining);
return remaining;
} else {
buffer.get(b, off, len);
return len;
}
}
/**
 * Read a buffer from the input without copying, if possible.
 *
 * @param length exact number of bytes wanted
 * @return a buffer with exactly {@code length} remaining bytes, or an empty
 *         buffer when the stream is already exhausted (or length is 0)
 * @throws EOFException if the stream ends after some, but fewer than
 *                      {@code length}, bytes were read
 */
public ByteBuffer readBuffer(int length) throws IOException {
if (length == 0)
return ByteBuffer.allocate(0);
ByteBuffer buffer = getBuffer();
if (buffer == null) {
return ByteBuffer.allocate(0);
}
if (buffer.remaining() == length) { // can return current as-is?
current++;
return buffer; // return w/o copying
}
// punt: allocate a new buffer & copy into it
ByteBuffer result = ByteBuffer.allocate(length);
int start = 0;
while (start < length) {
int n = read(result.array(), start, length - start);
// Fix: read() returns -1 at end of stream; the old loop added that -1 to
// the offset and spun forever (or corrupted the copy) on premature EOF.
if (n < 0) {
throw new EOFException("Stream ended after " + start + " of " + length + " bytes");
}
start += n;
}
return result;
}
/**
 * Returns the next non-empty buffer, or null when all buffers are consumed.
 */
private ByteBuffer getBuffer() throws IOException {
while (current < buffers.size()) {
ByteBuffer buffer = buffers.get(current);
if (buffer.hasRemaining())
return buffer;
current++;
}
return null;
}
}
| 7,275 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/SchemaUtil.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util;
import org.apache.avro.Schema;
import java.util.StringJoiner;
public class SchemaUtil {
private SchemaUtil() {
// utility class
}
public static String describe(Schema schema) {
if (schema == null) {
return "unknown";
}
switch (schema.getType()) {
case UNION:
StringJoiner csv = new StringJoiner(", ");
for (Schema branch : schema.getTypes()) {
csv.add(describe(branch));
}
return "[" + csv + "]";
case MAP:
return "Map<String, " + describe(schema.getValueType()) + ">";
case ARRAY:
return "List<" + describe(schema.getElementType()) + ">";
default:
return schema.getName();
}
}
public static String describe(Object datum) {
if (datum == null) {
return "null";
}
return datum + " (a " + datum.getClass().getName() + ")";
}
}
| 7,276 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/internal/ClassValueCache.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.internal;
import java.util.function.Function;
/**
 * Wraps a {@link ClassValue} cache so it can be overridden in an android
 * environment, where it isn't available.
 *
 * @param <R> Return type of the ClassValue
 */
public class ClassValueCache<R> implements Function<Class<?>, R> {
// Function invoked once per class to compute the value that gets cached.
private final Function<Class<?>, R> ifAbsent;
private final ClassValue<R> cache;
/**
 * @param ifAbsent computes the value for a class on its first lookup
 */
public ClassValueCache(final Function<Class<?>, R> ifAbsent) {
  this.ifAbsent = ifAbsent;
  this.cache = new ClassValue<R>() {
    @Override
    protected R computeValue(Class<?> type) {
      return ifAbsent.apply(type);
    }
  };
}
/** Returns the cached value for {@code c}, computing it on first access. */
@Override
public R apply(Class<?> c) {
  return cache.get(c);
}
}
| 7,277 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/internal/ThreadLocalWithInitial.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.internal;
import java.util.function.Supplier;
/**
 * Wraps a {@link ThreadLocal#withInitial(Supplier)} so it can be overridden in
 * an android environment, where this method is not available until API 26.
 */
public class ThreadLocalWithInitial {
// Fix: utility class — add a private constructor so it cannot be instantiated.
private ThreadLocalWithInitial() {
}
/**
 * Delegate a ThreadLocal instance with the supplier.
 *
 * @param supplier provides each thread's initial value
 * @return a ThreadLocal whose per-thread initial value comes from {@code supplier}
 */
public static <T> ThreadLocal<T> of(Supplier<? extends T> supplier) {
return ThreadLocal.withInitial(supplier);
}
}
| 7,278 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/internal/Accessor.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.internal;
import java.io.IOException;
import org.apache.avro.JsonProperties;
import org.apache.avro.Schema;
import org.apache.avro.Schema.Field;
import org.apache.avro.Schema.Field.Order;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.io.JsonEncoder;
import org.apache.avro.io.parsing.ResolvingGrammarGenerator;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
/**
 * Registry of accessor hooks. Each nested abstract class declares internal
 * operations of another Avro class; that class registers a concrete accessor
 * via setAccessor(), and callers reach the internals through the static
 * delegating methods below. NOTE(review): presumably this exists to reach
 * package-private members across packages without widening public APIs —
 * confirm against the registering classes.
 */
public class Accessor {
/** Hook into {@link JsonProperties} internals (adding raw JsonNode props). */
public abstract static class JsonPropertiesAccessor {
protected abstract void addProp(JsonProperties props, String name, JsonNode value);
}
/** Hook into {@link Schema.Field} internals (default values, construction). */
public abstract static class FieldAccessor {
protected abstract JsonNode defaultValue(Field field);
protected abstract Field createField(String name, Schema schema, String doc, JsonNode defaultValue,
boolean validate, Order order);
protected abstract Field createField(String name, Schema schema, String doc, JsonNode defaultValue);
}
/** Hook into {@link ResolvingGrammarGenerator} internals (JSON encoding). */
public abstract static class ResolvingGrammarGeneratorAccessor {
protected abstract void encode(Encoder e, Schema s, JsonNode n) throws IOException;
}
/** Hook into {@link EncoderFactory} internals (JsonEncoder construction). */
public abstract static class EncoderFactoryAccessor {
protected abstract JsonEncoder jsonEncoder(EncoderFactory factory, Schema schema, JsonGenerator gen)
throws IOException;
}
private static volatile JsonPropertiesAccessor jsonPropertiesAccessor;
private static volatile FieldAccessor fieldAccessor;
private static volatile ResolvingGrammarGeneratorAccessor resolvingGrammarGeneratorAccessor;
public static void setAccessor(JsonPropertiesAccessor accessor) {
if (jsonPropertiesAccessor != null)
throw new IllegalStateException("JsonPropertiesAccessor already initialized");
jsonPropertiesAccessor = accessor;
}
public static void setAccessor(FieldAccessor accessor) {
if (fieldAccessor != null)
throw new IllegalStateException("FieldAccessor already initialized");
fieldAccessor = accessor;
}
private static FieldAccessor fieldAccessor() {
if (fieldAccessor == null)
ensureLoaded(Field.class);
return fieldAccessor;
}
public static void setAccessor(ResolvingGrammarGeneratorAccessor accessor) {
if (resolvingGrammarGeneratorAccessor != null)
throw new IllegalStateException("ResolvingGrammarGeneratorAccessor already initialized");
resolvingGrammarGeneratorAccessor = accessor;
}
private static ResolvingGrammarGeneratorAccessor resolvingGrammarGeneratorAccessor() {
if (resolvingGrammarGeneratorAccessor == null)
ensureLoaded(ResolvingGrammarGenerator.class);
return resolvingGrammarGeneratorAccessor;
}
private static void ensureLoaded(Class<?> c) {
try {
Class.forName(c.getName());
} catch (ClassNotFoundException e) {
// Shall never happen as the class is specified by its Class instance
}
}
public static void addProp(JsonProperties props, String name, JsonNode value) {
jsonPropertiesAccessor.addProp(props, name, value);
}
public static JsonNode defaultValue(Field field) {
return fieldAccessor.defaultValue(field);
}
public static void encode(Encoder e, Schema s, JsonNode n) throws IOException {
resolvingGrammarGeneratorAccessor().encode(e, s, n);
}
public static Field createField(String name, Schema schema, String doc, JsonNode defaultValue, boolean validate,
Order order) {
return fieldAccessor().createField(name, schema, doc, defaultValue, validate, order);
}
public static Field createField(String name, Schema schema, String doc, JsonNode defaultValue) {
return fieldAccessor().createField(name, schema, doc, defaultValue);
}
}
| 7,279 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/internal/JacksonUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.internal;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.JsonProperties;
import org.apache.avro.Schema;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.util.TokenBuffer;
public class JacksonUtils {

  // ObjectMapper is expensive to construct and thread-safe once configured, so
  // share a single instance instead of building one (or two) per call.
  private static final ObjectMapper MAPPER = new ObjectMapper();

  // Mapper used by objectToMap(): reads values from fields only, ignoring
  // getters/setters. Configured once here; never reconfigured afterwards.
  private static final ObjectMapper FIELDS_ONLY_MAPPER = new ObjectMapper()
      .setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.NONE)
      .setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);

  private JacksonUtils() {
  }

  /**
   * Convert an Avro-style datum (maps, collections, numbers, strings, byte
   * arrays, enums, booleans, or {@link JsonProperties#NULL_VALUE}) into a
   * Jackson {@link JsonNode} tree.
   *
   * @param datum the datum to convert; may be {@code null}
   * @return the equivalent JSON tree, or {@code null} when {@code datum} is null
   * @throws AvroRuntimeException if the datum cannot be serialized
   */
  public static JsonNode toJsonNode(Object datum) {
    if (datum == null) {
      return null;
    }
    try {
      TokenBuffer generator = new TokenBuffer(MAPPER, false);
      toJson(datum, generator);
      return MAPPER.readTree(generator.asParser());
    } catch (IOException e) {
      throw new AvroRuntimeException(e);
    }
  }

  /**
   * Recursively write {@code datum} to {@code generator}, mapping Java types to
   * their JSON equivalents (Map to object, Collection to array, byte[] to
   * base64 binary, and so on).
   *
   * @throws AvroRuntimeException for an unsupported datum class
   */
  @SuppressWarnings(value = "unchecked")
  static void toJson(Object datum, JsonGenerator generator) throws IOException {
    if (datum == JsonProperties.NULL_VALUE) { // null
      generator.writeNull();
    } else if (datum instanceof Map) { // record, map
      generator.writeStartObject();
      for (Map.Entry<Object, Object> entry : ((Map<Object, Object>) datum).entrySet()) {
        generator.writeFieldName(entry.getKey().toString());
        toJson(entry.getValue(), generator);
      }
      generator.writeEndObject();
    } else if (datum instanceof Collection) { // array
      generator.writeStartArray();
      for (Object element : (Collection<?>) datum) {
        toJson(element, generator);
      }
      generator.writeEndArray();
    } else if (datum instanceof byte[]) { // bytes, fixed
      generator.writeBinary((byte[]) datum);// writeString(new String((byte[]) datum, StandardCharsets.ISO_8859_1));
    } else if (datum instanceof CharSequence || datum instanceof Enum<?>) { // string, enum
      generator.writeString(datum.toString());
    } else if (datum instanceof Double) { // double
      generator.writeNumber((Double) datum);
    } else if (datum instanceof Float) { // float
      generator.writeNumber((Float) datum);
    } else if (datum instanceof Long) { // long
      generator.writeNumber((Long) datum);
    } else if (datum instanceof Integer) { // int
      generator.writeNumber((Integer) datum);
    } else if (datum instanceof Boolean) { // boolean
      generator.writeBoolean((Boolean) datum);
    } else if (datum instanceof BigInteger) {
      generator.writeNumber((BigInteger) datum);
    } else if (datum instanceof BigDecimal) {
      generator.writeNumber((BigDecimal) datum);
    } else {
      throw new AvroRuntimeException("Unknown datum class: " + datum.getClass());
    }
  }

  /** Convert a JSON tree into a plain Java object without schema guidance. */
  public static Object toObject(JsonNode jsonNode) {
    return toObject(jsonNode, null);
  }

  /**
   * Convert a JSON tree into a plain Java object, using {@code schema} (when
   * non-null) to steer numeric widening/narrowing and string-vs-bytes choices.
   *
   * @param jsonNode the tree to convert; may be {@code null}
   * @param schema   the target Avro schema, or {@code null} for best-effort
   * @return the converted object, or {@code null} when the node does not match
   *         the schema
   */
  public static Object toObject(JsonNode jsonNode, Schema schema) {
    if (schema != null && schema.getType().equals(Schema.Type.UNION)) {
      // Default values of a union must match its first branch.
      return toObject(jsonNode, schema.getTypes().get(0));
    }
    if (jsonNode == null) {
      return null;
    } else if (jsonNode.isNull()) {
      return JsonProperties.NULL_VALUE;
    } else if (jsonNode.isBoolean()) {
      return jsonNode.asBoolean();
    } else if (jsonNode.isInt()) {
      if (schema == null || schema.getType().equals(Schema.Type.INT)) {
        return jsonNode.asInt();
      } else if (schema.getType().equals(Schema.Type.LONG)) {
        return jsonNode.asLong();
      } else if (schema.getType().equals(Schema.Type.FLOAT)) {
        return (float) jsonNode.asDouble();
      } else if (schema.getType().equals(Schema.Type.DOUBLE)) {
        return jsonNode.asDouble();
      }
    } else if (jsonNode.isLong()) {
      if (schema == null || schema.getType().equals(Schema.Type.LONG)) {
        return jsonNode.asLong();
      } else if (schema.getType().equals(Schema.Type.INT)) {
        // Narrow only when the value actually fits in an int.
        if (jsonNode.canConvertToInt()) {
          return jsonNode.asInt();
        } else {
          return jsonNode.asLong();
        }
      } else if (schema.getType().equals(Schema.Type.FLOAT)) {
        return (float) jsonNode.asDouble();
      } else if (schema.getType().equals(Schema.Type.DOUBLE)) {
        return jsonNode.asDouble();
      }
    } else if (jsonNode.isDouble() || jsonNode.isFloat()) {
      if (schema != null) {
        if (schema.getType().equals(Schema.Type.DOUBLE)) {
          return jsonNode.doubleValue();
        } else if (schema.getType().equals(Schema.Type.FLOAT)) {
          return jsonNode.floatValue();
        }
      } else if (jsonNode.isDouble()) {
        return jsonNode.doubleValue();
      } else {
        return jsonNode.floatValue();
      }
    } else if (jsonNode.isBinary()) {
      try {
        return jsonNode.binaryValue();
      } catch (IOException ex) {
        // only for TextNode, so, can't happen with binaryNode.
        throw new UncheckedIOException(ex);
      }
    } else if (jsonNode.isTextual()) {
      if (schema == null || schema.getType().equals(Schema.Type.STRING) || schema.getType().equals(Schema.Type.ENUM)) {
        return jsonNode.asText();
      } else if (schema.getType().equals(Schema.Type.BYTES) || schema.getType().equals(Schema.Type.FIXED)) {
        // Avro JSON encoding represents bytes as ISO-8859-1 strings.
        return jsonNode.textValue().getBytes(StandardCharsets.ISO_8859_1);
      }
    } else if (jsonNode.isArray()) {
      List<Object> l = new ArrayList<>();
      for (JsonNode node : jsonNode) {
        l.add(toObject(node, schema == null ? null : schema.getElementType()));
      }
      return l;
    } else if (jsonNode.isObject()) {
      // LinkedHashMap preserves the field order of the JSON object.
      Map<Object, Object> m = new LinkedHashMap<>();
      for (Iterator<String> it = jsonNode.fieldNames(); it.hasNext();) {
        String key = it.next();
        final Schema s;
        if (schema != null && schema.getType().equals(Schema.Type.MAP)) {
          s = schema.getValueType();
        } else if (schema != null && schema.getType().equals(Schema.Type.RECORD)) {
          s = schema.getField(key).schema();
        } else {
          s = null;
        }
        Object value = toObject(jsonNode.get(key), s);
        m.put(key, value);
      }
      return m;
    }
    return null;
  }

  /**
   * Convert an object into a map
   *
   * @param datum The object
   * @return Its Map representation
   */
  public static Map objectToMap(Object datum) {
    // we only care about fields
    return FIELDS_ONLY_MAPPER.convertValue(datum, Map.class);
  }
}
| 7,280 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/springframework/ConcurrentReferenceHashMap.java | /*
* Copyright 2002-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.springframework;
import org.apache.avro.reflect.Nullable;
import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.lang.ref.WeakReference;
import java.lang.reflect.Array;
import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantLock;
/**
* A {@link ConcurrentHashMap} that uses {@link ReferenceType#SOFT soft} or
* {@linkplain ReferenceType#WEAK weak} references for both {@code keys} and
* {@code values}.
*
* <p>
* This class can be used as an alternative to
* {@code Collections.synchronizedMap(new WeakHashMap<K, Reference<V>>())} in
* order to support better performance when accessed concurrently. This
* implementation follows the same design constraints as
* {@link ConcurrentHashMap} with the exception that {@code null} values and
* {@code null} keys are supported.
*
* <p>
* <b>NOTE:</b> The use of references means that there is no guarantee that
* items placed into the map will be subsequently available. The garbage
* collector may discard references at any time, so it may appear that an
* unknown thread is silently removing entries.
*
* <p>
* If not explicitly specified, this implementation will use
* {@linkplain SoftReference soft entry references}.
*
* @param <K> the key type
* @param <V> the value type
* @author Phillip Webb
* @author Juergen Hoeller
* @since 3.2
*/
public class ConcurrentReferenceHashMap<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {
private static final int DEFAULT_INITIAL_CAPACITY = 16;
private static final float DEFAULT_LOAD_FACTOR = 0.75f;
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
private static final ReferenceType DEFAULT_REFERENCE_TYPE = ReferenceType.SOFT;
private static final int MAXIMUM_CONCURRENCY_LEVEL = 1 << 16;
private static final int MAXIMUM_SEGMENT_SIZE = 1 << 30;
/**
* Array of segments indexed using the high order bits from the hash.
*/
private final Segment[] segments;
/**
* When the average number of references per table exceeds this value resize
* will be attempted.
*/
private final float loadFactor;
/**
* The reference type: SOFT or WEAK.
*/
private final ReferenceType referenceType;
/**
* The shift value used to calculate the size of the segments array and an index
* from the hash.
*/
private final int shift;
/**
* Late binding entry set.
*/
@Nullable
private volatile Set<Map.Entry<K, V>> entrySet;
/**
* Create a new {@code ConcurrentReferenceHashMap} instance.
*/
public ConcurrentReferenceHashMap() {
  this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, DEFAULT_REFERENCE_TYPE);
}

/**
 * Create a new {@code ConcurrentReferenceHashMap} instance.
 *
 * @param initialCapacity the initial capacity of the map
 */
public ConcurrentReferenceHashMap(int initialCapacity) {
  this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, DEFAULT_REFERENCE_TYPE);
}

/**
 * Create a new {@code ConcurrentReferenceHashMap} instance.
 *
 * @param initialCapacity the initial capacity of the map
 * @param loadFactor      the load factor. When the average number of references
 *                        per table exceeds this value resize will be attempted
 */
public ConcurrentReferenceHashMap(int initialCapacity, float loadFactor) {
  this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL, DEFAULT_REFERENCE_TYPE);
}

/**
 * Create a new {@code ConcurrentReferenceHashMap} instance.
 *
 * @param initialCapacity  the initial capacity of the map
 * @param concurrencyLevel the expected number of threads that will concurrently
 *                         write to the map
 */
public ConcurrentReferenceHashMap(int initialCapacity, int concurrencyLevel) {
  this(initialCapacity, DEFAULT_LOAD_FACTOR, concurrencyLevel, DEFAULT_REFERENCE_TYPE);
}

/**
 * Create a new {@code ConcurrentReferenceHashMap} instance.
 *
 * @param initialCapacity the initial capacity of the map
 * @param referenceType   the reference type used for entries (soft or weak)
 */
public ConcurrentReferenceHashMap(int initialCapacity, ReferenceType referenceType) {
  this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL, referenceType);
}

/**
 * Create a new {@code ConcurrentReferenceHashMap} instance.
 *
 * @param initialCapacity  the initial capacity of the map
 * @param loadFactor       the load factor. When the average number of
 *                         references per table exceeds this value, resize will
 *                         be attempted.
 * @param concurrencyLevel the expected number of threads that will concurrently
 *                         write to the map
 */
public ConcurrentReferenceHashMap(int initialCapacity, float loadFactor, int concurrencyLevel) {
  this(initialCapacity, loadFactor, concurrencyLevel, DEFAULT_REFERENCE_TYPE);
}

/**
 * Create a new {@code ConcurrentReferenceHashMap} instance.
 *
 * @param initialCapacity  the initial capacity of the map
 * @param loadFactor       the load factor. When the average number of
 *                         references per table exceeds this value, resize will
 *                         be attempted.
 * @param concurrencyLevel the expected number of threads that will concurrently
 *                         write to the map
 * @param referenceType    the reference type used for entries (soft or weak)
 */
@SuppressWarnings("unchecked")
public ConcurrentReferenceHashMap(int initialCapacity, float loadFactor, int concurrencyLevel,
    ReferenceType referenceType) {
  Assert.isTrue(initialCapacity >= 0, "Initial capacity must not be negative");
  Assert.isTrue(loadFactor > 0f, "Load factor must be positive");
  Assert.isTrue(concurrencyLevel > 0, "Concurrency level must be positive");
  Assert.notNull(referenceType, "Reference type must not be null");
  this.loadFactor = loadFactor;
  // The segment count is the smallest power of two >= concurrencyLevel
  // (capped at MAXIMUM_CONCURRENCY_LEVEL); shift is also used later to pick a
  // segment from the high-order hash bits.
  this.shift = calculateShift(concurrencyLevel, MAXIMUM_CONCURRENCY_LEVEL);
  int size = 1 << this.shift;
  this.referenceType = referenceType;
  // Split the requested capacity evenly across segments, rounding up
  // (the 1L keeps the intermediate sum in long to avoid int overflow).
  int roundedUpSegmentCapacity = (int) ((initialCapacity + size - 1L) / size);
  // Each segment's reference table is itself a power of two.
  int initialSize = 1 << calculateShift(roundedUpSegmentCapacity, MAXIMUM_SEGMENT_SIZE);
  Segment[] segments = (Segment[]) Array.newInstance(Segment.class, size);
  int resizeThreshold = (int) (initialSize * getLoadFactor());
  for (int i = 0; i < segments.length; i++) {
    segments[i] = new Segment(initialSize, resizeThreshold);
  }
  this.segments = segments;
}
/** Return the configured load factor (references per table slot before resize). */
protected final float getLoadFactor() {
  return this.loadFactor;
}

/** Return the number of segments backing this map. */
protected final int getSegmentsSize() {
  return this.segments.length;
}

/** Return the segment at the given index (for subclass/test introspection). */
protected final Segment getSegment(int index) {
  return this.segments[index];
}

/**
 * Factory method that returns the {@link ReferenceManager}. This method will be
 * called once for each {@link Segment}.
 *
 * @return a new reference manager
 */
protected ReferenceManager createReferenceManager() {
  return new ReferenceManager();
}

/**
 * Get the hash for a given object, apply an additional hash function to reduce
 * collisions. This implementation uses the same Wang/Jenkins algorithm as
 * {@link ConcurrentHashMap}. Subclasses can override to provide alternative
 * hashing.
 *
 * @param o the object to hash (may be null)
 * @return the resulting hash code
 */
protected int getHash(@Nullable Object o) {
  int hash = (o != null ? o.hashCode() : 0);
  // Bit-spreading pass so both the high bits (segment selection) and the low
  // bits (slot selection) are well distributed.
  hash += (hash << 15) ^ 0xffffcd7d;
  hash ^= (hash >>> 10);
  hash += (hash << 3);
  hash ^= (hash >>> 6);
  hash += (hash << 2) + (hash << 14);
  hash ^= (hash >>> 16);
  return hash;
}

// Lock-free read: returns null both for a missing key and for an entry whose
// referent has already been garbage collected.
@Override
@Nullable
public V get(@Nullable Object key) {
  Reference<K, V> ref = getReference(key, Restructure.WHEN_NECESSARY);
  Entry<K, V> entry = (ref != null ? ref.get() : null);
  return (entry != null ? entry.getValue() : null);
}

@Override
@Nullable
public V getOrDefault(@Nullable Object key, @Nullable V defaultValue) {
  Reference<K, V> ref = getReference(key, Restructure.WHEN_NECESSARY);
  Entry<K, V> entry = (ref != null ? ref.get() : null);
  return (entry != null ? entry.getValue() : defaultValue);
}

@Override
public boolean containsKey(@Nullable Object key) {
  Reference<K, V> ref = getReference(key, Restructure.WHEN_NECESSARY);
  Entry<K, V> entry = (ref != null ? ref.get() : null);
  return (entry != null && ObjectUtils.nullSafeEquals(entry.getKey(), key));
}

/**
 * Return a {@link Reference} to the {@link Entry} for the specified
 * {@code key}, or {@code null} if not found.
 *
 * @param key         the key (can be {@code null})
 * @param restructure types of restructure allowed during this call
 * @return the reference, or {@code null} if not found
 */
@Nullable
protected final Reference<K, V> getReference(@Nullable Object key, Restructure restructure) {
  int hash = getHash(key);
  return getSegmentForHash(hash).getReference(key, hash, restructure);
}

@Override
@Nullable
public V put(@Nullable K key, @Nullable V value) {
  return put(key, value, true);
}

@Override
@Nullable
public V putIfAbsent(@Nullable K key, @Nullable V value) {
  return put(key, value, false);
}
// Shared implementation for put() (overwriteExisting=true) and putIfAbsent()
// (overwriteExisting=false). Runs as a Task under the owning segment's lock.
@Nullable
private V put(@Nullable final K key, @Nullable final V value, final boolean overwriteExisting) {
  return doTask(key, new Task<V>(TaskOption.RESTRUCTURE_BEFORE, TaskOption.RESIZE) {
    @Override
    @Nullable
    protected V execute(@Nullable Reference<K, V> ref, @Nullable Entry<K, V> entry, @Nullable Entries<V> entries) {
      if (entry != null) {
        // Key already present: return the previous value, replacing it only
        // when overwriting is requested.
        V oldValue = entry.getValue();
        if (overwriteExisting) {
          entry.setValue(value);
        }
        return oldValue;
      }
      Assert.state(entries != null, "No entries segment");
      entries.add(value);
      return null;
    }
  });
}

@Override
@Nullable
public V remove(@Nullable Object key) {
  return doTask(key, new Task<V>(TaskOption.RESTRUCTURE_AFTER, TaskOption.SKIP_IF_EMPTY) {
    @Override
    @Nullable
    protected V execute(@Nullable Reference<K, V> ref, @Nullable Entry<K, V> entry) {
      if (entry != null) {
        if (ref != null) {
          // release() queues the reference for purge on a later restructure.
          ref.release();
        }
        return entry.value;
      }
      return null;
    }
  });
}

// Conditional remove: succeeds only when the current value matches.
@Override
public boolean remove(@Nullable Object key, final @Nullable Object value) {
  Boolean result = doTask(key, new Task<Boolean>(TaskOption.RESTRUCTURE_AFTER, TaskOption.SKIP_IF_EMPTY) {
    @Override
    protected Boolean execute(@Nullable Reference<K, V> ref, @Nullable Entry<K, V> entry) {
      if (entry != null && ObjectUtils.nullSafeEquals(entry.getValue(), value)) {
        if (ref != null) {
          ref.release();
        }
        return true;
      }
      return false;
    }
  });
  return (Boolean.TRUE.equals(result));
}

// Conditional replace: succeeds only when the current value equals oldValue.
@Override
public boolean replace(@Nullable K key, final @Nullable V oldValue, final @Nullable V newValue) {
  Boolean result = doTask(key, new Task<Boolean>(TaskOption.RESTRUCTURE_BEFORE, TaskOption.SKIP_IF_EMPTY) {
    @Override
    protected Boolean execute(@Nullable Reference<K, V> ref, @Nullable Entry<K, V> entry) {
      if (entry != null && ObjectUtils.nullSafeEquals(entry.getValue(), oldValue)) {
        entry.setValue(newValue);
        return true;
      }
      return false;
    }
  });
  return (Boolean.TRUE.equals(result));
}

// Unconditional replace: replaces only when the key is present.
@Override
@Nullable
public V replace(@Nullable K key, final @Nullable V value) {
  return doTask(key, new Task<V>(TaskOption.RESTRUCTURE_BEFORE, TaskOption.SKIP_IF_EMPTY) {
    @Override
    @Nullable
    protected V execute(@Nullable Reference<K, V> ref, @Nullable Entry<K, V> entry) {
      if (entry != null) {
        V oldValue = entry.getValue();
        entry.setValue(value);
        return oldValue;
      }
      return null;
    }
  });
}
@Override
public void clear() {
  for (Segment segment : this.segments) {
    segment.clear();
  }
}

/**
 * Remove any entries that have been garbage collected and are no longer
 * referenced. Under normal circumstances garbage collected entries are
 * automatically purged as items are added or removed from the Map. This method
 * can be used to force a purge, and is useful when the Map is read frequently
 * but updated less often.
 */
public void purgeUnreferencedEntries() {
  for (Segment segment : this.segments) {
    segment.restructureIfNecessary(false);
  }
}

// Sum of per-segment counts; may over-count entries whose referents were
// garbage collected but not yet purged.
@Override
public int size() {
  int size = 0;
  for (Segment segment : this.segments) {
    size += segment.getCount();
  }
  return size;
}

@Override
public boolean isEmpty() {
  for (Segment segment : this.segments) {
    if (segment.getCount() > 0) {
      return false;
    }
  }
  return true;
}

@Override
public Set<Map.Entry<K, V>> entrySet() {
  // Lazy unsynchronized init: the race is benign because any EntrySet
  // instance is stateless and interchangeable.
  Set<Map.Entry<K, V>> entrySet = this.entrySet;
  if (entrySet == null) {
    entrySet = new EntrySet();
    this.entrySet = entrySet;
  }
  return entrySet;
}

// Route a Task to the segment owning the key's hash.
@Nullable
private <T> T doTask(@Nullable Object key, Task<T> task) {
  int hash = getHash(key);
  return getSegmentForHash(hash).doTask(hash, key, task);
}

// Segment selection uses the high-order hash bits; the per-segment slot index
// (Segment.getIndex) uses the low-order bits, so the two never correlate.
private Segment getSegmentForHash(int hash) {
  return this.segments[(hash >>> (32 - this.shift)) & (this.segments.length - 1)];
}
/**
 * Calculate a shift value that can be used to create a power-of-two value
 * between the specified maximum and minimum values.
 *
 * @param minimumValue the minimum value
 * @param maximumValue the maximum value
 * @return the calculated shift (use {@code 1 << shift} to obtain a value)
 */
protected static int calculateShift(int minimumValue, int maximumValue) {
  int power = 0;
  // Keep doubling until we reach minimumValue, but never go past the cap:
  // on exit, 1 << power >= minimumValue unless maximumValue stopped us first.
  for (int candidate = 1; candidate < minimumValue && candidate < maximumValue; candidate <<= 1) {
    power++;
  }
  return power;
}
/**
 * Various reference types supported by this map.
 */
public enum ReferenceType {

  /**
   * Use {@link SoftReference SoftReferences}: the garbage collector clears
   * entries only in response to memory demand.
   */
  SOFT,

  /**
   * Use {@link WeakReference WeakReferences}: entries become collectable as
   * soon as no strong references to them remain.
   */
  WEAK
}
/**
 * A single segment used to divide the map to allow better concurrent
 * performance.
 */
@SuppressWarnings("serial")
protected final class Segment extends ReentrantLock {

  private final ReferenceManager referenceManager;

  // Table size to restore on clear().
  private final int initialSize;

  /**
   * Array of references indexed using the low order bits from the hash. This
   * property should only be set along with {@code resizeThreshold}.
   */
  private volatile Reference<K, V>[] references;

  /**
   * The total number of references contained in this segment. This includes
   * chained references and references that have been garbage collected but not
   * purged.
   */
  private final AtomicInteger count = new AtomicInteger();

  /**
   * The threshold when resizing of the references should occur. When
   * {@code count} exceeds this value references will be resized.
   */
  private int resizeThreshold;

  public Segment(int initialSize, int resizeThreshold) {
    this.referenceManager = createReferenceManager();
    this.initialSize = initialSize;
    this.references = createReferenceArray(initialSize);
    this.resizeThreshold = resizeThreshold;
  }

  /**
   * Lock-free lookup of the reference chained under the key's hash slot.
   * Returns {@code null} when the key is absent or the segment is empty.
   */
  @Nullable
  public Reference<K, V> getReference(@Nullable Object key, int hash, Restructure restructure) {
    if (restructure == Restructure.WHEN_NECESSARY) {
      restructureIfNecessary(false);
    }
    if (this.count.get() == 0) {
      return null;
    }
    // Use a local copy to protect against other threads writing
    Reference<K, V>[] references = this.references;
    int index = getIndex(hash, references);
    Reference<K, V> head = references[index];
    return findInChain(head, key, hash);
  }

  /**
   * Apply an update operation to this segment. The segment will be locked during
   * the update.
   *
   * @param hash the hash of the key
   * @param key  the key
   * @param task the update operation
   * @return the result of the operation
   */
  @Nullable
  public <T> T doTask(final int hash, @Nullable final Object key, final Task<T> task) {
    boolean resize = task.hasOption(TaskOption.RESIZE);
    if (task.hasOption(TaskOption.RESTRUCTURE_BEFORE)) {
      restructureIfNecessary(resize);
    }
    // Fast path: skip locking entirely when the task tolerates it and the
    // segment has no references.
    if (task.hasOption(TaskOption.SKIP_IF_EMPTY) && this.count.get() == 0) {
      return task.execute(null, null, null);
    }
    lock();
    try {
      final int index = getIndex(hash, this.references);
      final Reference<K, V> head = this.references[index];
      Reference<K, V> ref = findInChain(head, key, hash);
      Entry<K, V> entry = (ref != null ? ref.get() : null);
      // Callback the task can use to insert a new entry at the chain head;
      // only valid while this lock is held.
      Entries<V> entries = value -> {
        @SuppressWarnings("unchecked")
        Entry<K, V> newEntry = new Entry<>((K) key, value);
        Reference<K, V> newReference = Segment.this.referenceManager.createReference(newEntry, hash, head);
        Segment.this.references[index] = newReference;
        Segment.this.count.incrementAndGet();
      };
      return task.execute(ref, entry, entries);
    } finally {
      unlock();
      if (task.hasOption(TaskOption.RESTRUCTURE_AFTER)) {
        restructureIfNecessary(resize);
      }
    }
  }

  /**
   * Clear all items from this segment.
   */
  public void clear() {
    if (this.count.get() == 0) {
      return;
    }
    lock();
    try {
      // Reset to a fresh table at the original size.
      this.references = createReferenceArray(this.initialSize);
      this.resizeThreshold = (int) (this.references.length * getLoadFactor());
      this.count.set(0);
    } finally {
      unlock();
    }
  }

  /**
   * Restructure the underlying data structure when it becomes necessary. This
   * method can increase the size of the references table as well as purge any
   * references that have been garbage collected.
   *
   * @param allowResize if resizing is permitted
   */
  private void restructureIfNecessary(boolean allowResize) {
    // Cheap pre-check outside the lock; the decision is re-validated under
    // the lock inside restructure().
    int currCount = this.count.get();
    boolean needsResize = allowResize && (currCount > 0 && currCount >= this.resizeThreshold);
    Reference<K, V> ref = this.referenceManager.pollForPurge();
    if (ref != null || (needsResize)) {
      restructure(allowResize, ref);
    }
  }

  private void restructure(boolean allowResize, @Nullable Reference<K, V> ref) {
    boolean needsResize;
    lock();
    try {
      int countAfterRestructure = this.count.get();
      // Drain everything currently queued for purge into a set so we can skip
      // those references while rebuilding the chains.
      Set<Reference<K, V>> toPurge = Collections.emptySet();
      if (ref != null) {
        toPurge = new HashSet<>();
        while (ref != null) {
          toPurge.add(ref);
          ref = this.referenceManager.pollForPurge();
        }
      }
      countAfterRestructure -= toPurge.size();
      // Recalculate taking into account count inside lock and items that
      // will be purged
      needsResize = (countAfterRestructure > 0 && countAfterRestructure >= this.resizeThreshold);
      boolean resizing = false;
      int restructureSize = this.references.length;
      if (allowResize && needsResize && restructureSize < MAXIMUM_SEGMENT_SIZE) {
        restructureSize <<= 1;
        resizing = true;
      }
      // Either create a new table or reuse the existing one
      Reference<K, V>[] restructured = (resizing ? createReferenceArray(restructureSize) : this.references);
      // Restructure
      for (int i = 0; i < this.references.length; i++) {
        ref = this.references[i];
        if (!resizing) {
          // Reusing the table in place: clear the slot before re-chaining.
          restructured[i] = null;
        }
        while (ref != null) {
          // Drop purged references and references whose entry was collected;
          // re-create surviving references at their (possibly new) slot.
          if (!toPurge.contains(ref)) {
            Entry<K, V> entry = ref.get();
            if (entry != null) {
              int index = getIndex(ref.getHash(), restructured);
              restructured[index] = this.referenceManager.createReference(entry, ref.getHash(), restructured[index]);
            }
          }
          ref = ref.getNext();
        }
      }
      // Replace volatile members
      if (resizing) {
        this.references = restructured;
        this.resizeThreshold = (int) (this.references.length * getLoadFactor());
      }
      this.count.set(Math.max(countAfterRestructure, 0));
    } finally {
      unlock();
    }
  }

  // Walk a chain comparing cached hashes first (cheap) and keys second.
  @Nullable
  private Reference<K, V> findInChain(Reference<K, V> ref, @Nullable Object key, int hash) {
    Reference<K, V> currRef = ref;
    while (currRef != null) {
      if (currRef.getHash() == hash) {
        Entry<K, V> entry = currRef.get();
        if (entry != null) {
          K entryKey = entry.getKey();
          if (ObjectUtils.nullSafeEquals(entryKey, key)) {
            return currRef;
          }
        }
      }
      currRef = currRef.getNext();
    }
    return null;
  }

  @SuppressWarnings({ "unchecked" })
  private Reference<K, V>[] createReferenceArray(int size) {
    return new Reference[size];
  }

  // Table length is always a power of two, so masking selects the low bits.
  private int getIndex(int hash, Reference<K, V>[] references) {
    return (hash & (references.length - 1));
  }

  /**
   * Return the size of the current references array.
   */
  public int getSize() {
    return this.references.length;
  }

  /**
   * Return the total number of references in this segment.
   */
  public int getCount() {
    return this.count.get();
  }
}
/**
 * A reference to an {@link Entry} contained in the map. Implementations are
 * usually wrappers around specific Java reference implementations (e.g.,
 * {@link SoftReference}).
 *
 * @param <K> the key type
 * @param <V> the value type
 */
protected interface Reference<K, V> {

  /**
   * Return the referenced entry, or {@code null} if the entry is no longer
   * available.
   */
  @Nullable
  Entry<K, V> get();

  /**
   * Return the hash for the reference. The hash is cached on the reference so
   * that chains can be walked without dereferencing (and possibly resurrecting)
   * the entry itself.
   */
  int getHash();

  /**
   * Return the next reference in the chain, or {@code null} if none.
   */
  @Nullable
  Reference<K, V> getNext();

  /**
   * Release this entry and ensure that it will be returned from
   * {@code ReferenceManager#pollForPurge()}.
   */
  void release();
}
/**
 * A single map entry holding an immutable key and a mutable, volatile value.
 *
 * @param <K> the key type
 * @param <V> the value type
 */
protected static final class Entry<K, V> implements Map.Entry<K, V> {

  @Nullable
  private final K key;

  @Nullable
  private volatile V value;

  public Entry(@Nullable K key, @Nullable V value) {
    this.key = key;
    this.value = value;
  }

  @Override
  @Nullable
  public K getKey() {
    return this.key;
  }

  @Override
  @Nullable
  public V getValue() {
    return this.value;
  }

  @Override
  @Nullable
  public V setValue(@Nullable V value) {
    V replaced = this.value;
    this.value = value;
    return replaced;
  }

  @Override
  public String toString() {
    return this.key + "=" + this.value;
  }

  @Override
  @SuppressWarnings("rawtypes")
  public boolean equals(@Nullable Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof Map.Entry)) {
      return false;
    }
    // Null-safe comparison of both key and value, per the Map.Entry contract.
    Map.Entry otherEntry = (Map.Entry) other;
    if (!ObjectUtils.nullSafeEquals(getKey(), otherEntry.getKey())) {
      return false;
    }
    return ObjectUtils.nullSafeEquals(getValue(), otherEntry.getValue());
  }

  @Override
  public int hashCode() {
    // XOR of key/value hashes, matching Map.Entry.hashCode().
    return (ObjectUtils.nullSafeHashCode(this.key) ^ ObjectUtils.nullSafeHashCode(this.value));
  }
}
/**
 * A task that can be {@link Segment#doTask run} against a {@link Segment}.
 */
private abstract class Task<T> {

  // The options requested for this task (possibly empty).
  private final EnumSet<TaskOption> options;

  public Task(TaskOption... options) {
    EnumSet<TaskOption> requested = EnumSet.noneOf(TaskOption.class);
    for (TaskOption option : options) {
      requested.add(option);
    }
    this.options = requested;
  }

  /** Return whether the given option was requested for this task. */
  public boolean hasOption(TaskOption option) {
    return this.options.contains(option);
  }

  /**
   * Execute the task.
   *
   * @param ref the found reference (or {@code null})
   * @param entry the found entry (or {@code null})
   * @param entries access to the underlying entries
   * @return the result of the task
   * @see #execute(Reference, Entry)
   */
  @Nullable
  protected T execute(@Nullable Reference<K, V> ref, @Nullable Entry<K, V> entry, @Nullable Entries<V> entries) {
    // By default delegate to the two-argument variant for tasks that do not
    // need access to the Entries.
    return execute(ref, entry);
  }

  /**
   * Convenience method that can be used for tasks that do not need access to
   * {@link Entries}.
   *
   * @param ref the found reference (or {@code null})
   * @param entry the found entry (or {@code null})
   * @return the result of the task (always {@code null} unless overridden)
   * @see #execute(Reference, Entry, Entries)
   */
  @Nullable
  protected T execute(@Nullable Reference<K, V> ref, @Nullable Entry<K, V> entry) {
    return null;
  }
}
/**
 * Various options supported by a {@code Task}.
 */
private enum TaskOption {

  // NOTE(review): names suggest restructuring before/after execution, skipping
  // empty segments, and permitting a resize -- confirm against Segment#doTask.
  RESTRUCTURE_BEFORE, RESTRUCTURE_AFTER, SKIP_IF_EMPTY, RESIZE
}
/**
 * Allows a task access to {@link Segment} entries.
 */
private interface Entries<V> {

  /**
   * Add a new entry with the specified value.
   *
   * @param value the value to add (may be {@code null})
   */
  void add(@Nullable V value);
}
/**
 * Internal entry-set view over the enclosing map.
 */
private class EntrySet extends AbstractSet<Map.Entry<K, V>> {

  @Override
  public Iterator<Map.Entry<K, V>> iterator() {
    return new EntryIterator();
  }

  @Override
  public boolean contains(@Nullable Object o) {
    if (!(o instanceof Map.Entry<?, ?>)) {
      return false;
    }
    Map.Entry<?, ?> candidate = (Map.Entry<?, ?>) o;
    // Look up without triggering a restructure, then compare values null-safely.
    Reference<K, V> ref = ConcurrentReferenceHashMap.this.getReference(candidate.getKey(), Restructure.NEVER);
    Entry<K, V> mapEntry = (ref != null ? ref.get() : null);
    return (mapEntry != null && ObjectUtils.nullSafeEquals(candidate.getValue(), mapEntry.getValue()));
  }

  @Override
  public boolean remove(Object o) {
    if (!(o instanceof Map.Entry<?, ?>)) {
      return false;
    }
    Map.Entry<?, ?> candidate = (Map.Entry<?, ?>) o;
    return ConcurrentReferenceHashMap.this.remove(candidate.getKey(), candidate.getValue());
  }

  @Override
  public int size() {
    return ConcurrentReferenceHashMap.this.size();
  }

  @Override
  public void clear() {
    ConcurrentReferenceHashMap.this.clear();
  }
}
/**
 * Internal entry iterator implementation.
 *
 * <p>Walks the map segment by segment, and within each segment's reference
 * table walks every reference chain. Entries whose referents have been garbage
 * collected are skipped transparently.
 */
private class EntryIterator implements Iterator<Map.Entry<K, V>> {

  // Index of the next segment to visit.
  private int segmentIndex;

  // Index of the next slot within the current reference table.
  private int referenceIndex;

  // The current segment's reference table (null once all segments are exhausted).
  @Nullable
  private Reference<K, V>[] references;

  // The reference currently being walked within a chain.
  @Nullable
  private Reference<K, V> reference;

  // The next entry to return, if already located by getNextIfNecessary().
  @Nullable
  private Entry<K, V> next;

  // The most recently returned entry; retained to support remove().
  @Nullable
  private Entry<K, V> last;

  public EntryIterator() {
    moveToNextSegment();
  }

  @Override
  public boolean hasNext() {
    getNextIfNecessary();
    return (this.next != null);
  }

  @Override
  public Entry<K, V> next() {
    getNextIfNecessary();
    if (this.next == null) {
      throw new NoSuchElementException();
    }
    this.last = this.next;
    this.next = null;
    return this.last;
  }

  private void getNextIfNecessary() {
    while (this.next == null) {
      moveToNextReference();
      if (this.reference == null) {
        // All segments exhausted; hasNext() will now report false.
        return;
      }
      // May be null if the entry was garbage collected; loop to skip it.
      this.next = this.reference.get();
    }
  }

  private void moveToNextReference() {
    if (this.reference != null) {
      // Continue along the current chain before moving to the next slot.
      this.reference = this.reference.getNext();
    }
    while (this.reference == null && this.references != null) {
      if (this.referenceIndex >= this.references.length) {
        // Finished this table; advance to the next segment, restart at slot 0.
        moveToNextSegment();
        this.referenceIndex = 0;
      } else {
        this.reference = this.references[this.referenceIndex];
        this.referenceIndex++;
      }
    }
  }

  private void moveToNextSegment() {
    this.reference = null;
    this.references = null;
    if (this.segmentIndex < ConcurrentReferenceHashMap.this.segments.length) {
      this.references = ConcurrentReferenceHashMap.this.segments[this.segmentIndex].references;
      this.segmentIndex++;
    }
  }

  @Override
  public void remove() {
    Assert.state(this.last != null, "No element to remove");
    ConcurrentReferenceHashMap.this.remove(this.last.getKey());
    this.last = null;
  }
}
/**
 * The types of restructuring that can be performed.
 */
protected enum Restructure {

  // NOTE(review): WHEN_NECESSARY appears to permit restructuring during the
  // operation, while NEVER is used for pure lookups (see EntrySet#contains);
  // confirm against getReference() callers.
  WHEN_NECESSARY, NEVER
}
/**
 * Strategy class used to manage {@link Reference References}. This class can be
 * overridden if alternative reference types need to be supported.
 */
protected class ReferenceManager {

  // Queue notified by the JVM once a referent has been garbage collected.
  private final ReferenceQueue<Entry<K, V>> queue = new ReferenceQueue<>();

  /**
   * Factory method used to create a new {@link Reference}.
   *
   * @param entry the entry contained in the reference
   * @param hash the hash
   * @param next the next reference in the chain, or {@code null} if none
   * @return a new {@link Reference}
   */
  public Reference<K, V> createReference(Entry<K, V> entry, int hash, @Nullable Reference<K, V> next) {
    boolean useWeakReferences = (ConcurrentReferenceHashMap.this.referenceType == ReferenceType.WEAK);
    return (useWeakReferences ? new WeakEntryReference<>(entry, hash, next, this.queue)
        : new SoftEntryReference<>(entry, hash, next, this.queue));
  }

  /**
   * Return any reference that has been garbage collected and can be purged from
   * the underlying structure, or {@code null} if no references need purging.
   * This method must be thread safe and ideally should not block when returning
   * {@code null}. References should be returned once and only once.
   *
   * @return a reference to purge or {@code null}
   */
  @SuppressWarnings("unchecked")
  @Nullable
  public Reference<K, V> pollForPurge() {
    // Safe cast: the queue only ever receives references built by createReference.
    return (Reference<K, V>) this.queue.poll();
  }
}
/**
 * Internal {@link Reference} implementation backed by a {@link SoftReference}.
 */
private static final class SoftEntryReference<K, V> extends SoftReference<Entry<K, V>> implements Reference<K, V> {

  private final int hash;

  // Next reference in the chain, or null at the end of the chain.
  @Nullable
  private final Reference<K, V> next;

  public SoftEntryReference(Entry<K, V> entry, int hash, @Nullable Reference<K, V> next,
      ReferenceQueue<Entry<K, V>> queue) {
    super(entry, queue);
    this.hash = hash;
    this.next = next;
  }

  @Override
  public int getHash() {
    return this.hash;
  }

  @Override
  @Nullable
  public Reference<K, V> getNext() {
    return this.next;
  }

  @Override
  public void release() {
    // Enqueue first so the reference is guaranteed to surface from
    // pollForPurge(), then drop the referent.
    enqueue();
    clear();
  }
}
/**
 * Internal {@link Reference} implementation backed by a {@link WeakReference}.
 */
private static final class WeakEntryReference<K, V> extends WeakReference<Entry<K, V>> implements Reference<K, V> {

  private final int hash;

  // Next reference in the chain, or null at the end of the chain.
  @Nullable
  private final Reference<K, V> next;

  public WeakEntryReference(Entry<K, V> entry, int hash, @Nullable Reference<K, V> next,
      ReferenceQueue<Entry<K, V>> queue) {
    super(entry, queue);
    this.hash = hash;
    this.next = next;
  }

  @Override
  public int getHash() {
    return this.hash;
  }

  @Override
  @Nullable
  public Reference<K, V> getNext() {
    return this.next;
  }

  @Override
  public void release() {
    // Enqueue first so the reference is guaranteed to surface from
    // pollForPurge(), then drop the referent.
    enqueue();
    clear();
  }
}
}
| 7,281 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/springframework/Assert.java | /*
* Copyright 2002-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.springframework;
import org.apache.avro.reflect.Nullable;
/**
* Assertion utility class that assists in validating arguments.
*
* <p>
* Useful for identifying programmer errors early and clearly at runtime.
*
* <p>
* For example, if the contract of a public method states it does not allow
* {@code null} arguments, {@code Assert} can be used to validate that contract.
* Doing this clearly indicates a contract violation when it occurs and protects
* the class's invariants.
*
* <p>
* Typically used to validate method arguments rather than configuration
* properties, to check for cases that are usually programmer errors rather than
* configuration errors. In contrast to configuration initialization code, there
* is usually no point in falling back to defaults in such methods.
*
* <p>
* This class is similar to JUnit's assertion library. If an argument value is
* deemed invalid, an {@link IllegalArgumentException} is thrown (typically).
* For example:
*
* <pre class="code">
* Assert.notNull(clazz, "The class must not be null");
* Assert.isTrue(i > 0, "The value must be greater than zero");
* </pre>
*
* <p>
* Mainly for internal use within the framework; for a more comprehensive suite
* of assertion utilities consider {@code org.apache.commons.lang3.Validate}
* from <a href="https://commons.apache.org/proper/commons-lang/">Apache Commons
* Lang</a>, Google Guava's <a href=
* "https://github.com/google/guava/wiki/PreconditionsExplained">Preconditions</a>,
* or similar third-party libraries.
*
* @author Keith Donald
* @author Juergen Hoeller
* @author Sam Brannen
* @author Colin Sampaleanu
* @author Rob Harrop
* @since 1.1.2
*/
class Assert {

  private Assert() {
    // static utility class; not instantiable
  }

  /**
   * Assert a boolean expression, throwing an {@code IllegalStateException} if the
   * expression evaluates to {@code false}.
   *
   * <pre class="code">
   * Assert.state(id == null, "The id property must not already be initialized");
   * </pre>
   *
   * @param expression a boolean expression
   * @param message the exception message to use if the assertion fails
   * @throws IllegalStateException if {@code expression} is {@code false}
   */
  public static void state(boolean expression, String message) {
    if (expression) {
      return;
    }
    throw new IllegalStateException(message);
  }

  /**
   * Assert a boolean expression, throwing an {@code IllegalArgumentException} if
   * the expression evaluates to {@code false}.
   *
   * <pre class="code">
   * Assert.isTrue(i &gt; 0, "The value must be greater than zero");
   * </pre>
   *
   * @param expression a boolean expression
   * @param message the exception message to use if the assertion fails
   * @throws IllegalArgumentException if {@code expression} is {@code false}
   */
  public static void isTrue(boolean expression, String message) {
    if (expression) {
      return;
    }
    throw new IllegalArgumentException(message);
  }

  /**
   * Assert that an object is not {@code null}.
   *
   * <pre class="code">
   * Assert.notNull(clazz, "The class must not be null");
   * </pre>
   *
   * @param object the object to check
   * @param message the exception message to use if the assertion fails
   * @throws IllegalArgumentException if the object is {@code null}
   */
  public static void notNull(@Nullable Object object, String message) {
    if (object != null) {
      return;
    }
    throw new IllegalArgumentException(message);
  }
}
| 7,282 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/util/springframework/ObjectUtils.java | /*
* Copyright 2002-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.util.springframework;
import org.apache.avro.reflect.Nullable;
import org.apache.avro.util.ClassUtils;
import java.util.Arrays;
/**
* Miscellaneous object utility methods.
*
* <p>
* Mainly for internal use within the framework.
*
* <p>
* Thanks to Alex Ruiz for contributing several enhancements to this class!
*
* @author Juergen Hoeller
* @author Keith Donald
* @author Rod Johnson
* @author Rob Harrop
* @author Chris Beams
* @author Sam Brannen
* @see ClassUtils see CollectionUtils see StringUtils
* @since 19.03.2004
*/
class ObjectUtils {

  private ObjectUtils() {
    // static utility class; not instantiable
  }

  // Seed and multiplier for the content-based hash codes below. These values
  // intentionally differ from java.util.Arrays.hashCode (which seeds with 1),
  // so the loops must not be replaced by Arrays.hashCode without changing the
  // resulting hash values.
  private static final int INITIAL_HASH = 7;

  private static final int MULTIPLIER = 31;

  /**
   * Determine whether the given array is empty: i.e. {@code null} or of zero
   * length.
   *
   * @param array the array to check
   */
  public static boolean isEmpty(@Nullable Object[] array) {
    return (array == null || array.length == 0);
  }

  // ---------------------------------------------------------------------
  // Convenience methods for content-based equality/hash-code handling
  // ---------------------------------------------------------------------

  /**
   * Determine if the given objects are equal, returning {@code true} when both
   * are {@code null} and {@code false} when only one is {@code null}. Arrays
   * are compared by content ({@link Arrays#equals}) rather than by reference.
   *
   * @param o1 first Object to compare
   * @param o2 second Object to compare
   * @return whether the given objects are equal
   */
  public static boolean nullSafeEquals(@Nullable Object o1, @Nullable Object o2) {
    if (o1 == o2) {
      return true;
    }
    if (o1 == null || o2 == null) {
      return false;
    }
    if (o1.equals(o2)) {
      return true;
    }
    return (o1.getClass().isArray() && o2.getClass().isArray() && arrayEquals(o1, o2));
  }

  /**
   * Compare two arrays element-wise with the matching {@link Arrays#equals}
   * overload; returns {@code false} when the component types do not line up.
   *
   * @param a1 first array to compare
   * @param a2 second array to compare
   * @return whether the given arrays are equal
   */
  private static boolean arrayEquals(Object a1, Object a2) {
    if (a1 instanceof Object[] && a2 instanceof Object[]) {
      return Arrays.equals((Object[]) a1, (Object[]) a2);
    }
    if (a1 instanceof boolean[] && a2 instanceof boolean[]) {
      return Arrays.equals((boolean[]) a1, (boolean[]) a2);
    }
    if (a1 instanceof byte[] && a2 instanceof byte[]) {
      return Arrays.equals((byte[]) a1, (byte[]) a2);
    }
    if (a1 instanceof char[] && a2 instanceof char[]) {
      return Arrays.equals((char[]) a1, (char[]) a2);
    }
    if (a1 instanceof double[] && a2 instanceof double[]) {
      return Arrays.equals((double[]) a1, (double[]) a2);
    }
    if (a1 instanceof float[] && a2 instanceof float[]) {
      return Arrays.equals((float[]) a1, (float[]) a2);
    }
    if (a1 instanceof int[] && a2 instanceof int[]) {
      return Arrays.equals((int[]) a1, (int[]) a2);
    }
    if (a1 instanceof long[] && a2 instanceof long[]) {
      return Arrays.equals((long[]) a1, (long[]) a2);
    }
    if (a1 instanceof short[] && a2 instanceof short[]) {
      return Arrays.equals((short[]) a1, (short[]) a2);
    }
    return false;
  }

  /**
   * Return a hash code for the given object; typically the value of
   * {@code Object#hashCode()}. Arrays are hashed by content via the matching
   * {@code nullSafeHashCode} overload, and {@code null} hashes to 0.
   */
  public static int nullSafeHashCode(@Nullable Object obj) {
    if (obj == null) {
      return 0;
    }
    if (obj.getClass().isArray()) {
      if (obj instanceof Object[]) {
        return nullSafeHashCode((Object[]) obj);
      }
      if (obj instanceof boolean[]) {
        return nullSafeHashCode((boolean[]) obj);
      }
      if (obj instanceof byte[]) {
        return nullSafeHashCode((byte[]) obj);
      }
      if (obj instanceof char[]) {
        return nullSafeHashCode((char[]) obj);
      }
      if (obj instanceof double[]) {
        return nullSafeHashCode((double[]) obj);
      }
      if (obj instanceof float[]) {
        return nullSafeHashCode((float[]) obj);
      }
      if (obj instanceof int[]) {
        return nullSafeHashCode((int[]) obj);
      }
      if (obj instanceof long[]) {
        return nullSafeHashCode((long[]) obj);
      }
      if (obj instanceof short[]) {
        return nullSafeHashCode((short[]) obj);
      }
    }
    return obj.hashCode();
  }

  /** Content-based hash code for the given array; 0 when {@code null}. */
  public static int nullSafeHashCode(@Nullable Object[] array) {
    if (array == null) {
      return 0;
    }
    int result = INITIAL_HASH;
    for (Object elem : array) {
      result = MULTIPLIER * result + nullSafeHashCode(elem);
    }
    return result;
  }

  /** Content-based hash code for the given array; 0 when {@code null}. */
  public static int nullSafeHashCode(@Nullable boolean[] array) {
    if (array == null) {
      return 0;
    }
    int result = INITIAL_HASH;
    for (boolean elem : array) {
      result = MULTIPLIER * result + Boolean.hashCode(elem);
    }
    return result;
  }

  /** Content-based hash code for the given array; 0 when {@code null}. */
  public static int nullSafeHashCode(@Nullable byte[] array) {
    if (array == null) {
      return 0;
    }
    int result = INITIAL_HASH;
    for (byte elem : array) {
      result = MULTIPLIER * result + elem;
    }
    return result;
  }

  /** Content-based hash code for the given array; 0 when {@code null}. */
  public static int nullSafeHashCode(@Nullable char[] array) {
    if (array == null) {
      return 0;
    }
    int result = INITIAL_HASH;
    for (char elem : array) {
      result = MULTIPLIER * result + elem;
    }
    return result;
  }

  /** Content-based hash code for the given array; 0 when {@code null}. */
  public static int nullSafeHashCode(@Nullable double[] array) {
    if (array == null) {
      return 0;
    }
    int result = INITIAL_HASH;
    for (double elem : array) {
      result = MULTIPLIER * result + Double.hashCode(elem);
    }
    return result;
  }

  /** Content-based hash code for the given array; 0 when {@code null}. */
  public static int nullSafeHashCode(@Nullable float[] array) {
    if (array == null) {
      return 0;
    }
    int result = INITIAL_HASH;
    for (float elem : array) {
      result = MULTIPLIER * result + Float.hashCode(elem);
    }
    return result;
  }

  /** Content-based hash code for the given array; 0 when {@code null}. */
  public static int nullSafeHashCode(@Nullable int[] array) {
    if (array == null) {
      return 0;
    }
    int result = INITIAL_HASH;
    for (int elem : array) {
      result = MULTIPLIER * result + elem;
    }
    return result;
  }

  /** Content-based hash code for the given array; 0 when {@code null}. */
  public static int nullSafeHashCode(@Nullable long[] array) {
    if (array == null) {
      return 0;
    }
    int result = INITIAL_HASH;
    for (long elem : array) {
      result = MULTIPLIER * result + Long.hashCode(elem);
    }
    return result;
  }

  /** Content-based hash code for the given array; 0 when {@code null}. */
  public static int nullSafeHashCode(@Nullable short[] array) {
    if (array == null) {
      return 0;
    }
    int result = INITIAL_HASH;
    for (short elem : array) {
      result = MULTIPLIER * result + elem;
    }
    return result;
  }
}
| 7,283 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/ZstandardLoader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import com.github.luben.zstd.BufferPool;
import com.github.luben.zstd.NoPool;
import com.github.luben.zstd.RecyclingBufferPool;
import com.github.luben.zstd.Zstd;
import com.github.luben.zstd.ZstdInputStreamNoFinalizer;
import com.github.luben.zstd.ZstdOutputStreamNoFinalizer;
/* causes lazier classloader initialization of ZStandard libraries, so that
* we get NoClassDefFoundError when we try and use the Codec's compress
* or decompress methods rather than when we instantiate it */
final class ZstandardLoader {

  /** Wrap {@code compressed} in a zstd-decompressing stream. */
  static InputStream input(InputStream compressed, boolean useBufferPool) throws IOException {
    BufferPool pool = useBufferPool ? RecyclingBufferPool.INSTANCE : NoPool.INSTANCE;
    return new ZstdInputStreamNoFinalizer(compressed, pool);
  }

  /**
   * Wrap {@code compressed} in a zstd-compressing stream.
   *
   * @param level requested compression level, clamped into zstd's supported range
   * @param checksum whether each frame carries a checksum
   * @param useBufferPool whether to use the recycling buffer pool
   */
  static OutputStream output(OutputStream compressed, int level, boolean checksum, boolean useBufferPool)
      throws IOException {
    // Clamp the requested level into [minCompressionLevel, maxCompressionLevel].
    int bounded = Math.min(Zstd.maxCompressionLevel(), Math.max(Zstd.minCompressionLevel(), level));
    BufferPool pool = useBufferPool ? RecyclingBufferPool.INSTANCE : NoPool.INSTANCE;
    ZstdOutputStreamNoFinalizer stream = new ZstdOutputStreamNoFinalizer(compressed, pool).setLevel(bounded);
    // Keep the frame open across flush() calls so flushing does not end the frame.
    stream.setCloseFrameOnFlush(false);
    stream.setChecksum(checksum);
    return stream;
  }
}
| 7,284 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/BZip2Codec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.avro.util.NonCopyingByteArrayOutputStream;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream;
import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream;
/** Implements bzip2 compression and decompression. */
public class BZip2Codec extends Codec {

  public static final int DEFAULT_BUFFER_SIZE = 64 * 1024;

  // Scratch buffer reused across decompress() calls; instances are therefore
  // not safe for concurrent use.
  private final byte[] buffer = new byte[DEFAULT_BUFFER_SIZE];

  static class Option extends CodecFactory {
    @Override
    protected Codec createInstance() {
      return new BZip2Codec();
    }
  }

  @Override
  public String getName() {
    return DataFileConstants.BZIP2_CODEC;
  }

  @Override
  public ByteBuffer compress(ByteBuffer uncompressedData) throws IOException {
    NonCopyingByteArrayOutputStream baos = new NonCopyingByteArrayOutputStream(DEFAULT_BUFFER_SIZE);
    try (BZip2CompressorOutputStream outputStream = new BZip2CompressorOutputStream(baos)) {
      outputStream.write(uncompressedData.array(), computeOffset(uncompressedData), uncompressedData.remaining());
    }
    return baos.asByteBuffer();
  }

  @Override
  public ByteBuffer decompress(ByteBuffer compressedData) throws IOException {
    ByteArrayInputStream bais = new ByteArrayInputStream(compressedData.array(), computeOffset(compressedData),
        compressedData.remaining());
    @SuppressWarnings("resource")
    NonCopyingByteArrayOutputStream baos = new NonCopyingByteArrayOutputStream(DEFAULT_BUFFER_SIZE);
    try (BZip2CompressorInputStream inputStream = new BZip2CompressorInputStream(bais)) {
      // Always fill the scratch buffer from offset 0. The previous code passed
      // compressedData.position() as the *destination* offset into `buffer`,
      // which throws IndexOutOfBoundsException whenever the input buffer's
      // position is non-zero (offset + buffer.length > buffer.length).
      int readCount;
      while ((readCount = inputStream.read(buffer, 0, buffer.length)) > 0) {
        baos.write(buffer, 0, readCount);
      }
      return baos.asByteBuffer();
    }
  }

  @Override
  public int hashCode() {
    return getName().hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    return obj != null && obj.getClass() == getClass();
  }
}
| 7,285 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/SnappyCodec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.IOException;
import java.nio.Buffer;
import java.nio.ByteBuffer;
import java.util.zip.CRC32;
import org.xerial.snappy.Snappy;
/** Implements Snappy compression and decompression. */
public class SnappyCodec extends Codec {

  // Reused for checksum computation; instances are not safe for concurrent use.
  private CRC32 crc32 = new CRC32();

  static class Option extends CodecFactory {
    static {
      // if snappy isn't available, this will throw an exception which we
      // can catch so we can avoid registering this codec
      Snappy.getNativeLibraryVersion();
    }

    @Override
    protected Codec createInstance() {
      return new SnappyCodec();
    }
  }

  private SnappyCodec() {
  }

  @Override
  public String getName() {
    return DataFileConstants.SNAPPY_CODEC;
  }

  /**
   * Compress the remaining bytes of {@code in}. The output layout is the Snappy
   * payload followed by a 4-byte CRC32 of the *uncompressed* data.
   */
  @Override
  public ByteBuffer compress(ByteBuffer in) throws IOException {
    int offset = computeOffset(in);
    // Worst-case compressed size, plus 4 trailing bytes for the CRC32.
    ByteBuffer out = ByteBuffer.allocate(Snappy.maxCompressedLength(in.remaining()) + 4);
    int size = Snappy.compress(in.array(), offset, in.remaining(), out.array(), 0);
    crc32.reset();
    crc32.update(in.array(), offset, in.remaining());
    // Absolute put: the checksum lands immediately after the compressed bytes.
    out.putInt(size, (int) crc32.getValue());
    ((Buffer) out).limit(size + 4);
    return out;
  }

  /**
   * Decompress {@code in}, verifying the trailing 4-byte CRC32 of the
   * uncompressed data.
   */
  @Override
  public ByteBuffer decompress(ByteBuffer in) throws IOException {
    int offset = computeOffset(in);
    // The final 4 bytes are the CRC32 trailer, not compressed data.
    ByteBuffer out = ByteBuffer.allocate(Snappy.uncompressedLength(in.array(), offset, in.remaining() - 4));
    int size = Snappy.uncompress(in.array(), offset, in.remaining() - 4, out.array(), 0);
    ((Buffer) out).limit(size);
    crc32.reset();
    crc32.update(out.array(), 0, size);
    if (in.getInt(((Buffer) in).limit() - 4) != (int) crc32.getValue())
      throw new IOException("Checksum failure");
    return out;
  }

  @Override
  public int hashCode() {
    return getName().hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    return obj != null && obj.getClass() == getClass();
  }
}
| 7,286 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/DataFileReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.File;
import java.util.Arrays;
import org.apache.avro.InvalidAvroMagicException;
import org.apache.avro.io.DecoderFactory;
import org.apache.commons.compress.utils.IOUtils;
import org.apache.avro.io.DatumReader;
import static org.apache.avro.file.DataFileConstants.SYNC_SIZE;
import static org.apache.avro.file.DataFileConstants.MAGIC;
/**
* Random access to files written with {@link DataFileWriter}.
*
* @see DataFileWriter
*/
public class DataFileReader<D> extends DataFileStream<D> implements FileReader<D> {
private SeekableInputStream sin;
private long blockStart;
private int[] partialMatchTable;
/** Open a reader for a file. */
public static <D> FileReader<D> openReader(File file, DatumReader<D> reader) throws IOException {
  SeekableFileInput in = new SeekableFileInput(file);
  try {
    return openReader(in, reader);
  } catch (final Throwable t) {
    // Make sure the file handle does not leak when header parsing fails.
    IOUtils.closeQuietly(in);
    throw t;
  }
}
/** Open a reader for a file, dispatching on the format version in the magic header. */
public static <D> FileReader<D> openReader(SeekableInput in, DatumReader<D> reader) throws IOException {
  if (in.length() < MAGIC.length)
    throw new InvalidAvroMagicException("Not an Avro data file");
  // Read the magic header fully, tolerating short reads.
  byte[] magic = new byte[MAGIC.length];
  in.seek(0);
  int read = 0;
  while (read < magic.length) {
    int n = in.read(magic, read, magic.length - read);
    if (n < 0)
      throw new EOFException("Unexpected EOF with " + (magic.length - read) + " bytes remaining to read");
    read += n;
  }
  if (Arrays.equals(MAGIC, magic)) // current format
    return new DataFileReader<>(in, reader, magic);
  if (Arrays.equals(DataFileReader12.MAGIC, magic)) // 1.2 format
    return new DataFileReader12<>(in, reader);
  throw new InvalidAvroMagicException("Not an Avro data file");
}
/**
 * Construct a reader for a file at the current position of the input, without
 * reading the header.
 *
 * @param sync True to read forward to the next sync point after opening, false
 *             to assume that the input is already at a valid sync point.
 */
public static <D> DataFileReader<D> openReader(SeekableInput in, DatumReader<D> reader, Header header, boolean sync)
    throws IOException {
  DataFileReader<D> result = new DataFileReader<>(in, reader, header);
  long position = in.tell();
  // Either scan forward for the next sync marker, or trust the current position.
  if (sync) {
    result.sync(position);
  } else {
    result.seek(position);
  }
  return result;
}
/**
 * Construct a reader for a file. The returned reader holds the file open and
 * must be closed by the caller, e.g. with try-with-resources:
 *
 * <pre>
 * try (FileReader&lt;User&gt; dataFileReader = DataFileReader.openReader(file, datumReader)) {
 *   // Consume the reader
 * } catch (IOException e) {
 *   throw new RunTimeIOException(e, "Failed to read metadata for file: %s", file);
 * }
 * </pre>
 */
public DataFileReader(File file, DatumReader<D> reader) throws IOException {
  // closeOnError = true: this constructor owns the input it just opened.
  this(new SeekableFileInput(file), reader, true, null);
}

/**
 * Construct a reader for a seekable input. The reader must be closed by the
 * caller; use try-with-resources as shown in
 * {@link #DataFileReader(File, DatumReader)}.
 */
public DataFileReader(SeekableInput sin, DatumReader<D> reader) throws IOException {
  // closeOnError = false: the caller owns the input.
  this(sin, reader, false, null);
}

// Used by openReader() once the magic header has already been consumed
// (magic is passed along so initialize() does not re-read it).
private DataFileReader(SeekableInput sin, DatumReader<D> reader, byte[] magic) throws IOException {
  this(sin, reader, false, magic);
}

/**
 * Construct a reader for a file. Please close resource files yourself.
 *
 * <p>{@code closeOnError} controls whether {@code sin} is closed when
 * initialization fails; {@code magic} holds the already-read magic bytes, or
 * {@code null} if the header has not been consumed yet.
 */
protected DataFileReader(SeekableInput sin, DatumReader<D> reader, boolean closeOnError, byte[] magic)
    throws IOException {
  super(reader);
  try {
    this.sin = new SeekableInputStream(sin);
    initialize(this.sin, magic);
    blockFinished();
  } catch (final Throwable e) {
    if (closeOnError) {
      IOUtils.closeQuietly(sin);
    }
    throw e;
  }
}

/**
 * Construct using a {@link DataFileStream.Header}. Does not call
 * {@link #sync(long)} or {@link #seek(long)}.
 */
protected DataFileReader(SeekableInput sin, DatumReader<D> reader, Header header) throws IOException {
  super(reader);
  this.sin = new SeekableInputStream(sin);
  initialize(header);
}
/**
 * Move to a specific, known synchronization point, one returned from
 * {@link DataFileWriter#sync()} while writing. If synchronization points were
 * not saved while writing a file, use {@link #sync(long)} instead.
 */
public void seek(long position) throws IOException {
  sin.seek(position);
  // Re-create the decoder so any bytes buffered from the old position are
  // discarded.
  vin = DecoderFactory.get().binaryDecoder(this.sin, vin);
  datumIn = null; // drop any partially-decoded block
  blockRemaining = 0;
  blockStart = position;
}
/**
 * Move to the next synchronization point after a position. To process a range
 * of file entries, call this with the starting position, then check
 * {@link #pastSync(long)} with the end point before each call to
 * {@link #next()}.
 */
@Override
public void sync(final long position) throws IOException {
  seek(position);
  // work around an issue where 1.5.4 C stored sync in metadata
  if ((position == 0L) && (getMeta("avro.sync") != null)) {
    initialize(sin, null); // re-init to skip header
    return;
  }
  // Lazily build (and cache) the KMP failure table for this file's sync marker.
  if (this.partialMatchTable == null) {
    this.partialMatchTable = computePartialMatchTable(getHeader().sync);
  }
  final byte[] sync = getHeader().sync;
  final InputStream in = vin.inputStream();
  final int[] pm = this.partialMatchTable;
  // Search for the sequence of bytes in the stream using Knuth-Morris-Pratt
  long i = 0L; // number of bytes consumed so far
  for (int b = in.read(), j = 0; b != -1; b = in.read(), i++) {
    final byte cb = (byte) b;
    // Fall back along the failure table until cb can extend the current match.
    while (j > 0 && sync[j] != cb) {
      j = pm[j - 1];
    }
    if (sync[j] == cb) {
      j++;
    }
    if (j == SYNC_SIZE) {
      // Full marker matched: the next block starts immediately after it.
      this.blockStart = position + i + 1L;
      return;
    }
  }
  // if no match set start to the end position
  blockStart = sin.tell();
}
/**
 * Compute the Knuth-Morris-Pratt partial match (failure) table: entry i is
 * the length of the longest proper prefix of pattern[0..i] that is also a
 * suffix of it.
 *
 * @param pattern The pattern being searched
 * @return the pre-computed partial match table
 *
 * @see <a href= "https://github.com/williamfiset/Algorithms">William Fiset
 *      Algorithms</a>
 */
private int[] computePartialMatchTable(final byte[] pattern) {
  final int[] table = new int[pattern.length];
  int prefixLen = 0; // length of the current matched prefix
  for (int i = 1; i < pattern.length; i++) {
    // Shrink the candidate prefix until it can be extended, or is empty.
    while (prefixLen > 0 && pattern[i] != pattern[prefixLen]) {
      prefixLen = table[prefixLen - 1];
    }
    if (pattern[i] == pattern[prefixLen]) {
      prefixLen++;
    }
    table[i] = prefixLen;
  }
  return table;
}
// Record where the next block starts: the raw input position minus whatever
// the decoder has buffered but not yet consumed.
@Override
protected void blockFinished() throws IOException {
  blockStart = sin.tell() - vin.inputStream().available();
}
/**
 * Return the last synchronization point before our current position, i.e.
 * the start of the current block.
 */
public long previousSync() {
  return blockStart;
}
/** Return true if past the next synchronization point after a position. */
@Override
public boolean pastSync(long position) throws IOException {
  // Past the sync when the current block begins at least SYNC_SIZE bytes
  // beyond 'position', or when the end of the input has been reached.
  return ((blockStart >= position + SYNC_SIZE) || (blockStart >= sin.length()));
}
/** Return the current byte position in the raw input. */
@Override
public long tell() throws IOException {
  return sin.tell();
}
/**
 * Adapts a {@link SeekableInput} to the {@link InputStream} API while still
 * exposing seek/tell/length.
 */
static class SeekableInputStream extends InputStream implements SeekableInput {
  private final byte[] oneByte = new byte[1]; // scratch buffer for read()
  private SeekableInput in;

  SeekableInputStream(SeekableInput in) throws IOException {
    this.in = in;
  }

  @Override
  public void seek(long p) throws IOException {
    if (p < 0)
      throw new IOException("Illegal seek: " + p);
    in.seek(p);
  }

  @Override
  public long tell() throws IOException {
    return in.tell();
  }

  @Override
  public long length() throws IOException {
    return in.length();
  }

  @Override
  public int read(byte[] b) throws IOException {
    return in.read(b, 0, b.length);
  }

  @Override
  public int read(byte[] b, int off, int len) throws IOException {
    return in.read(b, off, len);
  }

  @Override
  public int read() throws IOException {
    int n = read(oneByte, 0, 1);
    if (n == 1) {
      return oneByte[0] & 0xff;
    } else {
      return n; // -1 at end of input
    }
  }

  /**
   * Skip up to {@code skip} bytes forward from the current position,
   * returning the number of bytes actually skipped.
   */
  @Override
  public long skip(long skip) throws IOException {
    // InputStream.skip is defined relative to the current position; the
    // previous implementation seeked to the ABSOLUTE offset 'skip' (and, when
    // fewer bytes remained, to the absolute offset 'remaining'), which moved
    // the stream to the wrong place and could even seek backwards.
    if (skip <= 0) {
      return 0; // per the InputStream contract, a non-positive skip is a no-op
    }
    long position = in.tell();
    long remaining = in.length() - position;
    in.seek(position + Math.min(skip, remaining));
    return in.tell() - position;
  }

  @Override
  public void close() throws IOException {
    in.close();
    super.close();
  }

  @Override
  public int available() throws IOException {
    // Clamp to int as required by the InputStream API.
    long remaining = (in.length() - in.tell());
    return (remaining > Integer.MAX_VALUE) ? Integer.MAX_VALUE : (int) remaining;
  }
}
}
| 7,287 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/DataFileReader12.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.IOException;
import java.io.Closeable;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.avro.InvalidAvroMagicException;
import org.apache.avro.Schema;
import org.apache.avro.UnknownAvroCodecException;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.BinaryDecoder;
/** Read files written by Avro version 1.2. */
public class DataFileReader12<D> implements FileReader<D>, Closeable {
  // Version 1.2 files used format version byte 0 in the magic header.
  private static final byte VERSION = 0;
  static final byte[] MAGIC = new byte[] { (byte) 'O', (byte) 'b', (byte) 'j', VERSION };
  // Sentinel "block count" identifying the footer block rather than data.
  private static final long FOOTER_BLOCK = -1;
  private static final int SYNC_SIZE = 16;
  // 1.2 metadata keys were not namespaced with the later "avro." prefix.
  private static final String SCHEMA = "schema";
  private static final String SYNC = "sync";
  private static final String CODEC = "codec";
  private static final String NULL_CODEC = "null";

  private Schema schema;
  private DatumReader<D> reader;
  private DataFileReader.SeekableInputStream in;
  private BinaryDecoder vin;

  private Map<String, byte[]> meta = new HashMap<>();

  private long blockCount; // # entries in block
  private long blockStart; // position of the start of the current block
  private byte[] sync = new byte[SYNC_SIZE]; // replaced by the marker from the footer
  private byte[] syncBuffer = new byte[SYNC_SIZE]; // scratch for sync comparisons

  /** Construct a reader for a file. */
  public DataFileReader12(SeekableInput sin, DatumReader<D> reader) throws IOException {
    this.in = new DataFileReader.SeekableInputStream(sin);

    byte[] magic = new byte[4];
    in.seek(0); // seek to 0 to read magic header
    in.read(magic);
    if (!Arrays.equals(MAGIC, magic))
      throw new InvalidAvroMagicException("Not a data file.");

    // The last four bytes of a 1.2 file hold the footer size, big-endian.
    long length = in.length();
    in.seek(length - 4);
    int footerSize = (in.read() << 24) + (in.read() << 16) + (in.read() << 8) + in.read();
    seek(length - footerSize);
    // The footer is an Avro-encoded map of metadata properties.
    long l = vin.readMapStart();
    if (l > 0) {
      do {
        for (long i = 0; i < l; i++) {
          String key = vin.readString(null).toString();
          ByteBuffer value = vin.readBytes(null);
          byte[] bb = new byte[value.remaining()];
          value.get(bb);
          meta.put(key, bb);
        }
      } while ((l = vin.mapNext()) != 0);
    }

    this.sync = getMeta(SYNC);
    String codec = getMetaString(CODEC);
    // Version 1.2 supported no compression; anything but "null" is unreadable.
    if (codec != null && !codec.equals(NULL_CODEC)) {
      throw new UnknownAvroCodecException("Unknown codec: " + codec);
    }
    this.schema = new Schema.Parser().parse(getMetaString(SCHEMA));
    this.reader = reader;

    reader.setSchema(schema);

    seek(MAGIC.length); // seek to start
  }

  /** Return the value of a metadata property. */
  public synchronized byte[] getMeta(String key) {
    return meta.get(key);
  }

  /** Return the value of a metadata property, decoded as a UTF-8 string. */
  public synchronized String getMetaString(String key) {
    byte[] value = getMeta(key);
    if (value == null) {
      return null;
    }
    return new String(value, StandardCharsets.UTF_8);
  }

  /** Return the value of a metadata property, parsed as a decimal long. */
  public synchronized long getMetaLong(String key) {
    return Long.parseLong(getMetaString(key));
  }

  /** Return the schema used in this file. */
  @Override
  public Schema getSchema() {
    return schema;
  }

  // Iterator and Iterable implementation
  private D peek; // datum read ahead by hasNext(), handed back by next()

  @Override
  public Iterator<D> iterator() {
    return this;
  }

  @Override
  public boolean hasNext() {
    if (peek != null || blockCount != 0)
      return true;
    this.peek = next();
    return peek != null;
  }

  @Override
  public D next() {
    if (peek != null) {
      D result = peek;
      peek = null;
      return result;
    }
    try {
      // NOTE(review): returns null at end-of-file rather than throwing
      // NoSuchElementException as Iterator.next() specifies -- confirm
      // callers rely on the null convention before changing this.
      return next(null);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }

  /** Return the next datum in the file. */
  @Override
  public synchronized D next(D reuse) throws IOException {
    while (blockCount == 0) { // at start of block
      if (in.tell() == in.length()) // at eof
        return null;
      skipSync(); // skip a sync
      blockCount = vin.readLong(); // read blockCount
      if (blockCount == FOOTER_BLOCK) {
        // The footer sentinel is followed by the footer length; hop over it.
        seek(vin.readLong() + in.tell()); // skip a footer
      }
    }
    blockCount--;
    return reader.read(reuse, vin);
  }

  // Read the 16-byte marker expected at the current position and verify it
  // matches this file's sync marker.
  private void skipSync() throws IOException {
    vin.readFixed(syncBuffer);
    if (!Arrays.equals(syncBuffer, sync))
      throw new IOException("Invalid sync!");
  }

  /**
   * Move to the specified synchronization point, as returned by
   * {@link DataFileWriter#sync()}.
   */
  public synchronized void seek(long position) throws IOException {
    in.seek(position);
    blockCount = 0;
    blockStart = position;
    // Re-create the decoder so no buffered bytes from the old position survive.
    vin = DecoderFactory.get().binaryDecoder(in, vin);
  }

  /** Move to the next synchronization point after a position. */
  @Override
  public synchronized void sync(long position) throws IOException {
    if (in.tell() + SYNC_SIZE >= in.length()) {
      seek(in.length());
      return;
    }
    in.seek(position);
    vin.readFixed(syncBuffer);
    // Scan byte-by-byte, treating syncBuffer as a circular window over the
    // last SYNC_SIZE bytes, until the window equals the sync marker.
    // NOTE(review): this mixes decoder reads (vin.readFixed) with raw reads
    // (in.read()); if the decoder buffers ahead of 'in', positions could be
    // inconsistent -- confirm the decoder created in seek() reads unbuffered.
    for (int i = 0; in.tell() < in.length(); i++) {
      int j = 0;
      for (; j < sync.length; j++) {
        if (sync[j] != syncBuffer[(i + j) % sync.length])
          break;
      }
      if (j == sync.length) { // position before sync
        seek(in.tell() - SYNC_SIZE);
        return;
      }
      syncBuffer[i % sync.length] = (byte) in.read();
    }
    seek(in.length());
  }

  /** Return true if past the next synchronization point after a position. */
  @Override
  public boolean pastSync(long position) throws IOException {
    return ((blockStart >= position + SYNC_SIZE) || (blockStart >= in.length()));
  }

  /** Return the current position in the input. */
  @Override
  public long tell() throws IOException {
    return in.tell();
  }

  /** Close this reader. */
  @Override
  public synchronized void close() throws IOException {
    in.close();
  }
}
| 7,288 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/DataFileWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilterOutputStream;
import java.io.Flushable;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileStream.DataBlock;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.util.NonCopyingByteArrayOutputStream;
import org.apache.commons.compress.utils.IOUtils;
/**
 * Stores in a file a sequence of data conforming to a schema. The schema is
 * stored in the file with the data. Each datum in a file is of the same schema.
 * Data is written with a {@link DatumWriter}. Data is grouped into
 * <i>blocks</i>. A synchronization marker is written between blocks, so that
 * files may be split. Blocks may be compressed. Extensible metadata is stored
 * at the end of the file. Files may be appended to.
 *
 * @see DataFileReader
 */
public class DataFileWriter<D> implements Closeable, Flushable {
  private Schema schema;
  private DatumWriter<D> dout;

  private OutputStream underlyingStream; // stream exactly as supplied by the caller (used by fSync)
  private BufferedFileOutputStream out; // position-tracking buffered view of underlyingStream
  private BinaryEncoder vout; // encoder writing directly to 'out'

  private final Map<String, byte[]> meta = new HashMap<>();

  private long blockCount; // # entries in current block

  private NonCopyingByteArrayOutputStream buffer; // accumulates the current (uncompressed) block
  private BinaryEncoder bufOut; // encoder writing into 'buffer'

  private byte[] sync; // 16 random bytes
  private int syncInterval = DataFileConstants.DEFAULT_SYNC_INTERVAL;

  private boolean isOpen;
  private Codec codec;

  private boolean flushOnEveryBlock = true;

  /** Construct a writer, not yet open. */
  public DataFileWriter(DatumWriter<D> dout) {
    this.dout = dout;
  }

  // Guard for operations that require create()/appendTo() to have been called.
  private void assertOpen() {
    if (!isOpen)
      throw new AvroRuntimeException("not open");
  }

  // Guard for configuration that must happen before the writer is opened.
  private void assertNotOpen() {
    if (isOpen)
      throw new AvroRuntimeException("already open");
  }

  /**
   * Configures this writer to use the given codec. May not be reset after writes
   * have begun.
   */
  public DataFileWriter<D> setCodec(CodecFactory c) {
    assertNotOpen();
    this.codec = c.createInstance();
    setMetaInternal(DataFileConstants.CODEC, codec.getName());
    return this;
  }

  /**
   * Set the synchronization interval for this file, in bytes. Valid values range
   * from 32 to 2^30 Suggested values are between 2K and 2M
   *
   * The stream is flushed by default at the end of each synchronization interval.
   *
   * If {@linkplain #setFlushOnEveryBlock(boolean)} is called with param set to
   * false, then the block may not be flushed to the stream after the sync marker
   * is written. In this case, the {@linkplain #flush()} must be called to flush
   * the stream.
   *
   * Invalid values throw IllegalArgumentException
   *
   * @param syncInterval the approximate number of uncompressed bytes to write in
   *                     each block
   * @return this DataFileWriter
   */
  public DataFileWriter<D> setSyncInterval(int syncInterval) {
    if (syncInterval < 32 || syncInterval > (1 << 30)) {
      throw new IllegalArgumentException("Invalid syncInterval value: " + syncInterval);
    }
    this.syncInterval = syncInterval;
    return this;
  }

  /** Open a new file for data matching a schema with a random sync. */
  public DataFileWriter<D> create(Schema schema, File file) throws IOException {
    SyncableFileOutputStream sfos = new SyncableFileOutputStream(file);
    try {
      return create(schema, sfos, null);
    } catch (final Throwable e) {
      // The stream was opened here, so it must be released if create fails.
      IOUtils.closeQuietly(sfos);
      throw e;
    }
  }

  /** Open a new file for data matching a schema with a random sync. */
  public DataFileWriter<D> create(Schema schema, OutputStream outs) throws IOException {
    return create(schema, outs, null);
  }

  /** Open a new file for data matching a schema with an explicit sync. */
  public DataFileWriter<D> create(Schema schema, OutputStream outs, byte[] sync) throws IOException {
    assertNotOpen();

    this.schema = schema;
    setMetaInternal(DataFileConstants.SCHEMA, schema.toString());
    if (sync == null) {
      this.sync = generateSync();
    } else if (sync.length == 16) {
      this.sync = sync;
    } else {
      throw new IOException("sync must be exactly 16 bytes");
    }

    init(outs);

    vout.writeFixed(DataFileConstants.MAGIC); // write magic

    vout.writeMapStart(); // write metadata
    vout.setItemCount(meta.size());
    for (Map.Entry<String, byte[]> entry : meta.entrySet()) {
      vout.startItem();
      vout.writeString(entry.getKey());
      vout.writeBytes(entry.getValue());
    }
    vout.writeMapEnd();
    vout.writeFixed(this.sync); // write initial sync
    vout.flush(); // vout may be buffered, flush before writing to out
    return this;
  }

  /**
   * Set whether this writer should flush the block to the stream every time a
   * sync marker is written. By default, the writer will flush the buffer each
   * time a sync marker is written (if the block size limit is reached or the
   * {@linkplain #sync()} is called.
   *
   * @param flushOnEveryBlock - If set to false, this writer will not flush the
   *                          block to the stream until {@linkplain #flush()} is
   *                          explicitly called.
   */
  public void setFlushOnEveryBlock(boolean flushOnEveryBlock) {
    this.flushOnEveryBlock = flushOnEveryBlock;
  }

  /**
   * @return - true if this writer flushes the block to the stream every time a
   *         sync marker is written. Else returns false.
   */
  public boolean isFlushOnEveryBlock() {
    return this.flushOnEveryBlock;
  }

  /** Open a writer appending to an existing file. */
  public DataFileWriter<D> appendTo(File file) throws IOException {
    try (SeekableInput input = new SeekableFileInput(file)) {
      OutputStream output = new SyncableFileOutputStream(file, true);
      // NOTE(review): if appendTo(input, output) throws, 'output' is never
      // closed -- confirm whether it should be released on that error path.
      return appendTo(input, output);
    }
    // output does not need to be closed here. It will be closed by invoking close()
    // of this writer.
  }

  /**
   * Open a writer appending to an existing file. <strong>Since 1.9.0 this method
   * does not close in.</strong>
   *
   * @param in  reading the existing file.
   * @param out positioned at the end of the existing file.
   */
  public DataFileWriter<D> appendTo(SeekableInput in, OutputStream out) throws IOException {
    assertNotOpen();
    // Read the existing file's header so schema, sync marker, metadata and
    // codec carry over to the appended blocks.
    DataFileReader<D> reader = new DataFileReader<>(in, new GenericDatumReader<>());
    this.schema = reader.getSchema();
    this.sync = reader.getHeader().sync;
    this.meta.putAll(reader.getHeader().meta);
    byte[] codecBytes = this.meta.get(DataFileConstants.CODEC);
    if (codecBytes != null) {
      String strCodec = new String(codecBytes, StandardCharsets.UTF_8);
      this.codec = CodecFactory.fromString(strCodec).createInstance();
    } else {
      this.codec = CodecFactory.nullCodec().createInstance();
    }

    init(out);

    return this;
  }

  // Shared setup for create() and appendTo(): wires the output stream,
  // encoders and block buffer, and marks the writer open.
  private void init(OutputStream outs) throws IOException {
    this.underlyingStream = outs;
    this.out = new BufferedFileOutputStream(outs);
    EncoderFactory efactory = new EncoderFactory();
    this.vout = efactory.directBinaryEncoder(out, null);
    dout.setSchema(schema);
    // Allow 25% slack over syncInterval before a block is forced out.
    buffer = new NonCopyingByteArrayOutputStream(Math.min((int) (syncInterval * 1.25), Integer.MAX_VALUE / 2 - 1));
    this.bufOut = efactory.directBinaryEncoder(buffer, null);
    if (this.codec == null) {
      this.codec = CodecFactory.nullCodec().createInstance();
    }
    this.isOpen = true;
  }

  // Produce a 16-byte sync marker; MD5 of a random UUID plus the current time.
  private static byte[] generateSync() {
    try {
      MessageDigest digester = MessageDigest.getInstance("MD5");
      long time = System.currentTimeMillis();
      digester.update((UUID.randomUUID() + "@" + time).getBytes(UTF_8));
      return digester.digest();
    } catch (NoSuchAlgorithmException e) {
      throw new RuntimeException(e);
    }
  }

  // Metadata setter that bypasses the reserved-key check (for internal keys).
  private DataFileWriter<D> setMetaInternal(String key, byte[] value) {
    assertNotOpen();
    meta.put(key, value);
    return this;
  }

  private DataFileWriter<D> setMetaInternal(String key, String value) {
    return setMetaInternal(key, value.getBytes(UTF_8));
  }

  /** Set a metadata property. */
  public DataFileWriter<D> setMeta(String key, byte[] value) {
    if (isReservedMeta(key)) {
      throw new AvroRuntimeException("Cannot set reserved meta key: " + key);
    }
    return setMetaInternal(key, value);
  }

  // Keys in the "avro." namespace are reserved for the container format itself.
  public static boolean isReservedMeta(String key) {
    return key.startsWith("avro.");
  }

  /** Set a metadata property. */
  public DataFileWriter<D> setMeta(String key, String value) {
    return setMeta(key, value.getBytes(UTF_8));
  }

  /** Set a metadata property. */
  public DataFileWriter<D> setMeta(String key, long value) {
    return setMeta(key, Long.toString(value));
  }

  /**
   * Thrown by {@link #append(Object)} when an exception occurs while writing a
   * datum to the buffer. When this is thrown, the file is unaltered and may
   * continue to be appended to.
   */
  public static class AppendWriteException extends RuntimeException {
    public AppendWriteException(Exception e) {
      super(e);
    }
  }

  /**
   * Append a datum to the file.
   *
   * @see AppendWriteException
   */
  public void append(D datum) throws IOException {
    assertOpen();
    int usedBuffer = bufferInUse();
    try {
      dout.write(datum, bufOut);
    } catch (IOException | RuntimeException e) {
      // Roll the buffer back so a partially-written datum never reaches the
      // file; the writer remains usable afterwards.
      resetBufferTo(usedBuffer);
      throw new AppendWriteException(e);
    }
    blockCount++;
    writeIfBlockFull();
  }

  // if there is an error encoding, flush the encoder and then
  // reset the buffer position to contain size bytes, discarding the rest.
  // Otherwise the file will be corrupt with a partial record.
  private void resetBufferTo(int size) throws IOException {
    bufOut.flush();
    byte[] data = buffer.toByteArray();
    buffer.reset();
    buffer.write(data, 0, size);
  }

  /**
   * Expert: Append a pre-encoded datum to the file. No validation is performed to
   * check that the encoding conforms to the file's schema. Appending
   * non-conforming data may result in an unreadable file.
   */
  public void appendEncoded(ByteBuffer datum) throws IOException {
    assertOpen();
    bufOut.writeFixed(datum);
    blockCount++;
    writeIfBlockFull();
  }

  // Bytes of the current block: flushed into 'buffer' plus still pending in
  // the encoder.
  private int bufferInUse() {
    return (buffer.size() + bufOut.bytesBuffered());
  }

  private void writeIfBlockFull() throws IOException {
    if (bufferInUse() >= syncInterval)
      writeBlock();
  }

  /**
   * Appends data from another file. otherFile must have the same schema. Data
   * blocks will be copied without de-serializing data. If the codecs of the two
   * files are compatible, data blocks are copied directly without decompression.
   * If the codecs are not compatible, blocks from otherFile are uncompressed and
   * then compressed using this file's codec.
   * <p/>
   * If the recompress flag is set all blocks are decompressed and then compressed
   * using this file's codec. This is useful when the two files have compatible
   * compression codecs but different codec options. For example, one might append
   * a file compressed with deflate at compression level 1 to a file with deflate
   * at compression level 7. If <i>recompress</i> is false, blocks will be copied
   * without changing the compression level. If true, they will be converted to
   * the new compression level.
   *
   * @param otherFile
   * @param recompress
   * @throws IOException
   */
  public void appendAllFrom(DataFileStream<D> otherFile, boolean recompress) throws IOException {
    assertOpen();
    // make sure other file has same schema
    Schema otherSchema = otherFile.getSchema();
    if (!this.schema.equals(otherSchema)) {
      throw new IOException("Schema from file " + otherFile + " does not match");
    }
    // flush anything written so far
    writeBlock();

    Codec otherCodec = otherFile.resolveCodec();
    DataBlock nextBlockRaw = null;
    if (codec.equals(otherCodec) && !recompress) {
      // copy raw bytes
      while (otherFile.hasNextBlock()) {
        nextBlockRaw = otherFile.nextRawBlock(nextBlockRaw);
        nextBlockRaw.writeBlockTo(vout, sync);
      }
    } else {
      while (otherFile.hasNextBlock()) {
        nextBlockRaw = otherFile.nextRawBlock(nextBlockRaw);
        nextBlockRaw.decompressUsing(otherCodec);
        nextBlockRaw.compressUsing(codec);
        nextBlockRaw.writeBlockTo(vout, sync);
      }
    }
  }

  // Compress and emit the buffered block (if any), followed by a sync marker.
  private void writeBlock() throws IOException {
    if (blockCount > 0) {
      try {
        bufOut.flush();
        ByteBuffer uncompressed = buffer.asByteBuffer();
        DataBlock block = new DataBlock(uncompressed, blockCount);
        block.setFlushOnWrite(flushOnEveryBlock);
        block.compressUsing(codec);
        block.writeBlockTo(vout, sync);
      } finally {
        // Reset even on failure so a broken block is not re-emitted later.
        buffer.reset();
        blockCount = 0;
      }
    }
  }

  /**
   * Return the current position as a value that may be passed to
   * {@link DataFileReader#seek(long)}. Forces the end of the current block,
   * emitting a synchronization marker. By default, this will also flush the block
   * to the stream.
   *
   * If {@linkplain #setFlushOnEveryBlock(boolean)} is called with param set to
   * false, then this method may not flush the block. In this case, the
   * {@linkplain #flush()} must be called to flush the stream.
   */
  public long sync() throws IOException {
    assertOpen();
    writeBlock();
    return out.tell();
  }

  /**
   * Calls {@linkplain #sync()} and then flushes the current state of the file.
   */
  @Override
  public void flush() throws IOException {
    sync();
    vout.flush();
  }

  /**
   * If this writer was instantiated using a {@linkplain File},
   * {@linkplain FileOutputStream} or {@linkplain Syncable} instance, this method
   * flushes all buffers for this writer to disk. In other cases, this method
   * behaves exactly like {@linkplain #flush()}.
   *
   * @throws IOException
   */
  public void fSync() throws IOException {
    flush();
    if (underlyingStream instanceof Syncable) {
      ((Syncable) underlyingStream).sync();
    } else if (underlyingStream instanceof FileOutputStream) {
      ((FileOutputStream) underlyingStream).getFD().sync();
    }
  }

  /** Flush and close the file. */
  @Override
  public void close() throws IOException {
    if (isOpen) {
      // NOTE(review): if flush() throws, 'out' is never closed and isOpen
      // stays true; a try/finally would guarantee release of the stream --
      // confirm callers before changing the error behavior.
      flush();
      out.close();
      isOpen = false;
    }
  }

  // BufferedOutputStream that can report its absolute position in the file.
  private class BufferedFileOutputStream extends BufferedOutputStream {
    private long position; // start of buffer

    // Counts bytes as they are flushed through to the underlying stream.
    private class PositionFilter extends FilterOutputStream {
      public PositionFilter(OutputStream out) throws IOException {
        super(out);
      }

      @Override
      public void write(byte[] b, int off, int len) throws IOException {
        out.write(b, off, len);
        position += len; // update on write
      }
    }

    public BufferedFileOutputStream(OutputStream out) throws IOException {
      super(null);
      // Interpose the position tracker between the buffer and the real stream.
      this.out = new PositionFilter(out);
    }

    // Absolute position: bytes written through plus bytes still buffered.
    public long tell() {
      return position + count;
    }

    @Override
    public synchronized void flush() throws IOException {
      try {
        super.flush();
      } finally {
        // Ensure that count is reset in any case to avoid writing garbage to the end of
        // the file in case of an error
        // occurred during the write
        count = 0;
      }
    }
  }
}
| 7,289 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/FileReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.IOException;
import java.io.Closeable;
import java.util.Iterator;
import java.util.NoSuchElementException;
import org.apache.avro.Schema;
/** Interface for reading data from a file. */
public interface FileReader<D> extends Iterator<D>, Iterable<D>, Closeable {
  /** Return the schema for data in this file. */
  Schema getSchema();

  /**
   * Read the next datum from the file.
   *
   * @param reuse an instance to reuse.
   * @throws NoSuchElementException if no more remain in the file.
   */
  D next(D reuse) throws IOException;

  /**
   * Move to the next synchronization point after a position. To process a range
   * of file entries, call this with the starting position, then check
   * {@link #pastSync(long)} with the end point before each call to
   * {@link #next()}.
   */
  void sync(long position) throws IOException;

  /** Return true if past the next synchronization point after a position. */
  boolean pastSync(long position) throws IOException;

  /** Return the current position in the input. */
  long tell() throws IOException;
}
| 7,290 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/DataFileConstants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
/**
 * Constants used in data files.
 */
public class DataFileConstants {
  private DataFileConstants() {
  } // no public ctor

  // Object-container format version, embedded as the last magic byte.
  public static final byte VERSION = 1;
  public static final byte[] MAGIC = new byte[] { (byte) 'O', (byte) 'b', (byte) 'j', VERSION };
  // Sentinel block count identifying a footer block.
  public static final long FOOTER_BLOCK = -1;
  // Length, in bytes, of the sync marker written between blocks.
  public static final int SYNC_SIZE = 16;
  // Default number of uncompressed bytes written per block before a sync.
  public static final int DEFAULT_SYNC_INTERVAL = 4000 * SYNC_SIZE;

  // Reserved metadata keys (the "avro." namespace is reserved).
  public static final String SCHEMA = "avro.schema";
  public static final String CODEC = "avro.codec";

  // Names of the supported compression codecs.
  public static final String NULL_CODEC = "null";
  public static final String DEFLATE_CODEC = "deflate";
  public static final String SNAPPY_CODEC = "snappy";
  public static final String BZIP2_CODEC = "bzip2";
  public static final String XZ_CODEC = "xz";
  public static final String ZSTANDARD_CODEC = "zstandard";
}
| 7,291 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/SeekableFileInput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileDescriptor;
import java.io.IOException;
/** A {@link FileInputStream} that implements {@link SeekableInput}. */
public class SeekableFileInput extends FileInputStream implements SeekableInput {

  public SeekableFileInput(File file) throws IOException {
    super(file);
  }

  public SeekableFileInput(FileDescriptor fd) throws IOException {
    super(fd);
  }

  /** Reposition the stream by moving the underlying file channel. */
  @Override
  public void seek(long p) throws IOException {
    getChannel().position(p);
  }

  /** Return the current byte offset within the file. */
  @Override
  public long tell() throws IOException {
    return getChannel().position();
  }

  /** Return the total length of the file, in bytes. */
  @Override
  public long length() throws IOException {
    return getChannel().size();
  }
}
| 7,292 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/DeflateCodec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterOutputStream;
import org.apache.avro.util.NonCopyingByteArrayOutputStream;
/**
 * Implements DEFLATE (RFC1951) compression and decompression.
 *
 * Note that there is a distinction between RFC1951 (deflate) and RFC1950
 * (zlib). zlib adds an extra 2-byte header at the front, and a 4-byte checksum
 * at the end. The code here, by passing "true" as the "nowrap" option to
 * {@link Inflater} and {@link Deflater}, is using RFC1951.
 */
public class DeflateCodec extends Codec {
  private static final int DEFAULT_BUFFER_SIZE = 8192;

  /**
   * {@link CodecFactory} producing {@link DeflateCodec} instances at a fixed
   * compression level.
   */
  static class Option extends CodecFactory {
    private final int compressionLevel;

    Option(int compressionLevel) {
      this.compressionLevel = compressionLevel;
    }

    @Override
    protected Codec createInstance() {
      return new DeflateCodec(compressionLevel);
    }
  }

  // Lazily created and reused across calls (reset before each use) to avoid
  // re-allocating native zlib state. Because of this reuse, a DeflateCodec
  // instance is not safe for concurrent use by multiple threads.
  private Deflater deflater;
  private Inflater inflater;

  // currently only do 'nowrap' -- RFC 1951, not zlib
  private final boolean nowrap = true;
  private final int compressionLevel;

  /**
   * @param compressionLevel the deflate level, passed through to
   *                         {@link Deflater#Deflater(int, boolean)}
   */
  public DeflateCodec(int compressionLevel) {
    this.compressionLevel = compressionLevel;
  }

  @Override
  public String getName() {
    return DataFileConstants.DEFLATE_CODEC;
  }

  /** Compresses the remaining bytes of {@code data} with raw (nowrap) deflate. */
  @Override
  public ByteBuffer compress(ByteBuffer data) throws IOException {
    NonCopyingByteArrayOutputStream baos = new NonCopyingByteArrayOutputStream(DEFAULT_BUFFER_SIZE);
    try (OutputStream outputStream = new DeflaterOutputStream(baos, getDeflater())) {
      outputStream.write(data.array(), computeOffset(data), data.remaining());
    }
    return baos.asByteBuffer();
  }

  /** Inflates the remaining bytes of {@code data} (raw RFC1951 stream). */
  @Override
  public ByteBuffer decompress(ByteBuffer data) throws IOException {
    NonCopyingByteArrayOutputStream baos = new NonCopyingByteArrayOutputStream(DEFAULT_BUFFER_SIZE);
    try (OutputStream outputStream = new InflaterOutputStream(baos, getInflater())) {
      outputStream.write(data.array(), computeOffset(data), data.remaining());
    }
    return baos.asByteBuffer();
  }

  // get and initialize the inflater for use.
  private Inflater getInflater() {
    if (null == inflater) {
      inflater = new Inflater(nowrap);
    } else {
      inflater.reset();
    }
    return inflater;
  }

  // get and initialize the deflater for use.
  private Deflater getDeflater() {
    if (null == deflater) {
      deflater = new Deflater(compressionLevel, nowrap);
    } else {
      deflater.reset();
    }
    return deflater;
  }

  @Override
  public int hashCode() {
    // Consistent with equals(): only nowrap participates. compressionLevel is
    // deliberately excluded -- streams written at different levels are
    // interchangeable on read.
    return nowrap ? 0 : 1;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (obj == null || obj.getClass() != getClass())
      return false;
    DeflateCodec other = (DeflateCodec) obj;
    return (this.nowrap == other.nowrap);
  }

  @Override
  public String toString() {
    return getName() + "-" + compressionLevel;
  }
}
| 7,293 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/SyncableFileOutputStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.avro.file;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
/**
 * An implementation of {@linkplain Syncable} which writes to a file. An
 * instance of this class can be used with {@linkplain DataFileWriter} to
 * guarantee that Avro Container Files are persisted to disk on supported
 * platforms using the {@linkplain org.apache.avro.file.DataFileWriter#fSync()}
 * method.
 *
 * @see FileOutputStream
 */
public class SyncableFileOutputStream extends FileOutputStream implements Syncable {
/**
 * Creates an instance of {@linkplain SyncableFileOutputStream} with the given
 * name.
 *
 * @param name - the full file name.
 * @throws FileNotFoundException - if the file cannot be created or opened.
 */
public SyncableFileOutputStream(String name) throws FileNotFoundException {
super(name);
}
/**
 * Creates an instance of {@linkplain SyncableFileOutputStream} using the given
 * {@linkplain File} instance.
 *
 * @param file - The file to use to create the output stream.
 *
 * @throws FileNotFoundException - if the file cannot be created or opened.
 */
public SyncableFileOutputStream(File file) throws FileNotFoundException {
super(file);
}
/**
 * Creates an instance of {@linkplain SyncableFileOutputStream} with the given
 * name and optionally append to the file if it already exists.
 *
 * @param name - the full file name.
 * @param append - true if the file is to be appended to
 *
 * @throws FileNotFoundException - if the file cannot be created or opened.
 */
public SyncableFileOutputStream(String name, boolean append) throws FileNotFoundException {
super(name, append);
}
/**
 * Creates an instance of {@linkplain SyncableFileOutputStream} that writes to
 * the file represented by the given {@linkplain File} instance and optionally
 * append to the file if it already exists.
 *
 * @param file - the file instance to use to create the stream.
 * @param append - true if the file is to be appended to
 *
 * @throws FileNotFoundException - if the file cannot be created or opened.
 */
public SyncableFileOutputStream(File file, boolean append) throws FileNotFoundException {
super(file, append);
}
/**
 * Creates an instance of {@linkplain SyncableFileOutputStream} using the given
 * {@linkplain FileDescriptor} instance.
 *
 * @param fdObj - an open file descriptor to write to.
 */
public SyncableFileOutputStream(FileDescriptor fdObj) {
super(fdObj);
}
/**
 * {@inheritDoc}
 */
@Override
public void sync() throws IOException {
// Delegates to FileDescriptor.sync(), which forces OS buffers for this
// descriptor to stable storage (POSIX fsync semantics).
getFD().sync();
}
}
| 7,294 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/ZstandardCodec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import org.apache.avro.util.NonCopyingByteArrayOutputStream;
import org.apache.commons.compress.utils.IOUtils;
/** Avro codec implementing Zstandard compression. */
public class ZstandardCodec extends Codec {
  public final static int DEFAULT_COMPRESSION = 3;
  public final static boolean DEFAULT_USE_BUFFERPOOL = false;
  private static final int DEFAULT_BUFFER_SIZE = 8192;

  /** Factory producing {@link ZstandardCodec} instances with fixed settings. */
  static class Option extends CodecFactory {
    private final int compressionLevel;
    private final boolean useChecksum;
    private final boolean useBufferPool;

    Option(int compressionLevel, boolean useChecksum, boolean useBufferPool) {
      this.compressionLevel = compressionLevel;
      this.useChecksum = useChecksum;
      this.useBufferPool = useBufferPool;
    }

    @Override
    protected Codec createInstance() {
      return new ZstandardCodec(compressionLevel, useChecksum, useBufferPool);
    }
  }

  private final int compressionLevel;
  private final boolean useChecksum;
  private final boolean useBufferPool;

  /**
   * Create a ZstandardCodec instance with the given compressionLevel, checksum,
   * and bufferPool option
   **/
  public ZstandardCodec(int compressionLevel, boolean useChecksum, boolean useBufferPool) {
    this.compressionLevel = compressionLevel;
    this.useChecksum = useChecksum;
    this.useBufferPool = useBufferPool;
  }

  @Override
  public String getName() {
    return DataFileConstants.ZSTANDARD_CODEC;
  }

  /** Compresses the remaining bytes of {@code data} into a new buffer. */
  @Override
  public ByteBuffer compress(ByteBuffer data) throws IOException {
    NonCopyingByteArrayOutputStream deflated = new NonCopyingByteArrayOutputStream(DEFAULT_BUFFER_SIZE);
    try (OutputStream zstdOut = ZstandardLoader.output(deflated, compressionLevel, useChecksum, useBufferPool)) {
      zstdOut.write(data.array(), computeOffset(data), data.remaining());
    }
    return deflated.asByteBuffer();
  }

  /** Decompresses the remaining bytes of {@code compressedData} into a new buffer. */
  @Override
  public ByteBuffer decompress(ByteBuffer compressedData) throws IOException {
    NonCopyingByteArrayOutputStream inflated = new NonCopyingByteArrayOutputStream(DEFAULT_BUFFER_SIZE);
    InputStream rawBytes = new ByteArrayInputStream(compressedData.array(), computeOffset(compressedData),
        compressedData.remaining());
    try (InputStream zstdIn = ZstandardLoader.input(rawBytes, useBufferPool)) {
      IOUtils.copy(zstdIn, inflated);
    }
    return inflated.asByteBuffer();
  }

  @Override
  public int hashCode() {
    return getName().hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    // All ZstandardCodec instances decode the same format, so settings do not
    // participate in equality.
    return obj != null && obj.getClass() == this.getClass();
  }

  @Override
  public String toString() {
    return getName() + "[" + compressionLevel + "]";
  }
}
| 7,295 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/DataFileStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.Closeable;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.InvalidAvroMagicException;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
/**
* Streaming access to files written by {@link DataFileWriter}. Use
* {@link DataFileReader} for file-based input.
*
* @see DataFileWriter
*/
public class DataFileStream<D> implements Iterator<D>, Iterable<D>, Closeable {
/**
 * A handle that can be used to reopen a DataFile without re-reading the header
 * of the stream.
 */
public static final class Header {
// Writer schema parsed from the header metadata.
Schema schema;
// Raw metadata key/value pairs read from the file header.
Map<String, byte[]> meta = new HashMap<>();
// Metadata keys in the order they appeared in the header (unmodifiable once
// the header is finalized in initialize()).
private transient List<String> metaKeyList = new ArrayList<>();
// Sync marker separating data blocks; validated after every block read.
byte[] sync = new byte[DataFileConstants.SYNC_SIZE];
private Header() {
}
}
private DatumReader<D> reader;
// Uncompressed size in bytes of the current block.
private long blockSize;
// True when a block header has been read but its payload not yet consumed.
private boolean availableBlock = false;
private Header header;
/** Decoder on raw input stream. (Used for metadata.) */
BinaryDecoder vin;
/**
 * Secondary decoder, for datums. (Different than vin for block segments.)
 */
BinaryDecoder datumIn = null;
ByteBuffer blockBuffer;
long blockCount; // # entries in block
long blockRemaining; // # entries remaining in block
byte[] syncBuffer = new byte[DataFileConstants.SYNC_SIZE];
private Codec codec;
/**
 * Construct a reader for an input stream. For file-based input, use
 * {@link DataFileReader}. This will buffer, wrapping with a
 * {@link java.io.BufferedInputStream} is not necessary.
 */
public DataFileStream(InputStream in, DatumReader<D> reader) throws IOException {
this.reader = reader;
initialize(in, null);
}
/**
 * create an uninitialized DataFileStream
 */
protected DataFileStream(DatumReader<D> reader) throws IOException {
this.reader = reader;
}
/**
 * Reads the magic bytes from the head of the stream without validating them.
 *
 * @throws IOException if the stream is uninitialized or too short.
 */
byte[] readMagic() throws IOException {
if (this.vin == null) {
throw new IOException("InputStream is not initialized");
}
byte[] magic = new byte[DataFileConstants.MAGIC.length];
try {
vin.readFixed(magic); // read magic
} catch (IOException e) {
throw new IOException("Not an Avro data file.", e);
}
return magic;
}
/** Checks that {@code magic} matches the Avro container-file magic bytes. */
void validateMagic(byte[] magic) throws InvalidAvroMagicException {
if (!Arrays.equals(DataFileConstants.MAGIC, magic))
throw new InvalidAvroMagicException("Not an Avro data file.");
}
/** Initialize the stream by reading from its head. */
void initialize(InputStream in, byte[] magic) throws IOException {
this.header = new Header();
this.vin = DecoderFactory.get().binaryDecoder(in, vin);
// magic may have been pre-read by the caller; read it here otherwise.
magic = (magic == null) ? readMagic() : magic;
validateMagic(magic);
long l = vin.readMapStart(); // read meta data
if (l > 0) {
do {
for (long i = 0; i < l; i++) {
String key = vin.readString(null).toString();
ByteBuffer value = vin.readBytes(null);
byte[] bb = new byte[value.remaining()];
value.get(bb);
header.meta.put(key, bb);
header.metaKeyList.add(key);
}
} while ((l = vin.mapNext()) != 0); // maps may be written in multiple chunks
}
vin.readFixed(header.sync); // read sync
// finalize the header
header.metaKeyList = Collections.unmodifiableList(header.metaKeyList);
header.schema = new Schema.Parser(Schema.NameValidator.NO_VALIDATION).setValidateDefaults(false)
        .parse(getMetaString(DataFileConstants.SCHEMA));
this.codec = resolveCodec();
reader.setSchema(header.schema);
}
/** Initialize the stream without reading from it. */
void initialize(Header header) throws IOException {
this.header = header;
this.codec = resolveCodec();
reader.setSchema(header.schema);
}
// Selects the codec named in the header metadata, defaulting to "null"
// (pass-through) when no codec entry is present.
Codec resolveCodec() {
String codecStr = getMetaString(DataFileConstants.CODEC);
if (codecStr != null) {
return CodecFactory.fromString(codecStr).createInstance();
} else {
return CodecFactory.nullCodec().createInstance();
}
}
/**
 * A handle that can be used to reopen this stream without rereading the head.
 */
public Header getHeader() {
return header;
}
/** Return the schema used in this file. */
public Schema getSchema() {
return header.schema;
}
/** Return the list of keys in the metadata */
public List<String> getMetaKeys() {
return header.metaKeyList;
}
/** Return the value of a metadata property. */
public byte[] getMeta(String key) {
return header.meta.get(key);
}
/** Return the value of a metadata property, decoded as UTF-8. */
public String getMetaString(String key) {
byte[] value = getMeta(key);
if (value == null) {
return null;
}
return new String(value, StandardCharsets.UTF_8);
}
/** Return the value of a metadata property, parsed as a long. */
public long getMetaLong(String key) {
return Long.parseLong(getMetaString(key));
}
/**
 * Returns an iterator over entries in this file. Note that this iterator is
 * shared with other users of the file: it does not contain a separate pointer
 * into the file.
 */
@Override
public Iterator<D> iterator() {
return this;
}
// Reusable raw-block buffer, lazily allocated and resized by nextRawBlock().
private DataBlock block = null;
/** True if more entries remain in this file. */
@Override
public boolean hasNext() {
try {
if (blockRemaining == 0) {
// check that the previous block was finished
if (null != datumIn) {
boolean atEnd = datumIn.isEnd();
if (!atEnd) {
throw new IOException("Block read partially, the data may be corrupt");
}
}
if (hasNextBlock()) {
block = nextRawBlock(block);
block.decompressUsing(codec);
blockBuffer = block.getAsByteBuffer();
// Point the datum decoder at the decompressed block contents.
datumIn = DecoderFactory.get().binaryDecoder(blockBuffer.array(),
            blockBuffer.arrayOffset() + blockBuffer.position(), blockBuffer.remaining(), datumIn);
}
}
return blockRemaining != 0;
} catch (EOFException e) { // at EOF
return false;
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
/**
 * Read the next datum in the file.
 *
 * @throws NoSuchElementException if no more remain in the file.
 */
@Override
public D next() {
try {
return next(null);
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
/**
 * Read the next datum from the file.
 *
 * @param reuse an instance to reuse.
 * @throws NoSuchElementException if no more remain in the file.
 */
public D next(D reuse) throws IOException {
if (!hasNext())
throw new NoSuchElementException();
D result = reader.read(reuse, datumIn);
if (0 == --blockRemaining) {
blockFinished();
}
return result;
}
/** Expert: Return the next block in the file, as binary-encoded data. */
public ByteBuffer nextBlock() throws IOException {
if (!hasNext())
throw new NoSuchElementException();
// Only valid before any datum of the current block has been consumed.
if (blockRemaining != blockCount)
throw new IllegalStateException("Not at block start.");
blockRemaining = 0;
datumIn = null;
return blockBuffer;
}
/** Expert: Return the count of items in the current block. */
public long getBlockCount() {
return blockCount;
}
/** Expert: Return the size in bytes (uncompressed) of the current block. */
public long getBlockSize() {
return blockSize;
}
/** Hook for subclasses (e.g. file-based readers) run when a block is consumed. */
protected void blockFinished() throws IOException {
// nothing for the stream impl
}
// Reads the next block header (entry count + byte size) if one has not
// already been buffered. Returns false at clean EOF.
boolean hasNextBlock() {
try {
if (availableBlock)
return true;
if (vin.isEnd())
return false;
blockRemaining = vin.readLong(); // read block count
blockSize = vin.readLong(); // read block size
if (blockSize > Integer.MAX_VALUE || blockSize < 0) {
throw new IOException("Block size invalid or too large for this " + "implementation: " + blockSize);
}
blockCount = blockRemaining;
availableBlock = true;
return true;
} catch (EOFException eof) {
return false;
} catch (IOException e) {
throw new AvroRuntimeException(e);
}
}
// Reads the payload of the block announced by hasNextBlock() into `reuse`
// (reallocating if too small) and validates the trailing sync marker.
DataBlock nextRawBlock(DataBlock reuse) throws IOException {
if (!hasNextBlock()) {
throw new NoSuchElementException();
}
if (reuse == null || reuse.data.length < (int) blockSize) {
reuse = new DataBlock(blockRemaining, (int) blockSize);
} else {
reuse.numEntries = blockRemaining;
reuse.blockSize = (int) blockSize;
}
// throws if it can't read the size requested
vin.readFixed(reuse.data, 0, reuse.blockSize);
vin.readFixed(syncBuffer);
availableBlock = false;
if (!Arrays.equals(syncBuffer, header.sync))
throw new IOException("Invalid sync!");
return reuse;
}
/** Not supported. */
@Override
public void remove() {
throw new UnsupportedOperationException();
}
/** Close this reader. */
@Override
public void close() throws IOException {
vin.inputStream().close();
}
/** A single container-file block: entry count plus raw (possibly compressed) bytes. */
static class DataBlock {
private byte[] data;
private long numEntries;
private int blockSize;
// Offset of the block's first byte within `data`.
private int offset = 0;
private boolean flushOnWrite = true;
private DataBlock(long numEntries, int blockSize) {
this.data = new byte[blockSize];
this.numEntries = numEntries;
this.blockSize = blockSize;
}
DataBlock(ByteBuffer block, long numEntries) {
this.data = block.array();
this.blockSize = block.remaining();
this.offset = block.arrayOffset() + block.position();
this.numEntries = numEntries;
}
byte[] getData() {
return data;
}
long getNumEntries() {
return numEntries;
}
int getBlockSize() {
return blockSize;
}
boolean isFlushOnWrite() {
return flushOnWrite;
}
void setFlushOnWrite(boolean flushOnWrite) {
this.flushOnWrite = flushOnWrite;
}
ByteBuffer getAsByteBuffer() {
return ByteBuffer.wrap(data, offset, blockSize);
}
// Replaces the block contents in place with the (de)compressed form.
void decompressUsing(Codec c) throws IOException {
ByteBuffer result = c.decompress(getAsByteBuffer());
data = result.array();
blockSize = result.remaining();
}
void compressUsing(Codec c) throws IOException {
ByteBuffer result = c.compress(getAsByteBuffer());
data = result.array();
blockSize = result.remaining();
}
// Serializes this block (count, size, payload, sync marker) to the encoder.
void writeBlockTo(BinaryEncoder e, byte[] sync) throws IOException {
e.writeLong(this.numEntries);
e.writeLong(this.blockSize);
e.writeFixed(this.data, offset, this.blockSize);
e.writeFixed(sync);
if (flushOnWrite) {
e.flush();
}
}
}
}
| 7,296 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/NullCodec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.IOException;
import java.nio.ByteBuffer;
/** Implements "null" (pass through) codec. */
final class NullCodec extends Codec {

  // Stateless, so a single shared instance suffices.
  private static final NullCodec INSTANCE = new NullCodec();

  /** Factory returning the shared pass-through instance. */
  static class Option extends CodecFactory {
    @Override
    protected Codec createInstance() {
      return INSTANCE;
    }
  }

  /** No options available for NullCodec. */
  public static final CodecFactory OPTION = new Option();

  @Override
  public String getName() {
    return DataFileConstants.NULL_CODEC;
  }

  /** Pass-through: returns the input buffer untouched. */
  @Override
  public ByteBuffer compress(ByteBuffer buffer) throws IOException {
    return buffer;
  }

  /** Pass-through: returns the input buffer untouched. */
  @Override
  public ByteBuffer decompress(ByteBuffer data) throws IOException {
    return data;
  }

  @Override
  public boolean equals(Object other) {
    return this == other || (other != null && other.getClass() == getClass());
  }

  @Override
  public int hashCode() {
    return 2;
  }
}
| 7,297 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/SeekableByteArrayInput.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.file;
import java.io.ByteArrayInputStream;
import java.io.IOException;
/** A {@link SeekableInput} backed with data in a byte array. */
public class SeekableByteArrayInput extends ByteArrayInputStream implements SeekableInput {

  /**
   * Creates a seekable stream over {@code data}.
   *
   * @param data the backing byte array (not copied)
   */
  public SeekableByteArrayInput(byte[] data) {
    super(data);
  }

  /** Returns the total number of readable bytes. */
  @Override
  public long length() throws IOException {
    return this.count;
  }

  /**
   * Positions the stream at absolute offset {@code p}, clamped to
   * {@code [0, count]}.
   *
   * <p>Sets the inherited {@code pos} field directly instead of calling
   * {@code reset()} followed by {@code skip(p)}: {@code reset()} rewinds to the
   * last {@link #mark(int)} position, not necessarily to offset 0, so the
   * previous implementation silently seeked relative to a caller's mark.
   */
  @Override
  public void seek(long p) throws IOException {
    this.pos = (int) Math.max(0L, Math.min(p, this.count));
  }

  /** Returns the current absolute read position. */
  @Override
  public long tell() throws IOException {
    return this.pos;
  }
}
| 7,298 |
0 | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro | Create_ds/avro/lang/java/avro/src/main/java/org/apache/avro/file/Syncable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.avro.file;
import java.io.IOException;
/**
 * An output target whose buffered data can be forced to stable storage.
 */
public interface Syncable {
/**
 * Sync the file to disk. On supported platforms, this method behaves like POSIX
 * <code>fsync</code> and syncs all underlying OS buffers for this file
 * descriptor to disk. On these platforms, if this method returns, the data
 * written to this instance is guaranteed to be persisted on disk.
 *
 * @throws IOException - if an error occurred while attempting to sync the data
 *                     to disk.
 */
void sync() throws IOException;
}
| 7,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.