focal_method | test_case
|---|---|
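// Delegates to the chunked list(Path, ListProgressListener, int) overload; the
// page size comes from the host preference "brick.listing.chunksize".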
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
return this.list(directory, listener, new HostPreferences(session.getHost()).getInteger("brick.listing.chunksize"));
}
|
@Test
public void testListChunking() throws Exception {
final Path directory = new BrickDirectoryFeature(session).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
final Path f1 = new BrickTouchFeature(session).touch(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.file)), new TransferStatus());
final Path f2 = new BrickTouchFeature(session).touch(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.file)), new TransferStatus());
final AttributedList<Path> list = new BrickListService(session).list(directory, new DisabledListProgressListener(), 1);
assertNotSame(AttributedList.emptyList(), list);
assertFalse(list.isEmpty());
assertEquals(2, list.size());
for(Path f : list) {
assertSame(directory, f.getParent());
}
assertTrue(list.contains(f1));
assertTrue(list.contains(f2));
new BrickDeleteFeature(session).delete(Collections.singletonList(directory), new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
|
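// Maps a SeaTunnel column definition back onto a DB2 type definition, clamping
// DECIMAL precision/scale, BYTES/STRING lengths and TIMESTAMP scale to the DB2
// limits and logging a warning whenever a value had to be adjusted.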
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(DB2_BOOLEAN);
builder.dataType(DB2_BOOLEAN);
break;
case TINYINT:
case SMALLINT:
builder.columnType(DB2_SMALLINT);
builder.dataType(DB2_SMALLINT);
break;
case INT:
builder.columnType(DB2_INT);
builder.dataType(DB2_INT);
break;
case BIGINT:
builder.columnType(DB2_BIGINT);
builder.dataType(DB2_BIGINT);
break;
case FLOAT:
builder.columnType(DB2_REAL);
builder.dataType(DB2_REAL);
break;
case DOUBLE:
builder.columnType(DB2_DOUBLE);
builder.dataType(DB2_DOUBLE);
break;
case DECIMAL:
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", DB2_DECIMAL, precision, scale));
builder.dataType(DB2_DECIMAL);
builder.precision(precision);
builder.scale(scale);
break;
case BYTES:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(
String.format("%s(%s)", DB2_VARBINARY, MAX_VARBINARY_LENGTH));
builder.dataType(DB2_VARBINARY);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_BINARY, column.getColumnLength()));
builder.dataType(DB2_BINARY);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_VARBINARY_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_VARBINARY, column.getColumnLength()));
builder.dataType(DB2_VARBINARY);
builder.length(column.getColumnLength());
} else {
long length = column.getColumnLength();
if (length > MAX_BLOB_LENGTH) {
length = MAX_BLOB_LENGTH;
log.warn(
"The length of blob type {} is out of range, "
+ "it will be converted to {}({})",
column.getName(),
DB2_BLOB,
length);
}
builder.columnType(String.format("%s(%s)", DB2_BLOB, length));
builder.dataType(DB2_BLOB);
builder.length(length);
}
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(String.format("%s(%s)", DB2_VARCHAR, MAX_VARCHAR_LENGTH));
builder.dataType(DB2_VARCHAR);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_CHAR_LENGTH) {
builder.columnType(String.format("%s(%s)", DB2_CHAR, column.getColumnLength()));
builder.dataType(DB2_CHAR);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_VARCHAR, column.getColumnLength()));
builder.dataType(DB2_VARCHAR);
builder.length(column.getColumnLength());
} else {
long length = column.getColumnLength();
if (length > MAX_CLOB_LENGTH) {
length = MAX_CLOB_LENGTH;
log.warn(
"The length of clob type {} is out of range, "
+ "it will be converted to {}({})",
column.getName(),
DB2_CLOB,
length);
}
builder.columnType(String.format("%s(%s)", DB2_CLOB, length));
builder.dataType(DB2_CLOB);
builder.length(length);
}
break;
case DATE:
builder.columnType(DB2_DATE);
builder.dataType(DB2_DATE);
break;
case TIME:
builder.columnType(DB2_TIME);
builder.dataType(DB2_TIME);
break;
case TIMESTAMP:
if (column.getScale() != null && column.getScale() > 0) {
int timestampScale = column.getScale();
if (column.getScale() > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(String.format("%s(%s)", DB2_TIMESTAMP, timestampScale));
builder.scale(timestampScale);
} else {
builder.columnType(DB2_TIMESTAMP);
}
builder.dataType(DB2_TIMESTAMP);
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.DB_2,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
}
|
@Test
public void testReconvertDate() {
Column column =
PhysicalColumn.builder()
.name("test")
.dataType(LocalTimeType.LOCAL_DATE_TYPE)
.build();
BasicTypeDefine typeDefine = DB2TypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(DB2TypeConverter.DB2_DATE, typeDefine.getColumnType());
Assertions.assertEquals(DB2TypeConverter.DB2_DATE, typeDefine.getDataType());
}
|
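// Builds the two Redis keys used by the sliding-window limiter: a stable
// ".tokens" key derived from the key name, and a ".timestamp" key prefixed
// with a freshly generated short UUID.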
@Override
public List<String> getKeys(final String id) {
String hashKeyPart = ".{" + id + "}";
String tokenKey = getKeyName() + hashKeyPart + ".tokens";
String timestampKey = UUIDUtils.getInstance().generateShortUuid() + hashKeyPart + ".timestamp";
return Arrays.asList(tokenKey, timestampKey);
}
|
@Test
public void getKeysTest() {
String prefix = slidingWindowRateLimiterAlgorithm.getKeyName() + ".{" + ID;
String tokenKey = prefix + "}.tokens";
MatcherAssert.assertThat(tokenKey, is(slidingWindowRateLimiterAlgorithm.getKeys(ID).get(0)));
}
|
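// A stanza sent before resource binding is only acceptable when it is
// implicitly addressed at the user itself (no 'to' address) or addressed at
// the server domain; anything else is flagged as invalid.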
public static boolean isInvalidStanzaSentPriorToResourceBinding(final Packet stanza, final ClientSession session)
{
// Openfire sets 'authenticated' only after resource binding.
if (session.getStatus() == Session.Status.AUTHENTICATED) {
return false;
}
// Beware: the 'to' address in the stanza will have been overwritten by the packet router.
final JID intendedRecipient = stanza.getTo();
final JID serverDomain = new JID(XMPPServer.getInstance().getServerInfo().getXMPPDomain());
// If there's no 'to' address, then the stanza is implicitly addressed at the user itself.
if (intendedRecipient == null) {
return false;
}
// TODO: after authentication (but prior to resource binding), it should be possible to verify that the
// intended recipient's bare JID corresponds with the authorized user. Openfire currently does not have an API
// that can be used to obtain the authorized username, prior to resource binding.
if (intendedRecipient.equals(serverDomain)) {
return false;
}
return true;
}
|
@Test
public void testIsInvalid_addressedAtDomain_unauthenticated() throws Exception
{
// Setup test fixture.
final Packet stanza = new Message();
stanza.setTo(XMPPServer.getInstance().getServerInfo().getXMPPDomain());
final LocalClientSession session = mock(LocalClientSession.class, withSettings().strictness(Strictness.LENIENT));
when(session.getStatus()).thenReturn(Session.Status.CONNECTED); // Openfire sets 'AUTHENTICATED' only after resource binding has been done.
// Execute system under test.
final boolean result = SessionPacketRouter.isInvalidStanzaSentPriorToResourceBinding(stanza, session);
// Verify results.
assertFalse(result);
}
|
@Override
public ProcResult fetchResult()
throws AnalysisException {
BaseProcResult result = new BaseProcResult();
result.setNames(getMetadata());
final List<List<String>> computeNodesInfos = getClusterComputeNodesInfos();
for (List<String> computeNodesInfo : computeNodesInfos) {
List<String> oneInfo = new ArrayList<>(computeNodesInfo.size());
oneInfo.addAll(computeNodesInfo);
result.addRow(oneInfo);
}
return result;
}
|
@Test
public void testFetchResultSharedNothing() throws AnalysisException {
new Expectations() {
{
RunMode.isSharedDataMode();
minTimes = 0;
result = false;
}
};
ComputeNodeProcDir dir = new ComputeNodeProcDir(systemInfoService);
ProcResult result = dir.fetchResult();
Assert.assertNotNull(result);
Assert.assertTrue(result instanceof BaseProcResult);
int columnIndex = getTabletNumColumnIndex(result.getColumnNames());
// no "TabletNum" column in shared-nothing mode
Assert.assertEquals(-1, columnIndex);
}
|
public int getIndexByIdentifier(FactIdentifier factIdentifier, ExpressionIdentifier expressionIdentifier) {
return IntStream.range(0, factMappings.size()).filter(index -> {
FactMapping factMapping = factMappings.get(index);
return Objects.equals(factMapping.getExpressionIdentifier(), expressionIdentifier) &&
Objects.equals(factMapping.getFactIdentifier(), factIdentifier);
}).findFirst().orElseThrow(() -> new IllegalArgumentException(
new StringBuilder().append("Impossible to find a FactMapping with factIdentifier '").append(factIdentifier.getName())
.append("' and expressionIdentifier '").append(expressionIdentifier.getName()).append("'").toString()));
}
|
@Test
public void getIndexByIdentifierTest() {
FactMapping factMapping0 = modelDescriptor.addFactMapping(factIdentifier, expressionIdentifier);
FactMapping factMapping1 = modelDescriptor.addFactMapping(factIdentifier2, expressionIdentifier);
int indexToCheck = 0;
int indexRetrieved = modelDescriptor.getIndexByIdentifier(factMapping0.getFactIdentifier(), expressionIdentifier);
assertThat(indexRetrieved).isEqualTo(indexToCheck);
indexToCheck = 1;
indexRetrieved = modelDescriptor.getIndexByIdentifier(factMapping1.getFactIdentifier(), expressionIdentifier);
assertThat(indexRetrieved).isEqualTo(indexToCheck);
}
|
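// Equality check that first strips one pair of surrounding double quotes from
// either operand, so the quoted literal "\"a\"" matches the raw "a".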
public boolean match(String left, String right) {
if (left != null && left.startsWith("\"") && left.endsWith("\"")) {
left = left.substring(1, left.length() - 1);
}
if (right != null && right.startsWith("\"") && right.endsWith("\"")) {
right = right.substring(1, right.length() - 1);
}
return Objects.equals(left, right);
}
|
@Test
public void stringShouldEqual() {
assertTrue(new StringMatch().match("\"a\"", "a"));
assertTrue(new StringMatch().match("a", "a"));
assertFalse(new StringMatch().match("\"a\"", "ab"));
}
|
@Override
@TpsControl(pointName = "RemoteNamingInstanceRegisterDeregister", name = "RemoteNamingInstanceRegisterDeregister")
@Secured(action = ActionTypes.WRITE)
@ExtractorManager.Extractor(rpcExtractor = InstanceRequestParamExtractor.class)
public InstanceResponse handle(InstanceRequest request, RequestMeta meta) throws NacosException {
Service service = Service.newService(request.getNamespace(), request.getGroupName(), request.getServiceName(),
true);
InstanceUtil.setInstanceIdIfEmpty(request.getInstance(), service.getGroupedServiceName());
switch (request.getType()) {
case NamingRemoteConstants.REGISTER_INSTANCE:
return registerInstance(service, request, meta);
case NamingRemoteConstants.DE_REGISTER_INSTANCE:
return deregisterInstance(service, request, meta);
default:
throw new NacosException(NacosException.INVALID_PARAM,
String.format("Unsupported request type %s", request.getType()));
}
}
|
@Test
void testHandle() throws NacosException {
InstanceRequest instanceRequest = new InstanceRequest();
instanceRequest.setType(NamingRemoteConstants.REGISTER_INSTANCE);
instanceRequest.setServiceName("service1");
instanceRequest.setGroupName("group1");
Instance instance = new Instance();
instanceRequest.setInstance(instance);
RequestMeta requestMeta = new RequestMeta();
instanceRequestHandler.handle(instanceRequest, requestMeta);
Mockito.verify(clientOperationService).registerInstance(Mockito.any(), Mockito.any(), Mockito.anyString());
instanceRequest.setType(NamingRemoteConstants.DE_REGISTER_INSTANCE);
instanceRequestHandler.handle(instanceRequest, requestMeta);
Mockito.verify(clientOperationService).deregisterInstance(Mockito.any(), Mockito.any(), Mockito.anyString());
instanceRequest.setType("xxx");
try {
instanceRequestHandler.handle(instanceRequest, requestMeta);
} catch (Exception e) {
assertEquals(NacosException.INVALID_PARAM, ((NacosException) e).getErrCode());
}
}
|
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
String[] lines = splitAndRemoveEmpty(st, "\n");
return interpret(lines, context);
}
|
@Test
void mkdirTest() throws IOException, AlluxioException {
String qualifiedPath =
"alluxio://" + mLocalAlluxioCluster.getHostname() + ":"
+ mLocalAlluxioCluster.getMasterRpcPort() + "/root/testFile1";
InterpreterResult output = alluxioInterpreter.interpret("mkdir " + qualifiedPath, null);
boolean existsDir = fs.exists(new AlluxioURI("/root/testFile1"));
assertEquals(
"Successfully created directory " + qualifiedPath + "\n\n",
output.message().get(0).getData());
assertTrue(existsDir);
}
|
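// Converts raw Kafka record bytes into Connect data. A null value is a
// tombstone (SchemaAndValue.NULL); with schemas disabled, the parsed JSON is
// wrapped in a synthetic schema/payload envelope so both paths share the same
// conversion code.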
@Override
public SchemaAndValue toConnectData(String topic, byte[] value) {
JsonNode jsonValue;
// This handles a tombstone message
if (value == null) {
return SchemaAndValue.NULL;
}
try {
jsonValue = deserializer.deserialize(topic, value);
} catch (SerializationException e) {
throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e);
}
if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)))
throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." +
" If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration.");
// The deserialized data should either be an envelope object containing the schema and the payload or the schema
// was stripped during serialization and we need to fill in an all-encompassing schema.
if (!config.schemasEnabled()) {
ObjectNode envelope = JSON_NODE_FACTORY.objectNode();
envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null);
envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue);
jsonValue = envelope;
}
Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
return new SchemaAndValue(
schema,
convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config)
);
}
|
@Test
public void timeToConnect() {
Schema schema = Time.SCHEMA;
GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
calendar.setTimeZone(TimeZone.getTimeZone("UTC"));
calendar.add(Calendar.MILLISECOND, 14400000);
java.util.Date reference = calendar.getTime();
String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1 }, \"payload\": 14400000 }";
SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes());
java.util.Date converted = (java.util.Date) schemaAndValue.value();
assertEquals(schema, schemaAndValue.schema());
assertEquals(reference, converted);
}
|
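// Incremental state-machine decoder for SPDY frames: each case reads one
// logical chunk, returns early whenever too few bytes are readable, and
// resumes in the same state on the next invocation.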
public void decode(ByteBuf buffer) {
boolean last;
int statusCode;
while (true) {
switch(state) {
case READ_COMMON_HEADER:
if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
return;
}
int frameOffset = buffer.readerIndex();
int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
buffer.skipBytes(SPDY_HEADER_SIZE);
boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
int version;
int type;
if (control) {
// Decode control frame common header
version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
streamId = 0; // Default to session Stream-ID
} else {
// Decode data frame common header
version = spdyVersion; // Default to expected version
type = SPDY_DATA_FRAME;
streamId = getUnsignedInt(buffer, frameOffset);
}
flags = buffer.getByte(flagsOffset);
length = getUnsignedMedium(buffer, lengthOffset);
// Check version first then validity
if (version != spdyVersion) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SPDY Version");
} else if (!isValidFrameHeader(streamId, type, flags, length)) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid Frame Error");
} else {
state = getNextState(type, length);
}
break;
case READ_DATA_FRAME:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
break;
}
// Generate data frames that do not exceed maxChunkSize
int dataLength = Math.min(maxChunkSize, length);
// Wait until entire frame is readable
if (buffer.readableBytes() < dataLength) {
return;
}
ByteBuf data = buffer.alloc().buffer(dataLength);
data.writeBytes(buffer, dataLength);
length -= dataLength;
if (length == 0) {
state = State.READ_COMMON_HEADER;
}
last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
delegate.readDataFrame(streamId, last, data);
break;
case READ_SYN_STREAM_FRAME:
if (buffer.readableBytes() < 10) {
return;
}
int offset = buffer.readerIndex();
streamId = getUnsignedInt(buffer, offset);
int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
last = hasFlag(flags, SPDY_FLAG_FIN);
boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
buffer.skipBytes(10);
length -= 10;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_STREAM Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
}
break;
case READ_SYN_REPLY_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SYN_REPLY Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readSynReplyFrame(streamId, last);
}
break;
case READ_RST_STREAM_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (streamId == 0 || statusCode == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid RST_STREAM Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readRstStreamFrame(streamId, statusCode);
}
break;
case READ_SETTINGS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
numSettings = getUnsignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
length -= 4;
// Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
if ((length & 0x07) != 0 || length >> 3 != numSettings) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid SETTINGS Frame");
} else {
state = State.READ_SETTING;
delegate.readSettingsFrame(clear);
}
break;
case READ_SETTING:
if (numSettings == 0) {
state = State.READ_COMMON_HEADER;
delegate.readSettingsEnd();
break;
}
if (buffer.readableBytes() < 8) {
return;
}
byte settingsFlags = buffer.getByte(buffer.readerIndex());
int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
int value = getSignedInt(buffer, buffer.readerIndex() + 4);
boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
buffer.skipBytes(8);
--numSettings;
delegate.readSetting(id, value, persistValue, persisted);
break;
case READ_PING_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
int pingId = getSignedInt(buffer, buffer.readerIndex());
buffer.skipBytes(4);
state = State.READ_COMMON_HEADER;
delegate.readPingFrame(pingId);
break;
case READ_GOAWAY_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
state = State.READ_COMMON_HEADER;
delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
break;
case READ_HEADERS_FRAME:
if (buffer.readableBytes() < 4) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
last = hasFlag(flags, SPDY_FLAG_FIN);
buffer.skipBytes(4);
length -= 4;
if (streamId == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid HEADERS Frame");
} else {
state = State.READ_HEADER_BLOCK;
delegate.readHeadersFrame(streamId, last);
}
break;
case READ_WINDOW_UPDATE_FRAME:
if (buffer.readableBytes() < 8) {
return;
}
streamId = getUnsignedInt(buffer, buffer.readerIndex());
int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
buffer.skipBytes(8);
if (deltaWindowSize == 0) {
state = State.FRAME_ERROR;
delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
} else {
state = State.READ_COMMON_HEADER;
delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
}
break;
case READ_HEADER_BLOCK:
if (length == 0) {
state = State.READ_COMMON_HEADER;
delegate.readHeaderBlockEnd();
break;
}
if (!buffer.isReadable()) {
return;
}
int compressedBytes = Math.min(buffer.readableBytes(), length);
ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
headerBlock.writeBytes(buffer, compressedBytes);
length -= compressedBytes;
delegate.readHeaderBlock(headerBlock);
break;
case DISCARD_FRAME:
int numBytes = Math.min(buffer.readableBytes(), length);
buffer.skipBytes(numBytes);
length -= numBytes;
if (length == 0) {
state = State.READ_COMMON_HEADER;
break;
}
return;
case FRAME_ERROR:
buffer.skipBytes(buffer.readableBytes());
return;
default:
throw new Error("Shouldn't reach here.");
}
}
}
|
@Test
public void testUnknownSpdyHeadersFrameFlags() throws Exception {
short type = 8;
byte flags = (byte) 0xFE; // undefined flags
int length = 4;
int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
encodeControlFrameHeader(buf, type, flags, length);
buf.writeInt(streamId);
decoder.decode(buf);
verify(delegate).readHeadersFrame(streamId, false);
verify(delegate).readHeaderBlockEnd();
assertFalse(buf.isReadable());
buf.release();
}
|
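// Removes a 64-bit member by splitting it into a 32-bit prefix (selecting a
// Storage32) and a 32-bit suffix. The lastPrefix/lastStorage pair acts as a
// one-entry cache that short-circuits the storages lookup for repeated
// prefixes; it is invalidated whenever a removal succeeds.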
public boolean remove(long member) {
assert member >= 0;
int prefix = (int) (member >>> Integer.SIZE);
if (prefix == lastPrefix) {
if (lastStorage.remove((int) member)) {
lastPrefix = -1;
lastStorage = null;
return storages.clear(prefix);
} else {
return false;
}
} else {
Storage32 storage = storages.get(prefix);
if (storage == null) {
return false;
}
if (storage.remove((int) member)) {
lastPrefix = -1;
lastStorage = null;
return storages.clear(prefix);
} else {
lastPrefix = prefix;
lastStorage = storage;
return false;
}
}
}
|
@Test
public void testRemove() {
// try to clear empty set
for (long i = 0; i < ARRAY_STORAGE_32_MAX_SIZE / 2; ++i) {
clear(i);
verify();
}
// at the beginning
for (long i = 0; i < ARRAY_STORAGE_32_MAX_SIZE / 2; ++i) {
set(i);
}
for (long i = 0; i < ARRAY_STORAGE_32_MAX_SIZE / 2 + 100; ++i) {
clear(i);
verify();
// try nonexistent
clear(i);
verify();
}
// offset
for (long i = 1000000; i < 1000000 + ARRAY_STORAGE_32_MAX_SIZE; ++i) {
set(i);
}
for (long i = 1000000 + ARRAY_STORAGE_32_MAX_SIZE + 100; i >= 1000000; --i) {
clear(i);
verify();
// try nonexistent
clear(i);
verify();
}
// test empty again
for (long i = 111; i < 1111; ++i) {
clear(i);
verify();
}
// try gaps
for (long i = 0; i < 1000; ++i) {
set(i * i);
}
for (long i = 0; i < 1000; ++i) {
clear(i * i);
verify();
clear(i * i);
verify();
}
// try larger gaps
for (long i = (long) Math.sqrt(Long.MAX_VALUE) - 1000; i < (long) Math.sqrt(Long.MAX_VALUE); ++i) {
set(i * i);
}
for (long i = (long) Math.sqrt(Long.MAX_VALUE) - 1000; i < (long) Math.sqrt(Long.MAX_VALUE); ++i) {
clear(i * i);
verify();
}
// try larger 2-element gaps
for (long i = (long) Math.sqrt(Long.MAX_VALUE) - 1000; i < (long) Math.sqrt(Long.MAX_VALUE); ++i) {
set(i * i);
set(i * i - 1);
}
for (long i = (long) Math.sqrt(Long.MAX_VALUE) - 1000; i < (long) Math.sqrt(Long.MAX_VALUE); ++i) {
clear(i * i);
verify();
clear(i * i - 1);
verify();
}
// try some edge cases
for (long i = 0; i <= 2; ++i) {
set(i);
}
for (long i = 0; i <= 2; ++i) {
clear(i);
verify();
}
for (long i = Short.MAX_VALUE - 2; i <= Short.MAX_VALUE + 2; ++i) {
set(i);
}
for (long i = Short.MAX_VALUE - 2; i <= Short.MAX_VALUE + 2; ++i) {
clear(i);
verify();
}
for (long i = Integer.MAX_VALUE - 2; i <= (long) Integer.MAX_VALUE + 2; ++i) {
set(i);
}
for (long i = Integer.MAX_VALUE - 2; i <= (long) Integer.MAX_VALUE + 2; ++i) {
clear(i);
verify();
}
for (long i = Long.MAX_VALUE; i >= Long.MAX_VALUE - 2; --i) {
set(i);
}
for (long i = Long.MAX_VALUE; i >= Long.MAX_VALUE - 2; --i) {
clear(i);
verify();
}
}
|
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof AtomicPositiveInteger)) {
return false;
}
AtomicPositiveInteger other = (AtomicPositiveInteger) obj;
return intValue() == other.intValue();
}
|
@Test
void testEquals() {
assertEquals(new AtomicPositiveInteger(), new AtomicPositiveInteger());
assertEquals(new AtomicPositiveInteger(1), new AtomicPositiveInteger(1));
}
|
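// Returns the first rating whose bounds contain the given density; values
// matching no bound are rejected with an IllegalArgumentException.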
public Rating getRatingForDensity(double value) {
return ratingBounds.entrySet().stream()
.filter(e -> e.getValue().match(value))
.map(Map.Entry::getKey)
.findFirst()
.orElseThrow(() -> new IllegalArgumentException(format("Invalid value '%s'", value)));
}
|
@Test
public void fail_on_invalid_density() {
assertThatThrownBy(() -> ratingGrid.getRatingForDensity(-1))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Invalid value '-1.0'");
}
|
public static UriTemplate create(String template, Charset charset) {
return new UriTemplate(template, true, charset);
}
|
@Test
void ensurePlusIsSupportedOnPath() {
String template = "https://www.example.com/sam+adams/beer/{type}";
UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8);
String expanded = uriTemplate.expand(Collections.emptyMap());
assertThat(expanded).isEqualToIgnoringCase("https://www.example.com/sam+adams/beer/");
}
|
@Override
public void validateUserList(Collection<Long> ids) {
if (CollUtil.isEmpty(ids)) {
return;
}
// Fetch the user records
List<AdminUserDO> users = userMapper.selectBatchIds(ids);
Map<Long, AdminUserDO> userMap = CollectionUtils.convertMap(users, AdminUserDO::getId);
// Validate
ids.forEach(id -> {
AdminUserDO user = userMap.get(id);
if (user == null) {
throw exception(USER_NOT_EXISTS);
}
if (!CommonStatusEnum.ENABLE.getStatus().equals(user.getStatus())) {
throw exception(USER_IS_DISABLE, user.getNickname());
}
});
}
|
@Test
public void testValidateUserList_notFound() {
// Prepare parameters
List<Long> ids = singletonList(randomLongId());
// Invoke and assert the expected exception
assertServiceException(() -> userService.validateUserList(ids), USER_NOT_EXISTS);
}
|
@Override
protected void analyzeDependency(Dependency dependency, Engine engine) throws AnalysisException {
removeJreEntries(dependency);
removeBadMatches(dependency);
removeWrongVersionMatches(dependency);
removeSpuriousCPE(dependency);
removeDuplicativeEntriesFromJar(dependency, engine);
addFalseNegativeCPEs(dependency);
}
|
@Test
public void testAnalyzeDependency() throws Exception {
Dependency dependency = new Dependency();
dependency.setFileName("pom.xml");
dependency.setFilePath("pom.xml");
Cpe cpe = builder.part(Part.APPLICATION).vendor("file").product("file").version("1.2.1").build();
CpeIdentifier id = new CpeIdentifier(cpe, "http://some.org/url", Confidence.HIGHEST);
dependency.addVulnerableSoftwareIdentifier(id);
Engine engine = null;
FalsePositiveAnalyzer instance = new FalsePositiveAnalyzer();
int before = dependency.getVulnerableSoftwareIdentifiers().size();
instance.analyze(dependency, engine);
int after = dependency.getVulnerableSoftwareIdentifiers().size();
assertTrue(before > after);
}
|
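// Loads all requested keys in a single SQL query. When the result has exactly
// two columns and single-column-as-value is configured, the non-id column is
// used directly as the map value; otherwise the row is converted to a
// GenericRecord.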
@Override
public Map<K, V> loadAll(Collection<K> keys) {
awaitSuccessfulInit();
Object[] keysArray = keys.toArray();
String sql = queries.loadAll(keys.size());
try (SqlResult queryResult = sqlService.execute(sql, keysArray)) {
Iterator<SqlRow> it = queryResult.iterator();
Map<K, V> result = new HashMap<>();
while (it.hasNext()) {
SqlRow sqlRow = it.next();
// If there is a single column as the value, return that column as the value
if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
K id = sqlRow.getObject(genericMapStoreProperties.idColumn);
result.put(id, sqlRow.getObject(1));
} else {
K id = sqlRow.getObject(genericMapStoreProperties.idColumn);
//noinspection unchecked
V record = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
result.put(id, record);
}
}
return result;
}
}
|
@Test
public void givenRowDoesNotExist_whenLoadAll_thenReturnEmptyMap() {
objectProvider.createObject(mapName, false);
mapLoader = createMapLoader();
Map<Integer, GenericRecord> records = mapLoader.loadAll(newArrayList(0));
assertThat(records).isEmpty();
}
|
public ProjectCleaner purge(DbSession session, String rootUuid, String projectUuid, Configuration projectConfig, Set<String> disabledComponentUuids) {
long start = System.currentTimeMillis();
profiler.reset();
periodCleaner.clean(session, rootUuid, projectConfig);
PurgeConfiguration configuration = newDefaultPurgeConfiguration(projectConfig, rootUuid, projectUuid, disabledComponentUuids);
purgeDao.purge(session, configuration, purgeListener, profiler);
session.commit();
logProfiling(start, projectConfig);
return this;
}
|
@Test
public void call_period_cleaner_index_client_and_purge_dao() {
settings.setProperty(PurgeConstants.DAYS_BEFORE_DELETING_CLOSED_ISSUES, 5);
underTest.purge(mock(DbSession.class), "root", "project", settings.asConfig(), emptySet());
verify(periodCleaner).clean(any(), any(), any());
verify(dao).purge(any(), any(), any(), any());
}
|
@Override
public Set<TopicPartition> changelogPartitions() {
return task.changelogPartitions();
}
|
@Test
public void shouldDelegateChangelogPartitions() {
final ReadOnlyTask readOnlyTask = new ReadOnlyTask(task);
readOnlyTask.changelogPartitions();
verify(task).changelogPartitions();
}
|
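// Exposes heap, non-heap, combined and per-pool memory gauges. A max of -1
// means "undefined": the combined max then collapses to -1, and the non-heap
// and pool usage ratios fall back to the committed size.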
@Override
public Map<String, Metric> getMetrics() {
final Map<String, Metric> gauges = new HashMap<>();
gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit() +
mxBean.getNonHeapMemoryUsage().getInit());
gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed() +
mxBean.getNonHeapMemoryUsage().getUsed());
gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1 ?
-1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax());
gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted() +
mxBean.getNonHeapMemoryUsage().getCommitted());
gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit());
gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed());
gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax());
gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted());
gauges.put("heap.usage", new RatioGauge() {
@Override
protected Ratio getRatio() {
final MemoryUsage usage = mxBean.getHeapMemoryUsage();
return Ratio.of(usage.getUsed(), usage.getMax());
}
});
gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit());
gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed());
gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax());
gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted());
gauges.put("non-heap.usage", new RatioGauge() {
@Override
protected Ratio getRatio() {
final MemoryUsage usage = mxBean.getNonHeapMemoryUsage();
return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
}
});
for (final MemoryPoolMXBean pool : memoryPools) {
final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-"));
gauges.put(name(poolName, "usage"), new RatioGauge() {
@Override
protected Ratio getRatio() {
MemoryUsage usage = pool.getUsage();
return Ratio.of(usage.getUsed(),
usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
}
});
gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax());
gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed());
gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted());
// Only register GC usage metrics if the memory pool supports usage statistics.
if (pool.getCollectionUsage() != null) {
gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () ->
pool.getCollectionUsage().getUsed());
}
gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit());
}
return Collections.unmodifiableMap(gauges);
}
|
@Test
public void hasAGaugeForWeirdMemoryPoolUsed() {
final Gauge gauge = (Gauge) gauges.getMetrics().get("pools.Weird-Pool.used");
assertThat(gauge.getValue())
.isEqualTo(300L);
}
|
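// Transitions LATENT -> STARTED exactly once, then registers the connection
// state listener and queues the initial leadership attempt.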
public void start() {
Preconditions.checkState(state.compareAndSet(State.LATENT, State.STARTED), "Cannot be started more than once");
Preconditions.checkState(!executorService.isShutdown(), "Already started");
Preconditions.checkState(!hasLeadership, "Already has leadership");
client.getConnectionStateListenable().addListener(listener);
requeue();
}
|
@Test
public void testLeaderNodeDeleteOnInterrupt() throws Exception {
Timing2 timing = new Timing2();
LeaderSelector selector = null;
CuratorFramework client = null;
try {
client = CuratorFrameworkFactory.newClient(
server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1));
final CountDownLatch reconnectedLatch = new CountDownLatch(1);
ConnectionStateListener connectionStateListener = new ConnectionStateListener() {
@Override
public void stateChanged(CuratorFramework client, ConnectionState newState) {
if (newState == ConnectionState.RECONNECTED) {
reconnectedLatch.countDown();
}
}
};
client.getConnectionStateListenable().addListener(connectionStateListener);
client.start();
final BlockingQueue<Thread> queue = new ArrayBlockingQueue<Thread>(1);
LeaderSelectorListener listener = new LeaderSelectorListener() {
@Override
public void takeLeadership(CuratorFramework client) throws Exception {
queue.add(Thread.currentThread());
try {
Thread.currentThread().join();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
@Override
public void stateChanged(CuratorFramework client, ConnectionState newState) {}
};
selector = new LeaderSelector(client, "/leader", listener);
selector.start();
Thread leaderThread = timing.takeFromQueue(queue);
server.stop();
leaderThread.interrupt();
server.restart();
assertTrue(timing.awaitLatch(reconnectedLatch));
timing.sleepABit();
assertEquals(client.getChildren().forPath("/leader").size(), 0);
} finally {
CloseableUtils.closeQuietly(selector);
CloseableUtils.closeQuietly(client);
}
}
|
@Override
public ProtobufSystemInfo.Section toProtobuf() {
ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder();
protobuf.setName("System");
setAttribute(protobuf, "Server ID", server.getId());
setAttribute(protobuf, "Version", getVersion());
setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel());
setAttribute(protobuf, NCLOC.getName(), statisticsSupport.getLinesOfCode());
setAttribute(protobuf, "Container", containerSupport.isRunningInContainer());
setAttribute(protobuf, "External Users and Groups Provisioning", commonSystemInformation.getManagedInstanceProviderName());
setAttribute(protobuf, "External User Authentication", commonSystemInformation.getExternalUserAuthentication());
addIfNotEmpty(protobuf, "Accepted external identity providers",
commonSystemInformation.getEnabledIdentityProviders());
addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up",
commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders());
setAttribute(protobuf, "High Availability", false);
setAttribute(protobuf, "Official Distribution", officialDistribution.check());
setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication());
setAttribute(protobuf, "Home Dir", config.get(PATH_HOME.getKey()).orElse(null));
setAttribute(protobuf, "Data Dir", config.get(PATH_DATA.getKey()).orElse(null));
setAttribute(protobuf, "Temp Dir", config.get(PATH_TEMP.getKey()).orElse(null));
setAttribute(protobuf, "Processors", Runtime.getRuntime().availableProcessors());
return protobuf.build();
}
|
@Test
@UseDataProvider("trueOrFalse")
public void toProtobuf_whenRunningOrNotRunningInContainer_shouldReturnCorrectFlag(boolean flag) {
when(containerSupport.isRunningInContainer()).thenReturn(flag);
ProtobufSystemInfo.Section protobuf = underTest.toProtobuf();
assertThat(attribute(protobuf, "Container").getBooleanValue()).isEqualTo(flag);
}
|
@Override
public int hashCode() {
return Objects.hash(
threadName,
threadState,
activeTasks,
standbyTasks,
mainConsumerClientId,
restoreConsumerClientId,
producerClientIds,
adminClientId);
}
|
@Test
public void shouldNotBeEqualIfDifferInThreadState() {
final ThreadMetadata differThreadState = new ThreadMetadataImpl(
THREAD_NAME,
"different",
MAIN_CONSUMER_CLIENT_ID,
RESTORE_CONSUMER_CLIENT_ID,
PRODUCER_CLIENT_IDS,
ADMIN_CLIENT_ID,
ACTIVE_TASKS,
STANDBY_TASKS
);
assertThat(threadMetadata, not(equalTo(differThreadState)));
assertThat(threadMetadata.hashCode(), not(equalTo(differThreadState.hashCode())));
}
|
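// Runs a window-key range query against the state store, then filters the
// returned windows against both the start and end bounds before materializing
// them as WindowedRows together with the query's position.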
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStartBounds,
final Range<Instant> windowEndBounds,
final Optional<Position> position
) {
try {
final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);
StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
inStore(stateStore.getStateStoreName()).withQuery(query);
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final KafkaStreams streams = stateStore.getKafkaStreams();
final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
streams.query(request);
final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
result.getPartitionResults().get(partition);
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
}
if (queryResult.getResult() == null) {
return KsMaterializedQueryResult.rowIteratorWithPosition(
Collections.emptyIterator(), queryResult.getPosition());
}
try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it
= queryResult.getResult()) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
final Instant windowStart = Instant.ofEpochMilli(next.key);
if (!windowStartBounds.contains(windowStart)) {
continue;
}
final Instant windowEnd = windowStart.plus(windowSize);
if (!windowEndBounds.contains(windowEnd)) {
continue;
}
final TimeWindow window =
new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
new Windowed<>(key, window),
next.value.value(),
next.value.timestamp()
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIteratorWithPosition(
builder.build().iterator(), queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
|
@Test
@SuppressWarnings("unchecked")
public void shouldReturnValuesForOpenEndBounds() {
// Given:
final Range<Instant> end = Range.open(
NOW,
NOW.plusSeconds(10)
);
final Range<Instant> startEquiv = Range.open(
end.lowerEndpoint().minus(WINDOW_SIZE),
end.upperEndpoint().minus(WINDOW_SIZE)
);
final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> partitionResult =
new StateQueryResult<>();
final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult = QueryResult.forResult(fetchIterator);
queryResult.setPosition(POSITION);
partitionResult.addResult(PARTITION, queryResult);
when(kafkaStreams.query(any(StateQueryRequest.class))).thenReturn(partitionResult);
when(fetchIterator.hasNext())
.thenReturn(true)
.thenReturn(true)
.thenReturn(true)
.thenReturn(false);
when(fetchIterator.next())
.thenReturn(new KeyValue<>(startEquiv.lowerEndpoint().toEpochMilli(), VALUE_1))
.thenReturn(
new KeyValue<>(startEquiv.lowerEndpoint().plusMillis(1).toEpochMilli(), VALUE_2))
.thenReturn(new KeyValue<>(startEquiv.upperEndpoint().toEpochMilli(), VALUE_3))
.thenThrow(new AssertionError());
// When:
final KsMaterializedQueryResult<WindowedRow> result =
table.get(A_KEY, PARTITION, Range.all(), end);
// Then:
final Iterator<WindowedRow> rowIterator = result.getRowIterator();
assertThat(rowIterator.hasNext(), is(true));
final List<WindowedRow> resultList = Lists.newArrayList(rowIterator);
assertThat(resultList, contains(
WindowedRow.of(
SCHEMA,
windowedKey(startEquiv.lowerEndpoint().plusMillis(1)),
VALUE_2.value(),
VALUE_2.timestamp()
)
));
assertThat(result.getPosition(), not(Optional.empty()));
assertThat(result.getPosition().get(), is(POSITION));
}
|
public static <T> Inner<T> create() {
return new Inner<T>();
}
|
@Test
@Category(NeedsRunner.class)
public void testMissingFieldIndex() {
thrown.expect(IllegalArgumentException.class);
pipeline
.apply(Create.of(new AutoValue_FilterTest_Simple("pass", 52, 2)))
.apply(Filter.<AutoValue_FilterTest_Simple>create().whereFieldId(23, f -> true));
pipeline.run();
}
|
public NonClosedTracking<RAW, BASE> trackNonClosed(Input<RAW> rawInput, Input<BASE> baseInput) {
NonClosedTracking<RAW, BASE> tracking = NonClosedTracking.of(rawInput, baseInput);
// 1. match by rule, line, line hash and message
match(tracking, LineAndLineHashAndMessage::new);
// 2. match issues with same rule, same line and same line hash, but not necessarily with same message
match(tracking, LineAndLineHashKey::new);
// 3. detect code moves by comparing blocks of codes
detectCodeMoves(rawInput, baseInput, tracking);
// 4. match issues with same rule, same message and same line hash
match(tracking, LineHashAndMessageKey::new);
// 5. match issues with same rule, same line and same message
match(tracking, LineAndMessageKey::new);
// 6. match issues with same rule and same line hash but different line and different message.
// See SONAR-2812
match(tracking, LineHashKey::new);
return tracking;
}
|
@Test
public void no_lines_and_different_messages_match() {
FakeInput baseInput = new FakeInput("H1", "H2", "H3");
Issue base = baseInput.createIssue(RULE_SYSTEM_PRINT, "msg1");
FakeInput rawInput = new FakeInput("H10", "H11", "H12");
Issue raw = rawInput.createIssue(RULE_SYSTEM_PRINT, "msg2");
Tracking<Issue, Issue> tracking = tracker.trackNonClosed(rawInput, baseInput);
assertThat(tracking.baseFor(raw)).isSameAs(base);
}
|
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
if (null == source) {
return null;
}
T target = ReflectUtil.newInstanceIfPossible(tClass);
copyProperties(source, target, CopyOptions.create().setIgnoreProperties(ignoreProperties));
return target;
}
|
@Test
public void copyBeanToBeanTest() {
// Verify that field aliases are honored by copyProperties
final Food info = new Food();
info.setBookID("0");
info.setCode("123");
final HllFoodEntity entity = new HllFoodEntity();
BeanUtil.copyProperties(info, entity);
assertEquals(info.getBookID(), entity.getBookId());
assertEquals(info.getCode(), entity.getCode2());
}
|
public static AddressV2Message read(ByteBuffer payload) throws BufferUnderflowException, ProtocolException {
return new AddressV2Message(readAddresses(payload, 2));
}
|
@Test
public void roundtrip() {
AddressMessage message = AddressV2Message.read(ByteBuffer.wrap(ByteUtils.parseHex(MESSAGE_HEX)));
List<PeerAddress> addresses = message.getAddresses();
assertEquals(5, addresses.size());
PeerAddress a0 = addresses.get(0);
assertEquals("2009-01-09T02:54:25Z", TimeUtils.dateTimeFormat(a0.time()));
assertFalse(a0.getServices().hasAny());
assertTrue(a0.getAddr() instanceof Inet4Address);
assertEquals("0.0.0.1", a0.getAddr().getHostAddress());
assertNull(a0.getHostname());
assertEquals(0, a0.getPort());
PeerAddress a1 = addresses.get(1);
assertEquals("2039-11-22T11:22:33Z", TimeUtils.dateTimeFormat(a1.time()));
assertEquals(Services.NODE_NETWORK, a1.getServices().bits());
assertTrue(a1.getAddr() instanceof Inet6Address);
assertEquals("0:0:0:0:0:0:0:1", a1.getAddr().getHostAddress());
assertNull(a1.getHostname());
assertEquals(0xf1, a1.getPort());
PeerAddress a2 = addresses.get(2);
assertEquals("2106-02-07T06:28:15Z", TimeUtils.dateTimeFormat(a2.time()));
assertEquals(Services.NODE_WITNESS | 1 << 6 /* NODE_COMPACT_FILTERS */
| Services.NODE_NETWORK_LIMITED, a2.getServices().bits());
assertTrue(a2.getAddr() instanceof Inet6Address);
assertEquals("0:0:0:0:0:0:0:1", a2.getAddr().getHostAddress());
assertNull(a2.getHostname());
assertEquals(0xf1f2, a2.getPort());
PeerAddress a3 = addresses.get(3);
assertEquals("1970-01-01T00:00:00Z", TimeUtils.dateTimeFormat(a3.time()));
assertFalse(a3.getServices().hasAny());
assertNull(a3.getAddr());
assertEquals("6hzph5hv6337r6p2.onion", a3.getHostname());
assertEquals(0, a3.getPort());
PeerAddress a4 = addresses.get(4);
assertEquals("1970-01-01T00:00:00Z", TimeUtils.dateTimeFormat(a4.time()));
assertFalse(a4.getServices().hasAny());
assertNull(a4.getAddr());
assertEquals("kpgvmscirrdqpekbqjsvw5teanhatztpp2gl6eee4zkowvwfxwenqaid.onion", a4.getHostname());
assertEquals(0, a4.getPort());
assertEquals(MESSAGE_HEX, ByteUtils.formatHex(message.serialize()));
}
|
public static NamespaceName get(String tenant, String namespace) {
validateNamespaceName(tenant, namespace);
return get(tenant + '/' + namespace);
}
|
@SuppressWarnings("AssertBetweenInconvertibleTypes")
@Test
public void namespace_equalsCheckType() {
assertNotEquals(NamespaceName.get("prop/cluster/ns"), "prop/cluster/ns");
}
|
public static Object[] realize(Object[] objs, Class<?>[] types) {
if (objs.length != types.length) {
throw new IllegalArgumentException("args.length != types.length");
}
Object[] dests = new Object[objs.length];
for (int i = 0; i < objs.length; i++) {
dests[i] = realize(objs[i], types[i]);
}
return dests;
}
|
@Test
public void testJSONObjectToPersonMapPojo() {
JSONObject jsonObject = new JSONObject();
jsonObject.put("personId", "1");
jsonObject.put("personName", "hand");
Object result = PojoUtils.realize(jsonObject, PersonMap.class);
assertEquals(PersonMap.class, result.getClass());
}
|
@Override
public void createNetwork(K8sNetwork network) {
checkNotNull(network, ERR_NULL_NETWORK);
checkArgument(!Strings.isNullOrEmpty(network.networkId()), ERR_NULL_NETWORK_ID);
k8sNetworkStore.createNetwork(network);
log.info(String.format(MSG_NETWORK, network.name(), MSG_CREATED));
}
|
@Test(expected = IllegalArgumentException.class)
public void testCreateDuplicateNetwork() {
target.createNetwork(NETWORK);
target.createNetwork(NETWORK);
}
|
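// Determines whether data written with the writer schema can be decoded with
// the reader schema, pairing the verdict with a human-readable message.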
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader, final Schema writer) {
final SchemaCompatibilityResult compatibility = new ReaderWriterCompatibilityChecker().getCompatibility(reader,
writer);
final String message;
switch (compatibility.getCompatibility()) {
case INCOMPATIBLE: {
message = String.format(
"Data encoded using writer schema:%n%s%n" + "will or may fail to decode using reader schema:%n%s%n",
writer.toString(true), reader.toString(true));
break;
}
case COMPATIBLE: {
message = READER_WRITER_COMPATIBLE_MESSAGE;
break;
}
default:
throw new AvroRuntimeException("Unknown compatibility: " + compatibility);
}
return new SchemaPairCompatibility(compatibility, reader, writer, message);
}
|
@Test
void readerWriterCompatibility() {
for (ReaderWriter readerWriter : COMPATIBLE_READER_WRITER_TEST_CASES) {
final Schema reader = readerWriter.getReader();
final Schema writer = readerWriter.getWriter();
LOG.debug("Testing compatibility of reader {} with writer {}.", reader, writer);
final SchemaPairCompatibility result = checkReaderWriterCompatibility(reader, writer);
assertEquals(SchemaCompatibilityType.COMPATIBLE, result.getType(), String
.format("Expecting reader %s to be compatible with writer %s, but tested incompatible.", reader, writer));
}
}
|
public String convert(ILoggingEvent le) {
List<Marker> markers = le.getMarkers();
if (markers == null || markers.isEmpty()) {
return EMPTY;
} else {
return markers.toString();
}
}
|
@Test
public void testWithNullMarker() {
String result = converter.convert(createLoggingEvent(null));
assertEquals("[null]", result);
}
|
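// Partitions the tasklets into cooperative and blocking groups (isCooperative
// is evaluated under the job class loader) and submits each group separately;
// any submission failure completes the shared tracker future exceptionally.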
CompletableFuture<Void> beginExecute(
@Nonnull List<? extends Tasklet> tasklets,
@Nonnull CompletableFuture<Void> cancellationFuture,
@Nonnull ClassLoader jobClassLoader
) {
final ExecutionTracker executionTracker = new ExecutionTracker(tasklets.size(), cancellationFuture);
try {
final Map<Boolean, List<Tasklet>> byCooperation =
tasklets.stream().collect(partitioningBy(
tasklet -> doWithClassLoader(jobClassLoader, tasklet::isCooperative)
));
submitCooperativeTasklets(executionTracker, jobClassLoader, byCooperation.get(true));
submitBlockingTasklets(executionTracker, jobClassLoader, byCooperation.get(false));
} catch (Throwable t) {
executionTracker.future.internalCompleteExceptionally(t);
}
return executionTracker.future;
}
|
@Test
public void when_twoNonBlockingTasklets_then_differentWorker() {
// Given
TaskletAssertingThreadLocal t1 = new TaskletAssertingThreadLocal();
TaskletAssertingThreadLocal t2 = new TaskletAssertingThreadLocal();
assertTrue(t1.isCooperative());
// When
CompletableFuture<Void> f1 = tes.beginExecute(singletonList(t1), new CompletableFuture<>(), classLoader);
CompletableFuture<Void> f2 = tes.beginExecute(singletonList(t2), new CompletableFuture<>(), classLoader);
f1.join();
f2.join();
// Then
// -- assertions are inside TaskletAssertingThreadLocal and will fail if t1 and t2 run on the same thread
}
|
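// One iteration of the MySQL command loop: reset per-command state, fetch a
// single protocol packet, dispatch it, then mark the connection idle again
// with COM_SLEEP.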
public void processOnce() throws IOException {
// set status of query to OK.
ctx.getState().reset();
executor = null;
// reset sequence id of MySQL protocol
final MysqlChannel channel = ctx.getMysqlChannel();
channel.setSequenceId(0);
// read packet from channel
try {
packetBuf = channel.fetchOnePacket();
if (packetBuf == null) {
throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet.");
}
} catch (AsynchronousCloseException e) {
// When this happens, the timeout checker has closed this channel.
// The killed flag in ctx has already been set, so just return.
return;
}
// dispatch
dispatch();
// finalize
finalizeCommand();
ctx.setCommand(MysqlCommand.COM_SLEEP);
}
|
@Test
public void testQuery(@Mocked StmtExecutor executor) throws Exception {
ConnectContext ctx = initMockContext(mockChannel(queryPacket), GlobalStateMgr.getCurrentState());
ConnectProcessor processor = new ConnectProcessor(ctx);
// Mock statement executor
new Expectations() {
{
executor.getQueryStatisticsForAuditLog();
minTimes = 0;
result = statistics;
}
};
processor.processOnce();
Assert.assertEquals(MysqlCommand.COM_QUERY, myContext.getCommand());
}
|
public static Map<String, String> generateMetricsAndLogConfigMapData(Reconciliation reconciliation, AbstractModel model, MetricsAndLogging metricsAndLogging) {
Map<String, String> data = new HashMap<>(2);
if (model instanceof SupportsLogging supportsLogging) {
data.put(supportsLogging.logging().configMapKey(), supportsLogging.logging().loggingConfiguration(reconciliation, metricsAndLogging.loggingCm()));
}
if (model instanceof SupportsMetrics supportMetrics) {
String parseResult = supportMetrics.metrics().metricsJson(reconciliation, metricsAndLogging.metricsCm());
if (parseResult != null) {
data.put(MetricsModel.CONFIG_MAP_KEY, parseResult);
}
}
return data;
}
|
@Test
public void testConfigMapDataWithMetricsAndLogging() {
Kafka kafka = new KafkaBuilder()
.withNewMetadata()
.endMetadata()
.build();
Map<String, String> data = ConfigMapUtils.generateMetricsAndLogConfigMapData(Reconciliation.DUMMY_RECONCILIATION, new ModelWithMetricsAndLogging(kafka), new MetricsAndLogging(new ConfigMapBuilder().withNewMetadata().withName("metrics-cm").endMetadata().withData(Map.of("metrics.yaml", "")).build(), new ConfigMapBuilder().withNewMetadata().withName("logging-cm").endMetadata().withData(Map.of("log4j.properties", "")).build()));
assertThat(data.size(), is(2));
assertThat(data.get(MetricsModel.CONFIG_MAP_KEY), is(notNullValue()));
assertThat(data.get(LoggingModel.LOG4J1_CONFIG_MAP_KEY), is(notNullValue()));
}
|
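// Scans the mount-table directory for files named like
// mount-table.<versionNumber>.xml, picks the highest version, and merges its
// properties into the supplied configuration.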
@Override
public void load(String mountTableConfigPath, Configuration conf)
throws IOException {
this.mountTable = new Path(mountTableConfigPath);
String scheme = mountTable.toUri().getScheme();
FsGetter fsGetter = new ViewFileSystemOverloadScheme.ChildFsGetter(scheme);
try (FileSystem fs = fsGetter.getNewInstance(mountTable.toUri(), conf)) {
RemoteIterator<LocatedFileStatus> listFiles =
fs.listFiles(mountTable, false);
LocatedFileStatus lfs = null;
int higherVersion = -1;
while (listFiles.hasNext()) {
LocatedFileStatus curLfs = listFiles.next();
String cur = curLfs.getPath().getName();
String[] nameParts = cur.split(REGEX_DOT);
if (nameParts.length < 2) {
logInvalidFileNameFormat(cur);
continue; // invalid file name
}
int curVersion = higherVersion;
try {
curVersion = Integer.parseInt(nameParts[nameParts.length - 2]);
} catch (NumberFormatException nfe) {
logInvalidFileNameFormat(cur);
continue;
}
if (curVersion > higherVersion) {
higherVersion = curVersion;
lfs = curLfs;
}
}
if (lfs == null) {
// No valid mount table file found.
// TODO: Should we fail? Currently viewfs init will fail if no mount
// links anyway.
LOGGER.warn("No valid mount-table file exist at: {}. At least one "
+ "mount-table file should present with the name format: "
+ "mount-table.<versionNumber>.xml", mountTableConfigPath);
return;
}
// Latest version file.
Path latestVersionMountTable = lfs.getPath();
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Loading the mount-table {} into configuration.",
latestVersionMountTable);
}
try (FSDataInputStream open = fs.open(latestVersionMountTable)) {
Configuration newConf = new Configuration(false);
newConf.addResource(open);
// This will add configuration props as resource, instead of stream
// itself. So, that stream can be closed now.
conf.addResource(newConf);
}
}
}
|
@Test(expected = FileNotFoundException.class)
public void testLoadWithMountFile() throws Exception {
loader.load(new URI(targetTestRoot.toString() + "/Non-Existent-File.xml")
.toString(), conf);
}
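
For reference, a minimal sketch (not from the source) of how load() above ranks candidate files: the version is the second-to-last dot-separated token of the file name, so mount-table.2.xml outranks mount-table.1.xml and malformed names are skipped.

// Illustrative helper mirroring the name handling inside load() above.
static int parseMountTableVersion(String fileName) {
    String[] parts = fileName.split("\\.");
    if (parts.length < 2) {
        return -1; // invalid: expects "mount-table.<versionNumber>.xml"
    }
    try {
        return Integer.parseInt(parts[parts.length - 2]);
    } catch (NumberFormatException nfe) {
        return -1; // non-numeric version token is skipped
    }
}
// parseMountTableVersion("mount-table.2.xml") == 2
// parseMountTableVersion("mount-table.xml")   == -1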
|
@Override
protected Map<String, ConfigValue> validateSourceConnectorConfig(SourceConnector connector, ConfigDef configDef, Map<String, String> config) {
Map<String, ConfigValue> result = super.validateSourceConnectorConfig(connector, configDef, config);
validateSourceConnectorExactlyOnceSupport(config, result, connector);
validateSourceConnectorTransactionBoundary(config, result, connector);
return result;
}
|
@Test
public void testExactlyOnceSourceSupportValidationWhenExactlyOnceNotEnabledOnWorker() {
Map<String, String> config = new HashMap<>();
config.put(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG, REQUIRED.toString());
SourceConnector connectorMock = mock(SourceConnector.class);
when(connectorMock.exactlyOnceSupport(eq(config))).thenReturn(ExactlyOnceSupport.SUPPORTED);
Map<String, ConfigValue> validatedConfigs = herder.validateSourceConnectorConfig(
connectorMock, SourceConnectorConfig.configDef(), config);
List<String> errors = validatedConfigs.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG).errorMessages();
assertEquals(
Collections.singletonList("This worker does not have exactly-once source support enabled."),
errors);
}
|
static Polygon buildPolygon(TDWay way) {
Coordinate[] coordinates = JTSUtils.toCoordinates(way);
return GEOMETRY_FACTORY.createPolygon(GEOMETRY_FACTORY.createLinearRing(coordinates), null);
}
|
@Test
public void testBuildValidPolygonWith2InnerRings() {
String testfile = "valid-polygon-2-inner-rings.wkt";
List<TDWay> ways = MockingUtils.wktPolygonToWays(testfile);
Polygon polygon = JTSUtils.buildPolygon(ways.get(0), ways.subList(1, ways.size()));
Geometry expected = MockingUtils.readWKTFile(testfile);
Assert.isTrue(polygon.isValid());
Assert.equals(expected, polygon);
}
|
static void createCompactedTopic(String topicName, short partitions, short replicationFactor, Admin admin) {
NewTopic topicDescription = TopicAdmin.defineTopic(topicName).
compacted().
partitions(partitions).
replicationFactor(replicationFactor).
build();
CreateTopicsOptions args = new CreateTopicsOptions().validateOnly(false);
try {
admin.createTopics(singleton(topicDescription), args).values().get(topicName).get();
log.info("Created topic '{}'", topicName);
} catch (InterruptedException e) {
Thread.interrupted();
throw new ConnectException("Interrupted while attempting to create/find topic '" + topicName + "'", e);
} catch (ExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof TopicExistsException) {
log.debug("Unable to create topic '{}' since it already exists.", topicName);
return;
}
if (cause instanceof UnsupportedVersionException) {
log.debug("Unable to create topic '{}' since the brokers do not support the CreateTopics API." +
" Falling back to assume topic exists or will be auto-created by the broker.",
topicName);
return;
}
if (cause instanceof TopicAuthorizationException) {
log.debug("Not authorized to create topic(s) '{}' upon the brokers." +
" Falling back to assume topic(s) exist or will be auto-created by the broker.",
topicName);
return;
}
if (cause instanceof ClusterAuthorizationException) {
log.debug("Not authorized to create topic '{}'." +
" Falling back to assume topic exists or will be auto-created by the broker.",
topicName);
return;
}
if (cause instanceof InvalidConfigurationException) {
throw new ConnectException("Unable to create topic '" + topicName + "': " + cause.getMessage(),
cause);
}
if (cause instanceof TimeoutException) {
// Timed out waiting for the operation to complete
throw new ConnectException("Timed out while checking for or creating topic '" + topicName + "'." +
" This could indicate a connectivity issue, unavailable topic partitions, or if" +
" this is your first use of the topic it may have taken too long to create.", cause);
}
throw new ConnectException("Error while attempting to create/find topic '" + topicName + "'", e);
}
}
|
@Test
public void testCreateCompactedTopicFailsWithInvalidConfigurationException() throws Exception {
Map<String, KafkaFuture<Void>> values = Collections.singletonMap(TOPIC, future);
when(future.get()).thenThrow(new ExecutionException(new InvalidConfigurationException("wrong config")));
when(ctr.values()).thenReturn(values);
when(admin.createTopics(any(), any())).thenReturn(ctr);
Throwable ce = assertThrows(ConnectException.class, () -> MirrorUtils.createCompactedTopic(TOPIC, (short) 1, (short) 1, admin), "Should have exception thrown");
assertInstanceOf(InvalidConfigurationException.class, ce.getCause());
verify(future).get();
verify(ctr).values();
verify(admin).createTopics(any(), any());
}
|
public synchronized boolean isUnsupportedBrowserEnvironment() {
String environment = getEnvironmentName();
if ( environment.contains( "linux" ) ) {
return false;
}
final String userAgent = getUserAgent();
if ( userAgent == null ) {
return true;
}
if ( environment.contains( "windows" ) ) {
Matcher edgeMatcher = EDGE_PATTERN.matcher( userAgent );
int edgeMinVersion = getSupportedVersion( "min.windows.browser.supported" );
return !edgeMatcher.find() || checkUserAgent( edgeMatcher, edgeMinVersion );
}
return checkUserAgent( SAFARI_PATTERN.matcher( userAgent ), getSupportedVersion( "min.mac.browser.supported" ) );
}
|
@Test
public void testIsUnSupportedBrowserEnvironment_ubuntu() {
EnvironmentUtilsMock mock = new EnvironmentUtilsMock( Case.UBUNTU );
assertTrue( mock.getMockedInstance().isUnsupportedBrowserEnvironment() );
mock = new EnvironmentUtilsMock( Case.UBUNTU_WRONG );
assertFalse( mock.getMockedInstance().isUnsupportedBrowserEnvironment() );
}
|
@Override
public Collection<Object> getAllTaskKeys() {
        throw new UnsupportedOperationException("ExecuteTaskEngine does not support getting all task keys");
}
|
@Test
void testGetAllTaskKeys() {
assertThrows(UnsupportedOperationException.class, () -> {
executeTaskExecuteEngine.getAllTaskKeys();
});
}
|
@Override
public void resetSentence() {
mPreviousWord = null;
}
|
@Test
public void testResetSentence() throws Exception {
mNextWordDictionaryUnderTest.load();
assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "hello");
assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "menny");
mNextWordDictionaryUnderTest.resetSentence();
assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "hello", "menny");
assertHasNextWordsForWord(mNextWordDictionaryUnderTest, "menny");
mNextWordDictionaryUnderTest.close();
}
|
@Override
public Object invoke( Object proxy, Method method, Object[] args ) throws Throwable {
try {
return method.invoke( repositoryService, args );
} catch ( InvocationTargetException ex ) {
return sessionTimeoutHandler.handle( repositoryService, ex.getCause(), method, args );
}
}
|
@SuppressWarnings( "unchecked" )
@Test
public void testHandlerCallOnException() throws Throwable {
when( repositoryService.getUsers() ).thenThrow( KettleRepositoryLostException.class );
Method method = RepositorySecurityManager.class.getMethod( "getUsers" );
metaStoresessionTimeoutHandler.invoke( mock( Proxy.class ), method, new Object[0] );
verify( sessionTimeoutHandler ).handle( any(), any(), any(), any() );
}
|
@Override
public ResponseSchemaEntry apply(RequestedField field) {
ResponseEntryDataType type = knownFields.stream()
.filter(f -> f.name().equals(field.name()))
.findFirst()
.map(MappedFieldTypeDTO::type)
.map(ResponseEntryDataType::fromFieldType)
.orElse(ResponseEntryDataType.UNKNOWN);
return ResponseSchemaEntry.field(field.toString(), type);
}
|
@Test
void testSchemaCreation() {
final Set<MappedFieldTypeDTO> knownFields = Set.of(
MappedFieldTypeDTO.create("age", org.graylog2.indexer.fieldtypes.FieldTypeMapper.LONG_TYPE),
MappedFieldTypeDTO.create("salary", org.graylog2.indexer.fieldtypes.FieldTypeMapper.DOUBLE_TYPE),
MappedFieldTypeDTO.create("position", org.graylog2.indexer.fieldtypes.FieldTypeMapper.STRING_TYPE)
);
final MessageFieldTypeMapper mapper = new MessageFieldTypeMapper(knownFields);
Assertions.assertThat(mapper.apply(RequestedField.parse("age"))).isEqualTo(ResponseSchemaEntry.field("age", ResponseEntryDataType.NUMERIC));
Assertions.assertThat(mapper.apply(RequestedField.parse("salary"))).isEqualTo(ResponseSchemaEntry.field("salary", ResponseEntryDataType.NUMERIC));
Assertions.assertThat(mapper.apply(RequestedField.parse("position"))).isEqualTo(ResponseSchemaEntry.field("position", ResponseEntryDataType.STRING));
Assertions.assertThat(mapper.apply(RequestedField.parse("nvmd"))).isEqualTo(ResponseSchemaEntry.field("nvmd", ResponseEntryDataType.UNKNOWN));
}
|
public static File createUniqueFile(String prefix) {
return new File(tempFolder(), prefix + "-" + UUID.randomUUID());
}
|
@Test
public void shouldCreateUniqueFilesParentDirectoryIfDoesNotExist() throws IOException {
String newTmpDir = original.getProperty("java.io.tmpdir") + "/" + UUID.randomUUID();
System.setProperty("java.io.tmpdir", newTmpDir);
File file = files.createUniqueFile("foo");
assertThat(file.getParentFile().exists(), is(true));
}
|
static void removeField(Event evt, List<String> fieldsToRemove) {
try {
for (String s : fieldsToRemove) {
String fieldToRemove = StringInterpolation.evaluate(evt, s);
evt.remove(fieldToRemove);
}
} catch (IOException ex) {
throw new IllegalStateException(ex);
}
}
|
@Test
public void testRemoveField() {
// remove a field
Event e = new Event();
String testField = "test_field";
String testValue = "test_value";
e.setField(testField, testValue);
CommonActions.removeField(e, Collections.singletonList(testField));
Assert.assertFalse(e.getData().keySet().contains(testField));
// remove non-existent field
e = new Event();
String testField2 = "test_field2";
e.setField(testField2, testValue);
CommonActions.removeField(e, Collections.singletonList(testField));
Assert.assertFalse(e.getData().keySet().contains(testField));
Assert.assertTrue(e.getData().keySet().contains(testField2));
// remove multiple fields
e = new Event();
List<String> fields = new ArrayList<>();
for (int k = 0; k < 3; k++) {
String field = testField + k;
e.setField(field, testValue);
fields.add(field);
}
e.setField(testField, testValue);
CommonActions.removeField(e, fields);
for (String field : fields) {
Assert.assertFalse(e.getData().keySet().contains(field));
}
Assert.assertTrue(e.getData().keySet().contains(testField));
// remove dynamically-named field
e = new Event();
String otherField = "other_field";
String otherValue = "other_value";
e.setField(otherField, otherValue);
String derivativeField = otherValue + "_foo";
e.setField(derivativeField, otherValue);
CommonActions.removeField(e, Collections.singletonList("%{" + otherField + "}_foo"));
Assert.assertFalse(e.getData().keySet().contains(derivativeField));
Assert.assertTrue(e.getData().keySet().contains(otherField));
}
|
@Override
public void requestDestroyed(final ServletRequestEvent sre) {
try {
HttpServletRequest request = (HttpServletRequest) sre.getServletRequest();
if (Objects.nonNull(request) && Objects.nonNull(request.getSession())) {
HttpSession session = request.getSession();
request.removeAttribute(CLIENT_IP_NAME);
session.removeAttribute(CLIENT_IP_NAME);
}
} catch (Exception e) {
LOG.error("request destroyed error", e);
}
}
|
@Test
public void testRequestDestroyed() {
ServletRequestEvent sre = mock(ServletRequestEvent.class);
HttpServletRequest request = mock(HttpServletRequest.class);
HttpSession session = mock(HttpSession.class);
when(sre.getServletRequest()).thenReturn(request);
when(request.getSession()).thenReturn(session);
websocketListener.requestDestroyed(sre);
verify(request).removeAttribute(CLIENT_IP_NAME);
verify(session).removeAttribute(CLIENT_IP_NAME);
}
|
static Map<String, String> resolveVariables(String expression, String str) {
if (expression == null || str == null) return Collections.emptyMap();
Map<String, String> resolvedVariables = new HashMap<>();
StringBuilder variableBuilder = new StringBuilder();
State state = State.TEXT;
int j = 0;
int expressionLength = expression.length();
for (int i = 0; i < expressionLength; i++) {
char e = expression.charAt(i);
switch (e) {
case '{':
if (state == END_VAR) return Collections.emptyMap();
state = VAR;
break;
case '}':
if (state != VAR) return Collections.emptyMap();
state = END_VAR;
if (i != expressionLength - 1) break;
default:
switch (state) {
case VAR:
variableBuilder.append(e);
break;
case END_VAR:
String replacement;
boolean ec = i == expressionLength - 1;
if (ec) {
replacement = str.substring(j);
} else {
int k = str.indexOf(e, j);
if (k == -1) return Collections.emptyMap();
replacement = str.substring(j, str.indexOf(e, j));
}
resolvedVariables.put(variableBuilder.toString(), replacement);
j += replacement.length();
if (j == str.length() && ec) return resolvedVariables;
variableBuilder.setLength(0);
                        state = TEXT; // intentional fall-through: the next literal character must still match
case TEXT:
if (str.charAt(j) != e) return Collections.emptyMap();
j++;
}
}
}
return resolvedVariables;
}
|
@Test
public void testPrefixSuffix() {
    Map<String, String> res = resolveVariables("prefix_{variable1}_{variable2}_suffix", "prefix_value1_value2_suffix");
    assertEquals(2, res.size());
    assertEquals("value1", res.get("variable1"));
    assertEquals("value2", res.get("variable2"));
}
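
A hedged companion sketch: by design of the state machine above, any literal mismatch, or a '{' arriving right after a closed variable, aborts and yields an empty map rather than a partial result.

// Illustrative sketch derived from resolveVariables() above; not from the source test suite.
@Test
public void testNoPartialResults() {
    // literal prefix does not match the input
    assertEquals(Collections.emptyMap(), resolveVariables("prefix_{v1}", "otherprefix_value1"));
    // '{' immediately after a closed variable is rejected
    assertEquals(Collections.emptyMap(), resolveVariables("{v1}{v2}", "value"));
    // a variable may span the whole input
    assertEquals(Collections.singletonMap("v1", "value"), resolveVariables("{v1}", "value"));
}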
|
public synchronized TopologyDescription describe() {
return internalTopologyBuilder.describe();
}
|
@Test
public void streamStreamOuterJoinTopologyWithCustomStoresSuppliers() {
final StreamsBuilder builder = new StreamsBuilder();
final KStream<Integer, String> stream1;
final KStream<Integer, String> stream2;
stream1 = builder.stream("input-topic1");
stream2 = builder.stream("input-topic2");
final JoinWindows joinWindows = JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(100));
final WindowBytesStoreSupplier thisStoreSupplier = Stores.inMemoryWindowStore("in-memory-join-store",
Duration.ofMillis(joinWindows.size() + joinWindows.gracePeriodMs()),
Duration.ofMillis(joinWindows.size()), true);
final WindowBytesStoreSupplier otherStoreSupplier = Stores.inMemoryWindowStore("in-memory-join-store-other",
Duration.ofMillis(joinWindows.size() + joinWindows.gracePeriodMs()),
Duration.ofMillis(joinWindows.size()), true);
stream1.outerJoin(
stream2,
MockValueJoiner.TOSTRING_JOINER,
joinWindows,
StreamJoined.with(Serdes.Integer(), Serdes.String(), Serdes.String())
.withThisStoreSupplier(thisStoreSupplier)
.withOtherStoreSupplier(otherStoreSupplier));
final TopologyDescription describe = builder.build().describe();
assertEquals(
"Topologies:\n" +
" Sub-topology: 0\n" +
" Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic1])\n" +
" --> KSTREAM-WINDOWED-0000000002\n" +
" Source: KSTREAM-SOURCE-0000000001 (topics: [input-topic2])\n" +
" --> KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-WINDOWED-0000000002 (stores: [in-memory-join-store])\n" +
" --> KSTREAM-OUTERTHIS-0000000004\n" +
" <-- KSTREAM-SOURCE-0000000000\n" +
" Processor: KSTREAM-WINDOWED-0000000003 (stores: [in-memory-join-store-other])\n" +
" --> KSTREAM-OUTEROTHER-0000000005\n" +
" <-- KSTREAM-SOURCE-0000000001\n" +
" Processor: KSTREAM-OUTEROTHER-0000000005 (stores: [in-memory-join-store-outer-shared-join-store, in-memory-join-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000003\n" +
" Processor: KSTREAM-OUTERTHIS-0000000004 (stores: [in-memory-join-store-other, in-memory-join-store-outer-shared-join-store])\n" +
" --> KSTREAM-MERGE-0000000006\n" +
" <-- KSTREAM-WINDOWED-0000000002\n" +
" Processor: KSTREAM-MERGE-0000000006 (stores: [])\n" +
" --> none\n" +
" <-- KSTREAM-OUTERTHIS-0000000004, KSTREAM-OUTEROTHER-0000000005\n\n",
describe.toString());
}
|
protected static HttpUrl buildUrl(@Nullable String serverUrl, String relativeUrl) {
if (serverUrl == null || !(serverUrl.toLowerCase(ENGLISH).startsWith("http://") || serverUrl.toLowerCase(ENGLISH).startsWith("https://"))) {
throw new IllegalArgumentException("url must start with http:// or https://");
}
return HttpUrl.parse(removeEnd(serverUrl, "/") + relativeUrl);
}
|
@Test
public void invalid_empty_url() {
assertThatThrownBy(() -> BitbucketServerRestClient.buildUrl(null, ""))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("url must start with http:// or https://");
}
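
A quick usage note, hedged: buildUrl() trims any trailing slash from the server URL before appending the relative part, so both spellings below produce the same URL (the host name is illustrative).

// Illustrative sketch, not from the source test suite.
@Test
public void trailing_slash_is_trimmed() {
    HttpUrl withSlash = BitbucketServerRestClient.buildUrl("https://bitbucket.example.com/", "/rest/api/1.0/projects");
    HttpUrl withoutSlash = BitbucketServerRestClient.buildUrl("https://bitbucket.example.com", "/rest/api/1.0/projects");
    assertThat(withSlash).isEqualTo(withoutSlash);
}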
|
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
return helper.interpret(session, st, context);
}
|
@Test
void should_describe_table() {
// Given
String query = "DESCRIBE TABLE live_data.complex_table;";
final String expected = reformatHtml(
readTestResource("/scalate/DescribeTable_live_data_complex_table.html"));
// When
final InterpreterResult actual = interpreter.interpret(query, intrContext);
// Then
assertEquals(Code.SUCCESS, actual.code());
assertEquals(expected, reformatHtml(actual.message().get(0).getData()));
}
|
public static Optional<Expression> convert(
org.apache.flink.table.expressions.Expression flinkExpression) {
if (!(flinkExpression instanceof CallExpression)) {
return Optional.empty();
}
CallExpression call = (CallExpression) flinkExpression;
Operation op = FILTERS.get(call.getFunctionDefinition());
if (op != null) {
switch (op) {
case IS_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::isNull);
case NOT_NULL:
return onlyChildAs(call, FieldReferenceExpression.class)
.map(FieldReferenceExpression::getName)
.map(Expressions::notNull);
case LT:
return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);
case LT_EQ:
return convertFieldAndLiteral(
Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);
case GT:
return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);
case GT_EQ:
return convertFieldAndLiteral(
Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);
case EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.isNaN(ref);
} else {
return Expressions.equal(ref, lit);
}
},
call);
case NOT_EQ:
return convertFieldAndLiteral(
(ref, lit) -> {
if (NaNUtil.isNaN(lit)) {
return Expressions.notNaN(ref);
} else {
return Expressions.notEqual(ref, lit);
}
},
call);
case NOT:
return onlyChildAs(call, CallExpression.class)
.flatMap(FlinkFilters::convert)
.map(Expressions::not);
case AND:
return convertLogicExpression(Expressions::and, call);
case OR:
return convertLogicExpression(Expressions::or, call);
case STARTS_WITH:
return convertLike(call);
}
}
return Optional.empty();
}
|
@Test
public void testGreaterThan() {
UnboundPredicate<Integer> expected =
org.apache.iceberg.expressions.Expressions.greaterThan("field1", 1);
Optional<org.apache.iceberg.expressions.Expression> actual =
FlinkFilters.convert(resolve(Expressions.$("field1").isGreater(Expressions.lit(1))));
assertThat(actual).isPresent();
assertPredicatesMatch(expected, actual.get());
Optional<org.apache.iceberg.expressions.Expression> actual1 =
FlinkFilters.convert(resolve(Expressions.lit(1).isLess(Expressions.$("field1"))));
assertThat(actual1).isPresent();
assertPredicatesMatch(expected, actual1.get());
}
|
public static <R> R callInstanceMethod(
final Object instance, final String methodName, ClassParameter<?>... classParameters) {
perfStatsCollector.incrementCount(
String.format(
"ReflectionHelpers.callInstanceMethod-%s_%s",
instance.getClass().getName(), methodName));
try {
final Class<?>[] classes = ClassParameter.getClasses(classParameters);
final Object[] values = ClassParameter.getValues(classParameters);
return traverseClassHierarchy(
instance.getClass(),
NoSuchMethodException.class,
traversalClass -> {
Method declaredMethod = traversalClass.getDeclaredMethod(methodName, classes);
declaredMethod.setAccessible(true);
return (R) declaredMethod.invoke(instance, values);
});
} catch (InvocationTargetException e) {
if (e.getTargetException() instanceof RuntimeException) {
throw (RuntimeException) e.getTargetException();
}
if (e.getTargetException() instanceof Error) {
throw (Error) e.getTargetException();
}
throw new RuntimeException(e.getTargetException());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
|
@Test
public void callInstanceMethodReflectively_wrapsCheckedException() {
ExampleDescendant example = new ExampleDescendant();
try {
ReflectionHelpers.callInstanceMethod(example, "throwCheckedException");
fail("Expected exception not thrown");
} catch (RuntimeException e) {
assertThat(e.getCause()).isInstanceOf(TestException.class);
}
}
|
public FontMetrics parse() throws IOException
{
return parseFontMetric(false);
}
|
@Test
void testHelveticaCharMetricsReducedDataset() throws IOException
{
AFMParser parser = new AFMParser(
new FileInputStream("src/test/resources/afm/Helvetica.afm"));
FontMetrics fontMetrics = parser.parse(true);
// char metrics
checkHelveticaCharMetrics(fontMetrics.getCharMetrics());
}
|
public Cholesky cholesky() {
return cholesky(false);
}
|
@Test
public void testCholesky() {
System.out.println("Cholesky");
double[][] A = {
{0.9000, 0.4000, 0.7000f},
{0.4000, 0.5000, 0.3000f},
{0.7000, 0.3000, 0.8000f}
};
double[][] L = {
{0.9486833, 0.00000000, 0.0000000f},
{0.4216370, 0.56764621, 0.0000000f},
{0.7378648, -0.01957401, 0.5051459f}
};
Matrix a = Matrix.of(A);
a.uplo(UPLO.LOWER);
Matrix.Cholesky cholesky = a.cholesky();
for (int i = 0; i < a.nrow(); i++) {
for (int j = 0; j <= i; j++) {
assertEquals(Math.abs(L[i][j]), Math.abs(cholesky.lu.get(i, j)), 1E-7);
}
}
double[] b = {0.5, 0.5, 0.5f};
double[] x = {-0.2027027, 0.8783784, 0.4729730f};
double[] x2 = cholesky.solve(b);
assertEquals(x.length, x2.length);
for (int i = 0; i < x.length; i++) {
assertEquals(x[i], x2[i], 1E-7);
}
double[][] B = {
{0.5, 0.2f},
{0.5, 0.8f},
{0.5, 0.3f}
};
double[][] X = {
{-0.2027027, -1.2837838f},
{ 0.8783784, 2.2297297f},
{ 0.4729730, 0.6621622f}
};
Matrix X2 = Matrix.of(B);
cholesky.solve(X2);
assertEquals(X.length, X2.nrow());
assertEquals(X[0].length, X2.ncol());
for (int i = 0; i < X.length; i++) {
for (int j = 0; j < X[i].length; j++) {
assertEquals(X[i][j], X2.get(i, j), 1E-6);
}
}
}
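
A plain-arithmetic sketch of the invariant behind the reference values above: the factor L satisfies A = L·Lᵀ. This fragment is meant to be appended inside testCholesky, reusing its local A and L arrays.

// Illustrative check that A == L * L^T for the reference matrices above.
double[][] reconstructed = new double[3][3];
for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
        for (int k = 0; k <= Math.min(i, j); k++) {
            reconstructed[i][j] += L[i][k] * L[j][k]; // lower-triangular product
        }
        assertEquals(A[i][j], reconstructed[i][j], 1E-6);
    }
}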
|
public static TemplateEngine createEngine() {
return TemplateFactory.create();
}
|
@Test
public void velocityEngineTest() {
    // String template
TemplateEngine engine = TemplateUtil.createEngine(
new TemplateConfig("templates", ResourceMode.STRING).setCustomEngine(VelocityEngine.class));
Template template = engine.getTemplate("你好,$name");
String result = template.render(Dict.create().set("name", "hutool"));
assertEquals("你好,hutool", result);
    // ClassPath template
engine = TemplateUtil.createEngine(
new TemplateConfig("templates", ResourceMode.CLASSPATH).setCustomEngine(VelocityEngine.class));
template = engine.getTemplate("velocity_test.vtl");
result = template.render(Dict.create().set("name", "hutool"));
assertEquals("你好,hutool", result);
template = engine.getTemplate("templates/velocity_test.vtl");
result = template.render(Dict.create().set("name", "hutool"));
assertEquals("你好,hutool", result);
}
|
public static BadRequestException appNamespaceAlreadyExists(String appId, String namespaceName) {
return new BadRequestException("appNamespace already exists for appId:%s namespaceName:%s", appId, namespaceName);
}
|
@Test
public void testAppNamespaceAlreadyExists() {
BadRequestException appNamespaceAlreadyExists = BadRequestException.appNamespaceAlreadyExists(appId, namespaceName);
assertEquals("appNamespace already exists for appId:app-1001 namespaceName:application", appNamespaceAlreadyExists.getMessage());
}
|
public static String extractString(String stringLiteral)
{
return StringEscapeUtils.unescapeJson(stringLiteral.substring(1, stringLiteral.length() - 1));
}
|
@Test
public void testExtractString()
{
String extracted = PdlParseUtils.extractString("\"A string with escape chars: \\n\\t\\f\"");
assertEquals(extracted, "A string with escape chars: \n\t\f");
}
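
To make the two steps explicit, a few hedged cases one could add to the test above: the method strips the surrounding quote characters with substring, then delegates escape handling to unescapeJson.

// Illustrative cases for extractString() above.
assertEquals(PdlParseUtils.extractString("\"plain\""), "plain");
assertEquals(PdlParseUtils.extractString("\"say \\\"hi\\\"\""), "say \"hi\"");
assertEquals(PdlParseUtils.extractString("\"\""), ""); // empty string literal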
|
@Override
public long getMs(PropertyKey key) {
checkArgument(key.getType() == PropertyKey.PropertyType.DURATION);
return FormatUtils.parseTimeSize((String) get(key));
}
|
@Test
public void alias() throws Exception {
try (Closeable p =
new SystemPropertyRule("alluxio.master.worker.timeout.ms", "100").toResource()) {
resetConf();
assertEquals(100, mConfiguration.getMs(PropertyKey.MASTER_WORKER_TIMEOUT_MS));
}
}
|
public static Map<String, DataSource> create(final Map<String, DataSourcePoolProperties> propsMap, final boolean cacheEnabled) {
Map<String, DataSource> result = new LinkedHashMap<>(propsMap.size(), 1F);
for (Entry<String, DataSourcePoolProperties> entry : propsMap.entrySet()) {
result.put(entry.getKey(), create(entry.getKey(), entry.getValue(), cacheEnabled, result.values()));
}
return result;
}
|
@Test
void assertCreate() {
assertDataSource((MockedDataSource) DataSourcePoolCreator.create(new DataSourcePoolProperties(MockedDataSource.class.getName(), createProperties())));
}
|
public void validate(AlmSettingDto dto) {
String clientId = requireNonNull(dto.getClientId());
String appId = requireNonNull(dto.getAppId());
String decryptedClientSecret = requireNonNull(dto.getDecryptedClientSecret(settings.getEncryption()));
bitbucketCloudRestClient.validate(clientId, decryptedClientSecret, appId);
}
|
@Test
public void validate_callsValidate() {
AlmSettingDto dto = mock(AlmSettingDto.class);
when(dto.getAppId()).thenReturn(EXAMPLE_APP_ID);
when(dto.getClientId()).thenReturn("clientId");
when(dto.getDecryptedClientSecret(any())).thenReturn("secret");
underTest.validate(dto);
verify(bitbucketCloudRestClient, times(1)).validate("clientId", "secret", EXAMPLE_APP_ID);
}
|
public JibContainer runBuild()
throws BuildStepsExecutionException, IOException, CacheDirectoryCreationException {
try {
logger.accept(LogEvent.lifecycle(""));
logger.accept(LogEvent.lifecycle(startupMessage));
JibContainer jibContainer = jibContainerBuilder.containerize(containerizer);
logger.accept(LogEvent.lifecycle(""));
logger.accept(LogEvent.lifecycle(successMessage));
// when an image is built, write out the digest and id
if (imageDigestOutputPath != null) {
String imageDigest = jibContainer.getDigest().toString();
Files.write(imageDigestOutputPath, imageDigest.getBytes(StandardCharsets.UTF_8));
}
if (imageIdOutputPath != null) {
String imageId = jibContainer.getImageId().toString();
Files.write(imageIdOutputPath, imageId.getBytes(StandardCharsets.UTF_8));
}
if (imageJsonOutputPath != null) {
ImageMetadataOutput metadataOutput = ImageMetadataOutput.fromJibContainer(jibContainer);
String imageJson = metadataOutput.toJson();
Files.write(imageJsonOutputPath, imageJson.getBytes(StandardCharsets.UTF_8));
}
return jibContainer;
} catch (HttpHostConnectException ex) {
// Failed to connect to registry.
throw new BuildStepsExecutionException(helpfulSuggestions.forHttpHostConnect(), ex);
} catch (RegistryUnauthorizedException ex) {
handleRegistryUnauthorizedException(ex, helpfulSuggestions);
} catch (RegistryCredentialsNotSentException ex) {
throw new BuildStepsExecutionException(helpfulSuggestions.forCredentialsNotSent(), ex);
} catch (RegistryAuthenticationFailedException ex) {
if (ex.getCause() instanceof ResponseException) {
handleRegistryUnauthorizedException(
new RegistryUnauthorizedException(
ex.getServerUrl(), ex.getImageName(), (ResponseException) ex.getCause()),
helpfulSuggestions);
} else {
// Unknown cause
throw new BuildStepsExecutionException(helpfulSuggestions.none(), ex);
}
} catch (UnknownHostException ex) {
throw new BuildStepsExecutionException(helpfulSuggestions.forUnknownHost(), ex);
} catch (InsecureRegistryException ex) {
throw new BuildStepsExecutionException(helpfulSuggestions.forInsecureRegistry(), ex);
} catch (RegistryException ex) {
String message = Verify.verifyNotNull(ex.getMessage()); // keep null-away happy
throw new BuildStepsExecutionException(message, ex);
} catch (ExecutionException ex) {
String message = ex.getCause().getMessage();
throw new BuildStepsExecutionException(
message == null ? "(null exception message)" : message, ex.getCause());
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
throw new BuildStepsExecutionException(helpfulSuggestions.none(), ex);
}
throw new IllegalStateException("unreachable");
}
|
@Test
public void testBuildImage_pass()
throws BuildStepsExecutionException, IOException, CacheDirectoryCreationException {
JibContainer buildResult = testJibBuildRunner.runBuild();
Assert.assertNull(buildResult);
}
|
public SerializableFunction<Row, T> getFromRowFunction() {
return fromRowFunction;
}
|
@Test
public void testOuterOneOfRowToProto() {
ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(OuterOneOf.getDescriptor());
SerializableFunction<Row, DynamicMessage> fromRow = schemaProvider.getFromRowFunction();
assertEquals(OUTER_ONEOF_PROTO.toString(), fromRow.apply(OUTER_ONEOF_ROW).toString());
}
|
public static FlowGraph of(GraphCluster graph) throws IllegalVariableEvaluationException {
return FlowGraph.builder()
.nodes(GraphUtils.nodes(graph))
.edges(GraphUtils.edges(graph))
.clusters(GraphUtils.clusters(graph, new ArrayList<>())
.stream()
.map(g -> new Cluster(g.getKey(), g.getKey().getGraph()
.nodes()
.stream()
.map(AbstractGraph::getUid)
.toList(),
g.getValue(),
g.getKey().getRoot().getUid(),
g.getKey().getEnd().getUid()
))
.toList()
)
.build();
}
|
@Test
void trigger() throws IllegalVariableEvaluationException {
Flow flow = this.parse("flows/valids/trigger-flow-listener.yaml");
triggerRepositoryInterface.save(
Trigger.of(flow, flow.getTriggers().getFirst()).toBuilder().disabled(true).build()
);
FlowGraph flowGraph = graphService.flowGraph(flow, null);
assertThat(flowGraph.getNodes().size(), is(6));
assertThat(flowGraph.getEdges().size(), is(5));
assertThat(flowGraph.getClusters().size(), is(1));
AbstractGraph triggerGraph = flowGraph.getNodes().stream().filter(e -> e instanceof GraphTrigger).findFirst().orElseThrow();
assertThat(((GraphTrigger) triggerGraph).getTrigger().getDisabled(), is(true));
}
|
@Override
public void report() {
try {
tryReport();
} catch (ConcurrentModificationException | NoSuchElementException ignored) {
            // tryReport() does not synchronize while iterating over the various maps,
            // which might cause a ConcurrentModificationException or NoSuchElementException
            // to be thrown if a metric is concurrently added or removed.
}
}
|
@Test
void testSkipOnNoMetrics() {
reporter.report();
assertThat(testLoggerResource.getMessages())
.noneMatch(logOutput -> logOutput.contains("Starting metrics report"))
.anyMatch(logOutput -> logOutput.contains("Skipping metrics report"));
}
|
public void calculate(IThrowableProxy tp) {
while (tp != null) {
populateFrames(tp.getStackTraceElementProxyArray());
IThrowableProxy[] suppressed = tp.getSuppressed();
if(suppressed != null) {
for(IThrowableProxy current:suppressed) {
populateFrames(current.getStackTraceElementProxyArray());
}
}
tp = tp.getCause();
}
}
|
@Test
public void smoke() throws Exception {
Throwable t = new Throwable("x");
ThrowableProxy tp = new ThrowableProxy(t);
PackagingDataCalculator pdc = tp.getPackagingDataCalculator();
pdc.calculate(tp);
verify(tp);
tp.fullDump();
}
|
public void parse(DataByteArrayInputStream input, int readSize) throws Exception {
if (currentParser == null) {
currentParser = initializeHeaderParser();
}
// Parser stack will run until current incoming data has all been consumed.
currentParser.parse(input, readSize);
}
|
@Test
public void testMessageDecodingPerformance() throws Exception {
byte[] CONTENTS = new byte[MESSAGE_SIZE];
for (int i = 0; i < MESSAGE_SIZE; i++) {
CONTENTS[i] = 'a';
}
PUBLISH publish = new PUBLISH();
publish.dup(false);
publish.messageId((short) 127);
publish.qos(QoS.AT_LEAST_ONCE);
publish.payload(new Buffer(CONTENTS));
publish.topicName(new UTF8Buffer("TOPIC"));
DataByteArrayOutputStream output = new DataByteArrayOutputStream();
wireFormat.marshal(publish.encode(), output);
Buffer marshalled = output.toBuffer();
long startTime = System.currentTimeMillis();
for (int i = 0; i < ITERATIONS; ++i) {
DataByteArrayInputStream input = new DataByteArrayInputStream(marshalled);
codec.parse(input, marshalled.length());
assertTrue(!frames.isEmpty());
publish = new PUBLISH().decode(frames.get(0));
frames.clear();
}
long duration = System.currentTimeMillis() - startTime;
LOG.info("Total time to process: {}", TimeUnit.MILLISECONDS.toSeconds(duration));
}
|
@VisibleForTesting
static Optional<String> performUpdateCheck(
Path configDir,
String currentVersion,
String versionUrl,
String toolName,
Consumer<LogEvent> log) {
Path lastUpdateCheck = configDir.resolve(LAST_UPDATE_CHECK_FILENAME);
try {
// Check time of last update check
if (Files.exists(lastUpdateCheck)) {
try {
String fileContents =
new String(Files.readAllBytes(lastUpdateCheck), StandardCharsets.UTF_8);
Instant modifiedTime = Instant.parse(fileContents);
if (modifiedTime.plus(Duration.ofDays(1)).isAfter(Instant.now())) {
return Optional.empty();
}
} catch (DateTimeParseException | IOException ex) {
// If reading update time failed, file might be corrupt, so delete it
log.accept(LogEvent.debug("Failed to read lastUpdateCheck; " + ex.getMessage()));
Files.delete(lastUpdateCheck);
}
}
// Check for update
FailoverHttpClient httpClient = new FailoverHttpClient(true, false, ignored -> {});
try {
Response response =
httpClient.get(
new URL(versionUrl),
Request.builder()
.setHttpTimeout(3000)
.setUserAgent("jib " + currentVersion + " " + toolName)
.build());
VersionJsonTemplate version =
JsonTemplateMapper.readJson(response.getBody(), VersionJsonTemplate.class);
Path lastUpdateCheckTemp =
Files.createTempFile(configDir, LAST_UPDATE_CHECK_FILENAME, null);
lastUpdateCheckTemp.toFile().deleteOnExit();
Files.write(lastUpdateCheckTemp, Instant.now().toString().getBytes(StandardCharsets.UTF_8));
Files.move(lastUpdateCheckTemp, lastUpdateCheck, StandardCopyOption.REPLACE_EXISTING);
if (currentVersion.equals(version.latest)) {
return Optional.empty();
}
return Optional.of(version.latest);
} finally {
httpClient.shutDown();
}
} catch (IOException ex) {
log.accept(LogEvent.debug("Update check failed; " + ex.getMessage()));
}
return Optional.empty();
}
|
@Test
public void testPerformUpdateCheck_newJsonField()
throws IOException, InterruptedException, GeneralSecurityException, URISyntaxException {
String response =
"HTTP/1.1 200 OK\nContent-Length:43\n\n{\"latest\":\"2.0.0\",\"unknownField\":\"unknown\"}";
try (TestWebServer server = new TestWebServer(false, Collections.singletonList(response), 1)) {
setupLastUpdateCheck();
Optional<String> message =
UpdateChecker.performUpdateCheck(
configDir, "1.0.2", server.getEndpoint(), "tool-name", ignored -> {});
assertThat(message).hasValue("2.0.0");
}
}
|
@PostMapping("/enabled")
@RequiresPermissions("system:plugin:disable")
public ShenyuAdminResult enabled(@Valid @RequestBody final BatchCommonDTO batchCommonDTO) {
final String result = pluginService.enabled(batchCommonDTO.getIds(), batchCommonDTO.getEnabled());
if (StringUtils.isNoneBlank(result)) {
return ShenyuAdminResult.error(result);
}
return ShenyuAdminResult.success(ShenyuResultMessage.ENABLE_SUCCESS);
}
|
@Test
public void testEnabled() throws Exception {
BatchCommonDTO batchCommonDTO = new BatchCommonDTO();
batchCommonDTO.setEnabled(false);
batchCommonDTO.setIds(Collections.singletonList("123"));
given(this.pluginService.enabled(batchCommonDTO.getIds(), batchCommonDTO.getEnabled())).willReturn(StringUtils.EMPTY);
this.mockMvc.perform(MockMvcRequestBuilders.post("/plugin/enabled")
.contentType(MediaType.APPLICATION_JSON)
.content(GsonUtils.getInstance().toJson(batchCommonDTO)))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(ShenyuResultMessage.ENABLE_SUCCESS)))
.andReturn();
given(this.pluginService.enabled(batchCommonDTO.getIds(), batchCommonDTO.getEnabled())).willReturn(AdminConstants.SYS_PLUGIN_ID_NOT_EXIST);
this.mockMvc.perform(MockMvcRequestBuilders.post("/plugin/enabled")
.contentType(MediaType.APPLICATION_JSON)
.content(GsonUtils.getInstance().toJson(batchCommonDTO)))
.andExpect(status().isOk())
.andExpect(jsonPath("$.message", is(AdminConstants.SYS_PLUGIN_ID_NOT_EXIST)))
.andReturn();
}
|
public static ParamType getVarArgsSchemaFromType(final Type type) {
return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetGenericSchemaFromTypeVariadic() throws NoSuchMethodException {
// Given:
final Type genericType = getClass().getMethod("genericType").getGenericReturnType();
// When:
final ParamType returnType = UdfUtil.getVarArgsSchemaFromType(genericType);
// Then:
MatcherAssert.assertThat(returnType, CoreMatchers.is(GenericType.of("T")));
}
|
public RulesDefinition.Context load() {
RulesDefinition.Context context = new RulesDefinitionContext();
for (RulesDefinition pluginDefinition : pluginDefs) {
context.setCurrentPluginKey(serverPluginRepository.getPluginKey(pluginDefinition));
pluginDefinition.define(context);
}
context.setCurrentPluginKey(null);
return context;
}
|
@Test
public void no_definitions() {
RulesDefinition.Context context = new RuleDefinitionsLoader(mock(ServerPluginRepository.class)).load();
assertThat(context.repositories()).isEmpty();
}
|
public static ResourceModel processResource(final Class<?> resourceClass)
{
return processResource(resourceClass, null);
}
|
@Test(expectedExceptions = ResourceConfigException.class)
public void failsOnNonPublicActionMethod() {
@RestLiCollection(name = "nonPublicActionMethod")
class LocalClass extends CollectionResourceTemplate<Long, EmptyRecord>
{
@Action(name = "protectedAction")
protected void protectedAction(@ActionParam("actionParam") String actionParam) {
}
}
RestLiAnnotationReader.processResource(LocalClass.class);
Assert.fail("#addActionResourceMethod should fail throwing a ResourceConfigException");
}
|
protected Map<String, String> parseJettyOptions( Node node ) {
Map<String, String> jettyOptions = null;
Node jettyOptionsNode = XMLHandler.getSubNode( node, XML_TAG_JETTY_OPTIONS );
if ( jettyOptionsNode != null ) {
jettyOptions = new HashMap<String, String>();
if ( XMLHandler.getTagValue( jettyOptionsNode, XML_TAG_ACCEPTORS ) != null ) {
jettyOptions.put( Const.KETTLE_CARTE_JETTY_ACCEPTORS, XMLHandler.getTagValue( jettyOptionsNode, XML_TAG_ACCEPTORS ) );
}
if ( XMLHandler.getTagValue( jettyOptionsNode, XML_TAG_ACCEPT_QUEUE_SIZE ) != null ) {
jettyOptions.put( Const.KETTLE_CARTE_JETTY_ACCEPT_QUEUE_SIZE, XMLHandler.getTagValue( jettyOptionsNode,
XML_TAG_ACCEPT_QUEUE_SIZE ) );
}
if ( XMLHandler.getTagValue( jettyOptionsNode, XML_TAG_LOW_RES_MAX_IDLE_TIME ) != null ) {
jettyOptions.put( Const.KETTLE_CARTE_JETTY_RES_MAX_IDLE_TIME, XMLHandler.getTagValue( jettyOptionsNode,
XML_TAG_LOW_RES_MAX_IDLE_TIME ) );
}
}
return jettyOptions;
}
|
@Test
public void testParseJettyOption_NoOptionsNode() throws KettleXMLException {
Node configNode = getConfigNode( getConfigWithNoOptionsNode() );
Map<String, String> parseJettyOptions = slServerConfig.parseJettyOptions( configNode );
assertNull( parseJettyOptions );
}
|
@Override
public UpdateNodeResourceResponse updateNodeResource(UpdateNodeResourceRequest request)
throws YarnException, IOException {
// parameter verification.
if (request == null) {
routerMetrics.incrUpdateNodeResourceFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing UpdateNodeResource request.", null);
}
String subClusterId = request.getSubClusterId();
if (StringUtils.isBlank(subClusterId)) {
routerMetrics.incrUpdateNodeResourceFailedRetrieved();
RouterServerUtil.logAndThrowException("Missing UpdateNodeResource SubClusterId.", null);
}
try {
long startTime = clock.getTime();
RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
new Class[]{UpdateNodeResourceRequest.class}, new Object[]{request});
Collection<UpdateNodeResourceResponse> updateNodeResourceResps =
remoteMethod.invokeConcurrent(this, UpdateNodeResourceResponse.class, subClusterId);
if (CollectionUtils.isNotEmpty(updateNodeResourceResps)) {
long stopTime = clock.getTime();
routerMetrics.succeededUpdateNodeResourceRetrieved(stopTime - startTime);
return UpdateNodeResourceResponse.newInstance();
}
} catch (YarnException e) {
routerMetrics.incrUpdateNodeResourceFailedRetrieved();
RouterServerUtil.logAndThrowException(e,
"Unable to updateNodeResource due to exception. " + e.getMessage());
}
routerMetrics.incrUpdateNodeResourceFailedRetrieved();
throw new YarnException("Unable to updateNodeResource.");
}
|
@Test
public void testUpdateNodeResourceNormalRequest() throws Exception {
// case 1, test the existing subCluster (SC-1).
Map<NodeId, ResourceOption> nodeResourceMap = new HashMap<>();
NodeId nodeId = NodeId.newInstance("127.0.0.1", 1);
ResourceOption resourceOption =
ResourceOption.newInstance(Resource.newInstance(2 * GB, 1), -1);
nodeResourceMap.put(nodeId, resourceOption);
UpdateNodeResourceRequest request =
UpdateNodeResourceRequest.newInstance(nodeResourceMap, "SC-1");
UpdateNodeResourceResponse response = interceptor.updateNodeResource(request);
assertNotNull(response);
// case 2, test the non-exist subCluster.
UpdateNodeResourceRequest request1 =
UpdateNodeResourceRequest.newInstance(nodeResourceMap, "SC-NON");
LambdaTestUtils.intercept(Exception.class, "subClusterId = SC-NON is not an active subCluster.",
() -> interceptor.updateNodeResource(request1));
}
|
@Override
public Processor<K, Change<V1>, K, Change<VOut>> get() {
return new KTableKTableOuterJoinProcessor(valueGetterSupplier2.get());
}
|
@Test
public void shouldLogAndMeterSkippedRecordsDueToNullLeftKey() {
final StreamsBuilder builder = new StreamsBuilder();
@SuppressWarnings("unchecked")
final Processor<String, Change<String>, String, Change<Object>> join = new KTableKTableOuterJoin<>(
(KTableImpl<String, String, String>) builder.table("left", Consumed.with(Serdes.String(), Serdes.String())),
(KTableImpl<String, String, String>) builder.table("right", Consumed.with(Serdes.String(), Serdes.String())),
null
).get();
final MockProcessorContext<String, Change<Object>> context = new MockProcessorContext<>(props);
context.setRecordMetadata("left", -1, -2);
join.init(context);
try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KTableKTableOuterJoin.class)) {
join.process(new Record<>(null, new Change<>("new", "old"), 0));
assertThat(
appender.getMessages(),
hasItem("Skipping record due to null key. topic=[left] partition=[-1] offset=[-2]")
);
}
}
|
public static Environment of(@NonNull Properties props) {
var environment = new Environment();
environment.props = props;
return environment;
}
|
@Test(expected = IllegalStateException.class)
public void testNoEnvByFile() {
Environment.of(new File("a123.properties"));
}
|
public static byte[] signMessage(RawTransaction rawTransaction, Credentials credentials) {
byte[] encodedTransaction;
if (rawTransaction.getTransaction().getType().isEip4844()) {
encodedTransaction = encode4844(rawTransaction);
} else {
encodedTransaction = encode(rawTransaction);
}
Sign.SignatureData signatureData =
Sign.signMessage(encodedTransaction, credentials.getEcKeyPair());
return encode(rawTransaction, signatureData);
}
|
@Test
public void testSignMessageAfterEip155() {
byte[] signedMessage =
TransactionEncoder.signMessage(
createEip155RawTransaction(),
ChainId.MAIN_NET.getId(),
SampleKeys.CREDENTIALS_ETH_EXAMPLE);
String hexMessage = Numeric.toHexString(signedMessage);
assertEquals(SIGN_RESULT_ETH_EXAMPLE, hexMessage);
}
|
public static List<String> flattenToNames(DataType dataType) {
return flattenToNames(dataType, Collections.emptyList());
}
|
@Test
void testFlattenToNames() {
assertThat(DataTypeUtils.flattenToNames(INT(), Collections.emptyList())).containsOnly("f0");
assertThat(DataTypeUtils.flattenToNames(INT(), Collections.singletonList("f0")))
.containsOnly("f0_0");
assertThat(
DataTypeUtils.flattenToNames(
ROW(FIELD("a", INT()), FIELD("b", BOOLEAN())),
Collections.emptyList()))
.containsExactly("a", "b");
}
|
@Override
public double quantile(double p) {
if (p < 0.0 || p > 1.0) {
throw new IllegalArgumentException("Invalid p: " + p);
}
double x = Beta.inverseRegularizedIncompleteBetaFunction(0.5 * nu1, 0.5 * nu2, p);
return nu2 * x / (nu1 * (1.0 - x));
}
|
@Test
public void testQuantile() {
System.out.println("quantile");
FDistribution instance = new FDistribution(10, 20);
instance.rand();
assertEquals(0.2269944, instance.quantile(0.01), 1E-7);
assertEquals(0.4543918, instance.quantile(0.1), 1E-7);
assertEquals(0.5944412, instance.quantile(0.2), 1E-7);
assertEquals(0.9662639, instance.quantile(0.5), 1E-7);
assertEquals(3.368186, instance.quantile(0.99), 1E-6);
assertEquals(7.180539, instance.quantile(0.9999), 1E-6);
}
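
A consistency sketch, with one assumption flagged: it assumes the distribution also exposes the usual cdf(double) (not shown above), of which quantile() is the inverse, so cdf(quantile(p)) should recover p.

// Illustrative inverse check; cdf(double) is an assumed API, not shown above.
@Test
public void testQuantileCdfRoundTrip() {
    FDistribution instance = new FDistribution(10, 20);
    for (double p : new double[]{0.01, 0.1, 0.5, 0.99}) {
        assertEquals(p, instance.cdf(instance.quantile(p)), 1E-6);
    }
}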
|
public void updateMember(
ClassicGroupMember member,
JoinGroupRequestProtocolCollection protocols,
int rebalanceTimeoutMs,
int sessionTimeoutMs,
CompletableFuture<JoinGroupResponseData> future
) {
decrementSupportedProtocols(member);
member.setSupportedProtocols(protocols);
incrementSupportedProtocols(member);
member.setRebalanceTimeoutMs(rebalanceTimeoutMs);
member.setSessionTimeoutMs(sessionTimeoutMs);
if (future != null && !member.isAwaitingJoin()) {
numMembersAwaitingJoinResponse++;
} else if (future == null && member.isAwaitingJoin()) {
numMembersAwaitingJoinResponse--;
}
member.setAwaitingJoinFuture(future);
}
|
@Test
public void testUpdateMember() {
JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection();
protocols.add(new JoinGroupRequestProtocol()
.setName("roundrobin")
.setMetadata(new byte[0]));
ClassicGroupMember member = new ClassicGroupMember(
memberId,
Optional.empty(),
clientId,
clientHost,
rebalanceTimeoutMs,
sessionTimeoutMs,
protocolType,
protocols
);
group.add(member);
JoinGroupRequestProtocolCollection newProtocols = new JoinGroupRequestProtocolCollection();
newProtocols.add(new JoinGroupRequestProtocol()
.setName("range")
.setMetadata(new byte[0]));
newProtocols.add(new JoinGroupRequestProtocol()
.setName("roundrobin")
.setMetadata(new byte[0]));
int newRebalanceTimeoutMs = 120000;
int newSessionTimeoutMs = 20000;
group.updateMember(member, newProtocols, newRebalanceTimeoutMs, newSessionTimeoutMs, null);
assertEquals(group.rebalanceTimeoutMs(), newRebalanceTimeoutMs);
assertEquals(member.sessionTimeoutMs(), newSessionTimeoutMs);
assertEquals(newProtocols, member.supportedProtocols());
}
|
public static TransformExecutorService serial(ExecutorService executor) {
return new SerialTransformExecutor(executor);
}
|
@Test
public void serialCompleteNotExecutingTaskThrows() {
@SuppressWarnings("unchecked")
DirectTransformExecutor<Object> first = mock(DirectTransformExecutor.class);
@SuppressWarnings("unchecked")
DirectTransformExecutor<Object> second = mock(DirectTransformExecutor.class);
TransformExecutorService serial = TransformExecutorServices.serial(executorService);
serial.schedule(first);
thrown.expect(IllegalStateException.class);
thrown.expectMessage("unexpected currently executing");
serial.complete(second);
}
|
protected String getInNotInConstraint(final List<Object> values) {
String expressionString = values.stream()
.map(Object::toString)
.collect(Collectors.joining(", ", "(", ")"));
return String.format(VALUE_PATTERN, "in", expressionString);
}
|
@Test
void getInNotInConstraint() {
List<Object> values = Arrays.asList("-5", "0.5", "1", "10");
String retrieved = KiePMMLDescrLhsFactory.factory(lhsBuilder).getInNotInConstraint(values);
String expected = "value in (-5, 0.5, 1, 10)";
assertThat(retrieved).isEqualTo(expected);
}
|
public static boolean isAllInventoryTasksFinished(final Collection<PipelineTask> inventoryTasks) {
if (inventoryTasks.isEmpty()) {
log.warn("inventoryTasks is empty");
}
return inventoryTasks.stream().allMatch(each -> each.getTaskProgress().getPosition() instanceof IngestFinishedPosition);
}
|
@Test
void assertAllInventoryTasksAreFinished() {
AtomicReference<IngestPosition> finishedPosition = new AtomicReference<>(new IngestFinishedPosition());
InventoryTask actualTask1 = new InventoryTask("foo_id_1", mock(ExecuteEngine.class), mock(ExecuteEngine.class), mock(Dumper.class), mock(Importer.class), finishedPosition);
InventoryTask actualTask2 = new InventoryTask("foo_id_2", mock(ExecuteEngine.class), mock(ExecuteEngine.class), mock(Dumper.class), mock(Importer.class), finishedPosition);
assertTrue(PipelineJobProgressDetector.isAllInventoryTasksFinished(Arrays.asList(actualTask1, actualTask2)));
}
|
List<?> apply(
final GenericRow row,
final ProcessingLogger processingLogger
) {
final Object[] args = new Object[parameterExtractors.size()];
for (int i = 0; i < parameterExtractors.size(); i++) {
args[i] = evalParam(row, processingLogger, i);
}
try {
final List<?> result = tableFunction.apply(args);
if (result == null) {
processingLogger.error(RecordProcessingError.recordProcessingError(nullMsg, row));
return ImmutableList.of();
}
return result;
} catch (final Exception e) {
processingLogger.error(RecordProcessingError.recordProcessingError(exceptionMsg, e, row));
return ImmutableList.of();
}
}
|
@Test
public void shouldReturnEmptyListIfUdtfReturnsNull() {
// Given:
when(tableFunction.apply(any())).thenReturn(null);
// When:
final List<?> result = applier.apply(VALUE, processingLogger);
// Then:
assertThat(result, is(empty()));
}
|
public void deleteIfExists(File fileOrDir) {
try {
deleteIfExistsOrThrowIOE(fileOrDir);
} catch (IOException e) {
throw new IllegalStateException("Can not delete " + fileOrDir, e);
}
}
|
@Test
public void deleteIfExists_deletes_file() throws Exception {
File file = temp.newFile();
underTest.deleteIfExists(file);
assertThat(file).doesNotExist();
}
|
public static final long getSequenceId(MessageId messageId) {
MessageIdAdv msgId = (MessageIdAdv) messageId;
long ledgerId = msgId.getLedgerId();
long entryId = msgId.getEntryId();
// Combine ledger id and entry id to form offset
// Use less than 32 bits to represent entry id since it will get
// rolled over way before overflowing the max int range
long offset = (ledgerId << 28) | entryId;
return offset;
}
|
@Test
public void testGetSequenceId() {
long lid = 12345L;
long eid = 34566L;
MessageIdImpl id = mock(MessageIdImpl.class);
when(id.getLedgerId()).thenReturn(lid);
when(id.getEntryId()).thenReturn(eid);
assertEquals((lid << 28) | eid, FunctionCommon.getSequenceId(id));
}
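
A hedged sketch of the inverse mapping: since the entry id occupies the low 28 bits of the packed offset, both components can be recovered with a shift and a mask. Helper names are hypothetical, and recovery holds only while entryId < 2^28, as the packing itself assumes.

// Illustrative inverse of getSequenceId(); names are hypothetical.
static long ledgerIdOf(long offset) {
    return offset >>> 28;             // high bits carry the ledger id
}
static long entryIdOf(long offset) {
    return offset & ((1L << 28) - 1); // low 28 bits carry the entry id
}
// ledgerIdOf((12345L << 28) | 34566L) == 12345L
// entryIdOf((12345L << 28) | 34566L)  == 34566L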
|
@Deprecated
public static Type resolveLastTypeParameter(Type genericContext, Class<?> supertype)
throws IllegalStateException {
return Types.resolveLastTypeParameter(genericContext, supertype);
}
|
@Test
void resolveLastTypeParameterWhenWildcard() throws Exception {
Type context =
LastTypeParameter.class.getDeclaredField("PARAMETERIZED_WILDCARD_LIST_STRING")
.getGenericType();
Type listStringType = LastTypeParameter.class.getDeclaredField("LIST_STRING").getGenericType();
Type last = resolveLastTypeParameter(context, Parameterized.class);
assertThat(last).isEqualTo(listStringType);
}
|
@Override
public int launch(AgentLaunchDescriptor descriptor) {
LogConfigurator logConfigurator = new LogConfigurator("agent-launcher-logback.xml");
return logConfigurator.runWithLogger(() -> doLaunch(descriptor));
}
|
@Test
@DisabledOnOs(OS.WINDOWS)
public void should_NOT_Download_AgentJar_IfTheCurrentJarIsUpToDate() throws Exception {
TEST_AGENT_LAUNCHER.copyTo(AGENT_LAUNCHER_JAR);
TEST_AGENT.copyTo(AGENT_BINARY_JAR);
assertTrue(AGENT_BINARY_JAR.setLastModified(0));
new AgentLauncherImpl().launch(launchDescriptor());
assertThat(AGENT_BINARY_JAR.lastModified(), is(0L));
}
|
public synchronized InputStream getApplicationInputStream(String appName) {
try {
File appFile = appFile(appName, appName + OAR);
if (!appFile.exists()) {
appFile = appFile(appName, appName + JAR);
}
return new FileInputStream(appFile.exists() ? appFile : appFile(appName, APP_XML));
} catch (FileNotFoundException e) {
throw new ApplicationException("Application " + appName + " not found");
}
}
|
@Test
public void getAppXmlStream() throws IOException {
savePlainApp();
InputStream stream = aar.getApplicationInputStream(APP_NAME);
byte[] orig = ByteStreams.toByteArray(getClass().getResourceAsStream("app.xml"));
byte[] loaded = ByteStreams.toByteArray(stream);
assertArrayEquals("incorrect stream", orig, loaded);
stream.close();
}
|
public void log(QueryLogParams params) {
_logger.debug("Broker Response: {}", params._response);
if (!(_logRateLimiter.tryAcquire() || shouldForceLog(params))) {
_numDroppedLogs.incrementAndGet();
return;
}
final StringBuilder queryLogBuilder = new StringBuilder();
for (QueryLogEntry value : QUERY_LOG_ENTRY_VALUES) {
value.format(queryLogBuilder, this, params);
queryLogBuilder.append(',');
}
// always log the query last - don't add this to the QueryLogEntry enum
queryLogBuilder.append("query=")
.append(StringUtils.substring(params._requestContext.getQuery(), 0, _maxQueryLengthToLog));
_logger.info(queryLogBuilder.toString());
if (_droppedLogRateLimiter.tryAcquire()) {
// use getAndSet to 0 so that there will be no race condition between
// loggers that increment this counter and this thread
long numDroppedLogsSinceLastLog = _numDroppedLogs.getAndSet(0);
if (numDroppedLogsSinceLastLog > 0) {
_logger.warn("{} logs were dropped. (log max rate per second: {})", numDroppedLogsSinceLastLog,
_logRateLimiter.getRate());
}
}
}
|
@Test
public void shouldForceLogWhenTimeIsMoreThanOneSecond() {
// Given:
Mockito.when(_logRateLimiter.tryAcquire()).thenReturn(false);
QueryLogger.QueryLogParams params = generateParams(false, 0, 1456);
QueryLogger queryLogger = new QueryLogger(_logRateLimiter, 100, true, _logger, _droppedRateLimiter);
// When:
queryLogger.log(params);
// Then:
Assert.assertEquals(_infoLog.size(), 1);
}
|
protected static void setModuleKeyAndNameIfNotDefined(Map<String, String> childProps, String moduleId, String parentKey) {
if (!childProps.containsKey(MODULE_KEY_PROPERTY)) {
if (!childProps.containsKey(CoreProperties.PROJECT_KEY_PROPERTY)) {
childProps.put(MODULE_KEY_PROPERTY, parentKey + ":" + moduleId);
} else {
String childKey = childProps.get(CoreProperties.PROJECT_KEY_PROPERTY);
childProps.put(MODULE_KEY_PROPERTY, parentKey + ":" + childKey);
}
}
childProps.putIfAbsent(CoreProperties.PROJECT_NAME_PROPERTY, moduleId);
// For backward compatibility with ProjectDefinition
childProps.put(CoreProperties.PROJECT_KEY_PROPERTY, childProps.get(MODULE_KEY_PROPERTY));
}
|
@Test
public void shouldSetModuleKeyIfNotPresent() {
Map<String, String> props = new HashMap<>();
props.put("sonar.projectVersion", "1.0");
// should be set
ProjectReactorBuilder.setModuleKeyAndNameIfNotDefined(props, "foo", "parent");
assertThat(props)
.containsEntry("sonar.moduleKey", "parent:foo")
.containsEntry("sonar.projectName", "foo");
// but not this 2nd time
ProjectReactorBuilder.setModuleKeyAndNameIfNotDefined(props, "bar", "parent");
assertThat(props)
.containsEntry("sonar.moduleKey", "parent:foo")
.containsEntry("sonar.projectName", "foo");
}
|
public static void ensureTopic(
final String name,
final KsqlConfig ksqlConfig,
final KafkaTopicClient topicClient
) {
if (topicClient.isTopicExists(name)) {
validateTopicConfig(name, ksqlConfig, topicClient);
return;
}
final short replicationFactor = ksqlConfig
.getShort(KsqlConfig.KSQL_INTERNAL_TOPIC_REPLICAS_PROPERTY);
if (replicationFactor < 2) {
log.warn("Creating topic {} with replication factor of {} which is less than 2. "
+ "This is not advisable in a production environment. ",
name, replicationFactor);
}
final short minInSyncReplicas = ksqlConfig
.getShort(KsqlConfig.KSQL_INTERNAL_TOPIC_MIN_INSYNC_REPLICAS_PROPERTY);
topicClient.createTopic(
name,
INTERNAL_TOPIC_PARTITION_COUNT,
replicationFactor,
ImmutableMap.<String, Object>builder()
.putAll(INTERNAL_TOPIC_CONFIG)
.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minInSyncReplicas)
.build()
);
}
|
@Test
public void shouldCreateInternalTopicWithNumReplicasFromConfig() {
// Given:
when(ksqlConfig.getShort(KsqlConfig.KSQL_INTERNAL_TOPIC_REPLICAS_PROPERTY)).thenReturn((short) 3);
// When:
KsqlInternalTopicUtils.ensureTopic(TOPIC_NAME, ksqlConfig, topicClient);
// Then:
verify(topicClient).createTopic(TOPIC_NAME, 1, (short) 3, commandTopicConfig);
}
|
@Override
public ExecuteContext onThrow(ExecuteContext context) {
ThreadLocalUtils.removeRequestTag();
ThreadLocalUtils.removeRequestData();
return context;
}
|
@Test
public void testOnThrow() {
ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", ""));
interceptor.onThrow(context);
Assert.assertNull(ThreadLocalUtils.getRequestData());
}
|
public void init(PinotConfiguration pinotConfiguration) {
Preconditions.checkArgument(0 < Integer.parseInt(pinotConfiguration.getProperty(MAX_RETRY)),
"[AzureEnvironmentProvider]: " + MAX_RETRY + " cannot be less than or equal to 0");
Preconditions.checkArgument(!StringUtils.isBlank(pinotConfiguration.getProperty(IMDS_ENDPOINT)),
"[AzureEnvironmentProvider]: " + IMDS_ENDPOINT + " should not be null or empty");
_maxRetry = Integer.parseInt(pinotConfiguration.getProperty(MAX_RETRY));
_imdsEndpoint = pinotConfiguration.getProperty(IMDS_ENDPOINT);
int connectionTimeoutMillis = Integer.parseInt(pinotConfiguration.getProperty(CONNECTION_TIMEOUT_MILLIS));
int requestTimeoutMillis = Integer.parseInt(pinotConfiguration.getProperty(REQUEST_TIMEOUT_MILLIS));
final RequestConfig requestConfig =
RequestConfig.custom().setConnectTimeout(Timeout.of(connectionTimeoutMillis, TimeUnit.MILLISECONDS))
.setResponseTimeout(Timeout.of(requestTimeoutMillis, TimeUnit.MILLISECONDS)).build();
final HttpRequestRetryStrategy httpRequestRetry = new DefaultHttpRequestRetryStrategy(
_maxRetry,
TimeValue.ofSeconds(1));
_closeableHttpClient =
HttpClients.custom().setDefaultRequestConfig(requestConfig).setRetryStrategy(httpRequestRetry).build();
}
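A minimal usage sketch, assuming a default constructor and that the property-name constants (MAX_RETRY, IMDS_ENDPOINT, CONNECTION_TIMEOUT_MILLIS, REQUEST_TIMEOUT_MILLIS) are in scope as in the test below; the endpoint and timeout values are illustrative.

  Map<String, Object> props = new HashMap<>();
  props.put(MAX_RETRY, "3");
  props.put(IMDS_ENDPOINT, "http://169.254.169.254/metadata/instance");
  props.put(CONNECTION_TIMEOUT_MILLIS, "5000");
  props.put(REQUEST_TIMEOUT_MILLIS, "10000");
  // Passes both preconditions: maxRetry > 0 and a non-blank IMDS endpoint
  new AzureEnvironmentProvider().init(new PinotConfiguration(props));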
|
  @Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp =
      "\\[AzureEnvironmentProvider\\]: maxRetry cannot be less than or equal to 0")
public void testInvalidRetryCount() {
Map<String, Object> map = _pinotConfiguration.toMap();
map.put(MAX_RETRY, "0");
PinotConfiguration pinotConfiguration = new PinotConfiguration(map);
_azureEnvironmentProvider.init(pinotConfiguration);
}
|
@Override
public String getName() {
return _name;
}
|
@Test
public void testStringStrPositionTransformFunction() {
ExpressionContext expression =
RequestContextUtils.getExpression(String.format("str_pos(%s, 'A')", STRING_ALPHANUM_SV_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getName(), "strpos");
int[] expectedValues = new int[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = StringUtils.indexOf(_stringAlphaNumericSVValues[i], 'A');
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(String.format("str_r_pos(%s, 'A')", STRING_ALPHANUM_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getName(), "strrpos");
expectedValues = new int[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = StringUtils.lastIndexOf(_stringAlphaNumericSVValues[i], 'A');
}
testTransformFunction(transformFunction, expectedValues);
expression = RequestContextUtils.getExpression(String.format("str_r_pos(%s, 'A', 1)", STRING_ALPHANUM_SV_COLUMN));
transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getName(), "strrpos");
expectedValues = new int[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = StringUtils.lastIndexOf(_stringAlphaNumericSVValues[i], 'A', 1);
}
testTransformFunction(transformFunction, expectedValues);
}
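A short note on the third argument exercised above, assuming Apache Commons Lang semantics: StringUtils.lastIndexOf(seq, searchChar, startPos) searches backwards from startPos, so only matches at or before that index are found.

  StringUtils.lastIndexOf("ABCA", 'A', 1); // returns 0: the search starts at index 1 and moves left
  StringUtils.lastIndexOf("ABCA", 'A', 3); // returns 3: the match at the start position counts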
|
@Override
public PluginState stopPlugin(String pluginId) {
if (currentPluginId.equals(pluginId)) {
return original.stopPlugin(pluginId);
} else {
throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId + " tried to execute stopPlugin for foreign pluginId!");
}
}
|
@Test
public void stopPlugin() {
pluginManager.loadPlugins();
pluginManager.startPlugins();
assertThrows(IllegalAccessError.class, () -> wrappedPluginManager.stopPlugin(OTHER_PLUGIN_ID));
assertEquals(PluginState.STOPPED, wrappedPluginManager.stopPlugin(THIS_PLUGIN_ID));
}
|
public static Expression convert(Filter[] filters) {
Expression expression = Expressions.alwaysTrue();
for (Filter filter : filters) {
Expression converted = convert(filter);
Preconditions.checkArgument(
converted != null, "Cannot convert filter to Iceberg: %s", filter);
expression = Expressions.and(expression, converted);
}
return expression;
}
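A minimal sketch of this array overload, assuming the single-filter convert supports the Spark filter types used here (they appear in the test below): each converted filter is AND-ed onto Expressions.alwaysTrue().

  Filter[] filters = new Filter[] {
      IsNotNull.apply("data"),
      GreaterThan.apply("id", 5)
  };
  Expression combined = SparkFilters.convert(filters);
  // combined is (true AND data IS NOT NULL AND id > 5);
  // any filter that converts to null fails the Preconditions check instead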
|
@Test
public void testQuotedAttributes() {
Map<String, String> attrMap = Maps.newHashMap();
attrMap.put("id", "id");
attrMap.put("`i.d`", "i.d");
attrMap.put("`i``d`", "i`d");
attrMap.put("`d`.b.`dd```", "d.b.dd`");
attrMap.put("a.`aa```.c", "a.aa`.c");
attrMap.forEach(
(quoted, unquoted) -> {
IsNull isNull = IsNull.apply(quoted);
Expression expectedIsNull = Expressions.isNull(unquoted);
Expression actualIsNull = SparkFilters.convert(isNull);
Assert.assertEquals(
"IsNull must match", expectedIsNull.toString(), actualIsNull.toString());
IsNotNull isNotNull = IsNotNull.apply(quoted);
Expression expectedIsNotNull = Expressions.notNull(unquoted);
Expression actualIsNotNull = SparkFilters.convert(isNotNull);
Assert.assertEquals(
"IsNotNull must match", expectedIsNotNull.toString(), actualIsNotNull.toString());
LessThan lt = LessThan.apply(quoted, 1);
Expression expectedLt = Expressions.lessThan(unquoted, 1);
Expression actualLt = SparkFilters.convert(lt);
Assert.assertEquals("LessThan must match", expectedLt.toString(), actualLt.toString());
LessThanOrEqual ltEq = LessThanOrEqual.apply(quoted, 1);
Expression expectedLtEq = Expressions.lessThanOrEqual(unquoted, 1);
Expression actualLtEq = SparkFilters.convert(ltEq);
Assert.assertEquals(
"LessThanOrEqual must match", expectedLtEq.toString(), actualLtEq.toString());
GreaterThan gt = GreaterThan.apply(quoted, 1);
Expression expectedGt = Expressions.greaterThan(unquoted, 1);
Expression actualGt = SparkFilters.convert(gt);
Assert.assertEquals("GreaterThan must match", expectedGt.toString(), actualGt.toString());
GreaterThanOrEqual gtEq = GreaterThanOrEqual.apply(quoted, 1);
Expression expectedGtEq = Expressions.greaterThanOrEqual(unquoted, 1);
Expression actualGtEq = SparkFilters.convert(gtEq);
Assert.assertEquals(
"GreaterThanOrEqual must match", expectedGtEq.toString(), actualGtEq.toString());
EqualTo eq = EqualTo.apply(quoted, 1);
Expression expectedEq = Expressions.equal(unquoted, 1);
Expression actualEq = SparkFilters.convert(eq);
Assert.assertEquals("EqualTo must match", expectedEq.toString(), actualEq.toString());
EqualNullSafe eqNullSafe = EqualNullSafe.apply(quoted, 1);
Expression expectedEqNullSafe = Expressions.equal(unquoted, 1);
Expression actualEqNullSafe = SparkFilters.convert(eqNullSafe);
Assert.assertEquals(
"EqualNullSafe must match",
expectedEqNullSafe.toString(),
actualEqNullSafe.toString());
In in = In.apply(quoted, new Integer[] {1});
Expression expectedIn = Expressions.in(unquoted, 1);
Expression actualIn = SparkFilters.convert(in);
Assert.assertEquals("In must match", expectedIn.toString(), actualIn.toString());
});
}
|
@Override
public KsMaterializedQueryResult<WindowedRow> get(
final GenericKey key,
final int partition,
final Range<Instant> windowStart,
final Range<Instant> windowEnd,
final Optional<Position> position
) {
try {
final WindowRangeQuery<GenericKey, GenericRow> query = WindowRangeQuery.withKey(key);
StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, GenericRow>> request =
inStore(stateStore.getStateStoreName()).withQuery(query);
if (position.isPresent()) {
request = request.withPositionBound(PositionBound.at(position.get()));
}
final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> result =
stateStore.getKafkaStreams().query(request);
final QueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> queryResult =
result.getPartitionResults().get(partition);
if (queryResult.isFailure()) {
throw failedQueryException(queryResult);
}
try (KeyValueIterator<Windowed<GenericKey>, GenericRow> it =
queryResult.getResult()) {
final Builder<WindowedRow> builder = ImmutableList.builder();
while (it.hasNext()) {
final KeyValue<Windowed<GenericKey>, GenericRow> next = it.next();
          final Window wnd = next.key.window();
          // Skip windows whose start or end falls outside the requested bounds
          if (!windowStart.contains(wnd.startTime())) {
            continue;
          }
          if (!windowEnd.contains(wnd.endTime())) {
            continue;
          }
final long rowTime = wnd.end();
final WindowedRow row = WindowedRow.of(
stateStore.schema(),
next.key,
next.value,
rowTime
);
builder.add(row);
}
return KsMaterializedQueryResult.rowIteratorWithPosition(
builder.build().iterator(), queryResult.getPosition());
}
} catch (final NotUpToBoundException | MaterializationException e) {
throw e;
} catch (final Exception e) {
throw new MaterializationException("Failed to get value from materialized table", e);
}
}
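A short sketch of the Guava Range semantics the filtering above relies on (instants are illustrative): an open lower bound excludes windows that start exactly at the boundary, which is what the test below asserts.

  final Range<Instant> startBounds = Range.openClosed(lowerInstant, upperInstant);
  startBounds.contains(lowerInstant); // false -> a session starting exactly here is skipped
  startBounds.contains(upperInstant); // true  -> still inside the bound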
|
@Test
public void shouldIgnoreSessionsThatStartAtLowerBoundIfLowerBoundOpen() {
// Given:
final Range<Instant> startBounds = Range.openClosed(
LOWER_INSTANT,
UPPER_INSTANT
);
givenSingleSession(LOWER_INSTANT, LOWER_INSTANT.plusMillis(1));
// When:
final Iterator<WindowedRow> rowIterator =
table.get(A_KEY, PARTITION, startBounds, Range.all()).rowIterator;
// Then:
assertThat(rowIterator.hasNext(), is(false));
}
|