| focal_method | test_case |
|---|---|
public static List<String> validateJson(JsonNode schemaNode, JsonNode jsonNode) {
try {
return JsonSchemaValidator.validateJson(schemaNode, jsonNode);
} catch (ProcessingException e) {
log.debug("Got a ProcessingException while trying to interpret schemaNode as a real schema");
List<String> errors = new ArrayList<>();
errors.add("schemaNode does not seem to represent a valid Json schema");
return errors;
}
}
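A minimal sketch of the contract above, using the class name from the test below: schema problems come back as list entries rather than exceptions.
// An empty list means the document conforms; an unparsable schema yields the single
// "schemaNode does not seem to represent a valid Json schema" entry instead of a throw.
List<String> errors = GraphQLSchemaValidator.validateJson(schemaNode, jsonNode);
errors.forEach(System.out::println);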
|
@Test
void testValidateJson() {
String schemaText;
String queryText = "{\n" + " hero {\n" + " name\n" + " email\n" + " family\n" + " affiliate\n"
+ " movies {\n" + " title\n" + " }\n" + " }\n" + "}";
String responseText = "{\n" + " \"data\": {\n" + " \"hero\": {\n" + " \"name\": \"Iron Man\",\n"
+ " \"email\": \"tony@stark.inc\",\n" + " \"family\": \"MARVEL\",\n"
+ " \"affiliate\": \"DC\",\n" + " \"movies\": [\n" + " {\"title\": \"Iron Man 1\"},\n"
+ " {\"title\": \"Iron Man 2\"},\n" + " {\"title\": \"Iron Man 3\"}\n" + " ]\n"
+ " }\n" + " }\n" + "}";
String badResponseText = "{\n" + " \"data\": {\n" + " \"hero\": {\n" + " \"name\": \"Iron Man\",\n"
+ " \"family\": \"MARVEL\",\n" + " \"affiliate\": \"DC\",\n" + " \"movies\": [\n"
+ " {\"title\": \"Iron Man 1\"},\n" + " {\"title\": \"Iron Man 2\"},\n"
+ " {\"title\": \"Iron Man 3\"}\n" + " ]\n" + " }\n" + " }\n" + "}";
ObjectMapper mapper = new ObjectMapper();
JsonNode responseSchema = null;
List<String> validationErrors = null;
try {
// Load schema from file.
schemaText = FileUtils
.readFileToString(new File("target/test-classes/io/github/microcks/util/graphql/basic-heroes.graphql"));
// Build JsonSchema for response.
responseSchema = GraphQLSchemaValidator.buildResponseJsonSchema(schemaText, queryText);
// Validate a correct response.
validationErrors = GraphQLSchemaValidator.validateJson(responseSchema, mapper.readTree(responseText));
} catch (Exception e) {
fail("Exception should not be thrown");
}
assertTrue(validationErrors.isEmpty());
try {
// Validate a bad response with missing email.
validationErrors = GraphQLSchemaValidator.validateJson(responseSchema, mapper.readTree(badResponseText));
} catch (Exception e) {
fail("Exception should not be thrown");
}
assertEquals(1, validationErrors.size());
assertEquals("object has missing required properties ([\"email\"])", validationErrors.get(0));
}
|
@Override
public List<RedisClientInfo> getClientList(RedisClusterNode node) {
RedisClient entry = getEntry(node);
RFuture<List<String>> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLIENT_LIST);
List<String> list = syncFuture(f);
return CONVERTER.convert(list.toArray(new String[list.size()]));
}
|
@Test
public void testGetClientList() {
RedisClusterNode master = getFirstMaster();
List<RedisClientInfo> list = connection.getClientList(master);
assertThat(list.size()).isGreaterThan(10);
}
|
@Override
public boolean skip(final ServerWebExchange exchange) {
return false;
}
|
@Test
public void skipTest() {
Assumptions.assumeFalse(casdoorPluginTest.skip(exchange));
}
|
@Override
public void isEqualTo(@Nullable Object expected) {
super.isEqualTo(expected);
}
|
@Test
public void smallDifferenceInLongRepresentation() {
expectFailureWhenTestingThat(array(-4.4501477170144023E-308))
.isEqualTo(array(-4.450147717014402E-308));
}
|
@Override
public void request(final long n) {
Preconditions.checkArgument(n == 1, "number of requested items must be 1");
if (needsSchema) {
if (schema != null) {
subscriber.onSchema(schema);
}
needsSchema = false;
}
// Check status first: request() can re-enter through subscriber.onNext(). This guard
// prevents another thread from calling onNext() again while the first one is draining
// and closing the subscription after delivering the last element polled from the queue
// once it has been marked done.
if (!draining) {
future = exec.submit(() -> {
if (done) {
draining = true;
}
final T item = poll();
if (item == null) {
if (!draining) {
future = exec.schedule(() -> request(1), BACKOFF_DELAY_MS, TimeUnit.MILLISECONDS);
}
} else {
subscriber.onNext(item);
}
if (draining) {
close();
if (exception != null) {
subscriber.onError(exception);
} else {
subscriber.onComplete();
}
}
});
}
}
|
@Test(expected = IllegalArgumentException.class)
public void testExpectsNEqualsOne() {
final TestSubscriber<String> testSubscriber = new TestSubscriber<String>() {
@Override
public void onSubscribe(final Subscription subscription) {
subscription.request(2);
}
};
final TestPublisher testPublisher = new TestPublisher();
testPublisher.subscribe(testSubscriber);
}
|
public MessageListener messageListener(MessageListener messageListener, boolean addConsumerSpan) {
if (messageListener instanceof TracingMessageListener) return messageListener;
return new TracingMessageListener(messageListener, this, addConsumerSpan);
}
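The instanceof guard makes the wrapping idempotent; a minimal sketch (names as in the test below):
MessageListener wrapped = jmsTracing.messageListener(message -> { /* handle */ }, false);
// Wrapping a second time returns the same instance instead of nesting tracers.
assert wrapped == jmsTracing.messageListener(wrapped, false);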
|
@Test void messageListener_wrapsInput() {
assertThat(jmsTracing.messageListener(mock(MessageListener.class), false))
.isInstanceOf(TracingMessageListener.class);
}
|
static int toInteger(final JsonNode object) {
if (object instanceof NumericNode) {
return object.intValue();
}
if (object instanceof TextNode) {
try {
return Integer.parseInt(object.textValue());
} catch (final NumberFormatException e) {
throw failedStringCoercionException(SqlBaseType.INTEGER);
}
}
throw invalidConversionException(object, SqlBaseType.INTEGER);
}
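Both coercion paths in one sketch, using Jackson's JsonNodeFactory as the test below does:
JsonNodeFactory nodes = JsonNodeFactory.instance;
int fromNumber = JsonSerdeUtils.toInteger(nodes.numberNode(42)); // NumericNode branch
int fromText = JsonSerdeUtils.toInteger(nodes.textNode("42")); // TextNode branch via Integer.parseInt
// Any other node type (e.g. nodes.booleanNode(true)) falls through to invalidConversionException.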
|
@Test
public void shouldConvertLongToIntCorrectly() {
final Integer i = JsonSerdeUtils.toInteger(JsonNodeFactory.instance.numberNode(1L));
assertThat(i, equalTo(1));
}
|
@ApiOperation(value = "Unassign Alarm (unassignAlarm)",
notes = "Unassign the Alarm. " +
"Once unassigned, the 'assign_ts' field will be set to current timestamp and special rule chain event 'ALARM_UNASSIGNED' will be generated. " +
"Referencing non-existing Alarm Id will cause an error." + TENANT_OR_CUSTOMER_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/alarm/{alarmId}/assign", method = RequestMethod.DELETE)
@ResponseStatus(value = HttpStatus.OK)
public Alarm unassignAlarm(@Parameter(description = ALARM_ID_PARAM_DESCRIPTION)
@PathVariable(ALARM_ID) String strAlarmId
) throws Exception {
checkParameter(ALARM_ID, strAlarmId);
AlarmId alarmId = new AlarmId(toUUID(strAlarmId));
Alarm alarm = checkAlarmId(alarmId, Operation.WRITE);
return tbAlarmService.unassign(alarm, System.currentTimeMillis(), getCurrentUser());
}
|
@Test
public void testUnassignAlarm() throws Exception {
loginTenantAdmin();
Alarm alarm = createAlarm(TEST_ALARM_TYPE);
Mockito.reset(tbClusterService, auditLogService);
long beforeAssignmentTs = System.currentTimeMillis();
Thread.sleep(2);
doPost("/api/alarm/" + alarm.getId() + "/assign/" + tenantAdminUserId.getId()).andExpect(status().isOk());
AlarmInfo foundAlarm = doGet("/api/alarm/info/" + alarm.getId(), AlarmInfo.class);
Assert.assertNotNull(foundAlarm);
Assert.assertEquals(tenantAdminUserId, foundAlarm.getAssigneeId());
Assert.assertTrue(foundAlarm.getAssignTs() > beforeAssignmentTs && foundAlarm.getAssignTs() < System.currentTimeMillis());
testNotifyEntityAllOneTime(foundAlarm, foundAlarm.getId(), foundAlarm.getOriginator(),
tenantId, customerId, tenantAdminUserId, TENANT_ADMIN_EMAIL, ActionType.ALARM_ASSIGNED);
beforeAssignmentTs = System.currentTimeMillis();
Thread.sleep(2);
doDelete("/api/alarm/" + alarm.getId() + "/assign").andExpect(status().isOk());
foundAlarm = doGet("/api/alarm/info/" + alarm.getId(), AlarmInfo.class);
Assert.assertNotNull(foundAlarm);
Assert.assertNull(foundAlarm.getAssigneeId());
Assert.assertTrue(foundAlarm.getAssignTs() > beforeAssignmentTs && foundAlarm.getAssignTs() < System.currentTimeMillis());
testNotifyEntityAllOneTime(foundAlarm, foundAlarm.getId(), foundAlarm.getOriginator(),
tenantId, customerId, tenantAdminUserId, TENANT_ADMIN_EMAIL, ActionType.ALARM_UNASSIGNED);
}
|
@Override
public int compareTo(Point point) {
if (this.x > point.x) {
return 1;
} else if (this.x < point.x) {
return -1;
} else if (this.y > point.y) {
return 1;
} else if (this.y < point.y) {
return -1;
}
return 0;
}
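The hand-rolled chain above orders by x, then y. An equivalent java.util.Comparator sketch, assuming conventional getX()/getY() accessors exist on Point:
Comparator<Point> byXThenY = Comparator.comparingInt(Point::getX).thenComparingInt(Point::getY);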
|
@Test
public void compareToTest() {
Point point1 = new Point(1, 2);
Point point2 = new Point(1, 2);
Point point3 = new Point(1, 1);
Point point4 = new Point(2, 2);
Assert.assertEquals(0, point1.compareTo(point2));
TestUtils.notCompareToTest(point1, point3);
TestUtils.notCompareToTest(point1, point4);
}
|
public void setMd5sum(String md5sum) {
this.md5sum = md5sum;
}
|
@Test
public void testSetMd5sum() {
String md5sum = "test";
Dependency instance = new Dependency();
instance.setMd5sum(md5sum);
assertEquals(md5sum, instance.getMd5sum());
}
|
public Plan validateReservationUpdateRequest(
ReservationSystem reservationSystem, ReservationUpdateRequest request)
throws YarnException {
ReservationId reservationId = request.getReservationId();
Plan plan = validateReservation(reservationSystem, reservationId,
AuditConstants.UPDATE_RESERVATION_REQUEST);
validateReservationDefinition(reservationId,
request.getReservationDefinition(), plan,
AuditConstants.UPDATE_RESERVATION_REQUEST);
return plan;
}
|
@Test
public void testUpdateReservationNegativeRecurrenceExpression() {
ReservationUpdateRequest request =
createSimpleReservationUpdateRequest(1, 1, 1, 5, 3, "-1234");
plan = null;
try {
plan =
rrValidator.validateReservationUpdateRequest(rSystem, request);
Assert.fail();
} catch (YarnException e) {
Assert.assertNull(plan);
String message = e.getMessage();
Assert.assertTrue(message
.startsWith("Negative Period : "));
LOG.info(message);
}
}
|
public EventWithContext addMessageContext(Message message) {
return toBuilder().messageContext(message).build();
}
|
@Test
public void addMessageContext() {
final Event event = new TestEvent();
final Message message = messageFactory.createMessage("", "", DateTime.now(DateTimeZone.UTC));
final EventWithContext withContext = EventWithContext.builder()
.event(event)
.build();
final EventWithContext withContext1 = withContext.addMessageContext(message);
assertThat(withContext.messageContext()).isNotPresent();
assertThat(withContext1.messageContext()).get().isEqualTo(message);
}
|
public static ClassLoader getClassLoader(Class<?> clazz) {
ClassLoader cl = null;
try {
cl = Thread.currentThread().getContextClassLoader();
} catch (Throwable ex) {
// Cannot access thread context ClassLoader - falling back to system class loader...
}
if (cl == null) {
// No thread context class loader -> use class loader of this class.
cl = clazz.getClassLoader();
if (cl == null) {
// getClassLoader() returning null indicates the bootstrap ClassLoader
try {
cl = ClassLoader.getSystemClassLoader();
} catch (Throwable ex) {
// Cannot access system ClassLoader - oh well, maybe the caller can live with null...
}
}
}
return cl;
}
|
@Test
public void testGetClassLoader() {
ClassLoader expectedClassLoader = Thread.currentThread().getContextClassLoader();
ClassLoader actualClassLoader = ClassUtil.getClassLoader(ClassUtilTest.class);
Assert.assertEquals(expectedClassLoader, actualClassLoader);
expectedClassLoader = ClassUtilTest.class.getClassLoader();
actualClassLoader = ClassUtil.getClassLoader(null);
Assert.assertEquals(expectedClassLoader, actualClassLoader);
expectedClassLoader = ClassLoader.getSystemClassLoader();
actualClassLoader = ClassUtil.getClassLoader(String.class);
Assert.assertEquals(expectedClassLoader, actualClassLoader);
}
|
@Override
protected RemotingCommand processRequest0(ChannelHandlerContext ctx, RemotingCommand request,
ProxyContext context) throws Exception {
PullMessageRequestHeader requestHeader = (PullMessageRequestHeader) request.decodeCommandCustomHeader(PullMessageRequestHeader.class);
int sysFlag = requestHeader.getSysFlag();
if (!PullSysFlag.hasSubscriptionFlag(sysFlag)) {
ConsumerGroupInfo consumerInfo = messagingProcessor.getConsumerGroupInfo(context, requestHeader.getConsumerGroup());
if (consumerInfo == null) {
return RemotingCommand.buildErrorResponse(ResponseCode.SUBSCRIPTION_NOT_LATEST,
"the consumer's subscription not latest");
}
SubscriptionData subscriptionData = consumerInfo.findSubscriptionData(requestHeader.getTopic());
if (subscriptionData == null) {
return RemotingCommand.buildErrorResponse(ResponseCode.SUBSCRIPTION_NOT_EXIST,
"the consumer's subscription not exist");
}
requestHeader.setSysFlag(PullSysFlag.buildSysFlagWithSubscription(sysFlag));
requestHeader.setSubscription(subscriptionData.getSubString());
requestHeader.setExpressionType(subscriptionData.getExpressionType());
request.writeCustomHeader(requestHeader);
request.makeCustomHeaderToNet();
}
long timeoutMillis = requestHeader.getSuspendTimeoutMillis() + Duration.ofSeconds(10).toMillis();
return request(ctx, request, context, timeoutMillis);
}
|
@Test
public void testPullMessageWithoutSub() throws Exception {
when(messagingProcessorMock.getConsumerGroupInfo(any(), eq(group)))
.thenReturn(consumerGroupInfoMock);
SubscriptionData subscriptionData = new SubscriptionData();
subscriptionData.setSubString(subString);
subscriptionData.setExpressionType(type);
when(consumerGroupInfoMock.findSubscriptionData(eq(topic)))
.thenReturn(subscriptionData);
PullMessageRequestHeader header = new PullMessageRequestHeader();
header.setTopic(topic);
header.setConsumerGroup(group);
header.setQueueId(0);
header.setQueueOffset(0L);
header.setMaxMsgNums(16);
header.setSysFlag(PullSysFlag.buildSysFlag(true, false, false, false));
header.setCommitOffset(0L);
header.setSuspendTimeoutMillis(1000L);
header.setSubVersion(0L);
header.setBrokerName(brokerName);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.PULL_MESSAGE, header);
request.makeCustomHeaderToNet();
RemotingCommand expectResponse = RemotingCommand.createResponseCommand(ResponseCode.NO_MESSAGE, "success");
PullMessageRequestHeader newHeader = new PullMessageRequestHeader();
newHeader.setTopic(topic);
newHeader.setConsumerGroup(group);
newHeader.setQueueId(0);
newHeader.setQueueOffset(0L);
newHeader.setMaxMsgNums(16);
newHeader.setSysFlag(PullSysFlag.buildSysFlag(true, false, true, false));
newHeader.setCommitOffset(0L);
newHeader.setSuspendTimeoutMillis(1000L);
newHeader.setSubVersion(0L);
newHeader.setBrokerName(brokerName);
newHeader.setSubscription(subString);
newHeader.setExpressionType(type);
RemotingCommand matchRequest = RemotingCommand.createRequestCommand(RequestCode.PULL_MESSAGE, newHeader);
matchRequest.setOpaque(request.getOpaque());
matchRequest.makeCustomHeaderToNet();
ArgumentCaptor<RemotingCommand> captor = ArgumentCaptor.forClass(RemotingCommand.class);
when(messagingProcessorMock.request(any(), eq(brokerName), captor.capture(), anyLong()))
.thenReturn(CompletableFuture.completedFuture(expectResponse));
RemotingCommand response = pullMessageActivity.processRequest0(ctx, request, null);
assertThat(captor.getValue().getExtFields()).isEqualTo(matchRequest.getExtFields());
assertThat(response).isNull();
verify(ctx, times(1)).writeAndFlush(eq(expectResponse));
}
|
public ByteKey interpolateKey(double fraction) {
checkArgument(
fraction >= 0.0 && fraction < 1.0, "Fraction %s must be in the range [0, 1)", fraction);
byte[] startBytes = startKey.getBytes();
byte[] endBytes = endKey.getBytes();
// If the endKey is unspecified, add a leading 1 byte to it and a leading 0 byte to all other
// keys, to get a concrete least upper bound for the desired range.
if (endKey.isEmpty()) {
startBytes = addHeadByte(startBytes, (byte) 0);
endBytes = addHeadByte(endBytes, (byte) 1);
}
// Pad to the longest key.
int paddedKeyLength = Math.max(startBytes.length, endBytes.length);
BigInteger rangeStartInt = paddedPositiveInt(startBytes, paddedKeyLength);
BigInteger rangeEndInt = paddedPositiveInt(endBytes, paddedKeyLength);
// If the keys are equal subject to padding by 0, we can't interpolate.
BigInteger range = rangeEndInt.subtract(rangeStartInt);
checkState(
!range.equals(BigInteger.ZERO),
"Refusing to interpolate for near-empty %s where start and end keys differ only by trailing"
+ " zero bytes.",
this);
// Add precision so that range is at least 53 (double mantissa length) bits long. This way, we
// can interpolate small ranges finely, e.g., split the range key 3 to key 4 into 1024 parts.
// We add precision to range by adding zero bytes to the end of the keys, aka shifting the
// underlying BigInteger left by a multiple of 8 bits.
int bytesNeeded = ((53 - range.bitLength()) + 7) / 8;
if (bytesNeeded > 0) {
range = range.shiftLeft(bytesNeeded * 8);
rangeStartInt = rangeStartInt.shiftLeft(bytesNeeded * 8);
paddedKeyLength += bytesNeeded;
}
BigInteger interpolatedOffset =
new BigDecimal(range).multiply(BigDecimal.valueOf(fraction)).toBigInteger();
int outputKeyLength = endKey.isEmpty() ? (paddedKeyLength - 1) : paddedKeyLength;
return ByteKey.copyFrom(
fixupHeadZeros(rangeStartInt.add(interpolatedOffset).toByteArray(), outputKeyLength));
}
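A worked example of the precision arithmetic, sketched with Beam's ByteKey factories:
// Halfway between 0x30 and 0x40: the range 0x10 is only 5 bits wide, so six zero bytes
// are appended (53-bit mantissa target) before multiplying by the fraction.
ByteKeyRange range = ByteKeyRange.of(ByteKey.of(0x30), ByteKey.of(0x40));
ByteKey mid = range.interpolateKey(0.5); // 0x38 followed by the padding zero bytes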
|
@Test
public void testInterpolateKeyIsNotEmpty() {
String fmt = "Interpolating %s at fraction 0.0 should not return the empty key";
for (ByteKeyRange range : TEST_RANGES) {
assertFalse(String.format(fmt, range), range.interpolateKey(0.0).isEmpty());
}
}
|
public String toJson(boolean pretty) { return SlimeUtils.toJson(inspector, !pretty); }
|
@Test
void builds_expected_json() {
var expected =
"""
{
"string": "bar",
"integer": 42,
"floaty": 8.25,
"bool": true,
"array": [
1,
2,
3
],
"quux": {
"corge": "grault"
}
}
""";
var json = Json.Builder.newObject()
.set("string", "bar")
.set("integer", 42)
.set("floaty", 8.25)
.set("bool", true)
.set("array", Json.Builder.newArray().add(1).add(2).add(3))
.set("quux", Json.Builder.newObject().set("corge", "grault"))
.build()
.toJson(true);
assertEquals(expected, json);
}
|
@Override
public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception {
if (args.isEmpty()) {
printHelp(out);
return 0;
}
OutputStream output = out;
if (args.size() > 1) {
output = Util.fileOrStdout(args.get(args.size() - 1), out);
args = args.subList(0, args.size() - 1);
}
DataFileWriter<GenericRecord> writer = new DataFileWriter<>(new GenericDatumWriter<>());
Schema schema = null;
Map<String, byte[]> metadata = new TreeMap<>();
String inputCodec = null;
for (String inFile : expandsInputFiles(args)) {
InputStream input = Util.fileOrStdin(inFile, in);
DataFileStream<GenericRecord> reader = new DataFileStream<>(input, new GenericDatumReader<>());
if (schema == null) {
// this is the first file - set up the writer, and store the
// Schema & metadata we'll use.
schema = reader.getSchema();
for (String key : reader.getMetaKeys()) {
if (!DataFileWriter.isReservedMeta(key)) {
byte[] metadatum = reader.getMeta(key);
metadata.put(key, metadatum);
writer.setMeta(key, metadatum);
}
}
inputCodec = reader.getMetaString(DataFileConstants.CODEC);
if (inputCodec == null) {
inputCodec = DataFileConstants.NULL_CODEC;
}
writer.setCodec(CodecFactory.fromString(inputCodec));
writer.create(schema, output);
} else {
// check that we're appending to the same schema & metadata.
if (!schema.equals(reader.getSchema())) {
err.println("input files have different schemas");
reader.close();
return 1;
}
for (String key : reader.getMetaKeys()) {
if (!DataFileWriter.isReservedMeta(key)) {
byte[] metadatum = reader.getMeta(key);
byte[] writersMetadatum = metadata.get(key);
if (!Arrays.equals(metadatum, writersMetadatum)) {
err.println("input files have different non-reserved metadata");
reader.close();
return 2;
}
}
}
String thisCodec = reader.getMetaString(DataFileConstants.CODEC);
if (thisCodec == null) {
thisCodec = DataFileConstants.NULL_CODEC;
}
if (!inputCodec.equals(thisCodec)) {
err.println("input files have different codecs");
reader.close();
return 3;
}
}
writer.appendAllFrom(reader, /* recompress */ false);
reader.close();
}
writer.close();
return 0;
}
|
@Test
void differentMetadataFail() throws Exception {
Map<String, String> metadata1 = new HashMap<>();
metadata1.put("myMetaKey", "myMetaValue");
Map<String, String> metadata2 = new HashMap<>();
metadata2.put("myOtherMetaKey", "myOtherMetaValue");
File input1 = generateData(name.getMethodName() + "-1.avro", Type.STRING, metadata1, DEFLATE);
File input2 = generateData(name.getMethodName() + "-2.avro", Type.STRING, metadata2, DEFLATE);
File output = new File(OUTPUT_DIR, name.getMethodName() + ".avro");
List<String> args = asList(input1.getAbsolutePath(), input2.getAbsolutePath(), output.getAbsolutePath());
int returnCode = new ConcatTool().run(System.in, System.out, System.err, args);
assertEquals(2, returnCode);
}
|
public double calculateMinPercentageUsedBy(NormalizedResources used, double totalMemoryMb, double usedMemoryMb) {
if (LOG.isTraceEnabled()) {
LOG.trace("Calculating min percentage used by. Used Mem: {} Total Mem: {}"
+ " Used Normalized Resources: {} Total Normalized Resources: {}", totalMemoryMb, usedMemoryMb,
toNormalizedMap(), used.toNormalizedMap());
}
double min = 1.0;
if (usedMemoryMb > totalMemoryMb) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalMemoryMb != 0.0) {
min = Math.min(min, usedMemoryMb / totalMemoryMb);
}
double totalCpu = getTotalCpu();
if (used.getTotalCpu() > totalCpu) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
if (totalCpu != 0.0) {
min = Math.min(min, used.getTotalCpu() / totalCpu);
}
if (used.otherResources.length > otherResources.length) {
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb);
}
for (int i = 0; i < otherResources.length; i++) {
if (otherResources[i] == 0.0) {
//Skip any resources where the total is 0, the percent used for this resource isn't meaningful.
//We fall back to prioritizing by cpu, memory and any other resources by ignoring this value
continue;
}
if (i >= used.otherResources.length) {
//Resources missing from used are using none of that resource
return 0;
}
if (used.otherResources[i] > otherResources[i]) {
String info = String.format("%s, %f > %f", getResourceNameForResourceIndex(i), used.otherResources[i], otherResources[i]);
throwBecauseUsedIsNotSubsetOfTotal(used, totalMemoryMb, usedMemoryMb, info);
}
min = Math.min(min, used.otherResources[i] / otherResources[i]);
}
return min * 100.0;
}
|
@Test
public void testCalculateMinWithCpuMemAndGenericResource() {
Map<String, Double> allResourcesMap = new HashMap<>();
allResourcesMap.put(Constants.COMMON_CPU_RESOURCE_NAME, 2.0);
allResourcesMap.put(gpuResourceName, 10.0);
NormalizedResources resources = new NormalizedResources(normalize(allResourcesMap));
Map<String, Double> usedResourcesMap = new HashMap<>();
usedResourcesMap.put(Constants.COMMON_CPU_RESOURCE_NAME, 1.0);
usedResourcesMap.put(gpuResourceName, 1.0);
NormalizedResources usedResources = new NormalizedResources(normalize(usedResourcesMap));
double min = resources.calculateMinPercentageUsedBy(usedResources, 4, 1);
assertThat(min, is(10.0));
}
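The expected 10.0 follows directly from the min-ratio logic in the focal method: memory 1/4 = 25%, CPU 1.0/2.0 = 50%, GPU 1.0/10.0 = 10%; the smallest ratio, 0.10, scaled by 100, gives 10.0.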
|
@Override
public void close() {
try {
if (!closed) {
closed = true;
// Fail all buffered streams.
Http2ChannelClosedException e = new Http2ChannelClosedException();
while (!pendingStreams.isEmpty()) {
PendingStream stream = pendingStreams.pollFirstEntry().getValue();
stream.close(e);
}
}
} finally {
super.close();
}
}
|
@Test
public void headersAfterCloseShouldImmediatelyFail() {
encoder.writeSettingsAck(ctx, newPromise());
encoder.close();
ChannelFuture f = encoderWriteHeaders(3, newPromise());
assertNotNull(f.cause());
}
|
public Map<String, Object> getProducerClientConfigProps() {
final Map<String, Object> map = new HashMap<>();
map.putAll(getConfigsFor(ProducerConfig.configNames()));
// producer client metrics aren't used in Confluent deployment
possiblyConfigureConfluentTelemetry(map);
return Collections.unmodifiableMap(map);
}
|
@Test
public void shouldFilterProducerConfigs() {
// Given:
final Map<String, Object> configs = new HashMap<>();
configs.put(ProducerConfig.ACKS_CONFIG, "all");
configs.put(ProducerConfig.CLIENT_ID_CONFIG, null);
configs.put("not.a.config", "123");
final KsqlConfig ksqlConfig = new KsqlConfig(configs);
// When:
assertThat(ksqlConfig.getProducerClientConfigProps(), hasEntry(ProducerConfig.ACKS_CONFIG, "all"));
assertThat(ksqlConfig.getProducerClientConfigProps(), hasEntry(ProducerConfig.CLIENT_ID_CONFIG, null));
assertThat(ksqlConfig.getProducerClientConfigProps(), not(hasKey("not.a.config")));
}
|
@Override
protected void doExecute() {
if (vpls == null) {
vpls = get(Vpls.class);
}
if (interfaceService == null) {
interfaceService = get(InterfaceService.class);
}
VplsCommandEnum enumCommand = VplsCommandEnum.enumFromString(command);
if (enumCommand != null) {
switch (enumCommand) {
case ADD_IFACE:
addIface(vplsName, optArg);
break;
case CREATE:
create(vplsName);
break;
case DELETE:
delete(vplsName);
break;
case LIST:
list();
break;
case REMOVE_IFACE:
removeIface(vplsName, optArg);
break;
case SET_ENCAP:
setEncap(vplsName, optArg);
break;
case SHOW:
show(vplsName);
break;
case CLEAN:
cleanVpls();
break;
default:
print(VPLS_COMMAND_NOT_FOUND, command);
}
} else {
print(VPLS_COMMAND_NOT_FOUND, command);
}
}
|
@Test
public void testIfaceAssociated() {
((TestVpls) vplsCommand.vpls).initSampleData();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
System.setOut(ps);
vplsCommand.command = VplsCommandEnum.ADD_IFACE.toString();
vplsCommand.vplsName = VPLS1;
vplsCommand.optArg = V200H1.name();
vplsCommand.doExecute();
String result = baos.toString();
assertEquals(IFACE_ALREADY_USED, result);
}
|
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
final byte[] payload = rawMessage.getPayload();
final JsonNode event;
try {
event = objectMapper.readTree(payload);
if (event == null || event.isMissingNode()) {
throw new IOException("null result");
}
} catch (IOException e) {
LOG.error("Couldn't decode raw message {}", rawMessage);
return null;
}
return parseEvent(event);
}
|
@Test
public void decodeMessagesHandlesMetricbeatMessages() throws Exception {
final String[] testFiles = {
"metricbeat-docker-container.json",
"metricbeat-docker-cpu.json",
"metricbeat-docker-diskio.json",
"metricbeat-docker-info.json",
"metricbeat-docker-memory.json",
"metricbeat-docker-network.json",
"metricbeat-mongodb-status.json",
"metricbeat-mysql-status.json",
"metricbeat-system-core.json",
"metricbeat-system-cpu.json",
"metricbeat-system-filesystem.json",
"metricbeat-system-fsstat.json",
"metricbeat-system-load.json",
"metricbeat-system-memory.json",
"metricbeat-system-network.json",
"metricbeat-system-process.json"
};
for (String testFile : testFiles) {
final Message message = codec.decode(messageFromJson(testFile));
assertThat(message).isNotNull();
assertThat(message.getSource()).isEqualTo("example.local");
assertThat(message.getTimestamp()).isEqualTo(new DateTime(2016, 12, 14, 12, 0, DateTimeZone.UTC));
assertThat(message.getField("beats_type")).isEqualTo("metricbeat");
}
}
|
public byte[] getHl7AcknowledgementBytes() {
return hl7AcknowledgementBytes;
}
|
@Test
public void testGetHl7AcknowledgementBytes() {
instance = new MllpException(EXCEPTION_MESSAGE, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, NULL_BYTE_ARRAY, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, NULL_BYTE_ARRAY, NULL_BYTE_ARRAY, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, NULL_BYTE_ARRAY, EMPTY_BYTE_ARRAY, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, EMPTY_BYTE_ARRAY, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, EMPTY_BYTE_ARRAY, NULL_BYTE_ARRAY, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, HL7_MESSAGE_BYTES, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, HL7_MESSAGE_BYTES, NULL_BYTE_ARRAY, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, HL7_MESSAGE_BYTES, EMPTY_BYTE_ARRAY, LOG_PHI_TRUE);
assertNull(instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, HL7_MESSAGE_BYTES, HL7_ACKNOWLEDGEMENT_BYTES, LOG_PHI_TRUE);
assertArrayEquals(HL7_ACKNOWLEDGEMENT_BYTES, instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, null, HL7_ACKNOWLEDGEMENT_BYTES, LOG_PHI_TRUE);
assertArrayEquals(HL7_ACKNOWLEDGEMENT_BYTES, instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, EMPTY_BYTE_ARRAY, HL7_ACKNOWLEDGEMENT_BYTES, LOG_PHI_TRUE);
assertArrayEquals(HL7_ACKNOWLEDGEMENT_BYTES, instance.getHl7AcknowledgementBytes());
instance = new MllpException(EXCEPTION_MESSAGE, HL7_MESSAGE_BYTES, HL7_ACKNOWLEDGEMENT_BYTES, LOG_PHI_TRUE);
assertArrayEquals(HL7_ACKNOWLEDGEMENT_BYTES, instance.getHl7AcknowledgementBytes());
}
|
public Document process(Document input) throws IOException, TransformerException {
Document doc = Xml.copyDocument(input);
includeFile(application, doc.getDocumentElement());
return doc;
}
|
@Test(expected = NoSuchFileException.class)
public void testRequiredIncludeIsDefault() throws ParserConfigurationException, IOException, SAXException, TransformerException {
File app = new File("src/test/resources/multienvapp_failrequired");
DocumentBuilder docBuilder = Xml.getPreprocessDocumentBuilder();
new IncludeProcessor(app).process(docBuilder.parse(getServices(app)));
fail("should fail by default to include a non-existent file");
}
|
@Override
public void afterComponent(Component component) {
componentsWithUnprocessedIssues.remove(component.getUuid());
Optional<MovedFilesRepository.OriginalFile> originalFile = movedFilesRepository.getOriginalFile(component);
if (originalFile.isPresent()) {
componentsWithUnprocessedIssues.remove(originalFile.get().uuid());
}
}
|
@Test
public void remove_processed_files() {
when(movedFilesRepository.getOriginalFile(any(Component.class))).thenReturn(Optional.empty());
underTest.afterComponent(component);
verify(movedFilesRepository).getOriginalFile(component);
verify(componentsWithUnprocessedIssues).remove(UUID);
verifyNoMoreInteractions(componentsWithUnprocessedIssues);
}
|
public ConvertedTime getConvertedTime(long duration) {
Set<Seconds> keys = RULES.keySet();
for (Seconds seconds : keys) {
if (duration <= seconds.getSeconds()) {
return RULES.get(seconds).getConvertedTime(duration);
}
}
return new TimeConverter.OverTwoYears().getConvertedTime(duration);
}
|
@Test
public void testShouldReportOneMinuteFor30Seconds() {
assertEquals(TimeConverter.ABOUT_1_MINUTE_AGO, timeConverter.getConvertedTime(30));
}
|
@Override
protected long doGetContentSize() throws Exception {
return 0;
}
|
@Test
public void testDoGetContentSizeReturns0() throws Exception {
// For coverage: this abstract method must be overridden, but it is not expected to be
// called, since getContent is overridden to delegate to the resolved file object.
assertEquals( 0, fileObject.doGetContentSize() );
}
|
public Flowable<PendingTransactionNotification> newPendingTransactionsNotifications() {
return web3jService.subscribe(
new Request<>(
"eth_subscribe",
Arrays.asList("newPendingTransactions"),
web3jService,
EthSubscribe.class),
"eth_unsubscribe",
PendingTransactionNotification.class);
}
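A hypothetical usage sketch, assuming web3j's notification shape where getParams().getResult() carries the pending transaction hash:
geth.newPendingTransactionsNotifications()
.subscribe(notification -> System.out.println(notification.getParams().getResult()));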
|
@Test
public void testPendingTransactionsNotifications() {
geth.newPendingTransactionsNotifications();
verify(webSocketClient)
.send(
matches(
"\\{\"jsonrpc\":\"2.0\",\"method\":\"eth_subscribe\",\"params\":"
+ "\\[\"newPendingTransactions\"],\"id\":[0-9]{1,}}"));
}
|
@Override
public String execute(CommandContext commandContext, String[] args) {
Channel channel = commandContext.getRemote();
if (ArrayUtils.isEmpty(args)) {
return "Please input service name, eg: \r\ncd XxxService\r\ncd com.xxx.XxxService";
}
String message = args[0];
StringBuilder buf = new StringBuilder();
if ("/".equals(message) || "..".equals(message)) {
String service = channel.attr(SERVICE_KEY).getAndRemove();
buf.append("Cancelled default service ").append(service).append('.');
} else {
boolean found = false;
for (Exporter<?> exporter : dubboProtocol.getExporters()) {
if (message.equals(exporter.getInvoker().getInterface().getSimpleName())
|| message.equals(exporter.getInvoker().getInterface().getName())
|| message.equals(exporter.getInvoker().getUrl().getPath())
|| message.equals(exporter.getInvoker().getUrl().getServiceKey())) {
found = true;
break;
}
}
if (found) {
channel.attr(SERVICE_KEY).set(message);
buf.append("Used the ")
.append(message)
.append(" as default.\r\nYou can cancel default service by command: cd /");
} else {
buf.append("No such service ").append(message);
}
}
return buf.toString();
}
|
@Test
void testChangeCancel2() {
String result = change.execute(mockCommandContext, new String[] {"/"});
assertEquals("Cancelled default service org.apache.dubbo.rpc.protocol.dubbo.support.DemoService.", result);
}
|
public List<String> toList(boolean trim) {
return toList((str) -> trim ? StrUtil.trim(str) : str);
}
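A sketch of the trim flag, using the constructor shape from the test below (assuming a non-positive limit means "no limit" here):
SplitIter iter = new SplitIter(" a : b :c", new CharFinder(':'), -1, false);
List<String> trimmed = iter.toList(true); // ["a", "b", "c"] - each element passed through StrUtil.trim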
|
@Test
public void splitLimitTest(){
String text = "55:02:18";
SplitIter splitIter = new SplitIter(text,
new CharFinder(':'),
3,
false
);
final List<String> strings = splitIter.toList(false);
assertEquals(3, strings.size());
}
|
public List<InterpreterResultMessage> message() {
return msg;
}
|
@Test
void testSimpleMagicData() {
InterpreterResult result = null;
result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
"%table col1\tcol2\naaa\t123\n");
assertEquals("col1\tcol2\naaa\t123\n", result.message().get(0).getData(),
"%table col1\tcol2\naaa\t123\n");
result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
"%table\ncol1\tcol2\naaa\t123\n");
assertEquals("col1\tcol2\naaa\t123\n", result.message().get(0).getData(),
"%table\ncol1\tcol2\naaa\t123\n");
result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
"some text before magic word\n%table col1\tcol2\naaa\t123\n");
assertEquals("col1\tcol2\naaa\t123\n", result.message().get(1).getData(),
"some text before magic word\n%table col1\tcol2\naaa\t123\n");
}
|
public String indexName(Record<GenericObject> record) {
if (this.dateTimeFormatters.isEmpty()) {
return this.indexNameFormat;
}
Instant eventTime = Instant.ofEpochMilli(record.getEventTime()
.orElseThrow(() -> new IllegalStateException("No event time in record")));
StringBuilder builder = new StringBuilder(this.segments.get(0));
for (int i = 0; i < dateTimeFormatters.size(); i++) {
builder.append(dateTimeFormatters.get(i).format(eventTime));
builder.append(this.segments.get(i + 1));
}
return builder.toString();
}
|
@Test(dataProvider = "indexFormats")
public void testIndexFormats(String format, String result) {
Record record = Mockito.mock(Record.class);
when(record.getEventTime()).thenReturn(Optional.of(1645182000000L));
IndexNameFormatter formatter = new IndexNameFormatter(format);
assertEquals(formatter.indexName(record), result);
}
|
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
MetricsRecorder metricsRecorder,
BufferSupplier bufferSupplier) {
if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
// check the magic value
if (!records.hasMatchingMagic(toMagic))
return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
else
// Do in-place validation, offset assignment and maybe set timestamp
return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
} else
return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
}
|
@Test
public void testCreateTimeUpConversionV1ToV2() {
long timestamp = System.currentTimeMillis();
Compression compression = Compression.gzip().build();
MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V1, timestamp, compression);
LogValidator validator = new LogValidator(
records,
topicPartition,
time,
CompressionType.GZIP,
compression,
false,
RecordBatch.MAGIC_VALUE_V2,
TimestampType.CREATE_TIME,
1000L,
1000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
);
LogValidator.ValidationResult validatedResults = validator.validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(0),
metricsRecorder,
RequestLocal.withThreadConfinedCaching().bufferSupplier()
);
MemoryRecords validatedRecords = validatedResults.validatedRecords;
for (RecordBatch batch : validatedRecords.batches()) {
assertTrue(batch.isValid());
maybeCheckBaseTimestamp(timestamp, batch);
assertEquals(timestamp, batch.maxTimestamp());
assertEquals(TimestampType.CREATE_TIME, batch.timestampType());
assertEquals(RecordBatch.NO_PRODUCER_EPOCH, batch.producerEpoch());
assertEquals(RecordBatch.NO_PRODUCER_ID, batch.producerId());
assertEquals(RecordBatch.NO_SEQUENCE, batch.baseSequence());
}
assertEquals(timestamp, validatedResults.maxTimestampMs);
assertEquals(2, validatedResults.shallowOffsetOfMaxTimestamp, "Offset of max timestamp should be the last offset 2.");
assertTrue(validatedResults.messageSizeMaybeChanged, "Message size should have been changed");
verifyRecordValidationStats(
validatedResults.recordValidationStats,
3,
records,
true
);
}
|
protected int getDiffSize() {
return brokerConfigDiff.size();
}
|
@Test
public void testChangedKRaftControllerConfigForCombinedNode() {
NodeRef combinedNodeId = new NodeRef("broker-0", 0, "broker", true, true);
List<ConfigEntry> desiredControllerConfig = singletonList(new ConfigEntry("controller.quorum.election.timeout.ms", "5000"));
List<ConfigEntry> currentControllerConfig = singletonList(new ConfigEntry("controller.quorum.election.timeout.ms", "1000"));
KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(currentControllerConfig),
getDesiredConfiguration(desiredControllerConfig), kafkaVersion, combinedNodeId);
assertThat(kcd.getDiffSize(), is(1));
}
|
public ListedHashTree getClonedTree() {
return newTree;
}
|
@Test
@SuppressWarnings("ReferenceEquality")
public void testCloning() throws Exception {
ListedHashTree original = new ListedHashTree();
GenericController controller = new GenericController();
controller.setName("controller");
Arguments args = new Arguments();
args.setName("args");
TestPlan plan = new TestPlan();
plan.addParameter("server", "jakarta");
original.add(controller, args);
original.add(plan);
ResultCollector listener = new ResultCollector();
listener.setName("Collector");
original.add(controller, listener);
TreeCloner cloner = new TreeCloner();
original.traverse(cloner);
ListedHashTree newTree = cloner.getClonedTree();
assertNotSame(original, newTree);
assertEquals(original.size(), newTree.size());
assertEquals(original.getTree(original.getArray()[0]).size(), newTree.getTree(newTree.getArray()[0]).size());
assertNotSame(original.getArray()[0], newTree.getArray()[0]);
assertEquals(((GenericController) original.getArray()[0]).getName(), ((GenericController) newTree
.getArray()[0]).getName());
assertSame(original.getTree(original.getArray()[0]).getArray()[1], newTree.getTree(newTree.getArray()[0])
.getArray()[1]);
TestPlan clonedTestPlan = (TestPlan) newTree.getArray()[1];
clonedTestPlan.setRunningVersion(true);
clonedTestPlan.recoverRunningVersion();
assertFalse(plan.getUserDefinedVariablesAsProperty().isRunningVersion());
assertTrue(clonedTestPlan.getUserDefinedVariablesAsProperty().isRunningVersion());
Arguments vars = (Arguments) plan.getUserDefinedVariablesAsProperty().getObjectValue();
PropertyIterator iter = ((CollectionProperty) vars.getProperty(Arguments.ARGUMENTS)).iterator();
while (iter.hasNext()) {
JMeterProperty argProp = iter.next();
assertFalse(argProp.isRunningVersion());
assertInstanceOf(Argument.class, argProp.getObjectValue());
Argument arg = (Argument) argProp.getObjectValue();
arg.setValue("yahoo");
assertEquals("yahoo", arg.getValue());
}
vars = (Arguments) clonedTestPlan.getUserDefinedVariablesAsProperty().getObjectValue();
iter = vars.propertyIterator();
while (iter.hasNext()) {
assertTrue(iter.next().isRunningVersion());
}
}
|
@Override
public Num getValue(int index) {
return values.get(index);
}
|
@Test
public void cashFlowValue() {
// First sample series
BarSeries sampleBarSeries = new MockBarSeries(numFunction, 3d, 2d, 5d, 1000d, 5000d, 0.0001d, 4d, 7d, 6d, 7d,
8d, 5d, 6d);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, sampleBarSeries),
Trade.sellAt(2, sampleBarSeries), Trade.buyAt(6, sampleBarSeries), Trade.sellAt(8, sampleBarSeries),
Trade.buyAt(9, sampleBarSeries), Trade.sellAt(11, sampleBarSeries));
CashFlow cashFlow = new CashFlow(sampleBarSeries, tradingRecord);
assertNumEquals(1, cashFlow.getValue(0));
assertNumEquals(2d / 3, cashFlow.getValue(1));
assertNumEquals(5d / 3, cashFlow.getValue(2));
assertNumEquals(5d / 3, cashFlow.getValue(3));
assertNumEquals(5d / 3, cashFlow.getValue(4));
assertNumEquals(5d / 3, cashFlow.getValue(5));
assertNumEquals(5d / 3, cashFlow.getValue(6));
assertNumEquals(5d / 3 * 7d / 4, cashFlow.getValue(7));
assertNumEquals(5d / 3 * 6d / 4, cashFlow.getValue(8));
assertNumEquals(5d / 3 * 6d / 4, cashFlow.getValue(9));
assertNumEquals(5d / 3 * 6d / 4 * 8d / 7, cashFlow.getValue(10));
assertNumEquals(5d / 3 * 6d / 4 * 5d / 7, cashFlow.getValue(11));
assertNumEquals(5d / 3 * 6d / 4 * 5d / 7, cashFlow.getValue(12));
// Second sample series
sampleBarSeries = new MockBarSeries(numFunction, 5d, 6d, 3d, 7d, 8d, 6d, 10d, 15d, 6d);
tradingRecord = new BaseTradingRecord(Trade.buyAt(4, sampleBarSeries), Trade.sellAt(5, sampleBarSeries),
Trade.buyAt(6, sampleBarSeries), Trade.sellAt(8, sampleBarSeries));
CashFlow flow = new CashFlow(sampleBarSeries, tradingRecord);
assertNumEquals(1, flow.getValue(0));
assertNumEquals(1, flow.getValue(1));
assertNumEquals(1, flow.getValue(2));
assertNumEquals(1, flow.getValue(3));
assertNumEquals(1, flow.getValue(4));
assertNumEquals("0.75", flow.getValue(5));
assertNumEquals("0.75", flow.getValue(6));
assertNumEquals("1.125", flow.getValue(7));
assertNumEquals("0.45", flow.getValue(8));
}
|
@Override
protected boolean copyObject(String src, String dst) {
LOG.debug("Copying {} to {}", src, dst);
GSObject obj = new GSObject(dst);
// Retry copy for a few times, in case some Jets3t or GCS internal errors happened during copy.
int retries = 3;
for (int i = 0; i < retries; i++) {
try {
mClient.copyObject(mBucketName, src, mBucketName, obj, false);
return true;
} catch (ServiceException e) {
LOG.error("Failed to copy file {} to {}", src, dst, e);
if (i != retries - 1) {
LOG.error("Retrying copying file {} to {}", src, dst);
}
}
}
LOG.error("Failed to copy file {} to {}, after {} retries", src, dst, retries);
return false;
}
|
@Test
public void testCopyObject() throws ServiceException {
// test successful copy object
when(mClient.copyObject(ArgumentMatchers.anyString(), ArgumentMatchers.anyString(),
ArgumentMatchers.anyString(), ArgumentMatchers.any(StorageObject.class),
ArgumentMatchers.anyBoolean())).thenReturn(null);
boolean result = mGCSUnderFileSystem.copyObject(SRC, DST);
Assert.assertTrue(result);
// test copy object exception
Mockito.when(mClient.copyObject(ArgumentMatchers.anyString(), ArgumentMatchers.anyString(),
ArgumentMatchers.anyString(), ArgumentMatchers.any(StorageObject.class),
ArgumentMatchers.anyBoolean())).thenThrow(ServiceException.class);
// copyObject swallows the ServiceException after exhausting its retries and signals
// failure through its return value, so no exception propagates to the caller.
result = mGCSUnderFileSystem.copyObject(SRC, DST);
Assert.assertFalse(result);
}
|
static int getNext(final CronEntry entry, final int current, final Calendar working) throws MessageFormatException {
int result = 0;
if (entry.currentWhen == null) {
entry.currentWhen = calculateValues(entry);
}
List<Integer> list = entry.currentWhen;
int next = -1;
for (Integer i : list) {
if (i > current) {
next = i;
break;
}
}
if (next != -1) {
result = next - current;
} else {
int first = list.get(0);
int fixedEnd = entry.end;
//months have different max values
if("DayOfMonth".equals(entry.name)) {
fixedEnd = working.getActualMaximum(Calendar.DAY_OF_MONTH)+1;
}
result = fixedEnd + first - entry.start - current;
// Account for difference of one vs zero based indices.
if (entry.name.equals("DayOfWeek") || entry.name.equals("Month")) {
result++;
}
}
return result;
}
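A worked wrap-around case, matching the first line of the test below:
// "0 0 1 * *" evaluated at 2016-04-15 for the DayOfMonth entry (start == 1):
// no listed value exceeds 15, so next == -1 and we wrap around:
// fixedEnd == getActualMaximum(DAY_OF_MONTH) + 1 == 31 for April, first == 1,
// result == 31 + 1 - 1 - 15 == 16 days, which lands on 2016-05-01 as asserted.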
|
@Test
public void testGetNext() throws MessageFormatException {
testGetNextSingle("0 0 1 * *", "2016-04-15T00:00:00", "2016-05-01T00:00:00");
testGetNextSingle("0 0 1,15 * *", "2016-04-15T00:00:00", "2016-05-01T00:00:00");
testGetNextSingle("0 0 1 * *", "2016-05-15T00:00:00", "2016-06-01T00:00:00");
testGetNextSingle("0 0 1,15 * *", "2016-05-15T00:00:00", "2016-06-01T00:00:00");
testGetNextSingle("0 0 1 * *", "2016-06-15T00:00:00", "2016-07-01T00:00:00");
testGetNextSingle("0 0 1,15 * *", "2016-06-15T00:00:00", "2016-07-01T00:00:00");
}
|
StringBuilder codeForMapWithValueMessageType(Descriptors.FieldDescriptor desc,
String fieldNameInCode,
String valueDescClassName,
int indent,
int varNum) {
StringBuilder code = new StringBuilder();
varNum++;
String mapVarName = "map" + varNum;
StringBuilder code1 = new StringBuilder();
code.append(
completeLine(String.format("Map<Object, Map<String, Object>> %s = new HashMap<>()", mapVarName), indent));
code.append(addIndent(String.format("for (Map.Entry<%s, %s> entry: msg.%s().entrySet()) {",
ProtoBufUtils.getTypeStrFromProto(desc.getMessageType().findFieldByName("key")),
ProtoBufUtils.getTypeStrFromProto(desc),
getProtoFieldMethodName(fieldNameInCode + "Map")), indent));
code.append(completeLine(String.format("%s.put(entry.getKey(), %s( (%s) entry.getValue()))", mapVarName,
getDecoderMethodName(valueDescClassName), valueDescClassName), ++indent));
code.append(addIndent("}", --indent));
code.append(completeLine(String.format("msgMap.put(\"%s\", %s)", desc.getName(), mapVarName), indent));
return code;
}
|
@Test
public void testCodeForMapWithValueMessageType() {
MessageCodeGen messageCodeGen = new MessageCodeGen();
// Repeated decoder method is non empty
Descriptors.FieldDescriptor fd = ComplexTypes.TestMessage.getDescriptor().findFieldByName(COMPLEX_MAP);
String fieldNameInCode = ProtobufInternalUtils.underScoreToCamelCase(fd.getName(), true);
String javaType = ProtoBufUtils.getFullJavaName(fd.getMessageType());
String expectedCode = " Map<Object, Map<String, Object>> map2 = new HashMap<>();\n"
+ " for (Map.Entry<String, Map<String,"
+ "org.apache.pinot.plugin.inputformat.protobuf.ComplexTypes.TestMessage.NestedMessage>> entry:"
+ " msg.getComplexMapMap().entrySet()) {\n"
+ " map2.put(entry.getKey(), decodeNestedMessageMapMessage( (NestedMessageMap) entry.getValue()));\n"
+ " }\n"
+ " msgMap.put(\"complex_map\", map2);\n";
assertEquals(messageCodeGen.codeForMapWithValueMessageType(
fd, fieldNameInCode, "NestedMessageMap", 1, 1).toString(), expectedCode);
}
|
public static void cleanDirectory(File directory) throws IOException {
if (!directory.exists()) {
String message = directory + " does not exist";
throw new IllegalArgumentException(message);
}
if (!directory.isDirectory()) {
String message = directory + " is not a directory";
throw new IllegalArgumentException(message);
}
File[] files = directory.listFiles();
if (files == null) { // null if security restricted
throw new IOException("Failed to list contents of " + directory);
}
IOException exception = null;
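// Sweep every entry, remembering only the most recent failure; it is rethrown
// after the loop so one undeletable file does not stop the rest of the cleanup.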
for (File file : files) {
try {
delete(file);
} catch (IOException ioe) {
exception = ioe;
}
}
if (null != exception) {
throw exception;
}
}
|
@Test
public void testCleanDirectory() throws Exception {
for (int i = 0; i < 10; i++) {
IOTinyUtils.writeStringToFile(new File(testRootDir, "testCleanDirectory" + i), "testCleanDirectory", StandardCharsets.UTF_8.name());
}
File dir = new File(testRootDir);
assertTrue(dir.exists() && dir.isDirectory());
assertTrue(dir.listFiles().length > 0);
IOTinyUtils.cleanDirectory(new File(testRootDir));
assertTrue(dir.listFiles().length == 0);
}
|
static SearchProtocol.SearchRequest convertFromQuery(Query query, int hits, String serverId, double requestTimeout) {
var builder = SearchProtocol.SearchRequest.newBuilder().setHits(hits).setOffset(query.getOffset())
.setTimeout((int) (requestTimeout * 1000));
var documentDb = query.getModel().getDocumentDb();
if (documentDb != null) {
builder.setDocumentType(documentDb);
}
GrowableByteBuffer scratchPad = threadLocalBuffer.get();
builder.setQueryTreeBlob(serializeQueryTree(query.getModel().getQueryTree(), scratchPad));
if (query.getGroupingSessionCache() || query.getRanking().getQueryCache()) {
// TODO verify that the session key is included whenever rank properties would have been
builder.setSessionKey(query.getSessionId(serverId).toString());
}
if (query.properties().getBoolean(Model.ESTIMATE)) {
builder.setHits(0);
}
if (GroupingExecutor.hasGroupingList(query)) {
List<Grouping> groupingList = GroupingExecutor.getGroupingList(query);
scratchPad.clear();
BufferSerializer gbuf = new BufferSerializer(scratchPad);
gbuf.putInt(null, groupingList.size());
for (Grouping g : groupingList) {
g.serialize(gbuf);
}
gbuf.getBuf().flip();
builder.setGroupingBlob(ByteString.copyFrom(gbuf.getBuf().getByteBuffer()));
}
if (query.getGroupingSessionCache()) {
builder.setCacheGrouping(true);
}
int traceLevel = getTraceLevelForBackend(query);
builder.setTraceLevel(traceLevel);
builder.setProfileDepth(query.getTrace().getProfileDepth());
if (traceLevel > 0) {
mergeToSearchRequestFromProfiling(query.getTrace().getProfiling(), builder);
}
mergeToSearchRequestFromRanking(query.getRanking(), scratchPad, builder);
return builder.build();
}
|
@Test
void testQuerySerialization() {
CompiledQueryProfileRegistry registry = new QueryProfileXMLReader().read("src/test/java/com/yahoo/search/query/profile/config/test/tensortypes").compile();
Query query = new Query.Builder().setQueryProfile(registry.getComponent("profile1"))
.setRequest("?query=test&ranking.features.query(tensor_1)=[1.200]")
.build();
SearchProtocol.SearchRequest request1 = ProtobufSerialization.convertFromQuery(query, 9, "serverId", 0.5);
assertEquals(9, request1.getHits());
assertEquals(0, request1.getRankPropertiesCount());
assertEquals(0, request1.getTensorRankPropertiesCount());
assertEquals(0, request1.getFeatureOverridesCount());
assertEquals(2, request1.getTensorFeatureOverridesCount());
assertEquals("\"\\001\\001\\003key\\001\\rpre_key1_post?\\360\\000\\000\\000\\000\\000\\000\"",
contentsOf(request1.getTensorFeatureOverrides(0).getValue()));
assertEquals("\"\\006\\001\\001\\001x\\001?\\231\\231\\232\"",
contentsOf(request1.getTensorFeatureOverrides(1).getValue()));
assertFalse(request1.hasProfiling());
query.prepare(); // calling prepare() moves "overrides" to "features" - content stays the same
SearchProtocol.SearchRequest request2 = ProtobufSerialization.convertFromQuery(query, 9, "serverId", 0.5);
assertEquals(9, request2.getHits());
assertEquals(0, request2.getRankPropertiesCount());
assertEquals(2, request2.getTensorRankPropertiesCount());
assertEquals("\"\\001\\001\\003key\\001\\rpre_key1_post?\\360\\000\\000\\000\\000\\000\\000\"",
contentsOf(request2.getTensorRankProperties(0).getValue()));
assertEquals("\"\\006\\001\\001\\001x\\001?\\231\\231\\232\"",
contentsOf(request2.getTensorRankProperties(1).getValue()));
assertEquals(0, request2.getFeatureOverridesCount());
assertEquals(0, request2.getTensorFeatureOverridesCount());
}
|
public static Instruction writeMetadata(long metadata, long metadataMask) {
return new MetadataInstruction(metadata, metadataMask);
}
|
@Test
public void testWriteMetadataMethod() {
final Instruction instruction =
Instructions.writeMetadata(metadata1, metadataMask1);
final Instructions.MetadataInstruction metadataInstruction =
checkAndConvert(instruction,
Instruction.Type.METADATA,
Instructions.MetadataInstruction.class);
assertThat(metadataInstruction.metadata(), is(metadata1));
assertThat(metadataInstruction.metadataMask(), is(metadataMask1));
}
|
@Pointcut("@annotation(org.apache.shenyu.admin.aspect.annotation.DataPermission)")
public void dataPermissionCut() { }
|
@Test
public void dataPermissionCutTest() {
assertDoesNotThrow(() -> dataPermissionAspect.dataPermissionCut());
}
|
public Map<String, Parameter> generateMergedStepParams(
WorkflowSummary workflowSummary,
Step stepDefinition,
StepRuntime stepRuntime,
StepRuntimeSummary runtimeSummary) {
Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>();
// Start with default step level params if present
Map<String, ParamDefinition> globalDefault = defaultParamManager.getDefaultStepParams();
if (globalDefault != null) {
ParamsMergeHelper.mergeParams(
allParamDefs,
globalDefault,
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.SYSTEM_DEFAULT));
}
// Merge in injected params returned by step if present (template schema)
Map<String, ParamDefinition> injectedParams =
stepRuntime.injectRuntimeParams(workflowSummary, stepDefinition);
maybeOverrideParamType(allParamDefs);
if (injectedParams != null) {
maybeOverrideParamType(injectedParams);
ParamsMergeHelper.mergeParams(
allParamDefs,
injectedParams,
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.TEMPLATE_SCHEMA));
}
// Merge in params applicable to step type
Optional<Map<String, ParamDefinition>> defaultStepTypeParams =
defaultParamManager.getDefaultParamsForType(stepDefinition.getType());
if (defaultStepTypeParams.isPresent()) {
LOG.debug("Merging step level default for {}", stepDefinition.getType());
ParamsMergeHelper.mergeParams(
allParamDefs,
defaultStepTypeParams.get(),
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.SYSTEM_DEFAULT));
}
// Merge in workflow and step info
ParamsMergeHelper.mergeParams(
allParamDefs,
injectWorkflowAndStepInfoParams(workflowSummary, runtimeSummary),
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.SYSTEM_INJECTED));
// Merge step run params and user-provided restart step run params:
// first collect the undefined params from both sources.
Map<String, ParamDefinition> undefinedRestartParams = new LinkedHashMap<>();
Optional<Map<String, ParamDefinition>> stepRestartParams =
getUserStepRestartParam(workflowSummary, runtimeSummary);
stepRestartParams.ifPresent(undefinedRestartParams::putAll);
Optional<Map<String, ParamDefinition>> stepRunParams =
getStepRunParams(workflowSummary, runtimeSummary);
Map<String, ParamDefinition> systemInjectedRestartRunParams = new LinkedHashMap<>();
stepRunParams.ifPresent(
params -> {
params.forEach(
(key, val) -> {
if (runtimeSummary.getRestartConfig() != null
&& Constants.RESERVED_PARAM_NAMES.contains(key)
&& val.getMode() == ParamMode.CONSTANT
&& val.getSource() == ParamSource.SYSTEM_INJECTED) {
((AbstractParamDefinition) val)
.getMeta()
.put(Constants.METADATA_SOURCE_KEY, ParamSource.RESTART.name());
systemInjectedRestartRunParams.put(key, val);
}
});
systemInjectedRestartRunParams.keySet().forEach(params::remove);
});
stepRunParams.ifPresent(undefinedRestartParams::putAll);
Optional.ofNullable(stepDefinition.getParams())
.ifPresent(
stepDefParams ->
stepDefParams.keySet().stream()
.filter(undefinedRestartParams::containsKey)
.forEach(undefinedRestartParams::remove));
// Then merge undefined restart params
if (!undefinedRestartParams.isEmpty()) {
mergeUserProvidedStepParams(allParamDefs, undefinedRestartParams, workflowSummary);
}
// Final merge from step definition
if (stepDefinition.getParams() != null) {
maybeOverrideParamType(stepDefinition.getParams());
ParamsMergeHelper.mergeParams(
allParamDefs,
stepDefinition.getParams(),
ParamsMergeHelper.MergeContext.stepCreate(ParamSource.DEFINITION));
}
// merge step run params
stepRunParams.ifPresent(
stepParams -> mergeUserProvidedStepParams(allParamDefs, stepParams, workflowSummary));
// merge all user provided restart step run params
stepRestartParams.ifPresent(
stepParams -> mergeUserProvidedStepParams(allParamDefs, stepParams, workflowSummary));
// merge all system injected restart step run params with mode and source already set.
allParamDefs.putAll(systemInjectedRestartRunParams);
// Cleanup any params that are missing and convert to params
return ParamsMergeHelper.convertToParameters(ParamsMergeHelper.cleanupParams(allParamDefs));
}
|
@Test
public void testStepParamSanity() {
Map<String, Parameter> stepParams =
paramsManager.generateMergedStepParams(workflowSummary, step, stepRuntime, runtimeSummary);
Assert.assertTrue(stepParams.isEmpty());
when(defaultParamManager.getDefaultStepParams())
.thenReturn(
Collections.singletonMap(
"workflow_id",
ParamDefinition.buildParamDefinition("workflow_id", "test-workflow")));
stepParams =
paramsManager.generateMergedStepParams(workflowSummary, step, stepRuntime, runtimeSummary);
Assert.assertFalse(stepParams.isEmpty());
}
|
@Override
public List<String> listDbNames() {
return icebergCatalog.listAllDatabases();
}
|
@Test
public void testListDatabaseNames(@Mocked IcebergCatalog icebergCatalog) {
new Expectations() {
{
icebergCatalog.listAllDatabases();
result = Lists.newArrayList("db1", "db2");
minTimes = 0;
}
};
IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergCatalog,
Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
List<String> expectResult = Lists.newArrayList("db1", "db2");
Assert.assertEquals(expectResult, metadata.listDbNames());
}
|
@Override
public MapperResult findAllConfigKey(MapperContext context) {
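// Page over ids in a subquery first, then join back to config_info to fetch the keys.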
String sql = " SELECT data_id,group_id,app_name FROM ( "
+ " SELECT id FROM config_info WHERE tenant_id LIKE ? ORDER BY id LIMIT " + context.getStartRow() + ","
+ context.getPageSize() + " )" + " g, config_info t WHERE g.id = t.id ";
return new MapperResult(sql, CollectionUtils.list(context.getWhereParameter(FieldConstant.TENANT_ID)));
}
|
@Test
void testFindAllConfigKey() {
MapperResult mapperResult = configInfoMapperByMySql.findAllConfigKey(context);
assertEquals(mapperResult.getSql(),
" SELECT data_id,group_id,app_name FROM ( " + " SELECT id FROM config_info WHERE tenant_id LIKE ? ORDER BY id LIMIT "
+ context.getStartRow() + "," + context.getPageSize() + " )" + " g, config_info t WHERE g.id = t.id ");
assertArrayEquals(new Object[] {tenantId}, mapperResult.getParamList().toArray());
}
|
public static <T> T getBean(Class<T> interfaceClass, Class typeClass) {
Object object = serviceMap.get(interfaceClass.getName() + "<" + typeClass.getName() + ">");
if(object == null) return null;
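// Array registrations hold multiple implementations; return the first one.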
if(object instanceof Object[]) {
return (T)Array.get(object, 0);
} else {
return (T)object;
}
}
|
@Test
public void testTwoGenericTypes() {
Pair p = SingletonServiceFactory.getBean(Pair.class);
Assert.assertEquals("key1", p.getKey());
Assert.assertEquals("value1", p.getValue());
}
|
@Override
public Set<NodeDiskUsageStats> diskUsageStats() {
final List<NodeResponse> result = nodes();
return result.stream()
.map(node -> NodeDiskUsageStats.create(node.name(), node.role(), node.ip(), node.host(), node.diskUsed(), node.diskTotal(), node.diskUsedPercent()))
.collect(Collectors.toSet());
}
|
@Test
void testDiskUsageStats() {
doReturn(List.of(NODE_WITH_CORRECT_INFO, NODE_WITH_MISSING_DISK_STATISTICS)).when(catApi).nodes();
final Set<NodeDiskUsageStats> diskUsageStats = clusterAdapter.diskUsageStats();
assertThat(diskUsageStats)
.hasSize(1)
.noneSatisfy(
diskStats -> assertThat(diskStats.name()).isEqualTo("nodeWithMissingDiskStatistics")
)
.first()
.satisfies(
nodeDescr -> {
assertThat(nodeDescr.name()).isEqualTo("nodeWithCorrectInfo");
assertThat(nodeDescr.ip()).isEqualTo("182.88.0.2");
assertThat(nodeDescr.roles()).isEqualTo(NodeRole.parseSymbolString("dimr"));
assertThat(nodeDescr.diskUsed().getBytes()).isEqualTo(SIUnitParser.parseBytesSizeValue("45gb").getBytes());
assertThat(nodeDescr.diskTotal().getBytes()).isEqualTo(SIUnitParser.parseBytesSizeValue("411.5gb").getBytes());
assertThat(nodeDescr.diskUsedPercent()).isEqualTo(10.95d);
}
);
}
|
public static String convertToYamlString(final ShardingTableReferenceRuleConfiguration data) {
return String.format("%s:%s", data.getName(), data.getReference());
}
|
@Test
void assertConvertToYamlString() {
ShardingTableReferenceRuleConfiguration data = new ShardingTableReferenceRuleConfiguration("foo", "LOGIC_TABLE,SUB_LOGIC_TABLE");
String actual = YamlShardingTableReferenceRuleConfigurationConverter.convertToYamlString(data);
assertThat(actual, is("foo:LOGIC_TABLE,SUB_LOGIC_TABLE"));
}
|
@Override
public void filterConsumer(Exchange exchange, WebServiceMessage response) {
if (exchange != null) {
AttachmentMessage responseMessage = exchange.getMessage(AttachmentMessage.class);
processHeaderAndAttachments(responseMessage, response);
}
}
|
@Test
public void consumerWithHeader() throws Exception {
exchange.getOut().getHeaders().put("headerAttributeKey", "testAttributeValue");
exchange.getOut().getHeaders().put("headerAttributeElement", new QName("http://shouldBeInHeader", "myElement"));
filter.filterConsumer(exchange, message);
Assertions.assertThat(message.getAttachments()).isEmpty();
Assertions.assertThat(message.getSoapHeader().examineAllHeaderElements()).isNotEmpty().hasSize(1);
Assertions.assertThat(message.getSoapHeader().getAllAttributes()).isNotEmpty().hasSize(1);
}
|
public static String generateRandomPassword() {
SecureRandom random = new SecureRandom();
List<Character> pwdChars = new ArrayList<>();
pwdChars.add(LOWER_CASE.charAt(random.nextInt(LOWER_CASE.length())));
pwdChars.add(UPPER_CASE.charAt(random.nextInt(UPPER_CASE.length())));
pwdChars.add(DIGITS.charAt(random.nextInt(DIGITS.length())));
pwdChars.add(SPECIAL_CHARS.charAt(random.nextInt(SPECIAL_CHARS.length())));
// Fill the rest of the password with random characters from all categories
String allCharacters = LOWER_CASE + UPPER_CASE + DIGITS + SPECIAL_CHARS;
while (pwdChars.size() < PASSWORD_LENGTH) {
pwdChars.add(allCharacters.charAt(random.nextInt(allCharacters.length())));
}
// Shuffle to avoid predictable order
Collections.shuffle(pwdChars, random);
// Build the final password string
return pwdChars.stream().map(String::valueOf).collect(Collectors.joining());
}
|
@Test
void generatePwd() {
String pwd = PasswordGeneratorUtil.generateRandomPassword();
assertEquals(8, pwd.length());
}
|
public void dropTable(String dbName, String tableName) {
try (Timer ignored = Tracers.watchScope(EXTERNAL, "HMS.dropTable")) {
callRPC("dropTable", "Failed to drop table " + dbName + "." + tableName,
dbName, tableName, true, false);
}
}
|
@Test
public void testDropTable(@Mocked HiveMetaStoreClient metaStoreClient) throws TException {
new Expectations() {
{
metaStoreClient.dropTable("hive_db", "hive_table", anyBoolean, anyBoolean);
result = any;
}
};
HiveConf hiveConf = new HiveConf();
hiveConf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), "thrift://127.0.0.1:90300");
HiveMetaClient client = new HiveMetaClient(hiveConf);
client.dropTable("hive_db", "hive_table");
}
|
@Override
public void close() throws IOException {
httpAsyncRequestProducer.close();
}
|
@Test
public void close() throws IOException {
final HttpAsyncRequestProducer delegate = Mockito.mock(HttpAsyncRequestProducer.class);
final HttpAsyncRequestProducerDecorator decorator = new HttpAsyncRequestProducerDecorator(
delegate, null, null);
decorator.close();
Mockito.verify(delegate, Mockito.times(1)).close();
}
|
String loadAllKeys() {
return loadAllKeys;
}
|
@Test
public void testLoadAllKeysIsQuoted() {
Queries queries = new Queries(mapping, idColumn, columnMetadata);
String result = queries.loadAllKeys();
assertEquals("SELECT \"id\" FROM \"mymapping\"", result);
}
|
@Override
public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations, MediaType mediaType) {
return typesMatch(type, genericType) && MoreMediaTypes.TEXT_CSV_TYPE.isCompatible(mediaType);
}
|
@Test
void isWritableForSimpleMessagesAutoValueType() {
boolean isWritable = sut.isWriteable(AutoValue_SimpleMessageChunk.class, SimpleMessageChunk.class, null, MoreMediaTypes.TEXT_CSV_TYPE);
assertThat(isWritable).isTrue();
}
|
@Override
public Type getDefaultType() {
return PROJECTS;
}
|
@Test
public void default_type() {
assertThat(underTest.getDefaultType()).isEqualTo(PROJECTS);
}
|
@Override
public KTable<K, V> toTable() {
return toTable(NamedInternal.empty(), Materialized.with(keySerde, valueSerde));
}
|
@Test
public void shouldNotAllowNullMaterializedOnToTable() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.toTable((Materialized<String, String, KeyValueStore<Bytes, byte[]>>) null));
assertThat(exception.getMessage(), equalTo("materialized can't be null"));
}
|
static String generateTopicName(String baseString) {
return generateResourceId(
baseString,
ILLEGAL_TOPIC_NAME_CHARS,
REPLACE_TOPIC_NAME_CHAR,
MAX_TOPIC_NAME_LENGTH,
TIME_FORMAT);
}
|
@Test
public void testGenerateTopicNameShouldReplaceIllegalChars() {
String testBaseString = "^apache_beam/io\\kafka\0";
String actual = KafkaResourceManagerUtils.generateTopicName(testBaseString);
assertThat(actual).matches("-apache_beam-io-kafka--\\d{8}-\\d{6}-\\d{6}");
}
|
@Override
public T deserialize(final String topic, final byte[] bytes) {
try {
if (bytes == null) {
return null;
}
// don't use the JsonSchemaConverter to read this data because
// we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
// which is not currently available in the standard converters
final JsonNode value = isJsonSchema
? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
: MAPPER.readTree(bytes);
final Object coerced = enforceFieldType(
"$",
new JsonValueContext(value, schema)
);
if (LOG.isTraceEnabled()) {
LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
}
return SerdeUtils.castToTargetType(coerced, targetType);
} catch (final Exception e) {
// Clear location in order to avoid logging data, for security reasons
if (e instanceof JsonParseException) {
((JsonParseException) e).clearLocation();
}
throw new SerializationException(
"Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
}
}
|
@Test
public void shouldDeserializedJsonNumberAsDouble() {
// Given:
final KsqlJsonDeserializer<Double> deserializer =
givenDeserializerForSchema(Schema.OPTIONAL_FLOAT64_SCHEMA, Double.class);
final List<String> validCoercions = ImmutableList.of(
"42",
"42.000",
"\"42\"",
"\"42.000\""
);
validCoercions.forEach(value -> {
final byte[] bytes = addMagic(value.getBytes(StandardCharsets.UTF_8));
// When:
final Object result = deserializer.deserialize(SOME_TOPIC, bytes);
// Then:
assertThat(result, is(42.0));
});
}
|
public static String serializeRecordToJsonExpandingValue(ObjectMapper mapper, Record<GenericObject> record,
boolean flatten)
throws JsonProcessingException {
JsonRecord jsonRecord = new JsonRecord();
GenericObject value = record.getValue();
if (value != null) {
jsonRecord.setPayload(toJsonSerializable(record.getSchema(), value.getNativeObject()));
}
record.getKey().ifPresent(jsonRecord::setKey);
record.getTopicName().ifPresent(jsonRecord::setTopicName);
record.getEventTime().ifPresent(jsonRecord::setEventTime);
record.getProperties().forEach(jsonRecord::addProperty);
if (flatten) {
JsonNode jsonNode = mapper.convertValue(jsonRecord, JsonNode.class);
return JsonFlattener.flatten(new JacksonJsonValue(jsonNode));
} else {
return mapper.writeValueAsString(jsonRecord);
}
}
|
@Test(dataProvider = "schemaType")
public void testKeyValueSerializeRecordToJsonExpandingValue(SchemaType schemaType) throws Exception {
RecordSchemaBuilder keySchemaBuilder = org.apache.pulsar.client.api.schema.SchemaBuilder.record("key");
keySchemaBuilder.field("a").type(SchemaType.STRING).optional().defaultValue(null);
keySchemaBuilder.field("b").type(SchemaType.INT32).optional().defaultValue(null);
GenericSchema<GenericRecord> keySchema = Schema.generic(keySchemaBuilder.build(schemaType));
GenericRecord keyGenericRecord = keySchema.newRecordBuilder()
.set("a", "1")
.set("b", 1)
.build();
RecordSchemaBuilder valueSchemaBuilder = org.apache.pulsar.client.api.schema.SchemaBuilder.record("value");
valueSchemaBuilder.field("c").type(SchemaType.STRING).optional().defaultValue(null);
valueSchemaBuilder.field("d").type(SchemaType.INT32).optional().defaultValue(null);
RecordSchemaBuilder udtSchemaBuilder = SchemaBuilder.record("type1");
udtSchemaBuilder.field("a").type(SchemaType.STRING).optional().defaultValue(null);
udtSchemaBuilder.field("b").type(SchemaType.BOOLEAN).optional().defaultValue(null);
udtSchemaBuilder.field("d").type(SchemaType.DOUBLE).optional().defaultValue(null);
udtSchemaBuilder.field("f").type(SchemaType.FLOAT).optional().defaultValue(null);
udtSchemaBuilder.field("i").type(SchemaType.INT32).optional().defaultValue(null);
udtSchemaBuilder.field("l").type(SchemaType.INT64).optional().defaultValue(null);
GenericSchema<GenericRecord> udtGenericSchema = Schema.generic(udtSchemaBuilder.build(schemaType));
valueSchemaBuilder.field("e", udtGenericSchema).type(schemaType).optional().defaultValue(null);
GenericSchema<GenericRecord> valueSchema = Schema.generic(valueSchemaBuilder.build(schemaType));
GenericRecord valueGenericRecord = valueSchema.newRecordBuilder()
.set("c", "1")
.set("d", 1)
.set("e", udtGenericSchema.newRecordBuilder()
.set("a", "a")
.set("b", true)
.set("d", 1.0)
.set("f", 1.0f)
.set("i", 1)
.set("l", 10L)
.build())
.build();
Schema<org.apache.pulsar.common.schema.KeyValue<GenericRecord, GenericRecord>> keyValueSchema =
Schema.KeyValue(keySchema, valueSchema, KeyValueEncodingType.INLINE);
org.apache.pulsar.common.schema.KeyValue<GenericRecord, GenericRecord>
keyValue = new org.apache.pulsar.common.schema.KeyValue<>(keyGenericRecord, valueGenericRecord);
GenericObject genericObject = new GenericObject() {
@Override
public SchemaType getSchemaType() {
return SchemaType.KEY_VALUE;
}
@Override
public Object getNativeObject() {
return keyValue;
}
};
Map<String, String> properties = new HashMap<>();
properties.put("prop-key", "prop-value");
Record<GenericObject> genericObjectRecord = new Record<GenericObject>() {
@Override
public Optional<String> getTopicName() {
return Optional.of("data-ks1.table1");
}
@Override
public org.apache.pulsar.client.api.Schema getSchema() {
return keyValueSchema;
}
@Override
public Optional<String> getKey() {
return Optional.of("message-key");
}
@Override
public GenericObject getValue() {
return genericObject;
}
@Override
public Map<String, String> getProperties() {
return properties;
}
@Override
public Optional<Long> getEventTime() {
return Optional.of(1648502845803L);
}
};
ObjectMapper objectMapper = new ObjectMapper().setSerializationInclusion(JsonInclude.Include.NON_NULL);
String json = Utils.serializeRecordToJsonExpandingValue(objectMapper, genericObjectRecord, false);
assertEquals(json, "{\"topicName\":\"data-ks1.table1\",\"key\":\"message-key\","
+ "\"payload\":{\"value\":{\"c\":\"1\",\"d\":1,\"e\":{\"a\":\"a\",\"b\":true,\"d\":1.0,\"f\":1.0,"
+ "\"i\":1,\"l\":10}},\"key\":{\"a\":\"1\",\"b\":1}},\"properties\":{\"prop-key\":\"prop-value\"},"
+ "\"eventTime\":1648502845803}");
json = Utils.serializeRecordToJsonExpandingValue(objectMapper, genericObjectRecord, true);
assertEquals(json, "{\"topicName\":\"data-ks1.table1\",\"key\":\"message-key\",\"payload.value.c\":\"1\","
+ "\"payload.value.d\":1,\"payload.value.e.a\":\"a\",\"payload.value.e.b\":true,\"payload.value.e"
+ ".d\":1.0,\"payload.value.e.f\":1.0,\"payload.value.e.i\":1,\"payload.value.e.l\":10,\"payload.key"
+ ".a\":\"1\",\"payload.key.b\":1,\"properties.prop-key\":\"prop-value\",\"eventTime\":1648502845803}");
}
|
public String getServiceAccount() {
return flinkConfig.get(KubernetesConfigOptions.JOB_MANAGER_SERVICE_ACCOUNT);
}
|
@Test
void testGetServiceAccountFallback() {
flinkConfig.set(KubernetesConfigOptions.KUBERNETES_SERVICE_ACCOUNT, "flink-fallback");
assertThat(kubernetesJobManagerParameters.getServiceAccount()).isEqualTo("flink-fallback");
}
|
public static SQLStatementParserEngine getSQLStatementParserEngine(final DatabaseType databaseType,
final CacheOption sqlStatementCacheOption, final CacheOption parseTreeCacheOption) {
SQLStatementParserEngine result = ENGINES.get(databaseType);
if (null == result) {
result = ENGINES.computeIfAbsent(databaseType, key -> new SQLStatementParserEngine(key, sqlStatementCacheOption, parseTreeCacheOption));
} else if (!result.getSqlStatementCacheOption().equals(sqlStatementCacheOption) || !result.getParseTreeCacheOption().equals(parseTreeCacheOption)) {
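// Cache options changed: build a fresh engine and replace the cached one.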
result = new SQLStatementParserEngine(databaseType, sqlStatementCacheOption, parseTreeCacheOption);
ENGINES.put(databaseType, result);
}
return result;
}
|
@Test
void assertGetSQLStatementParserEngineNotSame() {
SQLStatementParserEngine before = SQLStatementParserEngineFactory.getSQLStatementParserEngine(databaseType, new CacheOption(2000, 65535L), new CacheOption(64, 1024L));
SQLStatementParserEngine after = SQLStatementParserEngineFactory.getSQLStatementParserEngine(databaseType, new CacheOption(2000, 65535L), new CacheOption(128, 1024L));
assertNotSame(before, after);
}
|
public static void checkContextPath(String contextPath) {
if (contextPath == null) {
return;
}
Matcher matcher = CONTEXT_PATH_MATCH.matcher(contextPath);
if (matcher.find()) {
throw new IllegalArgumentException("Illegal url path expression");
}
}
|
@Test
void testContextPathLegal() {
String contextPath1 = "/nacos";
ValidatorUtils.checkContextPath(contextPath1);
String contextPath2 = "nacos";
ValidatorUtils.checkContextPath(contextPath2);
String contextPath3 = "/";
ValidatorUtils.checkContextPath(contextPath3);
String contextPath4 = "";
ValidatorUtils.checkContextPath(contextPath4);
// allow null
ValidatorUtils.checkContextPath(null);
}
|
public ShardingSphereDatabase getDatabase(final String databaseName) {
return databases.get(databaseName);
}
|
@Test
void assertGetDatabase() {
ShardingSphereRule globalRule = mock(ShardingSphereRule.class);
ShardingSphereDatabase database = mockDatabase(mock(ResourceMetaData.class, RETURNS_DEEP_STUBS), new MockedDataSource(), globalRule);
Map<String, ShardingSphereDatabase> databases = new HashMap<>(Collections.singletonMap("foo_db", database));
ConfigurationProperties configProps = new ConfigurationProperties(new Properties());
ShardingSphereMetaData metaData = new ShardingSphereMetaData(databases, mock(ResourceMetaData.class), new RuleMetaData(Collections.singleton(globalRule)), configProps);
assertThat(metaData.getDatabase("foo_db"), is(database));
}
|
public void createOrUpdateItem(final String key, final Object value, final String comment) {
this.createOrUpdateItem(key, GsonUtils.getInstance().toJson(value), comment);
}
|
@Test
public void testCreateOrUpdateItem() {
doNothing().when(apolloClient)
.createOrUpdateItem(Mockito.any(), Mockito.<Object>any(), Mockito.any());
apolloClient.createOrUpdateItem("Key", (Object) "Value", "Comment");
verify(apolloClient).createOrUpdateItem(Mockito.any(), Mockito.<Object>any(), Mockito.any());
}
|
public static Read read() {
return new AutoValue_TFRecordIO_Read.Builder()
.setValidate(true)
.setCompression(Compression.AUTO)
.build();
}
|
@Test
public void testReadNamed() {
readPipeline.enableAbandonedNodeEnforcement(false);
assertThat(
readPipeline.apply(TFRecordIO.read().from("foo.*").withoutValidation()).getName(),
startsWith("TFRecordIO.Read/Read"));
assertThat(
readPipeline.apply("MyRead", TFRecordIO.read().from("foo.*").withoutValidation()).getName(),
startsWith("MyRead/Read"));
}
|
public static String byteArrayToHexString(byte[] bytes) {
return byteArrayToHexString(bytes, "0x", " ");
}
|
@Test
public void byteArrayToHexString() {
assertEquals("", FormatUtils.byteArrayToHexString(new byte[0]));
assertEquals("0x01", FormatUtils.byteArrayToHexString(new byte[]{1}));
assertEquals("0x01 0xac", FormatUtils.byteArrayToHexString(new byte[]{1, (byte) 0xac}));
assertEquals("01ac",
FormatUtils.byteArrayToHexString(new byte[] {1, (byte) 0xac}, "", ""));
}
|
public static byte[] baToHexBytes(byte[] ba) {
byte[] hb = new byte[ba.length * 2];
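// Each input byte expands to two hex characters: high nibble first, then low nibble.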
for (int i = 0; i < ba.length; i++) {
byte upper = (byte) ((ba[i] & 0xf0) >> 4);
byte lower = (byte) (ba[i] & 0x0f);
hb[2 * i] = toHexChar(upper);
hb[2 * i + 1] = toHexChar(lower);
}
return hb;
}
|
@Test
public void testBaToHexBytes() throws Exception {
assertEqualsArray(new byte[]{}, JOrphanUtils.baToHexBytes(new byte[]{}));
assertEqualsArray(new byte[]{'0', '0'}, JOrphanUtils.baToHexBytes(new byte[]{0}));
assertEqualsArray("0f107f8081ff".getBytes(StandardCharsets.UTF_8),
JOrphanUtils.baToHexBytes(new byte[]{15, 16, 127, -128, -127, -1}));
}
|
@Override
public void register(long ref, String uuid, boolean file) {
requireNonNull(uuid, "uuid can not be null");
Long existingRef = refsByUuid.get(uuid);
if (existingRef != null) {
checkArgument(ref == existingRef, "Uuid '%s' already registered under ref '%s' in repository", uuid, existingRef);
boolean existingIsFile = fileUuids.contains(uuid);
checkArgument(file == existingIsFile, "Uuid '%s' already registered but %sas a File", uuid, existingIsFile ? "" : "not ");
} else {
refsByUuid.put(uuid, ref);
if (file) {
fileUuids.add(uuid);
}
}
}
|
@Test
public void register_throws_IAE_same_uuid_added_with_different_refs() {
underTest.register(SOME_REF, SOME_UUID, true);
assertThatThrownBy(() -> underTest.register(946512, SOME_UUID, true))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("Uuid '" + SOME_UUID + "' already registered under ref '" + SOME_REF + "' in repository");
}
|
public static String toJsonStr(JSON json, int indentFactor) {
if (null == json) {
return null;
}
return json.toJSONString(indentFactor);
}
|
@Test
public void toJsonStrFromSortedTest() {
final SortedMap<Object, Object> sortedMap = new TreeMap<Object, Object>() {
private static final long serialVersionUID = 1L;
{
put("attributes", "a");
put("b", "b");
put("c", "c");
}
};
assertEquals("{\"attributes\":\"a\",\"b\":\"b\",\"c\":\"c\"}", JSONUtil.toJsonStr(sortedMap));
}
|
@Override
@Deprecated
public <K1, V1> KStream<K1, V1> flatTransform(final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, Iterable<KeyValue<K1, V1>>> transformerSupplier,
final String... stateStoreNames) {
Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
final String name = builder.newProcessorName(TRANSFORM_NAME);
return flatTransform(transformerSupplier, Named.as(name), stateStoreNames);
}
|
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullNamedOnFlatTransformWithStoreName() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.flatTransform(flatTransformerSupplier, (Named) null, "storeName"));
assertThat(exception.getMessage(), equalTo("named can't be null"));
}
|
@Override
public String render(String text) {
if (StringUtils.isBlank(text)) {
return "";
}
if (regex.isEmpty() || link.isEmpty()) {
Comment comment = new Comment();
comment.escapeAndAdd(text);
return comment.render();
}
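// Replace every regex match with a rendered link, escaping the plain text around it.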
try {
Matcher matcher = Pattern.compile(regex).matcher(text);
int start = 0;
Comment comment = new Comment();
while (hasMatch(matcher)) {
comment.escapeAndAdd(text.substring(start, matcher.start()));
comment.add(dynamicLink(matcher));
start = matcher.end();
}
comment.escapeAndAdd(text.substring(start));
return comment.render();
} catch (PatternSyntaxException e) {
LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
}
return text;
}
|
@Test
public void shouldEscapeTheWholeCommentIfNoneIsMatched() {
trackingTool = new DefaultCommentRenderer("", "");
String toRender = "some <string>";
String result = trackingTool.render(toRender);
assertThat(result, is(StringEscapeUtils.escapeHtml4(toRender)));
}
|
@Transactional
public AppNamespace createAppNamespaceInLocal(AppNamespace appNamespace) {
return createAppNamespaceInLocal(appNamespace, true);
}
|
@Test
@Sql(scripts = "/sql/appnamespaceservice/init-appnamespace.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD)
@Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD)
public void testCreatePublicAppNamespaceNotExistedWithNoAppendnamespacePrefix() {
AppNamespace appNamespace = assembleBaseAppNamespace();
appNamespace.setPublic(true);
appNamespace.setName("old");
appNamespace.setFormat(ConfigFileFormat.Properties.getValue());
AppNamespace createdAppNamespace = appNamespaceService.createAppNamespaceInLocal(appNamespace, false);
Assert.assertNotNull(createdAppNamespace);
Assert.assertEquals(appNamespace.getName(), createdAppNamespace.getName());
}
|
@Override
protected String defaultWarehouseLocation(TableIdentifier table) {
return SLASH.join(defaultNamespaceLocation(table.namespace()), table.name());
}
|
@Test
public void testDefaultWarehouseLocation() throws Exception {
TableIdentifier testTable = TableIdentifier.of("tbl");
TableIdentifier testTable2 = TableIdentifier.of(Namespace.of("ns"), "tbl");
assertThat(warehouseLocation + "/" + testTable.name())
.isEqualTo(catalog.defaultWarehouseLocation(testTable));
assertThat(warehouseLocation + "/" + testTable2.namespace() + "/" + testTable2.name())
.isEqualTo(catalog.defaultWarehouseLocation(testTable2));
}
|
@Override
public Type classify(final Throwable e) {
final Type type = SchemaRegistryUtil.isAuthErrorCode(e) ? Type.USER : Type.UNKNOWN;
if (type == Type.USER) {
LOG.info(
"Classified error as USER error based on missing SR subject access rights. "
+ "Query ID: {} Exception: {}",
queryId,
e);
}
return type;
}
|
@Test
public void shouldClassifySRAuthorizationErrorCodeAsUserError() {
// Given:
final Exception e = new RestClientException("foo", 403, 40301);
// When:
final QueryError.Type type = new SchemaAuthorizationClassifier("").classify(e);
// Then:
assertThat(type, is(QueryError.Type.USER));
}
|
@Override
@SuppressWarnings("unchecked")
public <U> U[] toArray(U[] a) {
if (a == null) {
throw new NullPointerException("Input array can not be null");
}
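// Grow the destination array via reflection when it is too small, per the Collection#toArray contract.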
if (a.length < size) {
a = (U[]) java.lang.reflect.Array.newInstance(a.getClass()
.getComponentType(), size);
}
int currentIndex = 0;
DoubleLinkedElement<T> current = head;
while (current != null) {
T curr = current.element;
a[currentIndex++] = (U) curr;
current = current.after;
}
return a;
}
|
@Test
public void testOther() {
LOG.info("Test other");
assertTrue(set.addAll(list));
// to array
Integer[] array = set.toArray(new Integer[0]);
assertEquals(NUM, array.length);
for (int i = 0; i < array.length; i++) {
assertTrue(list.contains(array[i]));
}
assertEquals(NUM, set.size());
// to array
Object[] array2 = set.toArray();
assertEquals(NUM, array2.length);
for (int i = 0; i < array2.length; i++) {
assertTrue(list.contains(array2[i]));
}
LOG.info("Test capacity - DONE");
}
|
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
if (bytes == null) {
return null;
}
try {
final String recordCsvString = new String(bytes, StandardCharsets.UTF_8);
final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, csvFormat)
.getRecords();
if (csvRecords.isEmpty()) {
throw new SerializationException("No fields in record");
}
final CSVRecord csvRecord = csvRecords.get(0);
if (csvRecord == null || csvRecord.size() == 0) {
throw new SerializationException("No fields in record.");
}
SerdeUtils.throwOnColumnCountMismatch(parsers.size(), csvRecord.size(), false, topic);
final List<Object> values = new ArrayList<>(parsers.size());
final Iterator<Parser> pIt = parsers.iterator();
for (int i = 0; i < csvRecord.size(); i++) {
final String value = csvRecord.get(i);
final Parser parser = pIt.next();
final Object parsed = value == null || value.isEmpty()
? null
: parser.parse(value);
values.add(parsed);
}
return values;
} catch (final Exception e) {
throw new SerializationException("Error deserializing delimited", e);
}
}
|
@Test
public void shouldDeserializedTopLevelPrimitiveTypeIfSchemaHasOnlySingleField() {
// Given:
final PersistenceSchema schema = persistenceSchema(
column("id", SqlTypes.INTEGER)
);
final KsqlDelimitedDeserializer deserializer =
createDeserializer(schema);
final byte[] bytes = "10".getBytes(StandardCharsets.UTF_8);
// When:
final List<?> result = deserializer.deserialize("", bytes);
// Then:
assertThat(result, contains(10));
}
|
public static String getViewContent(View view) {
return getViewContent(view, false);
}
|
@Test
public void testGetViewContent() {
TextView textView1 = new TextView(mApplication);
textView1.setText("child1");
Assert.assertEquals("child1", SAViewUtils.getViewContent(textView1));
}
|
@Override
public void putTaskConfigs(String connector, List<Map<String, String>> configs) {
Timer timer = time.timer(READ_WRITE_TOTAL_TIMEOUT_MS);
// Make sure we're at the end of the log. We should be the only writer, but we want to make sure we don't have
// any outstanding lagging data to consume.
try {
configLog.readToEnd().get(timer.remainingMs(), TimeUnit.MILLISECONDS);
timer.update();
} catch (InterruptedException | ExecutionException | TimeoutException e) {
log.error("Failed to write root configuration to Kafka: ", e);
throw new ConnectException("Error writing root configuration to Kafka", e);
}
int taskCount = configs.size();
// Send all the individual updates
int index = 0;
List<ProducerKeyValue> keyValues = new ArrayList<>();
for (Map<String, String> taskConfig: configs) {
Struct connectConfig = new Struct(TASK_CONFIGURATION_V0);
connectConfig.put("properties", taskConfig);
byte[] serializedConfig = converter.fromConnectData(topic, TASK_CONFIGURATION_V0, connectConfig);
log.debug("Writing configuration for connector '{}' task {}", connector, index);
ConnectorTaskId connectorTaskId = new ConnectorTaskId(connector, index);
keyValues.add(new ProducerKeyValue(TASK_KEY(connectorTaskId), serializedConfig));
index++;
}
try {
sendPrivileged(keyValues, timer);
} catch (ExecutionException | InterruptedException | TimeoutException e) {
log.error("Failed to write task configurations to Kafka", e);
throw new ConnectException("Error writing task configurations to Kafka", e);
}
// Finally, send the commit to update the number of tasks and apply the new configs, then wait until we read to
// the end of the log
try {
// Read to end to ensure all the task configs have been written
if (taskCount > 0) {
configLog.readToEnd().get(timer.remainingMs(), TimeUnit.MILLISECONDS);
timer.update();
}
// Write the commit message
Struct connectConfig = new Struct(CONNECTOR_TASKS_COMMIT_V0);
connectConfig.put("tasks", taskCount);
byte[] serializedConfig = converter.fromConnectData(topic, CONNECTOR_TASKS_COMMIT_V0, connectConfig);
log.debug("Writing commit for connector '{}' with {} tasks.", connector, taskCount);
sendPrivileged(COMMIT_TASKS_KEY(connector), serializedConfig, timer);
// Read to end to ensure all the commit messages have been written
configLog.readToEnd().get(timer.remainingMs(), TimeUnit.MILLISECONDS);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
log.error("Failed to write root configuration to Kafka: ", e);
throw new ConnectException("Error writing root configuration to Kafka", e);
}
}
|
@Test
public void testPutTaskConfigs() throws Exception {
configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
verifyConfigure();
configStorage.start();
verify(configLog).start();
doAnswer(expectReadToEnd(new LinkedHashMap<>()))
.doAnswer(expectReadToEnd(new LinkedHashMap<>()))
.doAnswer(expectReadToEnd(new LinkedHashMap<String, byte[]>() {{
put(TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
put(TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1));
put(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2));
}})
)
.when(configLog).readToEnd();
// Task configs should read to end, write to the log, read to end, write root, then read to end again
expectConvertWriteRead2(
TASK_CONFIG_KEYS.get(0), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(0),
new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)));
expectConvertWriteRead2(
TASK_CONFIG_KEYS.get(1), KafkaConfigBackingStore.TASK_CONFIGURATION_V0, CONFIGS_SERIALIZED.get(1),
new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)));
expectConvertWriteRead2(
COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2),
new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 2)); // Starts with 0 tasks, after update has 2
// Bootstrap as if we had already added the connector, but no tasks had been added yet
addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
// Null before writing
ClusterConfigState configState = configStorage.snapshot();
assertEquals(-1, configState.offset());
assertNull(configState.taskConfig(TASK_IDS.get(0)));
assertNull(configState.taskConfig(TASK_IDS.get(1)));
// Writing task configs should block until all the writes have been performed and the root record update
// has completed
List<Map<String, String>> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1));
configStorage.putTaskConfigs("connector1", taskConfigs);
// Validate root config by listing all connectors and tasks
configState = configStorage.snapshot();
assertEquals(3, configState.offset());
String connectorName = CONNECTOR_IDS.get(0);
assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors()));
assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName));
assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0)));
assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1)));
assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
// As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)));
configStorage.stop();
verify(configLog).stop();
}
|
public Integer doCall() throws Exception {
File resolvedWorkingDir;
if (workingDir != null) {
resolvedWorkingDir = new File(workingDir);
} else {
String projectName;
if (name != null) {
projectName = KubernetesHelper.sanitize(name);
} else if (filePath != null) {
projectName = KubernetesHelper.sanitize(FileUtil.onlyName(SourceScheme.onlyName(filePath)));
} else {
printer().println("Name or source file must be set");
return 1;
}
resolvedWorkingDir = new File(RUN_PLATFORM_DIR + "/" + projectName);
}
if (!resolvedWorkingDir.exists()) {
printer().printf("Failed to resolve exported project from path '%s'%n", resolvedWorkingDir);
return 1;
}
File manifest = KubernetesHelper.resolveKubernetesManifest(new File(resolvedWorkingDir, "target/kubernetes"));
try (FileInputStream fis = new FileInputStream(manifest)) {
List<StatusDetails> status;
if (namespace != null) {
status = client().load(fis).inNamespace(namespace).delete();
} else {
status = client().load(fis).delete();
}
status.forEach(s -> printer().printf("Deleted: %s '%s'%n", StringHelper.capitalize(s.getKind()), s.getName()));
}
return 0;
}
|
@Test
public void shouldDeleteKubernetesResources() throws Exception {
kubernetesClient.apps().deployments().resource(new DeploymentBuilder()
.withNewMetadata()
.withName("route")
.addToLabels(BaseTrait.INTEGRATION_LABEL, "route")
.endMetadata()
.withNewSpec()
.withNewTemplate()
.withNewSpec()
.addToContainers(new ContainerBuilder()
.withName("route")
.withImage("quay.io/camel-test/route:1.0-SNAPSHOT")
.build())
.endSpec()
.endTemplate()
.endSpec()
.build()).create();
kubernetesClient.services().resource(new ServiceBuilder()
.withNewMetadata()
.withName("route")
.endMetadata()
.withNewSpec()
.withPorts(new ServicePortBuilder()
.withPort(80)
.withProtocol("TCP")
.withName("http")
.withTargetPort(new IntOrString(8080))
.build())
.endSpec()
.build()).create();
KubernetesRun run = new KubernetesRun(new CamelJBangMain().withPrinter(printer));
run.withClient(kubernetesClient);
run.imageGroup = "camel-test";
run.imageBuild = false;
run.imagePush = false;
run.filePaths = new String[] { "classpath:route.yaml" };
run.output = "yaml";
int exit = run.doCall();
Assertions.assertEquals(0, exit);
KubernetesDelete command = new KubernetesDelete(new CamelJBangMain().withPrinter(printer));
command.withClient(kubernetesClient);
command.name = "route";
exit = command.doCall();
Assertions.assertEquals(0, exit);
Assertions.assertNull(kubernetesClient.apps().deployments().withName("route").get());
Assertions.assertNull(kubernetesClient.services().withName("route").get());
}
|
static MemberMap createNew(MemberImpl... members) {
return createNew(0, members);
}
|
@Test
public void createNew() {
MemberImpl[] members = newMembers(5);
MemberMap map = MemberMap.createNew(members);
assertEquals(members.length, map.getMembers().size());
assertEquals(members.length, map.getAddresses().size());
assertEquals(members.length, map.size());
for (MemberImpl member : members) {
assertContains(map, member.getAddress());
assertContains(map, member.getUuid());
assertSame(member, map.getMember(member.getAddress()));
assertSame(member, map.getMember(member.getUuid()));
}
assertMemberSet(map);
}
|
public boolean isGameRunning() {
return status == GameStatus.RUNNING;
}
|
@Test
void testIsGameRunning() {
assertFalse(gameLoop.isGameRunning());
}
|
@Override
@Transactional(rollbackFor = Exception.class)
public Long createJob(JobSaveReqVO createReqVO) throws SchedulerException {
validateCronExpression(createReqVO.getCronExpression());
// Validate uniqueness
if (jobMapper.selectByHandlerName(createReqVO.getHandlerName()) != null) {
throw exception(JOB_HANDLER_EXISTS);
}
// Insert
JobDO job = BeanUtils.toBean(createReqVO, JobDO.class);
job.setStatus(JobStatusEnum.INIT.getStatus());
fillJobMonitorTimeoutEmpty(job);
jobMapper.insert(job);
// Add the Job to Quartz
schedulerManager.addJob(job.getId(), job.getHandlerName(), job.getHandlerParam(), job.getCronExpression(),
createReqVO.getRetryCount(), createReqVO.getRetryInterval());
// Update
JobDO updateObj = JobDO.builder().id(job.getId()).status(JobStatusEnum.NORMAL.getStatus()).build();
jobMapper.updateById(updateObj);
// Return
return job.getId();
}
|
@Test
public void testCreateJob_cronExpressionValid() {
// Prepare parameters. The cron expression is a String and defaults to a random string.
JobSaveReqVO reqVO = randomPojo(JobSaveReqVO.class);
// Invoke and assert the exception
assertServiceException(() -> jobService.createJob(reqVO), JOB_CRON_EXPRESSION_VALID);
}
|
@Bean
public PluginDataHandler wafPluginDataHandler() {
return new WafPluginDataHandler();
}
|
@Test
public void testWafPluginDataHandler() {
new ApplicationContextRunner()
.withConfiguration(AutoConfigurations.of(WafPluginConfiguration.class))
.withBean(WafPluginConfigurationTest.class)
.withPropertyValues("debug=true")
.run(context -> {
PluginDataHandler handler = context.getBean("wafPluginDataHandler", PluginDataHandler.class);
assertNotNull(handler);
assertThat(handler.pluginNamed()).isEqualTo(PluginEnum.WAF.getName());
});
}
|
@Override
public void accept(ModemVisitor modemVisitor) {
if (modemVisitor instanceof HayesVisitor) {
((HayesVisitor) modemVisitor).visit(this);
} else {
LOGGER.info("Only HayesVisitor is allowed to visit Hayes modem");
}
}
|
@Test
void testAcceptForDos() {
var hayes = new Hayes();
var mockVisitor = mock(ConfigureForDosVisitor.class);
hayes.accept(mockVisitor);
verify((HayesVisitor) mockVisitor).visit(eq(hayes));
}
|
public void set(PropertyKey key, Object value) {
set(key, value, Source.RUNTIME);
}
|
@Test
public void getMalformedBooleanThrowsException() {
mThrown.expect(IllegalArgumentException.class);
mConfiguration.set(PropertyKey.WEB_THREAD_DUMP_TO_LOG, 2);
}
|
@VisibleForTesting
WxMaService getWxMaService(Integer userType) {
// Step 1: query the DB configuration to get the corresponding WxMaService object
SocialClientDO client = socialClientMapper.selectBySocialTypeAndUserType(
SocialTypeEnum.WECHAT_MINI_APP.getType(), userType);
if (client != null && Objects.equals(client.getStatus(), CommonStatusEnum.ENABLE.getStatus())) {
return wxMaServiceCache.getUnchecked(client.getClientId() + ":" + client.getClientSecret());
}
// Step 2: if no DB configuration exists, use the WxMaService from application-*.yaml
return wxMaService;
}
|
@Test
public void testGetWxMaService_clientNull() {
// Prepare parameters
Integer userType = randomPojo(UserTypeEnum.class).getValue();
// Mock methods
// Invoke
WxMaService result = socialClientService.getWxMaService(userType);
// Assert
assertSame(wxMaService, result);
}
|
public void initialize(ServiceConfiguration conf) throws Exception {
for (ProtocolHandler handler : handlers.values()) {
handler.initialize(conf);
}
}
|
@Test
public void testInitialize() throws Exception {
ServiceConfiguration conf = new ServiceConfiguration();
handlers.initialize(conf);
verify(handler1, times(1)).initialize(same(conf));
verify(handler2, times(1)).initialize(same(conf));
}
|
@Override
public void init(InitContext context) {
String state = context.generateCsrfState();
OAuth20Service scribe = newScribeBuilder(context).build(scribeApi);
String url = scribe.getAuthorizationUrl(state);
context.redirectTo(url);
}
|
@Test
public void init() {
enableBitbucketAuthentication(true);
OAuth2IdentityProvider.InitContext context = mock(OAuth2IdentityProvider.InitContext.class);
when(context.generateCsrfState()).thenReturn("state");
when(context.getCallbackUrl()).thenReturn("http://localhost/callback");
underTest.init(context);
verify(context).redirectTo("https://bitbucket.org/site/oauth2/authorize?response_type=code&client_id=id&redirect_uri=http%3A%2F%2Flocalhost%2Fcallback&scope=account&state=state");
}
|
@Override
public void initialize(String inputName, Map<String, String> properties) {
this.catalogProperties = ImmutableMap.copyOf(properties);
this.name = inputName;
if (conf == null) {
LOG.warn("No Hadoop Configuration was set, using the default environment Configuration");
this.conf = new Configuration();
}
if (properties.containsKey(CatalogProperties.URI)) {
this.conf.set(HiveConf.ConfVars.METASTOREURIS.varname, properties.get(CatalogProperties.URI));
}
if (properties.containsKey(CatalogProperties.WAREHOUSE_LOCATION)) {
this.conf.set(
HiveConf.ConfVars.METASTOREWAREHOUSE.varname,
LocationUtil.stripTrailingSlash(properties.get(CatalogProperties.WAREHOUSE_LOCATION)));
}
this.listAllTables =
Boolean.parseBoolean(properties.getOrDefault(LIST_ALL_TABLES, LIST_ALL_TABLES_DEFAULT));
String fileIOImpl = properties.get(CatalogProperties.FILE_IO_IMPL);
this.fileIO =
fileIOImpl == null
? new HadoopFileIO(conf)
: CatalogUtil.loadFileIO(fileIOImpl, properties, conf);
this.clients = new CachedClientPool(conf, properties);
this.fileIOTracker = new FileIOTracker();
}
|
@Test
public void testInitialize() {
assertThatNoException()
.isThrownBy(
() -> {
HiveCatalog hiveCatalog = new HiveCatalog();
hiveCatalog.initialize("hive", Maps.newHashMap());
});
}
|
public TableStats merge(TableStats other, @Nullable Set<String> partitionKeys) {
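// A negative row count marks unknown stats; merging anything with unknown yields unknown.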
if (this.rowCount < 0 || other.rowCount < 0) {
return TableStats.UNKNOWN;
}
long rowCount =
this.rowCount >= 0 && other.rowCount >= 0
? this.rowCount + other.rowCount
: UNKNOWN.rowCount;
return new TableStats(rowCount, mergeColumnStates(other, partitionKeys));
}
|
@Test
void testMerge() {
Map<String, ColumnStats> colStats1 = new HashMap<>();
colStats1.put("a", new ColumnStats(4L, 5L, 2D, 3, 15, 2));
TableStats stats1 = new TableStats(30, colStats1);
Map<String, ColumnStats> colStats2 = new HashMap<>();
colStats2.put("a", new ColumnStats(3L, 15L, 12D, 23, 35, 6));
TableStats stats2 = new TableStats(32, colStats2);
Map<String, ColumnStats> colStatsMerge = new HashMap<>();
colStatsMerge.put("a", new ColumnStats(4L, 20L, 7D, 23, 35, 2));
assertThat(stats1.merge(stats2, null)).isEqualTo(new TableStats(62, colStatsMerge));
Map<String, ColumnStats> colStatsMerge2 = new HashMap<>();
colStatsMerge2.put("a", new ColumnStats(4L, 20L, 7D, 23, 35, 2));
assertThat(stats1.merge(stats2, new HashSet<>()))
.isEqualTo(new TableStats(62, colStatsMerge2));
// Test column stats merge when column 'a' is a partition key. The ndv of partition-key
// columns is merged using sum instead of max.
Map<String, ColumnStats> colStatsMerge3 = new HashMap<>();
colStatsMerge3.put("a", new ColumnStats(7L, 20L, 7D, 23, 35, 2));
assertThat(stats1.merge(stats2, new HashSet<>(Collections.singletonList("a"))))
.isEqualTo(new TableStats(62, colStatsMerge3));
Map<String, ColumnStats> colStats3 = new HashMap<>();
colStats3.put("a", new ColumnStats(4L, 5L, 2D, 3, 15, 2));
colStats3.put("b", new ColumnStats(4L, 5L, 2D, 3, 15, 2));
stats1 = new TableStats(30, colStats3);
Map<String, ColumnStats> colStats4 = new HashMap<>();
colStats4.put("a", new ColumnStats(3L, 15L, 12D, 23, 35, 6));
colStats4.put("b", new ColumnStats(3L, 15L, 12D, 23, 35, 6));
stats2 = new TableStats(32, colStats4);
Map<String, ColumnStats> colStatsMerge4 = new HashMap<>();
colStatsMerge4.put("a", new ColumnStats(7L, 20L, 7D, 23, 35, 2));
colStatsMerge4.put("b", new ColumnStats(4L, 20L, 7D, 23, 35, 2));
assertThat(stats1.merge(stats2, new HashSet<>(Collections.singletonList("a"))))
.isEqualTo(new TableStats(62, colStatsMerge4));
// test merge with one side is TableStats.UNKNOWN.
stats2 = TableStats.UNKNOWN;
assertThat(stats1.merge(stats2, null)).isEqualTo(TableStats.UNKNOWN);
// test merge with one side have no column stats.
stats2 = new TableStats(32);
assertThat(stats1.merge(stats2, null)).isEqualTo(new TableStats(62));
}
|
@Description("Inverse of normal cdf given a mean, std, and probability")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double inverseNormalCdf(@SqlType(StandardTypes.DOUBLE) double mean, @SqlType(StandardTypes.DOUBLE) double sd, @SqlType(StandardTypes.DOUBLE) double p)
{
checkCondition(p > 0 && p < 1, INVALID_FUNCTION_ARGUMENT, "inverseNormalCdf Function: p must be 0 < p < 1");
checkCondition(sd > 0, INVALID_FUNCTION_ARGUMENT, "inverseNormalCdf Function: sd must be > 0");
return mean + sd * 1.4142135623730951 * Erf.erfInv(2 * p - 1);
}
|
@Test
public void testInverseNormalCdf()
{
assertFunction("inverse_normal_cdf(0, 1, 0.3)", DOUBLE, -0.52440051270804089);
assertFunction("inverse_normal_cdf(10, 9, 0.9)", DOUBLE, 21.533964089901406);
assertFunction("inverse_normal_cdf(0.5, 0.25, 0.65)", DOUBLE, 0.59633011660189195);
assertInvalidFunction("inverse_normal_cdf(4, 48, 0)", "inverseNormalCdf Function: p must be 0 > p > 1");
assertInvalidFunction("inverse_normal_cdf(4, 48, 1)", "inverseNormalCdf Function: p must be 0 > p > 1");
assertInvalidFunction("inverse_normal_cdf(4, 0, 0.4)", "inverseNormalCdf Function: sd must be > 0");
}
|
public String getClientVersion() {
return clientVersion;
}
|
@Test
void testGetClientVersion() {
assertEquals("1.0.0", requestMeta.getClientVersion());
}
|
@Override
public void doDelete(HttpServletRequest req, HttpServletResponse resp) {
resp.setContentType(CONTENT_TYPE);
try (PrintWriter out = resp.getWriter()) {
out.println(msgPartOne + " Delete " + msgPartTwo);
} catch (Exception e) {
LOGGER.error("Exception occurred DELETE request processing ", e);
}
}
|
@Test
void testDoDelete() throws Exception {
HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
HttpServletResponse mockResp = Mockito.mock(HttpServletResponse.class);
StringWriter stringWriter = new StringWriter();
PrintWriter printWriter = new PrintWriter(stringWriter);
when(mockResp.getWriter()).thenReturn(printWriter);
AppServlet curServlet = new AppServlet();
curServlet.doDelete(mockReq, mockResp);
printWriter.flush();
assertTrue(stringWriter.toString().contains(msgPartOne + " Delete " + msgPartTwo));
}
|
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
String s = new String(rawMessage.getPayload(), StandardCharsets.UTF_8);
LOG.trace("Received raw message: {}", s);
String timezoneID = configuration.getString(CK_TIMEZONE);
// PA inputs that existed before the update will not have a time zone configured; default to UTC
DateTimeZone timezone = timezoneID != null ? DateTimeZone.forID(timezoneID) : DateTimeZone.UTC;
LOG.trace("Configured time zone: {}", timezone);
PaloAltoMessageBase p = parser.parse(s, timezone);
// Return when error occurs parsing syslog header.
if (p == null) {
return null;
}
Message message = messageFactory.createMessage(p.payload(), p.source(), p.timestamp());
switch (p.panType()) {
case "THREAT":
final PaloAltoTypeParser parserThreat = new PaloAltoTypeParser(templates.getThreatMessageTemplate());
message.addFields(parserThreat.parseFields(p.fields(), timezone));
break;
case "SYSTEM":
final PaloAltoTypeParser parserSystem = new PaloAltoTypeParser(templates.getSystemMessageTemplate());
message.addFields(parserSystem.parseFields(p.fields(), timezone));
break;
case "TRAFFIC":
final PaloAltoTypeParser parserTraffic = new PaloAltoTypeParser(templates.getTrafficMessageTemplate());
message.addFields(parserTraffic.parseFields(p.fields(), timezone));
break;
default:
LOG.error("Unsupported PAN type [{}]. Not adding any parsed fields.", p.panType());
}
LOG.trace("Successfully processed [{}] message with [{}] fields.", p.panType(), message.getFieldCount());
return message;
}
|
@Test
public void valuesTest() {
// Test System message results
PaloAltoCodec codec = new PaloAltoCodec(Configuration.EMPTY_CONFIGURATION, messageFactory);
Message message = codec.decode(new RawMessage(PANORAMA_SYSTEM_MESSAGE.getBytes(StandardCharsets.UTF_8)));
assertEquals("SYSTEM", message.getField("type"));
assertEquals(message.getField("module"), "general");
// Test quoted value with embedded commas.
assertEquals(message.getField("description"), "Deviating device: Prod--2, Serial: 453524335, Object: N/A, Metric: mp-cpu, Value: 34");
assertEquals(message.getField("serial_number"), "000710000506");
assertEquals(message.getField("source"), "Panorama-1");
assertEquals(message.getField("message"), "1,2018/09/19 11:50:35,000710000506,SYSTEM,general,0,2018/09/19 11:50:35,,general,,0,0,general,informational,\"Deviating device: Prod--2, Serial: 453524335, Object: N/A, Metric: mp-cpu, Value: 34\",1163103,0x0,0,0,0,0,,Panorama-1");
assertEquals(message.getField("severity"), "informational");
assertEquals(message.getField("generated_time"), "2018/09/19 11:50:35");
assertEquals(message.getField("event_id"), "general");
assertEquals(message.getField("device_name"), "Panorama-1");
assertEquals(message.getField("content_threat_type"), "general");
assertEquals(message.getField("virtual_system_name"), null);
assertEquals(0, ((DateTime) message.getField("timestamp")).compareTo(new DateTime("2018-09-19T11:50:35.000-05:00", DateTimeZone.UTC)));
// Test Traffic message results
message = codec.decode(new RawMessage(PANORAMA_TRAFFIC_MESSAGE.getBytes(StandardCharsets.UTF_8)));
assertEquals(message.getField("bytes_received"), 140L);
assertEquals(message.getField("source"), "Panorama--2");
assertEquals(message.getField("repeat_count"), 1L);
assertEquals(message.getField("receive_time"), "2018/09/19 11:50:32");
assertEquals(message.getField("outbound_interface"), "ethernet1/1");
assertEquals(message.getField("packets"), 6L);
assertEquals(message.getField("dest_location"), "10.20.30.40-10.20.30.40");
assertEquals(message.getField("src_addr"), "10.20.30.40");
assertEquals(message.getField("generated_time"), "2018/09/19 11:50:32");
assertEquals(message.getField("protocol"), "tcp");
assertEquals(message.getField("threat_content_type"), "end");
assertEquals(message.getField("packets_sent"), 4L);
assertEquals(message.getField("packets_received"), 2L);
assertEquals(message.getField("action"), "allow");
assertEquals(message.getField("virtual_system"), "vsys1");
assertEquals(message.getField("dest_port"), 443L);
assertEquals(((DateTime) message.getField("timestamp")).compareTo(new DateTime("2018-09-19T11:50:32.000-05:00", DateTimeZone.UTC)), 0);
assertEquals(message.getField("rule_name"), "HTTPS-strict");
assertEquals(message.getField("nat_src_addr"), "10.20.30.40");
assertEquals(message.getField("session_id"), 205742L);
assertEquals(message.getField("serial_number"), "453524335");
assertEquals(message.getField("message"), "1,2018/09/19 11:50:32,453524335,TRAFFIC,end,2049,2018/09/19 11:50:32,10.20.30.40,10.20.30.40,10.20.30.40,10.20.30.40,HTTPS-strict,,,incomplete,vsys1,Public,Public,ethernet1/1,ethernet1/1,ALK Logging,2018/09/19 11:50:32,205742,1,64575,443,41304,443,0x400070,tcp,allow,412,272,140,6,2018/09/19 11:50:15,0,any,0,54196730,0x8000000000000000,10.20.30.40-10.20.30.40,10.20.30.40-10.20.30.40,0,4,2,tcp-fin,13,16,0,0,,Prod--2,from-policy,,,0,,0,,N/A,0,0,0,0");
assertEquals(message.getField("bytes_sent"), 272L);
assertEquals(message.getField("dest_zone"), "Public");
assertEquals(message.getField("nat_src_port"), 41304L);
assertEquals(message.getField("src_port"), 64575L);
assertEquals(message.getField("src_location"), "10.20.30.40-10.20.30.40");
assertEquals(message.getField("log_action"), "ALK Logging");
assertEquals(message.getField("inbound_interface"), "ethernet1/1");
assertEquals(message.getField("application"), "incomplete");
assertEquals(message.getField("src_zone"), "Public");
assertEquals(message.getField("bytes"), 412L);
assertEquals(message.getField("dest_addr"), "10.20.30.40");
assertEquals(message.getField("type"), "TRAFFIC");
assertEquals(message.getField("nat_dest_addr"), "10.20.30.40");
assertEquals(message.getField("category"), "any");
assertEquals(message.getField("nat_dest_port"), 443L);
}
|
@SuppressWarnings("unchecked")
public final T get() {
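// A per-thread capacity of zero disables pooling, so hand out objects that are never recycled.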
if (maxCapacityPerThread == 0) {
return newObject((Handle<T>) NOOP_HANDLE);
}
LocalPool<T> localPool = threadLocal.get();
DefaultHandle<T> handle = localPool.claim();
T obj;
if (handle == null) {
handle = localPool.newHandle();
if (handle != null) {
obj = newObject(handle);
handle.set(obj);
} else {
obj = newObject((Handle<T>) NOOP_HANDLE);
}
} else {
obj = handle.get();
}
return obj;
}
|
@Test
public void verySmallRecycler() {
newRecycler(2, 0, 1).get();
}
|
@Override
public boolean load() {
boolean cqLoadResult = loadConsumeQueues(getStorePathConsumeQueue(this.messageStoreConfig.getStorePathRootDir()), CQType.SimpleCQ);
boolean bcqLoadResult = loadConsumeQueues(getStorePathBatchConsumeQueue(this.messageStoreConfig.getStorePathRootDir()), CQType.BatchCQ);
return cqLoadResult && bcqLoadResult;
}
|
@Test
public void testLoadConsumeQueuesWithWrongAttribute() {
String normalTopic = UUID.randomUUID().toString();
ConcurrentMap<String, TopicConfig> topicConfigTable = createTopicConfigTable(normalTopic, CQType.SimpleCQ);
this.topicConfigTableMap.putAll(topicConfigTable);
for (int i = 0; i < 10; i++) {
PutMessageResult putMessageResult = messageStore.putMessage(buildMessage(normalTopic, -1));
assertEquals(PutMessageStatus.PUT_OK, putMessageResult.getPutMessageStatus());
}
await().atMost(5, SECONDS).until(fullyDispatched(messageStore));
// Simulate a topic deletion that leaves files behind.
this.topicConfigTableMap.clear();
topicConfigTable = createTopicConfigTable(normalTopic, CQType.BatchCQ);
this.topicConfigTableMap.putAll(topicConfigTable);
RuntimeException runtimeException = Assert.assertThrows(RuntimeException.class, () -> messageStore.getQueueStore().load());
Assert.assertTrue(runtimeException.getMessage().endsWith("should be SimpleCQ, but is BatchCQ"));
}
|
public void setOuterJoinType(OuterJoinType outerJoinType) {
this.outerJoinType = outerJoinType;
}
|
@Test
void testLeftOuterJoin() throws Exception {
final List<String> leftInput =
Arrays.asList("foo", "foo", "foo", "bar", "bar", "foobar", "foobar");
final List<String> rightInput =
Arrays.asList("foo", "foo", "bar", "bar", "bar", "barfoo", "barfoo");
baseOperator.setOuterJoinType(OuterJoinOperatorBase.OuterJoinType.LEFT);
List<String> expected =
Arrays.asList(
"bar,bar",
"bar,bar",
"bar,bar",
"bar,bar",
"bar,bar",
"bar,bar",
"foo,foo",
"foo,foo",
"foo,foo",
"foo,foo",
"foo,foo",
"foo,foo",
"foobar,null",
"foobar,null");
testOuterJoin(leftInput, rightInput, expected);
}
|
private void unionFailResponse(final ServletResponse response) throws IOException {
HttpServletResponse httpResponse = (HttpServletResponse) response;
httpResponse.setContentType("application/json;charset=utf-8");
httpResponse.setCharacterEncoding("utf-8");
wrapCorsResponse(httpResponse);
httpResponse.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
ShenyuAdminResult result = ShenyuAdminResult.error(CommonErrorCode.TOKEN_ERROR,
ShenyuResultMessage.TOKEN_IS_ERROR);
httpResponse.getWriter().println(GsonUtils.getInstance().toJson(result));
}
|
@Test
public void testUnionFailResponse() {
PrintWriter printWriter = mock(PrintWriter.class);
try {
when(httpServletResponse.getWriter()).thenReturn(printWriter);
doNothing().when(printWriter).println();
Method testMethod = statelessAuthFilter.getClass().getDeclaredMethod("unionFailResponse", ServletResponse.class);
testMethod.setAccessible(true);
testMethod.invoke(statelessAuthFilter, httpServletResponse);
verify(httpServletResponse).setContentType("application/json;charset=utf-8");
verify(httpServletResponse).setCharacterEncoding("utf-8");
verify(httpServletResponse).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
} catch (Exception e) {
throw new ShenyuException(e.getCause());
}
}
|
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
int type = columnDef.getColumnMeta() >> 8;
int length = columnDef.getColumnMeta() & 0xff;
// unpack type & length, see https://bugs.mysql.com/bug.php?id=37426.
if (0x30 != (type & 0x30)) {
length += ((type & 0x30) ^ 0x30) << 4;
type |= 0x30;
}
switch (MySQLBinaryColumnType.valueOf(type)) {
case ENUM:
return readEnumValue(length, payload);
case SET:
return payload.getByteBuf().readByte();
case STRING:
return new MySQLBinaryString(payload.readStringFixByBytes(readActualLength(length, payload)));
default:
throw new UnsupportedSQLOperationException(MySQLBinaryColumnType.valueOf(type).toString());
}
}
|
@Test
void assertReadEnumValueWithMetaFailure() {
columnDef.setColumnMeta((MySQLBinaryColumnType.ENUM.getValue() << 8) + 3);
assertThrows(UnsupportedSQLOperationException.class, () -> new MySQLStringBinlogProtocolValue().read(columnDef, payload));
}
|