focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
@Override
public KillApplicationResponse forceKillApplication(
KillApplicationRequest request) throws YarnException, IOException {
// Kills a federated application: first on its home sub-cluster, then
// best-effort on the remaining sub-clusters. Failures bump routerMetrics
// and are audit-logged before being rethrown.
// Guard: both the request and its ApplicationId are mandatory.
if (request == null || request.getApplicationId() == null) {
routerMetrics.incrAppsFailedKilled();
String msg = "Missing forceKillApplication request or ApplicationId.";
RouterAuditLogger.logFailure(user.getShortUserName(), FORCE_KILL_APP, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException(msg, null);
}
long startTime = clock.getTime();
ApplicationId applicationId = request.getApplicationId();
SubClusterId subClusterId = null;
// Resolve the application's home sub-cluster from the federation state
// store; an unknown application is reported as a kill failure.
try {
subClusterId = federationFacade
.getApplicationHomeSubCluster(request.getApplicationId());
} catch (YarnException e) {
routerMetrics.incrAppsFailedKilled();
String msg =
String.format("Application %s does not exist in FederationStateStore.", applicationId);
RouterAuditLogger.logFailure(user.getShortUserName(), FORCE_KILL_APP, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg, applicationId);
RouterServerUtil.logAndThrowException(msg, e);
}
ApplicationClientProtocol clientRMProxy =
getClientRMProxyForSubCluster(subClusterId);
KillApplicationResponse response = null;
try {
LOG.info("forceKillApplication {} on SubCluster {}.", applicationId, subClusterId);
response = clientRMProxy.forceKillApplication(request);
// If kill home sub-cluster application is successful,
// we will try to kill the same application in other sub-clusters.
if (response != null) {
ClientMethod remoteMethod = new ClientMethod("forceKillApplication",
new Class[]{KillApplicationRequest.class}, new Object[]{request});
invokeConcurrent(remoteMethod, KillApplicationResponse.class, subClusterId);
}
} catch (Exception e) {
routerMetrics.incrAppsFailedKilled();
String msg = "Unable to kill the application report.";
RouterAuditLogger.logFailure(user.getShortUserName(), FORCE_KILL_APP, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg, applicationId, subClusterId);
RouterServerUtil.logAndThrowException(msg, e);
}
// NOTE(review): a null response is logged as an error but is still
// returned and counted as a success below — confirm this is intended.
if (response == null) {
LOG.error("No response when attempting to kill the application {} to SubCluster {}.",
applicationId, subClusterId.getId());
}
long stopTime = clock.getTime();
routerMetrics.succeededAppsKilled(stopTime - startTime);
RouterAuditLogger.logSuccess(user.getShortUserName(), FORCE_KILL_APP,
TARGET_CLIENT_RM_SERVICE, applicationId);
return response;
}
|
@Test
public void testForceKillApplicationNotExists() throws Exception {
  LOG.info("Test FederationClientInterceptor: Force Kill Application - Not Exists");
  // An id derived from the current timestamp is effectively guaranteed to be
  // absent from the federation state store.
  final ApplicationId appId =
      ApplicationId.newInstance(System.currentTimeMillis(), 1);
  final KillApplicationRequest killRequest =
      KillApplicationRequest.newInstance(appId);
  // Killing an unknown application must surface the lookup failure.
  LambdaTestUtils.intercept(YarnException.class,
      "Application " + appId + " does not exist in FederationStateStore.",
      () -> interceptor.forceKillApplication(killRequest));
}
|
@Override
public void invoke(final MainAction runnable) {
// Convenience overload: delegate to the two-argument form with wait=false,
// i.e. do not block until the action has completed.
this.invoke(runnable, false);
}
|
@Test
public void testInvoke() throws Exception {
  final CountDownLatch invoked = new CountDownLatch(1);
  // Stub controller verifying that the single-argument overload forwards
  // wait=false to the two-argument implementation.
  final AbstractController controller = new AbstractController() {
    @Override
    public void invoke(final MainAction runnable, final boolean wait) {
      assertFalse(wait);
      invoked.countDown();
    }
  };
  final DefaultMainAction noop = new DefaultMainAction() {
    @Override
    public void run() {
      // no-op
    }
  };
  new Thread(() -> controller.invoke(noop)).start();
  invoked.await(1, TimeUnit.SECONDS);
}
|
// A host is valid exactly when it is not flagged by isInvalidLocalHost,
// keeping the two predicates strictly complementary.
public static boolean isValidLocalHost(String host) {
return !isInvalidLocalHost(host);
}
|
@Test
void testIsValidLocalHost() {
  // Plain dotted-quad addresses must be accepted as valid local hosts.
  assertTrue(NetUtils.isValidLocalHost("128.0.0.1"));
  assertTrue(NetUtils.isValidLocalHost("1.2.3.4"));
}
|
public static <T> List<T> batchTransform(final Class<T> clazz, List<?> srcList) {
  // Transforms each element of srcList into an instance of clazz.
  // A null or empty input yields the shared immutable empty list.
  if (CollectionUtils.isEmpty(srcList)) {
    return Collections.emptyList();
  }
  final List<T> transformed = new ArrayList<>(srcList.size());
  for (final Object source : srcList) {
    transformed.add(transform(clazz, source));
  }
  return transformed;
}
|
@Test
public void testBatchTransformSrcIsNull() {
  // A list whose only element is null must still yield a non-null result.
  someList.add(null);
  final List<String> transformed = BeanUtils.batchTransform(String.class, someList);
  assertNotNull(transformed);
}
|
@Override
public byte[] toBitSet() {
  // Serializes this container into a byte[] in which the low `size` bits
  // are set to 1; trailing bits of the last byte (if any) are 0.
  if (size == 0) {
    return Util.EMPTY_BYTE_ARRAY;
  }
  // Number of completely filled bytes.
  int offset = (size >>> 3);
  // FIX: the all-full-bytes fast path must trigger when size is a multiple
  // of 8 (8 bits per byte), not a multiple of 16. The previous check
  // `(size & 0xf) == 0` sent sizes such as 8, 24, 40… down the tail-byte
  // path below and emitted one extra byte of 0xff (e.g. size=8 produced
  // two 0xff bytes, i.e. 16 set bits).
  if ((size & 0x7) == 0) {
    byte[] array = new byte[offset];
    Arrays.fill(array, (byte) 0xff);
    return array;
  }
  byte[] array = new byte[offset + 1];
  if (offset > 0) {
    Arrays.fill(array, 0, offset, (byte) 0xff);
  }
  // Bits in the partial last byte; with the corrected guard this is always
  // in [1, 7], so `size & 0x7` equals the original `size > 8 ? size % 8 : size`.
  int lastBitOffset = size & 0x7;
  array[offset] = (byte) (0xff >> (8 - lastBitOffset));
  return array;
}
|
@Test
public void testToBitSet() {
  // Cover empty, sub-byte, unaligned, and byte-aligned sizes.
  for (int size : new int[] {13, 12, 0, 16, 43, 5}) {
    testToArray(size);
  }
}
|
public static CoordinatorRecord newShareGroupEpochTombstoneRecord(
    String groupId
) {
  // A tombstone carries the keyed group id with a null value, signalling
  // deletion of the share-group metadata for that group.
  final ShareGroupMetadataKey key = new ShareGroupMetadataKey().setGroupId(groupId);
  return new CoordinatorRecord(
      new ApiMessageAndVersion(key, (short) 11),
      null // Tombstone.
  );
}
|
@Test
public void testNewShareGroupEpochTombstoneRecord() {
  // The tombstone must carry only the keyed group id; its value is null.
  ApiMessageAndVersion expectedKey = new ApiMessageAndVersion(
      new ShareGroupMetadataKey()
          .setGroupId("group-id"),
      (short) 11);
  CoordinatorRecord expectedRecord = new CoordinatorRecord(expectedKey, null);
  assertEquals(expectedRecord, newShareGroupEpochTombstoneRecord("group-id"));
}
|
// Parses ServiceConfiguration#getAdvertisedListeners into a map of listener
// name -> AdvertisedListener while validating the whole configuration:
//  - each comma-separated entry must be "<name>:<uri>";
//  - a listener may declare at most two URIs and at most one URI per scheme;
//  - the same host:port must not be claimed by two different listeners;
//  - internalListenerName defaults to the first declared listener and must
//    itself be among the declared listeners.
public static Map<String, AdvertisedListener> validateAndAnalysisAdvertisedListener(ServiceConfiguration config) {
if (StringUtils.isBlank(config.getAdvertisedListeners())) {
return Collections.emptyMap();
}
Optional<String> firstListenerName = Optional.empty();
Map<String, List<String>> listeners = new LinkedHashMap<>();
// Group the raw entries by listener name, preserving declaration order.
for (final String str : StringUtils.split(config.getAdvertisedListeners(), ",")) {
int index = str.indexOf(":");
if (index <= 0) {
throw new IllegalArgumentException("the configure entry `advertisedListeners` is invalid. because "
+ str + " do not contain listener name");
}
String listenerName = StringUtils.trim(str.substring(0, index));
if (!firstListenerName.isPresent()) {
firstListenerName = Optional.of(listenerName);
}
String value = StringUtils.trim(str.substring(index + 1));
listeners.computeIfAbsent(listenerName, k -> new ArrayList<>(2));
listeners.get(listenerName).add(value);
}
// Default the internal listener to the first declared one.
if (StringUtils.isBlank(config.getInternalListenerName())) {
config.setInternalListenerName(firstListenerName.get());
}
if (!listeners.containsKey(config.getInternalListenerName())) {
throw new IllegalArgumentException("the `advertisedListeners` configure do not contain "
+ "`internalListenerName` entry");
}
final Map<String, AdvertisedListener> result = new LinkedHashMap<>();
// host:port -> listener names claiming it, used to detect conflicts.
final Map<String, Set<String>> reverseMappings = new LinkedHashMap<>();
for (final Map.Entry<String, List<String>> entry : listeners.entrySet()) {
if (entry.getValue().size() > 2) {
throw new IllegalArgumentException("there are redundant configure for listener `" + entry.getKey()
+ "`");
}
URI pulsarAddress = null, pulsarSslAddress = null, pulsarHttpAddress = null, pulsarHttpsAddress = null;
for (final String strUri : entry.getValue()) {
try {
URI uri = URI.create(strUri);
// Each scheme may appear at most once per listener.
if (StringUtils.equalsIgnoreCase(uri.getScheme(), "pulsar")) {
if (pulsarAddress == null) {
pulsarAddress = uri;
} else {
throw new IllegalArgumentException("there are redundant configure for listener `"
+ entry.getKey() + "`");
}
} else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "pulsar+ssl")) {
if (pulsarSslAddress == null) {
pulsarSslAddress = uri;
} else {
throw new IllegalArgumentException("there are redundant configure for listener `"
+ entry.getKey() + "`");
}
} else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "http")) {
if (pulsarHttpAddress == null) {
pulsarHttpAddress = uri;
} else {
throw new IllegalArgumentException("there are redundant configure for listener `"
+ entry.getKey() + "`");
}
} else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "https")) {
if (pulsarHttpsAddress == null) {
pulsarHttpsAddress = uri;
} else {
throw new IllegalArgumentException("there are redundant configure for listener `"
+ entry.getKey() + "`");
}
}
String hostPort = String.format("%s:%d", uri.getHost(), uri.getPort());
Set<String> sets = reverseMappings.computeIfAbsent(hostPort, k -> new TreeSet<>());
sets.add(entry.getKey());
if (sets.size() > 1) {
throw new IllegalArgumentException("must not specify `" + hostPort
+ "` to different listener.");
}
// NOTE(review): IllegalArgumentExceptions thrown above inside this try
// are caught by the Throwable handler below and re-wrapped with the
// generic "value ... is invalid" message — confirm that nesting is
// intended rather than accidental.
} catch (Throwable cause) {
throw new IllegalArgumentException("the value " + strUri + " in the `advertisedListeners` "
+ "configure is invalid", cause);
}
}
result.put(entry.getKey(), AdvertisedListener.builder()
.brokerServiceUrl(pulsarAddress)
.brokerServiceUrlTls(pulsarSslAddress)
.brokerHttpUrl(pulsarHttpAddress)
.brokerHttpsUrl(pulsarHttpsAddress)
.build());
}
return result;
}
|
@Test(expectedExceptions = IllegalArgumentException.class)
public void testMalformedListener() {
  // An entry starting with ':' has an empty listener name and must be rejected.
  final ServiceConfiguration config = new ServiceConfiguration();
  config.setAdvertisedListeners(":pulsar://127.0.0.1:6660");
  MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config);
}
|
public static Manifest getManifest(Class<?> cls) throws IORuntimeException {
  // Locate the resource URL for the class, then open its connection.
  final URL url = ResourceUtil.getResource(null, cls);
  final URLConnection connection;
  try {
    connection = url.openConnection();
  } catch (final IOException e) {
    // Wrap the checked IOException into the project's unchecked variant.
    throw new IORuntimeException(e);
  }
  // Only JAR-backed classes carry a manifest; anything else yields null.
  if (connection instanceof JarURLConnection) {
    return getManifest((JarURLConnection) connection);
  }
  return null;
}
|
@Test
public void getManiFestTest(){
  // Test.class is loaded from a jar, so a manifest must be resolvable for it.
  assertNotNull(ManifestUtil.getManifest(Test.class));
}
|
@Override
public int hashCode() {
// Delegate to the wrapped raw object so equal raw values hash equally,
// keeping hashCode consistent with an equals() based on `raw`.
return raw.hashCode();
}
|
@Test
void two_different_tables_are_considered_non_equal() {
  // Two different tables must differ both by equals() and by hashCode().
  assertNotEquals(createSimpleTable().hashCode(), createSimpleNumberTable().hashCode());
  assertNotEquals(createSimpleTable(), createSimpleNumberTable());
}
|
public void subtractNutrients(Nutrients nutrients) {
  // Treat a null argument as "nothing to subtract".
  if (nutrients == null) {
    return;
  }
  // Subtract each component in turn.
  subtractCalories(nutrients.getCalories());
  subtractCarbohydrates(nutrients.getCarbohydrates());
  subtractFats(nutrients.getFats());
  subtractProteins(nutrients.getProteins());
}
|
@Test
void subtractNegativeValues_shouldIncreaseValues() {
// Subtracting negative amounts must behave like adding their magnitudes.
modifyNutrients = new Nutrients(
new Calories(new BigDecimal("-130")),
new Carbohydrates(new BigDecimal("-5"), new BigDecimal("-2"), new BigDecimal("-3")),
new Proteins(new BigDecimal("-5")),
new Fats(new BigDecimal("-10"), new BigDecimal("-5"))
);
baseNutrients.subtractNutrients(modifyNutrients);
// Expected totals are the base fixture values increased by the magnitudes above.
assertAll("Should increase",
() -> assertEquals(new BigDecimal("780"), baseNutrients.getCalories().getTotalCalories()),
() -> assertEquals(new BigDecimal("25"), baseNutrients.getCarbohydrates().getTotalCarbohydrates()),
() -> assertEquals(new BigDecimal("35"), baseNutrients.getProteins().getTotalProteins()),
() -> assertEquals(new BigDecimal("60"), baseNutrients.getFats().getTotalFats())
);
}
|
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
  // Client-side concurrency limiter: blocks (up to the method timeout) until
  // the number of in-flight invocations drops below the `actives` limit.
  URL url = invoker.getUrl();
  String methodName = RpcUtils.getMethodName(invocation);
  // Maximum concurrent invocations for this method (0 = unlimited).
  // Consistency fix: reuse the `url`/`methodName` locals declared above
  // instead of recomputing invoker.getUrl()/RpcUtils.getMethodName(...)
  // on every use; the values are identical.
  int max = url.getMethodParameter(methodName, ACTIVES_KEY, 0);
  final RpcStatus rpcStatus = RpcStatus.getStatus(url, methodName);
  if (!RpcStatus.beginCount(url, methodName, max)) {
    long timeout = url.getMethodParameter(methodName, TIMEOUT_KEY, 0);
    long start = System.currentTimeMillis();
    long remain = timeout;
    synchronized (rpcStatus) {
      while (!RpcStatus.beginCount(url, methodName, max)) {
        try {
          rpcStatus.wait(remain);
        } catch (InterruptedException e) {
          // ignore: the remaining-time budget is re-checked below, so the
          // loop keeps waiting until the timeout actually elapses.
        }
        long elapsed = System.currentTimeMillis() - start;
        remain = timeout - elapsed;
        if (remain <= 0) {
          throw new RpcException(
              RpcException.LIMIT_EXCEEDED_EXCEPTION,
              "Waiting concurrent invoke timeout in client-side for service: "
                  + invoker.getInterface().getName()
                  + ", method: " + methodName + ", elapsed: "
                  + elapsed + ", timeout: " + timeout + ". concurrent invokes: "
                  + rpcStatus.getActive()
                  + ". max concurrent invoke limit: " + max);
        }
      }
    }
  }
  // Record the start time so the listener can settle the active count later.
  invocation.put(ACTIVE_LIMIT_FILTER_START_TIME, System.currentTimeMillis());
  return invoker.invoke(invocation);
}
|
@Test
void testInvokeRuntimeException() {
  // Even when the invoker throws, the active counter must be restored.
  Assertions.assertThrows(RuntimeException.class, () -> {
    URL url = URL.valueOf("test://test:11/test?accesslog=true&group=dubbo&version=1.1&actives=0");
    Invoker<ActiveLimitFilterTest> invoker = new RuntimeExceptionInvoker(url);
    Invocation invocation = new MockInvocation();
    RpcStatus status = RpcStatus.getStatus(invoker.getUrl(), invocation.getMethodName());
    int activeBefore = status.getActive();
    activeLimitFilter.invoke(invoker, invocation);
    int activeAfter = status.getActive();
    assertEquals(activeBefore, activeAfter, "After exception active count should be same");
  });
}
|
@Override
public String eventName() {
// Fixed identifier for this event type.
return "role";
}
|
@Test
public void testEventName() {
  // The role-changed event must be identified by the literal name "role".
  assertEquals("role", roleChangedEventTest.eventName());
}
|
public List<CloudtrailSNSNotification> parse(Message message) {
  // Parses an SQS message wrapping an SNS envelope carrying a CloudTrail
  // write notification, yielding one notification per referenced S3 object.
  // All empty-result paths return an immutable empty list.
  LOG.debug("Parsing message.");
  try {
    LOG.debug("Reading message body {}.", message.getBody());
    final SQSMessage envelope = objectMapper.readValue(message.getBody(), SQSMessage.class);
    if (envelope.message == null) {
      LOG.warn("Message is empty. Processing of message has been aborted. Verify that the SQS subscription in AWS is NOT set to send raw data.");
      return Collections.emptyList();
    }
    LOG.debug("Reading message envelope {}.", envelope.message);
    // CloudTrail's validation message carries no log data; skip it.
    if (envelope.message.contains(CLOUD_TRAIL_VALIDATION_MESSAGE)) {
      return Collections.emptyList();
    }
    final CloudtrailWriteNotification notification = objectMapper.readValue(envelope.message, CloudtrailWriteNotification.class);
    final List<String> s3ObjectKeys = notification.s3ObjectKey;
    if (s3ObjectKeys == null) {
      LOG.debug("No S3 object keys parsed.");
      return Collections.emptyList();
    }
    LOG.debug("Processing [{}] S3 keys.", s3ObjectKeys.size());
    final List<CloudtrailSNSNotification> notifications = new ArrayList<>(s3ObjectKeys.size());
    for (String s3ObjectKey : s3ObjectKeys) {
      notifications.add(new CloudtrailSNSNotification(message.getReceiptHandle(), notification.s3Bucket, s3ObjectKey));
    }
    LOG.debug("Returning [{}] notifications.", notifications.size());
    return notifications;
  } catch (IOException e) {
    LOG.error("Parsing exception.", e);
    /* Don't throw an exception that would halt processing for one parsing failure.
     * Sometimes occasional non-JSON test messages will come through. If this happens,
     * just log the error and keep processing.
     *
     * Returning an empty list here is OK and should be caught by the caller. */
    // Consistency fix: return the same immutable empty list that every other
    // empty-result branch uses instead of allocating a fresh ArrayList.
    return Collections.emptyList();
  }
}
|
@Test
public void issue_44() throws Exception {
// https://github.com/Graylog2/graylog-plugin-aws/issues/44
// Regression test: an AWS Config change notification (not a CloudTrail
// write notification) must parse to an empty list rather than throwing.
final Message message = new Message()
.withBody("{\n" +
" \"Type\" : \"Notification\",\n" +
" \"MessageId\" : \"5b0a73e6-a4f8-11e7-8dfb-8f76310a10a8\",\n" +
" \"TopicArn\" : \"arn:aws:sns:eu-west-1:123456789012:cloudtrail-log-write\",\n" +
" \"Subject\" : \"[AWS Config:eu-west-1] AWS::RDS::DBSnapshot rds:instance-2017-09-03-23-11 Dele...\",\n" +
" \"Message\" : \"{\\\"configurationItemDiff\\\":{\\\"changedProperties\\\":{\\\"Relationships.0\\\":{\\\"previousValue\\\":{\\\"resourceId\\\":\\\"vpc-12345678\\\",\\\"resourceName\\\":null,\\\"resourceType\\\":\\\"AWS::EC2::VPC\\\",\\\"name\\\":\\\"Is associated with Vpc\\\"},\\\"updatedValue\\\":null,\\\"changeType\\\":\\\"DELETE\\\"},\\\"SupplementaryConfiguration.Tags\\\":{\\\"previousValue\\\":[],\\\"updatedValue\\\":null,\\\"changeType\\\":\\\"DELETE\\\"},\\\"SupplementaryConfiguration.DBSnapshotAttributes\\\":{\\\"previousValue\\\":[{\\\"attributeName\\\":\\\"restore\\\",\\\"attributeValues\\\":[]}],\\\"updatedValue\\\":null,\\\"changeType\\\":\\\"DELETE\\\"},\\\"Configuration\\\":{\\\"previousValue\\\":{\\\"dBSnapshotIdentifier\\\":\\\"rds:instance-2017-09-03-23-11\\\",\\\"dBInstanceIdentifier\\\":\\\"instance\\\",\\\"snapshotCreateTime\\\":\\\"2017-09-03T23:11:38.218Z\\\",\\\"engine\\\":\\\"mysql\\\",\\\"allocatedStorage\\\":200,\\\"status\\\":\\\"available\\\",\\\"port\\\":3306,\\\"availabilityZone\\\":\\\"eu-west-1b\\\",\\\"vpcId\\\":\\\"vpc-12345678\\\",\\\"instanceCreateTime\\\":\\\"2015-04-09T07:08:07.476Z\\\",\\\"masterUsername\\\":\\\"root\\\",\\\"engineVersion\\\":\\\"5.6.34\\\",\\\"licenseModel\\\":\\\"general-public-license\\\",\\\"snapshotType\\\":\\\"automated\\\",\\\"iops\\\":null,\\\"optionGroupName\\\":\\\"default:mysql-5-6\\\",\\\"percentProgress\\\":100,\\\"sourceRegion\\\":null,\\\"sourceDBSnapshotIdentifier\\\":null,\\\"storageType\\\":\\\"standard\\\",\\\"tdeCredentialArn\\\":null,\\\"encrypted\\\":false,\\\"kmsKeyId\\\":null,\\\"dBSnapshotArn\\\":\\\"arn:aws:rds:eu-west-1:123456789012:snapshot:rds:instance-2017-09-03-23-11\\\",\\\"timezone\\\":null,\\\"iAMDatabaseAuthenticationEnabled\\\":false},\\\"updatedValue\\\":null,\\\"changeType\\\":\\\"DELETE\\\"}},\\\"changeType\\\":\\\"DELETE\\\"},\\\"configurationItem\\\":{\\\"relatedEvents\\\":[],\\\"relationships\\\":[],\\\"configuration\\\":null,\\\"supplementaryConfiguration\\\":{},\\\"tags\\\":{},\\\"configurationItemVersion\\\":\\\"1.2\\\",\\\"configurationItemCaptureTime\\\":\\\"2017-09-28T19:54:47.815Z\\\",\\\"configurationStateId\\\":1234567890123,\\\"awsAccountId\\\":\\\"123456789012\\\",\\\"configurationItemStatus\\\":\\\"ResourceDeleted\\\",\\\"resourceType\\\":\\\"AWS::RDS::DBSnapshot\\\",\\\"resourceId\\\":\\\"rds:instance-2017-09-03-23-11\\\",\\\"resourceName\\\":\\\"rds:instance-2017-09-03-23-11\\\",\\\"ARN\\\":\\\"arn:aws:rds:eu-west-1:123456789012:snapshot:rds:instance-2017-09-03-23-11\\\",\\\"awsRegion\\\":\\\"eu-west-1\\\",\\\"availabilityZone\\\":null,\\\"configurationStateMd5Hash\\\":\\\"b026324c6904b2a9cb4b88d6d61c81d1\\\",\\\"resourceCreationTime\\\":null},\\\"notificationCreationTime\\\":\\\"2017-09-28T19:54:48.311Z\\\",\\\"messageType\\\":\\\"ConfigurationItemChangeNotification\\\",\\\"recordVersion\\\":\\\"1.2\\\"}\",\n" +
" \"Timestamp\" : \"2017-09-28T19:54:58.543Z\",\n" +
" \"SignatureVersion\" : \"1\",\n" +
" \"Signature\" : \"...\",\n" +
" \"SigningCertURL\" : \"https://sns.eu-west-1.amazonaws.com/SimpleNotificationService-....pem\",\n" +
" \"UnsubscribeURL\" : \"https://sns.eu-west-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:eu-west-1:123456789012:cloudtrail-log-write:5b0a73e6-a4f8-11e7-8dfb-8f76310a10a8\"\n" +
"}");
CloudtrailSNSNotificationParser parser = new CloudtrailSNSNotificationParser(objectMapper);
List<CloudtrailSNSNotification> notifications = parser.parse(message);
// Unparseable/irrelevant payloads are swallowed and produce no notifications.
assertTrue(notifications.isEmpty());
}
|
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
// Delegate to the per-work-item state table, passing a null state context
// since no side-input/window context is needed for this lookup.
return workItemState.get(namespace, address, StateContexts.nullContext());
}
|
@Test
public void testMultimapEntriesCombineCacheAndWindmill() {
// Verifies that entries()/keys() merge locally cached data with data
// fetched from Windmill: key1 lives in both, key2 only in Windmill,
// key3 only in the local cache.
final String tag = "multimap";
StateTag<MultimapState<byte[], Integer>> addr =
StateTags.multimap(tag, ByteArrayCoder.of(), VarIntCoder.of());
MultimapState<byte[], Integer> multimapState = underTest.state(NAMESPACE, addr);
final byte[] key1 = "key1".getBytes(StandardCharsets.UTF_8);
final byte[] key2 = "key2".getBytes(StandardCharsets.UTF_8);
final byte[] key3 = "key3".getBytes(StandardCharsets.UTF_8);
SettableFuture<Iterable<Integer>> entryFuture = SettableFuture.create();
when(mockReader.multimapFetchSingleEntryFuture(
encodeWithCoder(key1, ByteArrayCoder.of()),
key(NAMESPACE, tag),
STATE_FAMILY,
VarIntCoder.of()))
.thenReturn(entryFuture);
// to set up the entry key1 as cache complete and add some local changes
waitAndSet(entryFuture, weightedList(1, 2, 3), 30);
multimapState.get(key1).read();
multimapState.put(dup(key1), 2);
multimapState.put(dup(key3), 20);
SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> entriesFuture =
SettableFuture.create();
when(mockReader.multimapFetchAllFuture(
false, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of()))
.thenReturn(entriesFuture);
SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> keysFuture =
SettableFuture.create();
when(mockReader.multimapFetchAllFuture(
true, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of()))
.thenReturn(keysFuture);
// windmill contains extra entry key2, and this time the entries returned should not be cached.
waitAndSet(
entriesFuture,
Arrays.asList(multimapEntry(key1, 1, 2, 3), multimapEntry(key2, 4, 5, 6)),
30);
waitAndSet(keysFuture, Arrays.asList(multimapEntry(key1), multimapEntry(key2)), 30);
// key1 exist in both cache and windmill; key2 exists only in windmill; key3 exists only in
// cache. They should all be merged.
Iterable<Map.Entry<byte[], Integer>> entries = multimapState.entries().read();
// 8 = key1's {1,2,3} + locally duplicated 2, key2's {4,5,6}, key3's {20}.
assertEquals(8, Iterables.size(entries));
assertThat(
entries,
Matchers.containsInAnyOrder(
multimapEntryMatcher(key1, 1),
multimapEntryMatcher(key1, 2),
// key1 -> 2 appears twice: once from Windmill, once from the local put.
multimapEntryMatcher(key1, 2),
multimapEntryMatcher(key1, 3),
multimapEntryMatcher(key2, 4),
multimapEntryMatcher(key2, 5),
multimapEntryMatcher(key2, 6),
multimapEntryMatcher(key3, 20)));
assertThat(multimapState.keys().read(), Matchers.containsInAnyOrder(key1, key2, key3));
}
|
/**
 * Sets the SHA-1 value for this object.
 *
 * @param sha1 the SHA-1 value to store
 */
public void setSha1(String sha1) {
this.sha1 = sha1;
}
|
@Test
@SuppressWarnings("squid:S2699")
public void testSetSha1() {
//already tested, this is just left so the IDE doesn't recreate it.
// (squid:S2699 suppresses the "tests should include assertions" rule
// for this intentional placeholder.)
}
|
@Override
public void stop() {
// On shutdown, release the thread-local settings held by this component.
threadLocalSettings.unload();
}
|
@Test
public void stop_calls_ThreadLocalSettings_remove() {
  underTest.stop();
  // stop() must unload the thread-local settings and touch nothing else.
  verify(threadLocalSettings).unload();
  verifyNoMoreInteractions(threadLocalSettings);
}
|
@Override
public ParSeqBasedCompletionStage<Void> runAfterBothAsync(CompletionStage<?> other, Runnable action,
    Executor executor)
{
  // Runs `action` on `executor` once both this stage and `other` complete.
  Task<?> that = getOrGenerateTaskFromStage(other);
  // FIX: the task trace label previously read "thenAcceptBothAsync" — a
  // copy-paste from a sibling method — which mislabels this stage in
  // ParSeq traces; use this method's own name.
  return nextStageByComposingTask(Task.par(_task, that).flatMap("runAfterBothAsync", (t, u) -> Task.blocking(() -> {
    action.run();
    return null;
  }, executor)));
}
|
@Test public void testRunAfterBothAsync() throws Exception {
  final CountDownLatch ranLatch = new CountDownLatch(1);
  final CompletionStage<String> first = createTestStage(TESTVALUE1);
  final CompletionStage<String> second = createTestStage(TESTVALUE2);
  finish(first.runAfterBothAsync(second, () -> {
    // The action must execute on the supplied executor's thread.
    assertEquals(THREAD_NAME_VALUE, Thread.currentThread().getName());
    ranLatch.countDown();
  }, _mockExecutor));
  assertTrue(ranLatch.await(1000, TimeUnit.MILLISECONDS));
}
|
// Convenience overload of the full initColumns(...): forwards the table,
// column expressions, and hadoop-function map, with all analysis-related
// parameters (expr map, analyzer, tuple/slot descriptors, params) null,
// both boolean flags false, and an empty columns-from-path list.
public static void initColumns(Table tbl, List<ImportColumnDesc> columnExprs,
Map<String, Pair<String, List<String>>> columnToHadoopFunction)
throws UserException {
initColumns(tbl, columnExprs, columnToHadoopFunction, null, null,
null, null, null, false, false, Lists.newArrayList());
}
|
@Test
public void testSourceColumnCaseSensitive() throws UserException {
  // Verifies that a column referenced with source-file casing ("c1") that
  // differs from the table column ("C1") yields a slot registered under the
  // source-file spelling, alongside the normally matched column c0.
  // columns
  String c0Name = "c0";
  columns.add(new Column(c0Name, Type.INT, true, null, true, null, ""));
  columnExprs.add(new ImportColumnDesc(c0Name, null));
  String c1Name = "C1";
  columns.add(new Column(c1Name, Type.INT, true, null, true, null, ""));
  // column name in source file is c1
  String c1NameInSource = "c1";
  columnExprs.add(new ImportColumnDesc(c1NameInSource, null));
  // column mappings
  // C1 = year(c1)
  List<Expr> params1 = Lists.newArrayList();
  params1.add(new SlotRef(null, c1NameInSource));
  Expr mapping1 = new FunctionCallExpr(FunctionSet.YEAR, params1);
  columnExprs.add(new ImportColumnDesc(c1Name, mapping1));
  new Expectations() {
    {
      table.getBaseSchema();
      result = columns;
      table.getColumn(c0Name);
      result = columns.get(0);
      table.getColumn(c1Name);
      result = columns.get(1);
    }
  };
  Load.initColumns(table, columnExprs, null, exprsByName, analyzer, srcTupleDesc,
      slotDescByName, params, true, true, columnsFromPath);
  // check
  // FIX: removed leftover debug `System.out.println(slotDescByName)`; the
  // assertions below fully cover the expected slot map contents.
  Assert.assertEquals(2, slotDescByName.size());
  Assert.assertFalse(slotDescByName.containsKey(c1Name));
  Assert.assertTrue(slotDescByName.containsKey(c1NameInSource));
}
|
T getFunction(final List<SqlArgument> arguments) {
  // Pass 1: look for an exact match with no implicit casting.
  final Optional<T> exact = findMatchingCandidate(arguments, false);
  if (exact.isPresent()) {
    return exact.get();
  }
  // No exact match: fail fast if implicit casts are not allowed here.
  if (!supportsImplicitCasts) {
    throw createNoMatchingFunctionException(arguments);
  }
  // Pass 2: retry allowing implicit casts between compatible types.
  final Optional<T> coerced = findMatchingCandidate(arguments, true);
  return coerced.orElseThrow(() -> createNoMatchingFunctionException(arguments));
}
|
@Test
public void shouldChooseIntervalUnit() {
  // Given: a single candidate taking an INTERVALUNIT argument.
  givenFunctions(
      function(EXPECTED, -1, INTERVALUNIT)
  );
  // When: resolving with an interval-unit argument.
  final KsqlScalarFunction resolved = udfIndex.getFunction(
      ImmutableList.of(SqlArgument.of(SqlIntervalUnit.INSTANCE)));
  // Then: the sole candidate is chosen.
  assertThat(resolved.name(), equalTo(EXPECTED));
}
|
// Merges the given Map-based config by delegating to merge(...); the
// decrypt flag is forwarded unchanged.
public static void mergeMap(boolean decrypt, Map<String, Object> config) {
merge(decrypt, config);
}
|
@Test
public void testMap_mergeWhenFieldNotInValues_throwsException() {
  // A placeholder referencing a key absent from the values store must fail.
  final Map<String, Object> config = new HashMap<>();
  config.put("key", "${TEST.somethingNotInValues}");
  try {
    CentralizedManagement.mergeMap(true, config);
    fail();
  } catch (ConfigException expected) {
    // pass
  }
}
|
@SuppressWarnings("unchecked")
@Override
public boolean setFlushListener(final CacheFlushListener<K, V> listener,
final boolean sendOldValues) {
// Registers a flush listener on the wrapped store if (and only if) it is a
// caching store; the raw byte[] records are deserialized with this store's
// serdes before being handed to the typed listener. Returns false when the
// inner store does not support flush listeners.
final KeyValueStore<Bytes, byte[]> wrapped = wrapped();
if (wrapped instanceof CachedStateStore) {
return ((CachedStateStore<byte[], byte[]>) wrapped).setFlushListener(
record -> listener.apply(
record.withKey(serdes.keyFrom(record.key()))
.withValue(new Change<>(
// null stays null: deserialize only present new/old values.
record.value().newValue != null ? serdes.valueFrom(record.value().newValue) : null,
record.value().oldValue != null ? serdes.valueFrom(record.value().oldValue) : null,
record.value().isLatest
))
),
sendOldValues);
}
return false;
}
|
@SuppressWarnings("unchecked")
@Test
public void shouldSetFlushListenerOnWrappedCachingStore() {
  setUpWithoutContext();
  // The metered store must forward setFlushListener to a caching inner store.
  final CachedKeyValueStore inner = mock(CachedKeyValueStore.class);
  when(inner.setFlushListener(any(CacheFlushListener.class), eq(false))).thenReturn(true);
  metered = new MeteredKeyValueStore<>(
      inner,
      STORE_TYPE,
      new MockTime(),
      Serdes.String(),
      Serdes.String()
  );
  assertTrue(metered.setFlushListener(null, false));
}
|
@Transactional
public Release rollback(long releaseId, String operator) {
// Rolls back the given (latest) release by abandoning it, so the previous
// active release becomes effective again; records a release history entry
// and propagates the rollback to child namespaces.
Release release = findOne(releaseId);
if (release == null) {
throw NotFoundException.releaseNotFound(releaseId);
}
if (release.isAbandoned()) {
throw new BadRequestException("release is not active");
}
String appId = release.getAppId();
String clusterName = release.getClusterName();
String namespaceName = release.getNamespaceName();
// Fetch the two most recent active releases: [0] is the one being rolled
// back, [1] is the release to fall back to.
PageRequest page = PageRequest.of(0, 2);
List<Release> twoLatestActiveReleases = findActiveReleases(appId, clusterName, namespaceName, page);
if (twoLatestActiveReleases == null || twoLatestActiveReleases.size() < 2) {
throw new BadRequestException(
"Can't rollback namespace(appId=%s, clusterName=%s, namespaceName=%s) because there is only one active release",
appId,
clusterName,
namespaceName);
}
release.setAbandoned(true);
release.setDataChangeLastModifiedBy(operator);
releaseRepository.save(release);
releaseHistoryService.createReleaseHistory(appId, clusterName,
namespaceName, clusterName, twoLatestActiveReleases.get(1).getId(),
release.getId(), ReleaseOperation.ROLLBACK, null, operator);
//publish child namespace if namespace has child
rollbackChildNamespace(appId, clusterName, namespaceName, twoLatestActiveReleases, operator);
return release;
}
|
@Test
public void testRollback() {
  when(releaseRepository.findById(releaseId)).thenReturn(Optional.of(firstRelease));
  // Two active releases exist, so the latest one is eligible for rollback.
  when(releaseRepository.findByAppIdAndClusterNameAndNamespaceNameAndIsAbandonedFalseOrderByIdDesc(appId,
      clusterName,
      namespaceName,
      pageRequest))
      .thenReturn(Arrays.asList(firstRelease, secondRelease));
  releaseService.rollback(releaseId, user);
  // Rolling back abandons the newest release and records the operator.
  verify(releaseRepository).save(firstRelease);
  Assert.assertTrue(firstRelease.isAbandoned());
  Assert.assertEquals(user, firstRelease.getDataChangeLastModifiedBy());
}
|
/**
 * Sets the onreturn handler object for this method config.
 *
 * @param onreturn the handler to store (presumably invoked on normal return — see method callback docs)
 * @return this builder, for chaining
 */
public MethodBuilder onreturn(Object onreturn) {
this.onreturn = onreturn;
return getThis();
}
|
@Test
void onreturn() {
  // The builder must carry the onreturn object through to the built config.
  MethodBuilder builder = MethodBuilder.newBuilder().onreturn("on-return-object");
  Assertions.assertEquals("on-return-object", builder.build().getOnreturn());
}
|
public static ClassLoader findClassLoader(final ClassLoader proposed) {
  // Fall back from the proposed loader, to this class's own loader, to the
  // system class loader — returning the first non-null candidate.
  if (proposed != null) {
    return proposed;
  }
  final ClassLoader own = ReflectHelpers.class.getClassLoader();
  return own != null ? own : ClassLoader.getSystemClassLoader();
}
|
@Test
public void testFindProperClassLoaderIfContextClassLoaderIsNull() throws InterruptedException {
  final ClassLoader[] captured = new ClassLoader[1];
  final Thread worker = new Thread(() -> captured[0] = ReflectHelpers.findClassLoader());
  // With no context class loader, the helper must fall back to the loader
  // that loaded ReflectHelpers itself.
  worker.setContextClassLoader(null);
  worker.start();
  worker.join();
  assertEquals(ReflectHelpers.class.getClassLoader(), captured[0]);
}
|
// Returns true when the given logic table name is bound by this rule.
// NOTE(review): the lookup uses the backing map's key semantics directly —
// confirm whether table-name matching is meant to be case-insensitive.
public boolean hasLogicTable(final String logicTable) {
return shardingTables.containsKey(logicTable);
}
|
@Test
void assertNotHasLogicTable() {
  // "New_Table" is not part of the binding-rule fixture, so it must be absent.
  assertFalse(createBindingTableRule().hasLogicTable("New_Table"));
}
|
// Resolves the @base attribute of the xs:extension nested in the named
// xs:complexType — i.e. the schema base type of `name`.
// NOTE(review): `name` is interpolated into the XPath expression unescaped;
// a value containing a single quote would break the expression — confirm
// inputs are schema-controlled identifiers.
public String findBaseType(String name) throws XPathExpressionException {
return (String) xPath.compile(
"/xs:schema/xs:complexType[@name='" + name + "']//xs:extension/@base")
.evaluate(document, XPathConstants.STRING);
}
|
@Test
public void testFindBaseType() throws Exception {
  // Build a namespace-aware DOM plus XPath over a schema using inheritance.
  Document schemaDoc = XmlHelper.buildNamespaceAwareDocument(
      ResourceUtils.getResourceAsFile("xmls/complex_type_w_parent.xml"));
  XPath xpath = XmlHelper.buildXPath(new CamelSpringNamespace());
  domFinder = new DomFinder(schemaDoc, xpath);
  assertEquals("tns:abstractKeyManagersParametersFactoryBean",
      domFinder.findBaseType("keyManagersParametersFactoryBean"));
}
|
public boolean isLastBlock(int blockNumber) {
  // An empty file has no blocks at all, so nothing can be the last block.
  if (fileSize == 0) {
    return false;
  }
  // Reject out-of-range block numbers before the comparison.
  throwIfInvalidBlockNumber(blockNumber);
  return (numBlocks - 1) == blockNumber;
}
|
@Test
public void testArgChecks() throws Exception {
// Exercises BlockData constructor/isLastBlock argument validation; the
// intercept messages must match the implementation's exact wording.
// Should not throw.
new BlockData(10, 5);
new BlockData(5, 10);
new BlockData(0, 10);
// Verify it throws correctly.
intercept(IllegalArgumentException.class, "'fileSize' must not be negative",
() -> new BlockData(-1, 2));
intercept(IllegalArgumentException.class,
"'blockSize' must be a positive integer",
() -> new BlockData(10, 0));
intercept(IllegalArgumentException.class,
"'blockSize' must be a positive integer",
() -> new BlockData(10, -2));
// (10, 3) has 4 blocks, so valid block numbers are [0, 3].
intercept(IllegalArgumentException.class,
"'blockNumber' (-1) must be within the range [0, 3]",
() -> new BlockData(10, 3).isLastBlock(
-1));
intercept(IllegalArgumentException.class,
"'blockNumber' (11) must be within the range [0, 3]",
() -> new BlockData(10, 3).isLastBlock(
11));
}
|
/**
 * Fetches consume stats from the given broker and prints, per message queue,
 * the broker offset, consumer offset, lag (diff) and last consume time.
 *
 * <p>Options: {@code -b} broker address (required), {@code -o} order-topic flag,
 * {@code -t} timeout in ms (default 50000), {@code -l} minimum diff to display.
 *
 * @throws SubCommandException wrapping any failure during fetch or printing
 */
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
    try {
        defaultMQAdminExt = createMQAdminExt(rpcHook);
        String brokerAddr = commandLine.getOptionValue('b').trim();
        // Defaults used when the corresponding option is absent.
        boolean isOrder = false;
        long timeoutMillis = 50000;
        long diffLevel = 0;
        if (commandLine.hasOption('o')) {
            isOrder = Boolean.parseBoolean(commandLine.getOptionValue('o').trim());
        }
        if (commandLine.hasOption('t')) {
            timeoutMillis = Long.parseLong(commandLine.getOptionValue('t').trim());
        }
        if (commandLine.hasOption('l')) {
            diffLevel = Long.parseLong(commandLine.getOptionValue('l').trim());
        }
        ConsumeStatsList consumeStatsList = defaultMQAdminExt.fetchConsumeStatsInBroker(brokerAddr, isOrder, timeoutMillis);
        System.out.printf("%-64s %-64s %-32s %-4s %-20s %-20s %-20s %s%n",
            "#Topic",
            "#Group",
            "#Broker Name",
            "#QID",
            "#Broker Offset",
            "#Consumer Offset",
            "#Diff",
            "#LastTime");
        for (Map<String, List<ConsumeStats>> map : consumeStatsList.getConsumeStatsList()) {
            for (Map.Entry<String, List<ConsumeStats>> entry : map.entrySet()) {
                String group = entry.getKey();
                List<ConsumeStats> consumeStatsArray = entry.getValue();
                for (ConsumeStats consumeStats : consumeStatsArray) {
                    // Sort queues for stable, readable output ordering.
                    List<MessageQueue> mqList = new ArrayList<>(consumeStats.getOffsetTable().keySet());
                    Collections.sort(mqList);
                    for (MessageQueue mq : mqList) {
                        OffsetWrapper offsetWrapper = consumeStats.getOffsetTable().get(mq);
                        long diff = offsetWrapper.getBrokerOffset() - offsetWrapper.getConsumerOffset();
                        // Skip queues whose lag is below the requested threshold.
                        if (diff < diffLevel) {
                            continue;
                        }
                        String lastTime = "-";
                        try {
                            lastTime = UtilAll.formatDate(new Date(offsetWrapper.getLastTimestamp()), UtilAll.YYYY_MM_DD_HH_MM_SS);
                        } catch (Exception ignored) {
                            // Best effort: an unformattable timestamp falls back to "-".
                        }
                        // Rows with no recorded consume timestamp are suppressed entirely.
                        if (offsetWrapper.getLastTimestamp() > 0) {
                            System.out.printf("%-64s %-64s %-32s %-4d %-20d %-20d %-20d %s%n",
                                UtilAll.frontStringAtLeast(mq.getTopic(), 64),
                                group,
                                UtilAll.frontStringAtLeast(mq.getBrokerName(), 32),
                                mq.getQueueId(),
                                offsetWrapper.getBrokerOffset(),
                                offsetWrapper.getConsumerOffset(),
                                diff,
                                lastTime
                            );
                        }
                    }
                }
            }
        }
        System.out.printf("%nDiff Total: %d%n", consumeStatsList.getTotalDiff());
    } catch (Exception e) {
        throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
    } finally {
        defaultMQAdminExt.shutdown();
    }
}
|
@Test
public void testExecute() throws SubCommandException, IllegalAccessException, NoSuchFieldException {
    // Inject the mocked admin client via reflection so execute() does not create a real one.
    Field field = BrokerConsumeStatsSubCommad.class.getDeclaredField("defaultMQAdminExt");
    field.setAccessible(true);
    field.set(cmd, defaultMQAdminExt);
    Options options = ServerUtil.buildCommandlineOptions(new Options());
    // Exercise all supported options: broker addr, timeout, diff level, order flag.
    String[] subargs = new String[] {"-b 127.0.0.1:10911", "-t 3000", "-l 5", "-o true"};
    final CommandLine commandLine =
        ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs,
            cmd.buildCommandlineOptions(options), new DefaultParser());
    // Smoke test: command must complete without throwing SubCommandException.
    cmd.execute(commandLine, options, null);
}
|
/**
 * Instantiates the plugin described by the given option.
 *
 * @param plugin plugin specification (class, original string, optional argument)
 * @return the constructed plugin instance
 * @throws CucumberException if instantiation fails for I/O or URI reasons
 */
Plugin create(Options.Plugin plugin) {
    final String pluginString = plugin.pluginString();
    final Class<? extends Plugin> pluginClass = plugin.pluginClass();
    final String argument = plugin.argument();
    try {
        return instantiate(pluginString, pluginClass, argument);
    } catch (URISyntaxException | IOException failure) {
        // Surface construction problems as Cucumber's own runtime exception.
        throw new CucumberException(failure);
    }
}
|
@Test
void instantiates_rerun_plugin_with_file_arg() {
    // "rerun:<file>" should resolve to the RerunFormatter with the file as its argument.
    PluginOption option = parse("rerun:" + tmp.resolve("rerun.txt"));
    plugin = fc.create(option);
    assertThat(plugin.getClass(), is(equalTo(RerunFormatter.class)));
}
|
/**
 * Merges a source table schema with columns, watermarks and a primary key
 * derived from a CREATE TABLE ... LIKE clause, honoring the per-feature
 * merging strategies.
 *
 * @param mergingStrategies strategy per feature (e.g. INCLUDING/EXCLUDING)
 * @param sourceSchema schema of the referenced source table
 * @param derivedColumns column declarations from the derived table
 * @param derivedWatermarkSpecs watermark declarations from the derived table
 * @param derivedPrimaryKey primary key constraint from the derived table, may be null
 * @return the merged schema
 */
public Schema mergeTables(
        Map<FeatureOption, MergingStrategy> mergingStrategies,
        Schema sourceSchema,
        List<SqlNode> derivedColumns,
        List<SqlWatermark> derivedWatermarkSpecs,
        SqlTableConstraint derivedPrimaryKey) {
    final SchemaBuilder builder =
            new SchemaBuilder(
                    mergingStrategies,
                    sourceSchema,
                    (FlinkTypeFactory) validator.getTypeFactory(),
                    dataTypeFactory,
                    validator,
                    escapeExpression);
    // Order matters: columns first, then watermarks that may reference them,
    // then the primary key over the final column set.
    builder.appendDerivedColumns(mergingStrategies, derivedColumns);
    builder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
    builder.appendDerivedPrimaryKey(derivedPrimaryKey);
    return builder.build();
}
|
@Test
void mergeExcludingConstraintsOnDuplicate() {
    // Source table already carries a named primary key over all three columns.
    Schema sourceSchema =
            Schema.newBuilder()
                    .column("one", DataTypes.INT().notNull())
                    .column("two", DataTypes.STRING().notNull())
                    .column("three", DataTypes.FLOAT())
                    .primaryKeyNamed("constraint-42", new String[] {"one", "two", "three"})
                    .build();
    // EXCLUDING CONSTRAINTS drops the source key, so the derived key wins without conflict.
    Map<FeatureOption, MergingStrategy> mergingStrategies = getDefaultMergingStrategies();
    mergingStrategies.put(FeatureOption.CONSTRAINTS, MergingStrategy.EXCLUDING);
    Schema mergedSchema =
            util.mergeTables(
                    mergingStrategies,
                    sourceSchema,
                    Collections.emptyList(),
                    Collections.emptyList(),
                    primaryKey("one", "two"));
    // Expect the derived key with a generated name; the source's "constraint-42" is gone.
    Schema expectedSchema =
            Schema.newBuilder()
                    .column("one", DataTypes.INT().notNull())
                    .column("two", DataTypes.STRING().notNull())
                    .column("three", DataTypes.FLOAT())
                    .primaryKeyNamed("PK_one_two", new String[] {"one", "two"})
                    .build();
    assertThat(mergedSchema).isEqualTo(expectedSchema);
}
|
/**
 * Returns the SNMP security level configured for this instance.
 *
 * @return the configured {@link SecurityLevel}
 */
@Override
public SecurityLevel getSecurityLevel() {
    return securityLevel;
}
|
@Test
public void testGetSecurityLevel() {
    // NOTE(review): this compares the SecurityLevel enum constant against
    // getSnmpValue() of the configured level — presumably they are equal via
    // assertEquals(Object, Object) semantics, but it looks like the intent may
    // have been assertEquals(SecurityLevel.AUTH_PRIV.getSnmpValue(), ...) or a
    // direct enum comparison. TODO confirm against SecurityLevel's definition.
    assertEquals(SecurityLevel.AUTH_PRIV, v3SnmpConfiguration.getSecurityLevel().getSnmpValue());
}
|
/**
 * Encodes a {@link RoleInfo} as JSON: a "master" field when a master is set,
 * and a "backups" array when at least one backup exists.
 *
 * @param roleInfo role info to encode, must not be null
 * @param context codec context supplying the object mapper
 * @return the encoded JSON object
 */
@Override
public ObjectNode encode(RoleInfo roleInfo, CodecContext context) {
    checkNotNull(roleInfo, "RoleInfo cannot be null");
    final ObjectNode result = context.mapper().createObjectNode();
    // Master is optional; omit the field entirely when absent.
    if (roleInfo.master() != null) {
        result.put(MASTER, roleInfo.master().id());
    }
    final ArrayNode backups = context.mapper().createArrayNode();
    for (NodeId backup : roleInfo.backups()) {
        backups.add(backup.id());
    }
    // An empty backups list is omitted rather than serialized as [].
    if (!roleInfo.backups().isEmpty()) {
        result.set(BACKUPS, backups);
    }
    return result;
}
|
@Test
public void testRoleInfoEncode() {
    NodeId masterNodeId = NodeId.nodeId("1");
    // NOTE(review): backup "1" is the same id as the master — presumably
    // intentional to cover a master that also appears in the backup list;
    // confirm this is the scenario the matcher is meant to cover.
    NodeId backupNodeId1 = NodeId.nodeId("1");
    NodeId backupNodeId2 = NodeId.nodeId("2");
    NodeId backupNodeId3 = NodeId.nodeId("3");
    List<NodeId> backupNodeIds =
            ImmutableList.of(backupNodeId1, backupNodeId2, backupNodeId3);
    RoleInfo roleInfo = new RoleInfo(masterNodeId, backupNodeIds);
    // Encode and verify every field round-trips via the custom matcher.
    ObjectNode roleInfoJson = roleInfoCodec.encode(roleInfo, context);
    assertThat(roleInfoJson, RoleInfoJsonMatcher.matchesRoleInfo(roleInfo));
}
|
/**
 * Returns the chapters parsed so far.
 *
 * @return the list of chapters (the internal list, not a copy)
 */
public List<Chapter> getChapters() {
    return this.chapters;
}
|
@Test
public void testRealFileHindenburgJournalistPro() throws IOException, ID3ReaderException {
    // Parse a real-world MP3 produced by Hindenburg Journalist Pro.
    CountingInputStream inputStream = new CountingInputStream(getClass().getClassLoader()
            .getResource("hindenburg-journalist-pro.mp3").openStream());
    ChapterReader reader = new ChapterReader(inputStream);
    reader.readInputStream();
    List<Chapter> chapters = reader.getChapters();
    assertEquals(2, chapters.size());
    // Start times in milliseconds.
    assertEquals(0, chapters.get(0).getStart());
    assertEquals(5006, chapters.get(1).getStart());
    assertEquals("Chapter Marker 1", chapters.get(0).getTitle());
    assertEquals("Chapter Marker 2", chapters.get(1).getTitle());
    assertEquals("https://example.com/chapter1url", chapters.get(0).getLink());
    assertEquals("https://example.com/chapter2url", chapters.get(1).getLink());
    // Embedded images are referenced by (offset, length) into the file.
    assertEquals(EmbeddedChapterImage.makeUrl(5330, 4015), chapters.get(0).getImageUrl());
    assertEquals(EmbeddedChapterImage.makeUrl(9498, 4364), chapters.get(1).getImageUrl());
}
|
/**
 * Materializes this scan as a Spark {@code Batch}.
 *
 * @return a new {@code SparkBatch} carrying this scan's configuration;
 *     {@code hashCode()} ties the batch back to this scan instance
 */
@Override
public Batch toBatch() {
    final SparkBatch batch = new SparkBatch(
            sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
    return batch;
}
|
@Test
public void testPartitionedBucketLong() throws Exception {
    // Table partitioned into 5 buckets on a long id column.
    createPartitionedTable(spark, tableName, "bucket(5, id)");
    SparkScanBuilder builder = scanBuilder();
    // Build a predicate of the form bucket(5, id) >= 2 and push it down.
    BucketFunction.BucketLong function = new BucketFunction.BucketLong(DataTypes.LongType);
    UserDefinedScalarFunc udf = toUDF(function, expressions(intLit(5), fieldRef("id")));
    Predicate predicate = new Predicate(">=", expressions(udf, intLit(2)));
    pushFilters(builder, predicate);
    Batch scan = builder.build().toBatch();
    // Buckets 2, 3, 4 remain; the fixture yields 6 input partitions for them.
    assertThat(scan.planInputPartitions().length).isEqualTo(6);
    // NOT GTEQ
    builder = scanBuilder();
    predicate = new Not(predicate);
    pushFilters(builder, predicate);
    scan = builder.build().toBatch();
    // Complement: buckets 0 and 1 remain, yielding 4 input partitions.
    assertThat(scan.planInputPartitions().length).isEqualTo(4);
}
|
/**
 * Builds the update requirements for replacing a view: the view's UUID must
 * still match, plus whatever requirements the individual metadata updates add.
 *
 * @param base current view metadata, must not be null
 * @param metadataUpdates updates to apply, must not be null
 * @return the list of requirements to validate before committing
 */
public static List<UpdateRequirement> forReplaceView(
        ViewMetadata base, List<MetadataUpdate> metadataUpdates) {
    Preconditions.checkArgument(null != base, "Invalid view metadata: null");
    Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null");
    final Builder requirements = new Builder(null, false);
    // Replacing a view always pins the UUID so a concurrent re-create is detected.
    requirements.require(new UpdateRequirement.AssertViewUUID(base.uuid()));
    for (MetadataUpdate update : metadataUpdates) {
        requirements.update(update);
    }
    return requirements.build();
}
|
@Test
public void addSchemaForView() {
    int lastColumnId = 1;
    // Three AddSchema updates are folded into requirements for a view replace.
    List<UpdateRequirement> requirements =
            UpdateRequirements.forReplaceView(
                    viewMetadata,
                    ImmutableList.of(
                            new MetadataUpdate.AddSchema(new Schema(), lastColumnId),
                            new MetadataUpdate.AddSchema(new Schema(), lastColumnId + 1),
                            new MetadataUpdate.AddSchema(new Schema(), lastColumnId + 2)));
    requirements.forEach(req -> req.validate(viewMetadata));
    // AddSchema contributes no extra requirement for views: only the UUID assertion remains.
    assertThat(requirements)
            .hasSize(1)
            .hasOnlyElementsOfTypes(UpdateRequirement.AssertViewUUID.class);
    assertViewUUID(requirements);
}
|
/**
 * Returns the provenance describing how this data source was configured.
 *
 * @return the data source provenance
 */
@Override
public ConfiguredDataSourceProvenance getProvenance() {
    return this.provenance;
}
|
@Test
public void testBasic() {
    // Load the CSV fixture (header row enabled) into a mutable dataset.
    CSVDataSource<MockOutput> dataSource = new CSVDataSource<>(dataFile, rowProcessor, true);
    MutableDataset<MockOutput> dataset = new MutableDataset<>(dataSource);
    assertEquals(6,dataset.size(),"Found an incorrect number of rows when loading the csv.");
    // Provenance must survive a marshal/unmarshal round trip unchanged.
    DatasetProvenance prov = dataset.getProvenance();
    List<ObjectMarshalledProvenance> datasetProvenance = ProvenanceUtil.marshalProvenance(prov);
    assertFalse(datasetProvenance.isEmpty());
    ObjectProvenance unmarshalledProvenance = ProvenanceUtil.unmarshalProvenance(datasetProvenance);
    assertEquals(prov,unmarshalledProvenance);
}
|
/**
 * Registers a state store with the topology and connects it to the given processors.
 * Delegates to the internal overload with {@code allowOverride} disabled.
 *
 * @param storeBuilder builder for the store to add
 * @param processorNames processors that should be connected to the store
 */
public final void addStateStore(final StoreBuilder<?> storeBuilder,
                                final String... processorNames) {
    final StoreBuilderWrapper wrapped = new StoreBuilderWrapper(storeBuilder);
    addStateStore(wrapped, false, processorNames);
}
|
@Test
public void shouldNotAddNullStateStoreSupplier() {
    // A null store builder must be rejected up front with an NPE.
    final StoreBuilder<?> nullStoreBuilder = null;
    assertThrows(NullPointerException.class, () -> builder.addStateStore(nullStoreBuilder));
}
|
/**
 * Pretty-prints the given XML string: elements are placed on their own lines
 * and indented by {@code STEP_PREFIX} per nesting level, while non-whitespace
 * character data, CDATA and comments are preserved verbatim.
 *
 * <p>Implemented as a StAX read/write pipeline. Start elements are buffered
 * (via {@code StartElementBuffer}) so that an element with no content can be
 * written as a self-closing tag.
 *
 * @param xml the XML document to reformat
 * @return the formatted XML
 * @throws RuntimeException wrapping any {@link XMLStreamException} or an
 *     unexpected StAX event type
 */
public static String format( String xml ) {
  XMLStreamReader rd = null;
  XMLStreamWriter wr = null;
  StringWriter result = new StringWriter();
  try {
    rd = INPUT_FACTORY.createXMLStreamReader( new StringReader( xml ) );
    synchronized ( OUTPUT_FACTORY ) {
      // BACKLOG-18743: This object was not thread safe in some scenarios
      // causing the `result` variable to have data from other concurrent executions
      // and making the final output invalid.
      wr = OUTPUT_FACTORY.createXMLStreamWriter( result );
    }
    // Pending start element, held back so empty elements can self-close.
    StartElementBuffer startElementBuffer = null;
    // Accumulated character data since the last structural event.
    StringBuilder str = new StringBuilder();
    // Current indentation prefix; grows/shrinks with element nesting.
    StringBuilder prefix = new StringBuilder();
    // Accumulated CDATA text; consecutive CDATA events are coalesced.
    StringBuilder cdata = new StringBuilder();
    boolean wasStart = false;
    boolean wasSomething = false;
    while ( rd.hasNext() ) {
      int event = rd.next();
      if ( event != XMLStreamConstants.CDATA && cdata.length() > 0 ) {
        // was CDATA — flush the coalesced CDATA section before handling this event.
        wr.writeCData( cdata.toString() );
        cdata.setLength( 0 );
      }
      if ( startElementBuffer != null ) {
        if ( event == XMLStreamConstants.END_ELEMENT ) {
          // Start immediately followed by end: emit as a self-closing tag.
          startElementBuffer.writeTo( wr, true );
          startElementBuffer = null;
          prefix.setLength( prefix.length() - STEP_PREFIX.length() );
          wasStart = false;
          continue;
        } else {
          // Element has content: emit the buffered start tag normally.
          startElementBuffer.writeTo( wr, false );
          startElementBuffer = null;
        }
      }
      switch ( event ) {
        case XMLStreamConstants.START_ELEMENT:
          // Write pending text, or a newline+indent when only whitespace was seen.
          if ( !whitespacesOnly( str ) ) {
            wr.writeCharacters( str.toString() );
          } else if ( wasSomething ) {
            wr.writeCharacters( "\n" + prefix );
          }
          str.setLength( 0 );
          prefix.append( STEP_PREFIX );
          startElementBuffer = new StartElementBuffer( rd );
          wasStart = true;
          wasSomething = true;
          break;
        case XMLStreamConstants.END_ELEMENT:
          prefix.setLength( prefix.length() - STEP_PREFIX.length() );
          if ( wasStart ) {
            // Text directly inside an element is kept inline with the tags.
            wr.writeCharacters( str.toString() );
          } else {
            if ( !whitespacesOnly( str ) ) {
              wr.writeCharacters( str.toString() );
            } else {
              // Closing tag of a container goes on its own indented line.
              wr.writeCharacters( "\n" + prefix );
            }
          }
          str.setLength( 0 );
          wr.writeEndElement();
          wasStart = false;
          break;
        case XMLStreamConstants.SPACE:
        case XMLStreamConstants.CHARACTERS:
          str.append( rd.getText() );
          break;
        case XMLStreamConstants.CDATA:
          if ( !whitespacesOnly( str ) ) {
            wr.writeCharacters( str.toString() );
          }
          str.setLength( 0 );
          cdata.append( rd.getText() );
          wasSomething = true;
          break;
        case XMLStreamConstants.COMMENT:
          if ( !whitespacesOnly( str ) ) {
            wr.writeCharacters( str.toString() );
          } else if ( wasSomething ) {
            wr.writeCharacters( "\n" + prefix );
          }
          str.setLength( 0 );
          wr.writeComment( rd.getText() );
          wasSomething = true;
          break;
        case XMLStreamConstants.END_DOCUMENT:
          // Trailing newline so the output ends cleanly.
          wr.writeCharacters( "\n" );
          wr.writeEndDocument();
          break;
        default:
          throw new RuntimeException( "Unknown XML event: " + event );
      }
    }
    wr.flush();
    return result.toString();
  } catch ( XMLStreamException ex ) {
    throw new RuntimeException( ex );
  } finally {
    // Best-effort cleanup; close failures are deliberately ignored.
    try {
      if ( wr != null ) {
        wr.close();
      }
    } catch ( Exception ex ) {
    }
    try {
      if ( rd != null ) {
        rd.close();
      }
    } catch ( Exception ex ) {
    }
  }
}
|
@Test
public void test2() throws Exception {
    // Compare formatter output against a golden expected file for input #2.
    String inXml, expectedXml;
    try ( InputStream in = XMLFormatterTest.class.getResourceAsStream( "XMLFormatterIn2.xml" ) ) {
      inXml = IOUtils.toString( in );
    }
    try ( InputStream in = XMLFormatterTest.class.getResourceAsStream( "XMLFormatterExpected2.xml" ) ) {
      expectedXml = IOUtils.toString( in );
    }
    String result = XMLFormatter.format( inXml );
    // XML-aware comparison: insignificant whitespace differences are tolerated.
    assertXMLEqual( expectedXml, result );
}
|
/**
 * Reports completion. When the delegate future is done, this future's result
 * is eagerly materialized (exceptions from that step are swallowed here and
 * surface later via get()), and the future is reported done. Otherwise defers
 * to the superclass.
 */
@Override
public boolean isDone() {
    if (delegate.isDone()) {
        try {
            // Effectively "wait forever": the delegate is already done, so this
            // only forces result conversion/callback delivery, it does not block.
            ensureResultSet(Long.MAX_VALUE, TimeUnit.DAYS);
        } catch (ExecutionException | CancellationException | TimeoutException ignored) {
            // Intentionally ignored: isDone() must not throw; the failure is
            // still observable through get()/callbacks.
            ignore(ignored);
        }
        return true;
    } else {
        return super.isDone();
    }
}
|
@Test
public void completeDelegate_withException_callbackBeforeGet_invokeIsDoneOnOuter_callbacksRun() {
    BiConsumer<String, Throwable> callback = getStringExecutionCallback();
    // Make the delegate complete exceptionally, then register the callback
    // before anyone calls get() on the outer future.
    delegateThrowException = true;
    delegateFuture.run();
    outerFuture.whenCompleteAsync(callback, CALLER_RUNS);
    // isDone() alone must be enough to propagate completion and fire callbacks.
    outerFuture.isDone();
    // The callback must observe only the failure path: no value, one throwable.
    verify(callback, times(0)).accept(any(String.class), isNull());
    verify(callback, times(1)).accept(isNull(), any(Throwable.class));
    verifyNoMoreInteractions(callback);
}
|
/**
 * Entry point: runs the value-aggregator word count job and terminates with
 * exit status 0 on success, 1 on failure.
 *
 * @param args standard ValueAggregatorJob arguments (input, output, reducers, format)
 */
@SuppressWarnings("unchecked")
public static void main(String[] args)
    throws IOException, InterruptedException, ClassNotFoundException {
  // Build the aggregation job using the word-count plugin descriptor.
  Job job = ValueAggregatorJob.createValueAggregatorJob(args,
      new Class[] {WordCountPlugInClass.class});
  job.setJarByClass(AggregateWordCount.class);
  final boolean succeeded = job.waitForCompletion(true);
  // Exit via ExitUtil so tests can intercept the status instead of System.exit.
  ExitUtil.terminate(succeeded ? 0 : 1);
}
|
@Test
void testAggregateTestCount()
    throws IOException, ClassNotFoundException, InterruptedException {
  // main() calls ExitUtil.terminate; disable real exit so we can assert the status.
  ExitUtil.disableSystemExit();
  FileSystem fs = getFileSystem();
  fs.mkdirs(INPUT_PATH);
  // Two small input files sharing the word "Hello".
  Path file1 = new Path(INPUT_PATH, "file1");
  Path file2 = new Path(INPUT_PATH, "file2");
  FileUtil.write(fs, file1, "Hello World");
  FileUtil.write(fs, file2, "Hello Hadoop");
  String[] args =
      new String[]{INPUT_PATH.toString(), OUTPUT_PATH.toString(), "1",
          "textinputformat"};
  // Run AggregateWordCount Job.
  try {
    AggregateWordCount.main(args);
  } catch (ExitException e) {
    // Successful runs terminate with status 0.
    assertEquals(0, e.status);
  }
  String allEntries;
  try (FSDataInputStream stream = fs
      .open(new Path(OUTPUT_PATH, "part-r-00000"));) {
    allEntries = IOUtils.toString(stream, Charset.defaultCharset());
  }
  // Output is sorted by word; counts reflect both files.
  assertEquals("Hadoop\t1\n" + "Hello\t2\n" + "World\t1\n", allEntries);
}
|
/**
 * Returns the cached list of least-loaded nodes, refreshing the cache when it
 * has never been populated or is older than the configured refresh interval.
 *
 * @return the (possibly cached) least-loaded remote nodes
 */
@VisibleForTesting
synchronized List<RemoteNode> getLeastLoadedNodes() {
  final long now = System.currentTimeMillis();
  final boolean cacheStale = now - lastCacheUpdateTime > cacheRefreshInterval;
  if (cacheStale || cachedNodes == null) {
    cachedNodes = convertToRemoteNodes(
        this.nodeMonitor.selectLeastLoadedNodes(this.numNodes));
    // Only advance the timestamp when nodes were actually returned, so an
    // empty result is retried on the next call instead of being cached.
    if (cachedNodes.size() > 0) {
      lastCacheUpdateTime = now;
    }
  }
  return cachedNodes;
}
|
/**
 * End-to-end check of promoting an OPPORTUNISTIC container to GUARANTEED and
 * demoting it back, before the container starts: the promotion must only take
 * effect on a heartbeat from the node actually hosting the container,
 * duplicate/stale update requests must produce update errors, and queue
 * metrics must reflect each transition.
 */
@Test(timeout = 600000)
public void testContainerPromoteAndDemoteBeforeContainerStart() throws Exception {
  // Two hosts, two NMs each (same host, different ports) to exercise
  // host-vs-node matching during promotion.
  HashMap<NodeId, MockNM> nodes = new HashMap<>();
  MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
  nodes.put(nm1.getNodeId(), nm1);
  MockNM nm2 = new MockNM("h1:4321", 4096, rm.getResourceTrackerService());
  nodes.put(nm2.getNodeId(), nm2);
  MockNM nm3 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
  nodes.put(nm3.getNodeId(), nm3);
  MockNM nm4 = new MockNM("h2:4321", 4096, rm.getResourceTrackerService());
  nodes.put(nm4.getNodeId(), nm4);
  nm1.registerNode();
  nm2.registerNode();
  nm3.registerNode();
  nm4.registerNode();
  nm1.nodeHeartbeat(oppContainersStatus, true);
  nm2.nodeHeartbeat(oppContainersStatus, true);
  nm3.nodeHeartbeat(oppContainersStatus, true);
  nm4.nodeHeartbeat(oppContainersStatus, true);
  OpportunisticContainerAllocatorAMService amservice =
      (OpportunisticContainerAllocatorAMService) rm
          .getApplicationMasterService();
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(1 * GB, rm)
          .withAppName("app")
          .withUser("user")
          .withAcls(null)
          .withQueue("default")
          .withUnmanagedAM(false)
          .build();
  RMApp app1 = MockRMAppSubmitter.submit(rm, data);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
  ResourceScheduler scheduler = rm.getResourceScheduler();
  // All nodes 1 - 4 will be applicable for scheduling.
  nm1.nodeHeartbeat(oppContainersStatus, true);
  nm2.nodeHeartbeat(oppContainersStatus, true);
  nm3.nodeHeartbeat(oppContainersStatus, true);
  nm4.nodeHeartbeat(oppContainersStatus, true);
  GenericTestUtils.waitFor(() ->
      amservice.getLeastLoadedNodes().size() == 4, 10, 10 * 100);
  QueueMetrics metrics = ((CapacityScheduler) scheduler).getRootQueue()
      .getMetrics();
  // Verify Metrics
  verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
  // Request two OPPORTUNISTIC containers of 1GB each.
  AllocateResponse allocateResponse = am1.allocate(
      Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
          "*", Resources.createResource(1 * GB), 2, true, null,
          ExecutionTypeRequest.newInstance(
              ExecutionType.OPPORTUNISTIC, true))),
      null);
  List<Container> allocatedContainers = allocateResponse
      .getAllocatedContainers();
  Assert.assertEquals(2, allocatedContainers.size());
  Container container = allocatedContainers.get(0);
  MockNM allocNode = nodes.get(container.getNodeId());
  // Find the *other* NM on the same host as the allocated container.
  MockNM sameHostDiffNode = null;
  for (NodeId n : nodes.keySet()) {
    if (n.getHost().equals(allocNode.getNodeId().getHost()) &&
        n.getPort() != allocNode.getNodeId().getPort()) {
      sameHostDiffNode = nodes.get(n);
    }
  }
  // Verify Metrics After OPP allocation (Nothing should change)
  verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
  am1.sendContainerUpdateRequest(
      Arrays.asList(UpdateContainerRequest.newInstance(0,
          container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
          null, ExecutionType.GUARANTEED)));
  // Node on same host should not result in allocation
  sameHostDiffNode.nodeHeartbeat(oppContainersStatus, true);
  rm.drainEvents();
  allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
  Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
  // Wait for scheduler to process all events
  dispatcher.waitForEventThreadToWait();
  rm.drainEvents();
  // Verify Metrics After OPP allocation (Nothing should change again)
  verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
  // Send Promotion req again... this should result in update error
  allocateResponse = am1.sendContainerUpdateRequest(
      Arrays.asList(UpdateContainerRequest.newInstance(0,
          container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
          null, ExecutionType.GUARANTEED)));
  Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
  Assert.assertEquals(1, allocateResponse.getUpdateErrors().size());
  Assert.assertEquals("UPDATE_OUTSTANDING_ERROR",
      allocateResponse.getUpdateErrors().get(0).getReason());
  Assert.assertEquals(container.getId(),
      allocateResponse.getUpdateErrors().get(0)
          .getUpdateContainerRequest().getContainerId());
  // Send Promotion req again with incorrect version...
  // this should also result in update error
  allocateResponse = am1.sendContainerUpdateRequest(
      Arrays.asList(UpdateContainerRequest.newInstance(1,
          container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
          null, ExecutionType.GUARANTEED)));
  Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
  Assert.assertEquals(1, allocateResponse.getUpdateErrors().size());
  Assert.assertEquals("INCORRECT_CONTAINER_VERSION_ERROR",
      allocateResponse.getUpdateErrors().get(0).getReason());
  Assert.assertEquals(0,
      allocateResponse.getUpdateErrors().get(0)
          .getCurrentContainerVersion());
  Assert.assertEquals(container.getId(),
      allocateResponse.getUpdateErrors().get(0)
          .getUpdateContainerRequest().getContainerId());
  // Ensure after correct node heartbeats, we should get the allocation
  allocNode.nodeHeartbeat(oppContainersStatus, true);
  rm.drainEvents();
  allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
  Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
  Container uc =
      allocateResponse.getUpdatedContainers().get(0).getContainer();
  // Promotion bumps the container version by one.
  Assert.assertEquals(ExecutionType.GUARANTEED, uc.getExecutionType());
  Assert.assertEquals(uc.getId(), container.getId());
  Assert.assertEquals(uc.getVersion(), container.getVersion() + 1);
  // Verify Metrics After OPP allocation :
  // Allocated cores+mem should have increased, available should decrease
  verifyMetrics(metrics, 14336, 14, 2048, 2, 2);
  nm1.nodeHeartbeat(oppContainersStatus, true);
  nm2.nodeHeartbeat(oppContainersStatus, true);
  nm3.nodeHeartbeat(oppContainersStatus, true);
  nm4.nodeHeartbeat(oppContainersStatus, true);
  rm.drainEvents();
  // Verify that the container is still in ACQUIRED state wrt the RM.
  RMContainer rmContainer = ((CapacityScheduler) scheduler)
      .getApplicationAttempt(
          uc.getId().getApplicationAttemptId()).getRMContainer(uc.getId());
  Assert.assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());
  // Now demote the container back..
  allocateResponse = am1.sendContainerUpdateRequest(
      Arrays.asList(UpdateContainerRequest.newInstance(uc.getVersion(),
          uc.getId(), ContainerUpdateType.DEMOTE_EXECUTION_TYPE,
          null, ExecutionType.OPPORTUNISTIC)));
  // This should happen in the same heartbeat..
  Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
  uc = allocateResponse.getUpdatedContainers().get(0).getContainer();
  // Demotion bumps the version again (original + 2).
  Assert.assertEquals(ExecutionType.OPPORTUNISTIC, uc.getExecutionType());
  Assert.assertEquals(uc.getId(), container.getId());
  Assert.assertEquals(uc.getVersion(), container.getVersion() + 2);
  // Wait for scheduler to finish processing events
  dispatcher.waitForEventThreadToWait();
  rm.drainEvents();
  // Verify Metrics After OPP allocation :
  // Everything should have reverted to what it was
  verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
}
|
/**
 * Binds a guard expression from the AST into a {@link GuardedByExpression},
 * rejecting unbindable expressions and raw type literals.
 *
 * @param exp the @GuardedBy expression to bind
 * @param context binder context for name resolution
 * @return the bound guard expression
 */
private static GuardedByExpression bind(JCTree.JCExpression exp, BinderContext context) {
  final GuardedByExpression bound = BINDER.visit(exp, context);
  // Binder returns null when the expression cannot be resolved.
  checkGuardedBy(bound != null, String.valueOf(exp));
  // A bare type literal is not a valid lock expression.
  checkGuardedBy(bound.kind() != Kind.TYPE_LITERAL, "Raw type literal: %s", exp);
  return bound;
}
|
@Test
public void outer_lock_simpleName() {
    // A simple name "lock" inside inner class Test must resolve through the
    // enclosing Outer instance: this -> outer Outer -> lock.
    assertThat(
            bind(
                "Test",
                "lock",
                forSourceLines(
                    "threadsafety/Test.java",
                    "package threadsafety;",
                    "import javax.annotation.concurrent.GuardedBy;",
                    "class Outer {",
                    "  final Object lock = new Object();",
                    "  class Test {}",
                    "}")))
        .isEqualTo("(SELECT (SELECT (THIS) outer$threadsafety.Outer) lock)");
}
|
/**
 * Parses a User-Agent header string into a structured {@link UserAgent}.
 *
 * @param userAgentString the raw User-Agent header value
 * @return the parsed user agent
 */
public static UserAgent parse(String userAgentString) {
    final UserAgent parsed = UserAgentParser.parse(userAgentString);
    return parsed;
}
|
@Test
public void parseWindows10WithEdgeTest() {
    // Legacy (EdgeHTML-era UA string shape) Edge 18 on 64-bit Windows 10.
    final String uaStr = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763";
    final UserAgent ua = UserAgentUtil.parse(uaStr);
    // Browser resolves to Edge despite the Chrome/Safari tokens in the string.
    assertEquals("MSEdge", ua.getBrowser().toString());
    assertEquals("18.17763", ua.getVersion());
    assertEquals("Webkit", ua.getEngine().toString());
    assertEquals("537.36", ua.getEngineVersion());
    assertEquals("Windows 10 or Windows Server 2016", ua.getOs().toString());
    assertEquals("10.0", ua.getOsVersion());
    assertEquals("Windows", ua.getPlatform().toString());
    assertFalse(ua.isMobile());
}
|
/**
 * Builds the JVM memory gauge set: totals (heap + non-heap), heap, non-heap,
 * and per-memory-pool metrics (usage ratio, max, used, committed, init, and
 * used-after-gc where supported).
 *
 * @return an unmodifiable map of metric name to gauge
 */
@Override
public Map<String, Metric> getMetrics() {
    final Map<String, Metric> gauges = new HashMap<>();
    gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit() +
            mxBean.getNonHeapMemoryUsage().getInit());
    gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed() +
            mxBean.getNonHeapMemoryUsage().getUsed());
    // -1 means "undefined max" for non-heap; propagate it rather than adding it to heap max.
    gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1 ?
            -1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax());
    gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted() +
            mxBean.getNonHeapMemoryUsage().getCommitted());
    gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit());
    gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed());
    gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax());
    gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted());
    gauges.put("heap.usage", new RatioGauge() {
        @Override
        protected Ratio getRatio() {
            final MemoryUsage usage = mxBean.getHeapMemoryUsage();
            return Ratio.of(usage.getUsed(), usage.getMax());
        }
    });
    gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit());
    gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed());
    gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax());
    gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted());
    gauges.put("non-heap.usage", new RatioGauge() {
        @Override
        protected Ratio getRatio() {
            final MemoryUsage usage = mxBean.getNonHeapMemoryUsage();
            // Fall back to committed when max is undefined (-1) to avoid a negative ratio.
            return Ratio.of(usage.getUsed(), usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
        }
    });
    for (final MemoryPoolMXBean pool : memoryPools) {
        // Pool names may contain spaces; normalize them to dashes for metric names.
        final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-"));
        gauges.put(name(poolName, "usage"), new RatioGauge() {
            @Override
            protected Ratio getRatio() {
                MemoryUsage usage = pool.getUsage();
                return Ratio.of(usage.getUsed(),
                        usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
            }
        });
        gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax());
        gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed());
        gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted());
        // Only register GC usage metrics if the memory pool supports usage statistics.
        if (pool.getCollectionUsage() != null) {
            gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () ->
                    pool.getCollectionUsage().getUsed());
        }
        gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit());
    }
    return Collections.unmodifiableMap(gauges);
}
|
@Test
public void hasAGaugeForHeapUsed() {
    // The fixture's mocked MemoryMXBean reports 30 bytes of used heap.
    final Gauge gauge = (Gauge) gauges.getMetrics().get("heap.used");
    assertThat(gauge.getValue())
            .isEqualTo(30L);
}
|
/**
 * Counts the largest chain of hint-free join nodes reachable from {@code root}.
 * A hint-free join adds its children's counts plus itself; any other operator
 * breaks the chain and only the maximum child count survives. Also records in
 * {@code hasOuterOrSemi[0]} whether any counted join is an outer or semi/anti join.
 *
 * @param root expression tree root
 * @param hasOuterOrSemi single-element out-parameter flag
 * @return the join-node count as described above
 */
private static int countJoinNode(OptExpression root, boolean[] hasOuterOrSemi) {
    int count = 0;
    Operator operator = root.getOp();
    // Hoisted: the same "hint-free join" predicate was previously evaluated
    // once per child and once more at the tail.
    boolean isHintFreeJoin = operator instanceof LogicalJoinOperator
            && ((LogicalJoinOperator) operator).getJoinHint().isEmpty();
    for (OptExpression child : root.getInputs()) {
        if (isHintFreeJoin) {
            // Chain continues through this join: sum the children.
            count += countJoinNode(child, hasOuterOrSemi);
        } else {
            // Chain broken: keep only the best child subtree.
            count = Math.max(count, countJoinNode(child, hasOuterOrSemi));
        }
    }
    if (isHintFreeJoin) {
        count += 1;
        if (!hasOuterOrSemi[0]) {
            LogicalJoinOperator joinOperator = (LogicalJoinOperator) operator;
            if (joinOperator.getJoinType().isOuterJoin() || joinOperator.getJoinType().isSemiAntiJoin()) {
                hasOuterOrSemi[0] = true;
            }
        }
    }
    return count;
}
|
@Test
public void testCountJoinNode() {
    // Chain of outer/semi joins with values leaves: only one semi/anti join
    // chain segment is counted for the semi-anti join set.
    OptExpression root = OptExpression.create(
            new LogicalJoinOperator(JoinOperator.LEFT_OUTER_JOIN, null),
            OptExpression.create(new LogicalJoinOperator(JoinOperator.LEFT_OUTER_JOIN, null),
                    OptExpression.create(new LogicalJoinOperator(JoinOperator.LEFT_SEMI_JOIN, null)),
                    OptExpression.create(new LogicalValuesOperator(Lists.newArrayList(), Lists.newArrayList()))),
            OptExpression.create(new LogicalValuesOperator(Lists.newArrayList(), Lists.newArrayList())));
    assertEquals(1, Utils.countJoinNodeSize(root, JoinOperator.semiAntiJoinSet()));
    //                  outer join  (left child semi join node = 1, right child semi join node = 3) => result is 3
    //                 /          \
    //        semi join           semi join  (left child semi join node = 1, right child semi join node = 1)
    //                           /          \ => result is 1 + 1 + 1 = 3
    //                 outer join            semi join
    //                /          \
    //        semi join           node
    root = OptExpression.create(
            new LogicalJoinOperator(JoinOperator.LEFT_OUTER_JOIN, null),
            OptExpression.create(new LogicalJoinOperator(JoinOperator.LEFT_SEMI_JOIN, null)),
            OptExpression.create(new LogicalProjectOperator(Maps.newHashMap()),
                    OptExpression.create(new LogicalJoinOperator(JoinOperator.LEFT_SEMI_JOIN, null),
                            OptExpression.create(new LogicalJoinOperator(JoinOperator.LEFT_OUTER_JOIN, null),
                                    OptExpression.create(
                                            new LogicalJoinOperator(JoinOperator.LEFT_SEMI_JOIN, null)),
                                    OptExpression.create(
                                            new LogicalValuesOperator(Lists.newArrayList(), Lists.newArrayList()))),
                            OptExpression.create(
                                    new LogicalJoinOperator(JoinOperator.LEFT_SEMI_JOIN, null)))));
    assertEquals(3, Utils.countJoinNodeSize(root, JoinOperator.semiAntiJoinSet()));
    //                  semi join  (left child semi join node = 0, right child semi join node = 3) => result is 0 + 3 + 1 = 4
    //                 /          \
    //       inner join            semi join  (left child semi join node = 2, right child semi join node = 0)
    //                            /          \ => result is 2 + 0 + 1 = 3
    //                  semi join             node
    //                 /         \
    //         semi join          node
    root = OptExpression.create(
            new LogicalJoinOperator(JoinOperator.LEFT_SEMI_JOIN, null),
            OptExpression.create(new LogicalJoinOperator(JoinOperator.INNER_JOIN, null)),
            OptExpression.create(new LogicalProjectOperator(Maps.newHashMap()),
                    OptExpression.create(new LogicalJoinOperator(JoinOperator.LEFT_SEMI_JOIN, null),
                            OptExpression.create(new LogicalJoinOperator(JoinOperator.LEFT_SEMI_JOIN, null),
                                    OptExpression.create(
                                            new LogicalJoinOperator(JoinOperator.LEFT_SEMI_JOIN, null)),
                                    OptExpression.create(
                                            new LogicalValuesOperator(Lists.newArrayList(), Lists.newArrayList()))),
                            OptExpression.create(
                                    new LogicalValuesOperator(Lists.newArrayList(), Lists.newArrayList())))));
    assertEquals(4, Utils.countJoinNodeSize(root, JoinOperator.semiAntiJoinSet()));
}
|
/**
 * Returns the total number of edges on the relative path, i.e. the sum of
 * the steps up from the start node and the steps down to the end node.
 *
 * @return the path length in edges
 */
public int getPathLength() {
    final int upSteps = beginPath.length;
    final int downSteps = endPath.length;
    return upSteps + downSteps;
}
|
@Test
public void oneLevelAncestorPathLength(){
    // Two siblings under the same parent: the relative path is
    // node1 -> parent -> node2, i.e. two edges.
    final NodeModel parent = root();
    final NodeModel first = new NodeModel("node1", map);
    parent.insert(first);
    final NodeModel second = new NodeModel("node2", map);
    parent.insert(second);
    final NodeRelativePath relativePath = new NodeRelativePath(first, second);
    assertThat(relativePath.getPathLength(), equalTo(2));
}
|
/**
 * Returns the line-hash version for the component: WITH_SIGNIFICANT_CODE when
 * significant-code ranges are available for it, WITHOUT_SIGNIFICANT_CODE otherwise.
 *
 * @param component the component to look up
 * @return the db value of the applicable {@link LineHashVersion}
 */
@Override
public int getLineHashesVersion(Component component) {
    final boolean hasSignificantCode =
            significantCodeRepository.getRangesPerLine(component).isPresent();
    return hasSignificantCode
            ? LineHashVersion.WITH_SIGNIFICANT_CODE.getDbValue()
            : LineHashVersion.WITHOUT_SIGNIFICANT_CODE.getDbValue();
}
|
@Test
public void should_return_version_of_line_hashes_without_significant_code_in_the_report() {
    // No significant-code ranges available => WITHOUT_SIGNIFICANT_CODE version.
    when(significantCodeRepository.getRangesPerLine(file)).thenReturn(Optional.empty());
    assertThat(underTest.getLineHashesVersion(file)).isEqualTo(LineHashVersion.WITHOUT_SIGNIFICANT_CODE.getDbValue());
    // Exactly one repository lookup and no fallback to the db-stored version.
    verify(significantCodeRepository).getRangesPerLine(file);
    verifyNoMoreInteractions(significantCodeRepository);
    verifyNoInteractions(dbLineHashVersion);
}
|
/**
 * Generates a new request id.
 *
 * @return the string form of a random (type 4) UUID
 */
@Override
public String generate() {
    final UUID uuid = UUID.randomUUID();
    return uuid.toString();
}
|
@Test
public void generate_shouldGenerateUniqueIds() {
    String requestId1 = requestIdGenerator.generate();
    String requestId2 = requestIdGenerator.generate();
    String requestId3 = requestIdGenerator.generate();
    // Check all three pairs: the original chained assertion only compared
    // requestId1 against the other two and never requestId2 against requestId3.
    assertThat(requestId1).isNotEqualTo(requestId2).isNotEqualTo(requestId3);
    assertThat(requestId2).isNotEqualTo(requestId3);
}
|
/**
 * Parses the given command-line arguments into a {@code RuntimeOptionsBuilder}.
 * Varargs convenience overload delegating to the {@code List}-based parse.
 *
 * @param args raw command-line arguments
 * @return the populated options builder
 */
public RuntimeOptionsBuilder parse(String... args) {
    final List<String> argList = Arrays.asList(args);
    return parse(argList);
}
|
@Test
void threads_default_1() {
    // With no arguments at all, the thread count must default to 1.
    RuntimeOptions options = parser
            .parse()
            .build();
    assertThat(options.getThreads(), is(1));
}
|
/**
 * Probability mass function of the Bernoulli distribution.
 *
 * @param k outcome value
 * @return q for k == 0, p for k == 1, and 0.0 for any other k
 */
@Override
public double p(int k) {
    switch (k) {
        case 0:
            return q;
        case 1:
            return p;
        default:
            // Bernoulli support is {0, 1}; everything else has zero mass.
            return 0.0;
    }
}
|
@Test
public void testP() {
    System.out.println("p");
    // For Bernoulli(0.3): P(0)=0.7, P(1)=0.3, anything else has zero mass.
    BernoulliDistribution bernoulli = new BernoulliDistribution(0.3);
    bernoulli.rand();
    assertEquals(0.7, bernoulli.p(0), 1E-7);
    assertEquals(0.3, bernoulli.p(1), 1E-7);
    assertEquals(0.0, bernoulli.p(2), 1E-7);
}
|
/**
 * Recomputes the internal "entry current directory" variable and fires a
 * change event with the previous and new values.
 *
 * Resolution order for the new value: the job repository directory when a
 * repository is attached, else the job filename directory when a filename is
 * set, else the existing entry-current-directory value (i.e. unchanged).
 */
protected void updateCurrentDir() {
  String prevCurrentDir = variables.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY );
  // Select which variable supplies the new current directory (see Javadoc).
  String currentDir = variables.getVariable(
    repository != null
      ? Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY
      : filename != null
      ? Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY
      : Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY );
  variables.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, currentDir );
  fireCurrentDirectoryChanged( prevCurrentDir, currentDir );
}
|
@Test
public void testUpdateCurrentDirWithRepository( ) {
  // With a repository attached, updateCurrentDir() must take the current
  // directory from INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, ignoring both
  // the filename directory and the previously set value.
  JobMeta jobMetaTest = new JobMeta( );
  RepositoryDirectoryInterface path = mock( RepositoryDirectoryInterface.class );
  when( path.getPath() ).thenReturn( "aPath" );
  jobMetaTest.setRepository( mock( Repository.class ) );
  jobMetaTest.setRepositoryDirectory( path );
  jobMetaTest.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, "Original value defined at run execution" );
  jobMetaTest.setVariable( Const.INTERNAL_VARIABLE_JOB_FILENAME_DIRECTORY, "file:///C:/SomeFilenameDirectory" );
  jobMetaTest.setVariable( Const.INTERNAL_VARIABLE_JOB_REPOSITORY_DIRECTORY, "/SomeRepDirectory" );
  jobMetaTest.updateCurrentDir();
  assertEquals( "/SomeRepDirectory", jobMetaTest.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) );
}
|
/**
 * Returns an iterable containing all distinct keys in this multimap.
 *
 * <p>When the state has been cleared, only keys from the local pending adds
 * are returned. Otherwise persisted keys are iterated first (skipping keys
 * that are pending removal), followed by pending adds that were not already
 * seen among the persisted keys.
 */
@SuppressWarnings({
  "nullness" // TODO(https://github.com/apache/beam/issues/21068)
})
public PrefetchableIterable<K> keys() {
  checkState(
      !isClosed,
      "Multimap user state is no longer usable because it is closed for %s",
      keysStateRequest.getStateKey());
  if (isCleared) {
    // Cleared state: the persisted backend is irrelevant; only pending adds count.
    List<K> keys = new ArrayList<>(pendingAdds.size());
    for (Map.Entry<?, KV<K, List<V>>> entry : pendingAdds.entrySet()) {
      keys.add(entry.getValue().getKey());
    }
    return PrefetchableIterables.concat(keys);
  }
  // Snapshot pending removes/adds now so later mutations do not affect
  // iterators created from the returned iterable.
  Set<Object> pendingRemovesNow = new HashSet<>(pendingRemoves.keySet());
  Map<Object, K> pendingAddsNow = new HashMap<>();
  for (Map.Entry<Object, KV<K, List<V>>> entry : pendingAdds.entrySet()) {
    pendingAddsNow.put(entry.getKey(), entry.getValue().getKey());
  }
  return new PrefetchableIterables.Default<K>() {
    @Override
    public PrefetchableIterator<K> createIterator() {
      return new PrefetchableIterator<K>() {
        PrefetchableIterator<K> persistedKeysIterator = persistedKeys.iterator();
        Iterator<K> pendingAddsNowIterator;
        boolean hasNext; // true when nextKey holds an element not yet returned
        K nextKey;

        @Override
        public boolean isReady() {
          return persistedKeysIterator.isReady();
        }

        @Override
        public void prefetch() {
          if (!isReady()) {
            persistedKeysIterator.prefetch();
          }
        }

        @Override
        public boolean hasNext() {
          if (hasNext) {
            return true;
          }
          // Phase 1: persisted keys, minus those pending removal.
          while (persistedKeysIterator.hasNext()) {
            nextKey = persistedKeysIterator.next();
            Object nextKeyStructuralValue = mapKeyCoder.structuralValue(nextKey);
            if (!pendingRemovesNow.contains(nextKeyStructuralValue)) {
              // Remove all keys that we will visit when passing over the persistedKeysIterator
              // so we do not revisit them when passing over the pendingAddsNowIterator
              if (pendingAddsNow.containsKey(nextKeyStructuralValue)) {
                pendingAddsNow.remove(nextKeyStructuralValue);
              }
              hasNext = true;
              return true;
            }
          }
          // Phase 2: remaining pending adds (those not seen in phase 1).
          if (pendingAddsNowIterator == null) {
            pendingAddsNowIterator = pendingAddsNow.values().iterator();
          }
          while (pendingAddsNowIterator.hasNext()) {
            nextKey = pendingAddsNowIterator.next();
            hasNext = true;
            return true;
          }
          return false;
        }

        @Override
        public K next() {
          if (!hasNext()) {
            throw new NoSuchElementException();
          }
          hasNext = false;
          return nextKey;
        }
      };
    }
  };
}
|
@Test
public void testNoPersistedValues() throws Exception {
  // With an empty backing state store and no local mutations,
  // keys() must yield an empty iterable.
  FakeBeamFnStateClient fakeClient = new FakeBeamFnStateClient(Collections.emptyMap());
  MultimapUserState<byte[], String> userState =
      new MultimapUserState<>(
          Caches.noop(),
          fakeClient,
          "instructionId",
          createMultimapKeyStateKey(),
          ByteArrayCoder.of(),
          StringUtf8Coder.of());
  assertThat(userState.keys(), is(emptyIterable()));
}
|
/**
 * Runs the PENDING phase of the optimize job: creates temp partitions that
 * shadow the source partitions, then advances the job to WAITING_TXN and
 * persists the transition via the edit log.
 *
 * <p>Returns without a state change when the table is not yet stable, so the
 * scheduler can retry later.
 *
 * @throws AlterCancelException if the database is missing, the optimize
 *         clause was lost across an FE restart, or temp partition creation
 *         fails
 */
@Override
protected void runPendingJob() throws AlterCancelException {
    Preconditions.checkState(jobState == JobState.PENDING, jobState);
    LOG.info("begin to send create temp partitions. job: {}", jobId);
    Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
    if (db == null) {
        throw new AlterCancelException("Database " + dbId + " does not exist");
    }
    // Not stable yet: leave the job in PENDING and let the scheduler retry.
    if (!checkTableStable(db)) {
        return;
    }
    if (optimizeClause == null) {
        throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId);
    }
    if (optimizeClause.isTableOptimize()) {
        allPartitionOptimized = true;
    }
    // 1. create temp partitions
    for (int i = 0; i < optimizeClause.getSourcePartitionIds().size(); ++i) {
        tmpPartitionIds.add(GlobalStateMgr.getCurrentState().getNextId());
    }
    long createPartitionStartTimestamp = System.currentTimeMillis();
    OlapTable targetTable = checkAndGetTable(db, tableId);
    try {
        PartitionUtils.createAndAddTempPartitionsForTable(db, targetTable, postfix,
                optimizeClause.getSourcePartitionIds(), getTmpPartitionIds(), optimizeClause.getDistributionDesc(),
                warehouseId);
        LOG.debug("create temp partitions {} success. job: {}", getTmpPartitionIds(), jobId);
    } catch (Exception e) {
        LOG.warn("create temp partitions failed", e);
        throw new AlterCancelException("create temp partitions failed " + e);
    }
    long createPartitionElapse = System.currentTimeMillis() - createPartitionStartTimestamp;
    // wait previous transactions finished
    this.jobState = JobState.WAITING_TXN;
    this.optimizeOperation = optimizeClause.toString();
    span.setAttribute("createPartitionElapse", createPartitionElapse);
    span.setAttribute("watershedTxnId", this.watershedTxnId);
    span.addEvent("setWaitingTxn");
    // write edit log
    GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
    LOG.info("transfer optimize job {} state to {}, watershed txn_id: {}", jobId, this.jobState, watershedTxnId);
}
|
@Test
public void testSchemaChangeWhileTabletNotStable() throws Exception {
    // Verifies that runPendingJob() is a no-op while a replica is
    // decommissioning (table not stable) and only advances to WAITING_TXN
    // once all replicas are NORMAL again.
    SchemaChangeHandler schemaChangeHandler = GlobalStateMgr.getCurrentState().getSchemaChangeHandler();
    Database db = GlobalStateMgr.getCurrentState().getDb(GlobalStateMgrTestUtil.testDb1);
    OlapTable olapTable = (OlapTable) db.getTable(GlobalStateMgrTestUtil.testTable7);
    Partition testPartition = olapTable.getPartition(GlobalStateMgrTestUtil.testTable7);
    schemaChangeHandler.process(alterTableStmt.getAlterClauseList(), db, olapTable);
    Map<Long, AlterJobV2> alterJobsV2 = schemaChangeHandler.getAlterJobsV2();
    Assert.assertEquals(1, alterJobsV2.size());
    OnlineOptimizeJobV2 optimizeJob = (OnlineOptimizeJobV2) alterJobsV2.values().stream().findAny().get();
    MaterializedIndex baseIndex = testPartition.getBaseIndex();
    LocalTablet baseTablet = (LocalTablet) baseIndex.getTablets().get(0);
    List<Replica> replicas = baseTablet.getImmutableReplicas();
    Replica replica1 = replicas.get(0);
    // runPendingJob
    replica1.setState(Replica.ReplicaState.DECOMMISSION);
    optimizeJob.runPendingJob();
    Assert.assertEquals(JobState.PENDING, optimizeJob.getJobState());
    // table is stable runPendingJob again
    replica1.setState(Replica.ReplicaState.NORMAL);
    optimizeJob.runPendingJob();
    Assert.assertEquals(JobState.WAITING_TXN, optimizeJob.getJobState());
}
|
/**
 * Validates the given parsed statements against a sandboxed snapshot of the
 * engine state, so validation has no side effects on the live engine.
 *
 * @return the total number of persistent queries the statements would create
 * @throws KsqlException if a statement fails validation or executing the
 *         batch would exceed the persistent query capacity
 */
public int validate(
    final ServiceContext serviceContext,
    final List<ParsedStatement> statements,
    final SessionProperties sessionProperties,
    final String sql
) {
  requireSandbox(serviceContext);
  // Take a sandboxed snapshot of the engine so prepared/validated statements
  // never mutate live state.
  final KsqlExecutionContext ctx = requireSandbox(snapshotSupplier.apply(serviceContext));
  final Injector injector = injectorFactory.apply(ctx, serviceContext);
  final KsqlConfig ksqlConfig = ctx.getKsqlConfig();
  int numPersistentQueries = 0;
  for (final ParsedStatement parsed : statements) {
    // Apply session-variable substitution only when the feature is enabled.
    final PreparedStatement<?> prepared = ctx.prepare(
        parsed,
        (isVariableSubstitutionEnabled(sessionProperties, ksqlConfig)
            ? sessionProperties.getSessionVariables()
            : Collections.emptyMap())
    );
    final ConfiguredStatement<?> configured = ConfiguredStatement.of(prepared,
        SessionConfig.of(ksqlConfig, sessionProperties.getMutableScopedProperties())
    );
    final int currNumPersistentQueries = validate(
        serviceContext,
        configured,
        sessionProperties,
        ctx,
        injector
    );
    numPersistentQueries += currNumPersistentQueries;
    // Check capacity incrementally so the batch fails on the first statement
    // that would push the engine over its persistent-query limit.
    if (currNumPersistentQueries > 0
        && QueryCapacityUtil.exceedsPersistentQueryCapacity(ctx, ksqlConfig)) {
      QueryCapacityUtil.throwTooManyActivePersistentQueriesException(ctx, ksqlConfig, sql);
    }
  }
  return numPersistentQueries;
}
|
@Test
public void shouldThrowExceptionIfValidationFails() {
  // Given: a statement validator for CreateStream that always throws.
  givenRequestValidator(
      ImmutableMap.of(CreateStream.class, statementValidator)
  );
  doThrow(new KsqlException("Fail"))
      .when(statementValidator).validate(any(), any(), any(), any());
  final List<ParsedStatement> statements =
      givenParsed(SOME_STREAM_SQL);
  // When: the batch is validated.
  final Exception e = assertThrows(
      KsqlException.class,
      () -> validator.validate(serviceContext, statements, sessionProperties, "sql")
  );
  // Then: the validator's failure must propagate out of validate().
  assertThat(e.getMessage(), containsString(
      "Fail"));
}
|
/**
 * Returns the cached source line hashes for the given component.
 *
 * @throws IllegalStateException if the component's hashes were never cached
 */
public List<String> get(Component component) {
    final boolean cached = contains(component);
    Preconditions.checkState(cached, "Source line hashes for component %s not cached", component);
    return load(getId(component));
}
|
@Test
public void get_throws_ISE_if_not_cached() {
    // Requesting hashes for a component that was never cached must fail fast.
    Component notCached = createComponent(1);
    assertThatThrownBy(() -> underTest.get(notCached))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Source line hashes for component ReportComponent{ref=1, key='FILE_KEY', type=FILE} not cached");
}
|
/**
 * Returns the node the hierarchy currently points at.
 */
public Node getCurrent() {
    return this.current;
}
|
@Test
public void getCurrentNoPushReturnsRoot() {
    // Before anything is pushed, the current node must be the root.
    final Node current = hierarchy.getCurrent();
    assertThat(current.isRootNode(), is(true));
}
|
/**
 * Adds the given score to the element and returns the element's resulting
 * rank, blocking until the asynchronous operation completes.
 */
@Override
public Integer addScoreAndGetRank(V object, Number value) {
    return get(addScoreAndGetRankAsync(object, value));
}
|
@Test
public void testAddScoreAndGetRank() {
    RScoredSortedSet<String> sortedSet = redisson.getScoredSortedSet("simple");
    // First element: "12" at score 12 is alone, rank 0.
    Integer rankAfterFirstAdd = sortedSet.addScoreAndGetRank("12", 12);
    assertThat(rankAfterFirstAdd).isEqualTo(0);
    // "15" lands at score 10, below "12", taking rank 0.
    Integer rankAfterSecondAdd = sortedSet.addScoreAndGetRank("15", 10);
    assertThat(rankAfterSecondAdd).isEqualTo(0);
    assertThat(sortedSet.rank("12")).isEqualTo(1);
    assertThat(sortedSet.rank("15")).isEqualTo(0);
    // Incrementing "12" by 2 yields score 14; it stays at rank 1.
    Integer rankAfterIncrement = sortedSet.addScoreAndGetRank("12", 2);
    assertThat(rankAfterIncrement).isEqualTo(1);
    Double scoreOfTwelve = sortedSet.getScore("12");
    assertThat(scoreOfTwelve).isEqualTo(14);
}
|
/**
 * Decorates the supplied function so that any {@link Throwable} it throws is
 * mapped to a fallback result by {@code exceptionHandler}.
 *
 * @param function         the function to protect
 * @param exceptionHandler maps a thrown Throwable to the fallback result
 * @return a function that delegates to {@code function} and recovers via the
 *         handler on failure
 */
public static <T> CheckedFunction0<T> recover(CheckedFunction0<T> function,
    CheckedFunction1<Throwable, T> exceptionHandler) {
    return () -> {
        try {
            return function.apply();
        } catch (Throwable cause) {
            return exceptionHandler.apply(cause);
        }
    };
}
|
@Test
public void shouldRecoverFromException() throws Throwable {
    // A function that always throws must be recovered to the handler's value.
    CheckedFunction0<String> failing = () -> {
        throw new IOException("BAM!");
    };
    CheckedFunction0<String> recovered = VavrCheckedFunctionUtils.recover(failing, (ex) -> "Bla");
    assertThat(recovered.apply()).isEqualTo("Bla");
}
|
/**
 * Returns the first non-null value found for {@code key} across the
 * underlying stores, or {@code null} if no store holds the key.
 *
 * @throws NullPointerException if {@code key} is null
 * @throws InvalidStateStoreException if an underlying store is no longer
 *         available (e.g. migrated during a rebalance)
 */
@Override
public V get(final K key) {
    Objects.requireNonNull(key);
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    for (final ReadOnlyKeyValueStore<K, V> store : stores) {
        try {
            final V result = store.get(key);
            if (result != null) {
                return result;
            }
        } catch (final InvalidStateStoreException e) {
            // Re-wrap with a caller-friendly message, but keep the original
            // exception as the cause instead of silently dropping it.
            throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.", e);
        }
    }
    return null;
}
|
@Test
public void shouldThrowNullPointerExceptionOnGetNullKey() {
    // get() must reject a null key up front rather than querying the stores.
    assertThrows(NullPointerException.class, () -> theStore.get(null));
}
|
/**
 * Sends the record via the wrapped producer, beginning a transaction first
 * if one is needed.
 *
 * @throws TaskMigratedException if the producer was fenced; the fenced
 *         exception's wrapped cause is propagated so callers can re-wrap it
 * @throws StreamsException for any other send failure
 */
Future<RecordMetadata> send(final ProducerRecord<byte[], byte[]> record,
                            final Callback callback) {
    maybeBeginTransaction();
    try {
        return producer.send(record, callback);
    } catch (final KafkaException uncaughtException) {
        if (isRecoverable(uncaughtException)) {
            // producer.send() call may throw a KafkaException which wraps a FencedException,
            // in this case we should throw its wrapped inner cause so that it can be
            // captured and re-wrapped as TaskMigratedException
            throw new TaskMigratedException(
                formatException("Producer got fenced trying to send a record"),
                uncaughtException.getCause()
            );
        } else {
            throw new StreamsException(
                formatException(String.format("Error encountered trying to send record to topic %s", record.topic())),
                uncaughtException
            );
        }
    }
}
|
@Test
public void shouldThrowTaskMigrateExceptionOnEosBeginTxnError() {
    // A failure while beginning the transaction (not while sending) must be
    // wrapped as a StreamsException carrying the original KafkaException.
    eosAlphaMockProducer.beginTransactionException = new KafkaException("KABOOM!");
    // calling `send()` implicitly starts a new transaction
    final StreamsException thrown = assertThrows(
        StreamsException.class,
        () -> eosAlphaStreamsProducer.send(null, null));
    assertThat(thrown.getCause(), is(eosAlphaMockProducer.beginTransactionException));
    assertThat(
        thrown.getMessage(),
        is("Error encountered trying to begin a new transaction [test]")
    );
}
|
/**
 * Validates every countable resource in the request against the available
 * resource: requested values must be non-negative and must not exceed the
 * maximum allocation.
 *
 * @throws InvalidResourceRequestException if any requested value is negative
 *         or greater than the available amount
 */
@Private
@VisibleForTesting
static void checkResourceRequestAgainstAvailableResource(Resource reqResource,
    Resource availableResource) throws InvalidResourceRequestException {
  for (int i = 0; i < ResourceUtils.getNumberOfCountableResourceTypes(); i++) {
    ResourceInformation requested = reqResource.getResourceInformation(i);
    String requestedName = requested.getName();
    if (requested.getValue() < 0) {
      throwInvalidResourceException(reqResource, availableResource,
          requestedName, InvalidResourceType.LESS_THAN_ZERO);
    }
    if (!checkResource(requested, availableResource)) {
      throwInvalidResourceException(reqResource, availableResource,
          requestedName, InvalidResourceType.GREATER_THEN_MAX_ALLOCATION);
    }
  }
}
|
@Test
public void testCustomResourceRequestedUnitIsGreaterThanAvailableUnit2() {
    // 11M requested vs 1G available: after unit conversion the request fits,
    // so no InvalidResourceRequestException may be thrown.
    Resource requestedResource = ResourceTypesTestHelper.newResource(1, 1,
        ImmutableMap.<String, String>builder().put("custom-resource-1", "11M")
            .build());
    Resource availableResource =
        ResourceTypesTestHelper.newResource(1, 1,
            ImmutableMap.of("custom-resource-1", "1G"));
    try {
      SchedulerUtils.checkResourceRequestAgainstAvailableResource(
          requestedResource, availableResource);
    } catch (InvalidResourceRequestException e) {
      fail(String.format(
          "Resource request should be accepted. Requested: %s, available: %s",
          requestedResource, availableResource));
    }
}
|
/**
 * Formats the given epoch milliseconds as a long date string in the
 * configured timezone.
 *
 * @param milliseconds epoch time in ms; -1 means unknown
 * @param natural when true, use the "natural" (relative) long formatter
 * @return the formatted date, or the localized "Unknown" string for -1
 */
@Override
public String getLongFormat(final long milliseconds, boolean natural) {
    // Access to the shared formatter instances is serialized on longDateFormatter.
    synchronized(longDateFormatter) {
        if(-1 == milliseconds) {
            return LocaleFactory.localizedString("Unknown");
        }
        longDateFormatter.setTimeZone(NSTimeZone.timeZoneWithName(timezone));
        if(natural) {
            return longDateNaturalFormatter.stringFromDate(toDate(milliseconds));
        }
        return longDateFormatter.stringFromDate(toDate(milliseconds));
    }
}
|
@Test
public void testGetLongFormat() {
    // Both the plain and the "natural" long formats must yield a value for a real timestamp.
    final UserDateFormatter formatter = new UserDefaultsDateFormatter(TimeZone.getDefault().getID());
    final long now = System.currentTimeMillis();
    assertNotNull(formatter.getLongFormat(now, false));
    assertNotNull(formatter.getLongFormat(now, true));
}
|
/**
 * Intercepts a {@link BaseLoadBalancer} call before execution: filters its
 * server list down to the target instances selected by the load balancer
 * service and skips the original method with that filtered list.
 */
@Override
public ExecuteContext before(ExecuteContext context) {
    Object candidate = context.getObject();
    if (!(candidate instanceof BaseLoadBalancer)) {
        return context;
    }
    List<Object> serverList = getServerList(context.getMethod().getName(), candidate);
    if (CollectionUtils.isEmpty(serverList)) {
        return context;
    }
    BaseLoadBalancer balancer = (BaseLoadBalancer) candidate;
    RequestData requestData = getRequestData().orElse(null);
    List<Object> targets = loadBalancerService.getTargetInstances(balancer.getName(), serverList, requestData);
    context.skip(Collections.unmodifiableList(targets));
    return context;
}
|
@Test
public void testBeforeWithThreadLocal() {
    // With request data present in the thread local, the interceptor must
    // filter the load balancer's servers down to the single matching instance.
    ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", ""));
    interceptor.before(context);
    BaseLoadBalancer loadBalancer = (BaseLoadBalancer) context.getObject();
    List<Server> servers = loadBalancer.getAllServers();
    Assert.assertNotNull(servers);
    Assert.assertEquals(1, servers.size());
    Assert.assertEquals("foo", servers.get(0).getHost());
    Assert.assertEquals(8081, servers.get(0).getPort());
}
|
/**
 * Queues the removal of a data node from the cluster and returns the node.
 *
 * @throws NodeNotFoundException if no node with the given id exists
 * @throws IllegalArgumentException if the node is not AVAILABLE, or if it is
 *         the last available idle node in the cluster
 */
@Override
public DataNodeDto removeNode(String nodeId) throws NodeNotFoundException {
    final DataNodeDto node = nodeService.byNodeId(nodeId);
    if (node.getDataNodeStatus() != DataNodeStatus.AVAILABLE) {
        throw new IllegalArgumentException("Only running data nodes can be removed from the cluster.");
    }
    // Refuse to remove the last available node that has no queued action.
    if (nodeService.allActive().values().stream()
            .filter(n -> n.getDataNodeStatus() == DataNodeStatus.AVAILABLE && n.getActionQueue() == null)
            .count() <= 1) {
        throw new IllegalArgumentException("Cannot remove last data node in the cluster.");
    }
    DataNodeLifecycleTrigger trigger = DataNodeLifecycleTrigger.REMOVE;
    DataNodeStatus lockingStatus = DataNodeStatus.REMOVING;
    addToQueue(node, trigger, lockingStatus);
    return node;
}
|
@Test
public void removeNodeFailsForLastNode() throws NodeNotFoundException {
    // With exactly one available node registered, removal must be rejected
    // and no lifecycle event may be published.
    final String testNodeId = "node";
    nodeService.registerServer(buildTestNode(testNodeId, DataNodeStatus.AVAILABLE));
    Exception e = assertThrows(IllegalArgumentException.class, () -> {
        classUnderTest.removeNode(testNodeId);
    });
    assertEquals("Cannot remove last data node in the cluster.", e.getMessage());
    verifyNoMoreInteractions(clusterEventBus);
}
|
/**
 * Copies all bytes from the input stream to the output stream using the
 * default buffer size.
 *
 * @param is source stream
 * @param os destination stream
 * @return the number of bytes copied
 * @throws IOException if reading or writing fails
 */
public static long write(InputStream is, OutputStream os) throws IOException {
    return write(is, os, BUFFER_SIZE);
}
|
// NOTE(review): despite the numeric name, this exercises the Reader/Writer
// overload of IOUtils.write and checks the full text length is reported.
@Test
void testWrite5() throws Exception {
    assertThat((int) IOUtils.write(reader, writer), equalTo(TEXT.length()));
}
|
/**
 * Registers a configuration to listen on, identified by group/dataId/tenant,
 * together with the local content md5 used for change detection.
 */
public void addConfigListenContext(String group, String dataId, String tenant, String md5) {
    ConfigListenContext context = new ConfigListenContext();
    context.group = group;
    context.dataId = dataId;
    context.tenant = tenant;
    context.md5 = md5;
    configListenContexts.add(context);
}
|
@Override
@Test
public void testSerialize() throws JsonProcessingException {
    // The serialized request must expose the listen flag, the registered
    // listen context, the config module, and the injected request id.
    ConfigBatchListenRequest configBatchListenRequest = new ConfigBatchListenRequest();
    configBatchListenRequest.putAllHeader(HEADERS);
    configBatchListenRequest.addConfigListenContext(GROUP, DATA_ID, TENANT, MD5);
    final String requestId = injectRequestUuId(configBatchListenRequest);
    String json = mapper.writeValueAsString(configBatchListenRequest);
    assertTrue(json.contains("\"listen\":" + "true"));
    assertTrue(json.contains(
        "\"configListenContexts\":[{\"dataId\":\"test_data\",\"group\":\"group\",\"md5\":\"test_MD5\",\"tenant\":\"test_tenant\"}]"));
    assertTrue(json.contains("\"module\":\"" + Constants.Config.CONFIG_MODULE));
    assertTrue(json.contains("\"requestId\":\"" + requestId));
}
|
/**
 * Returns the classes to analyze for the given test class, serving repeated
 * requests from a per-test-class cache. On a miss, the classes are either
 * taken from the location-keyed forever-cache or imported lazily, depending
 * on the requested cache mode.
 */
JavaClasses getClassesToAnalyzeFor(Class<?> testClass, ClassAnalysisRequest classAnalysisRequest) {
    checkNotNull(testClass);
    checkNotNull(classAnalysisRequest);
    // Single lookup instead of containsKey + get, avoiding scanning the map
    // twice (cached values are never null here — assumes the cache getters
    // below never yield null; TODO confirm).
    JavaClasses cached = cachedByTest.get(testClass);
    if (cached != null) {
        return cached;
    }
    LocationsKey locations = RequestedLocations.by(classAnalysisRequest, testClass).asKey();
    JavaClasses classes = classAnalysisRequest.getCacheMode() == FOREVER
            ? cachedByLocations.getUnchecked(locations).get()
            : new LazyJavaClasses(locations.locations, locations.importOptionTypes).get();
    cachedByTest.put(testClass, classes);
    return classes;
}
|
@Test
public void filters_jars_relative_to_class() {
    // Classes analyzed relative to Rule.class must exclude tngtech packages.
    JavaClasses analyzed = cache.getClassesToAnalyzeFor(TestClass.class, analyzePackagesOf(Rule.class));
    assertThat(analyzed).isNotEmpty();
    for (JavaClass javaClass : analyzed) {
        assertThat(javaClass.getPackageName()).doesNotContain("tngtech");
    }
}
|
/**
 * Derives a Spanner-compatible database id from the given base string:
 * illegal characters are replaced and the id is length-limited/timestamped
 * via {@code generateResourceId}, trailing underscores are stripped, and a
 * non-letter first character is replaced with a padding letter.
 *
 * @throws IllegalArgumentException if {@code baseString} is empty, or if
 *         nothing remains after cleanup
 */
public static String generateDatabaseId(String baseString) {
    checkArgument(baseString.length() != 0, "baseString cannot be empty!");
    String databaseId =
        generateResourceId(
            baseString,
            ILLEGAL_DATABASE_CHARS,
            REPLACE_DATABASE_CHAR,
            MAX_DATABASE_ID_LENGTH,
            DATABASE_TIME_FORMAT);
    // Strip trailing underscores (presumably left by the character
    // replacement above) so the id needs no backticks.
    String trimmed = CharMatcher.is('_').trimTrailingFrom(databaseId);
    checkArgument(
        trimmed.length() > 0,
        "Database id is empty after removing illegal characters and trailing underscores");
    // if first char is not a letter, replace with a padding letter, so it doesn't
    // violate spanner's database naming rules
    char padding = generatePadding();
    if (!Character.isLetter(trimmed.charAt(0))) {
      trimmed = padding + trimmed.substring(1);
    }
    return trimmed;
}
|
@Test
public void testGenerateDatabaseIdShouldThrowErrorWithEmptyInput() {
    // An empty base string must be rejected outright.
    final String emptyBase = "";
    assertThrows(IllegalArgumentException.class, () -> generateDatabaseId(emptyBase));
}
|
/**
 * Compares two values expressed in (possibly different) units.
 *
 * <p>One operand is converted into the other's unit (direction chosen by the
 * units' positions in {@code SORTED_UNITS}) and the values are compared as
 * longs. If that conversion overflows a long, the comparison is redone with
 * exact BigInteger arithmetic using the converters' numerator/denominator
 * ratios.
 *
 * @return a negative, zero, or positive int as valueA (in unitA) is less
 *         than, equal to, or greater than valueB (in unitB)
 */
public static int compare(String unitA, long valueA, String unitB,
    long valueB) {
  checkUnitArgument(unitA);
  checkUnitArgument(unitB);
  if (unitA.equals(unitB)) {
    return Long.compare(valueA, valueB);
  }
  Converter unitAC = getConverter(unitA);
  Converter unitBC = getConverter(unitB);
  int unitAPos = SORTED_UNITS.indexOf(unitA);
  int unitBPos = SORTED_UNITS.indexOf(unitB);
  try {
    // Fast path: convert one value into the other's unit, compare as longs.
    long tmpA = valueA;
    long tmpB = valueB;
    if (unitAPos < unitBPos) {
      tmpB = convert(unitB, unitA, valueB);
    } else {
      tmpA = convert(unitA, unitB, valueA);
    }
    return Long.compare(tmpA, tmpB);
  } catch (IllegalArgumentException ie) {
    // Long conversion overflowed: fall back to exact BigInteger arithmetic.
    BigInteger tmpA = BigInteger.valueOf(valueA);
    BigInteger tmpB = BigInteger.valueOf(valueB);
    if (unitAPos < unitBPos) {
      tmpB = tmpB.multiply(BigInteger.valueOf(unitBC.numerator));
      tmpB = tmpB.multiply(BigInteger.valueOf(unitAC.denominator));
      tmpB = tmpB.divide(BigInteger.valueOf(unitBC.denominator));
      tmpB = tmpB.divide(BigInteger.valueOf(unitAC.numerator));
    } else {
      tmpA = tmpA.multiply(BigInteger.valueOf(unitAC.numerator));
      tmpA = tmpA.multiply(BigInteger.valueOf(unitBC.denominator));
      tmpA = tmpA.divide(BigInteger.valueOf(unitAC.denominator));
      tmpA = tmpA.divide(BigInteger.valueOf(unitBC.numerator));
    }
    return tmpA.compareTo(tmpB);
  }
}
|
@Test
void testCompare() {
    // Cross-unit comparisons between peta ("P"), pico ("p"), tera ("T") and
    // nano ("n"), including the symmetric and equal cases.
    String unitA = "P";
    long valueA = 1;
    String unitB = "p";
    long valueB = 2;
    assertEquals(1,
        UnitsConversionUtil.compare(unitA, valueA, unitB, valueB));
    assertEquals(-1,
        UnitsConversionUtil.compare(unitB, valueB, unitA, valueA));
    assertEquals(0,
        UnitsConversionUtil.compare(unitA, valueA, unitA, valueA));
    assertEquals(-1,
        UnitsConversionUtil.compare(unitA, valueA, unitA, valueB));
    assertEquals(1,
        UnitsConversionUtil.compare(unitA, valueB, unitA, valueA));
    // 1 P equals 1000 T.
    unitB = "T";
    assertEquals(1,
        UnitsConversionUtil.compare(unitA, valueA, unitB, valueB));
    assertEquals(-1,
        UnitsConversionUtil.compare(unitB, valueB, unitA, valueA));
    assertEquals(0,
        UnitsConversionUtil.compare(unitA, valueA, unitB, 1000L));
    // 1000 p equals 1 n.
    unitA = "p";
    unitB = "n";
    assertEquals(-1,
        UnitsConversionUtil.compare(unitA, valueA, unitB, valueB));
    assertEquals(1,
        UnitsConversionUtil.compare(unitB, valueB, unitA, valueA));
    assertEquals(0,
        UnitsConversionUtil.compare(unitA, 1000L, unitB, valueA));
}
|
/**
 * Reads the attributes of a file or folder from the Box API, mapping API
 * failures to a backend exception.
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    try {
        if(file.isDirectory()) {
            final FoldersApi folders = new FoldersApi(new BoxApiClient(session.getClient()));
            return this.toAttributes(folders.getFoldersId(fileid.getFileId(file), DEFAULT_FIELDS, null, null));
        }
        final FilesApi files = new FilesApi(new BoxApiClient(session.getClient()));
        return this.toAttributes(files.getFilesId(fileid.getFileId(file), StringUtils.EMPTY, DEFAULT_FIELDS, null, null));
    }
    catch(ApiException e) {
        throw new BoxExceptionMappingService(fileid).map("Failure to read attributes of {0}", e, file);
    }
}
|
@Test
public void testFindRoot() throws Exception {
    // Integration test: looking up the root folder must return real (non-empty)
    // attributes from the Box backend.
    final BoxFileidProvider fileid = new BoxFileidProvider(session);
    final BoxAttributesFinderFeature f = new BoxAttributesFinderFeature(session, fileid);
    final PathAttributes attributes = f.find(new Path("/", EnumSet.of(Path.Type.volume, Path.Type.directory)));
    assertNotEquals(PathAttributes.EMPTY, attributes);
}
|
/**
 * Flips each bit of the chromosome independently with probability
 * {@code mutationRate}.
 */
@Override
public void mutate() {
    for (int i = 0; i < bits.length; i++) {
        final boolean flip = MathEx.random() < mutationRate;
        if (flip) {
            bits[i] ^= 1;
        }
    }
}
|
@Test
public void testMutate() {
    System.out.println("mutate");
    // Seeded RNG pins the exact mutation pattern, so the expected mutant
    // byte array is deterministic.
    MathEx.setSeed(19650218); // to get repeatable results.
    byte[] father = {1,1,1,0,1,0,0,1,0,0,0};
    BitString instance = new BitString(father.clone(), null, Crossover.SINGLE_POINT, 1.0, 0.1);
    instance.mutate();
    byte[] mutant = {1,1,1,1,1,0,0,1,0,1,0};
    for (int i = 0; i < father.length; i++) {
        assertEquals(mutant[i], instance.bits()[i]);
    }
}
|
/**
 * Removes the entry for the given key pair regardless of its current value.
 *
 * @return true if an entry was removed
 */
public boolean remove(long key1, long key2) {
    checkBiggerEqualZero(key1);
    final long hashValue = hash(key1, key2);
    return getSection(hashValue).remove(key1, key2, ValueNotFound, ValueNotFound, (int) hashValue);
}
|
@Test
public void testRemove() {
    ConcurrentLongLongPairHashMap map = ConcurrentLongLongPairHashMap
            .newBuilder()
            .build();
    assertTrue(map.isEmpty());
    assertTrue(map.put(1, 1, 11, 11));
    assertFalse(map.isEmpty());
    // Absent key pair: nothing to remove.
    assertFalse(map.remove(0, 0));
    // Present key pair but non-matching value pair: conditional remove refuses.
    assertFalse(map.remove(1, 1, 111, 111));
    assertFalse(map.isEmpty());
    // Matching key and value pair: the entry is removed.
    assertTrue(map.remove(1, 1, 11, 11));
    assertTrue(map.isEmpty());
}
|
/**
 * Defines the {@code api/features} web service controller and registers its
 * actions.
 */
@Override
public void define(Context context) {
    NewController controller = context.createController("api/features");
    controller.setDescription("Provides information about features available in SonarQube");
    controller.setSince("9.6");
    list.define(controller);
    controller.done();
}
|
@Test
public void define_shouldHasAtLeastOneAction() {
    // The features controller must be registered with description, path,
    // since-version and at least one action.
    WebService.Context context = new WebService.Context();
    underTest.define(context);
    WebService.Controller controller = context.controller(CONTROLLER_FEATURES);
    assertThat(controller).isNotNull();
    assertThat(controller.description()).isNotEmpty();
    assertThat(controller.path()).isEqualTo(CONTROLLER_FEATURES);
    assertThat(controller.since()).isEqualTo("9.6");
    assertThat(controller.actions()).hasSizeGreaterThanOrEqualTo(1);
}
|
/**
 * Returns the connection properties derived from the driver URI.
 */
public Properties getProperties()
{
    return this.properties;
}
|
@Test
public void testUriWithHttpProtocols()
        throws SQLException
{
    // The protocols query parameter must be propagated into the connection properties.
    String protocolList = "h2,http/1.1";
    PrestoDriverUri uri = createDriverUri("presto://localhost:8080?protocols=" + protocolList);
    Properties parsedProperties = uri.getProperties();
    assertEquals(parsedProperties.getProperty(HTTP_PROTOCOLS.getKey()), protocolList);
}
|
/**
 * Determines the next offset that can be committed for this partition by
 * scanning the acked messages in offset order from the committed offset.
 *
 * <p>A gap in the acked sequence normally stops the scan; however, if the
 * missing offset was never emitted (e.g. removed by topic compaction) the
 * scan skips forward to the next emitted offset.
 *
 * @param commitMetadata metadata string to attach to the commit
 * @return the offset/metadata to commit, or {@code null} if nothing is ready
 * @throws IllegalStateException if an acked offset is below the expected
 *         next commit offset (indicates an acking/emit bug)
 */
public OffsetAndMetadata findNextCommitOffset(final String commitMetadata) {
    boolean found = false;
    long currOffset;
    long nextCommitOffset = committedOffset;
    for (KafkaSpoutMessageId currAckedMsg : ackedMsgs) {  // complexity is that of a linear scan on a TreeMap
        currOffset = currAckedMsg.offset();
        if (currOffset == nextCommitOffset) {
            // found the next offset to commit
            found = true;
            nextCommitOffset = currOffset + 1;
        } else if (currOffset > nextCommitOffset) {
            if (emittedOffsets.contains(nextCommitOffset)) {
                // Genuine out-of-order ack: the missing offset was emitted and
                // will be acked later, so stop here.
                LOG.debug("topic-partition [{}] has non-sequential offset [{}]."
                        + " It will be processed in a subsequent batch.", tp, currOffset);
                break;
            } else {
                /*
                This case will arise in case of non-sequential offset being processed.
                So, if the topic doesn't contain offset = nextCommitOffset (possible
                if the topic is compacted or deleted), the consumer should jump to
                the next logical point in the topic. Next logical offset should be the
                first element after nextCommitOffset in the ascending ordered emitted set.
                */
                LOG.debug("Processed non-sequential offset."
                        + " The earliest uncommitted offset is no longer part of the topic."
                        + " Missing offset: [{}], Processed: [{}]", nextCommitOffset, currOffset);
                final Long nextEmittedOffset = emittedOffsets.ceiling(nextCommitOffset);
                if (nextEmittedOffset != null && currOffset == nextEmittedOffset) {
                    LOG.debug("Found committable offset: [{}] after missing offset: [{}], skipping to the committable offset",
                            currOffset, nextCommitOffset);
                    found = true;
                    nextCommitOffset = currOffset + 1;
                } else {
                    LOG.debug("Topic-partition [{}] has non-sequential offset [{}]."
                            + " Next offset to commit should be [{}]", tp, currOffset, nextCommitOffset);
                    break;
                }
            }
        } else {
            throw new IllegalStateException("The offset [" + currOffset + "] is below the current nextCommitOffset "
                    + "[" + nextCommitOffset + "] for [" + tp + "]."
                    + " This should not be possible, and likely indicates a bug in the spout's acking or emit logic.");
        }
    }
    OffsetAndMetadata nextCommitOffsetAndMetadata = null;
    if (found) {
        nextCommitOffsetAndMetadata = new OffsetAndMetadata(nextCommitOffset, commitMetadata);
        LOG.debug("Topic-partition [{}] has offsets [{}-{}] ready to be committed."
                + " Processing will resume at offset [{}] upon spout restart",
                tp, committedOffset, nextCommitOffsetAndMetadata.offset() - 1, nextCommitOffsetAndMetadata.offset());
    } else {
        LOG.debug("Topic-partition [{}] has no offsets ready to be committed", tp);
    }
    LOG.trace("{}", this);
    return nextCommitOffsetAndMetadata;
}
|
@Test
public void testFindNextOffsetWithAckedButNotEmittedOffsetGap() {
    /**
     * If topic compaction is enabled in Kafka some offsets may be deleted.
     * We distinguish this case from regular gaps in the acked offset sequence caused by out of order acking
     * by checking that offsets in the gap have been emitted at some point previously.
     * If they haven't then they can't exist in Kafka, since the spout emits tuples in order.
     */
    // Ack offsets +0 and +2 without ever emitting +1: the gap offset cannot
    // exist, so the commit must cover everything up to +3.
    emitAndAckMessage(getMessageId(initialFetchOffset + 2));
    emitAndAckMessage(getMessageId(initialFetchOffset));
    OffsetAndMetadata nextCommitOffset = manager.findNextCommitOffset(COMMIT_METADATA);
    assertThat("The next commit offset should cover all the acked offsets, since the offset in the gap hasn't been emitted and doesn't exist",
        nextCommitOffset.offset(), is(initialFetchOffset + 3));
}
|
/**
 * Converts this persisted material instance into the equivalent
 * {@link GitMaterial} configuration object.
 *
 * @param name display name to apply to the material
 * @param folder destination folder of the material
 * @param password password to set on the material
 */
@Override
public Material toOldMaterial(String name, String folder, String password) {
    GitMaterial material = new GitMaterial(url, branch, folder);
    setName(name, material);
    material.setUserName(username);
    material.setPassword(password);
    material.setSubmoduleFolder(submoduleFolder);
    material.setId(id);
    return material;
}
|
@Test
void shouldCreateMaterialFromMaterialInstance() {
    // Every persisted attribute must survive the conversion back to a GitMaterial.
    final GitMaterialInstance instance = new GitMaterialInstance("https://example.com", "bob",
            "feature", "submodule_folder", "some-flyweight");
    instance.setId(100L);
    final GitMaterial converted = (GitMaterial) instance.toOldMaterial("example", "destination", "pass");
    assertThat(converted.getName()).isEqualTo(new CaseInsensitiveString("example"));
    assertThat(converted.getUrl()).isEqualTo("https://example.com");
    assertThat(converted.getUserName()).isEqualTo("bob");
    assertThat(converted.getPassword()).isEqualTo("pass");
    assertThat(converted.getBranch()).isEqualTo("feature");
    assertThat(converted.getSubmoduleFolder()).isEqualTo("submodule_folder");
    assertThat(converted.getFolder()).isEqualTo("destination");
    assertThat(converted.getId()).isEqualTo(instance.getId());
}
|
/**
 * Sets the predicate's constant boolean value.
 *
 * @return this, to allow method chaining
 */
public BooleanPredicate setValue(boolean value) {
    this.value = value;
    return this;
}
|
@Test
void requireThatEqualsIsImplemented() {
    // Reflexive, type-discriminating, value-sensitive equality.
    BooleanPredicate truePredicate = new BooleanPredicate(true);
    assertEquals(truePredicate, truePredicate);
    assertNotEquals(truePredicate, new Object());
    BooleanPredicate other = new BooleanPredicate(false);
    assertNotEquals(truePredicate, other);
    // After flipping the value, the two predicates must compare equal.
    other.setValue(true);
    assertEquals(truePredicate, other);
}
|
/**
 * Adds the element to the set, using the element's own hash code.
 *
 * @return true if the set changed as a result
 */
@Override
public boolean add(E element) {
    final int hash = element.hashCode();
    return add(element, hash);
}
|
@Test(expected = NullPointerException.class)
public void testAddAllThrowsOnNullElement() {
    // A null element in the source collection must make addAll fail with NPE.
    final OAHashSet<Integer> set = new OAHashSet<>(8);
    final Collection<Integer> toAdd = new ArrayList<>(2);
    toAdd.add(1);
    toAdd.add(null);
    set.addAll(toAdd);
}
|
/**
 * Finds a method handle for the named method on the given class, trying
 * virtual, then static, then "special" lookup in that order. When
 * {@code name} is blank, a constructor handle is looked up instead.
 *
 * @param callerClass the class to search
 * @param name the method name; blank means "find a constructor"
 * @param type the method type (return type and parameter types)
 * @return the resolved handle, or {@code null} if no lookup matched
 * @throws UtilException if a special-lookup candidate exists but is inaccessible
 */
public static MethodHandle findMethod(Class<?> callerClass, String name, MethodType type) {
	if (StrUtil.isBlank(name)) {
		return findConstructor(callerClass, type);
	}
	MethodHandle handle = null;
	final MethodHandles.Lookup lookup = lookup(callerClass);
	// Instance (virtual) method first.
	try {
		handle = lookup.findVirtual(callerClass, name, type);
	} catch (IllegalAccessException | NoSuchMethodException ignore) {
		//ignore
	}
	// Static method.
	if (null == handle) {
		try {
			handle = lookup.findStatic(callerClass, name, type);
		} catch (IllegalAccessException | NoSuchMethodException ignore) {
			//ignore
		}
	}
	// Special methods, including constructors, private methods, etc.
	if (null == handle) {
		try {
			handle = lookup.findSpecial(callerClass, name, type, callerClass);
		} catch (NoSuchMethodException ignore) {
			//ignore
		} catch (IllegalAccessException e) {
			throw new UtilException(e);
		}
	}
	return handle;
}
|
@Test
public void findMethodTest() throws Throwable {
    // Virtual method declared on an interface: invocation needs a receiver argument.
    MethodHandle quackHandle = MethodHandleUtil.findMethod(Duck.class, "quack",
            MethodType.methodType(String.class));
    assertNotNull(quackHandle);
    assertEquals("Quack", (String) quackHandle.invoke(new BigDuck()));
    // Virtual method declared directly on the concrete class.
    MethodHandle sizeHandle = MethodHandleUtil.findMethod(BigDuck.class, "getSize",
            MethodType.methodType(int.class));
    assertNotNull(sizeHandle);
    assertEquals(36, (int) sizeHandle.invoke(new BigDuck()));
}
|
/**
 * Delegates the type lookup to the wrapped resolved file object.
 *
 * @throws FileSystemException if the underlying file system fails to determine the type
 */
@Override
public FileType getType() throws FileSystemException {
    return resolvedFileObject.getType();
}
|
@Test
public void testDelegatesGetType() throws FileSystemException {
    // Each getType() call must be forwarded verbatim to the resolved delegate,
    // returning whatever the delegate currently reports.
    when( resolvedFileObject.getType() ).thenReturn( FileType.FILE );
    assertEquals( FileType.FILE, fileObject.getType() );
    when( resolvedFileObject.getType() ).thenReturn( FileType.FOLDER );
    assertEquals( FileType.FOLDER, fileObject.getType() );
    // Exactly one delegate call per wrapper call — no caching.
    verify( resolvedFileObject, times( 2 ) ).getType();
}
|
/**
 * Picks the best address from a node's address list. A user-preferred address
 * type wins when present; otherwise a fixed priority order is applied:
 * ExternalDNS, ExternalIP, InternalDNS, InternalIP, Hostname.
 *
 * @param addresses            node addresses to search; may be null
 * @param preferredAddressType address type preferred by the user; may be null
 * @return the selected address, or null when none of the known types is present
 */
public static String findAddress(List<NodeAddress> addresses, NodeAddressType preferredAddressType) {
    if (addresses == null) {
        return null;
    }
    // Index by type; duplicates of the same type keep the first address seen.
    Map<String, String> addressMap = addresses.stream()
            .collect(Collectors.toMap(NodeAddress::getType, NodeAddress::getAddress, (address1, address2) -> {
                LOGGER.warnOp("Found multiple addresses with the same type. Only the first address '{}' will be used.", address1);
                return address1;
            }));
    // An explicitly preferred type takes precedence when such an address exists.
    if (preferredAddressType != null && addressMap.containsKey(preferredAddressType.toValue())) {
        return addressMap.get(preferredAddressType.toValue());
    }
    // Fall back through the fixed priority order; toMap never stores null values,
    // so a non-null get() means the type is present.
    for (String type : new String[]{"ExternalDNS", "ExternalIP", "InternalDNS", "InternalIP", "Hostname"}) {
        String address = addressMap.get(type);
        if (address != null) {
            return address;
        }
    }
    return null;
}
|
@Test
public void testFindAddressNotFound() {
    // None of these address types is in the known priority list
    // (ExternalDNS/ExternalIP/InternalDNS/InternalIP/Hostname), and no
    // preferred type is given, so the lookup must return null.
    List<NodeAddress> addresses = new ArrayList<>(3);
    addresses.add(new NodeAddressBuilder().withType("SomeAddress").withAddress("my.external.address").build());
    addresses.add(new NodeAddressBuilder().withType("SomeOtherAddress").withAddress("my.internal.address").build());
    addresses.add(new NodeAddressBuilder().withType("YetAnotherAddress").withAddress("192.168.2.94").build());
    String address = NodeUtils.findAddress(addresses, null);
    assertThat(address, is(CoreMatchers.nullValue()));
}
|
/**
 * Starts the headless KSQL server: loads UDFs, ensures the processing-log
 * topic exists, applies the RocksDB config, executes all statements from the
 * queries file, and starts the version-checker agent. Any startup failure is
 * logged with the queries file path and rethrown.
 */
public void startAsync() {
    try {
        udfLoader.load();
        ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
            serviceContext.getTopicClient(),
            processingLogConfig,
            ksqlConfig);
        // Stream auto-create is not supported headless; warn instead of failing.
        if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
            log.warn("processing log auto-create is enabled, but this is not supported "
                + "for headless mode.");
        }
        rocksDBConfigSetterHandler.accept(ksqlConfig);
        processesQueryFile(readQueriesFile(queriesFile));
        showWelcomeMessage();
        // Version checker only accepts string-valued properties; copy non-null originals.
        final Properties properties = new Properties();
        ksqlConfig.originals().forEach((key, value) -> {
            if (nonNull(value)) {
                properties.put(key, value.toString());
            }
        });
        versionChecker.start(KsqlModuleType.SERVER, properties);
    } catch (final Exception e) {
        log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
        throw e;
    }
}
|
@Test
public void shouldRunCtasStatements() {
    // Verifies that a CREATE TABLE AS SELECT statement parsed from the queries
    // file is executed against the real engine (after sandbox validation).
    // Given:
    final PreparedStatement<?> ctas = PreparedStatement.of("CTAS",
        new CreateTableAsSelect(SOME_NAME, query, false, false, CreateSourceAsProperties.none()));
    final ConfiguredStatement<?> configured = ConfiguredStatement
        .of(ctas, SessionConfig.of(ksqlConfig, emptyMap()));
    givenQueryFileParsesTo(ctas);
    when(sandBox.execute(sandBoxServiceContext, configured))
        .thenReturn(ExecuteResult.of(persistentQuery));
    // When:
    standaloneExecutor.startAsync();
    // Then:
    verify(ksqlEngine).execute(serviceContext, configured);
}
|
/**
 * Tests whether this resource pattern matches the given concrete resource.
 * An ANY resource type matches everything; otherwise the types must agree,
 * and the resource name is compared according to this pattern's mode:
 * ANY matches any name, LITERAL requires exact equality, PREFIXED requires
 * the resource name to start with this pattern's name.
 */
public boolean isMatch(Resource resource) {
    // ANY type acts as a wildcard over all resources.
    if (this.resourceType == ResourceType.ANY) {
        return true;
    }
    if (this.resourceType != resource.resourceType) {
        return false;
    }
    switch (resourcePattern) {
        case ANY:
            return true;
        case LITERAL:
            return StringUtils.equals(resource.resourceName, this.resourceName);
        case PREFIXED:
            return StringUtils.startsWith(resource.resourceName, this.resourceName);
        default:
            // Unknown pattern kinds never match.
            return false;
    }
}
|
@Test
public void isMatch() {
    // TODO(review): empty test body — this passes vacuously and asserts nothing.
    // Cover the ANY/LITERAL/PREFIXED matching modes of Resource#isMatch,
    // including the type-mismatch and unknown-pattern branches.
}
|
/**
 * Returns a page of posts matching the request's filters; delegates
 * the filtering and paging entirely to the mapper.
 */
@Override
public PageResult<PostDO> getPostPage(PostPageReqVO reqVO) {
    return postMapper.selectPage(reqVO);
}
|
@Test
public void testGetPostPage() {
    // mock data: one record that matches both filters
    PostDO postDO = randomPojo(PostDO.class, o -> {
        o.setName("码仔");
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    });
    postMapper.insert(postDO);
    // record whose name does not match the filter
    postMapper.insert(cloneIgnoreId(postDO, o -> o.setName("程序员")));
    // record whose status does not match the filter
    postMapper.insert(cloneIgnoreId(postDO, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
    // prepare request parameters (name is a partial match)
    PostPageReqVO reqVO = new PostPageReqVO();
    reqVO.setName("码");
    reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
    // call
    PageResult<PostDO> pageResult = postService.getPostPage(reqVO);
    // assert: only the first record matches
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(postDO, pageResult.getList().get(0));
}
|
/**
 * Returns true when at least one input bit is set in both the current
 * selection mask and the overall selected mask but is absent from the
 * available-inputs mask — i.e. some selected input is still unavailable.
 */
boolean shouldSetAvailableForAnotherInput() {
    return (selectedInputsMask & allSelectedMask & ~availableInputsMask) != 0;
}
|
@Test
void testShouldSetAvailableForAnotherInput() {
    // Selection covers inputs 2 and 3 (1-based) out of 3 inputs.
    InputSelection secondAndThird = new InputSelection.Builder().select(2).select(3).build();
    MultipleInputSelectionHandler selectionHandler =
            new MultipleInputSelectionHandler(() -> secondAndThird, 3);
    selectionHandler.nextSelection();
    assertThat(selectionHandler.shouldSetAvailableForAnotherInput()).isFalse();
    // Input index 0 is not part of the selection, so its availability is irrelevant.
    selectionHandler.setUnavailableInput(0);
    assertThat(selectionHandler.shouldSetAvailableForAnotherInput()).isFalse();
    // Input index 2 is selected; marking it unavailable must raise the flag.
    selectionHandler.setUnavailableInput(2);
    assertThat(selectionHandler.shouldSetAvailableForAnotherInput()).isTrue();
    // Restoring the unselected input changes nothing.
    selectionHandler.setAvailableInput(0);
    assertThat(selectionHandler.shouldSetAvailableForAnotherInput()).isTrue();
    // Restoring the selected input clears the flag.
    selectionHandler.setAvailableInput(2);
    assertThat(selectionHandler.shouldSetAvailableForAnotherInput()).isFalse();
}
|
/**
 * Returns the value at {@code position} from the record, treating it as an
 * {@link IndexedRecord}. The {@code name} parameter is unused here — access
 * is strictly positional.
 */
public Object getField(Object record, String name, int position) {
    return ((IndexedRecord) record).get(position);
}
|
@Test
void getEmptySchemaField() throws Exception {
    // Looking up a field on a record schema whose fields were never set must fail.
    assertThrows(AvroRuntimeException.class, () -> {
        Schema s = Schema.createRecord("schemaName", "schemaDoc", "namespace", false);
        s.getField("foo");
    });
}
|
/**
 * Attempts to unblock a term buffer at the given blocked offset by rewriting
 * the blocking region as a padding frame.
 *
 * @param logMetaDataBuffer log meta data buffer
 * @param termBuffer        term buffer containing the blocked region
 * @param blockedOffset     offset at which progress is blocked
 * @param tailOffset        current tail offset of the term
 * @param termId            id of the term being unblocked
 * @return {@code NO_ACTION} when nothing was changed, {@code UNBLOCKED} when a
 * padding frame was written mid-term, or {@code UNBLOCKED_TO_END} when the
 * remainder of the term was padded.
 */
public static Status unblock(
    final UnsafeBuffer logMetaDataBuffer,
    final UnsafeBuffer termBuffer,
    final int blockedOffset,
    final int tailOffset,
    final int termId)
{
    Status status = NO_ACTION;
    int frameLength = frameLengthVolatile(termBuffer, blockedOffset);
    if (frameLength < 0)
    {
        // Negative length marks a claimed-but-uncommitted frame: pad it out in full.
        resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, -frameLength);
        status = UNBLOCKED;
    }
    else if (0 == frameLength)
    {
        // Zero length: scan forward for the first started frame after the gap.
        int currentOffset = blockedOffset + FRAME_ALIGNMENT;
        while (currentOffset < tailOffset)
        {
            frameLength = frameLengthVolatile(termBuffer, currentOffset);
            if (frameLength != 0)
            {
                // Only pad if the whole gap still reads as zeroed — otherwise a
                // writer made progress concurrently and we must not interfere.
                if (scanBackToConfirmZeroed(termBuffer, currentOffset, blockedOffset))
                {
                    final int length = currentOffset - blockedOffset;
                    resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
                    status = UNBLOCKED;
                }
                break;
            }
            currentOffset += FRAME_ALIGNMENT;
        }
        // Reached the end of the term with no started frame: pad to the term end,
        // re-checking that the blocked slot is still zero.
        if (currentOffset == termBuffer.capacity())
        {
            if (0 == frameLengthVolatile(termBuffer, blockedOffset))
            {
                final int length = currentOffset - blockedOffset;
                resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
                status = UNBLOCKED_TO_END;
            }
        }
    }
    return status;
}
|
@Test
void shouldPatchNonCommittedMessage()
{
    final int termOffset = 0;
    final int messageLength = HEADER_LENGTH * 4;
    final int tailOffset = messageLength;
    // A negative frame length marks a claimed-but-uncommitted message.
    when(mockTermBuffer.getIntVolatile(termOffset)).thenReturn(-messageLength);
    assertEquals(
        UNBLOCKED, TermUnblocker.unblock(mockLogMetaDataBuffer, mockTermBuffer, termOffset, tailOffset, TERM_ID));
    // Header must be rewritten as a padding frame, with the (positive) length
    // published last via an ordered write.
    final InOrder inOrder = inOrder(mockTermBuffer);
    inOrder.verify(mockTermBuffer).putShort(typeOffset(termOffset), (short)HDR_TYPE_PAD, LITTLE_ENDIAN);
    inOrder.verify(mockTermBuffer).putInt(termOffsetOffset(termOffset), termOffset, LITTLE_ENDIAN);
    inOrder.verify(mockTermBuffer).putIntOrdered(termOffset, messageLength);
}
|
/**
 * Performs a prefix scan across all underlying stores registered for this
 * store name, returning one iterator over the concatenated per-store results.
 *
 * @throws NullPointerException        when {@code prefix} or {@code prefixKeySerializer} is null
 * @throws InvalidStateStoreException  when an underlying store is no longer available
 *                                     (e.g. closed or migrated to another instance)
 */
@Override
public <PS extends Serializer<P>, P> KeyValueIterator<K, V> prefixScan(final P prefix, final PS prefixKeySerializer) {
    Objects.requireNonNull(prefix);
    Objects.requireNonNull(prefixKeySerializer);
    // Wrap per-store failures with a rediscovery hint; preserve the original
    // exception as the cause (previously dropped, hiding the root failure).
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = store -> {
        try {
            return store.prefixScan(prefix, prefixKeySerializer);
        } catch (final InvalidStateStoreException e) {
            throw new InvalidStateStoreException(
                "State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.",
                e);
        }
    };
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    return new DelegatingPeekingKeyValueIterator<>(
        storeName,
        new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
|
@Test
public void shouldThrowNullPointerExceptionOnPrefixScanNullPrefix() {
    // prefixScan null-checks its prefix argument before touching any store.
    assertThrows(NullPointerException.class, () -> theStore.prefixScan(null, new StringSerializer()));
}
|
/**
 * SQL {@code >=} operator for two CHAR(x) values. Delegates the ordering to
 * {@code compareChars} and returns true when the left operand is greater than
 * or equal to the right.
 */
@LiteralParameters("x")
@ScalarOperator(GREATER_THAN_OR_EQUAL)
@SqlType(StandardTypes.BOOLEAN)
public static boolean greaterThanOrEqual(@SqlType("char(x)") Slice left, @SqlType("char(x)") Slice right)
{
    return compareChars(left, right) >= 0;
}
|
@Test
public void testGreaterThanOrEqual()
{
    // Exercises CHAR >= CHAR across equal/unequal lengths, implicit space
    // padding, and embedded NUL characters.
    assertFunction("cast('bar' as char(5)) >= cast('foo' as char(3))", BOOLEAN, false);
    assertFunction("cast('foo' as char(5)) >= cast('bar' as char(3))", BOOLEAN, true);
    assertFunction("cast('bar' as char(3)) >= cast('foo' as char(5))", BOOLEAN, false);
    assertFunction("cast('foo' as char(3)) >= cast('bar' as char(5))", BOOLEAN, true);
    assertFunction("cast('foo' as char(3)) >= cast('foo' as char(3))", BOOLEAN, true);
    assertFunction("cast('foo' as char(3)) >= cast('foo' as char(5))", BOOLEAN, true);
    assertFunction("cast('foo' as char(5)) >= cast('foo' as char(3))", BOOLEAN, true);
    assertFunction("cast('foo' as char(3)) >= cast('bar' as char(3))", BOOLEAN, true);
    assertFunction("cast('bar' as char(3)) >= cast('foo' as char(3))", BOOLEAN, false);
    assertFunction("cast('foobar' as char(6)) >= cast('foobaz' as char(6))", BOOLEAN, false);
    assertFunction("cast('foob r' as char(6)) >= cast('foobar' as char(6))", BOOLEAN, false);
    assertFunction("cast(' ' as char(1)) >= cast('\0' as char(1))", BOOLEAN, true);
    assertFunction("cast('' as char(0)) >= cast('\0' as char(1))", BOOLEAN, true);
    assertFunction("cast('abc' as char(4)) >= cast('abc\0' as char(4))", BOOLEAN, true); // 'abc' is implicitly padded with spaces -> 'abc' is greater
    assertFunction("cast('\0 ' as char(2)) >= cast('\0' as char(1))", BOOLEAN, true); // length mismatch, coercion to VARCHAR applies
    assertFunction("cast('\0 ' as char(2)) >= cast('\0' as char(2))", BOOLEAN, true); // '\0' is implicitly padded with spaces -> both are equal
    assertFunction("cast('\0 a' as char(3)) >= cast('\0' as char(3))", BOOLEAN, true);
}
|
/**
 * Handles the {@code -triggerBlockReport} dfsadmin subcommand: asks the
 * datanode at the given host:port to send a block report, optionally
 * incremental, and optionally targeted at a specific namenode.
 *
 * @param argv full CLI argv; argv[0] is the subcommand name and is skipped
 * @return 0 on success, 1 on bad arguments or RPC failure
 * @throws IOException if the datanode proxy cannot be created
 */
public int triggerBlockReport(String[] argv) throws IOException {
    // Copy args into a mutable list; the popOption helpers below remove
    // recognized options from it as they parse.
    List<String> args = new LinkedList<String>();
    for (int j = 1; j < argv.length; j++) {
        args.add(argv[j]);
    }
    // Optional: direct the block report to a specific namenode.
    InetSocketAddress namenodeAddr = null;
    String nnHostPort = StringUtils.popOptionWithArgument("-namenode", args);
    if (nnHostPort != null) {
        namenodeAddr = NetUtils.createSocketAddr(nnHostPort);
    }
    boolean incremental = StringUtils.popOption("-incremental", args);
    // The remaining positional argument is the datanode host:port.
    String hostPort = StringUtils.popFirstNonOption(args);
    if (hostPort == null) {
        System.err.println("You must specify a host:port pair.");
        return 1;
    }
    // Anything left over is unrecognized input.
    if (!args.isEmpty()) {
        System.err.print("Can't understand arguments: " +
            Joiner.on(" ").join(args) + "\n");
        return 1;
    }
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(hostPort);
    try {
        dnProxy.triggerBlockReport(
            new BlockReportOptions.Factory().
                setNamenodeAddr(namenodeAddr).
                setIncremental(incremental).
                build());
    } catch (IOException e) {
        System.err.println("triggerBlockReport error: " + e);
        return 1;
    }
    System.out.println("Triggering " +
        (incremental ? "an incremental " : "a full ") +
        "block report on " + hostPort +
        (namenodeAddr == null ? "" : " to namenode " + nnHostPort) +
        ".");
    return 0;
}
|
@Test(timeout = 30000)
public void testTriggerBlockReport() throws Exception {
    // Capture stdout so the success message printed by the command can be asserted.
    redirectStream();
    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    final DataNode dn = cluster.getDataNodes().get(0);
    final NameNode nn = cluster.getNameNode();
    final String dnAddr = String.format(
        "%s:%d",
        dn.getXferAddress().getHostString(),
        dn.getIpcPort());
    final String nnAddr = nn.getHostAndPort();
    resetStream();
    final List<String> outs = Lists.newArrayList();
    // Run with both optional flags: incremental report targeted at a namenode.
    final int ret = ToolRunner.run(dfsAdmin,
        new String[]{"-triggerBlockReport", dnAddr, "-incremental", "-namenode", nnAddr});
    assertEquals(0, ret);
    scanIntoList(out, outs);
    // Exactly one success line mentioning both the incremental mode and the namenode.
    assertEquals(1, outs.size());
    assertThat(outs.get(0),
        is(allOf(containsString("Triggering an incremental block report on "),
            containsString(" to namenode "))));
}
|
/** Returns a new builder for this AutoValue type. */
static Builder builder() {
    return new AutoValue_CsvIOParseError.Builder();
}
|
@Test
public void usableInSingleOutput() {
    // Builds two errors — one without and one with the optional filename and
    // csvRecord fields — and checks they survive a trip through a PCollection.
    List<CsvIOParseError> want =
        Arrays.asList(
            CsvIOParseError.builder()
                .setMessage("error message")
                .setObservedTimestamp(Instant.now())
                .setStackTrace("stack trace")
                .build(),
            CsvIOParseError.builder()
                .setMessage("error message")
                .setObservedTimestamp(Instant.now())
                .setStackTrace("stack trace")
                .setFilename("filename")
                .setCsvRecord("csv record")
                .build());
    PCollection<CsvIOParseError> errors = pipeline.apply(Create.of(want));
    PAssert.that(errors).containsInAnyOrder(want);
    pipeline.run();
}
|
/**
 * Creates and persists the new code definition for a newly created project.
 *
 * <p>On Community Edition the definition is attached to the main branch
 * (branch-level definitions are the only supported scope there); on other
 * editions it is stored at project level only.
 *
 * @param newCodeDefinitionValue optional value for the definition (e.g. number of days)
 * @throws IllegalArgumentException when the type/value pair is not compliant
 *         with the Clean as You Code methodology
 */
public void createNewCodeDefinition(DbSession dbSession, String projectUuid, String mainBranchUuid,
    String defaultBranchName, String newCodeDefinitionType, @Nullable String newCodeDefinitionValue) {
    boolean isCommunityEdition = editionProvider.get().filter(EditionProvider.Edition.COMMUNITY::equals).isPresent();
    NewCodePeriodType newCodePeriodType = parseNewCodeDefinitionType(newCodeDefinitionType);
    NewCodePeriodDto dto = new NewCodePeriodDto();
    dto.setType(newCodePeriodType);
    dto.setProjectUuid(projectUuid);
    if (isCommunityEdition) {
        dto.setBranchUuid(mainBranchUuid);
    }
    dto.setValue is only called when the type requires a value.
    getNewCodeDefinitionValueProjectCreation(newCodePeriodType, newCodeDefinitionValue, defaultBranchName).ifPresent(dto::setValue);
    // Reject non-CaYC-compliant combinations before touching the database.
    if (!CaycUtils.isNewCodePeriodCompliant(dto.getType(), dto.getValue())) {
        throw new IllegalArgumentException("Failed to set the New Code Definition. The given value is not compatible with the Clean as You Code methodology. "
            + "Please refer to the documentation for compliant options.");
    }
    dbClient.newCodePeriodDao().insert(dbSession, dto);
}
|
@Test
public void createNewCodeDefinition_return_days_value_for_number_of_days_type() {
    // For NUMBER_OF_DAYS the provided value must be persisted verbatim.
    String numberOfDays = "30";
    newCodeDefinitionResolver.createNewCodeDefinition(dbSession, DEFAULT_PROJECT_ID, MAIN_BRANCH_UUID, MAIN_BRANCH, NUMBER_OF_DAYS.name(), numberOfDays);
    Optional<NewCodePeriodDto> newCodePeriodDto = dbClient.newCodePeriodDao().selectByProject(dbSession, DEFAULT_PROJECT_ID);
    assertThat(newCodePeriodDto)
        .isPresent()
        .get()
        .extracting(NewCodePeriodDto::getType, NewCodePeriodDto::getValue)
        .containsExactly(NUMBER_OF_DAYS, numberOfDays);
}
|
/**
 * Reports any locks still tracked as held. A no-op (with a warning) when lock
 * tracing is disabled or when every tracked lock has been released; otherwise
 * records a marker exception — retrievable via {@code getLastException()} —
 * and logs each offending thread's lock trace.
 */
public synchronized void lockLeakCheck() {
    if (!openLockTrace) {
        // Log-message grammar fixed; behavior unchanged.
        LOG.warn("Lock leak check is disabled; nothing to check.");
        return;
    }
    if (threadCountMap.isEmpty()) {
        LOG.warn("All locks have been released; no leak detected.");
        return;
    }
    // Marker message "lock Leak" is part of the observable contract (asserted by tests).
    setLastException(new Exception("lock Leak"));
    threadCountMap.forEach((name, trackLog) -> trackLog.showLockMessage());
}
|
@Test(timeout = 5000)
public void testLockLeakCheck() {
    // Acquire a lock without releasing it, then run the leak check.
    manager.writeLock(LockLevel.BLOCK_POOl, "test");
    manager.lockLeakCheck();
    // The check records a marker exception with message "lock Leak".
    // Argument order fixed: JUnit's assertEquals takes (expected, actual).
    Exception lastException = manager.getLastException();
    assertEquals("lock Leak", lastException.getMessage());
}
|
/**
 * Dispatches an application event to its type-specific handler. Each case
 * downcasts to the concrete event class for that type; events of an
 * unrecognized type are logged at WARN and dropped.
 */
@SuppressWarnings({"CyclomaticComplexity"})
@Override
public void process(ApplicationEvent event) {
    switch (event.type()) {
        case COMMIT_ASYNC:
            process((AsyncCommitEvent) event);
            return;
        case COMMIT_SYNC:
            process((SyncCommitEvent) event);
            return;
        case POLL:
            process((PollEvent) event);
            return;
        case FETCH_COMMITTED_OFFSETS:
            process((FetchCommittedOffsetsEvent) event);
            return;
        case NEW_TOPICS_METADATA_UPDATE:
            process((NewTopicsMetadataUpdateRequestEvent) event);
            return;
        case ASSIGNMENT_CHANGE:
            process((AssignmentChangeEvent) event);
            return;
        case TOPIC_METADATA:
            process((TopicMetadataEvent) event);
            return;
        case ALL_TOPICS_METADATA:
            process((AllTopicsMetadataEvent) event);
            return;
        case LIST_OFFSETS:
            process((ListOffsetsEvent) event);
            return;
        case RESET_POSITIONS:
            process((ResetPositionsEvent) event);
            return;
        case VALIDATE_POSITIONS:
            process((ValidatePositionsEvent) event);
            return;
        case SUBSCRIPTION_CHANGE:
            process((SubscriptionChangeEvent) event);
            return;
        case UNSUBSCRIBE:
            process((UnsubscribeEvent) event);
            return;
        case CONSUMER_REBALANCE_LISTENER_CALLBACK_COMPLETED:
            process((ConsumerRebalanceListenerCallbackCompletedEvent) event);
            return;
        case COMMIT_ON_CLOSE:
            process((CommitOnCloseEvent) event);
            return;
        case SHARE_FETCH:
            process((ShareFetchEvent) event);
            return;
        case SHARE_ACKNOWLEDGE_SYNC:
            process((ShareAcknowledgeSyncEvent) event);
            return;
        case SHARE_ACKNOWLEDGE_ASYNC:
            process((ShareAcknowledgeAsyncEvent) event);
            return;
        case SHARE_SUBSCRIPTION_CHANGE:
            process((ShareSubscriptionChangeEvent) event);
            return;
        case SHARE_UNSUBSCRIBE:
            process((ShareUnsubscribeEvent) event);
            return;
        case SHARE_ACKNOWLEDGE_ON_CLOSE:
            process((ShareAcknowledgeOnCloseEvent) event);
            return;
        default:
            // Unknown event types are not fatal; log and ignore.
            log.warn("Application event type {} was not expected", event.type());
    }
}
|
@Test
public void testPrepClosingCommitEvents() {
    // Processing a CommitOnCloseEvent must signal the commit manager to
    // prepare for close (pending commits flushed on shutdown).
    setupProcessor(true);
    List<NetworkClientDelegate.UnsentRequest> results = mockCommitResults();
    doReturn(new NetworkClientDelegate.PollResult(100, results)).when(commitRequestManager).pollOnClose();
    processor.process(new CommitOnCloseEvent());
    verify(commitRequestManager).signalClose();
}
|
/**
 * Maps the internal format flag to the classic Netty {@code ByteBufFormat}.
 *
 * @throws UnsupportedOperationException when the configured format has no
 *         classic equivalent
 */
@Override
public ByteBufFormat byteBufFormat() {
    if (byteBufFormat == SIMPLE) {
        return ByteBufFormat.SIMPLE;
    }
    else if (byteBufFormat == HEX_DUMP) {
        return ByteBufFormat.HEX_DUMP;
    }
    throw new UnsupportedOperationException("ReactorNettyLoggingHandler isn't using the classic ByteBufFormat.");
}
|
@Test
void shouldThrowUnsupportedOperationExceptionWhenByteBufFormatIsCalled() {
    // The handler under test is configured with a format that has no classic
    // ByteBufFormat equivalent, so the accessor must throw.
    assertThatExceptionOfType(UnsupportedOperationException.class)
        .isThrownBy(() -> defaultCharsetReactorNettyLoggingHandler.byteBufFormat());
}
|
/** Builds the write transform directly from the validated configuration. */
@Override
protected SchemaTransform from(SchemaTransformConfiguration configuration) {
    return new IcebergWriteSchemaTransform(configuration);
}
|
@Test
public void testBuildTransformWithRow() {
    // Smoke test: building the transform from a configuration Row must not
    // throw. No assertion needed — an exception fails the test.
    Map<String, String> properties = new HashMap<>();
    properties.put("type", CatalogUtil.ICEBERG_CATALOG_TYPE_HADOOP);
    properties.put("warehouse", "test_location");
    Row transformConfigRow =
        Row.withSchema(new IcebergWriteSchemaTransformProvider().configurationSchema())
            .withFieldValue("table", "test_table_identifier")
            .withFieldValue("catalog_name", "test-name")
            .withFieldValue("catalog_properties", properties)
            .build();
    new IcebergWriteSchemaTransformProvider().from(transformConfigRow);
}
|
/**
 * Restores tracker state from a snapshot: the session-id counters and the
 * pending-message ring capacity.
 *
 * <p>NOTE(review): no validation is performed here; inconsistent ids are
 * detected later — see {@code verify()}.
 */
void loadState(final long nextServiceSessionId, final long logServiceSessionId, final int pendingMessageCapacity)
{
    this.nextServiceSessionId = nextServiceSessionId;
    this.logServiceSessionId = logServiceSessionId;
    pendingMessages.reset(pendingMessageCapacity);
}
|
@Test
void loadInvalid()
{
    final CountersManager countersManager = Tests.newCountersManager(16 * 1024);
    final int counterId = countersManager.allocate("test");
    final Counter counter = new Counter(countersManager, 0, counterId);
    final LogPublisher logPublisher = mock(LogPublisher.class);
    final TestClusterClock clusterClock = new TestClusterClock(TimeUnit.MILLISECONDS);
    final PendingServiceMessageTracker tracker = new PendingServiceMessageTracker(
        0, counter, logPublisher, clusterClock);
    // loadState itself accepts the inconsistent session ids without complaint;
    // the inconsistency must surface when verify() runs.
    tracker.loadState(-9223372036854774166L, -9223372036854774166L, 0);
    assertThrows(ClusterException.class, tracker::verify);
}
|
/**
 * Finishes the local transaction: commits every connection, unless an
 * exception was recorded on the transaction context, in which case all
 * connections are rolled back instead. Any per-connection failures are
 * collected and rethrown together.
 *
 * @throws SQLException when committing or rolling back any connection failed
 */
public void commit() throws SQLException {
    final boolean exceptionOccurred = databaseConnectionManager.getConnectionSession()
            .getConnectionContext().getTransactionContext().isExceptionOccur();
    final Collection<SQLException> exceptions = new LinkedList<>(
            exceptionOccurred ? rollbackConnections() : commitConnections());
    throwSQLExceptionIfNecessary(exceptions);
}
|
@Test
void assertCommit() throws SQLException {
    // No exception recorded on the transaction context, so the manager must
    // take the commit path (not rollback).
    localTransactionManager.commit();
    verify(connectionContext.getTransactionContext()).isExceptionOccur();
    verify(connection).commit();
}
|
/**
 * Opens the remote file for reading via SFTP, starting at the transfer
 * status offset. The returned stream uses read-ahead with a bounded number
 * of unconfirmed requests, and closing it also closes the remote handle.
 *
 * @throws BackgroundException wrapping any SFTP/IO failure during open
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final RemoteFile handle = session.sftp().open(file.getAbsolute(), EnumSet.of(OpenMode.READ));
        final int maxUnconfirmedReads = this.getMaxUnconfirmedReads(status);
        if(log.isInfoEnabled()) {
            log.info(String.format("Skipping %d bytes", status.getOffset()));
        }
        // Anonymous subclass ties the remote handle's lifetime to the stream
        // and guards against double close.
        return handle.new ReadAheadRemoteFileInputStream(maxUnconfirmedReads, status.getOffset(), status.getLength()) {
            private final AtomicBoolean close = new AtomicBoolean();
            @Override
            public void close() throws IOException {
                if(close.get()) {
                    log.warn(String.format("Skip double close of stream %s", this));
                    return;
                }
                try {
                    super.close();
                }
                finally {
                    // Release the remote handle even if closing the stream failed.
                    handle.close();
                    close.set(true);
                }
            }
        };
    }
    catch(IOException e) {
        throw new SFTPExceptionMappingService().map("Download {0} failed", e, file);
    }
}
|
@Test
public void testReadRange() throws Exception {
    // Uploads random content, then reads it back starting at offset 100 and
    // checks the bytes match. Uses Long.valueOf instead of the deprecated
    // boxing constructor new Long(...) (removed in recent JDKs).
    final Path home = new SFTPHomeDirectoryService(session).find();
    final Path test = new Path(home, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    new SFTPTouchFeature(session).touch(test, new TransferStatus());
    final int length = 1048576;
    final byte[] content = RandomUtils.nextBytes(length);
    {
        final TransferStatus status = new TransferStatus().withLength(content.length);
        final OutputStream out = new SFTPWriteFeature(session).write(test, status, new DisabledConnectionCallback());
        assertNotNull(out);
        new StreamCopier(status, status).withLimit(Long.valueOf(content.length)).transfer(new ByteArrayInputStream(content), out);
        out.close();
    }
    {
        // Read with a 100-byte offset (append mode signals a ranged read).
        final TransferStatus status = new TransferStatus();
        status.setLength(content.length);
        status.setAppend(true);
        status.setOffset(100L);
        final InputStream in = new SFTPReadFeature(session).read(test, status, new DisabledConnectionCallback());
        assertNotNull(in);
        final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
        new StreamCopier(status, status).withLimit(Long.valueOf(content.length - 100)).transfer(in, buffer);
        in.close();
        final byte[] reference = new byte[content.length - 100];
        System.arraycopy(content, 100, reference, 0, content.length - 100);
        assertArrayEquals(reference, buffer.toByteArray());
    }
    new SFTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Scans a classpath root for resources whose package passes the filter,
 * starting from the default (root) package.
 *
 * @param root          classpath root to scan; must not be null
 * @param packageFilter predicate over package names; must not be null
 * @return the matching resources
 */
public List<R> scanForResourcesInClasspathRoot(URI root, Predicate<String> packageFilter) {
    requireNonNull(root, "root must not be null");
    requireNonNull(packageFilter, "packageFilter must not be null");
    BiFunction<Path, Path, Resource> createResource = createClasspathRootResource();
    return findResourcesForUri(root, DEFAULT_PACKAGE_NAME, packageFilter, createResource);
}
|
@Test
void scanForResourcesInClasspathRoot() {
    // All files under the root must be found and reported as classpath: URIs,
    // with spaces in file names percent-encoded.
    URI classpathRoot = new File("src/test/resources/io/cucumber/core/resource/test").toURI();
    List<URI> resources = resourceScanner.scanForResourcesInClasspathRoot(classpathRoot, aPackage -> true);
    assertThat(resources, containsInAnyOrder(
        URI.create("classpath:resource.txt"),
        URI.create("classpath:other-resource.txt"),
        URI.create("classpath:spaces%20in%20name%20resource.txt")));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.