focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Renders the given list as a comma-separated string ("a, b, c").
 *
 * @param src the values to join; null handling follows {@code join}
 * @return the joined string produced by the shared join helper
 */
public static String toCsv(List<String> src) {
    // Delegate to the shared join helper with the canonical CSV separator.
    final String separator = ", ";
    return join(src, separator);
}
|
@Test
public void testToCsv() {
    // Fix: JUnit's assertEquals takes (expected, actual); the original call
    // passed the actual value first, which inverts failure messages.
    assertEquals("", toCsv(Collections.<String>emptyList()));
    assertEquals("a", toCsv(Collections.singletonList("a")));
    assertEquals("a, b, c", toCsv(Arrays.asList("a", "b", "c")));
}
|
/**
 * Constructs a {@link DataflowRunner} from the given {@link PipelineOptions},
 * validating everything the Dataflow service requires up front.
 *
 * <p>Checks performed here: required options (appName, and region when talking
 * to the service endpoint), worker settings, GCS output paths (gcpTempLocation,
 * stagingLocation, and saveProfilesToGcs when set), existence of explicitly
 * requested filesToStage, job-name format, project id format, worker harness
 * thread count, and JFR support on the current JVM.
 *
 * <p>Side effects on the options: the job name may be lower-cased, filesToStage
 * may be defaulted from the classpath, the GCS upload buffer size may be set
 * for streaming jobs, and the user agent string is always set.
 *
 * @param options pipeline options to validate and adopt
 * @return a runner configured from the validated options
 * @throws IllegalArgumentException if any required option is missing or invalid
 */
public static DataflowRunner fromOptions(PipelineOptions options) {
    DataflowPipelineOptions dataflowOptions =
        PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
    // Collect every missing required option so they can be reported together.
    ArrayList<String> missing = new ArrayList<>();
    if (dataflowOptions.getAppName() == null) {
        missing.add("appName");
    }
    // region is only mandatory when the endpoint is the real Dataflow service.
    if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
        && isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
        missing.add("region");
    }
    if (missing.size() > 0) {
        throw new IllegalArgumentException(
            "Missing required pipeline options: " + Joiner.on(',').join(missing));
    }
    validateWorkerSettings(
        PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));
    PathValidator validator = dataflowOptions.getPathValidator();
    String gcpTempLocation;
    try {
        gcpTempLocation = dataflowOptions.getGcpTempLocation();
    } catch (Exception e) {
        throw new IllegalArgumentException(
            "DataflowRunner requires gcpTempLocation, "
                + "but failed to retrieve a value from PipelineOptions",
            e);
    }
    validator.validateOutputFilePrefixSupported(gcpTempLocation);
    String stagingLocation;
    try {
        stagingLocation = dataflowOptions.getStagingLocation();
    } catch (Exception e) {
        throw new IllegalArgumentException(
            "DataflowRunner requires stagingLocation, "
                + "but failed to retrieve a value from PipelineOptions",
            e);
    }
    validator.validateOutputFilePrefixSupported(stagingLocation);
    // Profiling output is optional; validate the path only when configured.
    if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
        validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
    }
    if (dataflowOptions.getFilesToStage() != null) {
        // The user specifically requested these files, so fail now if they do not exist.
        // (automatically detected classpath elements are permitted to not exist, so later
        // staging will not fail on nonexistent files)
        dataflowOptions.getFilesToStage().stream()
            .forEach(
                stagedFileSpec -> {
                    File localFile;
                    // A spec may be "destName=localPath"; only the local path must exist.
                    if (stagedFileSpec.contains("=")) {
                        String[] components = stagedFileSpec.split("=", 2);
                        localFile = new File(components[1]);
                    } else {
                        localFile = new File(stagedFileSpec);
                    }
                    if (!localFile.exists()) {
                        // should be FileNotFoundException, but for build-time backwards compatibility
                        // cannot add checked exception
                        throw new RuntimeException(
                            String.format("Non-existent files specified in filesToStage: %s", localFile));
                    }
                });
    } else {
        dataflowOptions.setFilesToStage(
            detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
        if (dataflowOptions.getFilesToStage().isEmpty()) {
            throw new IllegalArgumentException("No files to stage has been found.");
        } else {
            LOG.info(
                "PipelineOptions.filesToStage was not specified. "
                    + "Defaulting to files from the classpath: will stage {} files. "
                    + "Enable logging at DEBUG level to see which files will be staged.",
                dataflowOptions.getFilesToStage().size());
            LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
        }
    }
    // Verify jobName according to service requirements, truncating converting to lowercase if
    // necessary.
    String jobName = dataflowOptions.getJobName().toLowerCase();
    checkArgument(
        jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
        "JobName invalid; the name must consist of only the characters "
            + "[-a-z0-9], starting with a letter and ending with a letter "
            + "or number");
    if (!jobName.equals(dataflowOptions.getJobName())) {
        LOG.info(
            "PipelineOptions.jobName did not match the service requirements. "
                + "Using {} instead of {}.",
            jobName,
            dataflowOptions.getJobName());
    }
    dataflowOptions.setJobName(jobName);
    // Verify project
    String project = dataflowOptions.getProject();
    // An all-digit value is almost certainly a project number, not a project id.
    if (project.matches("[0-9]*")) {
        throw new IllegalArgumentException(
            "Project ID '"
                + project
                + "' invalid. Please make sure you specified the Project ID, not project number.");
    } else if (!project.matches(PROJECT_ID_REGEXP)) {
        throw new IllegalArgumentException(
            "Project ID '"
                + project
                + "' invalid. Please make sure you specified the Project ID, not project"
                + " description.");
    }
    DataflowPipelineDebugOptions debugOptions =
        dataflowOptions.as(DataflowPipelineDebugOptions.class);
    // Verify the number of worker threads is a valid value
    if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
        throw new IllegalArgumentException(
            "Number of worker harness threads '"
                + debugOptions.getNumberOfWorkerHarnessThreads()
                + "' invalid. Please make sure the value is non-negative.");
    }
    // Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
    if (dataflowOptions.getRecordJfrOnGcThrashing()
        && Environments.getJavaVersion() == Environments.JavaVersion.java8) {
        throw new IllegalArgumentException(
            "recordJfrOnGcThrashing is only supported on java 9 and up.");
    }
    if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
        dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
    }
    // Adding the Java version to the SDK name for user's and support convenience.
    String agentJavaVer = "(JRE 8 environment)";
    if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
        agentJavaVer =
            String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
    }
    DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
    String userAgentName = dataflowRunnerInfo.getName();
    Preconditions.checkArgument(
        !userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
    String userAgentVersion = dataflowRunnerInfo.getVersion();
    Preconditions.checkArgument(
        !userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
    // Spaces are replaced so the user agent stays a single HTTP header token.
    String userAgent =
        String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
    dataflowOptions.setUserAgent(userAgent);
    return new DataflowRunner(dataflowOptions);
}
|
/**
 * Verifies that job names violating the service's [-a-z0-9] naming rule are
 * rejected by {@code DataflowRunner.fromOptions} with a "JobName invalid"
 * message.
 */
@Test
public void testInvalidJobName() throws IOException {
    List<String> invalidNames = Arrays.asList("invalid_name", "0invalid", "invalid-");
    List<String> expectedReason =
        Arrays.asList("JobName invalid", "JobName invalid", "JobName invalid");
    int index = 0;
    for (String invalidName : invalidNames) {
        DataflowPipelineOptions options = buildPipelineOptions();
        options.setJobName(invalidName);
        try {
            DataflowRunner.fromOptions(options);
            fail("Expected IllegalArgumentException for jobName " + options.getJobName());
        } catch (IllegalArgumentException e) {
            // Each invalid name must fail for the naming rule, not another reason.
            assertThat(e.getMessage(), containsString(expectedReason.get(index)));
        }
        index++;
    }
}
|
/**
 * Builds the container layers for an exploded jar: one set of layers for the
 * jar's manifest-listed dependencies, plus (when non-empty) a resources layer
 * and a classes layer derived from the unzipped jar contents.
 *
 * @return the layers in order: dependencies, then resources, then classes
 * @throws IOException if cleaning the exploded root or unzipping fails
 */
@Override
public List<FileEntriesLayer> createLayers() throws IOException {
    // Start from a clean exploded-artifact root so stale files from a previous
    // run cannot leak into the new layers.
    if (Files.exists(targetExplodedJarRoot)) {
        MoreFiles.deleteRecursively(targetExplodedJarRoot, RecursiveDeleteOption.ALLOW_INSECURE);
    }
    // Dependency jars become their own layers.
    List<FileEntriesLayer> resultLayers =
        JarLayers.getDependenciesLayers(jarPath, ProcessingMode.exploded);
    // Explode the jar, then split its contents into class files and everything
    // else (regular files only) to form the classes and resources layers.
    ZipUtil.unzip(jarPath, targetExplodedJarRoot, true);
    Predicate<Path> classFilePredicate =
        candidate -> candidate.getFileName().toString().endsWith(".class");
    Predicate<Path> resourceFilePredicate = classFilePredicate.negate().and(Files::isRegularFile);
    FileEntriesLayer classesLayer =
        ArtifactLayers.getDirectoryContentsAsLayer(
            ArtifactLayers.CLASSES,
            targetExplodedJarRoot,
            classFilePredicate,
            JarLayers.APP_ROOT.resolve("explodedJar"));
    FileEntriesLayer resourcesLayer =
        ArtifactLayers.getDirectoryContentsAsLayer(
            ArtifactLayers.RESOURCES,
            targetExplodedJarRoot,
            resourceFilePredicate,
            JarLayers.APP_ROOT.resolve("explodedJar"));
    // Empty layers are omitted; resources precede classes when both exist.
    if (!resourcesLayer.getEntries().isEmpty()) {
        resultLayers.add(resourcesLayer);
    }
    if (!classesLayer.getEntries().isEmpty()) {
        resultLayers.add(classesLayer);
    }
    return resultLayers;
}
|
/**
 * Exercises {@code createLayers} against a standard jar whose manifest has no
 * Class-Path entry: expects exactly two layers (resources then classes) with
 * the jar contents extracted under /app/explodedJar.
 */
@Test
public void testCreateLayers_withoutClassPathInManifest() throws IOException, URISyntaxException {
    Path standardJar =
        Paths.get(Resources.getResource(STANDARD_JAR_WITHOUT_CLASS_PATH_MANIFEST).toURI());
    Path destDir = temporaryFolder.newFolder().toPath();
    StandardExplodedProcessor standardExplodedModeProcessor =
        new StandardExplodedProcessor(standardJar, destDir, JAR_JAVA_VERSION);
    List<FileEntriesLayer> layers = standardExplodedModeProcessor.createLayers();
    // No Class-Path entry means no dependency layers: only resources + classes.
    assertThat(layers.size()).isEqualTo(2);
    FileEntriesLayer resourcesLayer = layers.get(0);
    FileEntriesLayer classesLayer = layers.get(1);
    // Validate resources layer.
    assertThat(resourcesLayer.getName()).isEqualTo("resources");
    List<AbsoluteUnixPath> actualResourcesPaths =
        resourcesLayer.getEntries().stream()
            .map(FileEntry::getExtractionPath)
            .collect(Collectors.toList());
    assertThat(actualResourcesPaths)
        .containsExactly(
            AbsoluteUnixPath.get("/app/explodedJar/META-INF/MANIFEST.MF"),
            AbsoluteUnixPath.get("/app/explodedJar/directory1/resource1.txt"),
            AbsoluteUnixPath.get("/app/explodedJar/directory2/directory3/resource2.sql"),
            AbsoluteUnixPath.get("/app/explodedJar/directory4/resource3.txt"),
            AbsoluteUnixPath.get("/app/explodedJar/resource4.sql"));
    // Validate classes layer.
    assertThat(classesLayer.getName()).isEqualTo("classes");
    List<AbsoluteUnixPath> actualClassesPaths =
        classesLayer.getEntries().stream()
            .map(FileEntry::getExtractionPath)
            .collect(Collectors.toList());
    assertThat(actualClassesPaths)
        .containsExactly(
            AbsoluteUnixPath.get("/app/explodedJar/class5.class"),
            AbsoluteUnixPath.get("/app/explodedJar/directory1/class1.class"),
            AbsoluteUnixPath.get("/app/explodedJar/directory1/class2.class"),
            AbsoluteUnixPath.get("/app/explodedJar/directory2/class4.class"),
            AbsoluteUnixPath.get("/app/explodedJar/directory2/directory3/class3.class"));
}
|
/**
 * Returns the cluster list for the given topic.
 *
 * <p>This implementation is a stub and always returns an empty set; the
 * parameters are currently unused.
 *
 * @param topic the topic to query (unused)
 * @param timeoutMillis request timeout in milliseconds (unused)
 * @return an immutable empty set
 */
public Set<String> getClusterList(String topic,
    long timeoutMillis) {
    // Fix: Collections.EMPTY_SET is raw-typed and triggers unchecked warnings;
    // the generic factory Collections.emptySet() is the type-safe equivalent.
    return Collections.emptySet();
}
|
/**
 * The stubbed client API must hand back a non-null, empty cluster set.
 */
@Test
public void assertGetClusterList() {
    final Set<String> clusterList = mqClientAPI.getClusterList(topic, defaultTimeout);
    assertNotNull(clusterList);
    assertEquals(0, clusterList.size());
}
|
/**
 * Polls for coordinator events: refreshes subscription metadata, fires
 * completed offset-commit callbacks, and — for auto-assigned (group-managed)
 * subscriptions — keeps the heartbeat alive, ensures the coordinator is known,
 * and (re)joins the group when needed. For manual assignment it only refreshes
 * metadata and transmits pending coordinator requests.
 *
 * @param timer bounds how long the blocking steps may take
 * @param waitForJoinGroup whether to block on rejoining the group; when false
 *     a zero timer is used so the join is attempted without waiting
 * @return true if the poll completed; false if a blocking step (coordinator
 *     lookup, metadata refresh, or group join) did not finish in time
 */
public boolean poll(Timer timer, boolean waitForJoinGroup) {
    maybeUpdateSubscriptionMetadata();
    // Deliver callbacks for offset commits that completed since the last call.
    invokeCompletedOffsetCommitCallbacks();
    if (subscriptions.hasAutoAssignedPartitions()) {
        // Auto assignment requires a rebalance protocol; fail fast if the user
        // configured an empty assignment strategy.
        if (protocol == null) {
            throw new IllegalStateException("User configured " + ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG +
                " to empty while trying to subscribe for group protocol to auto assign partitions");
        }
        // Always update the heartbeat last poll time so that the heartbeat thread does not leave the
        // group proactively due to application inactivity even if (say) the coordinator cannot be found.
        pollHeartbeat(timer.currentTimeMs());
        if (coordinatorUnknownAndUnreadySync(timer)) {
            return false;
        }
        if (rejoinNeededOrPending()) {
            // due to a race condition between the initial metadata fetch and the initial rebalance,
            // we need to ensure that the metadata is fresh before joining initially. This ensures
            // that we have matched the pattern against the cluster's topics at least once before joining.
            if (subscriptions.hasPatternSubscription()) {
                // For consumer group that uses pattern-based subscription, after a topic is created,
                // any consumer that discovers the topic after metadata refresh can trigger rebalance
                // across the entire consumer group. Multiple rebalances can be triggered after one topic
                // creation if consumers refresh metadata at vastly different times. We can significantly
                // reduce the number of rebalances caused by single topic creation by asking consumer to
                // refresh metadata before re-joining the group as long as the refresh backoff time has
                // passed.
                if (this.metadata.timeToAllowUpdate(timer.currentTimeMs()) == 0) {
                    this.metadata.requestUpdate(true);
                }
                if (!client.ensureFreshMetadata(timer)) {
                    return false;
                }
                maybeUpdateSubscriptionMetadata();
            }
            // if not wait for join group, we would just use a timer of 0
            if (!ensureActiveGroup(waitForJoinGroup ? timer : time.timer(0L))) {
                // since we may use a different timer in the callee, we'd still need
                // to update the original timer's current time after the call
                timer.update(time.milliseconds());
                return false;
            }
        }
    } else {
        // For manually assigned partitions, we do not try to pro-actively lookup coordinator;
        // instead we only try to refresh metadata when necessary.
        // If connections to all nodes fail, wakeups triggered while attempting to send fetch
        // requests result in polls returning immediately, causing a tight loop of polls. Without
        // the wakeup, poll() with no channels would block for the timeout, delaying re-connection.
        // awaitMetadataUpdate() in ensureCoordinatorReady initiates new connections with configured backoff and avoids the busy loop.
        if (metadata.updateRequested() && !client.hasReadyNodes(timer.currentTimeMs())) {
            client.awaitMetadataUpdate(timer);
        }
        // if there is pending coordinator requests, ensure they have a chance to be transmitted.
        client.pollNoWakeup();
    }
    // Auto-commit (if enabled and due) happens regardless of the subscription type.
    maybeAutoCommitOffsetsAsync(timer.currentTimeMs());
    return true;
}
|
/**
 * A NOT_COORDINATOR heartbeat error must fail the heartbeat future and mark
 * the coordinator as unknown so a re-discovery is forced.
 */
@Test
public void testNotCoordinator() {
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
    // not_coordinator will mark coordinator as unknown
    time.sleep(sessionTimeoutMs);
    RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); // should send out the heartbeat
    assertEquals(1, consumerClient.pendingRequestCount());
    assertFalse(future.isDone());
    client.prepareResponse(heartbeatResponse(Errors.NOT_COORDINATOR));
    time.sleep(sessionTimeoutMs);
    consumerClient.poll(time.timer(0));
    // The error response completes the future exceptionally ...
    assertTrue(future.isDone());
    assertTrue(future.failed());
    assertEquals(Errors.NOT_COORDINATOR.exception(), future.exception());
    // ... and invalidates the cached coordinator.
    assertTrue(coordinator.coordinatorUnknown());
}
|
/**
 * Builds a {@link MemberSelector} that accepts a Hazelcast member only when
 * the process id stored in the member's PROCESS_KEY attribute is one of the
 * given ids.
 *
 * @param processIds the process ids to accept
 * @return a selector matching members running any of those processes
 */
public static MemberSelector selectorForProcessIds(ProcessId... processIds) {
    final List<ProcessId> allowedIds = asList(processIds);
    return member -> allowedIds.contains(fromKey(member.getAttribute(PROCESS_KEY.getKey())));
}
|
/**
 * A selector built for COMPUTE_ENGINE must accept only members whose
 * PROCESS_KEY attribute is the compute-engine key.
 */
@Test
public void selecting_ce_nodes() {
    Member member = mock(Member.class);
    MemberSelector underTest = HazelcastMemberSelectors.selectorForProcessIds(COMPUTE_ENGINE);
    // Matching process id: selected.
    when(member.getAttribute(PROCESS_KEY.getKey())).thenReturn(COMPUTE_ENGINE.getKey());
    assertThat(underTest.select(member)).isTrue();
    // Any other process id: rejected.
    when(member.getAttribute(PROCESS_KEY.getKey())).thenReturn(WEB_SERVER.getKey());
    assertThat(underTest.select(member)).isFalse();
    when(member.getAttribute(PROCESS_KEY.getKey())).thenReturn(APP.getKey());
    assertThat(underTest.select(member)).isFalse();
}
|
/**
 * Removes the mapping for the given key.
 *
 * @param key the key to evict
 * @return true if the key was present (a value was actually removed)
 */
@Override
public boolean remove(K key) {
    // The backing cache hands back the evicted value, or null when the key
    // was absent; translate that into this method's boolean contract.
    final Object previous = cache.remove(key);
    return previous != null;
}
|
/**
 * Removing keys from the LRU cache must shrink the size, make the removed key
 * unreadable, leave other entries intact, and report false for absent keys.
 */
@Test
public void testRemove() {
    Cache<String, String> cache = new LRUCache<>(4);
    cache.put("a", "b");
    cache.put("c", "d");
    cache.put("e", "f");
    assertEquals(3, cache.size());
    // Remove an existing key: size drops, key gone, others untouched.
    assertTrue(cache.remove("a"));
    assertEquals(2, cache.size());
    assertNull(cache.get("a"));
    assertEquals("d", cache.get("c"));
    assertEquals("f", cache.get("e"));
    // Removing a missing key reports false and changes nothing.
    assertFalse(cache.remove("key-does-not-exist"));
    // Drain the remaining entries one by one down to an empty cache.
    assertTrue(cache.remove("c"));
    assertEquals(1, cache.size());
    assertNull(cache.get("c"));
    assertEquals("f", cache.get("e"));
    assertTrue(cache.remove("e"));
    assertEquals(0, cache.size());
    assertNull(cache.get("e"));
}
|
/**
 * Attempts to extract an {@link IOException} from the innermost cause of the
 * given throwable, wrapping it so the outer stack trace is preserved.
 *
 * @param path path involved in the failing operation, used in the wrapper text
 * @param thrown the exception chain to inspect; may be null
 * @param message extra message for the wrapper; may be null
 * @return the extracted/wrapped IOException, a channel-specific exception, or
 *     null when {@code thrown} is null or its innermost cause is not an IOE
 */
public static IOException maybeExtractIOException(
    String path,
    Throwable thrown,
    String message) {
    if (thrown == null) {
        return null;
    }
    // walk down the chain of exceptions to find the innermost.
    Throwable cause = getInnermostThrowable(thrown.getCause(), thrown);
    // see if this is an http channel exception
    HttpChannelEOFException channelException =
        maybeExtractChannelException(path, message, cause);
    if (channelException != null) {
        return channelException;
    }
    // not a channel exception, not an IOE.
    if (!(cause instanceof IOException)) {
        return null;
    }
    // the cause can be extracted to an IOE.
    // rather than just return it, we try to preserve the stack trace
    // of the outer exception.
    // as a new instance is created through reflection, the
    // class of the returned instance will be that of the innermost,
    // unless no suitable constructor is available.
    final IOException ioe = (IOException) cause;
    return wrapWithInnerIOE(path, message, thrown, ioe);
}
|
/**
 * An UncheckedIOException buried under SDK exception wrappers must be unwound
 * to its underlying SocketTimeoutException by maybeExtractIOException.
 */
@Test
public void testUncheckedIOExceptionExtraction() throws Throwable {
    intercept(SocketTimeoutException.class, "top",
        () -> {
            // Build a chain: sdk("top") -> sdk("middle") -> UncheckedIO -> SocketTimeout.
            final SdkClientException thrown = sdkException("top",
                sdkException("middle",
                    new UncheckedIOException(
                        new SocketTimeoutException("bottom"))));
            // The extractor must surface the innermost IOE type.
            throw maybeExtractIOException("p1",
                new NoAwsCredentialsException("IamProvider", thrown.toString(), thrown), null);
        });
}
|
/**
 * Handles game/spam chat messages to keep tracked item charge counts in sync
 * and to fire notifications when a charged item breaks or activates.
 *
 * <p>The message (with tags stripped) is matched against one pattern per
 * tracked item; the first matching branch wins. Fixes in this revision: the
 * binding-necklace-used branch now null-checks the equipment container before
 * use (matching the guards used in the ring-of-forging and bracelet-of-clay
 * branches), and the ring-of-forging branch null-checks the inventory
 * container before counting iron ore.
 *
 * @param event the chat message event from the client event bus
 */
@Subscribe
public void onChatMessage(ChatMessage event)
{
	if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
	{
		String message = Text.removeTags(event.getMessage());
		Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
		Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
		Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
		Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
		Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
		Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
		Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
		Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
		Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
		Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
		Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
		Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
		Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
		Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
		Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
		Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
		Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
		Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
		Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
		Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
		if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
		{
			notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
		}
		else if (dodgyBreakMatcher.find())
		{
			notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
			// A break means a fresh necklace is worn next; reset to full charges.
			updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
		}
		else if (dodgyCheckMatcher.find())
		{
			updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
		}
		else if (dodgyProtectMatcher.find())
		{
			updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
		}
		else if (amuletOfChemistryCheckMatcher.find())
		{
			updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
		}
		else if (amuletOfChemistryUsedMatcher.find())
		{
			// The game spells out "one" instead of the digit for a single charge.
			final String match = amuletOfChemistryUsedMatcher.group(1);
			int charges = 1;
			if (!match.equals("one"))
			{
				charges = Integer.parseInt(match);
			}
			updateAmuletOfChemistryCharges(charges);
		}
		else if (amuletOfChemistryBreakMatcher.find())
		{
			notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
			updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
		}
		else if (amuletOfBountyCheckMatcher.find())
		{
			updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
		}
		else if (amuletOfBountyUsedMatcher.find())
		{
			updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
		}
		else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
		{
			updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
		}
		else if (message.contains(BINDING_BREAK_TEXT))
		{
			notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
			// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
			updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
		}
		else if (bindingNecklaceUsedMatcher.find())
		{
			final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
			// FIX: getItemContainer may return null; guard before dereferencing,
			// matching the null checks used in the other branches of this handler.
			if (equipment != null && equipment.contains(ItemID.BINDING_NECKLACE))
			{
				updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
			}
		}
		else if (bindingNecklaceCheckMatcher.find())
		{
			final String match = bindingNecklaceCheckMatcher.group(1);
			int charges = 1;
			if (!match.equals("one"))
			{
				charges = Integer.parseInt(match);
			}
			updateBindingNecklaceCharges(charges);
		}
		else if (ringOfForgingCheckMatcher.find())
		{
			final String match = ringOfForgingCheckMatcher.group(1);
			int charges = 1;
			if (!match.equals("one"))
			{
				charges = Integer.parseInt(match);
			}
			updateRingOfForgingCharges(charges);
		}
		else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
		{
			final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
			final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
			// Determine if the player smelted with a Ring of Forging equipped.
			if (equipment == null)
			{
				return;
			}
			// FIX: inventory may also be null; guard before counting iron ore
			// (only reached for the Varrock platebody message, where the used
			// text short-circuit does not apply).
			if (equipment.contains(ItemID.RING_OF_FORGING)
				&& (message.equals(RING_OF_FORGING_USED_TEXT)
					|| (inventory != null && inventory.count(ItemID.IRON_ORE) > 1)))
			{
				int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
				updateRingOfForgingCharges(charges);
			}
		}
		else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
		{
			notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
			// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
			updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
		}
		else if (chronicleAddMatcher.find())
		{
			final String match = chronicleAddMatcher.group(1);
			if (match.equals("one"))
			{
				setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
			}
			else
			{
				setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
			}
		}
		else if (chronicleUseAndCheckMatcher.find())
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
		}
		else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
		}
		else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
		}
		else if (message.equals(CHRONICLE_FULL_TEXT))
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
		}
		else if (slaughterActivateMatcher.find())
		{
			// No captured count means the bracelet broke rather than activated.
			final String found = slaughterActivateMatcher.group(1);
			if (found == null)
			{
				updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
				notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
			}
			else
			{
				updateBraceletOfSlaughterCharges(Integer.parseInt(found));
			}
		}
		else if (slaughterCheckMatcher.find())
		{
			updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
		}
		else if (expeditiousActivateMatcher.find())
		{
			// No captured count means the bracelet broke rather than activated.
			final String found = expeditiousActivateMatcher.group(1);
			if (found == null)
			{
				updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
				notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
			}
			else
			{
				updateExpeditiousBraceletCharges(Integer.parseInt(found));
			}
		}
		else if (expeditiousCheckMatcher.find())
		{
			updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
		}
		else if (bloodEssenceCheckMatcher.find())
		{
			updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
		}
		else if (bloodEssenceExtractMatcher.find())
		{
			updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
		}
		else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
		{
			updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
		}
		else if (braceletOfClayCheckMatcher.find())
		{
			updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
		}
		else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
		{
			final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
			// Determine if the player mined with a Bracelet of Clay equipped.
			if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
			{
				final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
				// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
				boolean ignore = inventory != null
					&& inventory.count() == 27
					&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
				if (!ignore)
				{
					int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
					updateBraceletOfClayCharges(charges);
				}
			}
		}
		else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
		{
			notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
			updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
		}
	}
}
|
/**
 * A dodgy necklace "check" chat message must persist the parsed charge count
 * (10) to the RS-profile configuration.
 */
@Test
public void testDodgyCheck()
{
	ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHECK, "", 0);
	itemChargePlugin.onChatMessage(chatMessage);
	verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_DODGY_NECKLACE, 10);
}
|
/**
 * Dispatches a retryable exception to its type-specific handler, checked in
 * order: remote-file-not-found, RPC, then user exceptions. Anything else is
 * rethrown unchanged.
 *
 * @param e the exception raised during execution
 * @param context retry bookkeeping passed through to the handlers
 * @throws Exception the original exception when no handler applies, or
 *     whatever the chosen handler throws
 */
public static void handle(Exception e, RetryContext context) throws Exception {
    if (e instanceof RemoteFileNotFoundException) {
        handleRemoteFileNotFound((RemoteFileNotFoundException) e, context);
        return;
    }
    if (e instanceof RpcException) {
        handleRpcException((RpcException) e, context);
        return;
    }
    if (e instanceof UserException) {
        handleUserException((UserException) e, context);
        return;
    }
    throw e;
}
|
/**
 * A RemoteFileNotFoundException raised for a hive view query must be absorbed
 * by the handler (retried internally) rather than propagated to the caller.
 */
@Test
public void testHandleRemoteFileNotFoundException_2() throws Exception {
    ConnectorPlanTestBase.mockHiveCatalog(connectContext);
    String sql = "select * from hive0.tpch.customer_view";
    StatementBase statementBase = SqlParser.parse(sql, connectContext.getSessionVariable()).get(0);
    ExecPlan execPlan = getExecPlan(sql);
    // First retry attempt (attempt counter 0) for this statement.
    ExecuteExceptionHandler.RetryContext retryContext =
        new ExecuteExceptionHandler.RetryContext(0, execPlan, connectContext, statementBase);
    try {
        ExecuteExceptionHandler.handle(new RemoteFileNotFoundException("mock"), retryContext);
    } catch (Exception e) {
        fail("should not throw any exception");
    }
}
|
/**
 * Loads one page of board summaries for the given member and wraps it with
 * paging metadata.
 *
 * @param pageable the requested page and size
 * @param memberId id of the member whose view of the boards is queried
 * @return the page of board summaries plus paging information
 */
public BoardsSimpleResponse findAllBoards(final Pageable pageable, final Long memberId) {
    final Page<BoardSimpleResponse> boardPage =
        boardRepository.findAllBoardsWithPaging(pageable, memberId);
    return BoardsSimpleResponse.of(boardPage, pageable);
}
|
/**
 * Fetches all boards (first page of 10 for member 1); with no further pages
 * available the response must report -1 as the next page.
 */
@Test
void 게시글을_모두_조회한다() {
    // when
    BoardsSimpleResponse result = boardQueryService.findAllBoards(PageRequest.of(0, 10), 1L);
    // then
    assertThat(result.nextPage()).isEqualTo(-1);
}
|
/**
 * Returns whether this wildcard permission implies the given permission.
 *
 * <p>Only {@link WildcardPermission} instances can be implied. Each part of
 * the other permission is checked against the corresponding part of this one:
 * every token in the other part must be matched by a wildcard or a matching
 * token in this part. Parts of this permission beyond the other's length must
 * all be wildcards; parts of the other beyond this one's length are implied
 * automatically.
 *
 * @param p the permission to test
 * @return true if this permission implies {@code p}
 */
@Override
public boolean implies(Permission p) {
    // By default only supports comparisons with other WildcardPermissions
    if (!(p instanceof WildcardPermission)) {
        return false;
    }
    WildcardPermission wp = (WildcardPermission) p;
    List<Set<String>> otherParts = getParts(wp);
    int i = 0;
    for (Set<String> otherPart : otherParts) {
        // If this permission has less parts than the other permission, everything after the number of parts contained
        // in this permission is automatically implied, so return true
        if (getParts().size() - 1 < i) {
            return true;
        } else {
            Set<String> thisPart = getParts().get(i);
            // all tokens from otherPart must pass at least one token from thisPart
            for (String otherToken : otherPart) {
                // Case-insensitive mode compares lower-cased tokens.
                if (!caseSensitive) {
                    otherToken = otherToken.toLowerCase();
                }
                boolean otherIsMatched = false;
                for (String token : thisPart) {
                    // A wildcard in this part matches any token in the other part.
                    if (token.equals(WILDCARD_TOKEN)) {
                        otherIsMatched = true;
                        break;
                    }
                    if (matches(token, otherToken)) {
                        otherIsMatched = true;
                        break;
                    }
                }
                if (!otherIsMatched) {
                    return false;
                }
            }
            i++;
        }
    }
    // If this permission has more parts than the other parts, only imply it if all of the other parts are wildcards
    for (; i < getParts().size(); i++) {
        Set<String> part = getParts().get(i);
        if (!part.contains(WILDCARD_TOKEN)) {
            return false;
        }
    }
    return true;
}
|
/**
 * implies() must reject any Permission that is not a WildcardPermission,
 * regardless of what that permission's own implies() would report.
 */
@Test
public void testNotWildcardPermission() {
    ActiveMQWildcardPermission perm = new ActiveMQWildcardPermission("topic:TEST:*");
    // A non-wildcard Permission implementation used purely as a type probe.
    Permission dummy = new Permission() {
        @Override
        public boolean implies(Permission p) {
            return false;
        }
    };
    assertFalse(perm.implies(dummy));
}
|
/**
 * Derives the component type (SOURCE, SINK, or FUNCTION) for the given
 * function details.
 *
 * <p>An explicitly declared component type always wins. Otherwise: no input
 * topics means SOURCE; a builtin sink spec means SINK; a missing sink class or
 * the default PulsarSink means a plain FUNCTION; any other sink class means a
 * custom SINK.
 *
 * @param functionDetails the details to classify
 * @return the derived component type
 */
public Function.FunctionDetails.ComponentType calculateSubjectType(Function.FunctionDetails functionDetails) {
    Function.FunctionDetails.ComponentType declaredType = functionDetails.getComponentType();
    if (declaredType != Function.FunctionDetails.ComponentType.UNKNOWN) {
        return declaredType;
    }
    Function.SourceSpec sourceSpec = functionDetails.getSource();
    Function.SinkSpec sinkSpec = functionDetails.getSink();
    // Only a source runs without consuming any input topics.
    if (sourceSpec.getInputSpecsCount() == 0) {
        return Function.FunctionDetails.ComponentType.SOURCE;
    }
    // From here it is either a sink or a plain function.
    if (!isEmpty(sinkSpec.getBuiltin())) {
        return Function.FunctionDetails.ComponentType.SINK;
    }
    String sinkClassName = sinkSpec.getClassName();
    if (isEmpty(sinkClassName) || sinkClassName.equals(PulsarSink.class.getName())) {
        return Function.FunctionDetails.ComponentType.FUNCTION;
    }
    return Function.FunctionDetails.ComponentType.SINK;
}
|
/**
 * With no input topics the subject type must default to SOURCE, but any
 * explicitly set component type must take precedence over the inference.
 */
@Test
public void testCalculateSubjectTypeForSource() {
    FunctionDetails.Builder builder = FunctionDetails.newBuilder();
    // no input topics mean source
    builder.setSource(Function.SourceSpec.newBuilder().build());
    assertEquals(InstanceUtils.calculateSubjectType(builder.build()), FunctionDetails.ComponentType.SOURCE);
    // make sure that if the componenttype is set, that gets precedence.
    builder.setComponentType(FunctionDetails.ComponentType.SINK);
    assertEquals(InstanceUtils.calculateSubjectType(builder.build()), FunctionDetails.ComponentType.SINK);
    builder.setComponentType(FunctionDetails.ComponentType.FUNCTION);
    assertEquals(InstanceUtils.calculateSubjectType(builder.build()), FunctionDetails.ComponentType.FUNCTION);
}
|
/**
 * Decides whether the installed SonarQube version is still "active": the
 * current LTA is always active; the past LTA is active only within 6 months
 * of the current LTA's initial release; any other version is active when it
 * is at least the previous release (patch versions ignored throughout).
 *
 * @param updateCenter source of LTA/past-LTA versions and the release history
 * @return true when the installed version is considered active
 * @throws IllegalStateException if the current LTA's initial release date is
 *     missing from the release history
 */
public boolean evaluateIfActiveVersion(UpdateCenter updateCenter) {
    Version installedVersion = Version.create(sonarQubeVersion.get().toString());
    // Running the current LTA: always active.
    if (compareWithoutPatchVersion(installedVersion, updateCenter.getSonar().getLtaVersion().getVersion()) == 0) {
        return true;
    }
    SortedSet<Release> allReleases = updateCenter.getSonar().getAllReleases();
    if (compareWithoutPatchVersion(installedVersion, updateCenter.getSonar().getPastLtaVersion().getVersion()) == 0) {
        // Running the previous LTA: active only during the 6-month grace
        // window after the current LTA's first release.
        Release initialLtaRelease = findInitialVersionOfMajorRelease(allReleases, updateCenter.getSonar().getLtaVersion().getVersion());
        Date initialLtaReleaseDate = initialLtaRelease.getDate();
        if (initialLtaReleaseDate == null) {
            throw new IllegalStateException("Initial Major release date is missing in releases");
        }
        // date of the latest major release should be within 6 months
        Calendar c = Calendar.getInstance();
        c.setTime(new Date(system2.now()));
        c.add(Calendar.MONTH, -6);
        return initialLtaReleaseDate.after(c.getTime());
    } else {
        // Otherwise active only when at least as new as the previous release.
        return compareWithoutPatchVersion(installedVersion, findPreviousReleaseIgnoringPatch(allReleases).getVersion()) >= 0;
    }
}
|
/**
 * An installed version one minor behind the latest release must still be
 * evaluated as active.
 */
@Test
void evaluateIfActiveVersion_whenInstalledVersionIsLatestMinusOne_shouldReturnVersionIsActive() {
    when(sonarQubeVersion.get()).thenReturn(parse("10.9"));
    when(updateCenter.getSonar().getAllReleases()).thenReturn(getReleases());
    assertThat(underTest.evaluateIfActiveVersion(updateCenter)).isTrue();
}
|
/**
 * Creates a new uninitialized {@link Write} transform, pre-wired with the
 * default {@code writeRecords()} transform so further configuration can be
 * layered on via the fluent with* methods.
 *
 * @param <K> key type of the records to write
 * @param <V> value type of the records to write
 * @return a fresh Write transform builder result
 */
public static <K, V> Write<K, V> write() {
    AutoValue_KafkaIO_Write.Builder<K, V> builder = new AutoValue_KafkaIO_Write.Builder<>();
    return builder.setWriteRecordsTransform(writeRecords()).build();
}
|
/**
 * End-to-end sink test: reads from the mock Kafka source, writes through the
 * KafkaIO sink into a mock producer, then verifies the published records and
 * that sink lineage metadata was reported.
 */
@Test
public void testSink() throws Exception {
    // Simply read from kafka source and write to kafka sink. Then verify the records
    // are correctly published to mock kafka producer.
    int numElements = 1000;
    try (MockProducerWrapper producerWrapper = new MockProducerWrapper(new LongSerializer())) {
        // Completes producer sends asynchronously while the pipeline runs.
        ProducerSendCompletionThread completionThread =
            new ProducerSendCompletionThread(producerWrapper.mockProducer).start();
        String topic = "test";
        String bootStrapServer = "none";
        p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata())
            .apply(
                KafkaIO.<Integer, Long>write()
                    .withBootstrapServers(bootStrapServer)
                    .withTopic(topic)
                    .withKeySerializer(IntegerSerializer.class)
                    .withValueSerializer(LongSerializer.class)
                    .withInputTimestamp()
                    .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)));
        PipelineResult result = p.run();
        completionThread.shutdown();
        verifyProducerRecords(producerWrapper.mockProducer, topic, numElements, false, true);
        // The sink must report "kafka:<bootstrap>.<topic>" in its lineage metrics.
        assertThat(
            Lineage.query(result.metrics(), Lineage.Type.SINK),
            hasItem(String.format("kafka:%s.%s", bootStrapServer, topic)));
    }
}
|
/**
 * Extracts the table name from a statistics row node path such as
 * {@code <dataNodePath>/<db>/schemas/<schema>/tables/<table>}.
 *
 * @param rowPath row node path
 * @return the table name, or {@link Optional#empty()} when the path contains none
 */
public static Optional<String> getTableNameByRowPath(final String rowPath) {
    // NOTE(review): the pattern is recompiled on every call; if
    // getShardingSphereDataNodePath() is a constant, consider caching this as a
    // static final Pattern — TODO confirm.
    Pattern pattern = Pattern.compile(getShardingSphereDataNodePath() + "/([\\w\\-]+)/schemas/([\\w\\-]+)/tables" + "/([\\w\\-]+)?", Pattern.CASE_INSENSITIVE);
    Matcher matcher = pattern.matcher(rowPath);
    // Group 3 is optional ("?"), so it may be null even when find() succeeds
    // (e.g. a path ending in ".../tables/"); ofNullable avoids a NullPointerException
    // that Optional.of would throw in that case.
    return matcher.find() ? Optional.ofNullable(matcher.group(3)) : Optional.empty();
}
|
@Test
void assertGetTableNameByRowPathTableNameNotFoundScenario() {
    // A path ending at schema level (no "/tables/<name>" suffix) yields no table name.
    assertThat(ShardingSphereDataNode.getTableNameByRowPath("/statistics/databases/db_name/schemas/db_schema"), is(Optional.empty()));
}
|
/**
 * Creates a new mail account from the request VO and returns its generated id.
 *
 * @param createReqVO account creation request
 * @return the database-generated primary key of the inserted account
 */
@Override
public Long createMailAccount(MailAccountSaveReqVO createReqVO) {
    // Map the request onto a persistent entity, insert it, and hand back the id
    // that the mapper populated on insert.
    MailAccountDO entity = BeanUtils.toBean(createReqVO, MailAccountDO.class);
    mailAccountMapper.insert(entity);
    return entity.getId();
}
|
@Test
public void testCreateMailAccount_success() {
    // Prepare arguments
    MailAccountSaveReqVO reqVO = randomPojo(MailAccountSaveReqVO.class, o -> o.setMail(randomEmail()))
            .setId(null); // clear the id so the random generator cannot pre-assign it
    // Invoke
    Long mailAccountId = mailAccountService.createMailAccount(reqVO);
    // Assert
    assertNotNull(mailAccountId);
    // Verify the persisted record matches the request (ignoring the generated id)
    MailAccountDO mailAccount = mailAccountMapper.selectById(mailAccountId);
    assertPojoEquals(reqVO, mailAccount, "id");
}
|
/**
 * Starts the headless (standalone) KSQL server: loads UDFs, ensures the processing
 * log topic exists, applies RocksDB config, executes all statements from the queries
 * file, then starts the version-check reporter. Failures are logged and rethrown.
 */
public void startAsync() {
  try {
    udfLoader.load();
    ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
        serviceContext.getTopicClient(),
        processingLogConfig,
        ksqlConfig);
    // Stream auto-create is a server-mode feature; in headless mode we only warn.
    if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
      log.warn("processing log auto-create is enabled, but this is not supported "
          + "for headless mode.");
    }
    rocksDBConfigSetterHandler.accept(ksqlConfig);
    processesQueryFile(readQueriesFile(queriesFile));
    showWelcomeMessage();
    // Forward only non-null config values (as strings) to the version checker.
    final Properties properties = new Properties();
    ksqlConfig.originals().forEach((key, value) -> {
      if (nonNull(value)) {
        properties.put(key, value.toString());
      }
    });
    versionChecker.start(KsqlModuleType.SERVER, properties);
  } catch (final Exception e) {
    log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
    throw e;
  }
}
|
@Test
public void shouldThrowOnCreateStatementWithNoElements() {
  // Given: a CREATE STREAM with no columns and a format that cannot infer a schema.
  final PreparedStatement<CreateStream> cs = PreparedStatement.of("CS",
      new CreateStream(SOME_NAME, TableElements.of(), false, false, JSON_PROPS, false));
  givenQueryFileParsesTo(cs);
  // When:
  final Exception e = assertThrows(
      KsqlStatementException.class,
      () -> standaloneExecutor.startAsync()
  );
  // Then:
  assertThat(e.getMessage(), containsString("statement does not define the schema and the supplied format does not support schema inference"));
}
|
/**
 * Appends a standby-snapshot entry to the recording log, or restores a matching
 * invalidated snapshot entry in place when one exists.
 *
 * @param recordingId          archive recording id of the snapshot (validated first)
 * @param leadershipTermId     leadership term in which the snapshot was taken
 * @param termBaseLogPosition  base log position of that term
 * @param logPosition          log position at which the snapshot was taken
 * @param timestamp            timestamp of the snapshot
 * @param serviceId            id of the service the snapshot belongs to
 * @param archiveEndpoint      remote archive endpoint; must be non-empty and at most
 *                             MAX_ENDPOINT_LENGTH characters
 * @throws ClusterException if the endpoint is empty or too long
 */
public void appendStandbySnapshot(
    final long recordingId,
    final long leadershipTermId,
    final long termBaseLogPosition,
    final long logPosition,
    final long timestamp,
    final int serviceId,
    final String archiveEndpoint)
{
    validateRecordingId(recordingId);
    if (Strings.isEmpty(archiveEndpoint))
    {
        throw new ClusterException("Remote snapshots must have a valid endpoint");
    }
    if (archiveEndpoint.length() > MAX_ENDPOINT_LENGTH)
    {
        throw new ClusterException(
            "Endpoint is too long: " + archiveEndpoint.length() + " vs " + MAX_ENDPOINT_LENGTH);
    }
    // Prefer repairing a previously invalidated snapshot entry; only append a new
    // entry when no such entry was found.
    if (!restoreInvalidSnapshot(
        recordingId, leadershipTermId, termBaseLogPosition, logPosition, timestamp, serviceId))
    {
        append(
            ENTRY_TYPE_STANDBY_SNAPSHOT,
            recordingId,
            leadershipTermId,
            termBaseLogPosition,
            logPosition,
            timestamp,
            serviceId,
            archiveEndpoint);
    }
}
|
@Test
void shouldRejectSnapshotEntryIfEndpointIsTooLong(@TempDir final File tempDir)
{
    // A 5000+ character endpoint exceeds MAX_ENDPOINT_LENGTH, so the append
    // must be rejected with a descriptive ClusterException.
    final String endpoint = Tests.generateStringWithSuffix("a", "x", 5000);
    try (RecordingLog log = new RecordingLog(tempDir, true))
    {
        final ClusterException exception = assertThrowsExactly(ClusterException.class,
            () -> log.appendStandbySnapshot(1, 2, 1000, 2000, 1_000_000_000L, SERVICE_ID, endpoint));
        assertEquals("ERROR - Endpoint is too long: " + endpoint.length() + " vs " + MAX_ENDPOINT_LENGTH,
            exception.getMessage());
    }
}
|
/**
 * Resolves key or value metadata for the Java serialization format. The top-level
 * type comes either from an explicit custom-type declaration among the mapping
 * fields or, failing that, from the configured key/value class option; resolution
 * then dispatches to primitive-style or object-style handling.
 */
@Override
public KvMetadata resolveMetadata(
        boolean isKey,
        List<MappingField> resolvedFields,
        Map<String, String> options,
        InternalSerializationService serializationService
) {
    Map<QueryPath, MappingField> fieldsByPath = extractFields(resolvedFields, isKey);
    // Prefer a declared top-level custom type; otherwise derive the type from the
    // configured key/value class option.
    Entry<QueryDataType, Class<?>> entry = getTopLevelType(fieldsByPath)
            .<Entry<QueryDataType, Class<?>>>map(type -> entry(type, loadClass(type.getObjectTypeMetadata())))
            .orElseGet(() -> {
                Class<?> typeClass = loadClass(options, isKey);
                return entry(QueryDataTypeUtils.resolveTypeForClass(typeClass), typeClass);
            });
    QueryDataType type = entry.getKey();
    Class<?> typeClass = entry.getValue();
    // Non-OBJECT families and custom types map to a single top-level column;
    // a plain OBJECT is introspected field-by-field.
    if (type.getTypeFamily() != QueryDataTypeFamily.OBJECT || type.isCustomType()) {
        return resolvePrimitiveMetadata(isKey, resolvedFields, fieldsByPath, type);
    } else {
        return resolveObjectMetadata(isKey, resolvedFields, fieldsByPath, typeClass);
    }
}
|
@Test
@Parameters({
    "true",
    "false"
})
public void when_noKeyOrThisPrefixInExternalName_then_usesValue(boolean key) {
    Map<String, String> options = Map.of(
            (key ? OPTION_KEY_FORMAT : OPTION_VALUE_FORMAT), JAVA_FORMAT,
            (key ? OPTION_KEY_CLASS : OPTION_VALUE_CLASS), Object.class.getName()
    );
    KvMetadata metadata = INSTANCE.resolveMetadata(
            key,
            singletonList(field("field", QueryDataType.INT, "extField")),
            options,
            null
    );
    // For keys, the un-prefixed external field is dropped and only the implicit
    // __key column remains; for values, the mapped field plus the implicit "this"
    // column are kept.
    assertThat(metadata.getFields()).containsExactly(
            key
                    ? new MapTableField[]{
                    new MapTableField("__key", QueryDataType.OBJECT, true, QueryPath.KEY_PATH)
            }
                    : new MapTableField[]{
                    new MapTableField("field", QueryDataType.INT, false, new QueryPath("extField", false)),
                    new MapTableField("this", QueryDataType.OBJECT, true, QueryPath.VALUE_PATH)
            });
}
|
/**
 * Builds a map from an inline list of alternating keys and values, e.g.
 * {@code map("k1", v1, "k2", v2)}.
 *
 * @param objects alternating String keys and Object values; length must be even
 * @return a mutable {@link HashMap} containing the given pairs
 * @throws FlowableIllegalArgumentException if an odd number of arguments is supplied
 */
public static Map<String, Object> map(Object... objects) {
    if (objects.length % 2 != 0) {
        throw new FlowableIllegalArgumentException("The input should always be even since we expect a list of key-value pairs!");
    }
    // Presize to the number of pairs to avoid rehashing while filling.
    Map<String, Object> map = new HashMap<>(objects.length / 2 + 1);
    for (int i = 0; i < objects.length; i += 2) {
        map.put((String) objects[i], objects[i + 1]);
    }
    return map;
}
|
@Test
void mapOddNumberOfParameters() {
    // An odd argument count cannot form key-value pairs and must be rejected,
    // both for a single dangling key and for a trailing unpaired key.
    assertThatThrownBy(() -> CollectionUtil.map("key"))
            .isInstanceOf(FlowableIllegalArgumentException.class)
            .hasMessage("The input should always be even since we expect a list of key-value pairs!");
    assertThatThrownBy(() -> CollectionUtil.map("key1", "value1", "key2"))
            .isInstanceOf(FlowableIllegalArgumentException.class)
            .hasMessage("The input should always be even since we expect a list of key-value pairs!");
}
|
/**
 * Initialises this streaming step: resolves the sub-transformation path, loads the
 * sub-trans metadata, creates the sub-trans executor, and runs the meta's own
 * check() to surface configuration errors.
 *
 * @return {@code true} only if the superclass init succeeded, the sub-trans
 *         loaded, and no error-level check results were produced
 */
@Override
public boolean init( StepMetaInterface stepMetaInterface, StepDataInterface stepDataInterface ) {
  Preconditions.checkNotNull( stepMetaInterface );
  variablizedStepMeta = (BaseStreamStepMeta) stepMetaInterface;
  variablizedStepMeta.setParentStepMeta( getStepMeta() );
  // The transformation path is resolved into the filename before super.init so
  // variable substitution uses the parent's variable space.
  variablizedStepMeta.setFileName( variablizedStepMeta.getTransformationPath() );
  boolean superInit = super.init( stepMetaInterface, stepDataInterface );
  try {
    TransMeta transMeta = TransExecutorMeta
      .loadMappingMeta( variablizedStepMeta, getTransMeta().getRepository(), getTransMeta().getMetaStore(),
        getParentVariableSpace() );
    // Re-bind the meta to this step's variables before handing it to the executor.
    variablizedStepMeta = (BaseStreamStepMeta) variablizedStepMeta.withVariables( this );
    subtransExecutor = new SubtransExecutor( getStepname(),
      getTrans(), transMeta, true,
      new TransExecutorParameters(), variablizedStepMeta.getSubStep(), getPrefetchCount() );
  } catch ( KettleException e ) {
    log.logError( e.getLocalizedMessage(), e );
    return false;
  }
  List<CheckResultInterface> remarks = new ArrayList<>();
  variablizedStepMeta.check(
    remarks, getTransMeta(), variablizedStepMeta.getParentStepMeta(),
    null, null, null, null, //these parameters are not used inside the method
    variables, getRepository(), getMetaStore() );
  // Log every error-level remark; any such remark fails the init.
  boolean errorsPresent =
    remarks.stream().filter( result -> result.getType() == CheckResultInterface.TYPE_RESULT_ERROR )
      .peek( result -> logError( result.getText() ) )
      .count() > 0;
  if ( errorsPresent ) {
    return false;
  }
  return superInit;
}
|
@Test
public void testInitFilenameSubstitution() throws IOException {
  // verifies that filename resolution uses the parents ${Internal.Entry.Current.Directory}.
  // Necessary since the Current.Directory may change when running non-locally.
  // Variables should all be set in variableizedStepMeta after init, with the caveat that
  // the substrans location must be set using the parents Current.Directory.
  File testFile = File.createTempFile( "testInitFilenameSubstitution", ".ktr",
    folder.getRoot() );
  try ( PrintWriter pw = new PrintWriter( testFile ) ) {
    // empty subtrans definition
    pw.write( "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
      + "<transformation/>" );
  }
  when( meta.getParentStepMeta() ).thenReturn( parentStepMeta );
  when( meta.withVariables( baseStreamStep ) ).thenReturn( metaWithVariables );
  // Point Current.Directory at the temp dir so the variable-based filename resolves.
  baseStreamStep.getParentVariableSpace()
    .setVariable( "Internal.Entry.Current.Directory",
      testFile.getParentFile().getAbsolutePath() );
  when( meta.getSpecificationMethod() ).thenReturn( ObjectLocationSpecificationMethod.FILENAME );
  when( meta.getFileName() ).thenReturn( "${Internal.Entry.Current.Directory}/" + testFile.getName() );
  assertTrue( baseStreamStep.init( meta, stepData ) );
  assertThat( baseStreamStep.variablizedStepMeta, equalTo( metaWithVariables ) );
}
|
/**
 * Serialises these base parameters back into the command-line token list understood
 * by a batch task. Always-present numeric/timing flags come first; optional flags
 * are appended only when they differ from their defaults.
 *
 * @return a mutable list of CLI tokens
 */
public List<String> toBatchTaskArgumentString() {
  List<String> res = new ArrayList<>(Arrays.asList(
      CLUSTER_LIMIT_FLAG, String.valueOf(mClusterLimit),
      CLUSTER_START_DELAY_FLAG, mClusterStartDelay,
      BENCH_TIMEOUT, mBenchTimeout,
      START_MS_FLAG, String.valueOf(mStartMs)));
  if (!mProfileAgent.isEmpty()) {
    res.add(PROFILE_AGENT);
    res.add(mProfileAgent);
  }
  if (!mId.equals(DEFAULT_TASK_ID)) {
    res.add(ID_FLAG);
    res.add(mId);
  }
  // NOTE(review): mIndex is compared against DEFAULT_TASK_ID rather than an
  // index-specific default; this mirrors the original code but is worth confirming.
  if (!mIndex.equals(DEFAULT_TASK_ID)) {
    res.add(INDEX_FLAG);
    res.add(mIndex);
  }
  // Iterating an empty list is a no-op, so no emptiness guard is needed here.
  for (String s : mJavaOpts) {
    res.add(JAVA_OPT_FLAG);
    res.add(s);
  }
  if (mCluster) {
    res.add(CLUSTER_FLAG);
  }
  if (mDistributed) {
    res.add(DISTRIBUTED_FLAG);
  }
  if (mInProcess) {
    res.add(IN_PROCESS_FLAG);
  }
  if (mHelp) {
    res.add(HELP_FLAG);
  }
  return res;
}
|
@Test
public void parseSingleParametersToArgument() throws Exception {
  // test single parameter
  // Each entry is parsed in isolation and must round-trip through
  // toBatchTaskArgumentString(): boolean flags appear verbatim, valued flags
  // appear as adjacent flag/value pairs.
  List<String[]> inputArgs = Arrays.asList(
      new String[]{"--cluster-limit", "4"},
      new String[]{"--cluster-start-delay", "5s"},
      new String[]{"--profile-agent", "TestProfile"},
      new String[]{"--bench-timeout", "10m"},
      new String[]{"--id", "TestID"},
      new String[]{"--start-ms", "1000"},
      new String[]{"--distributed"},
      new String[]{"--in-process"},
      new String[]{"--help"}
  );
  for (String[] s : inputArgs) {
    // Fresh parameters and parser per case so earlier flags don't leak.
    mBaseParameter = new BaseParameters();
    JCommander jc = new JCommander(this);
    jc.parse(s);
    List<String> outputArgs = mBaseParameter.toBatchTaskArgumentString();
    if (s.length == 1) {
      assertTrue(outputArgs.contains(s[0]));
    }
    else {
      validateOutput(Arrays.asList(s), outputArgs, 1);
    }
  }
}
|
/**
 * Synchronously retrieves the current value and sets its expiration to the given
 * instant, by blocking on {@link #getAndExpireAsync}.
 */
@Override
public V getAndExpire(Instant time) {
    return get(getAndExpireAsync(time));
}
|
@Test
public void testGetAndExpire() throws InterruptedException {
    // NOTE(review): this test depends on real wall-clock sleeps around a 1-second
    // TTL and may be flaky on heavily loaded machines.
    RBucket<Integer> al = redisson.getBucket("test");
    al.set(1);
    // Duration-based overload: value is returned and expires ~1s later.
    assertThat(al.getAndExpire(Duration.ofSeconds(1))).isEqualTo(1);
    Thread.sleep(500);
    assertThat(al.get()).isEqualTo(1);
    Thread.sleep(600);
    assertThat(al.get()).isNull();
    al.set(2);
    // Instant-based overload: same semantics with an absolute deadline.
    assertThat(al.getAndExpire(Instant.now().plusSeconds(1))).isEqualTo(2);
    Thread.sleep(500);
    assertThat(al.get()).isEqualTo(2);
    Thread.sleep(600);
    assertThat(al.get()).isNull();
}
|
/**
 * Sends an instance heartbeat (HTTP PUT) to the peer Eureka node.
 * On a 409 CONFLICT the peer's view of the instance is read from the response body
 * and returned so the caller can reconcile state.
 *
 * @param overriddenStatus optional status override forwarded as a query parameter
 * @return the HTTP response; its entity is non-null only for a 409 with a body
 */
@Override
public EurekaHttpResponse<InstanceInfo> sendHeartBeat(String appName, String id, InstanceInfo info, InstanceStatus overriddenStatus) {
    String urlPath = "apps/" + appName + '/' + id;
    Response response = null;
    try {
        WebTarget webResource = jerseyClient.target(serviceUrl)
                .path(urlPath)
                .queryParam("status", info.getStatus().toString())
                .queryParam("lastDirtyTimestamp", info.getLastDirtyTimestamp().toString());
        if (overriddenStatus != null) {
            webResource = webResource.queryParam("overriddenstatus", overriddenStatus.name());
        }
        Builder requestBuilder = webResource.request();
        addExtraHeaders(requestBuilder);
        response = requestBuilder.accept(MediaType.APPLICATION_JSON_TYPE).put(Entity.entity("{}", MediaType.APPLICATION_JSON_TYPE)); // Jersey2 refuses to handle PUT with no body
        InstanceInfo infoFromPeer = null;
        // 409 means the peer has a conflicting view; return its InstanceInfo if present.
        if (response.getStatus() == Status.CONFLICT.getStatusCode() && response.hasEntity()) {
            infoFromPeer = response.readEntity(InstanceInfo.class);
        }
        return anEurekaHttpResponse(response.getStatus(), infoFromPeer).type(MediaType.APPLICATION_JSON_TYPE).build();
    } finally {
        if (logger.isDebugEnabled()) {
            logger.debug("[heartbeat] Jersey HTTP PUT {}; statusCode={}", urlPath, response == null ? "N/A" : response.getStatus());
        }
        // Always release the connection, even when the request failed early.
        if (response != null) {
            response.close();
        }
    }
}
|
@Test
public void testHeartbeatReplicationWithResponseBody() throws Exception {
    // The mock peer answers the replication heartbeat with 409 CONFLICT and a
    // gzipped InstanceInfo body; the client must surface both status and entity.
    InstanceInfo remoteInfo = new InstanceInfo(this.instanceInfo);
    remoteInfo.setStatus(InstanceStatus.DOWN);
    byte[] responseBody = toGzippedJson(remoteInfo);
    serverMockClient.when(
            request()
                    .withMethod("PUT")
                    .withHeader(header(PeerEurekaNode.HEADER_REPLICATION, "true"))
                    .withPath("/eureka/v2/apps/" + this.instanceInfo.getAppName() + '/' + this.instanceInfo.getId())
    ).respond(
            response()
                    .withStatusCode(Status.CONFLICT.getStatusCode())
                    .withHeader(header("Content-Type", MediaType.APPLICATION_JSON))
                    .withHeader(header("Content-Encoding", "gzip"))
                    .withBody(responseBody)
    );
    EurekaHttpResponse<InstanceInfo> response = replicationClient.sendHeartBeat(this.instanceInfo.getAppName(), this.instanceInfo.getId(), this.instanceInfo, null);
    assertThat(response.getStatusCode(), is(equalTo(Status.CONFLICT.getStatusCode())));
    assertThat(response.getEntity(), is(notNullValue()));
}
|
/**
 * Routes a unicast statement to a single data source. With no logic tables, a bare
 * data-source route is produced; with exactly one table, the route targets one of
 * that table's actual data nodes (or the bare data source when the table is not
 * sharded); multiple tables are delegated to multi-table routing.
 */
@Override
public RouteContext route(final ShardingRule shardingRule) {
    RouteContext result = new RouteContext();
    String dataSourceName = getDataSourceName(shardingRule.getDataSourceNames());
    RouteMapper dataSourceMapper = new RouteMapper(dataSourceName, dataSourceName);
    if (logicTables.isEmpty()) {
        result.getRouteUnits().add(new RouteUnit(dataSourceMapper, Collections.emptyList()));
    } else if (1 == logicTables.size()) {
        String logicTableName = logicTables.iterator().next();
        // Non-sharding table: route to the chosen data source without table mappers.
        if (!shardingRule.findShardingTable(logicTableName).isPresent()) {
            result.getRouteUnits().add(new RouteUnit(dataSourceMapper, Collections.emptyList()));
            return result;
        }
        DataNode dataNode = shardingRule.getDataNode(logicTableName);
        result.getRouteUnits().add(new RouteUnit(new RouteMapper(dataNode.getDataSourceName(), dataNode.getDataSourceName()),
                Collections.singletonList(new RouteMapper(logicTableName, dataNode.getTableName()))));
    } else {
        routeWithMultipleTables(result, shardingRule);
    }
    return result;
}
|
@Test
void assertRoutingForShardingTable() {
    // A single sharding table must route to exactly one unit, and never to the
    // data source ("ds_2") that holds none of its data nodes.
    RouteContext actual = new ShardingUnicastRoutingEngine(mock(SQLStatementContext.class), Collections.singleton("t_order"), new ConnectionContext(Collections::emptySet)).route(shardingRule);
    assertThat(actual.getRouteUnits().size(), is(1));
    assertFalse("ds_2".equalsIgnoreCase(actual.getRouteUnits().iterator().next().getDataSourceMapper().getLogicName()));
}
|
/** Returns this transform function's registered name. */
@Override
public String getName() {
  return _name;
}
|
@Test
public void testStringPadTransformFunction() {
  int padLength = 50;
  String padString = "#";
  // lpad: results must match commons-lang StringUtils.leftPad for every row.
  ExpressionContext expression = RequestContextUtils.getExpression(
      String.format("lpad(%s, %d, '%s')", STRING_ALPHANUM_SV_COLUMN, padLength, padString));
  TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
  assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
  assertEquals(transformFunction.getName(), "lpad");
  String[] expectedValues = new String[NUM_ROWS];
  for (int i = 0; i < NUM_ROWS; i++) {
    expectedValues[i] = StringUtils.leftPad(_stringAlphaNumericSVValues[i], padLength, padString);
  }
  testTransformFunction(transformFunction, expectedValues);
  // rpad: same check against StringUtils.rightPad.
  expression = RequestContextUtils.getExpression(
      String.format("rpad(%s, %d, '%s')", STRING_ALPHANUM_SV_COLUMN, padLength, padString));
  transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
  assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
  assertEquals(transformFunction.getName(), "rpad");
  expectedValues = new String[NUM_ROWS];
  for (int i = 0; i < NUM_ROWS; i++) {
    expectedValues[i] = StringUtils.rightPad(_stringAlphaNumericSVValues[i], padLength, padString);
  }
  testTransformFunction(transformFunction, expectedValues);
}
|
/**
 * Adds a directed edge from {@code from} to {@code to}, creating either vertex
 * on demand. Duplicate edges are not deduplicated.
 */
public void addEdge(V from, V to) {
    // Make sure both endpoints exist before touching the adjacency list;
    // the two registrations are independent of each other.
    addVertex(to);
    addVertex(from);
    neighbors.get(from).add(to);
}
|
@Test
void addEdge() {
    // 'B' already has neighbors C and F from the fixture; the new edge appends G.
    graph.addEdge('B', 'G');
    List<Character> result = graph.getNeighbors('B');
    List<Character> expected = Arrays.asList('C', 'F', 'G');
    assertEquals(expected, result);
}
|
/**
 * Starts an asynchronous fetch of {@code url}; the result or failure is delivered
 * to {@code callback} when the serializer completes the request.
 */
@Override
public void loadData(Priority priority, DataCallback<? super T> callback) {
  this.callback = callback;
  serializer.startRequest(priority, url, this);
}
|
@Test
public void testRequestComplete_withNon200StatusCode_callsCallbackWithException()
    throws Exception {
  // A completed request with a 500 status must surface as an HttpException to
  // the load callback rather than as data.
  UrlResponseInfo info = getInfo(0, HttpURLConnection.HTTP_INTERNAL_ERROR);
  fetcher.loadData(Priority.LOW, callback);
  UrlRequest.Callback urlCallback = urlRequestListenerCaptor.getValue();
  succeed(info, urlCallback, ByteBuffer.allocateDirect(0));
  ArgumentCaptor<HttpException> captor = ArgumentCaptor.forClass(HttpException.class);
  verify(callback, timeout(1000)).onLoadFailed(captor.capture());
  assertThat(captor.getValue())
      .hasMessageThat()
      .isEqualTo("Http request failed, status code: 500");
}
|
/**
 * Constant-folds integer subtraction. Uses {@link Math#subtractExact} so overflow
 * raises an {@link ArithmeticException} instead of wrapping silently.
 */
@ConstantFunction(name = "subtract", argTypes = {INT, INT}, returnType = INT, isMonotonic = true)
public static ConstantOperator subtractInt(ConstantOperator first, ConstantOperator second) {
    return ConstantOperator.createInt(Math.subtractExact(first.getInt(), second.getInt()));
}
|
@Test
public void subtractInt() {
    // 10 - 10 folds to the constant 0.
    assertEquals(0,
        ScalarOperatorFunctions.subtractInt(O_INT_10, O_INT_10).getInt());
}
|
/**
 * Key-agnostic left-join overload: adapts the plain {@link ValueJoiner} to a
 * key-aware joiner and delegates to the primary left-join implementation.
 */
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
                                        final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                        final JoinWindows windows) {
    return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
// Uses the deprecated JoinWindows.of on purpose to cover the legacy overload.
@SuppressWarnings("deprecation")
@Test
public void shouldNotAllowNullOtherStreamOnLeftJoin() {
    // A null other-stream must be rejected eagerly with a descriptive NPE.
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.leftJoin(null, MockValueJoiner.TOSTRING_JOINER, JoinWindows.of(ofMillis(10))));
    assertThat(exception.getMessage(), equalTo("otherStream can't be null"));
}
|
/**
 * Rejects requests carrying an invalid access token with 401 when admin-service
 * access control is enabled; otherwise passes the request straight through.
 */
@Override
public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain)
    throws IOException, ServletException {
  if (!bizConfig.isAdminServiceAccessControlEnabled()) {
    // Access control disabled: no token inspection at all.
    chain.doFilter(req, resp);
    return;
  }
  HttpServletRequest request = (HttpServletRequest) req;
  String token = request.getHeader(HttpHeaders.AUTHORIZATION);
  if (checkAccessToken(token)) {
    chain.doFilter(req, resp);
    return;
  }
  logger.warn("Invalid access token: {} for uri: {}", token, request.getRequestURI());
  ((HttpServletResponse) resp).sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthorized");
}
|
@Test
public void testWithConfigChanged() throws Exception {
  String someToken = "someToken";
  String anotherToken = "anotherToken";
  String yetAnotherToken = "yetAnotherToken";
  // case 1: init state — the configured token matches, so the request passes.
  when(bizConfig.isAdminServiceAccessControlEnabled()).thenReturn(true);
  when(bizConfig.getAdminServiceAccessTokens()).thenReturn(someToken);
  when(servletRequest.getHeader(HttpHeaders.AUTHORIZATION)).thenReturn(someToken);
  authenticationFilter.doFilter(servletRequest, servletResponse, filterChain);
  verify(filterChain, times(1)).doFilter(servletRequest, servletResponse);
  verify(servletResponse, never()).sendError(anyInt(), anyString());
  // case 2: change access tokens specified — the old token is rejected with 401,
  // while either of the new tokens is accepted.
  initVariables();
  when(bizConfig.getAdminServiceAccessTokens())
      .thenReturn(String.format("%s,%s", anotherToken, yetAnotherToken));
  when(servletRequest.getHeader(HttpHeaders.AUTHORIZATION)).thenReturn(someToken);
  authenticationFilter.doFilter(servletRequest, servletResponse, filterChain);
  verify(servletResponse, times(1))
      .sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthorized");
  verify(filterChain, never()).doFilter(servletRequest, servletResponse);
  initVariables();
  when(servletRequest.getHeader(HttpHeaders.AUTHORIZATION)).thenReturn(anotherToken);
  authenticationFilter.doFilter(servletRequest, servletResponse, filterChain);
  verify(filterChain, times(1)).doFilter(servletRequest, servletResponse);
  verify(servletResponse, never()).sendError(anyInt(), anyString());
  // case 3: change access control flag — with control disabled the filter must
  // not even read the Authorization header.
  initVariables();
  when(bizConfig.isAdminServiceAccessControlEnabled()).thenReturn(false);
  authenticationFilter.doFilter(servletRequest, servletResponse, filterChain);
  verify(filterChain, times(1)).doFilter(servletRequest, servletResponse);
  verify(servletResponse, never()).sendError(anyInt(), anyString());
  verify(servletRequest, never()).getHeader(HttpHeaders.AUTHORIZATION);
}
|
/**
 * Finds the last occurrence of {@code subArray} within {@code array}.
 *
 * @param array    the array to search; may be null or empty
 * @param subArray the sub-array to find; may be null or empty
 * @return the start index of the last match, or {@code INDEX_NOT_FOUND} (-1)
 *         when either array is null/empty or no match exists
 */
public static <T> int lastIndexOfSub(T[] array, T[] subArray) {
    if (isEmpty(array) || isEmpty(subArray)) {
        return INDEX_NOT_FOUND;
    }
    // Delegate to the positional overload, scanning backwards from the last index.
    return lastIndexOfSub(array, array.length - 1, subArray);
}
|
@Test
public void lastIndexOfSubTest() {
    Integer[] a = {0x12, 0x34, 0x56, 0x78, 0x9A};
    Integer[] b = {0x56, 0x78};
    Integer[] c = {0x12, 0x56};
    Integer[] d = {0x78, 0x9A};
    Integer[] e = {0x78, 0x9A, 0x10};
    // Contiguous match found at index 2.
    int i = ArrayUtil.lastIndexOfSub(a, b);
    assertEquals(2, i);
    // Non-contiguous elements do not match.
    i = ArrayUtil.lastIndexOfSub(a, c);
    assertEquals(-1, i);
    // Match at the tail of the array.
    i = ArrayUtil.lastIndexOfSub(a, d);
    assertEquals(3, i);
    // Sub-array extending past the end does not match.
    i = ArrayUtil.lastIndexOfSub(a, e);
    assertEquals(-1, i);
    // Null/empty inputs always yield INDEX_NOT_FOUND.
    i = ArrayUtil.lastIndexOfSub(a, null);
    assertEquals(-1, i);
    i = ArrayUtil.lastIndexOfSub(null, null);
    assertEquals(-1, i);
    i = ArrayUtil.lastIndexOfSub(null, b);
    assertEquals(-1, i);
}
|
/**
 * Creates a Couchbase endpoint for the given URI and applies the supplied
 * URI query parameters onto it via {@code setProperties}.
 */
@Override
protected CouchbaseEndpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
    CouchbaseEndpoint endpoint = new CouchbaseEndpoint(uri, remaining, this);
    setProperties(endpoint, parameters);
    return endpoint;
}
|
@Test
public void testCouchbaseDuplicateAdditionalHosts() throws Exception {
    // "localhost" appears in the main URI and twice (once with whitespace) in
    // additionalHosts; the bootstrap URI list must be deduplicated to two entries.
    Map<String, Object> params = new HashMap<>();
    params.put("additionalHosts", "127.0.0.1,localhost, localhost");
    params.put("bucket", "bucket");
    String uri = "couchbase:http://localhost";
    String remaining = "http://localhost";
    CouchbaseEndpoint endpoint = new CouchbaseComponent(context).createEndpoint(uri, remaining, params);
    URI[] endpointArray = endpoint.makeBootstrapURI();
    assertEquals(2, endpointArray.length);
    assertEquals(new URI("http://localhost:8091/pools"), endpointArray[0]);
    assertEquals(new URI("http://127.0.0.1:8091/pools"), endpointArray[1]);
}
|
/** Asserts the subject is a real float: neither {@code null} nor {@code Float.NaN}. */
public final void isNotNaN() {
  if (actual == null) {
    // A null subject gets a dedicated failure message rather than a NaN comparison.
    failWithActual(simpleFact("expected a float other than NaN"));
  } else {
    isNotEqualTo(NaN);
  }
}
|
@Test
public void isNotNaNIsNull() {
  // A null subject must fail with the dedicated "other than NaN" message.
  expectFailureWhenTestingThat(null).isNotNaN();
  assertFailureKeys("expected a float other than NaN", "but was");
}
|
/**
 * Tests whether this bounding box overlaps the given box at the given zoom.
 * Below zoom 3 it always reports an overlap (see FIXME below).
 *
 * @param pBoundingBox the other box
 * @param pZoom        current map zoom level
 * @return {@code true} when both a latitude and a longitude overlap condition hold
 */
public boolean overlaps(final BoundingBox pBoundingBox, double pZoom) {
    //FIXME this is a total hack but it works around a number of issues related to vertical map
    //replication and horiztonal replication that can cause polygons to completely disappear when
    //panning
    if (pZoom < 3)
        return true;
    boolean latMatch = false;
    boolean lonMatch = false;
    //vertical wrapping detection
    if (pBoundingBox.mLatSouth <= mLatNorth &&
        pBoundingBox.mLatSouth >= mLatSouth)
        latMatch = true;
    //normal case, non overlapping
    if (mLonWest >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
        lonMatch = true;
    //normal case, non overlapping
    if (mLonEast >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
        lonMatch = true;
    //special case for when *this completely surrounds the pBoundbox
    if (mLonWest <= pBoundingBox.mLonWest &&
        mLonEast >= pBoundingBox.mLonEast &&
        mLatNorth >= pBoundingBox.mLatNorth &&
        mLatSouth <= pBoundingBox.mLatSouth)
        return true;
    //normal case, non overlapping
    // NOTE(review): "mLatNorth <= mLatSouth" compares this box's own edges and is
    // false for any non-inverted box; it looks like it was meant to reference
    // pBoundingBox.mLatNorth — TODO confirm before changing behavior.
    if (mLatNorth >= pBoundingBox.mLatSouth && mLatNorth <= mLatSouth)
        latMatch = true;
    //normal case, non overlapping
    // NOTE(review): "mLatSouth <= mLatSouth" is trivially true, so this reduces to
    // "mLatSouth >= pBoundingBox.mLatSouth"; possibly intended to be
    // "mLatSouth <= pBoundingBox.mLatNorth" — TODO confirm.
    if (mLatSouth >= pBoundingBox.mLatSouth && mLatSouth <= mLatSouth)
        latMatch = true;
    if (mLonWest > mLonEast) {
        //the date line is included in the bounding box
        //we want to match lon from the dateline to the eastern bounds of the box
        //and the dateline to the western bounds of the box
        if (mLonEast <= pBoundingBox.mLonEast && pBoundingBox.mLonWest >= mLonWest)
            lonMatch = true;
        if (mLonWest >= pBoundingBox.mLonEast &&
            mLonEast <= pBoundingBox.mLonEast) {
            lonMatch = true;
            if (pBoundingBox.mLonEast < mLonWest &&
                pBoundingBox.mLonWest < mLonWest)
                lonMatch = false;
            if (pBoundingBox.mLonEast > mLonEast &&
                pBoundingBox.mLonWest > mLonEast)
                lonMatch = false;
        }
        if (mLonWest >= pBoundingBox.mLonEast &&
            mLonEast >= pBoundingBox.mLonEast) {
            lonMatch = true;
        }
    }
    return latMatch && lonMatch;
}
|
@Test
public void testSouthernBoundsSimple() {
    //item's southern bounds is just out of view
    // The item extends slightly east of the view; overlap must still be detected
    // at a zoom level above the always-overlap threshold.
    BoundingBox view = new BoundingBox(2, 2, -2, -2);
    BoundingBox item = new BoundingBox(1, 1, 2.1, -1);
    Assert.assertTrue(view.overlaps(item, 4));
}
|
/**
 * Returns the resolved file's public URI with a leading {@code s3://} scheme
 * rewritten to {@code s3a://}, the scheme AEL/Spark expects.
 */
@Override
public String getAELSafeURIString() {
  // Anchor the regex so only the scheme prefix is rewritten, never an "s3://"
  // occurring later in the URI (e.g. inside a path or query string).
  return resolvedFileObject.getPublicURIString().replaceFirst( "^s3://", "s3a://" );
}
|
@Test
public void testGetAELSafeURIString() {
  // An s3:// scheme must be rewritten to s3a:// for AEL consumption.
  when( resolvedFileObject.getPublicURIString() ).thenReturn( "s3://bucket" );
  assertEquals( "s3a://bucket", fileObject.getAELSafeURIString() );
}
|
/**
 * Reads the consume-queue extend unit stored at {@code address}.
 *
 * @param address decorated extend-unit address
 * @return the populated unit, or {@code null} when nothing could be read
 */
public CqExtUnit get(final long address) {
    final CqExtUnit unit = new CqExtUnit();
    return get(address, unit) ? unit : null;
}
|
@Test
public void testGet() {
    ConsumeQueueExt consumeQueueExt = genExt();
    putSth(consumeQueueExt, false, false, UNIT_COUNT);
    try {
        // from start.
        // Walk the extend file unit by unit; every unit read must report at least
        // the minimum unit size, and the walk ends when a read fails.
        long addr = consumeQueueExt.decorate(0);
        ConsumeQueueExt.CqExtUnit unit = new ConsumeQueueExt.CqExtUnit();
        while (true) {
            boolean ret = consumeQueueExt.get(addr, unit);
            if (!ret) {
                break;
            }
            assertThat(unit.getSize()).isGreaterThanOrEqualTo(ConsumeQueueExt.CqExtUnit.MIN_EXT_UNIT_SIZE);
            addr += unit.getSize();
        }
    } finally {
        // Always clean up the on-disk store, even when assertions fail.
        consumeQueueExt.destroy();
        UtilAll.deleteFile(new File(STORE_PATH));
    }
}
|
/**
 * Builds a {@link CreateStreamCommand} from a planned output node (CREATE ... AS).
 * Schema inference is always disabled here because the schema is fully known
 * from the query plan.
 */
public CreateStreamCommand createStreamCommand(final KsqlStructuredDataOutputNode outputNode) {
  return new CreateStreamCommand(
      outputNode.getSinkName().get(),
      outputNode.getSchema(),
      outputNode.getTimestampColumn(),
      outputNode.getKsqlTopic().getKafkaTopicName(),
      Formats.from(outputNode.getKsqlTopic()),
      outputNode.getKsqlTopic().getKeyFormat().getWindowInfo(),
      Optional.of(outputNode.getOrReplace()),
      Optional.of(false)
  );
}
|
@Test
public void shouldBuildSchemaWithImplicitKeyFieldForStream() {
  // Given: a CREATE STREAM without an explicit key column.
  final CreateStream statement = new CreateStream(SOME_NAME, STREAM_ELEMENTS, false, true,
      withProperties, false);
  // When:
  final CreateStreamCommand result = createSourceFactory.createStreamCommand(
      statement,
      ksqlConfig
  );
  // Then: the factory injects the implicit key column into the resulting schema.
  assertThat(result.getSchema(), is(EXPECTED_SCHEMA));
}
|
/**
 * Truncates a millisecond timestamp to the start of its hour.
 *
 * @param ms epoch time in milliseconds
 * @return the timestamp of the beginning of the hour containing {@code ms}
 */
public static long getTimeInHour(long ms) {
    // Math.floorMod keeps the result correct for negative (pre-epoch) timestamps,
    // where the % operator rounds toward zero instead of flooring.
    return ms - Math.floorMod(ms, HOUR_IN_MILL);
}
|
@Test
public void testGetTimeInHour() throws ParseException {
    // NOTE(review): SimpleDateFormat is legacy and not thread-safe; fine for a
    // local variable in a test, but new code should prefer java.time.
    SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    Date date = sdf.parse("2019-04-18 15:32:33");
    long timeInHour = TimeUtils.getTimeInHour(date.getTime());
    // 15:32:33 truncates down to the top of the hour.
    Assert.assertEquals("2019-04-18 15:00:00", sdf.format(timeInHour));
}
|
/**
 * Prepares and executes each parsed statement in order, collecting any resulting
 * entities. Session variables are substituted into statements only when variable
 * substitution is enabled for the session.
 *
 * @return the (possibly empty) list of entities produced by the statements
 */
public KsqlEntityList execute(
    final KsqlSecurityContext securityContext,
    final List<ParsedStatement> statements,
    final SessionProperties sessionProperties
) {
  final KsqlEntityList entities = new KsqlEntityList();
  for (final ParsedStatement parsed : statements) {
    // With substitution disabled an empty variable map is passed so no
    // session variables leak into statement preparation.
    final PreparedStatement<?> prepared = ksqlEngine.prepare(
        parsed,
        (isVariableSubstitutionEnabled(sessionProperties)
            ? sessionProperties.getSessionVariables()
            : Collections.emptyMap())
    );
    // Statements may legitimately produce no entity; only add present results.
    executeStatement(
        securityContext,
        prepared,
        sessionProperties,
        entities
    ).ifPresent(entities::add);
  }
  return entities;
}
|
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT")
@Test
public void shouldCallPrepareStatementWithEmptySessionVariablesIfSubstitutionDisabled() {
  // Given: substitution disabled via config, with a stub executor for CreateStream.
  final StatementExecutor<CreateStream> customExecutor =
      givenReturningExecutor(CreateStream.class, mock(KsqlEntity.class));
  givenRequestHandler(ImmutableMap.of(CreateStream.class, customExecutor));
  when(ksqlConfig.getBoolean(KsqlConfig.KSQL_VARIABLE_SUBSTITUTION_ENABLE)).thenReturn(false);
  // When
  final List<ParsedStatement> statements = KSQL_PARSER.parse(SOME_STREAM_SQL);
  handler.execute(securityContext, statements, sessionProperties);
  // Then: preparation must receive an empty variable map and never even read
  // the session variables.
  verify(ksqlEngine).prepare(statements.get(0), Collections.emptyMap());
  verify(sessionProperties, never()).getSessionVariables();
}
|
/**
 * Maps a failure to a coarse diagnostic category by scanning the exception chain.
 * The first matching rule wins; anything unmatched is classified as application.
 */
@Override
public Type determine(final BackgroundException failure) {
    if(log.isDebugEnabled()) {
        log.debug(String.format("Determine cause for failure %s", failure));
    }
    // NOTE(review): the "failure instanceof ..." checks below do not depend on the
    // loop variable and are re-evaluated on every iteration; they could be hoisted
    // above the loop, but the current ordering interleaves them with the cause
    // checks, so confirm precedence is unaffected before refactoring.
    for(Throwable cause : ExceptionUtils.getThrowableList(failure)) {
        if(failure instanceof UnsupportedException) {
            return Type.unsupported;
        }
        if(failure instanceof LoginFailureException) {
            return Type.login;
        }
        if(cause instanceof ResolveFailedException) {
            return Type.network;
        }
        if(failure instanceof TransferCanceledException) {
            return Type.skip;
        }
        if(failure instanceof ConnectionCanceledException) {
            return Type.cancel;
        }
        if(cause instanceof ConnectionTimeoutException) {
            return Type.network;
        }
        if(cause instanceof ConnectionRefusedException) {
            return Type.network;
        }
        // SSL negotiate/handshake failures are treated as application-level
        // (certificate/config) problems; other SSL errors as network problems.
        if(cause instanceof SSLNegotiateException) {
            return Type.application;
        }
        if(cause instanceof SSLHandshakeException) {
            return Type.application;
        }
        if(cause instanceof SSLException) {
            return Type.network;
        }
        if(cause instanceof NoHttpResponseException) {
            return Type.network;
        }
        if(cause instanceof ConnectTimeoutException) {
            return Type.network;
        }
        if(cause instanceof SocketException
                || cause instanceof IOResumeException
                || cause instanceof TimeoutException // Used in Promise#retrieve
                || cause instanceof SocketTimeoutException
                || cause instanceof UnknownHostException) {
            return Type.network;
        }
        if(cause instanceof QuotaException) {
            return Type.quota;
        }
        if(cause instanceof AntiVirusAccessDeniedException) {
            return Type.antivirus;
        }
    }
    // No rule matched, or the throwable list was empty (e.g. for null input).
    return Type.application;
}
|
@Test
public void testDetermine() {
    // Null input yields the fallback application category.
    assertEquals(FailureDiagnostics.Type.application, new DefaultFailureDiagnostics().determine(null));
    assertEquals(FailureDiagnostics.Type.network, new DefaultFailureDiagnostics().determine(new ResolveFailedException("d", null)));
    assertEquals(FailureDiagnostics.Type.cancel, new DefaultFailureDiagnostics().determine(new ResolveCanceledException()));
    assertEquals(FailureDiagnostics.Type.login, new DefaultFailureDiagnostics().determine(new LoginFailureException("d")));
    assertEquals(FailureDiagnostics.Type.cancel, new DefaultFailureDiagnostics().determine(new ConnectionCanceledException()));
    // By user
    assertEquals(FailureDiagnostics.Type.skip, new DefaultFailureDiagnostics().determine(new TransferCanceledException()));
    // A cancel appearing only as a nested cause is not treated as a cancel.
    assertEquals(FailureDiagnostics.Type.application, new DefaultFailureDiagnostics().determine(new BackgroundException(new ConnectionCanceledException())));
    // By transfer status
    assertEquals(FailureDiagnostics.Type.cancel, new DefaultFailureDiagnostics().determine(new TransferStatusCanceledException()));
}
|
/**
 * Builds the summary map for a single topology as rendered by the UI.
 *
 * @param topologyPageInfo raw topology page info fetched from the daemon
 * @param window           stats window identifier to report on
 * @param config           daemon configuration supplying UI-related options
 * @param remoteUser       user viewing the page; echoed back under "user"
 * @return mutable map of summary fields used for page rendering
 */
public static Map<String, Object> getTopologySummary(TopologyPageInfo topologyPageInfo,
        String window, Map<String, Object> config, String remoteUser) {
    // Fix: use typed collections instead of raw HashMap/ArrayList.
    Map<String, Object> result = new HashMap<>();
    // Topology configuration is shipped as a JSON string; parse it back into a map.
    Map<String, Object> topologyConf = (Map<String, Object>) JSONValue.parse(topologyPageInfo.get_topology_conf());
    int messageTimeout = (int) topologyConf.get(Config.TOPOLOGY_MESSAGE_TIMEOUT_SECS);
    Map<String, Object> unpackedTopologyPageInfo =
            unpackTopologyInfo(topologyPageInfo, window, config);
    result.putAll(unpackedTopologyPageInfo);
    result.put("user", remoteUser);
    result.put("window", window);
    result.put("windowHint", getWindowHint(window));
    result.put("msgTimeout", messageTimeout);
    result.put("configuration", topologyConf);
    // Placeholder; the visualization table is populated elsewhere.
    result.put("visualizationTable", new ArrayList<>());
    result.put("schedulerDisplayResource", config.get(DaemonConfig.SCHEDULER_DISPLAY_RESOURCE));
    result.put("bugtracker-url", config.get(DaemonConfig.UI_PROJECT_BUGTRACKER_URL));
    result.put("central-log-url", config.get(DaemonConfig.UI_CENTRAL_LOGGING_URL));
    return result;
}
|
// Verifies that getTopologySummary surfaces a spout's last error (message,
// host, port, time) and computes the elapsed seconds against mocked time.
@Test
void test_getTopologySpoutAggStatsMap_includesLastError() {
// Define inputs
final String expectedSpoutId = "MySpoutId";
final String expectedErrorMsg = "This is my test error message";
final int expectedErrorTime = Time.currentTimeSecs();
final int errorElapsedTimeSecs = 13;
final int expectedErrorElapsedTime = expectedErrorTime + errorElapsedTimeSecs;
final int expectedErrorPort = 4321;
final String expectedErrorHost = "my.errored.host";
// Define our Last Error
final ErrorInfo expectedLastError = new ErrorInfo(expectedErrorMsg, expectedErrorTime);
expectedLastError.set_port(expectedErrorPort);
expectedLastError.set_host(expectedErrorHost);
// Build stats instance for our spout
final ComponentAggregateStats aggregateStats = buildSpoutAggregateStatsBase();
aggregateStats.set_last_error(expectedLastError);
addSpoutStats(expectedSpoutId, aggregateStats);
// Advance time by 'errorElapsedTimeSecs'
Time.advanceTimeSecs(errorElapsedTimeSecs);
// Call method under test.
final Map<String, Object> result = UIHelpers.getTopologySummary(
topoPageInfo,
WINDOW,
new HashMap<>(),
"spp"
);
// Validate
assertNotNull(result, "Should never return null");
// Validate our Spout result
final Map<String, Object> spoutResult = getSpoutStatsFromTopologySummaryResult(result, expectedSpoutId);
assertNotNull(spoutResult, "Should have an entry for spout");
// Verify each piece
assertEquals(expectedSpoutId, spoutResult.get("spoutId"));
assertEquals(expectedSpoutId, spoutResult.get("encodedSpoutId"));
// Verify error
assertEquals(expectedErrorMsg, spoutResult.get("lastError"));
assertEquals(expectedErrorPort, spoutResult.get("errorPort"));
assertEquals(expectedErrorHost, spoutResult.get("errorHost"));
assertEquals(expectedErrorTime, spoutResult.get("errorTime"));
assertEquals(expectedErrorElapsedTime, spoutResult.get("errorLapsedSecs"));
}
|
/**
 * Computes the new target assignment for the group and the coordinator
 * records required to persist it.
 *
 * <p>The member specs are built from the current members, then patched with
 * the pending member updates (null value = removal). A record is emitted
 * only for members whose assignment actually changed, followed by a single
 * epoch-bump record.
 *
 * @return the records to write plus the new per-member assignments
 * @throws PartitionAssignorException if the assignor fails
 */
public TargetAssignmentResult build() throws PartitionAssignorException {
Map<String, MemberSubscriptionAndAssignmentImpl> memberSpecs = new HashMap<>();
// Prepare the member spec for all members.
members.forEach((memberId, member) ->
memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
member,
targetAssignment.getOrDefault(memberId, Assignment.EMPTY),
topicsImage
))
);
// Update the member spec if updated or deleted members.
updatedMembers.forEach((memberId, updatedMemberOrNull) -> {
if (updatedMemberOrNull == null) {
memberSpecs.remove(memberId);
} else {
Assignment assignment = targetAssignment.getOrDefault(memberId, Assignment.EMPTY);
// A new static member joins and needs to replace an existing departed one.
// In that case it inherits the departed member's previous assignment.
if (updatedMemberOrNull.instanceId() != null) {
String previousMemberId = staticMembers.get(updatedMemberOrNull.instanceId());
if (previousMemberId != null && !previousMemberId.equals(memberId)) {
assignment = targetAssignment.getOrDefault(previousMemberId, Assignment.EMPTY);
}
}
memberSpecs.put(memberId, createMemberSubscriptionAndAssignment(
updatedMemberOrNull,
assignment,
topicsImage
));
}
});
// Prepare the topic metadata, keyed by topic id rather than name.
Map<Uuid, TopicMetadata> topicMetadataMap = new HashMap<>();
subscriptionMetadata.forEach((topicName, topicMetadata) ->
topicMetadataMap.put(
topicMetadata.id(),
topicMetadata
)
);
// Compute the assignment.
GroupAssignment newGroupAssignment = assignor.assign(
new GroupSpecImpl(
Collections.unmodifiableMap(memberSpecs),
subscriptionType,
invertedTargetAssignment
),
new SubscribedTopicDescriberImpl(topicMetadataMap)
);
// Compute delta from previous to new target assignment and create the
// relevant records.
List<CoordinatorRecord> records = new ArrayList<>();
for (String memberId : memberSpecs.keySet()) {
Assignment oldMemberAssignment = targetAssignment.get(memberId);
Assignment newMemberAssignment = newMemberAssignment(newGroupAssignment, memberId);
if (!newMemberAssignment.equals(oldMemberAssignment)) {
// If the member had no assignment or had a different assignment, we
// create a record for the new assignment.
records.add(targetAssignmentRecordBuilder.build(
groupId,
memberId,
newMemberAssignment.partitions()
));
}
}
// Bump the target assignment epoch.
records.add(targetAssignmentEpochRecordBuilder.build(groupId, groupEpoch));
return new TargetAssignmentResult(records, newGroupAssignment.members());
}
|
// Verifies that when a static member departs and a new member joins with the
// same instance id, the replacement inherits the departed member's target
// assignment, and only the replacement + epoch records are written.
@Test
public void testReplaceStaticMember() {
TargetAssignmentBuilderTestContext context = new TargetAssignmentBuilderTestContext(
"my-group",
20
);
Uuid fooTopicId = context.addTopicMetadata("foo", 6, Collections.emptyMap());
Uuid barTopicId = context.addTopicMetadata("bar", 6, Collections.emptyMap());
context.addGroupMember("member-1", "instance-member-1", Arrays.asList("foo", "bar", "zar"), mkAssignment(
mkTopicAssignment(fooTopicId, 1, 2),
mkTopicAssignment(barTopicId, 1, 2)
));
context.addGroupMember("member-2", "instance-member-2", Arrays.asList("foo", "bar", "zar"), mkAssignment(
mkTopicAssignment(fooTopicId, 3, 4),
mkTopicAssignment(barTopicId, 3, 4)
));
context.addGroupMember("member-3", "instance-member-3", Arrays.asList("foo", "bar", "zar"), mkAssignment(
mkTopicAssignment(fooTopicId, 5, 6),
mkTopicAssignment(barTopicId, 5, 6)
));
// Static member 3 leaves
context.removeMemberSubscription("member-3");
// Another static member joins with the same instance id as the departed one
context.updateMemberSubscription("member-3-a", Arrays.asList("foo", "bar", "zar"), Optional.of("instance-member-3"), Optional.empty());
context.prepareMemberAssignment("member-1", mkAssignment(
mkTopicAssignment(fooTopicId, 1, 2),
mkTopicAssignment(barTopicId, 1, 2)
));
context.prepareMemberAssignment("member-2", mkAssignment(
mkTopicAssignment(fooTopicId, 3, 4),
mkTopicAssignment(barTopicId, 3, 4)
));
context.prepareMemberAssignment("member-3-a", mkAssignment(
mkTopicAssignment(fooTopicId, 5, 6),
mkTopicAssignment(barTopicId, 5, 6)
));
TargetAssignmentBuilder.TargetAssignmentResult result = context.build();
// Only the replacement member's assignment changed, so exactly one
// assignment record plus the epoch record are expected.
assertEquals(2, result.records().size());
assertUnorderedListEquals(Collections.singletonList(
newConsumerGroupTargetAssignmentRecord("my-group", "member-3-a", mkAssignment(
mkTopicAssignment(fooTopicId, 5, 6),
mkTopicAssignment(barTopicId, 5, 6)
))
), result.records().subList(0, 1));
assertEquals(newConsumerGroupTargetAssignmentEpochRecord(
"my-group",
20
), result.records().get(1));
Map<String, MemberAssignment> expectedAssignment = new HashMap<>();
expectedAssignment.put("member-1", new MemberAssignmentImpl(mkAssignment(
mkTopicAssignment(fooTopicId, 1, 2),
mkTopicAssignment(barTopicId, 1, 2)
)));
expectedAssignment.put("member-2", new MemberAssignmentImpl(mkAssignment(
mkTopicAssignment(fooTopicId, 3, 4),
mkTopicAssignment(barTopicId, 3, 4)
)));
expectedAssignment.put("member-3-a", new MemberAssignmentImpl(mkAssignment(
mkTopicAssignment(fooTopicId, 5, 6),
mkTopicAssignment(barTopicId, 5, 6)
)));
assertEquals(expectedAssignment, result.targetAssignment());
}
|
/**
 * Swaps a plain {@link DataSource} for an equivalent {@code XADataSource} by
 * copying the source's access configuration onto a freshly created XA instance.
 *
 * @param dataSource data source to read connection properties from
 * @return configured XA data source
 */
public XADataSource swap(final DataSource dataSource) {
XADataSource result = createXADataSource();
setProperties(result, getDatabaseAccessConfiguration(dataSource));
return result;
}
|
// Smoke test: swapping a mocked data source yields an XA data source whose
// properties satisfy assertResult.
@Test
void assertSwap() {
DataSourceSwapper swapper = new DataSourceSwapper(xaDataSourceDefinition);
assertResult(swapper.swap(new MockedDataSource()));
}
|
/**
 * Returns the named property, or {@code null} if it is not set.
 *
 * @param name property key
 * @return property value or {@code null}
 */
public String getProperty(String name) {
return properties.getProperty(name);
}
|
// An unset property on a fresh config resolves to null.
@Test
public void getProperty() {
assertNull(new MapStoreConfig().getProperty("a"));
}
|
/**
 * Returns an unmodifiable view of the emoji history, seeding it with the
 * default emoji first if it is empty.
 *
 * @return unmodifiable, never-empty list of history keys
 */
public List<HistoryKey> getCurrentHistory() {
    // Fix: use isEmpty() instead of size() == 0 and brace the conditional.
    if (mLoadedKeys.isEmpty()) {
        // For an unknown reason, we cannot have 0 history emojis, so seed one.
        mLoadedKeys.add(new HistoryKey(DEFAULT_EMOJI, DEFAULT_EMOJI));
    }
    return Collections.unmodifiableList(mLoadedKeys);
}
|
// Verifies that loading a stored history string with more entries than
// MAX_LIST_SIZE keeps only the newest MAX_LIST_SIZE entries, preserving order.
@Test
public void testLoadMoreThanLimit() {
// Build a token-separated "name<sep>value<sep>..." string with twice the limit.
StringBuilder exceedString = new StringBuilder();
for (int i = 0; i < QuickKeyHistoryRecords.MAX_LIST_SIZE * 2; i++) {
exceedString
.append(Integer.toString(2 * i))
.append(QuickKeyHistoryRecords.HISTORY_TOKEN_SEPARATOR)
.append(Integer.toString(2 * i + 1))
.append(QuickKeyHistoryRecords.HISTORY_TOKEN_SEPARATOR);
}
mSharedPreferences
.getString(R.string.settings_key_quick_text_history, R.string.settings_default_empty)
.set(exceedString.toString());
mUnderTest = new QuickKeyHistoryRecords(mSharedPreferences);
List<QuickKeyHistoryRecords.HistoryKey> keys = mUnderTest.getCurrentHistory();
assertEquals(QuickKeyHistoryRecords.MAX_LIST_SIZE, keys.size());
// First retained entry.
assertEquals("0", keys.get(0).name);
assertEquals("1", keys.get(0).value);
// Last retained entry corresponds to the last written pair.
assertEquals(
Integer.toString(QuickKeyHistoryRecords.MAX_LIST_SIZE * 2 - 2),
keys.get(QuickKeyHistoryRecords.MAX_LIST_SIZE - 1).name);
assertEquals(
Integer.toString(QuickKeyHistoryRecords.MAX_LIST_SIZE * 2 - 1),
keys.get(QuickKeyHistoryRecords.MAX_LIST_SIZE - 1).value);
}
|
/**
 * Revises table meta data: the table name goes through the configured
 * {@code TableNameReviser} (if any), while columns, indexes and constraints
 * are revised by their dedicated engines against the ORIGINAL table name.
 *
 * @param originalMetaData meta data to revise
 * @return revised table meta data (type is carried over unchanged)
 */
public TableMetaData revise(final TableMetaData originalMetaData) {
Optional<? extends TableNameReviser<T>> tableNameReviser = reviseEntry.getTableNameReviser();
String revisedTableName = tableNameReviser.map(optional -> optional.revise(originalMetaData.getName(), rule)).orElse(originalMetaData.getName());
return new TableMetaData(revisedTableName, new ColumnReviseEngine<>(rule, databaseType, dataSource, reviseEntry).revise(originalMetaData.getName(), originalMetaData.getColumns()),
new IndexReviseEngine<>(rule, reviseEntry).revise(originalMetaData.getName(), originalMetaData.getIndexes()),
new ConstraintReviseEngine<>(rule, reviseEntry).revise(originalMetaData.getName(), originalMetaData.getConstraints()), originalMetaData.getType());
}
|
// When no TableNameReviser is configured, the original table name is retained.
@SuppressWarnings("unchecked")
@Test
void assertGetOriginalTableName() {
Collection<ColumnMetaData> columns = new LinkedList<>();
columns.add(new ColumnMetaData("column1", 2, true, true, true, false, false, false));
Collection<IndexMetaData> indexes = new LinkedList<>();
indexes.add(new IndexMetaData("index1"));
TableMetaData tableMetaData = new TableMetaData("originalTableName", columns, indexes, null);
TableMetaDataReviseEngine<T> tableMetaDataReviseEngine = new TableMetaDataReviseEngine<T>(rule, databaseType, dataSource, metaDataReviseEntry);
// No reviser available -> name should pass through unchanged.
when(metaDataReviseEntry.getTableNameReviser()).thenReturn(Optional.empty());
TableMetaData revisedMetaData = tableMetaDataReviseEngine.revise(tableMetaData);
assertThat(revisedMetaData.getName(), is("originalTableName"));
}
|
/**
 * Cached lookup of a mail account by id; delegates to {@link #getMailAccount}
 * and caches non-null results in Redis under {@code MAIL_ACCOUNT:#id}.
 *
 * @param id mail account id
 * @return the account, or {@code null} if not found (null is not cached)
 */
@Override
@Cacheable(value = RedisKeyConstants.MAIL_ACCOUNT, key = "#id", unless = "#result == null")
public MailAccountDO getMailAccountFromCache(Long id) {
return getMailAccount(id);
}
|
// Verifies that the cached lookup returns the persisted account unchanged.
@Test
public void testGetMailAccountFromCache() {
// Mock data
MailAccountDO dbMailAccount = randomPojo(MailAccountDO.class);
mailAccountMapper.insert(dbMailAccount);// @Sql: insert an existing row first
// Prepare parameters
Long id = dbMailAccount.getId();
// Invoke
MailAccountDO mailAccount = mailAccountService.getMailAccountFromCache(id);
// Assert
assertPojoEquals(dbMailAccount, mailAccount);
}
|
/**
 * Windowed stream-stream join overload taking a key-less {@link ValueJoiner};
 * adapts it to a {@code ValueJoinerWithKey} and delegates to the main overload.
 */
@Override
public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return join(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
// A null ValueJoinerWithKey passed to the global-table join must be rejected
// with an NPE carrying the expected message.
@Test
public void shouldNotAllowNullValueJoinerWithKeyOnJoinWithGlobalTable() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.join(testGlobalTable, MockMapper.selectValueMapper(), (ValueJoinerWithKey<? super String, ? super String, ? super String, ?>) null));
assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
|
/**
 * Sets the version used with {@code @Since}/{@code @Until} based exclusion.
 *
 * @param version version number; must be a non-negative, non-NaN double
 * @return this builder, for chaining
 * @throws IllegalArgumentException if the version is NaN or negative
 */
@CanIgnoreReturnValue
public GsonBuilder setVersion(double version) {
    // Only finite-or-infinite, non-negative values are accepted.
    boolean invalid = version < 0.0 || Double.isNaN(version);
    if (invalid) {
        throw new IllegalArgumentException("Invalid version: " + version);
    }
    excluder = excluder.withVersion(version);
    return this;
}
|
// NaN and negative versions must both be rejected with a descriptive message.
@Test
public void testSetVersionInvalid() {
GsonBuilder builder = new GsonBuilder();
var e = assertThrows(IllegalArgumentException.class, () -> builder.setVersion(Double.NaN));
assertThat(e).hasMessageThat().isEqualTo("Invalid version: NaN");
e = assertThrows(IllegalArgumentException.class, () -> builder.setVersion(-0.1));
assertThat(e).hasMessageThat().isEqualTo("Invalid version: -0.1");
}
|
/**
 * Strips exactly one leading and one trailing double quote when the string is
 * wrapped in both; otherwise returns the input unchanged. A lone quote
 * ({@code "\""}) is left as-is since it cannot be both a leading and a
 * trailing quote.
 *
 * @param str input string (must not be null)
 * @return the unwrapped string, or {@code str} itself if not quote-wrapped
 */
public static String stripLeadingAndTrailingQuotes(String str) {
    final int len = str.length();
    final boolean wrapped =
            len > 1 && str.charAt(0) == '"' && str.charAt(len - 1) == '"';
    return wrapped ? str.substring(1, len - 1) : str;
}
|
// Exercises every quote-wrapping edge case: empty, lone quote, nested quotes,
// one-sided quotes, and an unquoted string.
@Test
public void stripLeadingAndTrailingQuotes() throws Exception {
assertEquals("", CommonUtils.stripLeadingAndTrailingQuotes(""));
// A single quote is not stripped: it cannot be both leading and trailing.
assertEquals("\"", CommonUtils.stripLeadingAndTrailingQuotes("\""));
assertEquals("", CommonUtils.stripLeadingAndTrailingQuotes("\"\""));
assertEquals("\"", CommonUtils.stripLeadingAndTrailingQuotes("\"\"\""));
// Only ONE layer of quotes is removed.
assertEquals("\"\"", CommonUtils.stripLeadingAndTrailingQuotes("\"\"\"\""));
assertEquals("noquote", CommonUtils.stripLeadingAndTrailingQuotes("noquote"));
assertEquals(
"\"singlequote", CommonUtils.stripLeadingAndTrailingQuotes("\"singlequote"));
assertEquals(
"singlequote\"", CommonUtils.stripLeadingAndTrailingQuotes("singlequote\""));
assertEquals("quoted", CommonUtils.stripLeadingAndTrailingQuotes("\"quoted\""));
assertEquals("\"quoted\"", CommonUtils.stripLeadingAndTrailingQuotes("\"\"quoted\"\""));
}
|
/**
 * Consumes (validates and deletes) an OAuth2 authorization code.
 *
 * <p>Throws a service exception when the code does not exist or has expired;
 * on success the code row is deleted so it cannot be reused.
 *
 * @param code authorization code string
 * @return the consumed code record
 */
@Override
public OAuth2CodeDO consumeAuthorizationCode(String code) {
OAuth2CodeDO codeDO = oauth2CodeMapper.selectByCode(code);
if (codeDO == null) {
throw exception(OAUTH2_CODE_NOT_EXISTS);
}
if (DateUtils.isExpired(codeDO.getExpiresTime())) {
throw exception(OAUTH2_CODE_EXPIRE);
}
// One-time use: delete the code after successful validation.
oauth2CodeMapper.deleteById(codeDO.getId());
return codeDO;
}
|
// An unknown authorization code must raise OAUTH2_CODE_NOT_EXISTS.
@Test
public void testConsumeAuthorizationCode_null() {
// Invoke and assert
assertServiceException(() -> oauth2CodeService.consumeAuthorizationCode(randomString()),
OAUTH2_CODE_NOT_EXISTS);
}
|
/**
 * Removes every occurrence of {@code charToRemove} from {@code str}.
 *
 * <p>Returns the SAME instance when the input is {@code null} or does not
 * contain the character at all (callers rely on this identity shortcut).
 *
 * @param str           input string, may be null
 * @param charToRemove  character to strip
 * @return a new string without the character, or {@code str} itself if
 *         nothing needed removing
 */
public static String removeCharacter(String str, char charToRemove) {
    if (str == null || str.indexOf(charToRemove) == -1) {
        // Fast path: nothing to remove, return the original instance.
        return str;
    }
    StringBuilder kept = new StringBuilder(str.length());
    for (int idx = 0, n = str.length(); idx < n; idx++) {
        char current = str.charAt(idx);
        if (current != charToRemove) {
            kept.append(current);
        }
    }
    return kept.toString();
}
|
// When the character is absent, the very same String instance is returned
// (identity, not just equality) — hence assertSame.
@Test
void when_removingNotExistingCharactersFromString_then_sameInstanceIsReturned() {
assertSame("-------", StringUtil.removeCharacter("-------", '0'));
}
|
/**
 * Registers an XA recovery helper for the given data source, if a recovery
 * module is present; otherwise this is a no-op.
 *
 * @param dataSourceName data source name (currently unused by this impl)
 * @param xaDataSource   XA data source to wrap for recovery
 */
@Override
public void registerRecoveryResource(final String dataSourceName, final XADataSource xaDataSource) {
if (null != xaRecoveryModule) {
xaRecoveryModule.addXAResourceRecoveryHelper(new DataSourceXAResourceRecoveryHelper(xaDataSource));
}
}
|
// Registering a recovery resource must add a DataSourceXAResourceRecoveryHelper
// to the recovery module exactly once.
@Test
void assertRegisterRecoveryResource() {
transactionManagerProvider.registerRecoveryResource("ds1", xaDataSource);
verify(xaRecoveryModule).addXAResourceRecoveryHelper(any(DataSourceXAResourceRecoveryHelper.class));
}
|
/**
 * Evaluates the configured filter script asynchronously against the message
 * and routes it to the TRUE or FALSE relation based on the result.
 *
 * <p>Script evaluation is logged for rate limiting/metrics; failures are
 * reported via {@code tellFailure}. The callback runs on the DB callback
 * executor.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
ctx.logJsEvalRequest();
withCallback(scriptEngine.executeFilterAsync(msg),
filterResult -> {
ctx.logJsEvalResponse();
ctx.tellNext(msg, filterResult ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE);
},
t -> {
ctx.tellFailure(msg, t);
ctx.logJsEvalFailure();
}, ctx.getDbCallbackExecutor());
}
|
// A filter script resolving to true must route the message to the TRUE relation.
@Test
public void metadataConditionCanBeTrue() throws TbNodeException {
initWithScript();
TbMsgMetaData metaData = new TbMsgMetaData();
TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, null, metaData, TbMsgDataType.JSON, TbMsg.EMPTY_JSON_OBJECT, ruleChainId, ruleNodeId);
// Script engine resolves the filter to true for this message.
when(scriptEngine.executeFilterAsync(msg)).thenReturn(Futures.immediateFuture(true));
node.onMsg(ctx, msg);
verify(ctx).getDbCallbackExecutor();
verify(ctx).tellNext(msg, TbNodeConnectionType.TRUE);
}
|
/**
 * Creates a catalog instance, trying the legacy {@code TableFactoryService}
 * discovery first and falling back to the new factory stack when no legacy
 * factory matches.
 *
 * @param catalogName   name of the catalog to create
 * @param options       catalog options, including the discovery "type" key
 * @param configuration runtime configuration forwarded to the factory context
 * @param classLoader   class loader used for factory discovery
 * @return the created catalog
 * @throws ValidationException if the new-stack factory fails, with all
 *         options listed in the message for diagnosis
 */
public static Catalog createCatalog(
String catalogName,
Map<String, String> options,
ReadableConfig configuration,
ClassLoader classLoader) {
// Use the legacy mechanism first for compatibility
try {
final CatalogFactory legacyFactory =
TableFactoryService.find(CatalogFactory.class, options, classLoader);
return legacyFactory.createCatalog(catalogName, options);
} catch (NoMatchingTableFactoryException e) {
// No matching legacy factory found, try using the new stack
final DefaultCatalogContext discoveryContext =
new DefaultCatalogContext(catalogName, options, configuration, classLoader);
try {
final CatalogFactory factory = getCatalogFactory(discoveryContext);
// The type option is only used for discovery, we don't actually want to forward it
// to the catalog factory itself.
final Map<String, String> factoryOptions =
options.entrySet().stream()
.filter(
entry ->
!CommonCatalogOptions.CATALOG_TYPE
.key()
.equals(entry.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
final DefaultCatalogContext context =
new DefaultCatalogContext(
catalogName, factoryOptions, configuration, classLoader);
return factory.createCatalog(context);
} catch (Throwable t) {
// Wrap any failure with the full (sorted) option list for debuggability.
throw new ValidationException(
String.format(
"Unable to create catalog '%s'.%n%nCatalog options are:%n%s",
catalogName,
options.entrySet().stream()
.map(
optionEntry ->
stringifyOption(
optionEntry.getKey(),
optionEntry.getValue()))
.sorted()
.collect(Collectors.joining("\n"))),
t);
}
}
}
|
// Verifies that FactoryUtil.createCatalog discovers the test factory by its
// "type" option and forwards the remaining options to the created catalog.
// Fix: AssertJ convention is assertThat(actual).isEqualTo(expected); the
// original had actual and expected reversed.
@Test
void testCreateCatalog() {
    final Map<String, String> options = new HashMap<>();
    options.put(CommonCatalogOptions.CATALOG_TYPE.key(), TestCatalogFactory.IDENTIFIER);
    options.put(TestCatalogFactory.DEFAULT_DATABASE.key(), "my-database");
    final Catalog catalog =
            FactoryUtil.createCatalog(
                    "my-catalog",
                    options,
                    null,
                    Thread.currentThread().getContextClassLoader());
    assertThat(catalog).isInstanceOf(TestCatalogFactory.TestCatalog.class);
    final TestCatalogFactory.TestCatalog testCatalog = (TestCatalogFactory.TestCatalog) catalog;
    assertThat(testCatalog.getName()).isEqualTo("my-catalog");
    assertThat(testCatalog.getOptions().get(TestCatalogFactory.DEFAULT_DATABASE.key()))
            .isEqualTo("my-database");
}
|
/**
 * Returns the UTF-16 string for the given Unicode codepoint, or {@code null}
 * when the input is null or not a valid codepoint (e.g. negative or beyond
 * {@code Character.MAX_CODE_POINT}).
 *
 * @param decimalCode decimal codepoint, may be null
 * @return single-codepoint string, or null for invalid input
 */
@Udf
public String chr(@UdfParameter(
    description = "Decimal codepoint") final Integer decimalCode) {
    if (decimalCode == null || !Character.isValidCodePoint(decimalCode)) {
        return null;
    }
    return new String(Character.toChars(decimalCode));
}
|
// A negative codepoint is invalid and must yield null rather than throwing.
@Test
public void shouldReturnNullForNegativeDecimalCode() {
final String result = udf.chr(-1);
assertThat(result, is(nullValue()));
}
|
/**
 * Ensures the Kafka Streams state directory exists, is a directory, and is
 * writable/executable; creates it (including parents) when missing.
 *
 * <p>NOTE: the exact error-message text below is asserted verbatim by tests —
 * do not reword it casually.
 *
 * @param streamsStateDir directory configured via {@code ksql.streams.state.dir}
 * @throws KsqlServerException if the directory cannot be created, is not a
 *         directory, or is not writable/executable
 */
@VisibleForTesting
static void enforceStreamStateDirAvailability(final File streamsStateDir) {
// Attempt to create the directory (and parents) if it does not exist yet.
if (!streamsStateDir.exists()) {
final boolean mkDirSuccess = streamsStateDir.mkdirs();
if (!mkDirSuccess) {
throw new KsqlServerException("Could not create the kafka streams state directory: "
+ streamsStateDir.getPath()
+ "\n Make sure the directory exists and is writable for KSQL server "
+ "\n or its parent directory is writable by KSQL server"
+ "\n or change it to a writable directory by setting '"
+ KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.STATE_DIR_CONFIG
+ "' config in the properties file."
);
}
}
// The path may exist but point at a regular file.
if (!streamsStateDir.isDirectory()) {
throw new KsqlServerException(streamsStateDir.getPath()
+ " is not a directory."
+ "\n Make sure the directory exists and is writable for KSQL server "
+ "\n or its parent directory is writable by KSQL server"
+ "\n or change it to a writable directory by setting '"
+ KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.STATE_DIR_CONFIG
+ "' config in the properties file."
);
}
// Streams needs both write and execute (traverse) permission on the dir.
if (!streamsStateDir.canWrite() || !streamsStateDir.canExecute()) {
throw new KsqlServerException("The kafka streams state directory is not writable "
+ "for KSQL server: "
+ streamsStateDir.getPath()
+ "\n Make sure the directory exists and is writable for KSQL server "
+ "\n or change it to a writable directory by setting '"
+ KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.STATE_DIR_CONFIG
+ "' config in the properties file."
);
}
}
|
// A non-writable state directory must produce a KsqlServerException whose
// message names the directory and the config key to change.
@Test
public void shouldFailIfStreamsStateDirectoryIsNotWritable() {
// Given:
when(mockStreamsStateDir.canWrite()).thenReturn(false);
// When:
final Exception e = assertThrows(
KsqlServerException.class,
() -> KsqlServerMain.enforceStreamStateDirAvailability(mockStreamsStateDir)
);
// Then:
assertThat(e.getMessage(), containsString(
"The kafka streams state directory is not writable for KSQL server: /var/lib/kafka-streams\n"
+ " Make sure the directory exists and is writable for KSQL server \n"
+ " or change it to a writable directory by setting 'ksql.streams.state.dir' "
+ "config in the properties file."));
}
|
/**
 * Tears down the MongoDB resources: drops the database (unless it was
 * supplied statically) and closes the client. Both steps are attempted even
 * if one fails; a single exception is thrown at the end if any step errored.
 *
 * @throws MongoDBResourceManagerException if any cleanup step failed
 */
@Override
public synchronized void cleanupAll() {
LOG.info("Attempting to cleanup MongoDB manager.");
boolean producedError = false;
// First, delete the database if it was not given as a static argument
try {
if (!usingStaticDatabase) {
mongoClient.getDatabase(databaseName).drop();
}
} catch (Exception e) {
LOG.error("Failed to delete MongoDB database {}.", databaseName, e);
producedError = true;
}
// Next, try to close the MongoDB client connection
try {
mongoClient.close();
} catch (Exception e) {
LOG.error("Failed to delete MongoDB client.", e);
producedError = true;
}
// Throw Exception at the end if there were any errors
if (producedError) {
throw new MongoDBResourceManagerException(
"Failed to delete resources. Check above for errors.");
}
super.cleanupAll();
LOG.info("MongoDB manager successfully cleaned up.");
}
|
// A failure while closing the client must surface as a
// MongoDBResourceManagerException from cleanupAll.
@Test
public void testCleanupAllShouldThrowErrorWhenMongoClientFailsToClose() {
doThrow(RuntimeException.class).when(mongoClient).close();
assertThrows(MongoDBResourceManagerException.class, () -> testManager.cleanupAll());
}
|
/**
 * Proceeds with the join point and decorates the RxJava2 return value with a
 * circuit-breaker operator.
 *
 * @param proceedingJoinPoint intercepted invocation
 * @param circuitBreaker      breaker guarding the call
 * @param methodName          name used for logging/diagnostics downstream
 * @return the decorated reactive return value
 * @throws Throwable whatever the underlying invocation throws
 */
@SuppressWarnings("unchecked")
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, CircuitBreaker circuitBreaker,
String methodName) throws Throwable {
CircuitBreakerOperator circuitBreakerOperator = CircuitBreakerOperator.of(circuitBreaker);
Object returnValue = proceedingJoinPoint.proceed();
return executeRxJava2Aspect(circuitBreakerOperator, returnValue, methodName);
}
|
// Verifies the aspect decorates RxJava2 return types (Single, Flowable)
// without returning null.
// NOTE(review): the method name says "Reactor" but the types exercised are
// RxJava2 — consider renaming; left as-is here.
@Test
public void testReactorTypes() throws Throwable {
CircuitBreaker circuitBreaker = CircuitBreaker.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
assertThat(rxJava2CircuitBreakerAspectExt
.handle(proceedingJoinPoint, circuitBreaker, "testMethod")).isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
assertThat(rxJava2CircuitBreakerAspectExt
.handle(proceedingJoinPoint, circuitBreaker, "testMethod")).isNotNull();
}
|
/**
 * Builds the IPv6 solicited-node multicast address for a target address:
 * the fixed prefix ff02::1:ff00:0/104 followed by the last three bytes of
 * the target IP.
 *
 * @param targetIp 16-byte IPv6 address
 * @return 16-byte solicited-node multicast address
 */
public static byte[] getSolicitNodeAddress(byte[] targetIp) {
checkArgument(targetIp.length == Ip6Address.BYTE_LENGTH);
return new byte[] {
(byte) 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, (byte) 0xff,
// Low 24 bits are copied from the target address.
targetIp[targetIp.length - 3],
targetIp[targetIp.length - 2],
targetIp[targetIp.length - 1]
};
}
|
// The derived solicited-node address must match the known-good fixture.
@Test
public void testSolicitationNodeAddress() {
assertArrayEquals(SOLICITATION_NODE_ADDRESS, getSolicitNodeAddress(DESTINATION_ADDRESS));
}
|
/**
 * Marks the named column as optional (nullable) in the pending schema update.
 *
 * @param name column name
 * @return this, for call chaining
 */
@Override
public UpdateSchema makeColumnOptional(String name) {
internalUpdateColumnRequirement(name, true);
return this;
}
|
// A required column becomes optional after makeColumnOptional + apply.
@Test
public void testMakeColumnOptional() {
Schema schema = new Schema(required(1, "id", Types.IntegerType.get()));
Schema expected = new Schema(optional(1, "id", Types.IntegerType.get()));
Schema result = new SchemaUpdate(schema, 1).makeColumnOptional("id").apply();
assertThat(result.asStruct()).isEqualTo(expected.asStruct());
}
|
/**
 * Sets the config-center request timeout in milliseconds.
 *
 * @param timeout timeout in ms
 * @return this builder, for chaining
 */
public ConfigCenterBuilder timeout(Long timeout) {
this.timeout = timeout;
return getThis();
}
|
// The timeout set on the builder must be carried into the built config.
@Test
void timeout() {
ConfigCenterBuilder builder = ConfigCenterBuilder.newBuilder();
builder.timeout(1000L);
Assertions.assertEquals(1000L, builder.build().getTimeout());
}
|
/**
 * Factory for a timeout-mode disrupt context.
 *
 * @return a fresh {@code TimeoutDisruptContext}
 */
public static DisruptContext timeout()
{
return new TimeoutDisruptContext();
}
|
// The timeout factory must produce a context reporting TIMEOUT mode.
@Test
public void testTimeout()
{
DisruptContexts.TimeoutDisruptContext context =
(DisruptContexts.TimeoutDisruptContext) DisruptContexts.timeout();
Assert.assertEquals(context.mode(), DisruptMode.TIMEOUT);
}
|
/**
 * Builds the JSON health response for the requested views.
 *
 * <p>The optional check-type query parameter (first value only) selects which
 * health-check type the overall status is computed from. Serialization
 * failures are logged and rethrown as {@link RuntimeException}.
 *
 * @param queryParams request query parameters
 * @return response with JSON body, HTTP 200 when healthy, 503 otherwise
 */
@NonNull
@Override
public HealthResponse healthResponse(final Map<String, Collection<String>> queryParams) {
    // Only the first value of the check-type parameter is honored.
    final String checkType = queryParams
            .getOrDefault(CHECK_TYPE_QUERY_PARAM, Collections.emptyList())
            .stream()
            .findFirst()
            .orElse(null);
    final Collection<HealthStateView> views = getViews(queryParams);
    String responseBody;
    try {
        responseBody = mapper.writeValueAsString(views);
    } catch (final Exception e) {
        LOGGER.error("Failed to serialize health state views: {}", views, e);
        throw new RuntimeException(e);
    }
    final boolean healthy = healthStatusChecker.isHealthy(checkType);
    // 200 OK when healthy, 503 Service Unavailable otherwise.
    final int status = healthy ? 200 : 503;
    return new HealthResponse(healthy, responseBody, MEDIA_TYPE, status);
}
|
// Requesting a single named view must yield a healthy JSON response matching
// the stored fixture.
@Test
void shouldHandleSingleHealthStateViewCorrectly() throws IOException {
// given
final HealthStateView view = new HealthStateView("foo", true, HealthCheckType.READY, true);
final Map<String, Collection<String>> queryParams = Collections.singletonMap(
JsonHealthResponseProvider.NAME_QUERY_PARAM, Collections.singleton(view.getName()));
// when
when(healthStateAggregator.healthStateView(view.getName())).thenReturn(Optional.of(view));
// No check-type param supplied, so isHealthy is called with null.
when(healthStatusChecker.isHealthy(isNull())).thenReturn(true);
final HealthResponse response = jsonHealthResponseProvider.healthResponse(queryParams);
// then
assertThat(response.isHealthy()).isTrue();
assertThat(response.getContentType()).isEqualTo(MediaType.APPLICATION_JSON);
// Compare as parsed JSON to avoid formatting-sensitive string comparison.
assertThat(mapper.readTree(response.getMessage())).isEqualTo(mapper.readTree(fixture("/json/single-healthy-response.json")));
}
|
/**
 * Runs the sensor: processes the error file for every input file written in
 * the Xoo language.
 */
@Override
public void execute(SensorContext context) {
for (InputFile file : context.fileSystem().inputFiles(context.fileSystem().predicates().hasLanguages(Xoo.KEY))) {
processFileError(file, context);
}
}
|
// End-to-end: an error file next to the source must surface as exactly one
// AnalysisError with the expected file, location and message.
@Test
public void test() throws IOException {
Path baseDir = temp.newFolder().toPath().toAbsolutePath();
createErrorFile(baseDir);
// Synthetic line offsets for a 4-line file.
int[] startOffsets = {10, 20, 30, 40};
int[] endOffsets = {19, 29, 39, 49};
DefaultInputFile inputFile = new TestInputFileBuilder("foo", "src/foo.xoo")
.setLanguage("xoo")
.setOriginalLineStartOffsets(startOffsets)
.setOriginalLineEndOffsets(endOffsets)
.setModuleBaseDir(baseDir)
.setLines(4)
.build();
SensorContextTester context = SensorContextTester.create(baseDir);
context.fileSystem().add(inputFile);
sensor.execute(context);
assertThat(context.allAnalysisErrors()).hasSize(1);
AnalysisError error = context.allAnalysisErrors().iterator().next();
assertThat(error.inputFile()).isEqualTo(inputFile);
assertThat(error.location()).isEqualTo(new DefaultTextPointer(1, 4));
assertThat(error.message()).isEqualTo("my error");
}
|
/**
 * Handles flow cancellation: cancels any in-flight RDA session, logs the
 * cancellation remotely (with account id when available, and hidden for the
 * RDA wid-checker upgrade action), then returns OK.
 */
@Override
public AppResponse process(Flow flow, CancelFlowRequest request) {
Map<String, Object> logOptions = new HashMap<>();
if (appAuthenticator != null && appAuthenticator.getAccountId() != null) logOptions.put(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId());
if ("upgrade_rda_widchecker".equals(appSession.getAction())) logOptions.put(HIDDEN, true);
if (appSession.getRdaSessionId() != null) {
rdaClient.cancel(appSession.getRdaSessionId());
}
digidClient.remoteLog(getLogCode(appSession.getAction()), logOptions);
return new OkResponse();
}
|
// With an authenticator present and no RDA session, process must skip the
// RDA cancel, log code "1311" with account id + hidden flag, and return OK.
// Fix: removed the unused local variables 'logOptions' and 'logCode'.
@Test
public void processReturnOkResponseWithAppAutheticatorAndNoRdaSession() {
    // Given: no active RDA session, so rdaClient.cancel must not be invoked.
    mockedAppSession.setRdaSessionId(null);
    // When
    AppResponse appResponse = cancelled.process(mockedFlow, cancelFlowRequest);
    // Then
    verify(digidClient, times(1)).remoteLog("1311", ImmutableMap.of(lowerUnderscore(ACCOUNT_ID), mockedAppAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true));
    assertTrue(appResponse instanceof OkResponse);
}
|
/**
 * Maps an activation method to the name of the flow that implements it.
 *
 * @param activationMethod one of the {@code ActivationMethod} constants
 * @return the corresponding flow name
 * @throws IllegalStateException for an unrecognized activation method
 */
public static String getFlowName(String activationMethod) {
return switch (activationMethod) {
case ActivationMethod.ACCOUNT -> ActivateAppWithRequestWebsite.NAME;
case ActivationMethod.PASSWORD -> ActivateAppWithPasswordLetterFlow.NAME;
case ActivationMethod.SMS -> ActivateAppWithPasswordSmsFlow.NAME;
case ActivationMethod.RDA -> ActivateAppWithPasswordRdaFlow.NAME;
case ActivationMethod.APP -> ActivateAppWithOtherAppFlow.NAME;
case ActivationMethod.LETTER -> ActivateAccountAndAppFlow.NAME;
case ActivationMethod.UNDEFINED -> UndefinedFlow.NAME;
default -> throw new IllegalStateException("Unexpected value: " + activationMethod);
};
}
|
// RDA activation must resolve to the password-RDA flow name.
@Test
void getFlowNameTest() {
String result = flowService.getFlowName(ActivationMethod.RDA);
assertEquals(ActivateAppWithPasswordRdaFlow.NAME, result);
}
|
/**
 * Converts a data table to the target type without transposing; delegates to
 * the three-argument overload with {@code transposed = false}.
 */
@Override
public <T> T convert(DataTable dataTable, Type type) {
return convert(dataTable, type, false);
}
|
// A table with two value columns per key cannot convert to Map<String,String>;
// the error should suggest Map<String, List<String>> instead.
@Test
void convert_to_map_of_string_to_string__throws_exception__more_then_one_value_per_key() {
DataTable table = parse("",
"| KMSY | 29.993333 | -90.258056 |",
"| KSFO | 37.618889 | -122.375 |",
"| KSEA | 47.448889 | -122.309444 |",
"| KJFK | 40.639722 | -73.778889 |");
CucumberDataTableException exception = assertThrows(
CucumberDataTableException.class,
() -> converter.convert(table, MAP_OF_STRING_TO_STRING));
assertThat(exception.getMessage(), is(format("" +
"Can't convert DataTable to Map<%s, %s>.\n" +
"There is more then one value per key. " +
"Did you mean to transform to Map<%s, List<%s>> instead?",
typeName(String.class), typeName(String.class), typeName(String.class), typeName(String.class))));
}
|
/**
 * Guards against negative quantities.
 *
 * @throws IllegalStateException if this quantity's value is negative
 */
public void assertNonNegative() {
    if (value < 0) {
        throw new IllegalStateException("non negative value required");
    }
}
|
// A negative quantity must fail the non-negative check with
// IllegalStateException (expected via the @Test annotation).
@Test(expected=IllegalStateException.class)
public void negativeQuantity_throwsIllegalStateExceptionOnCheckForNonNegative() throws Exception {
Quantity<Metrics> negative = new Quantity<Metrics>(-1, Metrics.cm);
negative.assertNonNegative();
}
|
/**
 * Creates a {@code HiveCatalog} from the validated factory options:
 * default database, Hive/Hadoop conf directories and Hive version.
 */
@Override
public Catalog createCatalog(Context context) {
final FactoryUtil.CatalogFactoryHelper helper =
FactoryUtil.createCatalogFactoryHelper(this, context);
helper.validate();
return new HiveCatalog(
context.getName(),
helper.getOptions().get(DEFAULT_DATABASE),
helper.getOptions().get(HIVE_CONF_DIR),
helper.getOptions().get(HADOOP_CONF_DIR),
helper.getOptions().get(HIVE_VERSION));
}
|
// Creates two Hive catalogs concurrently, each pointed at a different conf
// dir, and verifies that each catalog only sees its own Hive configuration
// (no cross-contamination between concurrently loaded HiveConfs).
@Test
public void testCreateMultipleHiveCatalog() throws Exception {
final Map<String, String> props1 = new HashMap<>();
props1.put(CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER);
props1.put(
HiveCatalogFactoryOptions.HIVE_CONF_DIR.key(),
Thread.currentThread()
.getContextClassLoader()
.getResource("test-multi-hive-conf1")
.getPath());
final Map<String, String> props2 = new HashMap<>();
props2.put(CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER);
props2.put(
HiveCatalogFactoryOptions.HIVE_CONF_DIR.key(),
Thread.currentThread()
.getContextClassLoader()
.getResource("test-multi-hive-conf2")
.getPath());
// Create both catalogs in parallel on the shared executor.
Callable<Catalog> callable1 =
() ->
FactoryUtil.createCatalog(
"cat1",
props1,
null,
Thread.currentThread().getContextClassLoader());
Callable<Catalog> callable2 =
() ->
FactoryUtil.createCatalog(
"cat2",
props2,
null,
Thread.currentThread().getContextClassLoader());
ExecutorService executorService = EXECUTOR_RESOURCE.getExecutor();
Future<Catalog> future1 = executorService.submit(callable1);
Future<Catalog> future2 = executorService.submit(callable2);
HiveCatalog catalog1 = (HiveCatalog) future1.get();
HiveCatalog catalog2 = (HiveCatalog) future2.get();
// verify we read our own props
assertThat(catalog1.getHiveConf().get("key")).isEqualTo("val1");
assertThat(catalog1.getHiveConf().get("conf1", null)).isNotNull();
// verify we don't read props from other conf
assertThat(catalog1.getHiveConf().get("conf2", null)).isNull();
// verify we read our own props
assertThat(catalog2.getHiveConf().get("key")).isEqualTo("val2");
assertThat(catalog2.getHiveConf().get("conf2", null)).isNotNull();
// verify we don't read props from other conf
assertThat(catalog2.getHiveConf().get("conf1", null)).isNull();
}
|
// Component activation: registers leadership interest for every work partition
// and starts the periodic partition-rebalance check.
@Activate
public void activate() {
    localNodeId = clusterService.getLocalNode().id();
    leadershipService.addListener(leaderListener);
    listenerRegistry = new ListenerRegistry<>();
    eventDispatcher.addSink(WorkPartitionEvent.class, listenerRegistry);
    // Contend for leadership of each partition topic.
    for (int i = 0; i < NUM_PARTITIONS; i++) {
        leadershipService.runForLeadership(getPartitionPath(i));
        log.debug("Registered to run for {}", getPartitionPath(i));
    }
    // Kick off an immediate rebalance, then re-check periodically.
    executor.scheduleAtFixedRate(() -> scheduleRebalance(0), 0,
                                 CHECK_PARTITION_BALANCE_PERIOD_SEC, TimeUnit.SECONDS);
    log.info("Started");
}
|
// Verifies that activation runs for leadership of every partition exactly once.
@Test
public void testActivate() {
    reset(leadershipService);
    leadershipService.addListener(anyObject(LeadershipEventListener.class));
    // Expect one runForLeadership call per partition.
    for (int i = 0; i < WorkPartitionManager.NUM_PARTITIONS; i++) {
        expect(leadershipService.runForLeadership(ELECTION_PREFIX + i))
            .andReturn(null)
            .times(1);
    }
    replay(leadershipService);
    partitionManager.activate();
    verify(leadershipService);
}
|
/**
 * Builds the case-value -> output-rowset mapping used at runtime to route rows.
 *
 * <p>For each configured case target the case value is converted to the switch
 * field's type and stored in {@code data.outputMap}; null case values route to
 * {@code data.nullRowSetSet}. The default target (if any) is added to
 * {@code data.defaultRowSetSet} and also handles nulls when no explicit null
 * target exists.
 *
 * @throws KettleException if the switch field is missing, a target step is
 *         unspecified/unresolvable, or a case value cannot be converted
 */
void createOutputValueMapping() throws KettleException {
    data.outputRowMeta = getInputRowMeta().clone();
    meta.getFields( getInputRowMeta(), getStepname(), null, null, this, repository, metaStore );
    // Locate the field this switch/case step dispatches on.
    data.fieldIndex = getInputRowMeta().indexOfValue( meta.getFieldname() );
    if ( data.fieldIndex < 0 ) {
        throw new KettleException( BaseMessages.getString( PKG, "SwitchCase.Exception.UnableToFindFieldName", meta
            .getFieldname() ) );
    }
    data.inputValueMeta = getInputRowMeta().getValueMeta( data.fieldIndex );
    try {
        StepIOMetaInterface ioMeta = meta.getStepIOMeta();
        // There is one or many case target for each target stream.
        // The ioMeta object has one more target stream for the default target though.
        //
        List<StreamInterface> targetStreams = ioMeta.getTargetStreams();
        for ( int i = 0; i < targetStreams.size(); i++ ) {
            SwitchCaseTarget target = (SwitchCaseTarget) targetStreams.get( i ).getSubject();
            if ( target == null ) {
                break; // Skip over default option
            }
            if ( target.caseTargetStep == null ) {
                throw new KettleException( BaseMessages.getString(
                    PKG, "SwitchCase.Log.NoTargetStepSpecifiedForValue", target.caseValue ) );
            }
            RowSet rowSet = findOutputRowSet( target.caseTargetStep.getName() );
            if ( rowSet == null ) {
                throw new KettleException( BaseMessages.getString(
                    PKG, "SwitchCase.Log.UnableToFindTargetRowSetForStep", target.caseTargetStep ) );
            }
            try {
                // Convert the textual case value into the switch field's data type.
                Object value =
                    data.valueMeta.convertDataFromString(
                        target.caseValue, data.stringValueMeta, null, null, ValueMetaInterface.TRIM_TYPE_NONE );
                // If we have a value and a rowset, we can store the combination in the map
                //
                if ( data.valueMeta.isNull( value ) ) {
                    data.nullRowSetSet.add( rowSet );
                } else {
                    // could not use byte[] as key in Maps, so we need to convert it to his specific hashCode for future
                    // comparisons
                    value = prepareObjectType( value );
                    data.outputMap.put( value, rowSet );
                }
            } catch ( Exception e ) {
                throw new KettleException( BaseMessages.getString(
                    PKG, "SwitchCase.Log.UnableToConvertValue", target.caseValue ), e );
            }
        }
        // The default target also catches nulls when no explicit null target was configured.
        if ( meta.getDefaultTargetStep() != null ) {
            RowSet rowSet = findOutputRowSet( meta.getDefaultTargetStep().getName() );
            if ( rowSet != null ) {
                data.defaultRowSetSet.add( rowSet );
                if ( data.nullRowSetSet.isEmpty() ) {
                    data.nullRowSetSet.add( rowSet );
                }
            }
        }
    } catch ( Exception e ) {
        throw new KettleException( e );
    }
}
|
// Loads a switch/case definition from XML, wires real target steps and row sets,
// then verifies that createOutputValueMapping routes each case value (and nulls
// and the default) to the expected row sets.
@Test
public void testCreateOutputValueMapping() throws KettleException, URISyntaxException,
        ParserConfigurationException, SAXException, IOException {
    SwitchCaseCustom krasavez = new SwitchCaseCustom( mockHelper );
    // load step info value-case mapping from xml.
    List<DatabaseMeta> emptyList = new ArrayList<DatabaseMeta>();
    krasavez.meta.loadXML( loadStepXmlMetadata( "SwitchCaseTest.xml" ), emptyList, mock( IMetaStore.class ) );
    KeyToRowSetMap expectedNN = new KeyToRowSetMap();
    Set<RowSet> nulls = new HashSet<RowSet>();
    // create real steps for all targets
    List<SwitchCaseTarget> list = krasavez.meta.getCaseTargets();
    for ( SwitchCaseTarget item : list ) {
        StepMetaInterface smInt = new DummyTransMeta();
        StepMeta stepMeta = new StepMeta( item.caseTargetStepname, smInt );
        item.caseTargetStep = stepMeta;
        // create and put row set for this
        RowSet rw = new QueueRowSet();
        krasavez.map.put( item.caseTargetStepname, rw );
        // null values goes to null rowset
        if ( item.caseValue != null ) {
            expectedNN.put( item.caseValue, rw );
        } else {
            nulls.add( rw );
        }
    }
    // create default step
    StepMetaInterface smInt = new DummyTransMeta();
    StepMeta stepMeta = new StepMeta( krasavez.meta.getDefaultTargetStepname(), smInt );
    krasavez.meta.setDefaultTargetStep( stepMeta );
    RowSet rw = new QueueRowSet();
    krasavez.map.put( krasavez.meta.getDefaultTargetStepname(), rw );
    krasavez.createOutputValueMapping();
    // inspect step output data:
    Set<RowSet> ones = krasavez.data.outputMap.get( "1" );
    assertEquals( "Output map for 1 values contains 2 row sets", 2, ones.size() );
    Set<RowSet> twos = krasavez.data.outputMap.get( "2" );
    assertEquals( "Output map for 2 values contains 1 row sets", 1, twos.size() );
    assertEquals( "Null row set contains 2 items: ", 2, krasavez.data.nullRowSetSet.size() );
    assertEquals( "We have at least one default rowset", 1, krasavez.data.defaultRowSetSet.size() );
    // check that rowsets data is correct:
    Set<RowSet> rowsets = expectedNN.get( "1" );
    for ( RowSet rowset : rowsets ) {
        assertTrue( "Output map for 1 values contains expected row set", ones.contains( rowset ) );
    }
    rowsets = expectedNN.get( "2" );
    for ( RowSet rowset : rowsets ) {
        assertTrue( "Output map for 2 values contains expected row set", twos.contains( rowset ) );
    }
    for ( RowSet rowset : krasavez.data.nullRowSetSet ) {
        assertTrue( "Output map for null values contains expected row set", nulls.contains( rowset ) );
    }
    // we have already check that there is only one item.
    for ( RowSet rowset : krasavez.data.defaultRowSetSet ) {
        assertTrue( "Output map for default case contains expected row set", rowset.equals( rw ) );
    }
}
|
/**
 * Returns the wrapped enum set.
 *
 * <p>Note: this returns the internal reference, not a defensive copy.
 *
 * @return the underlying {@code EnumSet}
 */
public EnumSet<E> get() {
    return value;
}
|
// Verifies two behaviors for empty enum sets:
// 1) constructing an EnumSetWritable from an empty set without the element
//    class must fail (the element type cannot be inferred), and
// 2) with the element class supplied, an empty set round-trips through
//    ObjectWritable serialization unchanged.
@SuppressWarnings("unchecked")
@Test
public void testSerializeAndDeserializeEmpty() throws IOException {
    boolean gotException = false;
    try {
        new EnumSetWritable<TestEnumSet>(emptyFlag);
    } catch (RuntimeException e) {
        // expected: no element type class was provided
        gotException = true;
    }
    assertTrue(
        "Instantiation of empty EnumSetWritable with no element type class "
            + "provided should throw exception.",
        gotException);
    EnumSetWritable<TestEnumSet> emptyFlagWritable =
        new EnumSetWritable<TestEnumSet>(emptyFlag, TestEnumSet.class);
    DataOutputBuffer out = new DataOutputBuffer();
    ObjectWritable.writeObject(out, emptyFlagWritable, emptyFlagWritable
        .getClass(), null);
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    EnumSet<TestEnumSet> read = ((EnumSetWritable<TestEnumSet>) ObjectWritable
        .readObject(in, null)).get();
    assertEquals(read, emptyFlag);
}
|
// Closes the wrapped Kubernetes client and gracefully shuts down the
// I/O executor (waiting up to 5 seconds for in-flight tasks).
@Override
public void close() {
    this.internalClient.close();
    ExecutorUtils.gracefulShutdown(5, TimeUnit.SECONDS, this.kubeClientExecutorService);
}
|
// Closing the FlinkKubeClient must also shut down the executor it was given.
@Test
void testIOExecutorShouldBeShutDownWhenFlinkKubeClientClosed() {
    final ScheduledExecutorService executorService =
            Executors.newSingleThreadScheduledExecutor();
    final FlinkKubeClient flinkKubeClient =
            new Fabric8FlinkKubeClient(flinkConfig, kubeClient, executorService);
    flinkKubeClient.close();
    assertThat(executorService.isShutdown()).isTrue();
}
|
/**
 * Rebuilds the command topic from a backup: deletes any existing topic,
 * recreates it, then replays the backed-up commands into it.
 *
 * @param backupCommands key/value command records to replay, in order
 */
public void restore(final List<Pair<byte[], byte[]>> backupCommands) {
    // Delete the command topic
    deleteCommandTopicIfExists();
    // Create the command topic
    KsqlInternalTopicUtils.ensureTopic(commandTopicName, serverConfig, topicClient);
    // Restore the commands
    restoreCommandTopic(backupCommands);
}
|
// Verifies that restore() creates the command topic and replays each backed-up
// record in its own transaction (begin -> send -> get -> commit), closing the
// producer at the end.
@Test
public void shouldCreateAndRestoreCommandTopic() throws ExecutionException, InterruptedException {
    // Given:
    when(topicClient.isTopicExists(COMMAND_TOPIC_NAME)).thenReturn(false);
    // When:
    restoreCommandTopic.restore(BACKUP_COMMANDS);
    // Then:
    verifyCreateCommandTopic();
    final InOrder inOrder = inOrder(kafkaProducer, future1, future2, future3);
    inOrder.verify(kafkaProducer).initTransactions();
    inOrder.verify(kafkaProducer).beginTransaction();
    inOrder.verify(kafkaProducer).send(RECORD_1);
    inOrder.verify(future1).get();
    inOrder.verify(kafkaProducer).commitTransaction();
    inOrder.verify(kafkaProducer).beginTransaction();
    inOrder.verify(kafkaProducer).send(RECORD_2);
    inOrder.verify(future2).get();
    inOrder.verify(kafkaProducer).commitTransaction();
    inOrder.verify(kafkaProducer).beginTransaction();
    inOrder.verify(kafkaProducer).send(RECORD_3);
    inOrder.verify(future3).get();
    inOrder.verify(kafkaProducer).commitTransaction();
    inOrder.verify(kafkaProducer).close();
    verifyNoMoreInteractions(kafkaProducer, future1, future2, future3);
}
|
/**
 * Creates the event definition without scheduling it (no job definition or
 * job trigger is created).
 *
 * @param unsavedEventDefinition the definition to persist
 * @param user optional user to attribute the creation to
 * @return the persisted event definition
 */
public EventDefinitionDto createWithoutSchedule(EventDefinitionDto unsavedEventDefinition, Optional<User> user) {
    return createEventDefinition(unsavedEventDefinition, user);
}
|
// createWithoutSchedule must persist the event definition but create neither a
// job definition nor a job trigger for it.
@Test
public void createWithoutSchedule() {
    final EventDefinitionDto newDto = EventDefinitionDto.builder()
        .title("Test")
        .description("A test event definition")
        .config(TestEventProcessorConfig.builder()
            .message("This is a test event processor")
            .searchWithinMs(300000)
            .executeEveryMs(60001)
            .build())
        .priority(3)
        .alert(false)
        .notificationSettings(EventNotificationSettings.withGracePeriod(60000))
        .keySpec(ImmutableList.of("a", "b"))
        .notifications(ImmutableList.of())
        .build();
    final EventDefinitionDto dto = handler.createWithoutSchedule(newDto, Optional.empty());
    // Handler should create the event definition
    assertThat(eventDefinitionService.get(dto.id())).isPresent();
    // Handler should NOT create a job definition for the event definition/processor
    assertThat(jobDefinitionService.getByConfigField("event_definition_id", dto.id())).isNotPresent();
    // And the handler should also NOT create a job trigger for the created job definition
    assertThat(jobTriggerService.nextRunnableTrigger()).isNotPresent();
}
|
/**
 * Registers this collector with the default Prometheus registry.
 *
 * @return this collector, for chaining
 */
public JmxCollector register() {
    return register(PrometheusRegistry.defaultRegistry);
}
|
// With lowercaseOutputLabelNames enabled, the configured label name "ABC" must
// be exported as "abc" (values are left untouched). Backticks in the YAML are
// swapped for double quotes before parsing.
@Test
public void testLowercaseOutputLabelNames() throws Exception {
    new JmxCollector(
            "\n---\nlowercaseOutputLabelNames: true\nrules:\n- pattern: `^hadoop<service=DataNode, name=DataNodeActivity-ams-hdd001-50010><>replaceBlockOpMinTime:`\n  name: Foo\n  labels:\n    ABC: DEF"
                    .replace('`', '"'))
            .register(prometheusRegistry);
    assertEquals(200, getSampleValue("Foo", new String[] {"abc"}, new String[] {"DEF"}), .001);
}
|
/**
 * Configures the list class and inner serde from the supplied configs.
 *
 * @param configs configuration map
 * @param isKey whether this deserializer is used for record keys
 * @throws ConfigException if this instance was already initialized via a
 *         non-default constructor (listClass/inner already set)
 */
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
    // Guard against double initialization: constructor-injected state wins.
    if (listClass != null || inner != null) {
        log.error("Could not configure ListDeserializer as some parameters were already set -- listClass: {}, inner: {}", listClass, inner);
        throw new ConfigException("List deserializer was already initialized using a non-default constructor");
    }
    configureListClass(configs, isKey);
    configureInnerSerde(configs, isKey);
}
|
// Configuring with only the inner-serde class (no list class) must fail with
// a descriptive ConfigException.
@Test
public void testListValueDeserializerNoArgConstructorsShouldThrowConfigExceptionDueMissingTypeClassProp() {
    props.put(CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS, Serdes.StringSerde.class);
    final ConfigException exception = assertThrows(
        ConfigException.class,
        () -> listDeserializer.configure(props, false)
    );
    assertEquals("Not able to determine the list class because "
        + "it was neither passed via the constructor nor set in the config.", exception.getMessage());
}
|
/**
 * Adds or updates the given job graph in the state handle store.
 *
 * <p>Retries until it succeeds: if the graph does not exist yet it is added;
 * if it exists and was previously added by this instance it is replaced.
 * Concurrent add/remove races are detected via the store's exceptions and
 * resolved by looping again. Updating a graph this instance never added is a
 * programming error and fails fast.
 *
 * @param jobGraph the job graph to persist, must not be null
 * @throws Exception if the store operation fails for a non-retryable reason
 */
@Override
public void putJobGraph(JobGraph jobGraph) throws Exception {
    checkNotNull(jobGraph, "Job graph");
    final JobID jobID = jobGraph.getJobID();
    final String name = jobGraphStoreUtil.jobIDToName(jobID);
    LOG.debug("Adding job graph {} to {}.", jobID, jobGraphStateHandleStore);
    boolean success = false;
    while (!success) {
        synchronized (lock) {
            verifyIsRunning();
            final R currentVersion = jobGraphStateHandleStore.exists(name);
            if (!currentVersion.isExisting()) {
                try {
                    jobGraphStateHandleStore.addAndLock(name, jobGraph);
                    addedJobGraphs.add(jobID);
                    success = true;
                } catch (StateHandleStore.AlreadyExistException ignored) {
                    // Lost a race with a concurrent writer; retry and take the replace path.
                    LOG.warn("{} already exists in {}.", jobGraph, jobGraphStateHandleStore);
                }
            } else if (addedJobGraphs.contains(jobID)) {
                try {
                    jobGraphStateHandleStore.replace(name, currentVersion, jobGraph);
                    LOG.info("Updated {} in {}.", jobGraph, getClass().getSimpleName());
                    success = true;
                } catch (StateHandleStore.NotExistException ignored) {
                    // Removed concurrently; retry and take the add path.
                    LOG.warn("{} does not exist in {}.", jobGraph, jobGraphStateHandleStore);
                }
            } else {
                throw new IllegalStateException(
                        "Trying to update a graph you didn't "
                                + "#getAllSubmittedJobGraphs() or #putJobGraph() yourself before.");
            }
        }
    }
    LOG.info("Added {} to {}.", jobGraph, jobGraphStateHandleStore);
}
|
// A watcher notification for a job graph this store already added itself must
// not be forwarded to the listener (no duplicate "added" events).
@Test
public void testOnAddedJobGraphShouldNotProcessKnownJobGraphs() throws Exception {
    final TestingStateHandleStore<JobGraph> stateHandleStore =
            builder.setAddFunction((ignore, state) -> jobGraphStorageHelper.store(state))
                    .build();
    final JobGraphStore jobGraphStore = createAndStartJobGraphStore(stateHandleStore);
    jobGraphStore.putJobGraph(testingJobGraph);
    // Simulate the watcher reporting the graph we just put ourselves.
    testingJobGraphStoreWatcher.addJobGraph(testingJobGraph.getJobID());
    assertThat(testingJobGraphListener.getAddedJobGraphs().size(), is(0));
}
|
/**
 * Translates text into the target language, auto-detecting the source language.
 *
 * @param text the text to translate
 * @param targetLanguage the language code to translate into
 * @return the translated text
 * @throws TikaException if language detection or translation fails
 * @throws IOException on I/O failure during translation
 */
public String translate(String text, String targetLanguage) throws TikaException, IOException {
    // Detect the source language, then delegate to the explicit-source overload.
    final String detectedSource = detectLanguage(text).getLanguage();
    return translate(text, detectedSource, targetLanguage);
}
|
// Without configuration the translator must fall back to returning the source
// text unchanged (same convention as the other Translator implementations).
@Test
public void testNoConfig() throws Exception {
    String source = "Apache Tika is a wonderful tool";
    String expected =
            "Apache Tika is a wonderful tool"; // Pattern from other Translators is to return source
    String translated = translator.translate(source, "en", "zz");
    assertTrue(expected.equalsIgnoreCase(translated),
            "Translate " + source + " to " + expected + " (was " + translated + ")");
}
|
/**
 * Formats the given object, expected to be a DICOM {@code Attributes}
 * instance, by expanding it to the pattern's arguments and delegating to the
 * underlying {@code MessageFormat}.
 */
@Override
public StringBuffer format(Object obj, StringBuffer result, FieldPosition pos) {
    return format.format(toArgs((Attributes) obj), result, pos);
}
|
// Verifies the MD5-based path pattern: date/time components plus hashed UID
// segments must produce a stable, expected file path.
@Test
public void testFormatMD5() {
    Attributes attrs = new Attributes();
    attrs.setString(Tag.StudyDate, VR.DA, "20111012");
    attrs.setString(Tag.StudyTime, VR.TM, "0930");
    attrs.setString(Tag.StudyInstanceUID, VR.UI, "1.2.3");
    attrs.setString(Tag.SeriesInstanceUID, VR.UI, "1.2.3.4");
    attrs.setString(Tag.SOPInstanceUID, VR.UI, "1.2.3.4.5");
    assertEquals("2011/10/12/09/02C82A3A/71668980/08vpsu2l2shpb0kc3orpgfnhv0.dcm",
            new AttributesFormat(TEST_PATTERN_MD5).format(attrs));
}
|
/**
 * Tries to claim space in the ring buffer for a message of the given type and
 * length without copying any payload yet.
 *
 * <p>On success the record header is written with a negative length (marking
 * the record as in-progress until committed) and the offset at which the
 * caller should write the encoded message is returned.
 *
 * @param msgTypeId type identifier for the message; must be positive
 * @param length payload length in bytes
 * @return offset for the encoded message, or INSUFFICIENT_CAPACITY if the
 *         buffer cannot currently hold the record
 */
public int tryClaim(final int msgTypeId, final int length)
{
    checkTypeId(msgTypeId);
    checkMsgLength(length);
    final AtomicBuffer buffer = this.buffer;
    final int recordLength = length + HEADER_LENGTH;
    final int recordIndex = claimCapacity(buffer, recordLength);
    if (INSUFFICIENT_CAPACITY == recordIndex)
    {
        return recordIndex;
    }
    // Negative length marks the record as claimed-but-uncommitted; the ordered
    // write plus release fence ensure the header is visible before the type id.
    buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
    MemoryAccess.releaseFence();
    buffer.putInt(typeOffset(recordIndex), msgTypeId);
    return encodedMsgOffset(recordIndex);
}
|
// tryClaim must return the offset just past the record header, and must write
// the negative in-progress length before the message type id.
@Test
void tryClaimReturnsIndexAtWhichEncodedMessageStarts()
{
    final int length = 10;
    final int recordLength = length + HEADER_LENGTH;
    final int alignedRecordLength = align(recordLength, ALIGNMENT);
    final long headPosition = 248L;
    final long tailPosition = 320L;
    final int recordIndex = (int)tailPosition;
    // Stub the head/tail counters so the CAS on the tail succeeds.
    when(buffer.getLongVolatile(HEAD_COUNTER_CACHE_INDEX)).thenReturn(headPosition);
    when(buffer.getLongVolatile(TAIL_COUNTER_INDEX)).thenReturn(tailPosition);
    when(buffer.compareAndSetLong(TAIL_COUNTER_INDEX, tailPosition, tailPosition + alignedRecordLength))
        .thenReturn(TRUE);
    final int index = ringBuffer.tryClaim(MSG_TYPE_ID, length);
    assertEquals(recordIndex + HEADER_LENGTH, index);
    // Verify the exact write ordering of the claim protocol.
    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer).getLongVolatile(HEAD_COUNTER_CACHE_INDEX);
    inOrder.verify(buffer).getLongVolatile(TAIL_COUNTER_INDEX);
    inOrder.verify(buffer).compareAndSetLong(TAIL_COUNTER_INDEX, tailPosition, tailPosition + alignedRecordLength);
    inOrder.verify(buffer).putIntOrdered(lengthOffset(recordIndex), -recordLength);
    inOrder.verify(buffer).putInt(typeOffset(recordIndex), MSG_TYPE_ID);
    inOrder.verifyNoMoreInteractions();
}
|
/**
 * Validates that every dependency matches at least one allowed rule, and
 * (optionally) that every rule matches at least one dependency.
 *
 * @param dependencies resolved artifacts to check
 * @param allowedRules rule strings describing permitted dependencies
 * @param failOnUnmatched if true, rules matching no dependency are also an error
 * @throws EnforcerRuleException listing unmatched artifacts and/or unused rules
 */
static void validateDependencies(Set<Artifact> dependencies, Set<String> allowedRules, boolean failOnUnmatched)
        throws EnforcerRuleException {
    // Artifacts no rule accepted, kept sorted for stable error output.
    SortedSet<Artifact> unmatchedArtifacts = new TreeSet<>();
    // Rules that accepted at least one artifact.
    Set<String> matchedRules = new HashSet<>();
    for (Artifact dependency : dependencies) {
        String acceptingRule = null;
        for (String rule : allowedRules) {
            if (matches(dependency, rule)) {
                acceptingRule = rule;
                break;
            }
        }
        if (acceptingRule == null) {
            unmatchedArtifacts.add(dependency);
        } else {
            matchedRules.add(acceptingRule);
        }
    }
    // Rules that never matched anything, sorted for stable error output.
    SortedSet<String> unmatchedRules = new TreeSet<>(allowedRules);
    unmatchedRules.removeAll(matchedRules);
    boolean hasUnmatchedArtifacts = !unmatchedArtifacts.isEmpty();
    boolean reportUnusedRules = failOnUnmatched && !unmatchedRules.isEmpty();
    if (hasUnmatchedArtifacts || reportUnusedRules) {
        StringBuilder errorMessage = new StringBuilder("Vespa dependency enforcer failed:\n");
        if (hasUnmatchedArtifacts) {
            errorMessage.append("Dependencies not matching any rule:\n");
            for (Artifact artifact : unmatchedArtifacts) {
                errorMessage.append(" - ").append(artifact.toString()).append('\n');
            }
        }
        if (reportUnusedRules) {
            errorMessage.append("Rules not matching any dependency:\n");
            for (String rule : unmatchedRules) {
                errorMessage.append(" - ").append(rule).append('\n');
            }
        }
        throw new EnforcerRuleException(errorMessage.toString());
    }
}
|
// A dependency whose version falls inside the rule's version range must pass.
@Test
void matches_on_version_in_allowed_range() {
    var dependencies = Set.of(
            artifact("com.yahoo.testing", "test", "1.2.3", "compile"));
    // Range [1.0,2) includes 1.2.3.
    var rules = Set.of("com.yahoo.testing:test:jar:[1.0,2):compile");
    assertDoesNotThrow(() -> EnforceDependencies.validateDependencies(dependencies, rules, true));
}
|
/**
 * Computes numPartitions - 1 range-partition boundary keys from the sampled
 * sort keys, skipping duplicate values so each boundary is distinct.
 *
 * <p>NOTE(review): if the samples contain too few distinct values, trailing
 * slots of the returned array may remain null — callers appear to tolerate
 * this; confirm before relying on it.
 *
 * @param numPartitions number of target partitions
 * @param comparator comparator defining the sort order of the keys
 * @param samples sampled keys; sorted in place by this method
 * @return array of boundary keys (length numPartitions - 1)
 */
static SortKey[] rangeBounds(
        int numPartitions, Comparator<StructLike> comparator, SortKey[] samples) {
    // sort the keys first
    Arrays.sort(samples, comparator);
    int numCandidates = numPartitions - 1;
    SortKey[] candidates = new SortKey[numCandidates];
    // Pick roughly every step-th sample as a boundary candidate.
    int step = (int) Math.ceil((double) samples.length / numPartitions);
    int position = step - 1;
    int numChosen = 0;
    while (position < samples.length && numChosen < numCandidates) {
        SortKey candidate = samples[position];
        // skip duplicate values
        if (numChosen > 0 && candidate.equals(candidates[numChosen - 1])) {
            // linear probe for the next distinct value
            position += 1;
        } else {
            candidates[numChosen] = candidate;
            position += step;
            numChosen += 1;
        }
    }
    return candidates;
}
|
// With a single partition there are no boundaries to compute, so the result
// must be empty regardless of the samples.
@Test
public void testRangeBoundsOneChannel() {
    assertThat(
            SketchUtil.rangeBounds(
                1,
                SORT_ORDER_COMPARTOR,
                new SortKey[] {
                    CHAR_KEYS.get("a"),
                    CHAR_KEYS.get("b"),
                    CHAR_KEYS.get("c"),
                    CHAR_KEYS.get("d"),
                    CHAR_KEYS.get("e"),
                    CHAR_KEYS.get("f")
                }))
        .isEmpty();
}
|
/**
 * Resolves an icon reference to a servable URL.
 *
 * <p>Absolute http(s) URLs pass through unchanged. Relative references are
 * normalized to start with "/", have a duplicated context-root prefix stripped
 * (with special care for roots named "/images" or "/plugin", which collide
 * with the resource directories of the same name), and get the static
 * resource path inserted for core image and plugin resources.
 *
 * @param iconGuess the raw icon reference (URL, absolute or relative path)
 * @param rootURL the web application context root (may be empty)
 * @return the resolved icon URL
 */
static String guessIcon(String iconGuess, String rootURL) {
    String iconSource;
    //noinspection HttpUrlsUsage
    if (iconGuess.startsWith("http://") || iconGuess.startsWith("https://")) {
        iconSource = iconGuess;
    } else {
        if (!iconGuess.startsWith("/")) {
            iconGuess = "/" + iconGuess;
        }
        // Strip the context root if the guess already contains it — unless the
        // root itself is "/images" or "/plugin", where only a doubled prefix
        // proves the root was actually prepended.
        if (iconGuess.startsWith(rootURL)) {
            if ((!rootURL.equals("/images") && !rootURL.equals("/plugin")) || iconGuess.startsWith(rootURL + rootURL)) {
                iconGuess = iconGuess.substring(rootURL.length());
            }
        }
        // Core images and plugin resources are served under the static resource path.
        iconSource = rootURL + (iconGuess.startsWith("/images/") || iconGuess.startsWith("/plugin/") ? getResourcePath() : "") + iconGuess;
    }
    return iconSource;
}
|
// Exercises guessIcon across the tricky prefix cases: with/without context
// root, roots that collide with the "images"/"plugin" directories, doubled
// prefixes, and absolute URLs which must pass through untouched.
@Test
public void guessIcon() throws Exception {
    Jenkins.RESOURCE_PATH = "/static/12345678";
    assertEquals("/jenkins/static/12345678/images/48x48/green.gif", Functions.guessIcon("jenkins/images/48x48/green.gif", "/jenkins"));
    assertEquals("/jenkins/static/12345678/images/48x48/green.gif", Functions.guessIcon("/jenkins/images/48x48/green.gif", "/jenkins"));
    assertEquals("/static/12345678/images/48x48/green.gif", Functions.guessIcon("images/48x48/green.gif", ""));
    assertEquals("/jenkins/static/12345678/images/48x48/green.gif", Functions.guessIcon("images/48x48/green.gif", "/jenkins"));
    assertEquals("/jenkins/static/12345678/images/48x48/green.gif", Functions.guessIcon("/images/48x48/green.gif", "/jenkins"));
    assertEquals("/images/static/12345678/images/48x48/green.gif", Functions.guessIcon("/images/48x48/green.gif", "/images"));
    assertEquals("/static/12345678/plugin/myartifactId/images/48x48/green.gif", Functions.guessIcon("/plugin/myartifactId/images/48x48/green.gif", ""));
    assertEquals("/jenkins/static/12345678/plugin/myartifactId/images/48x48/green.gif", Functions.guessIcon("/plugin/myartifactId/images/48x48/green.gif", "/jenkins"));
    assertEquals("/jenkins/static/12345678/plugin/myartifactId/images/48x48/green.gif", Functions.guessIcon("/jenkins/plugin/myartifactId/images/48x48/green.gif", "/jenkins"));
    assertEquals("/plugin/static/12345678/plugin/myartifactId/images/48x48/green.gif", Functions.guessIcon("/plugin/myartifactId/images/48x48/green.gif", "/plugin"));
    assertEquals("/plugin/static/12345678/plugin/myartifactId/images/48x48/green.gif", Functions.guessIcon("/plugin/plugin/myartifactId/images/48x48/green.gif", "/plugin"));
    assertEquals("http://acme.com/icon.svg", Functions.guessIcon("http://acme.com/icon.svg", "/jenkins"));
    assertEquals("https://acme.com/icon.svg", Functions.guessIcon("https://acme.com/icon.svg", "/jenkins"));
}
|
/**
 * Creates a fully configured JAXB marshaller for the given class: properties,
 * optional validation event handler, and schema are applied.
 *
 * @param clazz the class to obtain a JAXB context for
 * @return the configured marshaller
 * @throws JAXBException if the marshaller cannot be created
 */
public Marshaller createMarshaller(Class<?> clazz) throws JAXBException {
    final Marshaller result = getContext(clazz).createMarshaller();
    setMarshallerProperties(result);
    // Only install an event handler when one was configured.
    if (marshallerEventHandler != null) {
        result.setEventHandler(marshallerEventHandler);
    }
    result.setSchema(marshallerSchema);
    return result;
}
|
// The builder's noNamespaceSchemaLocation must surface as the corresponding
// JAXB marshaller property.
@Test
void buildsMarshallerWithNoNamespaceSchemaLocationProperty() throws Exception {
    JAXBContextFactory factory =
            new JAXBContextFactory.Builder()
                .withMarshallerNoNamespaceSchemaLocation("http://apihost/schema.xsd").build();
    Marshaller marshaller = factory.createMarshaller(Object.class);
    assertThat(marshaller.getProperty(Marshaller.JAXB_NO_NAMESPACE_SCHEMA_LOCATION))
            .isEqualTo("http://apihost/schema.xsd");
}
|
/**
 * Lets the given service allocate its ports on this host, starting from the
 * wanted port, and returns the ports that were assigned.
 *
 * @param service the service requesting ports
 * @param wantedPort the preferred first port
 * @return the list of allocated port numbers
 */
List<Integer> allocatePorts(NetworkPortRequestor service, int wantedPort) {
    final PortAllocBridge bridge = new PortAllocBridge(this, service);
    service.allocatePorts(wantedPort, bridge);
    return bridge.result();
}
|
// Two services requesting the same fixed port on one host must fail.
@Test
void allocating_same_port_throws_exception() {
    assertThrows(RuntimeException.class, () -> {
        HostPorts host = new HostPorts("myhostname");
        MockRoot root = new MockRoot();
        TestService service1 = new TestService(root, 1);
        TestService service2 = new TestService(root, 1);
        host.allocatePorts(service1, HostPorts.BASE_PORT);
        // Second allocation of the same base port must throw.
        host.allocatePorts(service2, HostPorts.BASE_PORT);
    });
}
|
/**
 * De-duplicates routes across all test results, keeping for each route id the
 * occurrence with the highest exchange count (i.e. the best EIP coverage).
 * Results are accumulated in {@code routeMap}.
 */
protected void gatherBestRouteCoverages() {
    // get a de-duplicated list of the routes
    testResults.forEach(testResult -> {
        List<Route> routeList = testResult.getCamelContextRouteCoverage().getRoutes().getRouteList();
        routeList.forEach(route -> {
            String routeId = route.getId();
            // First occurrence wins by default; single lookup replaces the
            // original get/put/get sequence and mappedRoute is never null.
            Route mappedRoute = routeMap.computeIfAbsent(routeId, id -> route);
            // if the route appears multiple times in the test results,
            // look for the route with the best coverage of EIPs
            try {
                if (route.getExchangesTotal() > mappedRoute.getExchangesTotal()) {
                    routeMap.put(routeId, route);
                }
            } catch (Exception t) {
                // this is an edge case that needs to be identified. Log some useful debugging information.
                System.out.println(t.getClass().toString());
                System.out.printf("routeID: %s%n", routeId);
                System.out.printf("route: %s%n", route);
                System.out.printf("mappedRoute: %s%n", mappedRoute != null ? mappedRoute.toString() : "null");
            }
        });
    });
}
|
// After parsing the fixture results plus one extra synthetic result, the route
// map must go from empty to holding the 7 de-duplicated routes.
@Test
public void testGatherBestRouteCoverages() throws IllegalAccessException, IOException {
    @SuppressWarnings("unchecked")
    List<TestResult> testResults = (List<TestResult>) FieldUtils.readDeclaredField(processor, "testResults", true);
    @SuppressWarnings("unchecked")
    Map<String, Route> result = (Map<String, Route>) FieldUtils.readDeclaredField(processor, "routeMap", true);
    processor.parseAllTestResults(xmlPath());
    testResults.add(TestUtil.testResult());
    // Sanity check: nothing gathered yet.
    assertAll(
        () -> assertNotNull(result),
        () -> assertEquals(0, result.size()));
    processor.gatherBestRouteCoverages();
    assertAll(
        () -> assertNotNull(result),
        () -> assertEquals(7, result.size()));
}
|
/**
 * Runs the given action while holding the mailbox lock, guaranteeing mutual
 * exclusion with other lock-guarded mailbox operations. The lock is always
 * released, even if the action throws.
 */
@Override
public void runExclusively(Runnable runnable) {
    lock.lock();
    try {
        runnable.run();
    } finally {
        lock.unlock();
    }
}
|
// Enqueues 10 mails inside runExclusively on a separate thread and verifies
// that closing the mailbox observes all of them — i.e. the batch behaved
// atomically with respect to close().
@Test
void testRunExclusively() throws InterruptedException {
    CountDownLatch exclusiveCodeStarted = new CountDownLatch(1);
    final int numMails = 10;
    // send 10 mails in an atomic operation
    new Thread(
            () ->
                    taskMailbox.runExclusively(
                            () -> {
                                exclusiveCodeStarted.countDown();
                                for (int index = 0; index < numMails; index++) {
                                    try {
                                        taskMailbox.put(new Mail(() -> {}, 1, "mailD"));
                                        Thread.sleep(1);
                                    } catch (Exception e) {
                                    }
                                }
                            }))
            .start();
    exclusiveCodeStarted.await();
    // make sure that all 10 messages have been actually enqueued.
    assertThat(taskMailbox.close()).hasSize(numMails);
}
|
/**
 * Creates a Beam source reader for the given split.
 *
 * @param sourceSplit the split to read from, never null
 * @return a reader over the split's records
 * @throws IOException if the reader cannot be created
 */
protected abstract Source.Reader<T> createReader(@Nonnull FlinkSourceSplit<T> sourceSplit)
        throws IOException;
|
// After reading all records from all splits, the records-in counter must equal
// the total number of records produced.
@Test
public void testNumBytesInMetrics() throws Exception {
    final int numSplits = 2;
    final int numRecordsPerSplit = 10;
    List<FlinkSourceSplit<KV<Integer, Integer>>> splits =
            createSplits(numSplits, numRecordsPerSplit, 0);
    SourceTestMetrics.TestMetricGroup testMetricGroup = new SourceTestMetrics.TestMetricGroup();
    try (SourceReader<OutputT, FlinkSourceSplit<KV<Integer, Integer>>> reader =
            createReader(null, -1L, null, testMetricGroup)) {
        pollAndValidate(reader, splits, false);
    }
    assertEquals(numRecordsPerSplit * numSplits, testMetricGroup.numRecordsInCounter.getCount());
}
|
/**
 * Windowed left join with a key-agnostic joiner; adapts the joiner to the
 * key-aware variant and delegates.
 */
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
                                        final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
                                        final JoinWindows windows) {
    return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
}
|
// Passing a null StreamJoined to leftJoin must fail fast with a clear NPE.
@SuppressWarnings("deprecation")
@Test
public void shouldNotAllowNullStreamJoinedOnLeftJoin() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.leftJoin(
            testStream,
            MockValueJoiner.TOSTRING_JOINER,
            JoinWindows.of(ofMillis(10)),
            (StreamJoined<String, String, String>) null));
    assertThat(exception.getMessage(), equalTo("streamJoined can't be null"));
}
|
/**
 * Feeds incoming bytes to the current frame parser, lazily creating the
 * header parser on first use.
 *
 * @param input the buffered incoming data
 * @param readSize number of newly available bytes to consume
 * @throws Exception if the frame cannot be parsed
 */
public void parse(DataByteArrayInputStream input, int readSize) throws Exception {
    if (currentParser == null) {
        currentParser = initializeHeaderParser();
    }
    // Parser stack will run until current incoming data has all been consumed.
    currentParser.parse(input, readSize);
}
|
// Round-trips a PUBLISH frame with a MESSAGE_SIZE payload through the wire
// format and codec, verifying exactly one frame is decoded with the payload
// intact and the dup flag preserved.
@Test
public void testMessageDecoding() throws Exception {
    byte[] CONTENTS = new byte[MESSAGE_SIZE];
    for (int i = 0; i < MESSAGE_SIZE; i++) {
        CONTENTS[i] = 'a';
    }
    PUBLISH publish = new PUBLISH();
    publish.dup(false);
    publish.messageId((short) 127);
    publish.qos(QoS.AT_LEAST_ONCE);
    publish.payload(new Buffer(CONTENTS));
    publish.topicName(new UTF8Buffer("TOPIC"));
    // Marshal the frame, then feed the raw bytes back through the codec.
    DataByteArrayOutputStream output = new DataByteArrayOutputStream();
    wireFormat.marshal(publish.encode(), output);
    Buffer marshalled = output.toBuffer();
    DataByteArrayInputStream input = new DataByteArrayInputStream(marshalled);
    codec.parse(input, marshalled.length());
    // assertFalse instead of assertTrue(!...) for readability.
    assertFalse(frames.isEmpty());
    assertEquals(1, frames.size());
    publish = new PUBLISH().decode(frames.get(0));
    assertFalse(publish.dup());
    assertEquals(MESSAGE_SIZE, publish.payload().length());
}
|
/**
 * Calculates Total Daily Energy Expenditure: BMR scaled by the activity-level
 * multiplier, rounded to two decimal places (HALF_DOWN).
 *
 * @param activeLevel the activity level; {@code null} yields zero
 * @return the TDEE value with scale 2 (or zero for a null level)
 */
public BigDecimal calculateTDEE(ActiveLevel activeLevel) {
    if (activeLevel == null) {
        // BigDecimal.ZERO is the idiomatic (and identical) form of valueOf(0).
        return BigDecimal.ZERO;
    }
    // NOTE(review): the upstream accessor is spelled getMultiplayer(); it
    // returns the activity multiplier.
    BigDecimal multiplier = BigDecimal.valueOf(activeLevel.getMultiplayer());
    return multiplier.multiply(BMR).setScale(2, RoundingMode.HALF_DOWN);
}
|
// TDEE for the SUPER active level must equal BMR times its multiplier,
// rounded to two decimals.
@Test
void calculateTDEE_SUPER_ACTIVE() {
    BigDecimal TDEE = bmrCalculator.calculate(attributes).calculateTDEE(ActiveLevel.SUPER);
    assertEquals(new BigDecimal("3880.75"), TDEE);
}
|
/**
 * Processes one MySQL protocol command: resets per-command state, reads one
 * packet from the channel, dispatches it, and finalizes the response.
 *
 * @throws IOException if reading the packet fails
 */
public void processOnce() throws IOException {
    // set status of query to OK.
    ctx.getState().reset();
    executor = null;
    // reset sequence id of MySQL protocol
    final MysqlChannel channel = ctx.getMysqlChannel();
    channel.setSequenceId(0);
    // read packet from channel
    try {
        packetBuf = channel.fetchOnePacket();
        if (packetBuf == null) {
            throw new RpcException(ctx.getRemoteIP(), "Error happened when receiving packet.");
        }
    } catch (AsynchronousCloseException e) {
        // when this happened, timeout checker close this channel
        // killed flag in ctx has been already set, just return
        return;
    }
    // dispatch
    dispatch();
    // finalize
    finalizeCommand();
    // Back to the idle state until the next command arrives.
    ctx.setCommand(MysqlCommand.COM_SLEEP);
}
|
// An init-warehouse packet must be handled as COM_INIT_DB and answered with OK.
@Test
public void testInitWarehouse() throws IOException {
    ConnectContext ctx = initMockContext(mockChannel(initWarehousePacket), GlobalStateMgr.getCurrentState());
    ctx.setCurrentUserIdentity(UserIdentity.ROOT);
    ctx.setQualifiedUser(AuthenticationMgr.ROOT_USER);
    ConnectProcessor processor = new ConnectProcessor(ctx);
    processor.processOnce();
    Assert.assertEquals(MysqlCommand.COM_INIT_DB, myContext.getCommand());
    Assert.assertTrue(myContext.getState().toResponsePacket() instanceof MysqlOkPacket);
}
|
/**
 * Clears the display name of a renamed item when it still equals the old
 * name, i.e. when the display name was never explicitly set and merely
 * mirrored the project name (see bug 5077308).
 */
@Override
public void onRenamed(Item item, String oldName, String newName) {
    // bug 5077308 - Display name field should be cleared when you rename a job.
    if (item instanceof AbstractItem) {
        AbstractItem abstractItem = (AbstractItem) item;
        if (oldName.equals(abstractItem.getDisplayName())) {
            // the user renamed the job, but the old project name which is shown as the
            // displayname if no displayname was set, has been set into the displayname field.
            // This means that the displayname was never set, so we want to set it
            // to null as it was before
            try {
                LOGGER.info(String.format("onRenamed():Setting displayname to null for item.name=%s", item.getName()));
                abstractItem.setDisplayName(null);
            }
            catch (IOException ioe) {
                // Best effort: log and continue — the rename itself already succeeded.
                LOGGER.log(Level.WARNING, String.format("onRenamed():Exception while trying to clear the displayName for Item.name:%s",
                        item.getName()), ioe);
            }
        }
    }
}
|
// When a custom display name differs from the old job name, onRenamed must
// leave the display name untouched.
@Test
public void testOnRenamedOldNameNotEqualDisplayName() throws Exception {
    DisplayNameListener listener = new DisplayNameListener();
    final String oldName = "old job name";
    final String newName = "new job name";
    final String displayName = "the display name";
    StubJob src = new StubJob();
    src.doSetName(newName);
    src.setDisplayName(displayName);
    // Fix: pass newName as the new name (the original passed oldName twice,
    // leaving newName unused; the listener's behavior is the same either way).
    listener.onRenamed(src, oldName, newName);
    // make sure displayname is still intact
    assertEquals(displayName, src.getDisplayName());
}
|
/**
 * Renders a step line with the keyword and plain text in {@code textFormat}
 * and each matched argument span in {@code argFormat}.
 *
 * <p>Arguments nested inside an enclosing argument (their start precedes the
 * current write position) are skipped so the enclosing span's formatting is
 * not broken up.
 *
 * @param keyword the step keyword (e.g. "Given ")
 * @param stepText the raw step text
 * @param textFormat format applied to keyword and non-argument text
 * @param argFormat format applied to argument spans
 * @param arguments matched arguments, in order of appearance
 * @return the formatted step line
 */
String formatStepText(
        String keyword, String stepText, Format textFormat, Format argFormat, List<Argument> arguments
) {
    int beginIndex = 0;
    StringBuilder result = new StringBuilder(textFormat.text(keyword));
    for (Argument argument : arguments) {
        // can be null if the argument is missing.
        if (argument.getValue() != null) {
            int argumentOffset = argument.getStart();
            // a nested argument starts before the enclosing argument ends;
            // ignore it when formatting
            if (argumentOffset < beginIndex) {
                continue;
            }
            String text = stepText.substring(beginIndex, argumentOffset);
            result.append(textFormat.text(text));
        }
        // val can be null if the argument isn't there, for example
        // @And("(it )?has something")
        if (argument.getValue() != null) {
            String text = stepText.substring(argument.getStart(), argument.getEnd());
            result.append(argFormat.text(text));
            // set beginIndex to end of argument
            beginIndex = argument.getEnd();
        }
    }
    // Append any trailing text after the last argument.
    if (beginIndex != stepText.length()) {
        String text = stepText.substring(beginIndex);
        result.append(textFormat.text(text));
    }
    return result.toString();
}
|
@Test
void should_mark_nested_arguments_as_part_of_enclosing_argument() {
    // The expression has capture groups nested inside an optional group; the
    // nested matches must render as part of the enclosing argument, not as
    // separate highlighted segments.
    StepTypeRegistry typeRegistry = new StepTypeRegistry(Locale.ENGLISH);
    StepExpressionFactory expressionFactory = new StepExpressionFactory(typeRegistry, bus);
    StepDefinition definition = new StubStepDefinition("^the order is placed( and (not( yet)? )?confirmed)?$",
        String.class);
    StepExpression expression = expressionFactory.createExpression(definition);

    Formats formats = ansi();
    PrettyFormatter formatter = new PrettyFormatter(new ByteArrayOutputStream());
    String stepText = "the order is placed and not yet confirmed";
    String formattedText = formatter.formatStepText("Given ", stepText, formats.get("passed"),
        formats.get("passed_arg"), createArguments(expression.match(stepText)));

    assertThat(formattedText, equalTo(AnsiEscapes.GREEN + "Given " + AnsiEscapes.RESET +
            AnsiEscapes.GREEN + "the order is placed" + AnsiEscapes.RESET +
            AnsiEscapes.GREEN + AnsiEscapes.INTENSITY_BOLD + " and not yet confirmed" + AnsiEscapes.RESET));
}
|
/**
 * Computes a set of unload (shed) decisions that move load away from overloaded brokers,
 * optionally transferring the shed bundles to the least-loaded broker.
 *
 * <p>Flow: refresh broker load stats; gate on the shedding condition (load std-dev above
 * target, or an individually under/over-loaded broker, sustained for the configured hit
 * count); then repeatedly pick the max-loaded broker and mark its top bundles for unload
 * until the offload throughput target is met or no transferable brokers remain.
 *
 * @param context load manager context providing broker registry, configuration and load stores
 * @param recentlyUnloadedBundles bundle name -> unload timestamp; such bundles are skipped
 * @param recentlyUnloadedBrokers broker name -> unload timestamp; excluded from stat updates
 * @return the cached set of unload decisions made in this run (empty when shedding is skipped)
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
        // Refresh per-broker load statistics; a non-empty skipReason aborts this run.
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }
        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;
        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Shedding triggers when the cluster load spread exceeds the target, or when a
        // specific broker is individually under/over-loaded, sustained over a hit count
        // to avoid reacting to transient spikes.
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }
        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }
        // Main shed loop: repeatedly take the max-loaded broker and offload toward the
        // min-loaded broker until brokers are exhausted or load is balanced.
        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + " Exhausted target transfer brokers.");
                }
                break;
            }
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                                    + "load std:%.2f <= targetStd:%.2f. "
                                    + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                                    + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                                    + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }
            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            // Aim to offload half of the load gap between max and min brokers, expressed
            // as a throughput budget proportional to the max broker's traffic.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;
            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100,
                        targetStd,
                        offload * 100,
                        offloadThroughput / KB
                ));
            }
            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;
            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                // A single bundle cannot be split off; unloading it would just move the
                // whole hotspot to another broker.
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                                + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }
            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter =
                    minBundlesLoadData.isPresent() ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() :
                            null;
            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }
            // Walk the max broker's top bundles, marking them to unload until the
            // offload throughput budget is consumed.
            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                if (remainingTopBundles <= 1) {
                    // Always keep at least one top bundle on the max broker.
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                        + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                        + " less than or equal to 1.",
                                bundle, maxBroker));
                    }
                    break;
                }
                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                            + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                            + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }
                Unload unload;
                if (transfer) {
                    if (swap) {
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                        }
                        // Fix: accumulate the swapped-in traffic unconditionally. This was
                        // previously inside the debugMode block, so the gain accounting
                        // silently depended on whether debug logging was enabled.
                        trafficMarkedToGain += minBrokerBundleSwapThroughput;
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;
                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                                    + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                                    + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB,
                            trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB,
                            offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Project the net offloaded throughput back into load units and update
                // the stats so the next loop iteration sees the post-shed picture.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }
        } // while end
        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }
        if (decisionCache.isEmpty()) {
            // Record why this run produced no decisions, in priority order.
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }
    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
|
@Test
public void testTargetStdAfterTransfer() {
    UnloadCounter unloadCounter = new UnloadCounter();
    TransferShedder shedder = new TransferShedder(unloadCounter);
    var context = setupContext();
    // Push CPU load so broker5 is the most loaded and broker4 just behind it.
    var loadStore = context.brokerLoadDataStore();
    loadStore.pushAsync("broker4:8080", getCpuLoad(context, 55, "broker4:8080"));
    loadStore.pushAsync("broker5:8080", getCpuLoad(context, 65, "broker5:8080"));

    var decisions = shedder.findBundlesForUnloading(context, Map.of(), Map.of());

    // Exactly one transfer is expected: bundleE1 off the overloaded broker5 to broker1.
    var expectedDecisions = new HashSet<UnloadDecision>();
    expectedDecisions.add(new UnloadDecision(new Unload("broker5:8080", bundleE1, Optional.of("broker1:8080")),
            Success, Overloaded));
    assertEquals(decisions, expectedDecisions);
    // Load average and std-dev recorded by the counter after the simulated transfer.
    assertEquals(unloadCounter.getLoadAvg(), 0.26400000000000007);
    assertEquals(unloadCounter.getLoadStd(), 0.27644891028904417);
}
|
/**
 * Detects a charset from a leading byte-order mark, reading at most MAX_BYTES
 * from the stream and resetting it afterwards so callers see all the data.
 * Returns null when fewer than MIN_BYTES are available or no known BOM matches.
 */
@Override
public Charset detect(InputStream input, Metadata metadata) throws IOException {
    input.mark(MAX_BYTES);
    byte[] prefix = new byte[MAX_BYTES];
    try {
        int numRead = IOUtils.read(input, prefix);
        if (numRead < MIN_BYTES) {
            // Too short to contain any BOM we recognize.
            return null;
        }
        if (numRead < MAX_BYTES) {
            // Shrink the buffer to the bytes actually read.
            byte[] shrunk = new byte[numRead];
            System.arraycopy(prefix, 0, shrunk, 0, numRead);
            prefix = shrunk;
        }
    } finally {
        input.reset();
    }
    // BOMS and CHARSETS are parallel arrays: return the charset of the first
    // BOM that is a prefix of the stream.
    int i = 0;
    for (ByteOrderMark bom : BOMS) {
        if (startsWith(bom, prefix)) {
            return CHARSETS[i];
        }
        i++;
    }
    return null;
}
|
@Test
public void testShort() throws Exception {
    EncodingDetector detector = new BOMDetector();
    // For each supported BOM, feed only its first byte padded with spaces:
    // a truncated BOM must never be detected as a charset.
    // Fix: the array previously listed UTF_16BE twice and omitted UTF_32BE,
    // so the UTF-32 big-endian case was never exercised.
    for (ByteOrderMark bom : new ByteOrderMark[] {
        ByteOrderMark.UTF_8, ByteOrderMark.UTF_16BE, ByteOrderMark.UTF_16LE, ByteOrderMark.UTF_32BE,
        ByteOrderMark.UTF_32LE
    }) {
        byte[] bytes = new byte[3];
        // Copy only the first byte of the BOM so it is incomplete.
        System.arraycopy(bom.getBytes(), 0, bytes, 0, 1);
        bytes[1] = (byte)32;
        bytes[2] = (byte)32;
        try (InputStream is = UnsynchronizedByteArrayInputStream.builder().setByteArray(bytes).get()) {
            assertNull(detector.detect(is, new Metadata()));
        }
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.