| focal_method | test_case |
|---|---|
@Override
public void streamRequest(StreamRequest request, Callback<StreamResponse> callback)
{
streamRequest(request, new RequestContext(), callback);
}
|
@Test
public void testStreamException() throws Exception
{
SimpleLoadBalancer balancer = prepareLoadBalancer(Arrays.asList("http://test.linkedin.com/retry1", "http://test.linkedin.com/bad"),
HttpClientFactory.UNLIMITED_CLIENT_REQUEST_RETRY_RATIO);
DynamicClient dynamicClient = new DynamicClient(balancer, null);
RetryClient client = new RetryClient(
dynamicClient,
balancer,
D2ClientConfig.DEFAULT_RETRY_LIMIT,
RetryClient.DEFAULT_UPDATE_INTERVAL_MS,
RetryClient.DEFAULT_AGGREGATED_INTERVAL_NUM,
SystemClock.instance(),
true,
true);
URI uri = URI.create("d2://retryService?arg1=empty&arg2=empty");
StreamRequest streamRequest = new StreamRequestBuilder(uri).build(EntityStreams.emptyStream());
DegraderTrackerClientTest.TestCallback<StreamResponse> streamCallback = new DegraderTrackerClientTest.TestCallback<>();
RequestContext context = new RequestContext();
KeyMapper.TargetHostHints.setRequestContextTargetHost(context, URI.create("http://test.linkedin.com/bad"));
client.streamRequest(streamRequest, context, streamCallback);
assertNull(streamCallback.t);
assertNotNull(streamCallback.e);
assertTrue(streamCallback.e.getMessage().contains("exception happens"), streamCallback.e.getMessage());
}
|
public static RowCoder of(Schema schema) {
return new RowCoder(schema);
}
|
@Test
public void testArrayOfRow() throws Exception {
Schema nestedSchema = Schema.builder().addInt32Field("f1_int").addStringField("f1_str").build();
FieldType collectionElementType = FieldType.row(nestedSchema);
Schema schema = Schema.builder().addArrayField("f_array", collectionElementType).build();
Row row =
Row.withSchema(schema)
.addArray(
Row.withSchema(nestedSchema).addValues(1, "one").build(),
Row.withSchema(nestedSchema).addValues(2, "two").build(),
Row.withSchema(nestedSchema).addValues(3, "three").build())
.build();
CoderProperties.coderDecodeEncodeEqual(RowCoder.of(schema), row);
}
|
public void abort(final int index)
{
final int recordIndex = computeRecordIndex(index);
final AtomicBuffer buffer = this.buffer;
final int recordLength = verifyClaimedSpaceNotReleased(buffer, recordIndex);
buffer.putInt(typeOffset(recordIndex), PADDING_MSG_TYPE_ID);
buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
}
|
@Test
void abortMarksUnusedSpaceAsPadding()
{
final int index = 108;
final int recordIndex = index - HEADER_LENGTH;
final int recordLength = -11111;
when(buffer.getInt(lengthOffset(recordIndex))).thenReturn(recordLength);
ringBuffer.abort(index);
final InOrder inOrder = inOrder(buffer);
inOrder.verify(buffer).getInt(lengthOffset(recordIndex));
inOrder.verify(buffer).putInt(typeOffset(recordIndex), PADDING_MSG_TYPE_ID);
inOrder.verify(buffer).putIntOrdered(lengthOffset(recordIndex), -recordLength);
inOrder.verifyNoMoreInteractions();
}
|
@Override
public String generateSegmentName(int sequenceId, @Nullable Object minTimeValue, @Nullable Object maxTimeValue) {
return Joiner.on(DELIMITER).join(_prefix, _tableName, _partitionId, _creationTimeMillis,
StringUtils.isBlank(_suffix) ? sequenceId : _suffix);
}
|
@Test
public void testGenerateSegmentName() {
String tableName = "tableName";
int partitionId = 1;
long creationTimeMillis = 1234567890L;
int sequenceId = 2;
UploadedRealtimeSegmentNameGenerator generator =
new UploadedRealtimeSegmentNameGenerator(tableName, partitionId, creationTimeMillis, "prefix", "suffix");
String expectedSegmentName = "prefix__tableName__1__1234567890__suffix";
String actualSegmentName = generator.generateSegmentName(sequenceId, null, null);
assertEquals(actualSegmentName, expectedSegmentName);
}
|
@Override
public void stop() {
if (!isStarted()) return;
try {
runner.stop();
super.stop();
}
catch (IOException ex) {
addError("server shutdown error: " + ex, ex);
}
}
|
@Test
public void testStopWhenNotStarted() throws Exception {
appender.stop();
assertEquals(0, runner.getStartCount());
}
|
@Override
public int next() {
IntPair currentRange = _docIdRanges.get(_currentRangeId);
if (_nextDocId <= currentRange.getRight()) {
// Next document id is within the current range
return _nextDocId++;
}
if (_currentRangeId < _numRanges - 1) {
// Move to the next range
_currentRangeId++;
_nextDocId = _docIdRanges.get(_currentRangeId).getLeft();
return _nextDocId++;
} else {
return Constants.EOF;
}
}
|
@Test
public void testPairWithSameStartAndEnd() {
SortedDocIdSet sortedDocIdSet = new SortedDocIdSet(Collections.singletonList(new Pairs.IntPair(1, 1)));
BlockDocIdIterator iterator = sortedDocIdSet.iterator();
List<Integer> result = new ArrayList<>();
int docId;
while ((docId = iterator.next()) != Constants.EOF) {
result.add(docId);
}
assertEquals(result, Collections.singletonList(1));
}
|
@Operation(summary = "list", description = "List host-components")
@GetMapping("/services/{serviceId}")
public ResponseEntity<List<HostComponentVO>> listByService(
@PathVariable Long clusterId, @PathVariable Long serviceId) {
return ResponseEntity.success(hostComponentService.listByService(clusterId, serviceId));
}
|
@Test
void listByServiceReturnsHostComponentsForService() {
Long clusterId = 1L;
Long serviceId = 1L;
List<HostComponentVO> hostComponents = Arrays.asList(new HostComponentVO(), new HostComponentVO());
when(hostComponentService.listByService(clusterId, serviceId)).thenReturn(hostComponents);
ResponseEntity<List<HostComponentVO>> response = hostComponentController.listByService(clusterId, serviceId);
assertTrue(response.isSuccess());
assertEquals(hostComponents, response.getData());
}
|
@Override
public String toString() {
final StringBuilder builder = new StringBuilder(2 * 6 + 5);
for (final byte b : this.address) {
if (builder.length() > 0) {
builder.append(':');
}
builder.append(String.format("%02X", b & 0xFF));
}
return builder.toString();
}
|
@Test
public void testToString() throws Exception {
assertEquals(MAC_ONOS_STR, MAC_ONOS.toString());
}
|
public void remove(SequenceSet sequenceSet) {
Sequence node = sequenceSet.getHead();
while (node != null) {
remove(node);
node = node.getNext();
}
}
|
@Test
public void testRemove() {
SequenceSet set = new SequenceSet();
set.add(new Sequence(0, 100));
assertEquals(101, set.rangeSize());
assertEquals(1, set.size());
assertFalse(set.remove(101));
assertTrue(set.remove(50));
assertEquals(2, set.size());
assertEquals(100, set.rangeSize());
assertFalse(set.remove(101));
set.remove(0);
assertEquals(2, set.size());
assertEquals(99, set.rangeSize());
set.remove(100);
assertEquals(2, set.size());
assertEquals(98, set.rangeSize());
set.remove(10);
assertEquals(3, set.size());
assertEquals(97, set.rangeSize());
SequenceSet toRemove = new SequenceSet();
toRemove.add(new Sequence(0, 100));
set.remove(toRemove);
assertEquals(0, set.size());
assertEquals(0, set.rangeSize());
}
|
@Override
public String getCookie(boolean decode) {
return null;
}
|
@Test
public void getCookie() {
mSensorsAPI.setCookie("cookie", false);
Assert.assertNull(mSensorsAPI.getCookie(false));
}
|
@Nullable
@Override
public JobGraph recoverJobGraph(JobID jobId) throws Exception {
checkNotNull(jobId, "Job ID");
LOG.debug("Recovering job graph {} from {}.", jobId, jobGraphStateHandleStore);
final String name = jobGraphStoreUtil.jobIDToName(jobId);
synchronized (lock) {
verifyIsRunning();
boolean success = false;
RetrievableStateHandle<JobGraph> jobGraphRetrievableStateHandle;
try {
try {
jobGraphRetrievableStateHandle = jobGraphStateHandleStore.getAndLock(name);
} catch (StateHandleStore.NotExistException ignored) {
success = true;
return null;
} catch (Exception e) {
throw new FlinkException(
"Could not retrieve the submitted job graph state handle "
+ "for "
+ name
+ " from the submitted job graph store.",
e);
}
JobGraph jobGraph;
try {
jobGraph = jobGraphRetrievableStateHandle.retrieveState();
} catch (ClassNotFoundException cnfe) {
throw new FlinkException(
"Could not retrieve submitted JobGraph from state handle under "
+ name
+ ". This indicates that you are trying to recover from state written by an "
+ "older Flink version which is not compatible. Try cleaning the state handle store.",
cnfe);
} catch (IOException ioe) {
throw new FlinkException(
"Could not retrieve submitted JobGraph from state handle under "
+ name
+ ". This indicates that the retrieved state handle is broken. Try cleaning the state handle "
+ "store.",
ioe);
}
addedJobGraphs.add(jobGraph.getJobID());
LOG.info("Recovered {}.", jobGraph);
success = true;
return jobGraph;
} finally {
if (!success) {
jobGraphStateHandleStore.release(name);
}
}
}
}
|
@Test
public void testRecoverJobGraphWhenNotExist() throws Exception {
final TestingStateHandleStore<JobGraph> stateHandleStore =
builder.setGetFunction(
ignore -> {
throw new StateHandleStore.NotExistException(
"Not exist exception.");
})
.build();
final JobGraphStore jobGraphStore = createAndStartJobGraphStore(stateHandleStore);
final JobGraph recoveredJobGraph =
jobGraphStore.recoverJobGraph(testingJobGraph.getJobID());
assertThat(recoveredJobGraph, is(nullValue()));
}
|
@Override
public int getAttemptCount(int subtaskIndex) {
Preconditions.checkArgument(subtaskIndex >= 0);
if (subtaskIndex >= attemptCounts.size()) {
return 0;
}
return attemptCounts.get(subtaskIndex);
}
|
@Test
void testNegativeSubtaskIndexRejected() {
final DefaultSubtaskAttemptNumberStore subtaskAttemptNumberStore =
new DefaultSubtaskAttemptNumberStore(Collections.emptyList());
assertThatThrownBy(() -> subtaskAttemptNumberStore.getAttemptCount(-1))
.isInstanceOf(IllegalArgumentException.class);
}
|
@Override
public String getRomOAID() {
String oaid = null;
try {
Intent intent = new Intent();
intent.setClassName("com.zui.deviceidservice", "com.zui.deviceidservice.DeviceidService");
if (mContext.bindService(intent, mService, Context.BIND_AUTO_CREATE)) {
LenovoInterface anInterface = new LenovoInterface(OAIDService.BINDER_QUEUE.take());
oaid = anInterface.getOAID();
mContext.unbindService(mService);
}
} catch (Throwable th) {
SALog.i(TAG, th);
}
return oaid;
}
|
@Test
public void getRomOAID() {
LenovoImpl lenovo = new LenovoImpl(mApplication);
// if (lenovo.isSupported()) {
// Assert.assertNull(lenovo.getRomOAID());
// }
}
|
@Deprecated(forRemoval = true, since = "13.0")
public static byte[] convertJavaToText(Object source, MediaType sourceMediaType, MediaType destinationMediaType) {
if (source == null) return null;
if (sourceMediaType == null || destinationMediaType == null) {
throw new NullPointerException("sourceMediaType and destinationMediaType cannot be null!");
}
Object decoded = decodeObjectContent(source, sourceMediaType);
if (decoded instanceof byte[]) {
return convertCharset(decoded, StandardCharsets.UTF_8, destinationMediaType.getCharset());
} else {
String asString = decoded.toString();
return asString.getBytes(destinationMediaType.getCharset());
}
}
|
@Test
public void testJavaToTextConversion() {
String string = "I've seen things you people wouldn't believe.";
Double number = 12.1d;
Calendar complex = Calendar.getInstance();
MediaType stringType = APPLICATION_OBJECT.withParameter("type", "java.lang.String");
byte[] result1 = StandardConversions.convertJavaToText(string, stringType, TEXT_PLAIN.withCharset(UTF_16BE));
assertArrayEquals(string.getBytes(UTF_16BE), result1);
MediaType doubleType = APPLICATION_OBJECT.withParameter("type", "java.lang.Double");
byte[] result2 = StandardConversions.convertJavaToText(number, doubleType, TEXT_PLAIN.withCharset(US_ASCII));
assertArrayEquals("12.1".getBytes(US_ASCII), result2);
MediaType customType = APPLICATION_OBJECT.withParameter("type", complex.getClass().getName());
byte[] bytes = StandardConversions.convertJavaToText(complex, customType, TEXT_PLAIN.withCharset(US_ASCII));
assertEquals(complex.toString(), new String(bytes));
}
|
@VisibleForTesting
ExportResult<PhotosContainerResource> exportPhotos(
TokensAndUrlAuthData authData,
Optional<IdOnlyContainerResource> albumData,
Optional<PaginationData> paginationData,
UUID jobId)
throws IOException, InvalidTokenException, PermissionDeniedException, UploadErrorException {
Optional<String> albumId = Optional.empty();
if (albumData.isPresent()) {
albumId = Optional.of(albumData.get().getId());
}
Optional<String> paginationToken = getPhotosPaginationToken(paginationData);
MediaItemSearchResponse mediaItemSearchResponse =
getOrCreatePhotosInterface(authData).listMediaItems(albumId, paginationToken);
PaginationData nextPageData = null;
if (!Strings.isNullOrEmpty(mediaItemSearchResponse.getNextPageToken())) {
nextPageData =
new StringPaginationToken(
PHOTO_TOKEN_PREFIX + mediaItemSearchResponse.getNextPageToken());
}
ContinuationData continuationData = new ContinuationData(nextPageData);
PhotosContainerResource containerResource = null;
GoogleMediaItem[] mediaItems = mediaItemSearchResponse.getMediaItems();
if (mediaItems != null && mediaItems.length > 0) {
List<PhotoModel> photos = convertPhotosList(albumId, mediaItems, jobId);
containerResource = new PhotosContainerResource(null, photos);
}
ResultType resultType = ResultType.CONTINUE;
if (nextPageData == null) {
resultType = ResultType.END;
}
return new ExportResult<>(resultType, containerResource, continuationData);
}
|
@Test
public void exportPhotoSubsequentSet()
throws IOException, InvalidTokenException, PermissionDeniedException, UploadErrorException {
setUpSingleAlbum();
when(albumListResponse.getNextPageToken()).thenReturn(null);
GoogleMediaItem mediaItem = setUpSinglePhoto(IMG_URI, PHOTO_ID);
when(mediaItemSearchResponse.getMediaItems()).thenReturn(new GoogleMediaItem[] {mediaItem});
when(mediaItemSearchResponse.getNextPageToken()).thenReturn(null);
StringPaginationToken inputPaginationToken =
new StringPaginationToken(PHOTO_TOKEN_PREFIX + PHOTO_TOKEN);
IdOnlyContainerResource idOnlyContainerResource = new IdOnlyContainerResource(ALBUM_ID);
// Run test
ExportResult<PhotosContainerResource> result =
googlePhotosExporter.exportPhotos(
null, Optional.of(idOnlyContainerResource), Optional.of(inputPaginationToken), uuid);
// Check results
// Verify correct methods were called
verify(photosInterface).listMediaItems(Optional.of(ALBUM_ID), Optional.of(PHOTO_TOKEN));
verify(mediaItemSearchResponse).getMediaItems();
// Check pagination token
ContinuationData continuationData = result.getContinuationData();
PaginationData paginationToken = continuationData.getPaginationData();
assertNull(paginationToken);
}
|
@Converter
public static byte[] toByteArray(IoBuffer buffer) {
byte[] answer = new byte[buffer.remaining()];
buffer.get(answer);
return answer;
// we should not mark and reset the buffer with mina
}
|
@Test
public void testToByteArray() {
byte[] in = "Hello World".getBytes();
IoBuffer bb = IoBuffer.wrap(in);
byte[] out = MinaConverter.toByteArray(bb);
for (int i = 0; i < out.length; i++) {
assertEquals(in[i], out[i]);
}
}
|
@Override
public void next(DeviceId deviceId, NextObjective nextObjective) {
process(deviceId, nextObjective);
}
|
@Test
@Ignore("Not supported")
public void nextPending() {
// Note: current logic will double check if the next obj need to be queued
// it does not check when resubmitting pending next back to the queue
expect(mgr.flowObjectiveStore.getNextGroup(NID1)).andReturn(null).times(6);
expect(mgr.flowObjectiveStore.getNextGroup(NID2)).andReturn(null).times(6);
replay(mgr.flowObjectiveStore);
expectNextObjsPending.forEach(nextObj -> mgr.next(DEV1, nextObj));
// Wait for the pipeline operation to complete
int expectedTime = (bound + offset) * 8;
assertAfter(expectedTime, expectedTime * 5, () -> assertEquals(expectNextObjs.size(), actualObjs.size()));
assertTrue(actualObjs.indexOf(NEXT1) < actualObjs.indexOf(NEXT5));
assertTrue(actualObjs.indexOf(NEXT5) < actualObjs.indexOf(NEXT3));
assertTrue(actualObjs.indexOf(NEXT3) < actualObjs.indexOf(NEXT7));
assertTrue(actualObjs.indexOf(NEXT2) < actualObjs.indexOf(NEXT6));
assertTrue(actualObjs.indexOf(NEXT6) < actualObjs.indexOf(NEXT4));
assertTrue(actualObjs.indexOf(NEXT4) < actualObjs.indexOf(NEXT8));
verify(mgr.flowObjectiveStore);
}
|
@Override
@SuppressWarnings("NullAway")
public void onRemoval(K key, @Nullable Expirable<V> expirable, RemovalCause cause) {
if (expirable != null) {
V value = expirable.get();
if (cause == RemovalCause.EXPIRED) {
dispatcher.publishExpiredQuietly(cache, key, value);
} else {
dispatcher.publishRemovedQuietly(cache, key, value);
}
statistics.recordEvictions(1L);
}
}
|
@Test(dataProvider = "notifications")
public void publishIfEvicted(Integer key, Expirable<Integer> value, RemovalCause cause) {
listener.onRemoval(key, value, cause);
if (cause.wasEvicted()) {
if (cause == RemovalCause.EXPIRED) {
verify(entryListener).onExpired(any());
} else {
verify(entryListener).onRemoved(any());
}
assertThat(statistics.getCacheEvictions()).isEqualTo(1L);
} else {
verify(entryListener, never()).onRemoved(any());
assertThat(statistics.getCacheEvictions()).isEqualTo(0L);
}
}
|
@Override
public V delete(final K key) {
Objects.requireNonNull(key, "key cannot be null");
try {
return maybeMeasureLatency(() -> outerValue(wrapped().delete(keyBytes(key))), time, deleteSensor);
} catch (final ProcessorStateException e) {
final String message = String.format(e.getMessage(), key);
throw new ProcessorStateException(message, e);
}
}
|
@Test
public void shouldThrowNullPointerOnDeleteIfKeyIsNull() {
setUpWithoutContext();
assertThrows(NullPointerException.class, () -> metered.delete(null));
}
|
@Override
protected void run(final Environment environment,
final Namespace namespace,
final WhisperServerConfiguration configuration,
final CommandDependencies commandDependencies) throws Exception {
final PushNotificationExperiment<T> experiment =
experimentFactory.buildExperiment(commandDependencies, configuration);
final int maxConcurrency = namespace.getInt(MAX_CONCURRENCY_ARGUMENT);
log.info("Finishing \"{}\" with max concurrency: {}", experiment.getExperimentName(), maxConcurrency);
final AccountsManager accountsManager = commandDependencies.accountsManager();
final PushNotificationExperimentSamples pushNotificationExperimentSamples = commandDependencies.pushNotificationExperimentSamples();
final Flux<PushNotificationExperimentSample<T>> finishedSamples =
pushNotificationExperimentSamples.getSamples(experiment.getExperimentName(), experiment.getStateClass())
.doOnNext(sample -> Metrics.counter(SAMPLES_READ_COUNTER_NAME, "final", String.valueOf(sample.finalState() != null)).increment())
.flatMap(sample -> {
if (sample.finalState() == null) {
// We still need to record a final state for this sample
return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(sample.accountIdentifier()))
.retryWhen(Retry.backoff(3, Duration.ofSeconds(1)))
.doOnNext(ignored -> ACCOUNT_READ_COUNTER.increment())
.flatMap(maybeAccount -> {
final T finalState = experiment.getState(maybeAccount.orElse(null),
maybeAccount.flatMap(account -> account.getDevice(sample.deviceId())).orElse(null));
return Mono.fromFuture(
() -> pushNotificationExperimentSamples.recordFinalState(sample.accountIdentifier(),
sample.deviceId(),
experiment.getExperimentName(),
finalState))
.onErrorResume(ConditionalCheckFailedException.class, throwable -> Mono.empty())
.onErrorResume(JsonProcessingException.class, throwable -> {
log.error("Failed to parse sample state JSON", throwable);
return Mono.empty();
})
.retryWhen(Retry.backoff(3, Duration.ofSeconds(1)))
.onErrorResume(throwable -> {
log.warn("Failed to record final state for {}:{} in experiment {}",
sample.accountIdentifier(), sample.deviceId(), experiment.getExperimentName(), throwable);
return Mono.empty();
})
.doOnSuccess(ignored -> FINAL_SAMPLE_STORED_COUNTER.increment());
});
} else {
return Mono.just(sample);
}
}, maxConcurrency);
experiment.analyzeResults(finishedSamples);
}
|
@Test
void runAccountFetchRetry() {
final UUID accountIdentifier = UUID.randomUUID();
final byte deviceId = Device.PRIMARY_ID;
final Device device = mock(Device.class);
when(device.getId()).thenReturn(deviceId);
final Account account = mock(Account.class);
when(account.getDevice(deviceId)).thenReturn(Optional.of(device));
when(commandDependencies.accountsManager().getByAccountIdentifierAsync(accountIdentifier))
.thenReturn(CompletableFuture.failedFuture(new RuntimeException()))
.thenReturn(CompletableFuture.failedFuture(new RuntimeException()))
.thenReturn(CompletableFuture.completedFuture(Optional.of(account)));
when(commandDependencies.pushNotificationExperimentSamples().getSamples(eq(EXPERIMENT_NAME), eq(String.class)))
.thenReturn(Flux.just(new PushNotificationExperimentSample<>(accountIdentifier, deviceId, true, "test", null)));
assertDoesNotThrow(() -> finishPushNotificationExperimentCommand.run(null, NAMESPACE, null, commandDependencies));
verify(experiment).getState(account, device);
verify(commandDependencies.pushNotificationExperimentSamples())
.recordFinalState(eq(accountIdentifier), eq(deviceId), eq(EXPERIMENT_NAME), any());
verify(commandDependencies.accountsManager(), times(3)).getByAccountIdentifierAsync(accountIdentifier);
}
|
public synchronized OutputStream open() {
try {
close();
fileOutputStream = new FileOutputStream(file, true);
} catch (FileNotFoundException e) {
throw new RuntimeException("Unable to open output stream", e);
}
return fileOutputStream;
}
|
@Test
public void requireThatFileCanBeOpened() throws IOException {
FileLogTarget logTarget = new FileLogTarget(File.createTempFile("logfile", ".log"));
assertNotNull(logTarget.open());
}
|
public static Collection<X509Certificate> parseCertificates(String pemRepresentation) throws IOException,
CertificateException {
// The parser is very picky. We should trim each line of the input string.
final String pem = pemRepresentation //
.replaceAll("(?m) +$", "") // remove trailing whitespace
.replaceAll("(?m)^ +", ""); // remove leading whitespace
ByteArrayInputStream input = new ByteArrayInputStream(pem.getBytes(StandardCharsets.UTF_8));
return parseCertificates(input);
}
|
@Test
public void testParseFullChain() throws Exception
{
// Setup fixture.
try ( final InputStream stream = getClass().getResourceAsStream( "/fullchain.pem" ) )
{
// Execute system under test.
final Collection<X509Certificate> result = CertificateManager.parseCertificates( stream );
// Verify result.
assertNotNull( result );
assertEquals( 2, result.size() );
}
}
|
public RowMetaInterface getStepFields( String stepname ) throws KettleStepException {
StepMeta stepMeta = findStep( stepname );
if ( stepMeta != null ) {
return getStepFields( stepMeta );
} else {
return null;
}
}
|
@Test
public void prevStepFieldsAreIncludedInGetStepFields() throws KettleStepException {
TransMeta transMeta = new TransMeta( new Variables() );
StepMeta prevStep1 = testStep( "prevStep1", emptyList(), asList( "field1", "field2" ) );
StepMeta prevStep2 = testStep( "prevStep2", emptyList(), asList( "field3", "field4", "field5" ) );
StepMeta someStep = testStep( "step", List.of( "prevStep1" ), List.of( "outputField" ) );
StepMeta after = new StepMeta( "after", new DummyTransMeta() );
wireUpTestTransMeta( transMeta, prevStep1, prevStep2, someStep, after );
RowMetaInterface results = transMeta.getStepFields( someStep, after, mock( ProgressMonitorListener.class ) );
assertThat( results.size(), equalTo( 4 ) );
assertThat( results.getFieldNames(), equalTo( new String[] { "field3", "field4", "field5", "outputField" } ) );
}
|
@Override
public List<Intent> compile(SinglePointToMultiPointIntent intent,
List<Intent> installable) {
Set<Link> links = new HashSet<>();
final boolean allowMissingPaths = intentAllowsPartialFailure(intent);
boolean hasPaths = false;
boolean missingSomePaths = false;
for (ConnectPoint egressPoint : intent.egressPoints()) {
if (egressPoint.deviceId().equals(intent.ingressPoint().deviceId())) {
// Do not need to look for paths, since ingress and egress
// devices are the same.
if (deviceService.isAvailable(egressPoint.deviceId())) {
hasPaths = true;
} else {
missingSomePaths = true;
}
continue;
}
Path path = getPath(intent, intent.ingressPoint().deviceId(), egressPoint.deviceId());
if (path != null) {
hasPaths = true;
links.addAll(path.links());
} else {
missingSomePaths = true;
}
}
// Allocate bandwidth if a bandwidth constraint is set
ConnectPoint ingressCP = intent.filteredIngressPoint().connectPoint();
List<ConnectPoint> egressCPs =
intent.filteredEgressPoints().stream()
.map(fcp -> fcp.connectPoint())
.collect(Collectors.toList());
List<ConnectPoint> pathCPs =
links.stream()
.flatMap(l -> Stream.of(l.src(), l.dst()))
.collect(Collectors.toList());
pathCPs.add(ingressCP);
pathCPs.addAll(egressCPs);
allocateBandwidth(intent, pathCPs);
if (!hasPaths) {
throw new IntentException("Cannot find any path between ingress and egress points.");
} else if (!allowMissingPaths && missingSomePaths) {
throw new IntentException("Missing some paths between ingress and egress points.");
}
Intent result = LinkCollectionIntent.builder()
.appId(intent.appId())
.key(intent.key())
.selector(intent.selector())
.treatment(intent.treatment())
.links(links)
.filteredIngressPoints(ImmutableSet.of(intent.filteredIngressPoint()))
.filteredEgressPoints(intent.filteredEgressPoints())
.priority(intent.priority())
.applyTreatmentOnEgress(true)
.constraints(intent.constraints())
.resourceGroup(intent.resourceGroup())
.build();
return Collections.singletonList(result);
}
|
@Test
public void testMultiEgressCompilation() {
FilteredConnectPoint ingress =
new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_1));
FilteredConnectPoint egressOne =
new FilteredConnectPoint(new ConnectPoint(DID_3, PORT_2));
FilteredConnectPoint egressTwo =
new FilteredConnectPoint(new ConnectPoint(DID_4, PORT_2));
FilteredConnectPoint egressThree =
new FilteredConnectPoint(new ConnectPoint(DID_5, PORT_2));
Set<FilteredConnectPoint> egress = Sets.newHashSet(egressOne,
egressTwo,
egressThree);
SinglePointToMultiPointIntent intent = makeIntent(ingress, egress);
assertThat(intent, is(notNullValue()));
final String[] hops = {S2};
SinglePointToMultiPointIntentCompiler compiler = makeCompiler(hops);
assertThat(compiler, is(notNullValue()));
List<Intent> result = compiler.compile(intent, null);
assertThat(result, is(notNullValue()));
assertThat(result, hasSize(1));
Intent resultIntent = result.get(0);
assertThat(resultIntent instanceof LinkCollectionIntent, is(true));
if (resultIntent instanceof LinkCollectionIntent) {
LinkCollectionIntent linkIntent = (LinkCollectionIntent) resultIntent;
assertThat(linkIntent.links(), hasSize(4));
assertThat(linkIntent.links(), linksHasPath(S1, S2));
assertThat(linkIntent.links(), linksHasPath(S2, S3));
assertThat(linkIntent.links(), linksHasPath(S2, S4));
assertThat(linkIntent.links(), linksHasPath(S2, S5));
}
assertThat("key is inherited", resultIntent.key(), is(intent.key()));
}
|
public static <E, K> Map<K, List<E>> groupByKey(Collection<E> collection, Function<E, K> key) {
return groupByKey(collection, key, false);
}
|
@Test
public void testGroupByKey() {
Map<Long, List<Student>> map = CollStreamUtil.groupByKey(null, Student::getClassId);
assertEquals(map, Collections.EMPTY_MAP);
List<Student> list = new ArrayList<>();
map = CollStreamUtil.groupByKey(list, Student::getClassId);
assertEquals(map, Collections.EMPTY_MAP);
list.add(new Student(1, 1, 1, "张三"));
list.add(new Student(1, 2, 2, "李四"));
list.add(new Student(2, 1, 1, "擎天柱"));
list.add(new Student(2, 2, 2, "威震天"));
list.add(new Student(2, 3, 2, "霸天虎"));
map = CollStreamUtil.groupByKey(list, Student::getClassId);
Map<Long, List<Student>> compare = new HashMap<>();
List<Student> class1 = new ArrayList<>();
class1.add(new Student(1, 1, 1, "张三"));
class1.add(new Student(2, 1, 1, "擎天柱"));
compare.put(1L, class1);
List<Student> class2 = new ArrayList<>();
class2.add(new Student(1, 2, 2, "李四"));
class2.add(new Student(2, 2, 2, "威震天"));
compare.put(2L, class2);
List<Student> class3 = new ArrayList<>();
class3.add(new Student(2, 3, 2, "霸天虎"));
compare.put(3L, class3);
assertEquals(map, compare);
}
|
public static NetworkEndpoint forIpAndPort(String ipAddress, int port) {
checkArgument(InetAddresses.isInetAddress(ipAddress), "'%s' is not an IP address.", ipAddress);
checkArgument(
0 <= port && port <= MAX_PORT_NUMBER,
"Port out of range. Expected [0, %s], actual %s.",
MAX_PORT_NUMBER,
port);
return forIp(ipAddress).toBuilder()
.setType(NetworkEndpoint.Type.IP_PORT)
.setPort(Port.newBuilder().setPortNumber(port))
.build();
}
|
@Test
public void forIpAndPort_withInvalidIp_throwsIllegalArgumentException() {
assertThrows(
IllegalArgumentException.class, () -> NetworkEndpointUtils.forIpAndPort("abc", 8888));
}
|
static JavaInput reorderModifiers(String text) throws FormatterException {
return reorderModifiers(
new JavaInput(text), ImmutableList.of(Range.closedOpen(0, text.length())));
}
|
@Test
public void comment() throws FormatterException {
assertThat(ModifierOrderer.reorderModifiers("static/*1*/abstract/*2*/public").getText())
.isEqualTo("public/*1*/abstract/*2*/static");
}
|
@Override
public void validateDictDataList(String dictType, Collection<String> values) {
if (CollUtil.isEmpty(values)) {
return;
}
Map<String, DictDataDO> dictDataMap = CollectionUtils.convertMap(
dictDataMapper.selectByDictTypeAndValues(dictType, values), DictDataDO::getValue);
// validate each value
values.forEach(value -> {
DictDataDO dictData = dictDataMap.get(value);
if (dictData == null) {
throw exception(DICT_DATA_NOT_EXISTS);
}
if (!CommonStatusEnum.ENABLE.getStatus().equals(dictData.getStatus())) {
throw exception(DICT_DATA_NOT_ENABLE, dictData.getLabel());
}
});
}
|
@Test
public void testValidateDictDataList_notFound() {
// prepare parameters
String dictType = randomString();
List<String> values = singletonList(randomString());
// invoke and assert the expected exception
assertServiceException(() -> dictDataService.validateDictDataList(dictType, values), DICT_DATA_NOT_EXISTS);
}
|
public static Logger verboseLogger() {
return VERBOSE_ANDROID_LOGGER;
}
|
@Test
public void loggersReturnsVerboseInstance() {
Logger logger = Loggers.verboseLogger();
assertThat(logger, instanceOf(VerboseAndroidLogger.class));
}
|
@Secured(resource = Commons.NACOS_CORE_CONTEXT_V2 + "/loader", action = ActionTypes.WRITE)
@GetMapping("/reloadClient")
public ResponseEntity<String> reloadSingle(@RequestParam String connectionId,
@RequestParam(value = "redirectAddress", required = false) String redirectAddress) {
connectionManager.loadSingle(connectionId, redirectAddress);
return ResponseEntity.ok().body("success");
}
|
@Test
void testReloadSingle() {
ResponseEntity<String> result = serverLoaderController.reloadSingle("111", "1.1.1.1");
assertEquals("success", result.getBody());
}
|
static Schema schemaWithName(final Schema schema, final String schemaName) {
if (schemaName == null || schema.type() != Schema.Type.STRUCT) {
return schema;
}
final SchemaBuilder builder = SchemaBuilder.struct();
for (final Field f : schema.fields()) {
builder.field(f.name(), f.schema());
}
if (schema.parameters() != null) {
builder.parameters(schema.parameters());
}
if (schema.isOptional()) {
builder.optional();
}
if (schema.defaultValue() != null) {
builder.defaultValue(schema.defaultValue());
}
builder.doc(schema.doc());
builder.version(schema.version());
return builder.name(schemaName).build();
}
|
@Test
public void shouldNameSchemaWithoutName() {
// Given
final Schema unnamedSchema = SchemaBuilder.struct()
.field("field1", Schema.INT32_SCHEMA)
.field("field2",
SchemaBuilder.struct()
.field("product_id", Schema.INT32_SCHEMA)
.build())
.build();
// When
final Schema schemaWithName = ProtobufSchemas.schemaWithName(unnamedSchema, CUSTOM_FULL_SCHEMA_NAME);
// Then
assertThat(schemaWithName, is(SchemaBuilder.struct()
.field("field1", Schema.INT32_SCHEMA)
.field("field2",
SchemaBuilder.struct()
.field("product_id", Schema.INT32_SCHEMA)
.build())
.name(CUSTOM_FULL_SCHEMA_NAME)
.build()));
}
|
@Override
public void run() {
// top-level command, do nothing
}
|
@Test
public void test_submit_argsPassing() {
run("submit", testJobJarFile.toString(), "--jobOption", "fooValue");
assertTrueEventually(() -> assertContains(captureOut(), " with arguments [--jobOption, fooValue]"));
}
|
@Override
public void close()
{
destroy();
}
|
@Test(timeout = 5000)
public void testZPollerNew()
{
ZContext ctx = new ZContext();
ZPoller poller = new ZPoller(ctx);
try {
ZPoller other = new ZPoller(poller);
other.close();
ItemCreator itemCreator = new ZPoller.SimpleCreator();
other = new ZPoller(itemCreator, poller);
other.close();
}
finally {
poller.close();
ctx.close();
}
}
|
public static void checkMapConfig(Config config, MapConfig mapConfig,
SplitBrainMergePolicyProvider mergePolicyProvider) {
checkNotNativeWhenOpenSource(mapConfig.getInMemoryFormat());
checkNotBitmapIndexWhenNativeMemory(mapConfig.getInMemoryFormat(), mapConfig.getIndexConfigs());
checkTSEnabledOnEnterpriseJar(mapConfig.getTieredStoreConfig());
if (getBuildInfo().isEnterprise()) {
checkTieredStoreMapConfig(config, mapConfig);
checkMapNativeConfig(mapConfig, config.getNativeMemoryConfig());
}
checkMapEvictionConfig(mapConfig.getEvictionConfig());
checkMapMaxSizePolicyPerInMemoryFormat(mapConfig);
checkMapMergePolicy(mapConfig,
mapConfig.getMergePolicyConfig().getPolicy(), mergePolicyProvider);
}
|
@Test(expected = InvalidConfigurationException.class)
public void checkMapConfig_fails_with_merge_policy_which_requires_per_entry_stats_enabled() {
checkMapConfig(new Config(), getMapConfig(BINARY).setPerEntryStatsEnabled(false),
splitBrainMergePolicyProvider);
}
|
@Override
public YamlTableRuleConfiguration swapToYamlConfiguration(final ShardingTableRuleConfiguration data) {
YamlTableRuleConfiguration result = new YamlTableRuleConfiguration();
result.setLogicTable(data.getLogicTable());
result.setActualDataNodes(data.getActualDataNodes());
if (null != data.getDatabaseShardingStrategy()) {
result.setDatabaseStrategy(shardingStrategySwapper.swapToYamlConfiguration(data.getDatabaseShardingStrategy()));
}
if (null != data.getTableShardingStrategy()) {
result.setTableStrategy(shardingStrategySwapper.swapToYamlConfiguration(data.getTableShardingStrategy()));
}
if (null != data.getKeyGenerateStrategy()) {
result.setKeyGenerateStrategy(keyGenerateStrategySwapper.swapToYamlConfiguration(data.getKeyGenerateStrategy()));
}
if (null != data.getAuditStrategy()) {
result.setAuditStrategy(auditStrategySwapper.swapToYamlConfiguration(data.getAuditStrategy()));
}
return result;
}
|
@Test
void assertSwapToYamlConfiguration() {
YamlShardingTableRuleConfigurationSwapper swapper = new YamlShardingTableRuleConfigurationSwapper();
YamlTableRuleConfiguration actual = swapper.swapToYamlConfiguration(createShardingTableRuleConfiguration());
assertThat(actual.getDatabaseStrategy().getStandard().getShardingAlgorithmName(), is("standard"));
assertThat(actual.getTableStrategy().getStandard().getShardingAlgorithmName(), is("standard"));
assertThat(actual.getKeyGenerateStrategy().getKeyGeneratorName(), is("auto_increment"));
assertThat(actual.getAuditStrategy().getAuditorNames(), is(Collections.singletonList("audit_algorithm")));
}
|
public static final StartTime immediate() {
return new StartTime(StartTimeOption.IMMEDIATE, null, null);
}
|
@Test
public void testStartImmediate() {
StartTime st = StartTime.immediate();
assertEquals(StartTimeOption.IMMEDIATE, st.option());
assertNull(st.relativeTime());
assertNull(st.absoluteTime());
}
|
@Override
public void uncaughtException(Thread t, Throwable e) {
if(ShutdownHookManager.get().isShutdownInProgress()) {
LOG.error("Thread " + t + " threw an Throwable, but we are shutting " +
"down, so ignoring this", e);
} else if(e instanceof Error) {
try {
LOG.error(FATAL,
"Thread " + t + " threw an Error. Shutting down now...", e);
} catch (Throwable err) {
//A logging failure should not prevent us from exiting
}
if(e instanceof OutOfMemoryError) {
//After catching an OOM java says it is undefined behavior, so don't
//even try to clean up or we can get stuck on shutdown.
try {
System.err.println("Halting due to Out Of Memory Error...");
} catch (Throwable err) {
//Again, a logging failure should not prevent us from exiting.
}
ExitUtil.halt(-1);
} else {
ExitUtil.terminate(-1);
}
} else {
LOG.error("Thread " + t + " threw an Exception.", e);
}
}
|
@Test
void testUncaughtExceptionHandlerWithOutOfMemoryError()
throws InterruptedException {
ExitUtil.disableSystemHalt();
final YarnUncaughtExceptionHandler spyOomHandler = spy(exHandler);
final OutOfMemoryError oomError = new OutOfMemoryError("out-of-memory-error");
final Thread oomThread = new Thread(new Runnable() {
@Override
public void run() {
throw oomError;
}
});
oomThread.setUncaughtExceptionHandler(spyOomHandler);
assertSame(spyOomHandler, oomThread.getUncaughtExceptionHandler());
oomThread.start();
oomThread.join();
verify(spyOomHandler).uncaughtException(oomThread, oomError);
}
|
public static ImmutableList<SbeField> generateFields(Ir ir, IrOptions irOptions) {
ImmutableList.Builder<SbeField> fields = ImmutableList.builder();
TokenIterator iterator = getIteratorForMessage(ir, irOptions);
while (iterator.hasNext()) {
Token token = iterator.next();
switch (token.signal()) {
case BEGIN_FIELD:
fields.add(processPrimitive(iterator));
break;
default:
// TODO(https://github.com/apache/beam/issues/21102): Support remaining field types
break;
}
}
return fields.build();
}
|
@Test
public void testGenerateFieldsWithMessageId() throws Exception {
Ir ir = getIr(OnlyPrimitivesMultiMessage.RESOURCE_PATH);
IrOptions msg1Opts = IrOptions.builder().setMessageId(Primitives1.ID).build();
IrOptions msg2Opts = IrOptions.builder().setMessageId(Primitives2.ID).build();
ImmutableList<SbeField> actual1 = IrFieldGenerator.generateFields(ir, msg1Opts);
ImmutableList<SbeField> actual2 = IrFieldGenerator.generateFields(ir, msg2Opts);
assertEquals(Primitives1.FIELDS, actual1);
assertEquals(Primitives2.FIELDS, actual2);
}
|
@Override
public ConsumeMessageDirectlyResult consumeMessageDirectly(MessageExt msg, String brokerName) {
ConsumeMessageDirectlyResult result = new ConsumeMessageDirectlyResult();
result.setOrder(true);
List<MessageExt> msgs = new ArrayList<>();
msgs.add(msg);
MessageQueue mq = new MessageQueue();
mq.setBrokerName(brokerName);
mq.setTopic(msg.getTopic());
mq.setQueueId(msg.getQueueId());
ConsumeOrderlyContext context = new ConsumeOrderlyContext(mq);
this.defaultMQPushConsumerImpl.resetRetryAndNamespace(msgs, this.consumerGroup);
final long beginTime = System.currentTimeMillis();
log.info("consumeMessageDirectly receive new message: {}", msg);
try {
ConsumeOrderlyStatus status = this.messageListener.consumeMessage(msgs, context);
if (status != null) {
switch (status) {
case COMMIT:
result.setConsumeResult(CMResult.CR_COMMIT);
break;
case ROLLBACK:
result.setConsumeResult(CMResult.CR_ROLLBACK);
break;
case SUCCESS:
result.setConsumeResult(CMResult.CR_SUCCESS);
break;
case SUSPEND_CURRENT_QUEUE_A_MOMENT:
result.setConsumeResult(CMResult.CR_LATER);
break;
default:
break;
}
} else {
result.setConsumeResult(CMResult.CR_RETURN_NULL);
}
} catch (Throwable e) {
result.setConsumeResult(CMResult.CR_THROW_EXCEPTION);
result.setRemark(UtilAll.exceptionSimpleDesc(e));
log.warn("consumeMessageDirectly exception: {} Group: {} Msgs: {} MQ: {}",
UtilAll.exceptionSimpleDesc(e),
ConsumeMessagePopOrderlyService.this.consumerGroup,
msgs,
mq, e);
}
result.setAutoCommit(context.isAutoCommit());
result.setSpentTimeMills(System.currentTimeMillis() - beginTime);
log.info("consumeMessageDirectly Result: {}", result);
return result;
}
|
@Test
public void testConsumeMessageDirectlyWithCrLater() {
when(messageListener.consumeMessage(any(), any(ConsumeOrderlyContext.class))).thenReturn(ConsumeOrderlyStatus.SUSPEND_CURRENT_QUEUE_A_MOMENT);
ConsumeMessageDirectlyResult actual = popService.consumeMessageDirectly(createMessageExt(), defaultBroker);
assertEquals(CMResult.CR_LATER, actual.getConsumeResult());
}
|
@VisibleForTesting
static boolean isIdentityMapping(Map<Integer, Integer> map)
{
for (int i = 0; i < map.size(); i++) {
if (!Objects.equals(map.get(i), i)) {
return false;
}
}
return true;
}
|
@Test
public void testIsOneToOneMapping()
{
assertTrue(isIdentityMapping(ImmutableMap.<Integer, Integer>builder()
.put(0, 0)
.put(1, 1)
.put(2, 2)
.put(3, 3)
.build()));
assertFalse(isIdentityMapping(ImmutableMap.<Integer, Integer>builder()
.put(0, 0)
.put(1, 1)
.put(2, 2)
.put(3, 3)
.put(5, 5)
.build()));
assertFalse(isIdentityMapping(ImmutableMap.<Integer, Integer>builder()
.put(0, 0)
.put(1, 1)
.put(2, 2)
.put(4, 5)
.build()));
}
|
public static Ip4Address makeMaskPrefix(int prefixLength) {
byte[] mask = IpAddress.makeMaskPrefixArray(VERSION, prefixLength);
return new Ip4Address(mask);
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidMakeTooLongMaskPrefixIPv4() {
Ip4Address ipAddress;
ipAddress = Ip4Address.makeMaskPrefix(33);
}
|
public Map<String, String> getAllConfigPropsWithSecretsObfuscated() {
final Map<String, String> allPropsCleaned = new HashMap<>();
// build a properties map with obfuscated values for sensitive configs.
// Obfuscation is handled by ConfigDef.convertToString
allPropsCleaned.putAll(getKsqlConfigPropsWithSecretsObfuscated());
allPropsCleaned.putAll(
getKsqlStreamConfigPropsWithSecretsObfuscated().entrySet().stream().collect(
Collectors.toMap(
e -> KSQL_STREAMS_PREFIX + e.getKey(), Map.Entry::getValue
)
)
);
return Collections.unmodifiableMap(allPropsCleaned);
}
|
@Test
public void shouldListUnknownKsqlFunctionConfigObfuscated() {
// Given:
final KsqlConfig config = new KsqlConfig(ImmutableMap.of(
KsqlConfig.KSQL_FUNCTIONS_PROPERTY_PREFIX + "some_udf.some.prop", "maybe sensitive"
));
// When:
final Map<String, String> result = config.getAllConfigPropsWithSecretsObfuscated();
// Then:
assertThat(result.get(KsqlConfig.KSQL_FUNCTIONS_PROPERTY_PREFIX + "some_udf.some.prop"),
is("[hidden]"));
}
|
List<KinesisLogEntry> processMessages(final byte[] payloadBytes, Instant approximateArrivalTimestamp) throws IOException {
// This method will be called from a codec, and therefore will not perform any detection. It will rely
// exclusively on the AWSMessageType detected in the setup HealthCheck.
// If a user needs to change the type of data stored in a stream, they will need to set the integration up again.
if (awsMessageType == AWSMessageType.KINESIS_CLOUDWATCH_FLOW_LOGS || awsMessageType == AWSMessageType.KINESIS_CLOUDWATCH_RAW) {
final CloudWatchLogSubscriptionData logSubscriptionData = decompressCloudWatchMessages(payloadBytes, objectMapper);
return logSubscriptionData.logEvents().stream()
.map(le -> {
DateTime timestamp = new DateTime(le.timestamp(), DateTimeZone.UTC);
return KinesisLogEntry.create(kinesisStream,
// Use the log group and stream returned from CloudWatch.
logSubscriptionData.logGroup(),
logSubscriptionData.logStream(), timestamp, le.message());
})
.collect(Collectors.toList());
} else if (awsMessageType == AWSMessageType.KINESIS_RAW) {
// The best timestamp available is the approximate arrival time of the message to the Kinesis stream.
final DateTime timestamp = new DateTime(approximateArrivalTimestamp.toEpochMilli(), DateTimeZone.UTC);
final KinesisLogEntry kinesisLogEntry = KinesisLogEntry.create(kinesisStream,
"", "",
timestamp, new String(payloadBytes, StandardCharsets.UTF_8));
return Collections.singletonList(kinesisLogEntry);
} else {
LOG.error("The AWSMessageType [{}] is not supported by the KinesisTransport", awsMessageType);
return new ArrayList<>();
}
}
|
@Test
public void testKinesisRawDecoding() throws IOException {
final String textLogMessage = "a text log message";
final Instant now = Instant.now();
final List<KinesisLogEntry> logEntries =
rawDecoder.processMessages(textLogMessage.getBytes(StandardCharsets.UTF_8), now);
Assert.assertEquals(1, logEntries.size());
// Verify that the single raw log message is present in the parsed result.
Assert.assertEquals(1, logEntries.stream().filter(logEntry -> logEntry.message().equals(textLogMessage)).count());
// Verify timestamp and message contents.
final KinesisLogEntry resultLogEntry = logEntries.stream().findAny().get();
Assert.assertEquals(textLogMessage, resultLogEntry.message());
Assert.assertEquals(new DateTime(now.toEpochMilli(), DateTimeZone.UTC), resultLogEntry.timestamp());
}
|
public static String toCamelCase(String s) {
if (s == null) {
return null;
}
s = s.toLowerCase();
StringBuilder sb = new StringBuilder(s.length());
boolean upperCase = false;
for (int i = 0; i < s.length(); i++) {
char c = s.charAt(i);
if (c == SEPARATOR) {
upperCase = true;
} else if (upperCase) {
sb.append(Character.toUpperCase(c));
upperCase = false;
} else {
sb.append(c);
}
}
return sb.toString();
}
|
@Test
public void testToCamelCase() {
String a = "user_name";
Assert.assertEquals("userName", StringKit.toCamelCase(a));
}
|
@Override
public String selectForUpdateSkipLocked() {
return supportsSelectForUpdateSkipLocked ? " FOR UPDATE SKIP LOCKED" : "";
}
|
@Test
void mariaDB8DoesNotSupportSelectForUpdateSkipLocked() {
assertThat(new MariaDbDialect("MariaDB", "8.0").selectForUpdateSkipLocked()).isEmpty();
}
|
@Nonnull
public static String[] splitNewline(@Nonnull String input) {
return input.split("\n\r|\r\n|\n");
}
|
@Test
void testSplitNewline() {
assertEquals(1, StringUtil.splitNewline("").length);
assertEquals(1, StringUtil.splitNewline("a").length);
assertEquals(2, StringUtil.splitNewline("a\nb").length);
assertEquals(2, StringUtil.splitNewline("a\n\rb").length);
assertEquals(2, StringUtil.splitNewline("a\r\nb").length);
assertEquals(3, StringUtil.splitNewline("a\n\nb").length);
assertEquals(3, StringUtil.splitNewline("a\n\r\r\nb").length);
assertEquals(3, StringUtil.splitNewline("a\r\n\r\nb").length);
assertEquals(3, StringUtil.splitNewline("a\r\n\n\rb").length);
assertEquals(3, StringUtil.splitNewline("a\nb\nc").length);
}
|
@Override
public Optional<Period> chooseBin(final List<Period> availablePeriods, final QueryExecutionStats stats) {
return availablePeriods.stream()
.filter(per -> matches(per, stats.effectiveTimeRange()))
.findFirst();
}
|
@Test
void testReturnsEmptyOptionalOnNoAvailablePeriods() {
assertTrue(
toTest.chooseBin(
List.of(),
getQueryExecutionStats(5, AbsoluteRange.create(
DateTime.now(DateTimeZone.UTC).minusHours(5),
DateTime.now(DateTimeZone.UTC))
)
).isEmpty());
}
|
public int getKafkaFetcherSizeBytes() {
return _kafkaFetcherSizeBytes;
}
|
@Test
public void testGetFetcherSize() {
// test default
KafkaPartitionLevelStreamConfig config = getStreamConfig("topic", "host1", "", "", "", null, null);
Assert.assertEquals(KafkaStreamConfigProperties.LowLevelConsumer.KAFKA_BUFFER_SIZE_DEFAULT,
config.getKafkaFetcherSizeBytes());
config = getStreamConfig("topic", "host1", "100", "", "", null, null);
Assert.assertEquals(100, config.getKafkaFetcherSizeBytes());
config = getStreamConfig("topic", "host1", "100", "", "bad value", null, null);
Assert.assertEquals(100, config.getKafkaFetcherSizeBytes());
// correct config
config = getStreamConfig("topic", "host1", "100", "", "200", null, null);
Assert.assertEquals(200, config.getKafkaFetcherSizeBytes());
}
|
public static Version fromString(String versionString) {
checkArgument(!Strings.isNullOrEmpty(versionString));
Version version = builder().setVersionType(Type.NORMAL).setVersionString(versionString).build();
if (!EPOCH_PATTERN.matcher(versionString).matches()) {
versionString = "0:" + versionString;
}
boolean isValid =
version.segments().stream()
.flatMap(segment -> segment.tokens().stream())
.anyMatch(
token ->
(token.isNumeric() && token.getNumeric() != 0)
|| (token.isText() && !token.getText().isEmpty()));
if (!isValid) {
throw new IllegalArgumentException(
String.format(
"Input version string %s is not valid, it should contain at least one non-empty"
+ " field.",
versionString));
}
return version;
}
|
@Test
public void create_whenNormalVersionAndValueIsNull_throwsException() {
assertThrows(IllegalArgumentException.class, () -> Version.fromString(null));
}
|
public static InternalRequestSignature fromHeaders(Crypto crypto, byte[] requestBody, HttpHeaders headers) {
if (headers == null) {
return null;
}
String signatureAlgorithm = headers.getHeaderString(SIGNATURE_ALGORITHM_HEADER);
String encodedSignature = headers.getHeaderString(SIGNATURE_HEADER);
if (signatureAlgorithm == null || encodedSignature == null) {
return null;
}
Mac mac;
try {
mac = crypto.mac(signatureAlgorithm);
} catch (NoSuchAlgorithmException e) {
throw new BadRequestException(e.getMessage());
}
byte[] decodedSignature;
try {
decodedSignature = Base64.getDecoder().decode(encodedSignature);
} catch (IllegalArgumentException e) {
throw new BadRequestException(e.getMessage());
}
return new InternalRequestSignature(
requestBody,
mac,
decodedSignature
);
}
|
@Test
public void fromHeadersShouldReturnNullOnNullHeaders() {
assertNull(InternalRequestSignature.fromHeaders(crypto, REQUEST_BODY, null));
}
|
@Override
public NativeQuerySpec<Record> select(String sql, Object... args) {
return new NativeQuerySpecImpl<>(this, sql, args, DefaultRecord::new, false);
}
|
@Test
public void test() {
database.dml()
.insert("s_test_event")
.value("id", "helper_test")
.value("name", "main")
.value("age", 10)
.execute()
.sync();
database.dml()
.insert("s_test")
.value("id", "helper_test")
.value("name", "main")
.value("testName", "testName")
.value("age", 10)
.execute()
.sync();
DefaultQueryHelper helper = new DefaultQueryHelper(database);
helper.select(TestInfo.class)
.all(EventTestEntity.class, TestInfo::setEventList)
// .all("e2", TestInfo::setEvent)
.all(TestEntity.class)
.from(TestEntity.class)
.leftJoin(EventTestEntity.class,
join -> join
.alias("e1")
.is(EventTestEntity::getId, TestEntity::getId)
// .is(EventTestEntity::getName, TestEntity::getId)
.notNull(EventTestEntity::getAge))
// .leftJoin(EventTestEntity.class,
// join -> join
// .alias("e2")
// .is(EventTestEntity::getId, TestEntity::getId))
// .where(dsl -> dsl.is(EventTestEntity::getName, "Ename")
// .is("e1.name", "Ename")
// .orNest()
// .is(TestEntity::getName, "main")
// .is("e1.name", "Ename")
// .end()
// )
.orderByAsc(TestEntity::getAge)
.orderByDesc(EventTestEntity::getAge)
.fetchPaged(0, 10)
.doOnNext(info -> System.out.println(JSON.toJSONString(info, SerializerFeature.PrettyFormat)))
.as(StepVerifier::create)
.expectNextCount(1)
.verifyComplete();
}
|
@Override
public void loginSuccess(HttpRequest request, @Nullable String login, Source source) {
checkRequest(request);
requireNonNull(source, "source can't be null");
LOGGER.atDebug().setMessage("login success [method|{}][provider|{}|{}][IP|{}|{}][login|{}]")
.addArgument(source::getMethod)
.addArgument(source::getProvider)
.addArgument(source::getProviderName)
.addArgument(request::getRemoteAddr)
.addArgument(() -> getAllIps(request))
.addArgument(() -> preventLogFlood(sanitizeLog(emptyIfNull(login))))
.log();
}
|
@Test
public void login_success_logs_X_Forwarded_For_header_from_request() {
HttpRequest request = mockRequest("1.2.3.4", List.of("2.3.4.5"));
underTest.loginSuccess(request, "foo", Source.realm(Method.EXTERNAL, "bar"));
verifyLog("login success [method|EXTERNAL][provider|REALM|bar][IP|1.2.3.4|2.3.4.5][login|foo]", Set.of("logout", "login failure"));
}
|
@Override
public TransformResultMetadata getResultMetadata() {
return _resultMetadata;
}
|
@Test
public void testArrayElementAtLong() {
Random rand = new Random();
int index = rand.nextInt(MAX_NUM_MULTI_VALUES);
ExpressionContext expression =
RequestContextUtils.getExpression(String.format("array_element_at_long(%s, %d)", LONG_MV_COLUMN, index + 1));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getResultMetadata().getDataType(), DataType.LONG);
assertTrue(transformFunction.getResultMetadata().isSingleValue());
long[] expectedValues = new long[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = _longMVValues[i].length > index ? _longMVValues[i][index] : NullValuePlaceHolder.LONG;
}
testTransformFunction(transformFunction, expectedValues);
}
|
public static <T> List<String> getModifiedFields(T src, T dst, String... ignoreFields) {
Class clazz = src.getClass();
Method[] methods = clazz.getMethods();
List<String> ignoreFieldList = Arrays.asList(ignoreFields);
List<String> modifiedFields = new ArrayList<String>();
for (Method getterMethod : methods) { // iterate over the object's methods
if (Modifier.isStatic(getterMethod.getModifiers())
|| !ReflectUtils.isBeanPropertyReadMethod(getterMethod)) {
// skip static methods and non-getter methods
continue;
}
String propertyName = ReflectUtils.getPropertyNameFromBeanReadMethod(getterMethod);
if (ignoreFieldList.contains(propertyName)) {
// ignored field, skip it
continue;
}
Class returnType = getterMethod.getReturnType();
try { // the property must also have a setter method
Method setterMethod = ReflectUtils.getPropertySetterMethod(clazz, propertyName, returnType);
if (setterMethod != null) {
Object srcVal = getterMethod.invoke(src); // original value
Object dstVal = getterMethod.invoke(dst); // value after modification
if (srcVal == null) { // source side is null
if (dstVal != null) {
modifiedFields.add(propertyName);
}
} else {
if (dstVal == null) { // destination side is null
modifiedFields.add(propertyName);
} else {
if (!srcVal.equals(dstVal)) { // both non-null and different
modifiedFields.add(propertyName);
}
}
}
}
} catch (Exception ignore) {
// ignore and continue with the next method
}
}
}
return modifiedFields;
}
|
@Test
public void testGetModifiedFields() throws Exception {
TestBean cg0 = new TestBean();
TestBean cg1 = new TestBean();
cg1.setAlias("aaa:1.0.0");
cg1.setHeartbeat(2222);
cg1.setRegister(true);
TestBean cg2 = new TestBean();
cg2.setAlias("aaa:1.0.0");
cg2.setHeartbeat(2222);
cg2.setRegister(false);
cg2.setSubBeans(new ArrayList<TestSubBean>());
Assert.assertTrue(BeanUtils.getModifiedFields(cg0, cg1).size() == 3);
Assert.assertTrue(BeanUtils.getModifiedFields(cg0, cg2).size() == 3);
Assert.assertTrue(BeanUtils.getModifiedFields(cg1, cg2).size() == 2);
}
|
@Override
String unfixForNamespace(String path) {
return namespace.unfixForNamespace(path);
}
|
@Test
public void testUnfixForEmptyNamespace() {
CuratorFramework client = CuratorFrameworkFactory.builder()
.namespace("")
.retryPolicy(new RetryOneTime(1))
.connectString("foo")
.build();
CuratorFrameworkImpl clientImpl = (CuratorFrameworkImpl) client;
assertEquals(clientImpl.unfixForNamespace("/foo/bar"), "/foo/bar");
CloseableUtils.closeQuietly(client);
}
|
@Override
public CatalogTable getTable(String sqlQuery) throws SQLException {
Connection defaultConnection = getConnection(defaultUrl);
return CatalogUtils.getCatalogTable(defaultConnection, sqlQuery, new OracleTypeMapper());
}
|
@Test
void testCatalog() {
List<String> strings = catalog.listDatabases();
CatalogTable table = catalog.getTable(TablePath.of("XE", "TEST", "PG_TYPES_TABLE_CP1"));
catalog.createTable(new TablePath("XE", "TEST", "TEST003"), table, false);
}
|
public static DataSource createDataSource(final ModeConfiguration modeConfig) throws SQLException {
return createDataSource(DefaultDatabase.LOGIC_NAME, modeConfig);
}
|
@Test
void assertCreateDataSourceWithDatabaseNameAndDefaultModeConfigurationForSingleDataSource() throws SQLException {
assertDataSource(ShardingSphereDataSourceFactory.createDataSource("test_db", null, new MockedDataSource(), new LinkedList<>(), new Properties()), "test_db");
}
|
public void start() {
if (!enabled) {
logger.info(format("Diagnostics disabled. To enable add -D%s=true to the JVM arguments.", ENABLED.getName()));
return;
}
this.diagnosticsLog = outputType.newLog(this);
this.scheduler = new ScheduledThreadPoolExecutor(1, new DiagnosticSchedulerThreadFactory());
logger.info("Diagnostics started");
}
|
@Test
public void start_whenDisabled() throws Exception {
Diagnostics diagnostics = newDiagnostics(new Config().setProperty(Diagnostics.ENABLED.getName(), "false"));
diagnostics.start();
assertNull("DiagnosticsLogFile should be null", diagnostics.diagnosticsLog);
}
|
public static Object get(Object object, int index) {
if (index < 0) {
throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
}
if (object instanceof Map) {
Map map = (Map) object;
Iterator iterator = map.entrySet().iterator();
return get(iterator, index);
} else if (object instanceof List) {
return ((List) object).get(index);
} else if (object instanceof Object[]) {
return ((Object[]) object)[index];
} else if (object instanceof Iterator) {
Iterator it = (Iterator) object;
while (it.hasNext()) {
index--;
if (index == -1) {
return it.next();
} else {
it.next();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object instanceof Collection) {
Iterator iterator = ((Collection) object).iterator();
return get(iterator, index);
} else if (object instanceof Enumeration) {
Enumeration it = (Enumeration) object;
while (it.hasMoreElements()) {
index--;
if (index == -1) {
return it.nextElement();
} else {
it.nextElement();
}
}
throw new IndexOutOfBoundsException("Entry does not exist: " + index);
} else if (object == null) {
throw new IllegalArgumentException("Unsupported object type: null");
} else {
try {
return Array.get(object, index);
} catch (IllegalArgumentException ex) {
throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
}
}
}
|
@Test
void testGetList2() {
assertThrows(IndexOutOfBoundsException.class, () -> {
CollectionUtils.get(Collections.emptyList(), 1);
});
}
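A usage sketch (illustrative, not part of the suite) showing how the polymorphic get above resolves an index across the supported container types:
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

public class CollectionUtilsGetDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new LinkedHashMap<>();
        map.put("a", 1);
        map.put("b", 2);
        // For a Map, the index selects the n-th entry in iteration order.
        System.out.println(CollectionUtils.get(map, 1));                  // b=2
        // For a List the index is positional; a primitive array falls
        // through to the final branch and is read via Array.get.
        System.out.println(CollectionUtils.get(Arrays.asList(7, 8), 0));  // 7
        System.out.println(CollectionUtils.get(new int[]{4, 5, 6}, 2));   // 6
    }
}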
|
@Override
public synchronized DefaultConnectClient get(
final Optional<String> ksqlAuthHeader,
final List<Entry<String, String>> incomingRequestHeaders,
final Optional<KsqlPrincipal> userPrincipal
) {
if (defaultConnectAuthHeader == null) {
defaultConnectAuthHeader = buildDefaultAuthHeader();
}
final Map<String, Object> configWithPrefixOverrides =
ksqlConfig.valuesWithPrefixOverride(KsqlConfig.KSQL_CONNECT_PREFIX);
return new DefaultConnectClient(
ksqlConfig.getString(KsqlConfig.CONNECT_URL_PROPERTY),
buildAuthHeader(ksqlAuthHeader, incomingRequestHeaders),
requestHeadersExtension
.map(extension -> extension.getHeaders(userPrincipal))
.orElse(Collections.emptyMap()),
Optional.ofNullable(newSslContext(configWithPrefixOverrides)),
shouldVerifySslHostname(configWithPrefixOverrides),
ksqlConfig.getLong(KsqlConfig.CONNECT_REQUEST_TIMEOUT_MS)
);
}
|
@Test
public void shouldPassCustomRequestHeaders() {
// Given:
when(config.getConfiguredInstance(
KsqlConfig.CONNECT_REQUEST_HEADERS_PLUGIN,
ConnectRequestHeadersExtension.class))
.thenReturn(requestHeadersExtension);
// re-initialize client factory since request headers extension is configured in constructor
connectClientFactory = new DefaultConnectClientFactory(config);
when(requestHeadersExtension.getHeaders(Optional.of(userPrincipal)))
.thenReturn(ImmutableMap.of("header", "value"));
// When:
final DefaultConnectClient connectClient =
connectClientFactory.get(Optional.empty(), Collections.emptyList(), Optional.of(userPrincipal));
// Then:
assertThat(connectClient.getRequestHeaders(), arrayContaining(header("header", "value")));
}
|
@Subscribe
public void onVarbitChanged(VarbitChanged event)
{
if (event.getVarbitId() == Varbits.IN_RAID)
{
removeVarTimer(OVERLOAD_RAID);
removeGameTimer(PRAYER_ENHANCE);
}
if (event.getVarbitId() == Varbits.VENGEANCE_COOLDOWN && config.showVengeance())
{
if (event.getValue() == 1)
{
createGameTimer(VENGEANCE);
}
else
{
removeGameTimer(VENGEANCE);
}
}
if (event.getVarbitId() == Varbits.SPELLBOOK_SWAP && config.showSpellbookSwap())
{
if (event.getValue() == 1)
{
createGameTimer(SPELLBOOK_SWAP);
}
else
{
removeGameTimer(SPELLBOOK_SWAP);
}
}
if (event.getVarbitId() == Varbits.HEAL_GROUP_COOLDOWN && config.showHealGroup())
{
if (event.getValue() == 1)
{
createGameTimer(HEAL_GROUP);
}
else
{
removeGameTimer(HEAL_GROUP);
}
}
if (event.getVarbitId() == Varbits.DEATH_CHARGE_COOLDOWN && config.showArceuusCooldown())
{
if (event.getValue() == 1)
{
createGameTimer(DEATH_CHARGE_COOLDOWN);
}
else
{
removeGameTimer(DEATH_CHARGE_COOLDOWN);
}
}
if (event.getVarbitId() == Varbits.CORRUPTION_COOLDOWN && config.showArceuusCooldown())
{
if (event.getValue() == 1)
{
createGameTimer(CORRUPTION_COOLDOWN);
}
else
{
removeGameTimer(CORRUPTION_COOLDOWN);
}
}
if (event.getVarbitId() == Varbits.RESURRECT_THRALL_COOLDOWN && config.showArceuusCooldown())
{
if (event.getValue() == 1)
{
createGameTimer(RESURRECT_THRALL_COOLDOWN);
}
else
{
removeGameTimer(RESURRECT_THRALL_COOLDOWN);
}
}
if (event.getVarbitId() == Varbits.SHADOW_VEIL_COOLDOWN && config.showArceuusCooldown())
{
if (event.getValue() == 1)
{
createGameTimer(SHADOW_VEIL_COOLDOWN);
}
else
{
removeGameTimer(SHADOW_VEIL_COOLDOWN);
}
}
if (event.getVarbitId() == Varbits.WARD_OF_ARCEUUS_COOLDOWN && config.showArceuusCooldown())
{
if (event.getValue() == 1)
{
createGameTimer(WARD_OF_ARCEUUS_COOLDOWN);
}
else
{
removeGameTimer(WARD_OF_ARCEUUS_COOLDOWN);
}
}
if (event.getVarbitId() == Varbits.VENGEANCE_ACTIVE && config.showVengeanceActive())
{
updateVarCounter(VENGEANCE_ACTIVE, event.getValue());
}
if (event.getVarbitId() == Varbits.DEATH_CHARGE && config.showArceuus())
{
if (event.getValue() == 1)
{
createGameTimer(DEATH_CHARGE, Duration.of(client.getRealSkillLevel(Skill.MAGIC), RSTimeUnit.GAME_TICKS));
}
else
{
removeGameTimer(DEATH_CHARGE);
}
}
if (event.getVarbitId() == Varbits.RESURRECT_THRALL && event.getValue() == 0 && config.showArceuus())
{
removeGameTimer(RESURRECT_THRALL);
}
if (event.getVarbitId() == Varbits.SHADOW_VEIL && event.getValue() == 0 && config.showArceuus())
{
removeGameTimer(SHADOW_VEIL);
}
if (event.getVarpId() == VarPlayer.POISON && config.showAntiPoison())
{
final int poisonVarp = event.getValue();
final int tickCount = client.getTickCount();
if (poisonVarp == 0)
{
nextPoisonTick = -1;
}
else if (nextPoisonTick - tickCount <= 0)
{
nextPoisonTick = tickCount + POISON_TICK_LENGTH;
}
updateVarTimer(ANTIPOISON, event.getValue(),
i -> i >= 0 || i < VENOM_VALUE_CUTOFF,
i -> nextPoisonTick - tickCount + Math.abs((i + 1) * POISON_TICK_LENGTH));
updateVarTimer(ANTIVENOM, event.getValue(),
i -> i >= VENOM_VALUE_CUTOFF,
i -> nextPoisonTick - tickCount + Math.abs((i + 1 - VENOM_VALUE_CUTOFF) * POISON_TICK_LENGTH));
}
if ((event.getVarbitId() == Varbits.NMZ_OVERLOAD_REFRESHES_REMAINING
|| event.getVarbitId() == Varbits.COX_OVERLOAD_REFRESHES_REMAINING) && config.showOverload())
{
final int overloadVarb = event.getValue();
final int tickCount = client.getTickCount();
if (overloadVarb <= 0)
{
nextOverloadRefreshTick = -1;
}
else if (nextOverloadRefreshTick - tickCount <= 0)
{
nextOverloadRefreshTick = tickCount + OVERLOAD_TICK_LENGTH;
}
GameTimer overloadTimer = client.getVarbitValue(Varbits.IN_RAID) == 1 ? OVERLOAD_RAID : OVERLOAD;
updateVarTimer(overloadTimer, overloadVarb, i -> nextOverloadRefreshTick - tickCount + (i - 1) * OVERLOAD_TICK_LENGTH);
}
if (event.getVarbitId() == Varbits.TELEBLOCK && config.showTeleblock())
{
updateVarTimer(TELEBLOCK, event.getValue() - 100, i -> i <= 0, IntUnaryOperator.identity());
}
if (event.getVarpId() == VarPlayer.CHARGE_GOD_SPELL && config.showCharge())
{
updateVarTimer(CHARGE, event.getValue(), i -> i * 2);
}
if (event.getVarbitId() == Varbits.IMBUED_HEART_COOLDOWN && config.showImbuedHeart())
{
updateVarTimer(IMBUEDHEART, event.getValue(), i -> i * 10);
}
if (event.getVarbitId() == Varbits.DRAGONFIRE_SHIELD_COOLDOWN && config.showDFSSpecial())
{
updateVarTimer(DRAGON_FIRE_SHIELD, event.getValue(), i -> i * 8);
}
if (event.getVarpId() == LAST_HOME_TELEPORT && config.showHomeMinigameTeleports())
{
checkTeleport(LAST_HOME_TELEPORT);
}
if (event.getVarpId() == LAST_MINIGAME_TELEPORT && config.showHomeMinigameTeleports())
{
checkTeleport(LAST_MINIGAME_TELEPORT);
}
if (event.getVarbitId() == Varbits.RUN_SLOWED_DEPLETION_ACTIVE
|| event.getVarbitId() == Varbits.STAMINA_EFFECT
|| event.getVarbitId() == Varbits.RING_OF_ENDURANCE_EFFECT)
{
// staminaEffectActive is checked to match https://github.com/Joshua-F/cs2-scripts/blob/741271f0c3395048c1bad4af7881a13734516adf/scripts/%5Bproc%2Cbuff_bar_get_value%5D.cs2#L25
int staminaEffectActive = client.getVarbitValue(Varbits.RUN_SLOWED_DEPLETION_ACTIVE);
int staminaPotionEffectVarb = client.getVarbitValue(Varbits.STAMINA_EFFECT);
int enduranceRingEffectVarb = client.getVarbitValue(Varbits.RING_OF_ENDURANCE_EFFECT);
final int totalStaminaEffect = staminaPotionEffectVarb + enduranceRingEffectVarb;
if (staminaEffectActive == 1 && config.showStamina())
{
updateVarTimer(STAMINA, totalStaminaEffect, i -> i * 10);
}
}
if (event.getVarbitId() == Varbits.ANTIFIRE && config.showAntiFire())
{
final int antifireVarb = event.getValue();
final int tickCount = client.getTickCount();
if (antifireVarb == 0)
{
nextAntifireTick = -1;
}
else if (nextAntifireTick - tickCount <= 0)
{
nextAntifireTick = tickCount + ANTIFIRE_TICK_LENGTH;
}
updateVarTimer(ANTIFIRE, antifireVarb, i -> nextAntifireTick - tickCount + (i - 1) * ANTIFIRE_TICK_LENGTH);
}
if (event.getVarbitId() == Varbits.SUPER_ANTIFIRE && config.showAntiFire())
{
final int superAntifireVarb = event.getValue();
final int tickCount = client.getTickCount();
if (superAntifireVarb == 0)
{
nextSuperAntifireTick = -1;
}
else if (nextSuperAntifireTick - tickCount <= 0)
{
nextSuperAntifireTick = tickCount + SUPERANTIFIRE_TICK_LENGTH;
}
updateVarTimer(SUPERANTIFIRE, event.getValue(), i -> nextSuperAntifireTick - tickCount + (i - 1) * SUPERANTIFIRE_TICK_LENGTH);
}
if (event.getVarbitId() == Varbits.MAGIC_IMBUE && config.showMagicImbue())
{
updateVarTimer(MAGICIMBUE, event.getValue(), i -> i * 10);
}
if (event.getVarbitId() == Varbits.DIVINE_SUPER_ATTACK && config.showDivine())
{
if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue())
{
return;
}
updateVarTimer(DIVINE_SUPER_ATTACK, event.getValue(), IntUnaryOperator.identity());
}
if (event.getVarbitId() == Varbits.DIVINE_SUPER_STRENGTH && config.showDivine())
{
if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue())
{
return;
}
updateVarTimer(DIVINE_SUPER_STRENGTH, event.getValue(), IntUnaryOperator.identity());
}
if (event.getVarbitId() == Varbits.DIVINE_SUPER_DEFENCE && config.showDivine())
{
if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue()
|| client.getVarbitValue(Varbits.DIVINE_BASTION) > event.getValue()
|| client.getVarbitValue(Varbits.DIVINE_BATTLEMAGE) > event.getValue()
// When drinking a dose of moonlight potion while already under its effects, desync between
// Varbits.MOONLIGHT_POTION and Varbits.DIVINE_SUPER_DEFENCE can occur, with the latter being 1 tick
// greater
|| client.getVarbitValue(Varbits.MOONLIGHT_POTION) >= event.getValue())
{
return;
}
if (client.getVarbitValue(Varbits.MOONLIGHT_POTION) < event.getValue())
{
removeVarTimer(MOONLIGHT_POTION);
}
updateVarTimer(DIVINE_SUPER_DEFENCE, event.getValue(), IntUnaryOperator.identity());
}
if (event.getVarbitId() == Varbits.DIVINE_RANGING && config.showDivine())
{
if (client.getVarbitValue(Varbits.DIVINE_BASTION) > event.getValue())
{
return;
}
updateVarTimer(DIVINE_RANGING, event.getValue(), IntUnaryOperator.identity());
}
if (event.getVarbitId() == Varbits.DIVINE_MAGIC && config.showDivine())
{
if (client.getVarbitValue(Varbits.DIVINE_BATTLEMAGE) > event.getValue())
{
return;
}
updateVarTimer(DIVINE_MAGIC, event.getValue(), IntUnaryOperator.identity());
}
if (event.getVarbitId() == Varbits.DIVINE_SUPER_COMBAT && config.showDivine())
{
if (client.getVarbitValue(Varbits.DIVINE_SUPER_ATTACK) == event.getValue())
{
removeVarTimer(DIVINE_SUPER_ATTACK);
}
if (client.getVarbitValue(Varbits.DIVINE_SUPER_STRENGTH) == event.getValue())
{
removeVarTimer(DIVINE_SUPER_STRENGTH);
}
if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue())
{
removeVarTimer(DIVINE_SUPER_DEFENCE);
}
updateVarTimer(DIVINE_SUPER_COMBAT, event.getValue(), IntUnaryOperator.identity());
}
if (event.getVarbitId() == Varbits.DIVINE_BASTION && config.showDivine())
{
if (client.getVarbitValue(Varbits.DIVINE_RANGING) == event.getValue())
{
removeVarTimer(DIVINE_RANGING);
}
if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue())
{
removeVarTimer(DIVINE_SUPER_DEFENCE);
}
updateVarTimer(DIVINE_BASTION, event.getValue(), IntUnaryOperator.identity());
}
if (event.getVarbitId() == Varbits.DIVINE_BATTLEMAGE && config.showDivine())
{
if (client.getVarbitValue(Varbits.DIVINE_MAGIC) == event.getValue())
{
removeVarTimer(DIVINE_MAGIC);
}
if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue())
{
removeVarTimer(DIVINE_SUPER_DEFENCE);
}
updateVarTimer(DIVINE_BATTLEMAGE, event.getValue(), IntUnaryOperator.identity());
}
if (event.getVarbitId() == Varbits.BUFF_STAT_BOOST && config.showOverload())
{
updateVarTimer(SMELLING_SALTS, event.getValue(), i -> i * 25);
}
if (event.getVarbitId() == Varbits.MENAPHITE_REMEDY && config.showMenaphiteRemedy())
{
updateVarTimer(MENAPHITE_REMEDY, event.getValue(), i -> i * 25);
}
if (event.getVarbitId() == Varbits.LIQUID_ADERNALINE_ACTIVE && event.getValue() == 0 && config.showLiquidAdrenaline())
{
removeGameTimer(LIQUID_ADRENALINE);
}
if (event.getVarbitId() == Varbits.FARMERS_AFFINITY && config.showFarmersAffinity())
{
updateVarTimer(FARMERS_AFFINITY, event.getValue(), i -> i * 20);
}
if (event.getVarbitId() == Varbits.GOD_WARS_ALTAR_COOLDOWN && config.showGodWarsAltar())
{
updateVarTimer(GOD_WARS_ALTAR, event.getValue(), i -> i * 100);
}
if (event.getVarbitId() == Varbits.CURSE_OF_THE_MOONS && config.showCurseOfTheMoons())
{
final int regionID = WorldPoint.fromLocal(client, client.getLocalPlayer().getLocalLocation()).getRegionID();
if (regionID == ECLIPSE_MOON_REGION_ID)
{
updateVarCounter(CURSE_OF_THE_MOONS_ECLIPSE, event.getValue());
}
else
{
updateVarCounter(CURSE_OF_THE_MOONS_BLUE, event.getValue());
}
}
if (event.getVarbitId() == Varbits.COLOSSEUM_DOOM && config.showColosseumDoom())
{
updateVarCounter(COLOSSEUM_DOOM, event.getValue());
}
if (event.getVarbitId() == Varbits.MOONLIGHT_POTION && config.showMoonlightPotion())
{
int moonlightValue = event.getValue();
// Increase the timer by 1 tick in case of desync due to drinking a dose of moonlight potion while already
// under its effects. Otherwise, the timer would be 1 tick shorter than it is meant to be.
if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == moonlightValue + 1)
{
moonlightValue++;
}
updateVarTimer(MOONLIGHT_POTION, moonlightValue, IntUnaryOperator.identity());
}
}
|
@Test
public void testNMZOverload()
{
when(timersAndBuffsConfig.showOverload()).thenReturn(true);
ArgumentCaptor<Predicate<InfoBox>> prcaptor = ArgumentCaptor.forClass(Predicate.class);
VarbitChanged varbitChanged = new VarbitChanged();
varbitChanged.setVarbitId(Varbits.NMZ_OVERLOAD_REFRESHES_REMAINING);
varbitChanged.setValue(9);
timersAndBuffsPlugin.onVarbitChanged(varbitChanged);
TimerTimer overloadInfobox = new TimerTimer(GameTimer.OVERLOAD, Duration.ofSeconds(135), timersAndBuffsPlugin);
verify(infoBoxManager).addInfoBox(any());
verify(infoBoxManager).removeIf(prcaptor.capture());
Predicate<InfoBox> pred = prcaptor.getValue();
assertTrue(pred.test(overloadInfobox));
// Remove on running out
varbitChanged.setValue(0);
timersAndBuffsPlugin.onVarbitChanged(varbitChanged);
verify(infoBoxManager).addInfoBox(any());
verify(infoBoxManager, times(2)).removeIf(any());
}
|
@Override
protected OAuth2AccessToken createToken(String accessToken, String tokenType, Integer expiresIn, String refreshToken, String scope,
JsonNode response, String rawResponse) {
var token = super.createToken(accessToken, tokenType, expiresIn, refreshToken,
scope, response, rawResponse);
var uid = extractRequiredParameter(response, "uid", rawResponse).asText();
if (uid == null || Pac4jConstants.EMPTY_STRING.equals(uid)) {
throw new OAuthException(
"There is no required UID in the response of the AssessToken endpoint.");
}
return new WeiboToken(token, uid);
}
|
@Test(expected = OAuthException.class)
public void createTokenWithOutUid() throws IOException {
extractor.createToken("ACCESS_TOKEN", null,
123, null, null, mapper.readTree(responseError), responseError);
}
|
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
}
|
@Test
public void shouldNotMatchIfNoneFound() {
// Given:
givenFunctions(
function(EXPECTED, -1, STRING)
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> udfIndex.getFunction(ImmutableList.of(SqlArgument.of(INTEGER)))
);
// Then:
assertThat(e.getMessage(), containsString("Function 'name' does not accept parameters "
+ "(INTEGER)"));
}
|
public DeleteGranularity deleteGranularity() {
String valueAsString =
confParser
.stringConf()
.option(SparkWriteOptions.DELETE_GRANULARITY)
.tableProperty(TableProperties.DELETE_GRANULARITY)
.defaultValue(TableProperties.DELETE_GRANULARITY_DEFAULT)
.parse();
return DeleteGranularity.fromString(valueAsString);
}
|
@TestTemplate
public void testDeleteGranularityTableProperty() {
Table table = validationCatalog.loadTable(tableIdent);
table
.updateProperties()
.set(TableProperties.DELETE_GRANULARITY, DeleteGranularity.FILE.toString())
.commit();
SparkWriteConf writeConf = new SparkWriteConf(spark, table, ImmutableMap.of());
DeleteGranularity value = writeConf.deleteGranularity();
assertThat(value).isEqualTo(DeleteGranularity.FILE);
}
|
public List<ShardingCondition> createShardingConditions(final InsertStatementContext sqlStatementContext, final List<Object> params) {
List<ShardingCondition> result = null == sqlStatementContext.getInsertSelectContext()
? createShardingConditionsWithInsertValues(sqlStatementContext, params)
: createShardingConditionsWithInsertSelect(sqlStatementContext, params);
appendGeneratedKeyConditions(sqlStatementContext, result);
return result;
}
|
@Test
void assertCreateShardingConditionsInsertStatementWithGeneratedKeyContext() {
when(insertStatementContext.getGeneratedKeyContext()).thenReturn(Optional.of(mock(GeneratedKeyContext.class)));
List<ShardingCondition> shardingConditions = shardingConditionEngine.createShardingConditions(insertStatementContext, Collections.emptyList());
assertThat(shardingConditions.get(0).getStartIndex(), is(0));
assertTrue(shardingConditions.get(0).getValues().isEmpty());
}
|
void growCatalog(final long maxCatalogCapacity, final int frameLength)
{
final long oldCapacity = capacity;
final long recordingOffset = nextRecordingDescriptorOffset;
final long targetCapacity = recordingOffset + frameLength;
if (targetCapacity > maxCatalogCapacity)
{
if (maxCatalogCapacity == oldCapacity)
{
throw new ArchiveException("catalog is full, max capacity reached: " + maxCatalogCapacity);
}
else
{
throw new ArchiveException(
"recording is too big: total recording length is " + frameLength + " bytes," +
" available space is " + (maxCatalogCapacity - recordingOffset) + " bytes");
}
}
long newCapacity = oldCapacity;
while (newCapacity < targetCapacity)
{
newCapacity = min(newCapacity + (newCapacity >> 1), maxCatalogCapacity);
}
final MappedByteBuffer mappedByteBuffer;
try
{
unmapAndCloseChannel();
catalogChannel = FileChannel.open(catalogFile.toPath(), READ, WRITE, SPARSE);
mappedByteBuffer = catalogChannel.map(READ_WRITE, 0, newCapacity);
}
catch (final Exception ex)
{
close();
LangUtil.rethrowUnchecked(ex);
return;
}
capacity = newCapacity;
initBuffers(mappedByteBuffer);
final UnsafeBuffer catalogHeaderBuffer = new UnsafeBuffer(catalogByteBuffer);
catalogHeaderDecoder.wrap(
catalogHeaderBuffer, 0, CatalogHeaderDecoder.BLOCK_LENGTH, CatalogHeaderDecoder.SCHEMA_VERSION);
catalogHeaderEncoder.wrap(catalogHeaderBuffer, 0);
catalogResized(oldCapacity, newCapacity);
}
|
@Test
void growCatalogThrowsArchiveExceptionIfRecordingIsTooBig()
{
try (Catalog catalog = new Catalog(archiveDir, null, 0, CAPACITY, clock, null, segmentFileBuffer))
{
final ArchiveException exception = assertThrows(
ArchiveException.class, () -> catalog.growCatalog(CAPACITY * 2, Integer.MAX_VALUE));
assertEquals(
"ERROR - recording is too big: total recording length is " + Integer.MAX_VALUE +
" bytes, available space is " + (CAPACITY * 2 - 800) + " bytes",
exception.getMessage());
}
}
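A standalone sketch of the 1.5x growth rule used by growCatalog above; the values are illustrative:
// Capacity grows by half each step and is capped at the maximum,
// so it reaches the target in a handful of remappings.
long capacity = 1024, target = 5000, max = 8192;
while (capacity < target) {
    capacity = Math.min(capacity + (capacity >> 1), max);
    System.out.println(capacity); // 1536, 2304, 3456, 5184
}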
|
public static void updateInt(Checksum checksum, int input) {
checksum.update((byte) (input >> 24));
checksum.update((byte) (input >> 16));
checksum.update((byte) (input >> 8));
checksum.update((byte) input /* >> 0 */);
}
|
@Test
public void testUpdateInt() {
final int value = 1000;
final ByteBuffer buffer = ByteBuffer.allocate(4);
buffer.putInt(value);
Checksum crc1 = Crc32C.create();
Checksum crc2 = Crc32C.create();
Checksums.updateInt(crc1, value);
crc2.update(buffer.array(), buffer.arrayOffset(), 4);
assertEquals(crc1.getValue(), crc2.getValue(), "Crc values should be the same");
}
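The big-endian byte decomposition in updateInt generalizes to wider types; a hypothetical updateLong in the same style (an assumption, not necessarily the library's own API) would be:
// Hypothetical companion to updateInt: feeds a long to the Checksum
// one byte at a time, most significant byte first.
public static void updateLong(Checksum checksum, long input) {
    for (int shift = 56; shift >= 0; shift -= 8) {
        checksum.update((byte) (input >> shift));
    }
}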
|
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType) throws InstantiationException, ValidationException, ArtifactBuildException, BvdException {
final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class);
final var status = OpenSAMLUtils.buildSAMLObject(Status.class);
final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class);
final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class);
return ArtifactResponseBuilder
.newInstance(artifactResponse)
.addID()
.addIssueInstant()
.addInResponseTo(artifactResolveRequest.getArtifactResolve().getID())
.addStatus(StatusBuilder
.newInstance(status)
.addStatusCode(statusCode, StatusCode.SUCCESS)
.build())
.addIssuer(issuer, entityId)
.addMessage(buildResponse(artifactResolveRequest, entityId, signType))
.addSignature(signatureService, signType)
.build();
}
|
@Test
void validateMultipleAssertionsArePresent() throws BvdException, ValidationException, SamlParseException, ArtifactBuildException, InstantiationException, MetadataException, JsonProcessingException {
when(bvdClientMock.retrieveRepresentationAffirmations(anyString())).thenReturn(getBvdResponse());
ArtifactResponse artifactResponse = artifactResponseService.buildArtifactResponse(getArtifactResolveRequest("success", true, true, SAML_COMBICONNECT, EncryptionType.BSN, ENTRANCE_ENTITY_ID), ENTRANCE_ENTITY_ID, TD);
Response response = (Response) artifactResponse.getMessage();
Assertion assertion = response.getAssertions().get(0);
verify(bvdClientMock, times(1)).retrieveRepresentationAffirmations(anyString());
assertNotNull(assertion.getAdvice());
assertEquals(2, assertion.getAdvice().getAssertions().size());
}
|
public static String hashForJson(String json) {
String regularizedJson = removeWhiteSpaceFromJson(json);
return computeHashFor(regularizedJson);
}
|
@Test
public void hashesAreTheSame() {
String input1 = "{\"a\":123,\"b\":456}";
String input2 = "{\n\"a\":123,\n\"b\":456\n}";
String input3 = "{\n \"a\":123,\n \"b\":456\n}";
String input4 = "{\n \"a\": 123,\n \"b\": 456\n}";
String hash1 = HashUtils.hashForJson(input1);
String hash2 = HashUtils.hashForJson(input2);
String hash3 = HashUtils.hashForJson(input3);
String hash4 = HashUtils.hashForJson(input4);
assertThat(hash1.equals(hash2), is(true));
assertThat(hash2.equals(hash3), is(true));
assertThat(hash3.equals(hash4), is(true));
}
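removeWhiteSpaceFromJson is not shown here; a safe way to regularize JSON without touching whitespace inside string values is a parse/serialize round trip. A minimal sketch assuming Jackson is available (the real helper may differ):
// Sketch only: re-serialization drops formatting whitespace but preserves
// whitespace embedded in string values, which naive stripping would corrupt.
static String removeWhiteSpaceFromJson(String json) throws Exception {
    com.fasterxml.jackson.databind.ObjectMapper mapper =
        new com.fasterxml.jackson.databind.ObjectMapper();
    return mapper.writeValueAsString(mapper.readTree(json));
}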
|
public <T> T retry(Supplier<T> action, Predicate<T> condition, String logDescription, boolean failWithException) {
long startTime = System.currentTimeMillis();
int count = 0, max = getRetryCount();
T result;
boolean success;
do {
if (count > 0) {
logger.debug("{} - retry #{}", logDescription, count);
sleep();
}
result = action.get();
success = condition.test(result);
} while (!success && count++ < max);
if (!success) {
long elapsedTime = System.currentTimeMillis() - startTime;
String message = logDescription + ": failed after " + (count - 1) + " retries and " + elapsedTime + " milliseconds";
logger.warn(message);
if (failWithException) {
throw new RuntimeException(message);
}
}
return result;
}
|
@Test
void testRetry() {
DriverOptions options = new DriverOptions(Collections.emptyMap(), TestUtils.runtime(), 0, null);
options.retry(() -> 1, x -> x < 5, "not 5", false);
}
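An illustrative call to retry: poll a flaky supplier until the success condition holds, throwing if retries run out (the supplier and condition below are made up for the example):
java.util.concurrent.atomic.AtomicInteger attempts = new java.util.concurrent.atomic.AtomicInteger();
Integer result = options.retry(
    attempts::incrementAndGet, // action: yields 1, 2, 3, ...
    value -> value >= 3,       // success condition: stop once we reach 3
    "wait for third attempt",
    true);                     // failWithException: throw if retries are exhausted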
|
public static ParamType getVarArgsSchemaFromType(final Type type) {
return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetStringSchemaFromStructClassVariadic() {
assertThat(
UdfUtil.getVarArgsSchemaFromType(Struct.class),
equalTo(StructType.ANY_STRUCT)
);
}
|
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
}
|
@Test
public void shouldThrowOnSerializingValueError() {
// Given:
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
allAndPseudoColumnNames(SCHEMA),
ImmutableList.of(
new LongLiteral(1L),
new StringLiteral("str"),
new StringLiteral("str"),
new LongLiteral(2L))
);
when(valueSerializer.serialize(any(), any()))
.thenThrow(new SerializationException("Jibberish!"));
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
);
// Then:
assertThat(e.getCause(), (hasMessage(containsString("Could not serialize value"))));
}
|
public synchronized ImmutableList<Row> readTable(String tableId)
throws BigtableResourceManagerException {
return readTable(tableId, null);
}
|
@Test
public void testReadTableShouldThrowErrorWhenInstanceDoesNotExist() {
assertThrows(IllegalStateException.class, () -> testManager.readTable(TABLE_ID));
}
|
@Subscribe
public void onChatMessage(ChatMessage event)
{
if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM)
{
String message = Text.removeTags(event.getMessage());
Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
{
notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
}
else if (dodgyBreakMatcher.find())
{
notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
}
else if (dodgyCheckMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
}
else if (dodgyProtectMatcher.find())
{
updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
}
else if (amuletOfChemistryCheckMatcher.find())
{
updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
}
else if (amuletOfChemistryUsedMatcher.find())
{
final String match = amuletOfChemistryUsedMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateAmuletOfChemistryCharges(charges);
}
else if (amuletOfChemistryBreakMatcher.find())
{
notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
}
else if (amuletOfBountyCheckMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
}
else if (amuletOfBountyUsedMatcher.find())
{
updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
}
else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
{
updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
}
else if (message.contains(BINDING_BREAK_TEXT))
{
notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
}
else if (bindingNecklaceUsedMatcher.find())
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
if (equipment.contains(ItemID.BINDING_NECKLACE))
{
updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
}
}
else if (bindingNecklaceCheckMatcher.find())
{
final String match = bindingNecklaceCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateBindingNecklaceCharges(charges);
}
else if (ringOfForgingCheckMatcher.find())
{
final String match = ringOfForgingCheckMatcher.group(1);
int charges = 1;
if (!match.equals("one"))
{
charges = Integer.parseInt(match);
}
updateRingOfForgingCharges(charges);
}
else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player smelted with a Ring of Forging equipped.
if (equipment == null)
{
return;
}
if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1))
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
updateRingOfForgingCharges(charges);
}
}
else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
{
notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
}
else if (chronicleAddMatcher.find())
{
final String match = chronicleAddMatcher.group(1);
if (match.equals("one"))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
}
}
else if (chronicleUseAndCheckMatcher.find())
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
}
else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
}
else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
}
else if (message.equals(CHRONICLE_FULL_TEXT))
{
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
}
else if (slaughterActivateMatcher.find())
{
final String found = slaughterActivateMatcher.group(1);
if (found == null)
{
updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
}
else
{
updateBraceletOfSlaughterCharges(Integer.parseInt(found));
}
}
else if (slaughterCheckMatcher.find())
{
updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
}
else if (expeditiousActivateMatcher.find())
{
final String found = expeditiousActivateMatcher.group(1);
if (found == null)
{
updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
}
else
{
updateExpeditiousBraceletCharges(Integer.parseInt(found));
}
}
else if (expeditiousCheckMatcher.find())
{
updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
}
else if (bloodEssenceCheckMatcher.find())
{
updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
}
else if (bloodEssenceExtractMatcher.find())
{
updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
}
else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
{
updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
}
else if (braceletOfClayCheckMatcher.find())
{
updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
}
else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
{
final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
// Determine if the player mined with a Bracelet of Clay equipped.
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
{
final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
boolean ignore = inventory != null
&& inventory.count() == 27
&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
if (!ignore)
{
int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
updateBraceletOfClayCharges(charges);
}
}
}
else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
{
notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
}
}
}
|
@Test
public void testExpeditiousCheck()
{
ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHECK_EXPEDITIOUS_BRACELET, "", 0);
itemChargePlugin.onChatMessage(chatMessage);
verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_EXPEDITIOUS_BRACELET, 6);
}
|
public static String camelCaseToDash(String text) {
if (text == null || text.isEmpty()) {
return text;
}
StringBuilder answer = new StringBuilder();
Character prev = null;
Character next;
char[] arr = text.toCharArray();
for (int i = 0; i < arr.length; i++) {
char ch = arr[i];
if (i < arr.length - 1) {
next = arr[i + 1];
} else {
next = null;
}
if (ch == '-' || ch == '_') {
answer.append("-");
} else if (Character.isUpperCase(ch) && prev != null && !Character.isUpperCase(prev)) {
applyDashPrefix(prev, answer, ch);
} else if (Character.isUpperCase(ch) && prev != null && next != null && Character.isLowerCase(next)) {
applyDashPrefix(prev, answer, ch);
} else {
answer.append(Character.toLowerCase(ch));
}
prev = ch;
}
return answer.toString();
}
|
@Test
public void testCamelCaseToDash() {
assertNull(camelCaseToDash(null));
assertEquals("", camelCaseToDash(""));
assertEquals("hello-world", camelCaseToDash("HelloWorld"));
assertEquals("hello-big-world", camelCaseToDash("HelloBigWorld"));
assertEquals("hello-big-world", camelCaseToDash("Hello-bigWorld"));
assertEquals("my-id", camelCaseToDash("MyId"));
assertEquals("my-id", camelCaseToDash("MyID"));
assertEquals("my-url", camelCaseToDash("MyUrl"));
assertEquals("my-url", camelCaseToDash("MyURL"));
assertEquals("my-big-id", camelCaseToDash("MyBigId"));
assertEquals("my-big-id", camelCaseToDash("MyBigID"));
assertEquals("my-big-url", camelCaseToDash("MyBigUrl"));
assertEquals("my-big-url", camelCaseToDash("MyBigURL"));
assertEquals("my-big-id-again", camelCaseToDash("MyBigIdAgain"));
assertEquals("my-big-id-again", camelCaseToDash("MyBigIDAgain"));
assertEquals("my-big-url-again", camelCaseToDash("MyBigUrlAgain"));
assertEquals("my-big-url-again", camelCaseToDash("MyBigURLAgain"));
assertEquals("use-mdc-logging", camelCaseToDash("UseMDCLogging"));
assertEquals("mdc-logging-keys-pattern", camelCaseToDash("MDCLoggingKeysPattern"));
assertEquals("available-phone-number-country", camelCaseToDash("AVAILABLE_PHONE_NUMBER_COUNTRY"));
assertEquals("available-phone-number-country", camelCaseToDash("AVAILABLE-PHONE_NUMBER-COUNTRY"));
assertEquals("available-phone-number-country", camelCaseToDash("Available-Phone-Number-Country"));
assertEquals("available-phone-number-country", camelCaseToDash("Available_Phone_Number_Country"));
assertEquals("available-phone-number-country", camelCaseToDash("available_phone_number_country"));
assertEquals("available-phone-number-country", camelCaseToDash("availablePhoneNumberCountry"));
assertEquals("available-phone-number-country", camelCaseToDash("AvailablePhoneNumberCountry"));
assertEquals("enable-cors", camelCaseToDash("enableCORS"));
}
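applyDashPrefix is referenced above but not shown; a plausible sketch consistent with the expected outputs (an assumption, not the verified original) inserts a dash only when the previous character is not already a separator:
// Insert a dash at an upper-case boundary unless one is already there,
// then append the lower-cased character.
private static void applyDashPrefix(Character prev, StringBuilder answer, char ch) {
    if (prev != '-' && prev != '_') {
        answer.append('-');
    }
    answer.append(Character.toLowerCase(ch));
}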
|
@Override
public boolean put(PageId pageId, ByteBuffer page, CacheContext cacheContext) {
LOG.debug("put({},{} bytes) enters", pageId, page.remaining());
if (mState.get() != READ_WRITE) {
Metrics.PUT_NOT_READY_ERRORS.inc();
Metrics.PUT_ERRORS.inc();
return false;
}
int originPosition = page.position();
if (!mOptions.isAsyncWriteEnabled()) {
boolean ok = putInternal(pageId, page, cacheContext);
LOG.debug("put({},{} bytes) exits: {}", pageId, page.position() - originPosition, ok);
if (!ok) {
Metrics.PUT_ERRORS.inc();
}
return ok;
}
if (!mPendingRequests.add(pageId)) { // already queued
return false;
}
try {
mAsyncCacheExecutor.get().submit(() -> {
try {
boolean ok = putInternal(pageId, page, cacheContext);
if (!ok) {
Metrics.PUT_ERRORS.inc();
}
} finally {
mPendingRequests.remove(pageId);
}
});
} catch (RejectedExecutionException e) { // queue is full, skip
// RejectedExecutionException may be thrown in extreme cases of highly
// concurrent caching workloads. In those cases, return false
mPendingRequests.remove(pageId);
Metrics.PUT_ASYNC_REJECTION_ERRORS.inc();
Metrics.PUT_ERRORS.inc();
LOG.debug("put({},{} bytes) fails due to full queue", pageId,
page.position() - originPosition);
return false;
}
LOG.debug("put({},{} bytes) exits with async write", pageId, page.position() - originPosition);
return true;
}
|
@Test
public void noEvictionPolicy() throws Exception {
mEvictor = new UnevictableCacheEvictor(mCacheManagerOptions.getCacheEvictorOptions());
mPageMetaStore = new DefaultPageMetaStore(ImmutableList.of(mPageStoreDir));
mCacheManager = createLocalCacheManager();
long numPages = mPageStoreOptions.getCacheSize() / PAGE_SIZE_BYTES;
for (int i = 0; i < numPages; i++) {
PageId id = pageId(i, 0);
assertTrue(mCacheManager.put(id, PAGE1));
}
assertFalse(mCacheManager.put(pageId(numPages, 0), PAGE1));
}
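The RejectedExecutionException branch in put fires when the async executor saturates; a minimal sketch of a bounded pool configured to reject rather than block (an assumed setup, not the component's actual executor):
// Two workers, a 16-slot queue, and AbortPolicy: once the queue is full,
// submit() throws RejectedExecutionException, which put() answers by
// removing the page from mPendingRequests and returning false.
java.util.concurrent.ExecutorService asyncCacheExecutor =
    new java.util.concurrent.ThreadPoolExecutor(
        2, 2, 60L, java.util.concurrent.TimeUnit.SECONDS,
        new java.util.concurrent.ArrayBlockingQueue<>(16),
        new java.util.concurrent.ThreadPoolExecutor.AbortPolicy());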
|
@Udf
public <T> List<T> distinct(
@UdfParameter(description = "Array of values to distinct") final List<T> input) {
if (input == null) {
return null;
}
final Set<T> distinctVals = Sets.newLinkedHashSetWithExpectedSize(input.size());
distinctVals.addAll(input);
return new ArrayList<>(distinctVals);
}
|
@Test
public void shouldReturnNullForNullInput() {
final List<Double> result = udf.distinct((List<Double>) null);
assertThat(result, is(nullValue()));
}
|
@Override
public String rpcType() {
return RpcTypeEnum.SOFA.getName();
}
|
@Test
public void testPluginNamed() {
assertEquals(sofaMetaDataHandler.rpcType(), RpcTypeEnum.SOFA.getName());
}
|
public List<Flow> convertFlows(String componentName, @Nullable DbIssues.Locations issueLocations) {
if (issueLocations == null) {
return Collections.emptyList();
}
return issueLocations.getFlowList().stream()
.map(sourceFlow -> toFlow(componentName, sourceFlow))
.collect(Collectors.toCollection(LinkedList::new));
}
|
@Test
public void convertFlows_with2FlowsSingleDbLocations_returnsCorrectFlow() {
DbIssues.Location location1 = createDbLocation("comp_id_1");
DbIssues.Location location2 = createDbLocation("comp_id_2");
DbIssues.Locations issueLocations = DbIssues.Locations.newBuilder()
.addFlow(createFlow(location1))
.addFlow(createFlow(location2))
.build();
List<Flow> flows = flowGenerator.convertFlows(COMPONENT_NAME, issueLocations);
assertThat(flows).hasSize(2).extracting(f -> f.getLocations().size()).containsExactly(1, 1);
Map<String, DbIssues.Location> toDbLocation = Map.of(
"file_path_" + location1.getComponentId(), location1,
"file_path_" + location2.getComponentId(), location2);
flows.stream()
.map(actualFlow -> actualFlow.getLocations().iterator().next())
.forEach(l -> assertLocationMatches(l, toDbLocation.get(l.getFilePath())));
}
|
public String build( final String cellValue ) {
switch ( type ) {
case FORALL:
return buildForAll( cellValue );
case INDEXED:
return buildMulti( cellValue );
default:
return buildSingle( cellValue );
}
}
|
@Test
public void testForAllAndNone() {
final String snippet = "forall(&&){something == $}";
final SnippetBuilder snip = new SnippetBuilder(snippet);
final String result = snip.build("");
assertThat(result).isEqualTo("forall(&&){something == $}");
}
|
@Override
public PinotDataBuffer newBuffer(String column, IndexType<?, ?, ?> type, long sizeBytes)
throws IOException {
IndexKey key = new IndexKey(column, type);
return getWriteBufferFor(key, sizeBytes);
}
|
@Test(expectedExceptions = RuntimeException.class)
public void testWriteExisting()
throws Exception {
try (FilePerIndexDirectory columnDirectory = new FilePerIndexDirectory(TEMP_DIR, _segmentMetadata, ReadMode.mmap)) {
columnDirectory.newBuffer("column1", StandardIndexes.dictionary(), 1024);
}
try (FilePerIndexDirectory columnDirectory = new FilePerIndexDirectory(TEMP_DIR, _segmentMetadata, ReadMode.mmap)) {
columnDirectory.newBuffer("column1", StandardIndexes.dictionary(), 1024);
}
}
|
@Override
public List<RoleDO> getRoleListFromCache(Collection<Long> ids) {
if (CollectionUtil.isEmpty(ids)) {
return Collections.emptyList();
}
// Fetch from the cache one id at a time in a loop here, mainly because Spring CacheManager does not support batch operations
RoleServiceImpl self = getSelf();
return CollectionUtils.convertList(ids, self::getRoleFromCache);
}
|
@Test
public void testGetRoleListFromCache() {
try (MockedStatic<SpringUtil> springUtilMockedStatic = mockStatic(SpringUtil.class)) {
springUtilMockedStatic.when(() -> SpringUtil.getBean(eq(RoleServiceImpl.class)))
.thenReturn(roleService);
// mock data
RoleDO dbRole = randomPojo(RoleDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
roleMapper.insert(dbRole);
// insert another record whose id does not match
roleMapper.insert(cloneIgnoreId(dbRole, o -> {}));
// prepare parameters
Collection<Long> ids = singleton(dbRole.getId());
// invoke
List<RoleDO> list = roleService.getRoleListFromCache(ids);
// assert
assertEquals(1, list.size());
assertPojoEquals(dbRole, list.get(0));
}
}
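The getSelf() indirection above exists because Spring's @Cacheable is applied by a proxy: an internal this.getRoleFromCache(id) call would bypass it. A sketch of the assumed shape of the cached method and the self lookup (illustrative, not the verified original):
// Calling through the proxied bean keeps the cache interception in place.
@Cacheable(cacheNames = "role", key = "#id")
public RoleDO getRoleFromCache(Long id) {
    return roleMapper.selectById(id);
}

private RoleServiceImpl getSelf() {
    return SpringUtil.getBean(RoleServiceImpl.class); // proxied instance, not 'this'
}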
|
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "/{executionId}")
@Operation(tags = {"Executions"}, summary = "Get an execution")
public Execution get(
@Parameter(description = "The execution id") @PathVariable String executionId
) {
return executionRepository
.findById(tenantService.resolveTenant(), executionId)
.orElse(null);
}
|
@SuppressWarnings("unchecked")
@Test
void resumePausedWithInputs() throws TimeoutException, InterruptedException {
// Run execution until it is paused
Execution pausedExecution = runnerUtils.runOneUntilPaused(null, TESTS_FLOW_NS, "pause_on_resume");
assertThat(pausedExecution.getState().isPaused(), is(true));
File applicationFile = new File(Objects.requireNonNull(
ExecutionControllerTest.class.getClassLoader().getResource("application-test.yml")
).getPath());
MultipartBody multipartBody = MultipartBody.builder()
.addPart("asked", "myString")
.addPart("files", "data", MediaType.TEXT_PLAIN_TYPE, applicationFile)
.build();
// resume the execution
HttpResponse<?> resumeResponse = client.toBlocking().exchange(
HttpRequest.POST("/api/v1/executions/" + pausedExecution.getId() + "/resume", multipartBody)
.contentType(MediaType.MULTIPART_FORM_DATA_TYPE)
);
assertThat(resumeResponse.getStatus(), is(HttpStatus.NO_CONTENT));
// check that the execution is no more paused
Thread.sleep(100);
Execution execution = client.toBlocking().retrieve(
GET("/api/v1/executions/" + pausedExecution.getId()),
Execution.class);
assertThat(execution.getState().isPaused(), is(false));
Map<String, Object> outputs = (Map<String, Object>) execution.findTaskRunsByTaskId("pause").getFirst().getOutputs().get("onResume");
assertThat(outputs.get("asked"), is("myString"));
assertThat((String) outputs.get("data"), startsWith("kestra://"));
}
|
public static InternalLogger getInstance(Class<?> clazz) {
return getInstance(clazz.getName());
}
|
@Test
public void testDebug() {
final InternalLogger logger = InternalLoggerFactory.getInstance("mock");
logger.debug("a");
verify(mockLogger).debug("a");
}
|
@Override
public void reset() throws IOException {
createDirectory(PATH_DATA.getKey());
createDirectory(PATH_WEB.getKey());
createDirectory(PATH_LOGS.getKey());
File tempDir = createOrCleanTempDirectory(PATH_TEMP.getKey());
try (AllProcessesCommands allProcessesCommands = new AllProcessesCommands(tempDir)) {
allProcessesCommands.clean();
}
}
|
@Test
public void reset_deletes_content_of_temp_dir_but_not_sharedmemory_file() throws Exception {
assertThat(tempDir.mkdir()).isTrue();
File sharedmemory = new File(tempDir, "sharedmemory");
assertThat(sharedmemory.createNewFile()).isTrue();
FileUtils.write(sharedmemory, "toto");
Object fileKey = getFileKey(sharedmemory);
Object tempDirKey = getFileKey(tempDir);
File fileInTempDir = new File(tempDir, "someFile.txt");
assertThat(fileInTempDir.createNewFile()).isTrue();
underTest.reset();
assertThat(tempDir).exists();
assertThat(fileInTempDir).doesNotExist();
assertThat(getFileKey(tempDir)).isEqualTo(tempDirKey);
assertThat(getFileKey(sharedmemory)).isEqualTo(fileKey);
// content of sharedMemory file is reset
assertThat(FileUtils.readFileToString(sharedmemory)).isNotEqualTo("toto");
}
|
public static void createTopics(
Logger log, String bootstrapServers, Map<String, String> commonClientConf,
Map<String, String> adminClientConf,
Map<String, NewTopic> topics, boolean failOnExisting) throws Throwable {
// this method wraps the call to createTopics() that takes an admin client, so
// that we can unit test the functionality with MockAdminClient. The exception is
// caught and re-thrown so that the admin client is closed when the method returns.
try (Admin adminClient
= createAdminClient(bootstrapServers, commonClientConf, adminClientConf)) {
createTopics(log, adminClient, topics, failOnExisting);
} catch (Exception e) {
log.warn("Failed to create or verify topics {}", topics, e);
throw e;
}
}
|
@Test
public void testCreatesNotExistingTopics() throws Throwable {
// should be no topics before the call
assertEquals(0, adminClient.listTopics().names().get().size());
WorkerUtils.createTopics(
log, adminClient, Collections.singletonMap(TEST_TOPIC, NEW_TEST_TOPIC), false);
assertEquals(Collections.singleton(TEST_TOPIC), adminClient.listTopics().names().get());
assertEquals(
new TopicDescription(
TEST_TOPIC, false,
Collections.singletonList(
new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList(), Collections.emptyList(), Collections.emptyList()))),
adminClient.describeTopics(Collections.singleton(TEST_TOPIC)).topicNameValues().get(TEST_TOPIC).get()
);
}
|
public SubClusterId getHomeSubcluster(
ApplicationSubmissionContext appSubmissionContext,
List<SubClusterId> blackListSubClusters) throws YarnException {
// the maps are concurrent, but we need to protect from reset()
// reinitialization mid-execution by creating a new reference local to this
// method.
Map<String, SubClusterPolicyConfiguration> cachedConfs = globalConfMap;
Map<String, FederationRouterPolicy> policyMap = globalPolicyMap;
if (appSubmissionContext == null) {
throw new FederationPolicyException(
"The ApplicationSubmissionContext cannot be null.");
}
String queue = appSubmissionContext.getQueue();
// Respecting YARN behavior, we assume the default queue if the queue is not
// specified. This also ensures that "null" can be used as a key to get the
// default behavior.
if (queue == null) {
queue = YarnConfiguration.DEFAULT_QUEUE_NAME;
}
FederationRouterPolicy policy = getFederationRouterPolicy(cachedConfs, policyMap, queue);
if (policy == null) {
// this should never happen, as the two maps are updated together
throw new FederationPolicyException("No FederationRouterPolicy found "
+ "for queue: " + appSubmissionContext.getQueue() + " (for "
+ "application: " + appSubmissionContext.getApplicationId() + ") "
+ "and no default specified.");
}
return policy.getHomeSubcluster(appSubmissionContext, blackListSubClusters);
}
|
@Test
public void testConfigurationUpdate() throws YarnException {
// in this test we see what happens when the configuration is changed
// between calls. We achieve this by changing what is in the store.
ApplicationSubmissionContext applicationSubmissionContext =
mock(ApplicationSubmissionContext.class);
when(applicationSubmissionContext.getQueue()).thenReturn(queue1);
// first call runs using standard UniformRandomRouterPolicy
SubClusterId chosen =
routerFacade.getHomeSubcluster(applicationSubmissionContext, null);
Assert.assertTrue(subClusterIds.contains(chosen));
Assert.assertTrue(routerFacade.globalPolicyMap
.get(queue1) instanceof UniformRandomRouterPolicy);
// then the operator changes how queue1 is routed setting it to
// PriorityRouterPolicy with weights favoring the first subcluster in
// subClusterIds.
store.setPolicyConfiguration(SetSubClusterPolicyConfigurationRequest
.newInstance(getPriorityPolicy(queue1)));
// second call is routed by new policy PriorityRouterPolicy
chosen = routerFacade.getHomeSubcluster(applicationSubmissionContext, null);
Assert.assertTrue(chosen.equals(subClusterIds.get(0)));
Assert.assertTrue(routerFacade.globalPolicyMap
.get(queue1) instanceof PriorityRouterPolicy);
}
|
@Override
public boolean canTransformResource(String resource) {
return JarFile.MANIFEST_NAME.equals(resource.toUpperCase(Locale.ROOT));
}
|
@Test
public void testCanTransformResource() {
assertTrue(transformer.canTransformResource("META-INF/MANIFEST.MF"));
assertTrue(transformer.canTransformResource("META-INF/manifest.mf"));
assertFalse(transformer.canTransformResource("MANIFEST.MF"));
assertFalse(transformer.canTransformResource("manifest.mf"));
}
|
@UdafFactory(description = "Compute sample standard deviation of column with type Integer.",
aggregateSchema = "STRUCT<SUM integer, COUNT bigint, M2 double>")
public static TableUdaf<Integer, Struct, Double> stdDevInt() {
return getStdDevImplementation(
0,
STRUCT_INT,
(agg, newValue) -> newValue + agg.getInt32(SUM),
(agg, newValue) ->
Double.valueOf(newValue * (agg.getInt64(COUNT) + 1) - (agg.getInt32(SUM) + newValue)),
(agg1, agg2) ->
Double.valueOf(
agg1.getInt32(SUM) / agg1.getInt64(COUNT)
- agg2.getInt32(SUM) / agg2.getInt64(COUNT)),
(agg1, agg2) -> agg1.getInt32(SUM) + agg2.getInt32(SUM),
(agg, valueToRemove) -> agg.getInt32(SUM) - valueToRemove);
}
|
@Test
public void shouldCalculateStdDevInts() {
final TableUdaf<Integer, Struct, Double> udaf = stdDevInt();
Struct agg = udaf.initialize();
final Integer[] values = new Integer[] {3, 5, 6, 7};
for (final Integer thisValue : values) {
agg = udaf.aggregate(thisValue, agg);
}
assertThat(agg.getInt64(COUNT), equalTo(4L));
assertThat(agg.getInt32(SUM), equalTo(21));
assertThat(agg.getFloat64(M2), equalTo(8.75));
final double standardDev = udaf.map(agg);
assertThat(standardDev, equalTo(2.9166666666666665));
}
|
public static <T, V> Collection<V> collectIf(
Iterable<T> iterable,
Predicate<? super T> predicate,
Function<? super T, V> function)
{
return FJIterate.collectIf(iterable, predicate, function, false);
}
|
@Test
public void collectIf()
{
this.iterables.each(this::basicCollectIf);
}
|
public static Function<Integer, Integer> composeFunctions(Function<Integer, Integer> f1, Function<Integer, Integer> f2) {
return f1.andThen(f2);
}
|
@Test
public void testComposeWithIdentity() {
Function<Integer, Integer> identity = Function.identity();
Function<Integer, Integer> timesThree = x -> x * 3;
Function<Integer, Integer> composedLeft = FunctionComposer.composeFunctions(identity, timesThree);
Function<Integer, Integer> composedRight = FunctionComposer.composeFunctions(timesThree, identity);
assertEquals("Composition with identity on the left should be the same", 9, (int) composedLeft.apply(3));
assertEquals("Composition with identity on the right should be the same", 9, (int) composedRight.apply(3));
}
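For contrast with composeFunctions, which uses andThen (f1 runs first), Function.compose runs its argument function first; a quick illustration:
// andThen: (3 + 1) * 2 = 8; compose: (3 * 2) + 1 = 7
Function<Integer, Integer> plusOne = x -> x + 1;
Function<Integer, Integer> timesTwo = x -> x * 2;
FunctionComposer.composeFunctions(plusOne, timesTwo).apply(3); // 8
plusOne.compose(timesTwo).apply(3);                            // 7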
|
public int getAppStatisticsFailedRetrieved() {
return numGetAppStatisticsFailedRetrieved.value();
}
|
@Test
public void testGetAppStatisticsRetrievedFailed() {
long totalBadBefore = metrics.getAppStatisticsFailedRetrieved();
badSubCluster.getAppStatisticsFailed();
Assert.assertEquals(totalBadBefore + 1,
metrics.getAppStatisticsFailedRetrieved());
}
|
public Scheduler getScheduler() {
return scheduler;
}
|
@Test
public void testScheduler() {
ShenyuConfig.Scheduler scheduler = config.getScheduler();
scheduler.setEnabled(true);
scheduler.setThreads(5);
scheduler.setType("test");
Boolean enabled = scheduler.getEnabled();
Integer threads = scheduler.getThreads();
String type = scheduler.getType();
notEmptyElements(enabled, type, threads);
}
|
public static ResourceProfile generateDefaultSlotResourceProfile(
WorkerResourceSpec workerResourceSpec, int numSlotsPerWorker) {
final ResourceProfile.Builder resourceProfileBuilder =
ResourceProfile.newBuilder()
.setCpuCores(workerResourceSpec.getCpuCores().divide(numSlotsPerWorker))
.setTaskHeapMemory(
workerResourceSpec.getTaskHeapSize().divide(numSlotsPerWorker))
.setTaskOffHeapMemory(
workerResourceSpec.getTaskOffHeapSize().divide(numSlotsPerWorker))
.setManagedMemory(
workerResourceSpec.getManagedMemSize().divide(numSlotsPerWorker))
.setNetworkMemory(
workerResourceSpec.getNetworkMemSize().divide(numSlotsPerWorker));
workerResourceSpec
.getExtendedResources()
.forEach(
(name, resource) ->
resourceProfileBuilder.setExtendedResource(
resource.divide(numSlotsPerWorker)));
return resourceProfileBuilder.build();
}
|
@Test
void testGenerateDefaultSlotProfileFromWorkerResourceSpec() {
final int numSlots = 5;
final ResourceProfile resourceProfile =
ResourceProfile.newBuilder()
.setCpuCores(1.0)
.setTaskHeapMemoryMB(1)
.setTaskOffHeapMemoryMB(2)
.setNetworkMemoryMB(3)
.setManagedMemoryMB(4)
.setExtendedResource(new ExternalResource(EXTERNAL_RESOURCE_NAME, 1))
.build();
final WorkerResourceSpec workerResourceSpec =
new WorkerResourceSpec.Builder()
.setCpuCores(1.0 * numSlots)
.setTaskHeapMemoryMB(1 * numSlots)
.setTaskOffHeapMemoryMB(2 * numSlots)
.setNetworkMemoryMB(3 * numSlots)
.setManagedMemoryMB(4 * numSlots)
.setExtendedResource(new ExternalResource(EXTERNAL_RESOURCE_NAME, numSlots))
.build();
assertThat(
SlotManagerUtils.generateDefaultSlotResourceProfile(
workerResourceSpec, numSlots))
.isEqualTo(resourceProfile);
}
|
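One invariant worth making explicit: every dimension, including extended resources, is divided by the same slot count, so the slot profiles sum back to the worker spec. A hedged sanity sketch, assuming ResourceProfile#multiply is available in this Flink version:

ResourceProfile slotProfile =
        SlotManagerUtils.generateDefaultSlotResourceProfile(workerResourceSpec, 5);
// Multiplying by the slot count should reproduce the worker's totals.
ResourceProfile workerTotal = slotProfile.multiply(5);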
public List<ChangeStreamRecord> toChangeStreamRecords(
PartitionMetadata partition,
ChangeStreamResultSet resultSet,
ChangeStreamResultSetMetadata resultSetMetadata) {
if (this.isPostgres()) {
    // In PostgreSQL, change stream records are returned as JSONB.
return Collections.singletonList(
toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
}
// In GoogleSQL, change stream records are returned as an array of structs.
return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
.flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
.collect(Collectors.toList());
}
|
@Test
public void testMappingInsertStructRowNewValuesToDataChangeRecord() {
final DataChangeRecord dataChangeRecord =
new DataChangeRecord(
"partitionToken",
Timestamp.ofTimeSecondsAndNanos(10L, 20),
"transactionId",
false,
"1",
"tableName",
Arrays.asList(
new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
Collections.singletonList(
new Mod("{\"column1\":\"value1\"}", null, "{\"column2\":\"newValue2\"}")),
ModType.INSERT,
ValueCaptureType.NEW_VALUES,
10L,
2L,
"transactionTag",
true,
null);
final Struct jsonFieldsStruct = recordsToStructWithJson(dataChangeRecord);
ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
when(resultSet.getCurrentRowAsStruct()).thenReturn(jsonFieldsStruct);
assertEquals(
Collections.singletonList(dataChangeRecord),
mapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
}
|
@Nullable
static Class<? extends Activity> getReturnActivity(final Class<?> returnActivity) {
Class<? extends Activity> checkedReturnActivity = null;
if (returnActivity != null) {
if (Activity.class.isAssignableFrom(returnActivity)) {
checkedReturnActivity = returnActivity.asSubclass(Activity.class);
} else {
checkedReturnActivity = MainActivity.class;
}
}
return checkedReturnActivity;
}
|
@Test
public void getReturnActivity() {
Class<? extends Activity> returnActivity;
returnActivity = ErrorActivity.getReturnActivity(MainActivity.class);
assertEquals(MainActivity.class, returnActivity);
returnActivity = ErrorActivity.getReturnActivity(RouterActivity.class);
assertEquals(RouterActivity.class, returnActivity);
returnActivity = ErrorActivity.getReturnActivity(null);
assertNull(returnActivity);
returnActivity = ErrorActivity.getReturnActivity(Integer.class);
assertEquals(MainActivity.class, returnActivity);
returnActivity = ErrorActivity.getReturnActivity(VideoDetailFragment.class);
assertEquals(MainActivity.class, returnActivity);
}
|
static boolean explicitlyEcsConfigured(AwsConfig awsConfig) {
return !isNullOrEmptyAfterTrim(awsConfig.getCluster())
|| (!isNullOrEmptyAfterTrim(awsConfig.getHostHeader()) && awsConfig.getHostHeader().startsWith("ecs"));
}
|
@Test
public void explicitlyEcsConfigured() {
assertTrue(AwsClientConfigurator.explicitlyEcsConfigured(AwsConfig.builder().setHostHeader("ecs").build()));
assertTrue(AwsClientConfigurator.explicitlyEcsConfigured(
AwsConfig.builder().setHostHeader("ecs.us-east-1.amazonaws.com").build()));
assertTrue(AwsClientConfigurator.explicitlyEcsConfigured(AwsConfig.builder().setCluster("cluster").build()));
assertFalse(AwsClientConfigurator.explicitlyEcsConfigured(AwsConfig.builder().build()));
}
|
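A hedged sketch of the isNullOrEmptyAfterTrim helper the method above relies on; the real implementation lives elsewhere in the codebase.

// Presumed behavior: null, empty, and whitespace-only strings count as unset.
static boolean isNullOrEmptyAfterTrim(String s) {
    return s == null || s.trim().isEmpty();
}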
@Override
public String encrypt(final String key, final String data) throws Exception {
byte[] decoded = Base64.getDecoder().decode(key);
RSAPublicKey pubKey = (RSAPublicKey) KeyFactory.getInstance(RSA).generatePublic(new X509EncodedKeySpec(decoded));
Cipher cipher = Cipher.getInstance(RSA);
cipher.init(Cipher.ENCRYPT_MODE, pubKey);
return Base64.getEncoder().encodeToString(cipher.doFinal(data.getBytes(StandardCharsets.UTF_8)));
}
|
@Test
public void testEncrypt() throws Exception {
byte[] encryptedData = Base64.getMimeDecoder().decode(cryptorStrategy.encrypt(encKey, decryptedData));
assertThat(cryptorStrategy.decrypt(decKey, encryptedData), is(decryptedData));
}
|
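The test above also exercises a decrypt counterpart that is not shown. A minimal sketch of what it plausibly looks like, assuming the private key arrives Base64-encoded in PKCS#8 form, mirroring the X.509 handling on the encrypt side:

public String decrypt(final String key, final byte[] data) throws Exception {
    byte[] decoded = Base64.getDecoder().decode(key);
    RSAPrivateKey privateKey =
            (RSAPrivateKey) KeyFactory.getInstance(RSA).generatePrivate(new PKCS8EncodedKeySpec(decoded));
    Cipher cipher = Cipher.getInstance(RSA);
    cipher.init(Cipher.DECRYPT_MODE, privateKey);
    return new String(cipher.doFinal(data), StandardCharsets.UTF_8);
}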
@VisibleForTesting
static ParallelInstruction forParallelInstruction(
ParallelInstruction input, boolean replaceWithByteArrayCoder) throws Exception {
try {
ParallelInstruction instruction = clone(input, ParallelInstruction.class);
if (instruction.getRead() != null) {
Source cloudSource = instruction.getRead().getSource();
cloudSource.setCodec(forCodec(cloudSource.getCodec(), replaceWithByteArrayCoder));
} else if (instruction.getWrite() != null) {
com.google.api.services.dataflow.model.Sink cloudSink = instruction.getWrite().getSink();
cloudSink.setCodec(forCodec(cloudSink.getCodec(), replaceWithByteArrayCoder));
} else if (instruction.getParDo() != null) {
instruction.setParDo(
forParDoInstruction(instruction.getParDo(), replaceWithByteArrayCoder));
} else if (instruction.getPartialGroupByKey() != null) {
PartialGroupByKeyInstruction pgbk = instruction.getPartialGroupByKey();
pgbk.setInputElementCodec(forCodec(pgbk.getInputElementCodec(), replaceWithByteArrayCoder));
} else if (instruction.getFlatten() != null) {
// FlattenInstructions have no codecs to wrap.
} else {
throw new RuntimeException("Unknown parallel instruction: " + input);
}
return instruction;
} catch (IOException e) {
      throw new RuntimeException(
          String.format("Failed to replace unknown coder with LengthPrefixCoder for: {%s}", input),
          e);
}
}
|
@Test
public void testLengthPrefixWriteInstructionCoder() throws Exception {
WriteInstruction writeInstruction = new WriteInstruction();
writeInstruction.setSink(
new Sink()
.setCodec(CloudObjects.asCloudObject(windowedValueCoder, /*sdkComponents=*/ null)));
instruction.setWrite(writeInstruction);
ParallelInstruction prefixedInstruction = forParallelInstruction(instruction, false);
assertEqualsAsJson(
CloudObjects.asCloudObject(prefixedWindowedValueCoder, /*sdkComponents=*/ null),
prefixedInstruction.getWrite().getSink().getCodec());
// Should not mutate the instruction.
assertEqualsAsJson(
CloudObjects.asCloudObject(windowedValueCoder, /*sdkComponents=*/ null),
writeInstruction.getSink().getCodec());
}
|
public static void writeBuffers(
WritableByteChannel writeChannel, long expectedBytes, ByteBuffer[] bufferWithHeaders)
throws IOException {
int writeSize = 0;
for (ByteBuffer bufferWithHeader : bufferWithHeaders) {
writeSize += writeChannel.write(bufferWithHeader);
}
        checkState(writeSize == expectedBytes, "Wrong number of written bytes.");
}
|
@Test
void testWriteBuffers() throws IOException {
Random random = new Random();
int numBuffers = 20;
int bufferSizeBytes = 10;
File testFile = new File(tempFolder.getPath(), "testFile");
org.apache.flink.core.fs.Path testPath =
org.apache.flink.core.fs.Path.fromLocalFile(testFile);
FileSystem fs = testPath.getFileSystem();
WritableByteChannel currentChannel =
Channels.newChannel(fs.create(testPath, FileSystem.WriteMode.NO_OVERWRITE));
ByteBuffer[] toWriteBuffers = new ByteBuffer[numBuffers];
for (int i = 0; i < numBuffers; i++) {
byte[] bytes = new byte[bufferSizeBytes];
random.nextBytes(bytes);
toWriteBuffers[i] = ByteBuffer.wrap(bytes);
}
int numExpectedBytes = numBuffers * bufferSizeBytes;
        SegmentPartitionFile.writeBuffers(currentChannel, numExpectedBytes, toWriteBuffers);
        currentChannel.close(); // flush and release the channel before reading the file back
byte[] bytesRead = Files.readAllBytes(testFile.toPath());
assertThat(bytesRead).hasSize(numExpectedBytes);
}
|
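For symmetry, a hedged read-side counterpart to writeBuffers; this is an illustrative sketch, not part of SegmentPartitionFile.

// Reads exactly expectedBytes from the channel or fails with EOFException.
static ByteBuffer readBuffer(ReadableByteChannel channel, int expectedBytes) throws IOException {
    ByteBuffer buffer = ByteBuffer.allocate(expectedBytes);
    while (buffer.hasRemaining()) {
        if (channel.read(buffer) < 0) {
            throw new EOFException(
                    "Channel exhausted after " + buffer.position() + " of " + expectedBytes + " bytes.");
        }
    }
    buffer.flip();
    return buffer;
}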