| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
@Override
public JavaCommand<CeJvmOptions> createCeCommand() {
    // Resolve the installation home directory; PATH_HOME is a mandatory property.
    File homeDir = props.nonNullValueAsFile(PATH_HOME.getKey());
    // Build the Compute Engine JVM options from the two mandatory CE option
    // properties, then append any proxy-related JVM options.
    CeJvmOptions jvmOptions = new CeJvmOptions(tempDir)
        .addFromMandatoryProperty(props, CE_JAVA_OPTS.getKey())
        .addFromMandatoryProperty(props, CE_JAVA_ADDITIONAL_OPTS.getKey());
    addProxyJvmOptions(jvmOptions);
    // Assemble the CE process command: arguments are passed via a file, the
    // main class is CeServer, and the application fat jar is the classpath.
    JavaCommand<CeJvmOptions> command = new JavaCommand<CeJvmOptions>(ProcessId.COMPUTE_ENGINE, homeDir)
        .setReadsArgumentsFromFile(true)
        .setArguments(props.rawProperties())
        .setJvmOptions(jvmOptions)
        .setGracefulStopTimeoutMs(getGracefulStopTimeoutMs(props, CE_GRACEFUL_STOP_TIMEOUT))
        .setClassName("org.sonar.ce.app.CeServer")
        .addClasspath("./lib/sonar-application-" + SQ_VERSION + ".jar");
    // The external JDBC driver is optional; add it to the classpath only when configured.
    String driverPath = props.value(JDBC_DRIVER_PATH.getKey());
    if (driverPath != null) {
        command.addClasspath(driverPath);
    }
    // Keep a parent-environment JAVA_TOOL_OPTIONS from leaking into the child JVM.
    command.suppressEnvVariable(ENV_VAR_JAVA_TOOL_OPTIONS);
    return command;
}
|
@Test
public void createCeCommand_returns_command_for_default_settings() {
    // Factory built from empty Properties exercises all defaults.
    JavaCommand command = newFactory(new Properties()).createCeCommand();
    assertThat(command.getClassName()).isEqualTo("org.sonar.ce.app.CeServer");
    assertThat(command.getWorkDir().getAbsolutePath()).isEqualTo(homeDir.getAbsolutePath());
    // Only the application jar is on the classpath when no JDBC driver is configured.
    assertThat(command.getClasspath()).hasSize(1).allMatch(p -> p.toString().startsWith("./lib/sonar-application-"));
    assertThat(command.getJvmOptions().getAll())
        // enforced values
        .contains("-Djava.awt.headless=true", "-Dfile.encoding=UTF-8")
        // default settings
        // NOTE(review): "-Dfile.encoding=UTF-8" is asserted twice (also in the line above) — likely redundant.
        .contains("-Djava.io.tmpdir=" + tempDir.getAbsolutePath(), "-Dfile.encoding=UTF-8")
        .contains("-Xmx512m", "-Xms128m", "-XX:+HeapDumpOnOutOfMemoryError");
    assertThat(command.getProcessId()).isEqualTo(ProcessId.COMPUTE_ENGINE);
    assertThat(command.getEnvVariables())
        .isNotEmpty();
    assertThat(command.getArguments())
        // default settings
        // NOTE(review): this CE test asserts the "sonar.web.javaOpts" key — verify the key is intended
        // (raw properties include web defaults too) rather than "sonar.ce.javaOpts".
        .contains(entry("sonar.web.javaOpts", "-Xmx512m -Xms128m -XX:+HeapDumpOnOutOfMemoryError"))
        .contains(entry("sonar.cluster.enabled", "false"));
    assertThat(command.getSuppressedEnvVariables()).containsOnly("JAVA_TOOL_OPTIONS");
}
|
/**
 * Returns the privacy setting held by this instance.
 *
 * @return the configured {@code Privacy} value
 */
public Privacy privacy() {
    return this.privacy;
}
|
@Test
void privacyCannotBeNull() {
    // Building a bot with a null privacy setting must be rejected eagerly with an NPE.
    Assertions.assertThrows(NullPointerException.class, () -> DefaultBot.getDefaultBuilder().privacy(null).build());
}
|
/**
 * Creates a matcher that matches requests whose channel kind equals {@code channelKind}.
 *
 * <p>Note: the original contract deliberately signals BOTH null and empty input with
 * {@link NullPointerException}; that behavior is preserved here so callers catching
 * NPE are unaffected.
 *
 * @param channelKind the channel kind to match; must be non-null and non-empty
 * @return a matcher comparing the request's channel kind to {@code channelKind}
 */
public static <Req extends MessagingRequest> Matcher<Req> channelKindEquals(String channelKind) {
    if (channelKind == null) {
        throw new NullPointerException("channelKind == null");
    }
    if (channelKind.isEmpty()) {
        throw new NullPointerException("channelKind is empty");
    }
    return new MessagingChannelKindEquals<>(channelKind);
}
|
@Test void channelKindEquals_unmatched_null() {
    // A matcher for kind "queue" must not match this request — per the test name the
    // request's channel kind is presumably null/unset (fixture not visible here — verify).
    assertThat(channelKindEquals("queue").matches(request)).isFalse();
}
|
/**
 * Looks up a board for reading by its id on behalf of the given member.
 *
 * @param boardId  id of the board to fetch
 * @param memberId id of the requesting member
 * @return the board's read response
 * @throws BoardNotFoundException when no matching board exists
 */
public BoardFoundResponse findBoardById(final Long boardId, final Long memberId) {
    final var board = boardRepository.findByIdForRead(boardId, memberId);
    // Absence of the board is surfaced as a domain-specific exception.
    return board.orElseThrow(BoardNotFoundException::new);
}
|
@Test
void 게시글이_없으면_에외를_발생시킨다() {
    // (Test name: "throws an exception when the board does not exist".)
    // when & then: looking up a non-existent board must raise BoardNotFoundException.
    assertThatThrownBy(() -> boardQueryService.findBoardById(1L, 1L))
        .isInstanceOf(BoardNotFoundException.class);
}
|
/**
 * Marks the named extension as validated by copying it from the original input
 * extensions into the validated set.
 *
 * @param extensionName name of the extension to validate
 * @throws IllegalArgumentException if the extension was not part of the original input
 */
public void valid(String extensionName) {
    final var original = inputExtensions.map();
    if (!original.containsKey(extensionName)) {
        throw new IllegalArgumentException(String.format("Extension %s was not found in the original extensions", extensionName));
    }
    validatedExtensions.put(extensionName, original.get(extensionName));
}
|
@Test
public void testCannotValidateExtensionWhichWasNotGiven() {
    // Callback is constructed with a single extension "hello" -> "bye";
    // validating an unknown name must be rejected.
    Map<String, String> extensions = new HashMap<>();
    extensions.put("hello", "bye");
    OAuthBearerExtensionsValidatorCallback callback = new OAuthBearerExtensionsValidatorCallback(TOKEN, new SaslExtensions(extensions));
    assertThrows(IllegalArgumentException.class, () -> callback.valid("???"));
}
|
/**
 * Asynchronously produces labels for a failure by fanning out to every given
 * {@link FailureEnricher} and merging their results into a single map.
 *
 * <p>Per enricher: labels whose keys are not declared in the enricher's
 * {@code getOutputKeys()} are dropped with a warning; an enricher that completes
 * exceptionally contributes an empty map (the exception is logged, not propagated).
 * The merge step throws a {@link FlinkRuntimeException} if two enrichers emit the
 * same key.
 *
 * @param cause the failure to be labeled
 * @param context context handed to each enricher
 * @param mainThreadExecutor executor on which the final merge runs
 * @param failureEnrichers the enrichers to consult
 * @return future completing with the merged label map
 */
public static CompletableFuture<Map<String, String>> labelFailure(
    final Throwable cause,
    final Context context,
    final Executor mainThreadExecutor,
    final Collection<FailureEnricher> failureEnrichers) {
    // list of CompletableFutures to enrich failure with labels from each enricher
    final Collection<CompletableFuture<Map<String, String>>> enrichFutures = new ArrayList<>();
    for (final FailureEnricher enricher : failureEnrichers) {
        enrichFutures.add(
            enricher.processFailure(cause, context)
                .thenApply(
                    enricherLabels -> {
                        // Filter out labels violating the enricher's declared key contract.
                        final Map<String, String> validLabels = new HashMap<>();
                        enricherLabels.forEach(
                            (k, v) -> {
                                if (!enricher.getOutputKeys().contains(k)) {
                                    LOG.warn(
                                        "Ignoring label with key {} from enricher {}"
                                            + " violating contract, keys allowed {}.",
                                        k,
                                        enricher.getClass(),
                                        enricher.getOutputKeys());
                                } else {
                                    validLabels.put(k, v);
                                }
                            });
                        return validLabels;
                    })
                .exceptionally(
                    t -> {
                        // A failing enricher must not fail the whole labeling; log and skip.
                        LOG.warn(
                            "Enricher {} threw an exception.",
                            enricher.getClass(),
                            t);
                        return Collections.emptyMap();
                    }));
    }
    // combine all CompletableFutures into a single CompletableFuture containing a Map of labels
    return FutureUtils.combineAll(enrichFutures)
        .thenApplyAsync(
            labelsToMerge -> {
                final Map<String, String> mergedLabels = new HashMap<>();
                for (Map<String, String> labels : labelsToMerge) {
                    labels.forEach(
                        (k, v) ->
                            // merge label with existing, throwing an exception
                            // if there is a key conflict
                            mergedLabels.merge(
                                k,
                                v,
                                (first, second) -> {
                                    throw new FlinkRuntimeException(
                                        String.format(
                                            MERGE_EXCEPTION_MSG,
                                            k));
                                }));
                }
                return mergedLabels;
            },
            mainThreadExecutor);
}
|
@Test
public void testLabelFutureWithValidEnricher() {
    // validate labelFailure by enricher with correct outputKeys
    final Throwable cause = new RuntimeException("test exception");
    final Set<FailureEnricher> failureEnrichers = new HashSet<>();
    // TestEnricher declares "enricherKey" as its only allowed output key.
    final FailureEnricher validEnricher = new TestEnricher("enricherKey");
    failureEnrichers.add(validEnricher);
    final CompletableFuture<Map<String, String>> result =
        FailureEnricherUtils.labelFailure(
            cause,
            null,
            ComponentMainThreadExecutorServiceAdapter.forMainThread(),
            failureEnrichers);
    // The merged map should contain exactly the single conforming label.
    assertThatFuture(result)
        .eventuallySucceeds()
        .satisfies(
            labels -> {
                assertThat(labels).hasSize(1);
                assertThat(labels).containsKey("enricherKey");
                assertThat(labels).containsValue("enricherKeyValue");
            });
}
|
/**
 * CLI entry point: reads every flow file under {@code directory}, expands includes,
 * concatenates them as a multi-document YAML body, and POSTs it to the server's
 * namespace flow-update endpoint.
 *
 * @return 0 on success, 1 when the server rejects the request or validation fails
 * @throws Exception propagated from the superclass or I/O setup
 */
@SuppressWarnings("deprecation")
@Override
public Integer call() throws Exception {
    super.call();
    // Files.walk must be closed; try-with-resources handles that.
    try (var files = Files.walk(directory)) {
        List<String> flows = files
            .filter(Files::isRegularFile)
            .filter(YamlFlowParser::isValidExtension)
            .map(path -> {
                try {
                    // Expand "include" helpers relative to each file's directory.
                    return IncludeHelperExpander.expand(Files.readString(path, Charset.defaultCharset()), path.getParent());
                } catch (IOException e) {
                    // Streams can't throw checked exceptions; wrap and rethrow.
                    throw new RuntimeException(e);
                }
            })
            .toList();
        String body = "";
        if (flows.isEmpty()) {
            // An empty body is still sent, which (with delete=true) can clear the namespace.
            stdOut("No flow found on '{}'", directory.toFile().getAbsolutePath());
        } else {
            // Join flows into one multi-document YAML payload.
            body = String.join("\n---\n", flows);
        }
        try(DefaultHttpClient client = client()) {
            MutableHttpRequest<String> request = HttpRequest
                .POST(apiUri("/flows/") + namespace + "?delete=" + delete, body).contentType(MediaType.APPLICATION_YAML);
            List<UpdateResult> updated = client.toBlocking().retrieve(
                this.requestOptions(request),
                Argument.listOf(UpdateResult.class)
            );
            stdOut(updated.size() + " flow(s) for namespace '" + namespace + "' successfully updated !");
            updated.forEach(flow -> stdOut("- " + flow.getNamespace() + "." + flow.getId()));
        } catch (HttpClientResponseException e){
            // Server-side rejection: report and exit with failure code.
            FlowValidateCommand.handleHttpException(e, "flow");
            return 1;
        }
    } catch (ConstraintViolationException e) {
        // Local validation failure: report and exit with failure code.
        FlowValidateCommand.handleException(e, "flow");
        return 1;
    }
    return 0;
}
|
/**
 * Runs the namespace-update command against an embedded server on the "helper"
 * resource directory and verifies one flow is reported as updated.
 *
 * <p>Fix: the original redirected {@code System.out} and never restored it,
 * leaking the capture stream into every subsequent test in the JVM. The original
 * stream is now restored in a {@code finally} block.
 */
@Test
void helper() {
    URL directory = FlowNamespaceUpdateCommandTest.class.getClassLoader().getResource("helper");
    PrintStream originalOut = System.out;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));
    try (ApplicationContext ctx = ApplicationContext.run(Environment.CLI, Environment.TEST)) {
        EmbeddedServer embeddedServer = ctx.getBean(EmbeddedServer.class);
        embeddedServer.start();
        String[] args = {
            "--server",
            embeddedServer.getURL().toString(),
            "--user",
            "myuser:pass:word",
            "io.kestra.cli",
            directory.getPath(),
        };
        Integer call = PicocliRunner.call(FlowNamespaceUpdateCommand.class, ctx, args);
        assertThat(call, is(0));
        assertThat(out.toString(), containsString("1 flow(s)"));
    } finally {
        // Always restore stdout so other tests observe the real console stream.
        System.setOut(originalOut);
    }
}
|
/**
 * Runs Tarjan's strongly-connected-components search over the whole graph.
 *
 * @param graph   graph to analyze
 * @param weigher edge weigher forwarded to the per-vertex traversal (may be null)
 * @return the finalized SCC result
 */
@Override
public SccResult<V, E> search(Graph<V, E> graph, EdgeWeigher<V, E> weigher) {
    SccResult<V, E> result = new SccResult<>(graph);
    // Start a traversal from every vertex not yet visited; visited vertexes
    // already carry non-null bookkeeping data in the result.
    for (V v : graph.getVertexes()) {
        if (result.data(v) == null) {
            connect(graph, v, weigher, result);
        }
    }
    return result.build();
}
|
@Test
public void basic() {
    // Build the fixture graph and expect 6 strongly connected components
    // (counts come from the vertexes()/edges() fixtures; validated by validate()).
    graph = new AdjacencyListsGraph<>(vertexes(), edges());
    TarjanGraphSearch<TestVertex, TestEdge> gs = new TarjanGraphSearch<>();
    SccResult<TestVertex, TestEdge> result = gs.search(graph, null);
    validate(result, 6);
}
|
/**
 * Returns the data source for the given column.
 *
 * <p>Physical columns are served from their index container; anything else must be
 * a virtual column declared in the schema, for which a data source is built on the
 * fly from a virtual column provider.
 *
 * @param column column name to resolve
 * @return data source for the column
 * @throws IllegalStateException if the column is neither physical nor a declared virtual column
 */
@Override
public DataSource getDataSource(String column) {
    IndexContainer indexContainer = _indexContainerMap.get(column);
    if (indexContainer != null) {
        // Physical column
        return indexContainer.toDataSource();
    } else {
        // Virtual column
        FieldSpec fieldSpec = _schema.getFieldSpecFor(column);
        Preconditions.checkState(fieldSpec != null && fieldSpec.isVirtualColumn(), "Failed to find column: %s", column);
        // TODO: Refactor virtual column provider to directly generate data source
        VirtualColumnContext virtualColumnContext = new VirtualColumnContext(fieldSpec, _numDocsIndexed);
        VirtualColumnProvider virtualColumnProvider = VirtualColumnProviderFactory.buildProvider(virtualColumnContext);
        return new ImmutableDataSource(virtualColumnProvider.buildMetadata(virtualColumnContext),
            virtualColumnProvider.buildColumnIndexContainer(virtualColumnContext));
    }
}
|
/**
 * For every multi-value column in the schema, verifies the mutable segment's data
 * source agrees with the immutable segment's: same doc count, same dictionary
 * length, and — per document — the same dictionary-decoded values in order.
 */
@Test
public void testDataSourceForMVColumns()
    throws IOException {
    for (FieldSpec fieldSpec : _schema.getAllFieldSpecs()) {
        if (!fieldSpec.isSingleValueField()) {
            String column = fieldSpec.getName();
            DataSource actualDataSource = _mutableSegmentImpl.getDataSource(column);
            DataSource expectedDataSource = _immutableSegment.getDataSource(column);
            int actualNumDocs = actualDataSource.getDataSourceMetadata().getNumDocs();
            int expectedNumDocs = expectedDataSource.getDataSourceMetadata().getNumDocs();
            assertEquals(actualNumDocs, expectedNumDocs);
            Dictionary actualDictionary = actualDataSource.getDictionary();
            Dictionary expectedDictionary = expectedDataSource.getDictionary();
            assertEquals(actualDictionary.length(), expectedDictionary.length());
            // Buffers sized for the widest MV entry in the immutable segment.
            int maxNumValuesPerMVEntry = expectedDataSource.getDataSourceMetadata().getMaxNumValuesPerMVEntry();
            int[] actualDictIds = new int[maxNumValuesPerMVEntry];
            int[] expectedDictIds = new int[maxNumValuesPerMVEntry];
            ForwardIndexReader actualReader = actualDataSource.getForwardIndex();
            ForwardIndexReader expectedReader = expectedDataSource.getForwardIndex();
            // Reader contexts are closeable resources; release them per column.
            try (ForwardIndexReaderContext actualReaderContext = actualReader.createContext();
                ForwardIndexReaderContext expectedReaderContext = expectedReader.createContext()) {
                for (int docId = 0; docId < expectedNumDocs; docId++) {
                    int actualLength = actualReader.getDictIdMV(docId, actualDictIds, actualReaderContext);
                    int expectedLength = expectedReader.getDictIdMV(docId, expectedDictIds, expectedReaderContext);
                    assertEquals(actualLength, expectedLength);
                    // Compare decoded values, not raw dict ids, since the two
                    // segments may assign different dictionary ids.
                    for (int i = 0; i < expectedLength; i++) {
                        assertEquals(actualDictionary.get(actualDictIds[i]), expectedDictionary.get(expectedDictIds[i]));
                    }
                }
            }
        }
    }
}
|
/**
 * Applies this extractor to the message's source field.
 *
 * <p>Flow: (1) check the run condition (string-contains or regex) against the
 * source field, counting hits/misses; (2) run the extractor, recording a
 * processing error on {@code ExtractorException}; (3) write result(s) into the
 * target field(s); (4) if the cursor strategy is CUT, remove the matched spans
 * from the source field; (5) run converters. Timers wrap each phase.
 */
public void runExtractor(Message msg) {
    try(final Timer.Context ignored = completeTimer.time()) {
        final String field;
        try (final Timer.Context ignored2 = conditionTimer.time()) {
            // We can only work on Strings.
            if (!(msg.getField(sourceField) instanceof String)) {
                conditionMissesCounter.inc();
                return;
            }
            field = (String) msg.getField(sourceField);
            // Decide if to extract at all.
            if (conditionType.equals(ConditionType.STRING)) {
                if (field.contains(conditionValue)) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            } else if (conditionType.equals(ConditionType.REGEX)) {
                if (regexConditionPattern.matcher(field).find()) {
                    conditionHitsCounter.inc();
                } else {
                    conditionMissesCounter.inc();
                    return;
                }
            }
        }
        try (final Timer.Context ignored2 = executionTimer.time()) {
            Result[] results;
            try {
                results = run(field);
            } catch (ExtractorException e) {
                // Record the failure on the message instead of propagating.
                final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
                msg.addProcessingError(new Message.ProcessingError(
                    ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
                return;
            }
            // Bail out on no results, or if ANY result carries a null value.
            if (results == null || results.length == 0 || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
                return;
            } else if (results.length == 1 && results[0].target == null) {
                // results[0].target is null if this extractor cannot produce multiple fields use targetField in that case
                msg.addField(targetField, results[0].getValue());
            } else {
                for (final Result result : results) {
                    msg.addField(result.getTarget(), result.getValue());
                }
            }
            // Remove original from message?
            if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
                final StringBuilder sb = new StringBuilder(field);
                // Delete matched spans from highest endIndex down so earlier
                // indices remain valid while cutting.
                final List<Result> reverseList = Arrays.stream(results)
                    .sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
                    .collect(Collectors.toList());
                // remove all from reverse so that the indices still match
                for (final Result result : reverseList) {
                    sb.delete(result.getBeginIndex(), result.getEndIndex());
                }
                final String builtString = sb.toString();
                // Placeholder when the cut consumed the entire field content.
                final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;
                msg.removeField(sourceField);
                // TODO don't add an empty field back, or rather don't add fullyCutByExtractor
                msg.addField(sourceField, finalResult);
            }
            runConverters(msg);
        }
    }
}
|
@Test
public void testExtractorsWithExceptions() throws Exception {
    // Extractor whose run() always throws; the failure must surface as a
    // processing error on the message, not as an exception to the caller.
    final TestExtractor extractor = new TestExtractor.Builder()
        .callback(new Callable<Result[]>() {
            @Override
            public Result[] call() throws Exception {
                throw new ExtractorException(new IOException("BARF"));
            }
        })
        .build();
    final Message msg = createMessage("message");
    extractor.runExtractor(msg);
    assertThat(msg.processingErrors()).hasSize(1);
    assertThat(msg.processingErrors().get(0)).satisfies(pe -> {
        assertThat(pe.getCause()).isEqualTo(ProcessingFailureCause.ExtractorException);
        assertThat(pe.getMessage()).isEqualTo("Could not apply extractor <test-title (test-id)>");
        // Root-cause message; the trailing period comes from ExceptionUtils.getRootCauseMessage.
        assertThat(pe.getDetails()).isEqualTo("BARF.");
    });
}
|
/**
 * Replaces CR/LF/TAB characters in the log line with a replacement token
 * (optionally ANSI-colored) to prevent log forging, unless the event is marked
 * CRLF-safe or originates from a logger considered safe.
 */
@Override
protected String transform(ILoggingEvent event, String in) {
    List<Marker> markers = event.getMarkerList();
    boolean markedSafe = markers != null && !markers.isEmpty() && markers.get(0).contains(CRLF_SAFE_MARKER);
    if (markedSafe || isLoggerSafe(event)) {
        return in;
    }
    // Resolve the optional ANSI element lazily; "_" is the bare fallback token.
    AnsiElement element = ELEMENTS.get(getFirstOption());
    String replacement = element == null ? "_" : toAnsiString("_", element);
    return in.replaceAll("[\n\r\t]", replacement);
}
|
@Test
void transformShouldReturnInputStringWhenMarkerListIsEmpty() {
    // NOTE(review): the test name says "empty" but the mock returns null for the
    // marker list — both take the same no-marker code path; consider renaming or
    // stubbing an actual empty list.
    ILoggingEvent event = mock(ILoggingEvent.class);
    when(event.getMarkerList()).thenReturn(null);
    // Hibernate loggers are presumably on the safe list, so input passes through — verify.
    when(event.getLoggerName()).thenReturn("org.hibernate.example.Logger");
    String input = "Test input string";
    CRLFLogConverter converter = new CRLFLogConverter();
    String result = converter.transform(event, input);
    assertEquals(input, result);
}
|
/**
 * Reports whether this container holds no elements.
 *
 * @return {@code true} iff {@link #size()} is zero
 */
public boolean isEmpty() {
    final int count = size();
    return count == 0;
}
|
@Test
void testIsEmpty() {
    // Fresh queue is empty; adding one element makes it non-empty;
    // polling the only element makes it empty again.
    HeapPriorityQueue<TestElement> priorityQueue = newPriorityQueue(1);
    assertThat(priorityQueue.isEmpty()).isTrue();
    assertThat(priorityQueue.add(new TestElement(4711L, 42L))).isTrue();
    assertThat(priorityQueue.isEmpty()).isFalse();
    priorityQueue.poll();
    assertThat(priorityQueue.isEmpty()).isTrue();
}
|
/**
 * Writes an HTTP/2 PRIORITY frame for {@code streamId}.
 *
 * <p>The payload is a 4-byte stream dependency (high bit set when exclusive)
 * followed by a single weight byte stored as {@code weight - 1} per the wire
 * format. Any validation or write failure is reported by failing the promise
 * rather than throwing.
 */
@Override
public ChannelFuture writePriority(ChannelHandlerContext ctx, int streamId,
    int streamDependency, short weight, boolean exclusive, ChannelPromise promise) {
    try {
        verifyStreamId(streamId, STREAM_ID);
        verifyStreamOrConnectionId(streamDependency, STREAM_DEPENDENCY);
        verifyWeight(weight);
        ByteBuf buf = ctx.alloc().buffer(PRIORITY_FRAME_LENGTH);
        writeFrameHeaderInternal(buf, PRIORITY_ENTRY_LENGTH, PRIORITY, new Http2Flags(), streamId);
        // High bit of the dependency word carries the exclusive flag.
        buf.writeInt(exclusive ? (int) (0x80000000L | streamDependency) : streamDependency);
        // Adjust the weight so that it fits into a single byte on the wire.
        buf.writeByte(weight - 1);
        return ctx.write(buf, promise);
    } catch (Throwable t) {
        return promise.setFailure(t);
    }
}
|
@Test
public void writePriority() {
    // weight 256 is encoded as 0xFF on the wire (stored as weight - 1).
    frameWriter.writePriority(
        ctx, /* streamId= */ 1, /* dependencyId= */ 2, /* weight= */ (short) 256, /* exclusive= */ true, promise);
    // Expected bytes follow the HTTP/2 PRIORITY frame layout.
    expectedOutbound = Unpooled.copiedBuffer(new byte[] {
        (byte) 0x00, (byte) 0x00, (byte) 0x05, // payload length = 5
        (byte) 0x02, // payload type = 2
        (byte) 0x00, // flags = 0x00
        (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x01, // stream id = 1
        (byte) 0x80, (byte) 0x00, (byte) 0x00, (byte) 0x02, // dependency id = 2 | exclusive = 1 << 63
        (byte) 0xFF, // weight = 255 (implicit +1)
    });
    assertEquals(expectedOutbound, outbound);
}
|
/**
 * Returns the storage-builder class associated with this function.
 *
 * @return the {@code AvgPercentileFunctionBuilder} class literal
 */
@Override
public Class<? extends AvgPercentileFunctionBuilder> builder() {
    final Class<? extends AvgPercentileFunctionBuilder> builderType = AvgPercentileFunctionBuilder.class;
    return builderType;
}
|
/**
 * Round-trips a percentile function instance through its storage builder:
 * entity -> storage map (with DataTable/IntList flattened to storage strings)
 * -> entity, and verifies equality plus percentile values and ranks survive.
 */
@Test
public void testBuilder() throws IllegalAccessException, InstantiationException {
    PercentileFunctionInst inst = new PercentileFunctionInst();
    inst.accept(
        MeterEntity.newService("service-test", Layer.GENERAL),
        new PercentileArgument(
            new BucketedValues(
                BUCKETS,
                new long[] {
                    10,
                    20,
                    30,
                    40
                }
            ),
            RANKS
        )
    );
    inst.calculate();
    final StorageBuilder storageBuilder = inst.builder().newInstance();
    // Simulate the storage layer do, convert the datatable to string.
    final HashMapConverter.ToStorage toStorage = new HashMapConverter.ToStorage();
    storageBuilder.entity2Storage(inst, toStorage);
    final Map<String, Object> map = toStorage.obtain();
    map.put(
        SumHistogramPercentileFunction.SUMMATION,
        ((DataTable) map.get(SumHistogramPercentileFunction.SUMMATION)).toStorageData()
    );
    map.put(
        SumHistogramPercentileFunction.VALUE,
        ((DataTable) map.get(SumHistogramPercentileFunction.VALUE)).toStorageData()
    );
    map.put(
        SumHistogramPercentileFunction.RANKS,
        ((IntList) map.get(SumHistogramPercentileFunction.RANKS)).toStorageData()
    );
    final SumHistogramPercentileFunction inst2 = (SumHistogramPercentileFunction) storageBuilder.storage2Entity(
        new HashMapConverter.ToEntity(map));
    assertEquals(inst, inst2);
    // HistogramFunction equal doesn't include dataset.
    assertEquals(inst.getPercentileValues(), inst2.getPercentileValues());
    assertEquals(inst.getRanks(), inst2.getRanks());
}
|
/**
 * Sets the queue store implementation by class name and clears any previously
 * configured store instance (the two configuration modes are mutually exclusive).
 *
 * @param className non-blank fully qualified class name of the queue store
 * @return this config, for chaining
 */
public QueueStoreConfig setClassName(@Nonnull String className) {
    // Validate first so the implementation reference is only cleared on success.
    final String validated = checkHasText(className, "Queue store class name must contain text");
    this.className = validated;
    this.storeImplementation = null;
    return this;
}
|
@Test
public void testEqualsAndHashCode() {
    // EqualsVerifier needs distinguishable hash codes for prefab values.
    assumeDifferentHashCodes();
    EqualsVerifier.forClass(QueueStoreConfig.class)
        .suppress(Warning.NONFINAL_FIELDS)
        // Prefab read-only wrappers so the verifier can exercise field substitution.
        .withPrefabValues(QueueStoreConfigReadOnly.class,
            new QueueStoreConfigReadOnly(new QueueStoreConfig().setClassName("red")),
            new QueueStoreConfigReadOnly(new QueueStoreConfig().setClassName("black")))
        .verify();
}
|
/**
 * Recursively checks whether the expression tree contains a non-deterministic
 * function call.
 *
 * <p>A call counts as non-deterministic when: its name is in
 * {@code FunctionSet.nonDeterministicFunctions}; it is {@code now}; or it is a
 * zero-argument call to a function in
 * {@code FunctionSet.nonDeterministicTimeFunctions} (with an explicit argument
 * such time functions are treated as deterministic — see the paired test).
 */
boolean hasNonDeterministicFunctions(Expr expr) {
    if (expr instanceof FunctionCallExpr) {
        FunctionCallExpr callExpr = (FunctionCallExpr) expr;
        String funcName = callExpr.getFn().functionName();
        if (FunctionSet.nonDeterministicFunctions.contains(funcName)) {
            return true;
        }
        // NOTE(review): presumably NOW is not in nonDeterministicFunctions, hence
        // this special case — confirm against FunctionSet.
        if (FunctionSet.NOW.equals(funcName)) {
            return true;
        }
        if (FunctionSet.nonDeterministicTimeFunctions.contains(funcName) && callExpr.getChildren().isEmpty()) {
            return true;
        }
    }
    // Recurse into children; any non-deterministic descendant taints the whole tree.
    return expr.getChildren().stream().anyMatch(this::hasNonDeterministicFunctions);
}
|
/**
 * Zero-argument calls to non-deterministic time functions are non-deterministic;
 * the same functions called with an explicit argument are deterministic — except
 * {@code now}, which is always non-deterministic.
 */
@Test
public void testNondetermisticTimeFunction() {
    FragmentNormalizer fragmentNormalizer = new FragmentNormalizer(null, null);
    ConnectContext ctx = UtFrameUtils.createDefaultCtx();
    // Case 1: zero-argument form, e.g. "select curdate()".
    for (String funcName : FunctionSet.nonDeterministicTimeFunctions) {
        String sql = String.format("select %s()", funcName);
        StatementBase statementBase;
        try {
            statementBase = com.starrocks.sql.parser.SqlParser.parse(sql, ctx.getSessionVariable()).get(0);
            com.starrocks.sql.analyzer.Analyzer.analyze(statementBase, ctx);
        } catch (Throwable ignored) {
            // Skip functions that don't support this call form.
            continue;
        }
        QueryStatement queryStatement = (QueryStatement) statementBase;
        SelectRelation selectRelation = (SelectRelation) queryStatement.getQueryRelation();
        Expr expr = selectRelation.getSelectList().getItems().get(0).getExpr();
        Assert.assertTrue(expr instanceof FunctionCallExpr);
        Assert.assertTrue(fragmentNormalizer.hasNonDeterministicFunctions(expr));
    }
    // Case 2: explicit-argument form, e.g. "select curdate('2022-12-01')".
    for (String funcName : FunctionSet.nonDeterministicTimeFunctions) {
        String sql = String.format("select %s('2022-12-01')", funcName);
        StatementBase statementBase;
        try {
            statementBase = com.starrocks.sql.parser.SqlParser.parse(sql, ctx.getSessionVariable()).get(0);
            com.starrocks.sql.analyzer.Analyzer.analyze(statementBase, ctx);
        } catch (Throwable ignored) {
            continue;
        }
        QueryStatement queryStatement = (QueryStatement) statementBase;
        SelectRelation selectRelation = (SelectRelation) queryStatement.getQueryRelation();
        Expr expr = selectRelation.getSelectList().getItems().get(0).getExpr();
        Assert.assertTrue(expr instanceof FunctionCallExpr);
        if (funcName.equals(FunctionSet.NOW)) {
            // NOW is unconditionally non-deterministic regardless of arguments.
            Assert.assertTrue(fragmentNormalizer.hasNonDeterministicFunctions(expr));
        } else {
            Assert.assertFalse(fragmentNormalizer.hasNonDeterministicFunctions(expr));
        }
    }
}
|
/**
 * FEEL comparison: returns whether {@code point1} is strictly greater than
 * {@code point2}.
 *
 * @param point1 left operand; must be non-null
 * @param point2 right operand; must be non-null
 * @return {@code FEELFnResult} wrapping the boolean result, or an
 *         {@code InvalidParametersEvent} error when an operand is null or the
 *         two operands are not mutually comparable
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "point1" ) Comparable point1, @ParameterName( "point2" ) Comparable point2) {
    if ( point1 == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be null"));
    }
    if ( point2 == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point2", "cannot be null"));
    }
    try {
        boolean result = point1.compareTo( point2 ) > 0;
        return FEELFnResult.ofResult( result );
    } catch( Exception e ) {
        // points are not comparable
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be compared to point2"));
    }
}
|
/**
 * Exercises the FEEL "after" function on range pairs: a range is "after"
 * another when it starts past the other's end, with open/closed boundaries
 * deciding the touching-endpoint cases.
 */
@Test
void invokeParamRangeAndRange() {
    // Identical ranges: not after.
    FunctionTestUtil.assertResult( afterFunction.invoke(
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
        Boolean.FALSE );
    // Disjoint, strictly later range: after.
    FunctionTestUtil.assertResult( afterFunction.invoke(
        new RangeImpl( Range.RangeBoundary.CLOSED, "g", "k", Range.RangeBoundary.CLOSED ),
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
        Boolean.TRUE );
    // Touching closed endpoints overlap at "f": not after.
    FunctionTestUtil.assertResult( afterFunction.invoke(
        new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ),
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
        Boolean.FALSE );
    // Open start excludes "f": after.
    FunctionTestUtil.assertResult( afterFunction.invoke(
        new RangeImpl( Range.RangeBoundary.OPEN, "f", "k", Range.RangeBoundary.CLOSED ),
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
        Boolean.TRUE );
    // Open end of the first operand's comparand excludes "f": after.
    FunctionTestUtil.assertResult( afterFunction.invoke(
        new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ),
        new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.OPEN ) ),
        Boolean.TRUE );
}
|
/**
 * Decides whether a failed request should be retried.
 *
 * <p>No retry when: the retry budget is exhausted, the exception is one of the
 * configured non-retriable types (subclasses included), or the request was
 * cancelled. Otherwise retry only if the HTTP method is idempotent.
 *
 * <p>Cleanup: the original had a redundant {@code contains(exception.getClass())}
 * fast path wrapped in a confusing if/else — the {@code isInstance} loop below
 * already rejects exact class matches, so the fast path is removed with
 * identical behavior.
 *
 * @param request   the request that failed
 * @param exception the I/O failure
 * @param execCount how many times this request has been executed so far
 * @param context   the execution context (unused here)
 * @return {@code true} if the request should be retried
 */
@Override
public boolean retryRequest(
    HttpRequest request, IOException exception, int execCount, HttpContext context) {
    if (execCount > maxRetries) {
        // Do not retry if over max retries
        return false;
    }
    // Reject the exception if it is (a subclass of) any non-retriable type.
    for (Class<? extends IOException> rejectException : nonRetriableExceptions) {
        if (rejectException.isInstance(exception)) {
            return false;
        }
    }
    // Never retry a request the caller has already cancelled.
    if (request instanceof CancellableDependency
        && ((CancellableDependency) request).isCancelled()) {
        return false;
    }
    // Retry if the request is considered idempotent
    return Method.isIdempotent(request.getMethod());
}
|
@Test
public void noRetryOnSSLFailure() {
    // SSLException is (configured as) non-retriable, so even an otherwise
    // idempotent GET must not be retried.
    HttpGet request = new HttpGet("/");
    assertThat(retryStrategy.retryRequest(request, new SSLException("encryption failed"), 1, null))
        .isFalse();
}
|
/**
 * Appends a descriptor finder to this compound finder.
 *
 * @param finder the finder to add; must not be null
 * @return this compound finder, for chaining
 * @throws IllegalArgumentException if {@code finder} is null
 */
public CompoundPluginDescriptorFinder add(PluginDescriptorFinder finder) {
    if (finder == null) {
        throw new IllegalArgumentException("null not allowed");
    }
    this.finders.add(finder);
    return this;
}
|
@Test
public void add() {
    // A new compound finder starts empty; adding one finder bumps the count to 1.
    CompoundPluginDescriptorFinder descriptorFinder = new CompoundPluginDescriptorFinder();
    assertEquals(0, descriptorFinder.size());
    descriptorFinder.add(new PropertiesPluginDescriptorFinder());
    assertEquals(1, descriptorFinder.size());
}
|
/**
 * Returns the shared counter tracking long-lived connections.
 *
 * <p>The live {@link AtomicInteger} is exposed directly, so callers can both
 * read and mutate the metric.
 */
public static AtomicInteger getLongConnectionMonitor() {
    return longConnection;
}
|
@Test
void testGetLongConnectionMonitor() {
    // Fresh metric state: the long-connection counter starts at zero.
    // NOTE(review): this relies on no earlier test mutating the shared static counter.
    AtomicInteger atomicInteger = MetricsMonitor.getLongConnectionMonitor();
    assertEquals(0, atomicInteger.get());
}
|
/**
 * Replaces the labels on a node by routing the call to the sub-cluster that
 * owns the node.
 *
 * <p>Both parameters are validated up front; failures (validation or downstream)
 * increment the failure metric and are audit-logged before being rethrown.
 *
 * @param newNodeLabelsName labels to set; must be non-empty
 * @param hsr incoming servlet request (cloned before forwarding)
 * @param nodeId target node id; must be non-blank
 * @return 200 response whose body names the sub-cluster that handled the call
 * @throws IllegalArgumentException on blank nodeId or empty label set
 * @throws Exception propagated from the sub-cluster interceptor
 */
@Override
public Response replaceLabelsOnNode(Set<String> newNodeLabelsName,
    HttpServletRequest hsr, String nodeId) throws Exception {
    // Step1. Check the parameters to ensure that the parameters are not empty.
    if (StringUtils.isBlank(nodeId)) {
        routerMetrics.incrReplaceLabelsOnNodeFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(), REPLACE_LABELSONNODE, UNKNOWN,
            TARGET_WEB_SERVICE, "Parameter error, nodeId must not be null or empty.");
        throw new IllegalArgumentException("Parameter error, nodeId must not be null or empty.");
    }
    if (CollectionUtils.isEmpty(newNodeLabelsName)) {
        routerMetrics.incrReplaceLabelsOnNodeFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(), REPLACE_LABELSONNODE, UNKNOWN,
            TARGET_WEB_SERVICE, "Parameter error, newNodeLabelsName must not be empty.");
        throw new IllegalArgumentException("Parameter error, newNodeLabelsName must not be empty.");
    }
    try {
        // Step2. We find the subCluster according to the nodeId,
        // and then call the replaceLabelsOnNode of the subCluster.
        long startTime = clock.getTime();
        SubClusterInfo subClusterInfo = getNodeSubcluster(nodeId);
        DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorByNodeId(nodeId);
        // Forward a clone so the original request object is not consumed.
        final HttpServletRequest hsrCopy = clone(hsr);
        interceptor.replaceLabelsOnNode(newNodeLabelsName, hsrCopy, nodeId);
        // Step3. Return the response result.
        long stopTime = clock.getTime();
        RouterAuditLogger.logSuccess(getUser().getShortUserName(), REPLACE_LABELSONNODE,
            TARGET_WEB_SERVICE);
        routerMetrics.succeededReplaceLabelsOnNodeRetrieved(stopTime - startTime);
        String msg = "subCluster#" + subClusterInfo.getSubClusterId().getId() + ":Success;";
        return Response.status(Status.OK).entity(msg).build();
    } catch (Exception e) {
        routerMetrics.incrReplaceLabelsOnNodeFailedRetrieved();
        RouterAuditLogger.logFailure(getUser().getShortUserName(), REPLACE_LABELSONNODE, UNKNOWN,
            TARGET_WEB_SERVICE, e.getLocalizedMessage());
        throw e;
    }
}
|
@Test
public void testReplaceLabelsOnNode() throws Exception {
    // subCluster3 -> node3:3 -> label:NodeLabel3
    String nodeId = "node3:3";
    Set<String> labels = Collections.singleton("NodeLabel3");
    // We expect the following result: subCluster#3:Success;
    String expectValue = "subCluster#3:Success;";
    Response response = interceptor.replaceLabelsOnNode(labels, null, nodeId);
    Assert.assertNotNull(response);
    Assert.assertEquals(200, response.getStatus());
    // The response body names the sub-cluster that handled the replacement.
    Object entityObject = response.getEntity();
    Assert.assertNotNull(entityObject);
    String entityValue = String.valueOf(entityObject);
    Assert.assertNotNull(entityValue);
    Assert.assertEquals(expectValue, entityValue);
}
|
/**
 * Initializes the operator's runtime state: the window serializer and timer
 * service, the trigger context, the keyed list states for accumulation and
 * retraction, and the reusable output row objects. Finally opens the window
 * assigner with a fresh window context.
 */
@Override
public void open() throws Exception {
    super.open();
    windowSerializer = windowAssigner.getWindowSerializer(new ExecutionConfig());
    // Timers fire back into this operator (passed as the Triggerable target).
    internalTimerService = getInternalTimerService("window-timers", windowSerializer, this);
    triggerContext = new TriggerContext();
    triggerContext.open();
    StateDescriptor<ListState<RowData>, List<RowData>> windowStateDescriptor =
        new ListStateDescriptor<>("window-input", new RowDataSerializer(inputType));
    StateDescriptor<ListState<RowData>, List<RowData>> dataRetractStateDescriptor =
        new ListStateDescriptor<>("data-retract", new RowDataSerializer(inputType));
    // Per-key, per-window list states: accumulated input rows and retraction rows.
    this.windowAccumulateData =
        (InternalListState<K, W, RowData>)
            getOrCreateKeyedState(windowSerializer, windowStateDescriptor);
    this.windowRetractData =
        (InternalListState<K, W, RowData>)
            getOrCreateKeyedState(windowSerializer, dataRetractStateDescriptor);
    inputKeyAndWindow = new LinkedList<>();
    // Reused row instances to avoid per-record allocation.
    windowProperty = new GenericRowData(namedProperties.length);
    windowAggResult = new JoinedRowData();
    WindowContext windowContext = new WindowContext();
    windowAssigner.open(windowContext);
}
|
/**
 * End-to-end harness test for the group-window aggregate: feeds keyed rows
 * (including one retraction for key "c3"), fires all windows via a max
 * watermark, and compares emitted window results against the expected
 * per-key/per-window rows.
 */
@Test
void testGroupWindowAggregateFunction() throws Exception {
    OneInputStreamOperatorTestHarness<RowData, RowData> testHarness =
        getTestHarness(new Configuration());
    long initialTime = 0L;
    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    testHarness.open();
    testHarness.processElement(
        new StreamRecord<>(newBinaryRow(true, "c1", "c2", 0L, 0L), initialTime + 1));
    testHarness.processElement(
        new StreamRecord<>(newBinaryRow(true, "c1", "c4", 1L, 6000L), initialTime + 2));
    testHarness.processElement(
        new StreamRecord<>(newBinaryRow(true, "c1", "c6", 2L, 10000L), initialTime + 3));
    testHarness.processElement(
        new StreamRecord<>(newBinaryRow(true, "c2", "c8", 3L, 0L), initialTime + 4));
    testHarness.processElement(
        new StreamRecord<>(newBinaryRow(true, "c3", "c8", 3L, 0L), initialTime + 5));
    // Retraction (accumulate=false) cancels the previous "c3" row, so "c3"
    // produces no output below.
    testHarness.processElement(
        new StreamRecord<>(newBinaryRow(false, "c3", "c8", 3L, 0L), initialTime + 6));
    // Max watermark forces every pending window to fire before close.
    testHarness.processWatermark(Long.MAX_VALUE);
    testHarness.close();
    expectedOutput.add(
        new StreamRecord<>(
            newRow(
                true,
                "c1",
                0L,
                TimestampData.fromEpochMillis(-5000L),
                TimestampData.fromEpochMillis(5000L))));
    expectedOutput.add(
        new StreamRecord<>(
            newRow(
                true,
                "c2",
                3L,
                TimestampData.fromEpochMillis(-5000L),
                TimestampData.fromEpochMillis(5000L))));
    expectedOutput.add(
        new StreamRecord<>(
            newRow(
                true,
                "c1",
                0L,
                TimestampData.fromEpochMillis(0L),
                TimestampData.fromEpochMillis(10000L))));
    expectedOutput.add(
        new StreamRecord<>(
            newRow(
                true,
                "c2",
                3L,
                TimestampData.fromEpochMillis(0L),
                TimestampData.fromEpochMillis(10000L))));
    expectedOutput.add(
        new StreamRecord<>(
            newRow(
                true,
                "c1",
                1L,
                TimestampData.fromEpochMillis(5000L),
                TimestampData.fromEpochMillis(15000L))));
    expectedOutput.add(
        new StreamRecord<>(
            newRow(
                true,
                "c1",
                2L,
                TimestampData.fromEpochMillis(10000L),
                TimestampData.fromEpochMillis(20000L))));
    expectedOutput.add(new Watermark(Long.MAX_VALUE));
    assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
}
|
/**
 * Converts a Java {@link Type} to a {@code ParamType} using the varargs-specific
 * Java-to-argument-type mapping.
 *
 * @param type the Java reflection type to convert
 * @return the corresponding parameter type
 */
public static ParamType getVarArgsSchemaFromType(final Type type) {
    return getSchemaFromType(type, VARARGS_JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetPartialGenericTriFunctionVariadic() throws NoSuchMethodException {
    // Given: the generic return type of a helper method declared on this test class.
    final Type genericType = getClass().getMethod("partialGenericTriFunctionType").getGenericReturnType();
    // When:
    final ParamType returnType = UdfUtil.getVarArgsSchemaFromType(genericType);
    // Then: generic type variables survive as GenericType placeholders in the lambda shape.
    assertThat(returnType, is(LambdaType.of(ImmutableList.of(GenericType.of("T"), ParamTypes.BOOLEAN, GenericType.of("U")), ParamTypes.INTEGER)));
}
|
/**
 * Reads 8 bytes starting at {@code offset} as a big-endian signed long.
 *
 * @param bytes  source array; must contain at least {@code offset + 8} bytes
 * @param offset index of the most significant byte
 * @return the decoded long value
 */
public static long readLong(byte[] bytes, int offset) {
    long value = 0L;
    // Fold each byte in from the left: shift the accumulator and OR in the
    // next unsigned byte, most-significant byte first (big-endian).
    for (int i = 0; i < 8; i++) {
        value = (value << 8) | (bytes[offset + i] & 0xffL);
    }
    return value;
}
|
@Test
public void testReadLong() {
    // Decodes 8 bytes big-endian at successive offsets of the shared fixture.
    // Expected values are precomputed from BYTE_ARRAY's contents (fixture not visible here).
    assertEquals(-4020014679618420408L, IOUtils.readLong(BYTE_ARRAY, 0));
    assertEquals(3893910145419266185L, IOUtils.readLong(BYTE_ARRAY, 1));
    assertEquals(716817247016356198L, IOUtils.readLong(BYTE_ARRAY, 2));
}
|
/**
 * Computes the overall partition service state.
 *
 * <p>Partition-table problems take precedence; only when the table is SAFE is
 * replica synchronization checked (and triggered if needed).
 *
 * @return the table state when it is not SAFE; otherwise REPLICA_NOT_SYNC or SAFE
 *         depending on the replica-sync check
 */
@SuppressWarnings("checkstyle:npathcomplexity")
public PartitionServiceState getPartitionServiceState() {
    final PartitionServiceState tableState = getPartitionTableState();
    if (tableState != SAFE) {
        return tableState;
    }
    return checkAndTriggerReplicaSync() ? SAFE : REPLICA_NOT_SYNC;
}
|
@Test
public void shouldBeSafe_whenNotInitialized() {
    // A freshly started single instance (partitions not yet assigned) must
    // still report SAFE.
    TestHazelcastInstanceFactory factory = createHazelcastInstanceFactory();
    HazelcastInstance hz = factory.newHazelcastInstance();
    InternalPartitionServiceImpl partitionService = getNode(hz).partitionService;
    PartitionReplicaStateChecker replicaStateChecker = partitionService.getPartitionReplicaStateChecker();
    PartitionServiceState state = replicaStateChecker.getPartitionServiceState();
    assertEquals(PartitionServiceState.SAFE, state);
}
|
/**
 * Validates and persists an X.509 certificate.
 * <p>
 * The first CSCA certificate for a document type is self-signature-checked only and
 * stored untrusted (the trusted flag must be set manually, as logged). Subsequent
 * certificates are verified against the existing chain via {@code verify}.
 *
 * @param cert the certificate to add
 * @return the persisted entity
 * @throws ClientException if the certificate cannot be encoded or verified
 */
public Certificate add(X509Certificate cert) {
    final Certificate db;
    try {
        db = Certificate.from(cert);
    } catch (CertificateEncodingException e) {
        logger.error("Encoding error in certificate", e);
        throw new ClientException("Encoding error in certificate", e);
    }
    try {
        // Special case for first CSCA certificate for this document type:
        // there is no chain to verify against yet, so only check the self-signature.
        if (repository.countByDocumentTypeAndType(db.getDocumentType(), db.getType()) == 0) {
            cert.verify(cert.getPublicKey());
            logger.warn("Added first CSCA certificate for {}, set trusted flag manually", db.getDocumentType());
        } else {
            // When adding expired certificates is allowed, verify as of the cert's own
            // expiry instant instead of "now" so expiry does not fail the check.
            verify(cert, allowAddingExpired ? cert.getNotAfter() : null);
        }
    } catch (GeneralSecurityException | VerificationException e) {
        logger.error(
            String.format("Could not verify certificate of %s issued by %s",
                cert.getSubjectX500Principal(), cert.getIssuerX500Principal()
            ), e
        );
        throw new ClientException("Could not verify certificate", e);
    }
    return repository.saveAndFlush(db);
}
|
@Test
public void shouldAllowToAddCertificateIfFirstOfDocumentType() throws Exception {
    // Seed the repository with a CSCA for a different document type; the return
    // value was previously bound to an unused local, kept only for its side effect.
    loadCertificate("rdw/acc/csca.crt", true);
    final X509Certificate cert = readCertificate("nik/tv/csca.crt");
    // When: adding the first certificate of a new document type
    final Certificate dbCert = service.add(cert);
    assertEquals(X509Factory.toCanonical(cert.getSubjectX500Principal()), dbCert.getSubject());
    // Then: the first certificate of a document type is stored untrusted;
    // the trusted flag must be set manually.
    assertEquals(false, dbCert.isTrusted());
}
|
/**
 * Resolves the set of menu ids visible to the current tenant and passes it to the handler.
 * Does nothing when the multi-tenant feature is disabled.
 */
@Override
public void handleTenantMenu(TenantMenuHandler handler) {
    // Skip entirely when the multi-tenant feature is disabled.
    if (isTenantDisable()) {
        return;
    }
    // Resolve the current tenant, then determine its visible menus.
    TenantDO tenant = getTenant(TenantContextHolder.getRequiredTenantId());
    Set<Long> menuIds;
    if (isSystemTenant(tenant)) { // system tenant: gets the full menu set
        menuIds = CollectionUtils.convertSet(menuService.getMenuList(), MenuDO::getId);
    } else {
        // Regular tenant: menus come from the tenant's package definition.
        menuIds = tenantPackageService.getTenantPackage(tenant.getPackageId()).getMenuIds();
    }
    // Delegate the resolved menu ids to the caller-supplied handler.
    handler.handle(menuIds);
}
|
@Test // system-tenant case
public void testHandleTenantMenu_system() {
    // Prepare parameters
    TenantMenuHandler handler = mock(TenantMenuHandler.class);
    // mock: tenant feature enabled (not disabled)
    when(tenantProperties.getEnable()).thenReturn(true);
    // mock the tenant record
    TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setPackageId(PACKAGE_ID_SYSTEM));
    tenantMapper.insert(dbTenant); // @Sql: insert an existing row first
    TenantContextHolder.setTenantId(dbTenant.getId());
    // mock the full menu list
    when(menuService.getMenuList()).thenReturn(Arrays.asList(randomPojo(MenuDO.class, o -> o.setId(100L)),
        randomPojo(MenuDO.class, o -> o.setId(101L))));
    // Invoke
    tenantService.handleTenantMenu(handler);
    // Assert: the system tenant receives the complete menu id set
    verify(handler).handle(asSet(100L, 101L));
}
|
/**
 * Terminates the workflow execution with the given status and reason.
 * A CONFLICT from the executor (workflow already in a terminal state) is
 * treated as success; any other ApplicationException is propagated.
 */
public void terminate(String executionId, WorkflowInstance.Status status, String reason) {
    final String terminationReason = status + "-" + reason;
    try {
        workflowExecutor.terminateWorkflow(executionId, terminationReason);
    } catch (ApplicationException e) {
        if (e.getCode() == CONFLICT) {
            // Already terminated elsewhere; nothing left to do.
            return;
        }
        throw e;
    }
}
|
@Test
public void testTerminate() {
    runner.terminate("test-uuid", WorkflowInstance.Status.STOPPED, "test-reason");
    // The executor must be invoked once with the "<status>-<reason>" composite reason.
    verify(workflowExecutor, times(1)).terminateWorkflow("test-uuid", "STOPPED-test-reason");
}
|
/**
 * Creates the Hudi table sink from the catalog table definition.
 * Validates that the mandatory [path] option is present, runs sanity checks
 * against the resolved schema, then derives table/key/sort options before
 * constructing the sink.
 */
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    Configuration conf = FlinkOptions.fromMap(context.getCatalogTable().getOptions());
    // [path] is the only strictly required option.
    checkArgument(!StringUtils.isNullOrEmpty(conf.getString(FlinkOptions.PATH)),
        "Option [path] should not be empty.");
    setupTableOptions(conf.getString(FlinkOptions.PATH), conf);
    ResolvedSchema schema = context.getCatalogTable().getResolvedSchema();
    sanityCheck(conf, schema);
    // Derive record key / keygen / index options from the schema and identifier.
    setupConfOptions(conf, context.getObjectIdentifier(), context.getCatalogTable(), schema);
    setupSortOptions(conf, context.getConfiguration());
    return new HoodieTableSink(conf, schema);
}
|
/**
 * Verifies how record-key and key-generator options are derived from the
 * table's primary key, partition path, and index type when the sink is created.
 */
@Test
void testSetupHoodieKeyOptionsForSink() {
    this.conf.setString(FlinkOptions.RECORD_KEY_FIELD, "dummyField");
    this.conf.setString(FlinkOptions.KEYGEN_CLASS_NAME, "dummyKeyGenClass");
    // definition with simple primary key and partition path:
    // the primary key overrides RECORD_KEY_FIELD, the explicit keygen class is kept
    ResolvedSchema schema1 = SchemaBuilder.instance()
        .field("f0", DataTypes.INT().notNull())
        .field("f1", DataTypes.VARCHAR(20))
        .field("f2", DataTypes.BIGINT())
        .field("ts", DataTypes.TIMESTAMP(3))
        .primaryKey("f0")
        .build();
    final MockContext sinkContext1 = MockContext.getInstance(this.conf, schema1, "f2");
    final HoodieTableSink tableSink1 = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sinkContext1);
    final Configuration conf1 = tableSink1.getConf();
    assertThat(conf1.get(FlinkOptions.RECORD_KEY_FIELD), is("f0"));
    assertThat(conf1.get(FlinkOptions.KEYGEN_CLASS_NAME), is("dummyKeyGenClass"));
    // definition with complex primary keys and partition paths:
    // without an explicit keygen class, a composite key selects the complex keygen
    this.conf.removeConfig(FlinkOptions.KEYGEN_CLASS_NAME);
    ResolvedSchema schema2 = SchemaBuilder.instance()
        .field("f0", DataTypes.INT().notNull())
        .field("f1", DataTypes.VARCHAR(20).notNull())
        .field("f2", DataTypes.TIMESTAMP(3))
        .field("ts", DataTypes.TIMESTAMP(3))
        .primaryKey("f0", "f1")
        .build();
    final MockContext sinkContext2 = MockContext.getInstance(this.conf, schema2, "f2");
    final HoodieTableSink tableSink2 = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sinkContext2);
    final Configuration conf2 = tableSink2.getConf();
    assertThat(conf2.get(FlinkOptions.RECORD_KEY_FIELD), is("f0,f1"));
    assertThat(conf2.get(FlinkOptions.KEYGEN_CLASS_NAME), is(ComplexAvroKeyGenerator.class.getName()));
    // definition with complex primary keys and empty partition paths:
    // no partitioning selects the non-partitioned keygen
    this.conf.removeConfig(FlinkOptions.KEYGEN_CLASS_NAME);
    final MockContext sinkContext3 = MockContext.getInstance(this.conf, schema2, "");
    final HoodieTableSink tableSink3 = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sinkContext3);
    final Configuration conf3 = tableSink3.getConf();
    assertThat(conf3.get(FlinkOptions.RECORD_KEY_FIELD), is("f0,f1"));
    assertThat(conf3.get(FlinkOptions.KEYGEN_CLASS_NAME), is(NonpartitionedAvroKeyGenerator.class.getName()));
    // definition of bucket index: index key field mirrors the record key fields
    this.conf.setString(FlinkOptions.INDEX_TYPE, HoodieIndex.IndexType.BUCKET.name());
    final MockContext sinkContext4 = MockContext.getInstance(this.conf, schema2, "");
    final HoodieTableSink tableSink4 = (HoodieTableSink) new HoodieTableFactory().createDynamicTableSink(sinkContext4);
    final Configuration conf4 = tableSink4.getConf();
    assertThat(conf4.get(FlinkOptions.RECORD_KEY_FIELD), is("f0,f1"));
    assertThat(conf4.get(FlinkOptions.INDEX_KEY_FIELD), is("f0,f1"));
    assertThat(conf4.get(FlinkOptions.INDEX_TYPE), is(HoodieIndex.IndexType.BUCKET.name()));
    assertThat(conf4.get(FlinkOptions.KEYGEN_CLASS_NAME), is(NonpartitionedAvroKeyGenerator.class.getName()));
}
|
/**
 * Returns {@code true} when the given string is a URI with both a scheme and a host,
 * e.g. {@code "https://example.com/path"}. Null, blank, relative, and scheme-only
 * strings (such as {@code "mailto:x"}) return {@code false}.
 *
 * @param potentialUri the candidate string; may be null
 * @return whether the string parses as a URI carrying scheme and host
 */
public static boolean isUri(String potentialUri) {
    // Use the JDK blank check instead of commons-lang StringUtils for this trivial case.
    if (potentialUri == null || potentialUri.isBlank()) {
        return false;
    }
    try {
        final URI uri = new URI(potentialUri);
        // Require both a scheme and a host; either may be null for partial URIs.
        return uri.getScheme() != null && uri.getHost() != null;
    } catch (URISyntaxException e) {
        return false;
    }
}
|
@Test public void
returns_false_when_uri_is_empty() {
    // Empty input must be rejected before any URI parsing is attempted.
    assertThat(UriValidator.isUri(""), is(false));
}
|
/**
 * Gets or creates the segment for the given id if it is still within the retention
 * window, then expires segments that have fallen out of the window based on the
 * observed stream time.
 *
 * @param segmentId  id of the segment to fetch or create
 * @param context    processor context used for segment creation
 * @param streamTime current stream time driving expiry
 * @return the live segment, as returned by the superclass
 */
@Override
public TimestampedSegment getOrCreateSegmentIfLive(final long segmentId,
                                                   final ProcessorContext context,
                                                   final long streamTime) {
    final TimestampedSegment segment = super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
    // Cleanup is done after creation so the new segment itself is considered.
    cleanupExpiredSegments(streamTime);
    return segment;
}
|
@Test
public void shouldCreateSegments() {
    // Create three consecutive segments; stream time of -1 keeps them all live.
    final TimestampedSegment segment1 = segments.getOrCreateSegmentIfLive(0, context, -1L);
    final TimestampedSegment segment2 = segments.getOrCreateSegmentIfLive(1, context, -1L);
    final TimestampedSegment segment3 = segments.getOrCreateSegmentIfLive(2, context, -1L);
    // Each segment gets its own directory named by its start timestamp.
    assertTrue(new File(context.stateDir(), "test/test.0").isDirectory());
    assertTrue(new File(context.stateDir(), "test/test." + SEGMENT_INTERVAL).isDirectory());
    assertTrue(new File(context.stateDir(), "test/test." + 2 * SEGMENT_INTERVAL).isDirectory());
    assertTrue(segment1.isOpen());
    assertTrue(segment2.isOpen());
    assertTrue(segment3.isOpen());
}
|
/**
 * Compares local and remote attributes by checksum.
 * Returns {@code unknown} when either side lacks a checksum, {@code equal}
 * when the checksums match, and {@code notequal} otherwise.
 */
@Override
public Comparison compare(final Path.Type type, final PathAttributes local, final PathAttributes remote) {
    final Checksum remoteChecksum = remote.getChecksum();
    if(Checksum.NONE == remoteChecksum) {
        log.warn(String.format("No remote checksum available for comparison %s", remote));
        return Comparison.unknown;
    }
    final Checksum localChecksum = local.getChecksum();
    if(Checksum.NONE == localChecksum) {
        log.warn(String.format("No local checksum available for comparison %s", local));
        return Comparison.unknown;
    }
    if(remoteChecksum.equals(localChecksum)) {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Equal checksum %s", remoteChecksum));
        }
        return Comparison.equal;
    }
    if(log.isDebugEnabled()) {
        log.debug(String.format("Local checksum %s not equal remote %s", localChecksum, remoteChecksum));
    }
    return Comparison.notequal;
}
|
@Test
public void testDirectory() {
    // Fresh PathAttributes carry no checksum, so the comparison must be unknown.
    ComparisonService s = new ChecksumComparisonService();
    assertEquals(Comparison.unknown, s.compare(Path.Type.directory, new PathAttributes(),
        new PathAttributes()));
}
|
/**
 * Installs an end handler that writes a combined-log-format access line once the
 * response completes, then passes the request to the next handler in the chain.
 * Logging is gated by a per-path/status rate limiter and an optional endpoint filter;
 * query strings are only included when query logging is enabled.
 */
@Override
public void handle(final RoutingContext routingContext) {
    routingContext.addEndHandler(ar -> {
        // After the response is complete, log results here.
        final int status = routingContext.request().response().getStatusCode();
        if (!loggingRateLimiter.shouldLog(logger, routingContext.request().path(), status)) {
            return;
        }
        final long contentLength = routingContext.request().response().bytesWritten();
        final HttpVersion version = routingContext.request().version();
        final HttpMethod method = routingContext.request().method();
        // Omit the query string unless query logging is explicitly enabled.
        final String uri = enableQueryLogging
            ? routingContext.request().uri()
            : routingContext.request().path();
        // NOTE(review): the endpoint filter is consulted AFTER shouldLog() — if the
        // rate limiter tracks per-call state, filtered endpoints may still consume
        // a slot; confirm this ordering is intentional.
        if (endpointFilter.isPresent() && endpointFilter.get().matcher(uri).matches()) {
            return;
        }
        final long requestBodyLength = routingContext.request().bytesRead();
        final String versionFormatted;
        switch (version) {
            case HTTP_1_0:
                versionFormatted = "HTTP/1.0";
                break;
            case HTTP_1_1:
                versionFormatted = "HTTP/1.1";
                break;
            case HTTP_2:
                versionFormatted = "HTTP/2.0";
                break;
            default:
                versionFormatted = "-";
        }
        // "-" placeholders follow the common-log convention for missing fields.
        final String name = Optional.ofNullable((ApiUser) routingContext.user())
            .map(u -> u.getPrincipal().getName())
            .orElse("-");
        final String userAgent = Optional.ofNullable(
            routingContext.request().getHeader(HTTP_HEADER_USER_AGENT)).orElse("-");
        final String timestamp = Utils.formatRFC1123DateTime(clock.millis());
        final SocketAddress socketAddress = routingContext.request().remoteAddress();
        final String message = String.format(
            "%s - %s [%s] \"%s %s %s\" %d %d \"-\" \"%s\" %d",
            socketAddress == null ? "null" : socketAddress.host(),
            name,
            timestamp,
            method,
            uri,
            versionFormatted,
            status,
            contentLength,
            userAgent,
            requestBodyLength);
        doLog(status, message);
    });
    routingContext.next();
}
|
@Test
public void shouldSkipRateLimited() {
    // Given: the rate limiter allows the first two calls and rejects the next two
    when(response.getStatusCode()).thenReturn(200);
    when(loggingRateLimiter.shouldLog(logger, "/query", 200)).thenReturn(true, true, false, false);
    // When: four requests complete
    loggingHandler.handle(routingContext);
    loggingHandler.handle(routingContext);
    loggingHandler.handle(routingContext);
    loggingHandler.handle(routingContext);
    verify(routingContext, times(4)).addEndHandler(endCallback.capture());
    for (Handler<AsyncResult<Void>> handler : endCallback.getAllValues()) {
        handler.handle(null);
    }
    // Then: only the two permitted requests are logged, each with the full access line
    verify(logger, times(2)).info(logStringCaptor.capture());
    for (String message : logStringCaptor.getAllValues()) {
        assertThat(message,
            is("123.111.222.333 - - [Sun, 12 Nov 2023 18:23:54 GMT] "
                + "\"POST /query HTTP/1.1\" 200 5678 \"-\" \"bot\" 3456"));
    }
}
|
/**
 * Stops the stopwatch by recording the stop instant, if it is currently running.
 * Calling stop on an already-stopped stopwatch is a no-op.
 *
 * @return this stopwatch, for chaining
 */
public Stopwatch stop() {
    // Braced for clarity; single-statement ifs are easy to mis-extend.
    if (isRunning()) {
        stopTime = Instant.now();
    }
    return this;
}
|
@Test
public void stop() {
    // Smoke test only: verifies stop() completes without throwing.
    // NOTE(review): no assertion on stopTime/isRunning — consider strengthening.
    stopwatch.stop();
}
|
/**
 * Parses a JCA algorithm string into a model node.
 * Delegates first to the registered JCA-specific mappers; if none matches,
 * falls back to a small set of built-in names, and finally wraps any
 * unrecognized string as an Unknown algorithm node.
 *
 * @param str               the algorithm string, may be null
 * @param detectionLocation where the string was detected
 * @return the mapped node, or empty only when {@code str} is null
 */
@Nonnull
@Override
public Optional<? extends INode> parse(
        @Nullable final String str, @Nonnull DetectionLocation detectionLocation) {
    if (str == null) {
        return Optional.empty();
    }
    // Specific mappers take precedence over the generic fallback below.
    for (IMapper mapper : jcaSpecificAlgorithmMappers) {
        Optional<? extends INode> asset = mapper.parse(str, detectionLocation);
        if (asset.isPresent()) {
            return asset;
        }
    }
    return switch (str.toUpperCase().trim()) {
        case "PBE", "PBES2" -> Optional.of(new PasswordBasedEncryption(detectionLocation));
        case "DH", "DIFFIEHELLMAN" -> Optional.of(new DH(detectionLocation));
        case "RSA" -> Optional.of(new RSA(detectionLocation));
        case "EC" ->
                Optional.of(new Algorithm(str, PublicKeyEncryption.class, detectionLocation));
        default -> {
            // Never return empty for a non-null string: record it as Unknown instead.
            final Algorithm algorithm = new Algorithm(str, Unknown.class, detectionLocation);
            algorithm.put(new Unknown(detectionLocation));
            yield Optional.of(algorithm);
        }
    };
}
|
@Test
void mac() {
    DetectionLocation testDetectionLocation =
        new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
    JcaAlgorithmMapper jcaAlgorithmMapper = new JcaAlgorithmMapper();
    // An HMAC name must be resolved by one of the JCA-specific mappers into a Mac node.
    Optional<? extends INode> assetOptional =
        jcaAlgorithmMapper.parse("HmacSHA512/224", testDetectionLocation);
    assertThat(assetOptional).isPresent();
    assertThat(assetOptional.get().is(Mac.class)).isTrue();
}
|
/**
 * Intentional no-op implementation of the settings-change notifier:
 * this variant records nothing when a property is saved.
 */
@Override
public void saveProperty(DbSession session, PropertyDto property, @Nullable String userLogin,
    @Nullable String projectKey, @Nullable String projectName, @Nullable String qualifier) {
    // do nothing
}
|
@Test
public void insertProperty1() {
    // Saving through the no-op implementation must not touch any collaborator.
    underTest.saveProperty(propertyDto);
    assertNoInteraction();
}
|
/**
 * Encodes an instruction into its JSON representation.
 *
 * @param instruction the instruction to encode; must not be null
 * @param context     codec context used for nested encodings
 * @return the JSON object node for the instruction
 */
@Override
public ObjectNode encode(Instruction instruction, CodecContext context) {
    checkNotNull(instruction, "Instruction cannot be null");
    // All per-type encoding logic lives in the helper.
    return new EncodeInstructionCodecHelper(instruction, context).encode();
}
|
@Test
public void pushHeaderInstructionsTest() {
    // A push-MPLS instruction must round-trip to JSON matching the instruction matcher.
    final L2ModificationInstruction.ModMplsHeaderInstruction instruction =
        (L2ModificationInstruction.ModMplsHeaderInstruction) Instructions.pushMpls();
    final ObjectNode instructionJson = instructionCodec.encode(instruction, context);
    assertThat(instructionJson, matchesInstruction(instruction));
}
|
/**
 * Sends this request synchronously through the underlying web3j service and
 * deserializes the response into the configured response type.
 *
 * @return the typed response
 * @throws IOException if the underlying transport fails
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
|
@Test
public void testEthGetTransactionReceipt() throws Exception {
    // Sending the request must serialize into the expected JSON-RPC payload.
    web3j.ethGetTransactionReceipt(
            "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238")
        .send();
    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getTransactionReceipt\",\"params\":["
            + "\"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238\"],"
            + "\"id\":1}");
}
|
/**
 * Runs one preemption editing pass over the scheduler's queue hierarchy:
 * refreshes configuration if needed, snapshots the cluster resources, and
 * performs container-based preempt-or-kill starting from the root queue.
 * Synchronized: only one editing pass may run at a time.
 */
@Override
public synchronized void editSchedule() {
    updateConfigIfNeeded();
    long startTs = clock.getTime();
    CSQueue root = scheduler.getRootQueue();
    // Clone so the pass works on a stable snapshot of cluster resources.
    Resource clusterResources = Resources.clone(scheduler.getClusterResource());
    containerBasedPreemptOrKill(root, clusterResources);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms.");
    }
}
|
@Test
public void testHierarchicalWithReserved() {
    // Queue matrix: each column is a queue, each row a metric (see row comments).
    int[][] qData = new int[][] {
        // /   A   B   C   D   E   F
        { 200, 100, 50, 50, 100, 10, 90 }, // abs
        { 200, 200, 200, 200, 200, 200, 200 }, // maxCap
        { 200, 110, 60, 50, 90, 90, 0 }, // used
        { 10, 0, 0, 0, 10, 0, 10 }, // pending
        { 40, 25, 15, 10, 15, 15, 0 }, // reserved
        { 4, 2, 1, 1, 2, 1, 1 }, // apps
        { -1, -1, 1, 1, -1, 1, 1 }, // req granularity
        { 2, 2, 0, 0, 2, 0, 0 }, // subqueues
    };
    ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
    policy.editSchedule();
    // verify capacity taken from A1, not B1 despite B1 being far over
    // its absolute guaranteed capacity
    verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appA)));
}
|
/**
 * Verifies that the current principal owns the given transaction.
 * Falls back to a super-user check when ownership fails and both
 * authentication and authorization are enabled; otherwise denies.
 * Must be invoked from the channel's event loop; continuations are
 * scheduled back onto the same executor.
 *
 * @param txnID the transaction to check
 * @return future completing with {@code true} if the caller may act on the txn
 */
private CompletableFuture<Boolean> verifyTxnOwnership(TxnID txnID) {
    assert ctx.executor().inEventLoop();
    return service.pulsar().getTransactionMetadataStoreService()
        .verifyTxnOwnership(txnID, getPrincipal())
        .thenComposeAsync(isOwner -> {
            if (isOwner) {
                return CompletableFuture.completedFuture(true);
            }
            // Not the owner: a super user may still act, but only when auth is on.
            if (service.isAuthenticationEnabled() && service.isAuthorizationEnabled()) {
                return isSuperUser();
            } else {
                return CompletableFuture.completedFuture(false);
            }
        }, ctx.executor());
}
|
@Test(timeOut = 30000)
public void sendEndTxnOnPartitionResponse() throws Exception {
    // Given: a transaction store that accepts ownership and end-transaction calls
    final TransactionMetadataStoreService txnStore = mock(TransactionMetadataStoreService.class);
    when(txnStore.getTxnMeta(any())).thenReturn(CompletableFuture.completedFuture(mock(TxnMeta.class)));
    when(txnStore.verifyTxnOwnership(any(), any())).thenReturn(CompletableFuture.completedFuture(true));
    when(txnStore.endTransaction(any(TxnID.class), anyInt(), anyBoolean()))
        .thenReturn(CompletableFuture.completedFuture(null));
    when(pulsar.getTransactionMetadataStoreService()).thenReturn(txnStore);
    svcConfig.setTransactionCoordinatorEnabled(true);
    resetChannel();
    setChannelConnected();
    // And: a topic whose endTxn succeeds
    Topic topic = mock(Topic.class);
    doReturn(CompletableFuture.completedFuture(null)).when(topic).endTxn(any(TxnID.class), anyInt(), anyLong());
    doReturn(CompletableFuture.completedFuture(Optional.of(topic))).when(brokerService)
        .getTopicIfExists(any(String.class));
    // When: an end-txn-on-partition COMMIT command arrives
    ByteBuf clientCommand = Commands.newEndTxnOnPartition(89L, 1L, 12L,
        successTopicName, TxnAction.COMMIT, 1L);
    channel.writeInbound(clientCommand);
    // Then: the response echoes the request/txn ids and carries no error
    CommandEndTxnOnPartitionResponse response = (CommandEndTxnOnPartitionResponse) getResponse();
    assertEquals(response.getRequestId(), 89L);
    assertEquals(response.getTxnidLeastBits(), 1L);
    assertEquals(response.getTxnidMostBits(), 12L);
    assertFalse(response.hasError());
    assertFalse(response.hasMessage());
    channel.finish();
}
|
/**
 * Determines the transfer item(s) for a single command-line transfer action.
 * With two option values (remote + local), delegates to the dedicated
 * download/upload finders. With any other arity, upload/synchronize yield no
 * items, while remaining cases (e.g. download with one value) fall through to
 * a single item resolved relative to the current working directory.
 */
@Override
public Set<TransferItem> find(final CommandLine input, final TerminalAction action, final Path remote) {
    if(input.getOptionValues(action.name()).length == 2) {
        switch(action) {
            case download:
                return new DownloadTransferItemFinder().find(input, action, remote);
            case upload:
            case synchronize:
                return new UploadTransferItemFinder().find(input, action, remote);
        }
    }
    else {
        switch(action) {
            case upload:
            case synchronize:
                // Upload/synchronize require an explicit local argument.
                return Collections.emptySet();
        }
    }
    // Relative to current working directory using prefix finder.
    return Collections.singleton(
        new TransferItem(remote, LocalFactory.get(prefixer.normalize(remote.getName())))
    );
}
|
@Test
public void testUploadFilesWithExpandedGlobToDirectoryTarget() throws Exception {
    // Given: an upload with a remote directory and two expanded local files
    final CommandLineParser parser = new PosixParser();
    final String temp = System.getProperty("java.io.tmpdir");
    final CommandLine input = parser.parse(TerminalOptionsBuilder.options(), new String[]{"--upload", "ftps://test.cyberduck.ch/remote/", String.format("%s/f1", temp),
        String.format("%s/f2", temp)});
    // When: resolving transfer items against the remote directory
    final Set<TransferItem> found = new SingleTransferItemFinder().find(input, TerminalAction.upload, new Path("/remote", EnumSet.of(Path.Type.directory)));
    // Then: each local file maps to a file path under the remote directory
    assertFalse(found.isEmpty());
    assertEquals(2, found.size());
    final Iterator<TransferItem> iter = found.iterator();
    assertEquals(new TransferItem(new Path("/remote/f1", EnumSet.of(Path.Type.file)), new Local(temp, "f1")), iter.next());
    assertEquals(new TransferItem(new Path("/remote/f2", EnumSet.of(Path.Type.file)), new Local(temp, "f2")), iter.next());
}
|
/**
 * Estimates the in-memory size of a dataset as {@code defaultSize * totalRecords},
 * saturating at {@link Long#MAX_VALUE} on overflow or when the record count is
 * itself unknown (represented by {@code Long.MAX_VALUE}).
 *
 * @param tableSchema  schema whose default row size is used; not dereferenced when
 *                     {@code totalRecords == Long.MAX_VALUE}
 * @param totalRecords total record count, or {@code Long.MAX_VALUE} if unknown
 * @return the estimated size in bytes, saturated at {@code Long.MAX_VALUE}
 */
public static long estimateSize(StructType tableSchema, long totalRecords) {
    if (totalRecords == Long.MAX_VALUE) {
        return totalRecords;
    }
    // Use the JDK's overflow-checked multiply instead of Guava's LongMath equivalent.
    try {
        return Math.multiplyExact(tableSchema.defaultSize(), totalRecords);
    } catch (ArithmeticException e) {
        return Long.MAX_VALUE;
    }
}
|
@Test
public void testEstimateSizeMaxValue() throws IOException {
    // Long.MAX_VALUE means "unknown count": must short-circuit before touching the
    // schema, hence the null schema argument is safe here.
    Assert.assertEquals(
        "estimateSize returns Long max value",
        Long.MAX_VALUE,
        SparkSchemaUtil.estimateSize(null, Long.MAX_VALUE));
}
|
/**
 * Parses this attempt's log file under the given application directory and stores
 * the extracted entities via the timeline data manager. The file is only re-parsed
 * when its modification time advanced past the last processed time; corrupted or
 * empty files are skipped with a log message rather than failing the scan.
 *
 * @param tdm          target store for parsed entities
 * @param appDirPath   application log directory containing this attempt's file
 * @param appCompleted whether the application has finished
 * @param jsonFactory  JSON factory used by the parser
 * @param objMapper    object mapper used by the parser
 * @param fs           filesystem hosting the logs
 * @return number of entities parsed in this invocation (0 if skipped)
 * @throws IOException if the file status cannot be read
 */
public long parseForStore(TimelineDataManager tdm, Path appDirPath,
    boolean appCompleted, JsonFactory jsonFactory, ObjectMapper objMapper,
    FileSystem fs) throws IOException {
    LOG.debug("Parsing for log dir {} on attempt {}", appDirPath,
        attemptDirName);
    Path logPath = getPath(appDirPath);
    FileStatus status = fs.getFileStatus(logPath);
    long numParsed = 0;
    if (status != null) {
        long curModificationTime = status.getModificationTime();
        // Only re-parse when the file changed since the last pass.
        if (curModificationTime > getLastProcessedTime()) {
            long startTime = Time.monotonicNow();
            try {
                LOG.info("Parsing {} at offset {}", logPath, offset);
                long count =
                    parsePath(tdm, logPath, appCompleted, jsonFactory, objMapper, fs);
                setLastProcessedTime(curModificationTime);
                LOG.info("Parsed {} entities from {} in {} msec", count, logPath,
                    Time.monotonicNow() - startTime);
                numParsed += count;
            } catch (RuntimeException e) {
                // If AppLogs cannot parse this log, it may be corrupted or just empty
                if (e.getCause() instanceof JsonParseException
                    && (status.getLen() > 0 || offset > 0)) {
                    // log on parse problems if the file as been read in the past or
                    // is visibly non-empty
                    LOG.info("Log {} appears to be corrupted. Skip. ", logPath);
                } else {
                    LOG.error("Failed to parse " + logPath + " from offset " + offset,
                        e);
                }
            }
        } else {
            LOG.info("Skip Parsing {} as there is no change", logPath);
        }
    } else {
        LOG.warn("{} no longer exists. Skip for scanning. ", logPath);
    }
    return numParsed;
}
|
@Test
void testParseDomain() throws Exception {
    // Load test data: parse the domain log file into an in-memory store
    TimelineDataManager tdm = PluginStoreTestUtils.getTdmWithMemStore(config);
    DomainLogInfo domainLogInfo = new DomainLogInfo(TEST_ATTEMPT_DIR_NAME,
        TEST_DOMAIN_FILE_NAME,
        UserGroupInformation.getLoginUser().getUserName());
    domainLogInfo.parseForStore(tdm, getTestRootPath(), true, jsonFactory, objMapper,
        fs);
    // Verify domain data: the stored domain must match the expected fixture
    TimelineDomain resultDomain = tdm.getDomain("domain_1",
        UserGroupInformation.getLoginUser());
    assertNotNull(resultDomain);
    assertEquals(testDomain.getReaders(), resultDomain.getReaders());
    assertEquals(testDomain.getOwner(), resultDomain.getOwner());
    assertEquals(testDomain.getDescription(), resultDomain.getDescription());
}
|
/**
 * Static factory entry point: returns a new, unconfigured {@code Read} transform.
 *
 * @return a fresh {@code Read} instance to be configured via its {@code with*} methods
 */
public static Read read() {
    return Read.create();
}
|
@Test
public void testReadingFailsTableDoesNotExist() throws Exception {
    final String table = "TEST-TABLE";
    BigtableIO.Read read =
        BigtableIO.read()
            .withBigtableOptions(BIGTABLE_OPTIONS)
            .withTableId(table)
            .withServiceFactory(factory);
    // Exception will be thrown by read.validate() when read is applied,
    // i.e. at pipeline construction time rather than at run time.
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage(String.format("Table %s does not exist", table));
    p.apply(read);
    p.run();
}
|
/**
 * Builds the ordered list of log-aggregation file controllers from configuration.
 * Each configured format name is validated, checked for conflicts with already
 * registered controllers, instantiated from its determined class name, and
 * initialized before being added to the controller list (configuration order
 * is preserved).
 *
 * @param conf configuration naming the enabled file formats
 */
public LogAggregationFileControllerFactory(Configuration conf) {
    this.conf = conf;
    Collection<String> fileControllers = conf.getStringCollection(
        YarnConfiguration.LOG_AGGREGATION_FILE_FORMATS);
    // Tracks already-seen class names so conflicting controllers are rejected.
    Map<String, String> controllerChecker = new HashMap<>();
    for (String controllerName : fileControllers) {
        validateAggregatedFileControllerName(controllerName);
        validateConflictingControllers(conf, controllerChecker, controllerName);
        DeterminedControllerClassName className =
            new DeterminedControllerClassName(conf, controllerName);
        LogAggregationFileController controller = createFileControllerInstance(conf,
            controllerName, className);
        controller.initialize(conf, controllerName);
        controllers.add(controller);
    }
}
|
@Test
void testLogAggregationFileControllerFactory() throws Exception {
    // Given: all three file controllers enabled, Test format configured first
    enableFileControllers(getConf(), ALL_FILE_CONTROLLERS, ALL_FILE_CONTROLLER_NAMES);
    LogAggregationFileControllerFactory factory =
        new LogAggregationFileControllerFactory(getConf());
    List<LogAggregationFileController> list =
        factory.getConfiguredLogAggregationFileControllerList();
    // Then: controllers appear in configuration order
    assertEquals(3, list.size(), "The expected number of LogAggregationFileController " +
        "is not 3!");
    assertTrue(list.get(0) instanceof
        TestLogAggregationFileController, "Test format is expected to be the first " +
        "LogAggregationFileController!");
    assertTrue(list.get(1) instanceof
        LogAggregationIndexedFileController, "IFile format is expected to be the second " +
        "LogAggregationFileController!");
    // Fixed copy-paste error: this failure message previously said "first".
    assertTrue(list.get(2) instanceof
        LogAggregationTFileController, "TFile format is expected to be the third " +
        "LogAggregationFileController!");
    // The first configured controller is used for writing.
    assertTrue(factory.getFileControllerForWrite() instanceof
        TestLogAggregationFileController,
        "Test format is expected to be used for writing!");
    verifyFileControllerInstance(factory,
        TestLogAggregationFileController.class);
}
|
/**
 * Private constructor: instances are obtained via the static factory so that
 * inspectors can be shared per (precision, scale) pair.
 *
 * @param precision decimal precision of the inspected type
 * @param scale     decimal scale of the inspected type
 */
private IcebergDecimalObjectInspector(int precision, int scale) {
    super(new DecimalTypeInfo(precision, scale));
}
|
@Test
public void testIcebergDecimalObjectInspector() {
    HiveDecimalObjectInspector oi = IcebergDecimalObjectInspector.get(38, 18);
    // Type metadata: primitive DECIMAL with the requested precision/scale.
    Assert.assertEquals(ObjectInspector.Category.PRIMITIVE, oi.getCategory());
    Assert.assertEquals(PrimitiveObjectInspector.PrimitiveCategory.DECIMAL, oi.getPrimitiveCategory());
    Assert.assertEquals(new DecimalTypeInfo(38, 18), oi.getTypeInfo());
    Assert.assertEquals(TypeInfoFactory.decimalTypeInfo.getTypeName(), oi.getTypeName());
    Assert.assertEquals(38, oi.precision());
    Assert.assertEquals(18, oi.scale());
    Assert.assertEquals(HiveDecimal.class, oi.getJavaPrimitiveClass());
    Assert.assertEquals(HiveDecimalWritable.class, oi.getPrimitiveWritableClass());
    // Null inputs must pass through as null for all conversion paths.
    Assert.assertNull(oi.copyObject(null));
    Assert.assertNull(oi.getPrimitiveJavaObject(null));
    Assert.assertNull(oi.getPrimitiveWritableObject(null));
    // BigDecimal inputs convert to the Hive decimal representations.
    HiveDecimal one = HiveDecimal.create(BigDecimal.ONE);
    Assert.assertEquals(one, oi.getPrimitiveJavaObject(BigDecimal.ONE));
    Assert.assertEquals(new HiveDecimalWritable(one), oi.getPrimitiveWritableObject(BigDecimal.ONE));
    // copyObject must produce an equal but distinct instance.
    HiveDecimal copy = (HiveDecimal) oi.copyObject(one);
    Assert.assertEquals(one, copy);
    Assert.assertNotSame(one, copy);
    Assert.assertFalse(oi.preferWritable());
}
|
/**
 * Performs one fetch poll cycle: prepares fetch requests for the current
 * assignment and dispatches them with the standard success/failure handlers.
 *
 * @param currentTimeMs current time in milliseconds (unused here; required by the interface)
 * @return the poll result describing the requests that were sent
 */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
        prepareFetchRequests(),
        this::handleFetchSuccess,
        this::handleFetchFailure
    );
}
|
@Test
public void testPreferredReadReplicaOffsetError() {
    buildFetcher(new MetricConfig(), OffsetResetStrategy.EARLIEST, new BytesDeserializer(), new BytesDeserializer(),
        Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED, Duration.ofMinutes(5).toMillis());
    subscriptions.assignFromUser(singleton(tp0));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false));
    subscriptions.seek(tp0, 0);
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    // First response advertises broker 1 as the preferred read replica.
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L,
        FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.of(1)));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchRecords();
    Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
    // Fixed assertEquals argument order: expected value comes first.
    assertEquals(1, selected.id());
    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    // Return an error, should unset the preferred read replica
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.OFFSET_OUT_OF_RANGE, 100L,
        FetchResponse.INVALID_LAST_STABLE_OFFSET, 0, Optional.empty()));
    networkClientDelegate.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchRecords();
    selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
    // After the error the replica selection falls back to the leader sentinel (-1).
    assertEquals(-1, selected.id());
}
|
/**
 * Creates a producer builder pre-configured for function result publishing:
 * batching enabled with a 10 ms max publish delay, custom partition routing via
 * {@code FunctionResultRouter}, blocking on a full queue, and an infinite send
 * timeout. The optional default configurer runs first so these settings take
 * precedence; the optional {@code producerConfig} then layers compression,
 * pending-message limits, crypto, and batcher choices on top.
 *
 * @param topic        destination topic
 * @param schema       message schema
 * @param producerName optional producer name; skipped when null
 * @return the configured builder (not yet created)
 */
public <T> ProducerBuilder<T> createProducerBuilder(String topic, Schema<T> schema, String producerName) {
    ProducerBuilder<T> builder = client.newProducer(schema);
    // Apply caller-supplied defaults first so the settings below win on conflict.
    if (defaultConfigurer != null) {
        defaultConfigurer.accept(builder);
    }
    builder.blockIfQueueFull(true)
        .enableBatching(true)
        .batchingMaxPublishDelay(10, TimeUnit.MILLISECONDS)
        .hashingScheme(HashingScheme.Murmur3_32Hash) //
        .messageRoutingMode(MessageRoutingMode.CustomPartition)
        .messageRouter(FunctionResultRouter.of())
        // set send timeout to be infinity to prevent potential deadlock with consumer
        // that might happen when consumer is blocked due to unacked messages
        .sendTimeout(0, TimeUnit.SECONDS)
        .topic(topic);
    if (producerName != null) {
        builder.producerName(producerName);
    }
    if (producerConfig != null) {
        if (producerConfig.getCompressionType() != null) {
            builder.compressionType(producerConfig.getCompressionType());
        } else {
            // TODO: address this inconsistency.
            // PR https://github.com/apache/pulsar/pull/19470 removed the default compression type of LZ4
            // from the top level. This default is only used if producer config is provided.
            builder.compressionType(CompressionType.LZ4);
        }
        // Zero is treated as "unset" for both pending-message limits.
        if (producerConfig.getMaxPendingMessages() != null && producerConfig.getMaxPendingMessages() != 0) {
            builder.maxPendingMessages(producerConfig.getMaxPendingMessages());
        }
        if (producerConfig.getMaxPendingMessagesAcrossPartitions() != null
            && producerConfig.getMaxPendingMessagesAcrossPartitions() != 0) {
            builder.maxPendingMessagesAcrossPartitions(producerConfig.getMaxPendingMessagesAcrossPartitions());
        }
        if (producerConfig.getCryptoConfig() != null) {
            builder.cryptoKeyReader(crypto.keyReader);
            builder.cryptoFailureAction(crypto.failureAction);
            for (String encryptionKeyName : crypto.getEncryptionKeys()) {
                builder.addEncryptionKey(encryptionKeyName);
            }
        }
        if (producerConfig.getBatchBuilder() != null) {
            // Any value other than KEY_BASED falls back to the default batcher.
            if (producerConfig.getBatchBuilder().equals("KEY_BASED")) {
                builder.batcherBuilder(BatcherBuilder.KEY_BASED);
            } else {
                builder.batcherBuilder(BatcherBuilder.DEFAULT);
            }
        }
    }
    return builder;
}
|
@Test
public void testCreateProducerBuilderWithSimpleProducerConfig() {
    // A producer config with only the batch builder set must still apply the
    // LZ4 compression default and the KEY_BASED batcher, and nothing else.
    ProducerConfig producerConfig = new ProducerConfig();
    producerConfig.setBatchBuilder("KEY_BASED");
    ProducerBuilderFactory builderFactory = new ProducerBuilderFactory(pulsarClient, producerConfig, null, null);
    builderFactory.createProducerBuilder("topic", Schema.STRING, "producerName");
    verifyCommon();
    verify(producerBuilder).compressionType(CompressionType.LZ4);
    verify(producerBuilder).batcherBuilder(BatcherBuilder.KEY_BASED);
    verifyNoMoreInteractions(producerBuilder);
}
|
/**
 * Runs the statistic on the model's currently visible graph by delegating to
 * the {@code execute(Graph)} overload.
 */
@Override
public void execute(GraphModel graphModel) {
    Graph graph = graphModel.getGraphVisible();
    execute(graph);
}
|
@Test
public void testCompleteGraphDegree() {
    // In a complete undirected graph of 5 nodes, every node has weighted degree 4
    // (unit edge weights assumed for the generated graph).
    GraphModel graphModel = GraphGenerator.generateCompleteUndirectedGraph(5);
    Graph graph = graphModel.getGraph();
    Node n = graph.getNode("2");
    WeightedDegree d = new WeightedDegree();
    d.execute(graph);
    // NOTE(review): arguments look like (actual, expected) — if this is JUnit's
    // assertEquals(expected, actual), consider swapping for clearer failure messages.
    assertEquals(n.getAttribute(WeightedDegree.WDEGREE), 4.0);
}
|
/**
 * Returns whether the given configuration key should be treated as sensitive,
 * i.e. its value must be hidden in logs/UI. A key is sensitive when its
 * lower-cased form contains any of the configured sensitive-key substrings.
 *
 * @param key the configuration key to check; must not be null
 * @return {@code true} if the key matches a sensitive pattern
 * @throws NullPointerException if {@code key} is null
 */
public static boolean isSensitive(String key) {
    Preconditions.checkNotNull(key, "key is null");
    final String keyInLower = key.toLowerCase();
    for (String hideKey : SENSITIVE_KEYS) {
        // contains() already implies the length relationship; the previous
        // explicit length comparison was redundant.
        if (keyInLower.contains(hideKey)) {
            return true;
        }
    }
    return false;
}
|
@Test
void testHiddenKey() {
    // Substring matching is case-insensitive and position-independent.
    assertThat(GlobalConfiguration.isSensitive("password123")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("123pasSword")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("PasSword")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("Secret")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("polaris.client-secret")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("client-secret")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("service-key-json")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("auth.basic.password")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("auth.basic.token")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("avro-confluent.basic-auth.user-info")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("key.avro-confluent.basic-auth.user-info"))
        .isTrue();
    assertThat(GlobalConfiguration.isSensitive("value.avro-confluent.basic-auth.user-info"))
        .isTrue();
    assertThat(GlobalConfiguration.isSensitive("kafka.jaas.config")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("properties.ssl.truststore.password")).isTrue();
    assertThat(GlobalConfiguration.isSensitive("properties.ssl.keystore.password")).isTrue();
    assertThat(
        GlobalConfiguration.isSensitive(
            "fs.azure.account.key.storageaccount123456.core.windows.net"))
        .isTrue();
    // A harmless key must not be flagged.
    assertThat(GlobalConfiguration.isSensitive("Hello")).isFalse();
    assertThat(GlobalConfiguration.isSensitive("metrics.reporter.dghttp.apikey")).isTrue();
}
|
/**
 * Returns this plugin's position in the plugin chain, taken from the
 * GRPC entry of the plugin enum.
 */
@Override
public int getOrder() {
    return PluginEnum.GRPC.getCode();
}
|
@Test
public void testGetOrder() {
    // The plugin order must mirror the GRPC code from the plugin enum.
    final int result = grpcPlugin.getOrder();
    assertEquals(PluginEnum.GRPC.getCode(), result);
}
|
/**
 * Handles every incoming chat message: fires trade/duel/broadcast/PM notifications
 * based on message type, highlights and optionally notifies on the local player's
 * own name, and applies the user-configured highlight patterns. When any highlight
 * changed the message text, the message node's RuneLite format message is updated
 * so the rewritten text is rendered.
 */
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
	MessageNode messageNode = chatMessage.getMessageNode();
	boolean update = false;
	// First pass: type-specific notifications, and early exits for message types
	// that should never be highlighted (own outgoing PMs, dialogs, own chat, etc.).
	switch (chatMessage.getType())
	{
		case TRADEREQ:
			if (chatMessage.getMessage().contains("wishes to trade with you."))
			{
				notifier.notify(config.notifyOnTrade(), chatMessage.getMessage());
			}
			break;
		case CHALREQ_TRADE:
			if (chatMessage.getMessage().contains("wishes to duel with you."))
			{
				notifier.notify(config.notifyOnDuel(), chatMessage.getMessage());
			}
			break;
		case BROADCAST:
			// Some broadcasts have links attached, notated by `|` followed by a number, while others contain color tags.
			// We don't want to see either in the printed notification.
			String broadcast = chatMessage.getMessage();
			int urlTokenIndex = broadcast.lastIndexOf('|');
			if (urlTokenIndex != -1)
			{
				broadcast = broadcast.substring(0, urlTokenIndex);
			}
			notifier.notify(config.notifyOnBroadcast(), Text.removeFormattingTags(broadcast));
			break;
		case PRIVATECHAT:
		case MODPRIVATECHAT:
			notifier.notify(config.notifyOnPM(), Text.removeTags(chatMessage.getName()) + ": " + chatMessage.getMessage());
			break;
		case PRIVATECHATOUT:
		case DIALOG:
		case MESBOX:
			return;
		case MODCHAT:
		case PUBLICCHAT:
		case FRIENDSCHAT:
		case CLAN_CHAT:
		case CLAN_GUEST_CHAT:
		case CLAN_GIM_CHAT:
		case AUTOTYPER:
		case MODAUTOTYPER:
			// Skip highlighting the player's own outgoing messages.
			if (client.getLocalPlayer() != null && Text.toJagexName(Text.removeTags(chatMessage.getName())).equals(client.getLocalPlayer().getName()))
			{
				return;
			}
			break;
		case CONSOLE:
			// Don't notify for notification messages
			if (chatMessage.getName().equals(runeliteTitle))
			{
				return;
			}
			break;
	}
	// Lazily build (and cache) a regex matching the local player's name, tolerating
	// either a regular space or a non-breaking space between name parts.
	if (usernameMatcher == null && client.getLocalPlayer() != null && client.getLocalPlayer().getName() != null)
	{
		String username = client.getLocalPlayer().getName();
		String pattern = Arrays.stream(username.split(" "))
			.map(s -> s.isEmpty() ? "" : Pattern.quote(s))
			.collect(Collectors.joining("[\u00a0\u0020]")); // space or nbsp
		usernameMatcher = Pattern.compile("\\b" + pattern + "\\b", Pattern.CASE_INSENSITIVE);
	}
	// Second pass: underline/highlight each occurrence of the player's own name,
	// restoring the surrounding color after the highlighted span.
	if (config.highlightOwnName() && usernameMatcher != null)
	{
		final String message = messageNode.getValue();
		Matcher matcher = usernameMatcher.matcher(message);
		if (matcher.find())
		{
			final String username = client.getLocalPlayer().getName();
			StringBuffer stringBuffer = new StringBuffer();
			do
			{
				final int start = matcher.start(); // start not end, since username won't contain a col tag
				final String closeColor = MoreObjects.firstNonNull(
					getLastColor(message.substring(0, start)),
					"<col" + ChatColorType.NORMAL + '>');
				final String replacement = "<col" + ChatColorType.HIGHLIGHT.name() + "><u>" + username + "</u>" + closeColor;
				matcher.appendReplacement(stringBuffer, replacement);
			}
			while (matcher.find());
			matcher.appendTail(stringBuffer);
			messageNode.setValue(stringBuffer.toString());
			update = true;
			// Only chat types that represent another player addressing us should
			// trigger the own-name notification.
			if (chatMessage.getType() == ChatMessageType.PUBLICCHAT
				|| chatMessage.getType() == ChatMessageType.PRIVATECHAT
				|| chatMessage.getType() == ChatMessageType.FRIENDSCHAT
				|| chatMessage.getType() == ChatMessageType.MODCHAT
				|| chatMessage.getType() == ChatMessageType.MODPRIVATECHAT
				|| chatMessage.getType() == ChatMessageType.CLAN_CHAT
				|| chatMessage.getType() == ChatMessageType.CLAN_GUEST_CHAT)
			{
				sendNotification(config.notifyOnOwnName(), chatMessage);
			}
		}
	}
	// Third pass: apply each configured highlight pattern in turn.
	boolean matchesHighlight = false;
	// Get nodeValue to store and update in between the different pattern passes
	// The messageNode value is only set after all patterns have been processed
	String nodeValue = messageNode.getValue();
	for (Pattern pattern : highlightPatterns)
	{
		Matcher matcher = pattern.matcher(nodeValue);
		if (!matcher.find())
		{
			continue;
		}
		StringBuffer stringBuffer = new StringBuffer();
		do
		{
			final int end = matcher.end();
			// Determine the ending color by finding the last color tag up to and
			// including the match.
			final String closeColor = MoreObjects.firstNonNull(
				getLastColor(nodeValue.substring(0, end)),
				"<col" + ChatColorType.NORMAL + '>');
			// Strip color tags from the highlighted region so that it remains highlighted correctly
			final String value = stripColor(matcher.group());
			matcher.appendReplacement(stringBuffer, "<col" + ChatColorType.HIGHLIGHT + '>' + value + closeColor);
			update = true;
			matchesHighlight = true;
		}
		while (matcher.find());
		// Append stringBuffer with remainder of message and update nodeValue
		matcher.appendTail(stringBuffer);
		nodeValue = stringBuffer.toString();
	}
	if (matchesHighlight)
	{
		messageNode.setValue(nodeValue);
		sendNotification(config.notifyOnHighlight(), chatMessage);
	}
	if (update)
	{
		messageNode.setRuneLiteFormatMessage(messageNode.getValue());
	}
}
|
// Verifies that a player's own public-chat message mentioning their own name
// is NOT rewritten/highlighted (self-mentions are excluded by the own-message check).
// Note the sender name uses a non-breaking space, as the client does.
@Test
public void testLocalPlayerSelfMention()
{
	final String localPlayerName = "Broo klyn";
	MessageNode messageNode = mock(MessageNode.class);
	Player localPlayer = mock(Player.class);
	when(client.getLocalPlayer()).thenReturn(localPlayer);
	when(localPlayer.getName()).thenReturn(localPlayerName);
	lenient().when(config.highlightOwnName()).thenReturn(true);
	lenient().when(messageNode.getValue()).thenReturn("Spread love it's the Broo klyn way");
	ChatMessage chatMessage = new ChatMessage();
	chatMessage.setType(ChatMessageType.PUBLICCHAT);
	chatMessage.setName("Broo\u00a0klyn");
	chatMessage.setMessageNode(messageNode);
	chatNotificationsPlugin.onChatMessage(chatMessage);
	verify(messageNode, times(0)).setValue(any());
}
|
/**
 * Records the remote IP/port on the span, preferring the host's resolved
 * address and falling back to its host name. Does nothing for a noop span
 * or a null target.
 */
static void parseTargetAddress(HttpHost target, Span span) {
    if (span.isNoop() || target == null) {
        return;
    }
    InetAddress resolved = target.getAddress();
    if (resolved != null && span.remoteIpAndPort(resolved.getHostAddress(), target.getPort())) {
        return; // the resolved address was accepted; nothing more to record
    }
    span.remoteIpAndPort(target.getHostName(), target.getPort());
}
|
// Verifies that when the target host carries a resolved InetAddress, that address
// is used for the span's remote IP (the "3.4.5.6" host name is never consulted).
@Test void parseTargetAddress_prefersAddress() throws UnknownHostException {
    when(span.isNoop()).thenReturn(false);
    when(span.remoteIpAndPort("1.2.3.4", -1)).thenReturn(true);
    HttpHost host = new HttpHost(InetAddress.getByName("1.2.3.4"), "3.4.5.6", -1, "http");
    TracingHttpAsyncClientBuilder.parseTargetAddress(host, span);
    verify(span).isNoop();
    verify(span).remoteIpAndPort("1.2.3.4", -1);
    verifyNoMoreInteractions(span);
}
|
/**
 * Sums the producer metrics that measure time spent blocked (all in nanoseconds):
 * buffer-pool waits, flushes, the transactional operations, and metadata waits.
 */
private double totalBlockedTime(final Producer<?, ?> producer) {
    final String[] blockedTimeMetricNames = {
        "bufferpool-wait-time-ns-total",
        "flush-time-ns-total",
        "txn-init-time-ns-total",
        "txn-begin-time-ns-total",
        "txn-send-offsets-time-ns-total",
        "txn-commit-time-ns-total",
        "txn-abort-time-ns-total",
        "metadata-wait-time-ns-total"
    };
    double total = 0.0;
    for (final String metricName : blockedTimeMetricNames) {
        total += getMetricValue(producer.metrics(), metricName);
    }
    return total;
}
|
// Verifies totalBlockedTime is the sum of all eight blocked-time producer metrics.
@Test
public void shouldComputeTotalBlockedTime() {
    setProducerMetrics(
        nonEosMockProducer,
        BUFFER_POOL_WAIT_TIME,
        FLUSH_TME,
        TXN_INIT_TIME,
        TXN_BEGIN_TIME,
        TXN_SEND_OFFSETS_TIME,
        TXN_COMMIT_TIME,
        TXN_ABORT_TIME,
        METADATA_WAIT_TIME
    );
    final double expectedTotalBlocked = BUFFER_POOL_WAIT_TIME + FLUSH_TME + TXN_INIT_TIME +
        TXN_BEGIN_TIME + TXN_SEND_OFFSETS_TIME + TXN_COMMIT_TIME + TXN_ABORT_TIME +
        METADATA_WAIT_TIME;
    assertThat(nonEosStreamsProducer.totalBlockedTime(), closeTo(expectedTotalBlocked, 0.01));
}
|
/**
 * Returns the search backend version reported by the node adapter,
 * or an empty Optional when the version could not be determined.
 */
public Optional<SearchVersion> getVersion() {
    return nodeAdapter.version();
}
|
// Verifies the adapter-reported Elasticsearch version is passed through unchanged.
@Test
public void retrievingVersionSucceedsIfElasticsearchVersionIsValid() throws Exception {
    when(nodeAdapter.version()).thenReturn(Optional.of(SearchVersion.elasticsearch("5.4.0")));
    final Optional<SearchVersion> elasticsearchVersion = node.getVersion();
    assertThat(elasticsearchVersion).contains(SearchVersion.elasticsearch(5, 4, 0));
}
|
/**
 * Builds a {@link SubscriptionData} for the given topic and filter expression.
 * TAG-typed (or untyped) expressions delegate to {@code buildSubscriptionData};
 * every other expression type requires a non-empty expression string.
 *
 * @throws IllegalArgumentException if a non-TAG type is given an empty expression
 */
public static SubscriptionData build(final String topic, final String subString,
    final String type) throws Exception {
    if (type == null || ExpressionType.TAG.equals(type)) {
        return buildSubscriptionData(topic, subString);
    }
    if (StringUtils.isEmpty(subString)) {
        throw new IllegalArgumentException("Expression can't be null! " + type);
    }
    final SubscriptionData result = new SubscriptionData();
    result.setTopic(topic);
    result.setSubString(subString);
    result.setExpressionType(type);
    return result;
}
|
// A SQL92-typed subscription with a null expression must be rejected.
@Test(expected = IllegalArgumentException.class)
public void testBuildSQLWithNullSubString() throws Exception {
    FilterAPI.build("TOPIC", null, ExpressionType.SQL92);
}
|
/**
 * Sends one or more SubmitMulti PDUs built from the exchange and publishes the
 * results as message headers: the list of message IDs, the sent-message count,
 * and (when present) a per-message map of undeliverable destinations and their
 * error codes. Any session failure is wrapped in a SmppException.
 */
@Override
public void execute(Exchange exchange) throws SmppException {
    SubmitMulti[] submitMulties = createSubmitMulti(exchange);
    List<SubmitMultiResult> results = new ArrayList<>(submitMulties.length);
    for (SubmitMulti submitMulti : submitMulties) {
        SubmitMultiResult result;
        if (log.isDebugEnabled()) {
            log.debug("Sending multiple short messages for exchange id '{}'...", exchange.getExchangeId());
        }
        try {
            result = session.submitMultiple(
                    submitMulti.getServiceType(),
                    TypeOfNumber.valueOf(submitMulti.getSourceAddrTon()),
                    NumberingPlanIndicator.valueOf(submitMulti.getSourceAddrNpi()),
                    submitMulti.getSourceAddr(),
                    (Address[]) submitMulti.getDestAddresses(),
                    new ESMClass(submitMulti.getEsmClass()),
                    submitMulti.getProtocolId(),
                    submitMulti.getPriorityFlag(),
                    submitMulti.getScheduleDeliveryTime(),
                    submitMulti.getValidityPeriod(),
                    new RegisteredDelivery(submitMulti.getRegisteredDelivery()),
                    new ReplaceIfPresentFlag(submitMulti.getReplaceIfPresentFlag()),
                    DataCodings.newInstance(submitMulti.getDataCoding()),
                    submitMulti.getSmDefaultMsgId(),
                    submitMulti.getShortMessage(),
                    submitMulti.getOptionalParameters());
            results.add(result);
        } catch (Exception e) {
            // Wrap any SMPP/session failure in the component's exception type.
            throw new SmppException(e);
        }
    }
    if (log.isDebugEnabled()) {
        log.debug("Sent multiple short messages for exchange id '{}' and received results '{}'", exchange.getExchangeId(),
                results);
    }
    List<String> messageIDs = new ArrayList<>(results.size());
    // {messageID : [{destAddr : address, error : errorCode}]}
    Map<String, List<Map<String, Object>>> errors = new HashMap<>();
    for (SubmitMultiResult result : results) {
        UnsuccessDelivery[] deliveries = result.getUnsuccessDeliveries();
        if (deliveries != null) {
            // Collect each failed destination with its SMPP error status code.
            List<Map<String, Object>> undelivered = new ArrayList<>();
            for (UnsuccessDelivery delivery : deliveries) {
                Map<String, Object> error = new HashMap<>();
                error.put(SmppConstants.DEST_ADDR, delivery.getDestinationAddress().getAddress());
                error.put(SmppConstants.ERROR, delivery.getErrorStatusCode());
                undelivered.add(error);
            }
            if (!undelivered.isEmpty()) {
                errors.put(result.getMessageId(), undelivered);
            }
        }
        messageIDs.add(result.getMessageId());
    }
    Message message = ExchangeHelper.getResultMessage(exchange);
    message.setHeader(SmppConstants.ID, messageIDs);
    message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
    // Only expose the error header when at least one delivery failed.
    if (!errors.isEmpty()) {
        message.setHeader(SmppConstants.ERROR, errors);
    }
}
|
// Verifies that an explicit 8-bit DATA_CODING header wins over the default alphabet:
// the binary body must be submitted as-is with DataCodings for 0x04.
@Test
public void eightBitDataCodingOverridesDefaultAlphabet() throws Exception {
    final byte binDataCoding = (byte) 0x04; /* SMPP 8-bit */
    byte[] body = { (byte) 0xFF, 'A', 'B', (byte) 0x00, (byte) 0xFF, (byte) 0x7F, 'C', (byte) 0xFF };
    Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
    exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitMulti");
    exchange.getIn().setHeader(SmppConstants.ALPHABET, Alphabet.ALPHA_DEFAULT.value());
    exchange.getIn().setHeader(SmppConstants.DATA_CODING, binDataCoding);
    exchange.getIn().setBody(body);
    Address[] destAddrs = new Address[] {
            new Address(
                    TypeOfNumber.UNKNOWN,
                    NumberingPlanIndicator.UNKNOWN,
                    "1717")
    };
    when(session.submitMultiple(eq("CMT"),
            eq(TypeOfNumber.UNKNOWN),
            eq(NumberingPlanIndicator.UNKNOWN),
            eq("1616"),
            eq(destAddrs),
            eq(new ESMClass()),
            eq((byte) 0),
            eq((byte) 1),
            (String) isNull(),
            (String) isNull(),
            eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)),
            eq(ReplaceIfPresentFlag.DEFAULT),
            eq(DataCodings.newInstance(binDataCoding)),
            eq((byte) 0),
            eq(body)))
            .thenReturn(new SubmitMultiResult("1", null, null));
    command.execute(exchange);
    assertEquals(Collections.singletonList("1"), exchange.getMessage().getHeader(SmppConstants.ID));
}
|
/**
 * Reads the data of every direct child node under the given ZooKeeper path
 * and returns the payloads decoded as UTF-8 strings.
 *
 * @throws ShenyuException if any ZooKeeper operation fails
 */
@Override
public List<String> getRegisterData(final String key) {
    try {
        List<String> payloads = new ArrayList<>();
        for (String child : client.getChildren().forPath(key)) {
            byte[] raw = client.getData().forPath(key + "/" + child);
            payloads.add(new String(raw, StandardCharsets.UTF_8));
        }
        return payloads;
    } catch (Exception e) {
        throw new ShenyuException(e);
    }
}
|
// First call: unmocked client path must surface a ShenyuException.
// Second call: a mocked, empty children list yields an empty result.
@Test
void getRegisterDataTest() throws Exception {
    assertThrows(ShenyuException.class, () -> zookeeperDiscoveryServiceUnderTest.getRegisterData("/test"));
    GetChildrenBuilder getChildrenBuilder = mock(GetChildrenBuilder.class);
    when(curatorFramework.getChildren()).thenReturn(getChildrenBuilder);
    when(getChildrenBuilder.forPath(anyString())).thenReturn(new ArrayList<>());
    List<String> children = zookeeperDiscoveryServiceUnderTest.getRegisterData("/test");
    Assertions.assertEquals(0, children.size());
}
|
/**
 * Creates the JobManager Deployment first, then the accompanying resources
 * (ConfigMaps, Services, ...). The Deployment must be created first so its
 * server-assigned uid can be set as the OwnerReference on the other resources,
 * letting Kubernetes garbage-collect them with the Deployment.
 */
@Override
public void createJobManagerComponent(KubernetesJobManagerSpecification kubernetesJMSpec) {
    final Deployment deployment = kubernetesJMSpec.getDeployment();
    final List<HasMetadata> accompanyingResources = kubernetesJMSpec.getAccompanyingResources();
    // create Deployment
    LOG.debug(
            "Start to create deployment with spec {}{}",
            System.lineSeparator(),
            KubernetesUtils.tryToGetPrettyPrintYaml(deployment));
    final Deployment createdDeployment = this.internalClient.resource(deployment).create();
    // Note that we should use the uid of the created Deployment for the OwnerReference.
    setOwnerReference(createdDeployment, accompanyingResources);
    this.internalClient.resourceList(accompanyingResources).createOrReplace();
}
|
// Verifies the JobManager component creates exactly one Deployment, one ConfigMap
// and two Services, all owned (via OwnerReference) by the Deployment.
@Test
void testCreateFlinkMasterComponent() throws Exception {
    flinkKubeClient.createJobManagerComponent(this.kubernetesJobManagerSpecification);
    final List<Deployment> resultedDeployments =
            kubeClient.apps().deployments().inNamespace(NAMESPACE).list().getItems();
    assertThat(resultedDeployments).hasSize(1);
    final List<ConfigMap> resultedConfigMaps =
            kubeClient.configMaps().inNamespace(NAMESPACE).list().getItems();
    assertThat(resultedConfigMaps).hasSize(1);
    final List<Service> resultedServices =
            kubeClient.services().inNamespace(NAMESPACE).list().getItems();
    assertThat(resultedServices).hasSize(2);
    testOwnerReferenceSetting(resultedDeployments.get(0), resultedConfigMaps);
    testOwnerReferenceSetting(resultedDeployments.get(0), resultedServices);
}
|
/**
 * Registers (or replaces) the subscriber for the given service. The subscribe
 * counter is only incremented for a brand-new subscription, not a replacement.
 * Always reports success.
 */
@Override
public boolean addServiceSubscriber(Service service, Subscriber subscriber) {
    final Subscriber previous = subscribers.put(service, subscriber);
    if (previous == null) {
        MetricsMonitor.incrementSubscribeCount();
    }
    return true;
}
|
// addServiceSubscriber always reports success.
@Test
void addServiceSubscriber() {
    assertTrue(abstractClient.addServiceSubscriber(service, subscriber));
}
|
/**
 * Returns all column handles of the given JDBC table, keyed by column name.
 */
@Override
public Map<String, ColumnHandle> getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    JdbcTableHandle handle = (JdbcTableHandle) tableHandle;
    ImmutableMap.Builder<String, ColumnHandle> builder = ImmutableMap.builder();
    for (JdbcColumnHandle columnHandle : jdbcMetadataCache.getColumns(session, handle)) {
        builder.put(columnHandle.getColumnMetadata().getName(), columnHandle);
    }
    return builder.build();
}
|
// Known table: expects the three JDBC columns keyed by lower-case name.
// Unknown tables/schemas: delegated to unknownTableColumnHandle for failure checks.
@Test
public void testGetColumnHandles()
{
    // known table
    assertEquals(metadata.getColumnHandles(SESSION, tableHandle), ImmutableMap.of(
            "text", new JdbcColumnHandle(CONNECTOR_ID, "TEXT", JDBC_VARCHAR, VARCHAR, true, Optional.empty()),
            "text_short", new JdbcColumnHandle(CONNECTOR_ID, "TEXT_SHORT", JDBC_VARCHAR, createVarcharType(32), true, Optional.empty()),
            "value", new JdbcColumnHandle(CONNECTOR_ID, "VALUE", JDBC_BIGINT, BIGINT, true, Optional.empty())));
    // unknown table
    unknownTableColumnHandle(new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName("unknown", "unknown"), "unknown", "unknown", "unknown"));
    unknownTableColumnHandle(new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName("example", "numbers"), null, "example", "unknown"));
}
|
/**
 * Stores the root resource models, walks each resource tree to collect every
 * resource class (including children), and rebuilds the JSR-330 adapter used
 * for dependency injection over that full class set.
 */
@Override
public void setRootResources(final Map<String, ResourceModel> rootResources)
{
  log.debug("Setting root resources");
  _rootResources = rootResources;
  Collection<Class<?>> allResourceClasses = new HashSet<>();
  for (ResourceModel resourceModel : _rootResources.values())
  {
    // Recursively adds this resource's class and all of its children's classes.
    processChildResource(resourceModel, allResourceClasses);
  }
  _jsr330Adapter = new Jsr330Adapter(allResourceClasses, _beanProvider);
}
|
// When two beans of the same required type exist and none matches by name,
// resource wiring must fail with a RestLiInternalException ("Expected to find...").
@Test
public void testAmbiguousBeanResolution() throws Exception
{
  Map<String, ResourceModel> pathRootResourceMap =
      buildResourceModels(SomeResource1.class,
                          SomeResource2.class,
                          SomeResource4.class);
  // set up mock ApplicationContext
  BeanProvider ctx = EasyMock.createMock(BeanProvider.class);
  EasyMock.expect(ctx.getBean(EasyMock.eq("dep1"))).andReturn(new SomeDependency1()).anyTimes();
  Map<String, SomeDependency2> map2 = new HashMap<>();
  map2.put("someBeanName", new SomeDependency2());
  EasyMock.expect(ctx.getBeansOfType(EasyMock.eq(SomeDependency2.class)))
      .andReturn(map2).anyTimes();
  // Two candidate beans of SomeDependency1 make resolution ambiguous.
  Map<String, SomeDependency1> map1 = new HashMap<>();
  map1.put("someDep1", new SomeDependency1());
  map1.put("anotherDep1", new SomeDependency1());
  EasyMock.expect(ctx.getBeansOfType(EasyMock.eq(SomeDependency1.class)))
      .andReturn(map1).anyTimes();
  EasyMock.replay(ctx);
  InjectResourceFactory factory = new InjectResourceFactory(ctx);
  // #4 ambiguous dep
  try
  {
    factory.setRootResources(pathRootResourceMap);
    fail("Expected unresolvable bean exception");
  }
  catch (RestLiInternalException e)
  {
    assertTrue(e.getMessage().startsWith("Expected to find"));
  }
  EasyMock.verify(ctx);
  EasyMock.reset(ctx);
}
|
/**
 * Removes the given partition id from the set. Only {@link Integer} values are
 * accepted; any other type fails fast with a {@link ClassCastException}.
 */
@Override
public boolean remove(Object o) {
    if (o instanceof Integer) {
        return remove(((Integer) o).intValue());
    }
    throw new ClassCastException("PartitionIdSet can be only used with Integers");
}
|
// Removing a non-Integer must fail fast with ClassCastException.
@Test(expected = ClassCastException.class)
public void test_remove_whenNotInteger() {
    partitionIdSet.remove(new Object());
}
|
/**
 * Validates a batch of "username:password" credential tokens against the given
 * generator and max age, keeping at most one valid credential per username —
 * the one with the latest timestamp. Returns one CredentialInfo per input token:
 * malformed, unverifiable, or superseded tokens are marked invalid; each
 * username's freshest valid token is returned as valid.
 */
public static List<CredentialInfo> check(
    final List<String> tokens,
    final ExternalServiceCredentialsGenerator credentialsGenerator,
    final long maxAgeSeconds) {
  // the credential for the username with the latest timestamp (so far)
  final Map<String, CredentialInfo> bestForUsername = new HashMap<>();
  final List<CredentialInfo> results = new ArrayList<>();
  for (String token : tokens) {
    // each token is supposed to be in a "${username}:${password}" form,
    // (note that password part may also contain ':' characters)
    final String[] parts = token.split(":", 2);
    if (parts.length != 2) {
      // malformed token — reported invalid as-is
      results.add(new CredentialInfo(token, false, null, 0L));
      continue;
    }
    final ExternalServiceCredentials credentials = new ExternalServiceCredentials(parts[0], parts[1]);
    final Optional<Long> maybeTimestamp = credentialsGenerator.validateAndGetTimestamp(credentials, maxAgeSeconds);
    if (maybeTimestamp.isEmpty()) {
      // bad signature or expired — reported invalid
      results.add(new CredentialInfo(token, false, null, 0L));
      continue;
    }
    // now that we validated signature and token age, we will also find the latest of the tokens
    // for each username
    final long timestamp = maybeTimestamp.get();
    final CredentialInfo best = bestForUsername.get(credentials.username());
    if (best == null) {
      bestForUsername.put(credentials.username(), new CredentialInfo(token, true, credentials, timestamp));
      continue;
    }
    if (best.timestamp() < timestamp) {
      // we found a better credential for the username
      bestForUsername.put(credentials.username(), new CredentialInfo(token, true, credentials, timestamp));
      // mark the previous best as an invalid credential, since we have a better credential now
      results.add(best.invalidate());
    } else {
      // the credential we already had was more recent, this one can be marked invalid
      results.add(new CredentialInfo(token, false, null, 0L));
    }
  }
  // all invalid tokens should be in results, just add the valid ones
  results.addAll(bestForUsername.values());
  return results;
}
|
// With two usernames, each having an old and a new token, only the newest token
// per username is valid; the older ones are returned as invalid.
@Test
void multipleUsernames() {
  final ExternalServiceCredentials cred1New = GEN1.generateForUuid(UUID1);
  final ExternalServiceCredentials cred1Old = atTime(GEN1, -1, UUID1);
  final ExternalServiceCredentials cred2New = GEN1.generateForUuid(UUID2);
  final ExternalServiceCredentials cred2Old = atTime(GEN1, -1, UUID2);
  final List<String> tokens = Stream.of(cred1New, cred1Old, cred2New, cred2Old)
      .map(ExternalServiceCredentialsSelectorTest::token)
      .toList();
  final List<CredentialInfo> result = ExternalServiceCredentialsSelector.check(tokens, GEN1,
      TimeUnit.MINUTES.toSeconds(1));
  assertThat(result).hasSize(4);
  assertThat(result).filteredOn(CredentialInfo::valid)
      .hasSize(2)
      .map(CredentialInfo::credentials)
      .containsExactlyInAnyOrder(cred1New, cred2New);
  assertThat(result).filteredOn(info -> !info.valid())
      .map(CredentialInfo::token)
      .containsExactlyInAnyOrder(token(cred1Old), token(cred2Old));
}
|
/**
 * Builds a Camel K Pipe resource from the delegate's source/sink/steps, injects
 * the operator-id annotation and any trait/connect configuration into the YAML
 * template, then either dumps it (when --output is set) or creates/updates it
 * in the cluster, optionally waiting for the Running phase and tailing logs.
 *
 * @return 0 on success, -1 on a validation/construction failure
 */
@Override
public Integer doCall() throws Exception {
    // Operator id must be set
    if (ObjectHelper.isEmpty(operatorId)) {
        printer().println("Operator id must be set");
        return -1;
    }
    delegate.setFile(name);
    delegate.setSource(source);
    delegate.setSink(sink);
    delegate.setSteps(steps);
    delegate.setErrorHandler(errorHandler);
    delegate.setProperties(properties);
    String pipe = delegate.constructPipe();
    if (pipe.isEmpty()) {
        // Error in delegate exit now
        printer().println("Failed to construct Pipe resource");
        return -1;
    }
    // --operator-id={id} is a syntax sugar for '--annotation camel.apache.org/operator.id={id}'
    if (annotations == null) {
        annotations = new String[] { "%s=%s".formatted(CamelKCommand.OPERATOR_ID_LABEL, operatorId) };
    } else {
        annotations = Arrays.copyOf(annotations, annotations.length + 1);
        annotations[annotations.length - 1] = "%s=%s".formatted(CamelKCommand.OPERATOR_ID_LABEL, operatorId);
    }
    String annotationsContext = "";
    // NOTE(review): annotations can no longer be null here (the branch above always
    // assigns it), so this null check is redundant but harmless.
    if (annotations != null) {
        StringBuilder sb = new StringBuilder("  annotations:\n");
        for (String annotation : annotations) {
            String[] keyValue = annotation.split("=", 2);
            if (keyValue.length != 2) {
                printer().printf(
                        "annotation '%s' does not follow format <key>=<value>%n",
                        annotation);
                continue;
            }
            sb.append("    ").append(keyValue[0]).append(": ").append(keyValue[1]).append("\n");
        }
        annotationsContext = sb.toString();
    }
    // Substitute the annotations section into the YAML template.
    pipe = pipe.replaceFirst("\\{\\{ \\.Annotations }}\n", annotationsContext);
    String integrationSpec = "";
    Traits traitsSpec = null;
    if (traits != null && traits.length > 0) {
        traitsSpec = TraitHelper.parseTraits(traits);
    }
    if (connects != null) {
        if (traitsSpec == null) {
            traitsSpec = new Traits();
        }
        TraitHelper.configureConnects(traitsSpec, connects);
    }
    if (traitsSpec != null) {
        // Indent the dumped trait YAML so it nests under integration.spec.traits.
        String traitYaml = KubernetesHelper.dumpYaml(traitsSpec);
        traitYaml = traitYaml.replaceAll("\n", "\n      ");
        integrationSpec = "  integration:\n    spec:\n      traits:\n      %s\n".formatted(traitYaml.trim());
    }
    pipe = pipe.replaceFirst("\\{\\{ \\.IntegrationSpec }}\n", integrationSpec);
    if (output != null) {
        // --output requested: dump the Pipe instead of applying it to the cluster.
        delegate.setOutput(output);
        return delegate.dumpPipe(pipe);
    }
    Pipe pipeResource = KubernetesHelper.yaml(this.getClass().getClassLoader()).loadAs(pipe, Pipe.class);
    final AtomicBoolean updated = new AtomicBoolean(false);
    client(Pipe.class).resource(pipeResource).createOr(it -> {
        updated.set(true);
        return it.update();
    });
    if (updated.get()) {
        printer().printf("Pipe %s updated%n", pipeResource.getMetadata().getName());
    } else {
        printer().printf("Pipe %s created%n", pipeResource.getMetadata().getName());
    }
    if (wait || logs) {
        // Block until the Pipe reports the Running phase (up to 10 minutes).
        client(Pipe.class).withName(pipeResource.getMetadata().getName())
                .waitUntilCondition(it -> "Running".equals(it.getStatus().getPhase()), 10, TimeUnit.MINUTES);
    }
    if (logs) {
        IntegrationLogs logsCommand = new IntegrationLogs(getMain());
        logsCommand.withClient(client());
        logsCommand.withName(pipeResource.getMetadata().getName());
        logsCommand.doCall();
    }
    return 0;
}
|
// Binding over an existing Pipe must report "updated" (not "created") and keep
// the operator-id annotation on the stored resource.
@Test
public void shouldUpdatePipe() throws Exception {
    Pipe pipe = createPipe("timer-to-log");
    kubernetesClient.resources(Pipe.class).resource(pipe).create();
    Bind command = createCommand("timer", "log");
    command.output = null;
    command.properties = new String[] {
            "sink.showHeaders=true"
    };
    command.doCall();
    Assertions.assertEquals("Pipe timer-to-log updated", printer.getOutput());
    Pipe created = kubernetesClient.resources(Pipe.class).withName("timer-to-log").get();
    Assertions.assertEquals("camel-k", created.getMetadata().getAnnotations().get(CamelKCommand.OPERATOR_ID_LABEL));
}
|
/**
 * Tars services use no extra selector handler configuration, so this
 * implementation always returns an empty string.
 */
@Override
protected String selectorHandler(final MetaDataRegisterDTO metaDataDTO) {
    return "";
}
|
// selectorHandler is a no-op for Tars: it must return an empty string.
@Test
public void testSelectorHandler() {
    MetaDataRegisterDTO metaDataRegisterDTO = MetaDataRegisterDTO.builder().build();
    assertEquals(StringUtils.EMPTY, shenyuClientRegisterTarsService.selectorHandler(metaDataRegisterDTO));
}
|
/**
 * Deserializes a JSON object string into a String-to-String map, stringifying
 * every key and value. Blank or null input yields an empty (mutable) map.
 *
 * @throws RuntimeException wrapping the JsonProcessingException on malformed JSON
 */
public static Map<String, String> deserialize2Map(String jsonStr) {
    if (!StringUtils.hasText(jsonStr)) {
        return new HashMap<>();
    }
    try {
        Map<String, Object> parsed = OM.readValue(jsonStr, Map.class);
        Map<String, String> result = new HashMap<>();
        for (Map.Entry<String, Object> entry : parsed.entrySet()) {
            result.put(String.valueOf(entry.getKey()), String.valueOf(entry.getValue()));
        }
        return result;
    }
    catch (JsonProcessingException e) {
        LOG.error(
                "Json to map failed. check if the format of the json string[{}] is correct.", jsonStr, e);
        throw new RuntimeException("Json to map failed.", e);
    }
}
|
// Malformed JSON (missing closing brace) must surface as a wrapped RuntimeException.
@Test
public void testDeserializeThrowsRuntimeException() {
    String jsonStr = "{\"k1\":\"v1\",\"k2\":\"v2\",\"k3\":\"v3\"";
    assertThatThrownBy(() -> JacksonUtils.deserialize2Map(jsonStr))
            .isExactlyInstanceOf(RuntimeException.class).hasMessage("Json to map failed.");
}
|
/**
 * Deletes a flow identified by namespace and id within the resolved tenant.
 * Responds 204 when the flow existed and was deleted, 404 otherwise.
 */
@Delete(uri = "{namespace}/{id}")
@ExecuteOn(TaskExecutors.IO)
@Operation(tags = {"Flows"}, summary = "Delete a flow")
@ApiResponse(responseCode = "204", description = "On success")
public HttpResponse<Void> delete(
    @Parameter(description = "The flow namespace") @PathVariable String namespace,
    @Parameter(description = "The flow id") @PathVariable String id
) {
    // Guard clause: nothing to delete when the flow does not exist.
    Optional<Flow> maybeFlow = flowRepository.findById(tenantService.resolveTenant(), namespace, id);
    if (maybeFlow.isEmpty()) {
        return HttpResponse.status(HttpStatus.NOT_FOUND);
    }
    flowRepository.delete(maybeFlow.get());
    return HttpResponse.status(HttpStatus.NO_CONTENT);
}
|
// Creates a flow, deletes its whole namespace via the by-query endpoint,
// then verifies a subsequent GET for the flow yields 404.
@Test
void deleteFlowsByIds(){
    Flow flow = generateFlow("toDelete","io.kestra.unittest.delete", "a");
    client.toBlocking().retrieve(POST("/api/v1/flows", flow), String.class);
    client.toBlocking().exchange(HttpRequest.DELETE("/api/v1/flows/delete/by-query?namespace=io.kestra.unittest.delete"));
    HttpClientResponseException e = assertThrows(HttpClientResponseException.class, () -> {
        client.toBlocking().retrieve(HttpRequest.GET("/api/v1/flows/io.kestra.unittest.disabled/toDelete"));
    });
    assertThat(e.getStatus(), is(HttpStatus.NOT_FOUND));
}
|
/**
 * Reacts to Kafka metric registrations/updates, handling only the RocksDB
 * "total-sst-files-size" metric; every other metric is ignored.
 */
@Override
public void metricChange(final KafkaMetric metric) {
    final String metricName = metric.metricName().name();
    if ("total-sst-files-size".equals(metricName)) {
        handleNewSstFilesSizeMetric(
            metric,
            metric.metricName().tags().getOrDefault(TASK_ID_TAG, ""),
            getQueryId(metric)
        );
    }
}
|
// A new total-sst-files-size metric must register both a per-query and a
// per-task gauge, each reporting the metric's value.
@Test
public void shouldAddNewGauges() {
    // Given:
    listener.metricChange(mockMetric(
        KAFKA_METRIC_GROUP,
        KAFKA_METRIC_NAME,
        BigInteger.valueOf(2),
        ImmutableMap.of("task-id", "t1", "thread-id", THREAD_ID))
    );
    // When:
    final Gauge<?> queryGauge = verifyAndGetRegisteredMetric(QUERY_STORAGE_METRIC, QUERY_TAGS);
    final Object queryValue = queryGauge.value(null, 0);
    final Gauge<?> taskGauge = verifyAndGetRegisteredMetric(TASK_STORAGE_METRIC, TASK_ONE_TAGS);
    final Object taskValue = taskGauge.value(null, 0);
    // Then:
    assertThat(taskValue, equalTo(BigInteger.valueOf(2)));
    assertThat(queryValue, equalTo(BigInteger.valueOf(2)));
}
|
/**
 * Supports only the "length" field, returning the array's length as a SelLong;
 * any other field access is rejected.
 *
 * @throws UnsupportedOperationException for any field other than "length"
 */
@Override
public SelLong field(SelString field) {
  if ("length".equals(field.getInternalVal())) {
    return SelLong.of(val.length);
  }
  throw new UnsupportedOperationException(type() + " DO NOT support accessing field: " + field);
}
|
// Accessing "length" on a two-element value returns 2.
@Test
public void field() {
  SelLong res = one.field(SelString.of("length"));
  assertEquals(2, res.longVal());
}
|
/**
 * Wraps the given stream in a GZIP-decompressing stream tied to this provider.
 */
@Override
public GZIPCompressionInputStream createInputStream( InputStream in ) throws IOException {
  return new GZIPCompressionInputStream( in, this );
}
|
// Builds a real in-memory GZIP payload, then verifies the provider can wrap
// both a raw ByteArrayInputStream and an existing GZIPInputStream.
@Test
public void testCreateInputStream() throws IOException {
  GZIPCompressionProvider provider = (GZIPCompressionProvider) factory.getCompressionProviderByName( PROVIDER_NAME );
  // Create an in-memory GZIP output stream for use by the input stream (to avoid exceptions)
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  GZIPOutputStream gos = new GZIPOutputStream( baos );
  byte[] testBytes = "Test".getBytes();
  gos.write( testBytes );
  ByteArrayInputStream in = new ByteArrayInputStream( baos.toByteArray() );
  // Test stream creation paths
  GZIPInputStream gis = new GZIPInputStream( in );
  in = new ByteArrayInputStream( baos.toByteArray() );
  GZIPCompressionInputStream ncis = provider.createInputStream( in );
  assertNotNull( ncis );
  GZIPCompressionInputStream ncis2 = provider.createInputStream( gis );
  assertNotNull( ncis2 );
}
|
/**
 * Splits the configured inline expression, evaluates each Groovy segment
 * (after placeholder substitution), and flattens the results into a single
 * list; a null/empty expression yields an empty list.
 */
@Override
public List<String> splitAndEvaluate() {
    return Strings.isNullOrEmpty(inlineExpression) ? Collections.emptyList() : flatten(evaluate(GroovyUtils.split(handlePlaceHolder(inlineExpression))));
}
|
// Groovy list literals inside the expression must expand to one entry per element.
@Test
void assertEvaluateForArray() {
    List<String> expected = TypedSPILoader.getService(InlineExpressionParser.class, "GROOVY", PropertiesBuilder.build(
            new PropertiesBuilder.Property(InlineExpressionParser.INLINE_EXPRESSION_KEY, "t_order_${[0, 1, 2]},t_order_item_${[0, 2]}"))).splitAndEvaluate();
    assertThat(expected.size(), is(5));
    assertThat(expected, hasItems("t_order_0", "t_order_1", "t_order_2", "t_order_item_0", "t_order_item_2"));
}
|
/**
 * Factory for the rule that pushes filter predicates into table layouts,
 * bound to this instance's metadata.
 */
public PickTableLayoutForPredicate pickTableLayoutForPredicate()
{
    return new PickTableLayoutForPredicate(metadata);
}
|
// Predicates that cannot be converted into a table layout constraint
// (modulo arithmetic over nationkey) must leave the plan untouched.
@Test
public void doesNotFireIfRuleNotChangePlan()
{
    tester().assertThat(pickTableLayout.pickTableLayoutForPredicate())
            .on(p -> {
                p.variable("nationkey", BIGINT);
                return p.filter(p.rowExpression("nationkey % 17 =  BIGINT '44' AND nationkey % 15 =  BIGINT '43'"),
                        p.tableScan(
                                nationTableHandle,
                                ImmutableList.of(p.variable("nationkey", BIGINT)),
                                ImmutableMap.of(p.variable("nationkey", BIGINT), new TpchColumnHandle("nationkey", BIGINT)),
                                TupleDomain.all(),
                                TupleDomain.all()));
            })
            .doesNotFire();
    tester().assertThat(pickTableLayout.pickTableLayoutForPredicate())
            .on(p -> {
                p.variable("nationkey");
                return p.filter(p.rowExpression("nationkey % 17 =  BIGINT '44' AND nationkey % 15 =  BIGINT '43'"),
                        p.tableScan(
                                nationTableHandle,
                                ImmutableList.of(variable("nationkey", BIGINT)),
                                ImmutableMap.of(variable("nationkey", BIGINT), new TpchColumnHandle("nationkey", BIGINT)),
                                TupleDomain.all(),
                                TupleDomain.all()));
            })
            .doesNotFire();
}
|
/**
 * Logs a snapshot of every registered metric — gauges, counters, histograms,
 * meters and timers — one log line per metric, but only when the configured
 * marker is enabled on the underlying logger.
 */
@Override
public void report(SortedMap<MetricName, Gauge> gauges,
                   SortedMap<MetricName, Counter> counters,
                   SortedMap<MetricName, Histogram> histograms,
                   SortedMap<MetricName, Meter> meters,
                   SortedMap<MetricName, Timer> timers) {
    if (!loggerProxy.isEnabled(marker)) {
        return;
    }
    gauges.forEach(this::logGauge);
    counters.forEach(this::logCounter);
    histograms.forEach(this::logHistogram);
    meters.forEach(this::logMeter);
    timers.forEach(this::logTimer);
}
|
// A reported counter must be logged at INFO with the prefixed metric name and its count.
@Test
public void reportsCounterValues() throws Exception {
    final Counter counter = mock(Counter.class);
    when(counter.getCount()).thenReturn(100L);
    when(logger.isInfoEnabled(marker)).thenReturn(true);
    infoReporter.report(this.map(),
            map("test.counter", counter),
            this.map(),
            this.map(),
            this.map());
    verify(logger).info(marker, "type={}, name={}, count={}", "COUNTER", "prefix.test.counter", 100L);
}
|
/**
 * Adapts an {@link Iterator} into a {@code Traverser}. The traverser returns
 * null once the iterator is exhausted; a null item from the iterator itself
 * is rejected with a NullPointerException.
 */
@Nonnull
public static <T> Traverser<T> traverseIterator(@Nonnull Iterator<? extends T> iterator) {
    return () -> {
        if (!iterator.hasNext()) {
            return null;
        }
        return requireNonNull(iterator.next(), "Iterator returned a null item");
    };
}
|
// The null-ignoring overload must skip null items and terminate with null at the end.
@Test
public void when_traverseIteratorIgnoringNulls_then_filteredOut() {
    Traverser<Integer> trav = traverseIterator(asList(null, 1, null, 2, null).iterator(), true);
    assertEquals(1, (int) trav.next());
    assertEquals(2, (int) trav.next());
    assertNull(trav.next());
}
|
/**
 * Runs state-election filters, persists the jobs, then runs state-applied filters.
 * A {@link ConcurrentJobModificationException} during save is handed to the
 * resolver; only an unresolvable conflict escalates as a SevereJobRunrException.
 */
protected void saveAndRunJobFilters(List<Job> jobs) {
    if (jobs.isEmpty()) return;
    try {
        // Election filters may still change the state that is about to be saved.
        jobFilterUtils.runOnStateElectionFilter(jobs);
        storageProvider.save(jobs);
        // Applied filters run only after the new state has been persisted.
        jobFilterUtils.runOnStateAppliedFilters(jobs);
    } catch (ConcurrentJobModificationException concurrentJobModificationException) {
        try {
            backgroundJobServer.getConcurrentJobModificationResolver().resolve(concurrentJobModificationException);
        } catch (UnresolvableConcurrentJobModificationException unresolvableConcurrentJobModificationException) {
            throw new SevereJobRunrException("Could not resolve ConcurrentJobModificationException", unresolvableConcurrentJobModificationException);
        }
    }
}
|
@Test
void onConcurrentJobModificationExceptionTaskTriesToResolveAndThrowsNoExceptionIfResolved() {
    Job jobInProgress = aJobInProgress().build();
    Job deletedJob = aCopyOf(jobInProgress).withDeletedState().build();
    // save() conflicts, but the fresh copy fetched during resolution is in the
    // deleted state — a conflict the resolver can reconcile without escalating.
    when(storageProvider.save(anyList())).thenThrow(new ConcurrentJobModificationException(jobInProgress));
    when(storageProvider.getJobById(jobInProgress.getId())).thenReturn(deletedJob);
    assertThatCode(() -> task.saveAndRunJobFilters(singletonList(jobInProgress))).doesNotThrowAnyException();
}
|
/**
 * Builds the Hibernate SessionFactory from the bundle's data source, registers
 * it with the (single, shared) UnitOfWork listener, and adds a health check.
 */
@Override
public void run(T configuration, Environment environment) throws Exception {
    final PooledDataSourceFactory dbConfig = getDataSourceFactory(configuration);
    this.sessionFactory = requireNonNull(sessionFactoryFactory.build(this, environment, dbConfig,
        entities, name()));
    // Ensure exactly one UnitOfWork listener exists and knows about this factory.
    registerUnitOfWorkListenerIfAbsent(environment).registerSessionFactory(name(), sessionFactory);
    // Health check validates connectivity; timeout falls back to 5 seconds.
    environment.healthChecks().register(name(),
        new SessionFactoryHealthCheck(
            environment.getHealthCheckExecutorService(),
            dbConfig.getValidationQueryTimeout().orElse(Duration.seconds(5)),
            sessionFactory,
            dbConfig.getValidationQuery()));
}
|
@Test
void registersATransactionalListener() throws Exception {
    bundle.run(configuration, environment);
    // The bundle must register a UnitOfWorkApplicationListener with Jersey.
    final ArgumentCaptor<UnitOfWorkApplicationListener> captor =
        ArgumentCaptor.forClass(UnitOfWorkApplicationListener.class);
    verify(jerseyEnvironment).register(captor.capture());
}
|
/**
 * Returns the message with its argument placeholders rendered, computing and
 * caching the result on first access.
 */
public String getFormattedMessage() {
    // Lazily format once; subsequent calls return the cached value.
    if (formattedMessage == null) {
        formattedMessage = (argumentArray == null)
            ? message
            : MessageFormatter.arrayFormat(message, argumentArray).getMessage();
    }
    return formattedMessage;
}
|
@Test
public void testFormattingOneArg() {
    String message = "x={}";
    Throwable throwable = null;
    Object[] argArray = new Object[] {12};
    LoggingEvent event = new LoggingEvent("", logger, Level.INFO, message, throwable, argArray);
    // Formatting is lazy: nothing is rendered until getFormattedMessage() runs.
    assertNull(event.formattedMessage);
    assertEquals("x=12", event.getFormattedMessage());
}
|
/**
 * Returns the users assigned to any of the given posts.
 * Empty input or no matching assignments short-circuits to an empty list.
 */
@Override
public List<AdminUserDO> getUserListByPostIds(Collection<Long> postIds) {
    if (CollUtil.isEmpty(postIds)) {
        return Collections.emptyList();
    }
    // Resolve post assignments to the distinct set of user ids.
    Set<Long> userIds = convertSet(userPostMapper.selectListByPostIds(postIds), UserPostDO::getUserId);
    if (CollUtil.isEmpty(userIds)) {
        return Collections.emptyList();
    }
    return userMapper.selectBatchIds(userIds);
}
|
@Test
public void testUserListByPostIds() {
    // Prepare arguments
    Collection<Long> postIds = asSet(10L, 20L);
    // Mock data for user1 (assigned to post 10, which matches the query)
    AdminUserDO user1 = randomAdminUserDO(o -> o.setPostIds(asSet(10L, 30L)));
    userMapper.insert(user1);
    userPostMapper.insert(new UserPostDO().setUserId(user1.getId()).setPostId(10L));
    userPostMapper.insert(new UserPostDO().setUserId(user1.getId()).setPostId(30L));
    // Mock data for user2 (assigned only to post 100, which does not match)
    AdminUserDO user2 = randomAdminUserDO(o -> o.setPostIds(singleton(100L)));
    userMapper.insert(user2);
    userPostMapper.insert(new UserPostDO().setUserId(user2.getId()).setPostId(100L));
    // Call
    List<AdminUserDO> result = userService.getUserListByPostIds(postIds);
    // Assert: only user1 matches
    assertEquals(1, result.size());
    assertEquals(user1, result.get(0));
}
|
/**
 * Fetches the earliest available offset for each of the given partitions.
 *
 * @param partitions partitions to look up
 * @param timer bounds how long the lookup may block
 * @return mapping from partition to its beginning (earliest) offset
 */
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, Timer timer) {
    return beginningOrEndOffset(partitions, ListOffsetsRequest.EARLIEST_TIMESTAMP, timer);
}
|
@Test
public void testBeginningOffsetsEmpty() {
    buildFetcher();
    // No partitions requested => empty result.
    assertEquals(emptyMap(), offsetFetcher.beginningOffsets(emptyList(), time.timer(5000L)));
}
|
/**
 * Extracts the username from a JWT token.
 * <p>
 * The token subject is expected to be a comma-separated list whose second
 * element is the username — assumes the issuer always writes at least two
 * elements; TODO confirm against the token-creation code.
 *
 * @param token the JWT token to parse
 * @return the username embedded in the token subject
 */
public static String getUsername(String token) {
    // Index the split result directly; wrapping it in a List added no value.
    String[] userInfo = getTokenBody(token).getSubject().split(Constants.SPLIT_COMMA);
    return userInfo[1];
}
|
@Test
public void getUsername() {
    // The username extracted from the token subject must match the original.
    String name = JwtTokenUtil.getUsername(token);
    Assert.isTrue(username.equals(name));
}
|
/**
 * Parses the paragraph text as java.util.Properties and overlays it onto the
 * interpreter group's current properties, trimming keys and values.
 *
 * @return SUCCESS on update; ERROR with a stack trace if parsing fails
 */
@Override
public InterpreterResult interpret(String st, InterpreterContext context)
    throws InterpreterException {
    try {
        // Start from the current interpreter properties, then overlay the
        // properties parsed from the paragraph text.
        Properties finalProperties = new Properties();
        finalProperties.putAll(getProperties());
        Properties newProperties = new Properties();
        newProperties.load(new StringReader(st));
        for (String key : newProperties.stringPropertyNames()) {
            // Trim both key and value so stray whitespace in the paragraph is ignored.
            finalProperties.put(key.trim(), newProperties.getProperty(key).trim());
        }
        LOGGER.debug("Properties for InterpreterGroup: {} is {}", interpreterGroupId, finalProperties);
        interpreterSetting.setInterpreterGroupProperties(interpreterGroupId, finalProperties);
        return new InterpreterResult(InterpreterResult.Code.SUCCESS);
    } catch (IOException e) {
        LOGGER.error("Fail to update interpreter setting", e);
        return new InterpreterResult(InterpreterResult.Code.ERROR, ExceptionUtils.getStackTrace(e));
    }
}
|
@Test
void testPropertyTrim() throws InterpreterException {
    assertTrue(interpreterFactory.getInterpreter("test.conf", executionContext) instanceof ConfInterpreter);
    ConfInterpreter confInterpreter = (ConfInterpreter) interpreterFactory.getInterpreter("test.conf", executionContext);
    InterpreterContext context = InterpreterContext.builder()
        .setNoteId("noteId")
        .setParagraphId("paragraphId")
        .build();
    // space before key and space after values
    InterpreterResult result = confInterpreter.interpret(" property_1 \tnew_value \n new_property \t dummy_value \n", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code);
    assertTrue(interpreterFactory.getInterpreter("test", executionContext) instanceof RemoteInterpreter);
    RemoteInterpreter remoteInterpreter = (RemoteInterpreter) interpreterFactory.getInterpreter("test", executionContext);
    remoteInterpreter.interpret("hello world", context);
    Properties intpProperties = remoteInterpreter.getProperties();
    // Total 7 properties,
    // 3 built-in properties (zeppelin.interpreter.output.limit, zeppelin.interpreter.localRepo, zeppelin.interpreter.connection.poolsize)
    assertEquals(7, intpProperties.size());
    assertNotNull(intpProperties.getProperty("zeppelin.interpreter.output.limit"));
    assertNotNull(intpProperties.getProperty("zeppelin.interpreter.localRepo"));
    assertNotNull(intpProperties.getProperty("zeppelin.interpreter.connection.poolsize"));
    // Keys and values set via the conf paragraph must arrive trimmed.
    assertEquals("new_value", intpProperties.getProperty("property_1"));
    assertEquals("new_value_2", intpProperties.getProperty("property_2"));
    assertEquals("value_3", intpProperties.getProperty("property_3"));
    assertEquals("dummy_value", intpProperties.getProperty("new_property"));
}
|
/**
 * Checks whether the given bytes begin with the GZIP magic header
 * (0x1f, 0x8b — little-endian {@link GZIPInputStream#GZIP_MAGIC}).
 *
 * @param bytes candidate data; may be null
 * @return true only if the first two bytes match the GZIP magic number
 */
public static boolean isGzipStream(byte[] bytes) {
    int minByteArraySize = 2;
    if (bytes == null || bytes.length < minByteArraySize) {
        return false;
    }
    // Mask each byte before combining: Java bytes are signed, and relying on
    // the trailing & 0xFFFF to undo sign extension is fragile to read.
    return GZIPInputStream.GZIP_MAGIC == ((bytes[1] & 0xFF) << 8 | (bytes[0] & 0xFF));
}
|
@Test
void testIsGzipStreamWithNull() {
    // Null input must be reported as "not a gzip stream" rather than throw.
    assertFalse(IoUtils.isGzipStream(null));
}
|
/**
 * Combines two predicates into a compound predicate of the given class,
 * flattening one level: if either argument is already an instance of the
 * target compound class, its sub-predicates are merged instead of nested.
 *
 * @throws IllegalArgumentException if {@code klass} cannot be instantiated
 *         via a public default constructor (original cause attached)
 */
static <T extends CompoundPredicate> T flattenCompound(Predicate predicateLeft, Predicate predicateRight, Class<T> klass) {
    // The following could have been achieved with {@link com.hazelcast.query.impl.predicates.FlatteningVisitor},
    // however since we only care for 2-argument flattening, we can avoid constructing a visitor and its internals
    // for each token pass at the cost of the following explicit code.
    Predicate[] predicates;
    if (klass.isInstance(predicateLeft) || klass.isInstance(predicateRight)) {
        // At least one side is already the target compound type: splice its children in.
        Predicate[] left = getSubPredicatesIfClass(predicateLeft, klass);
        Predicate[] right = getSubPredicatesIfClass(predicateRight, klass);
        predicates = new Predicate[left.length + right.length];
        ArrayUtils.concat(left, right, predicates);
    } else {
        predicates = new Predicate[]{predicateLeft, predicateRight};
    }
    try {
        T compoundPredicate = klass.getDeclaredConstructor().newInstance();
        compoundPredicate.setPredicates(predicates);
        return compoundPredicate;
    } catch (ReflectiveOperationException e) {
        // Preserve the reflective failure as the cause instead of discarding it.
        throw new IllegalArgumentException(String.format("%s must have a public default constructor", klass.getName()), e);
    }
}
|
@Test
public void testFlattenAnd_withOrAndPredicates() {
    OrPredicate orPredicate = new OrPredicate(leftOfOr, rightOfOr);
    AndPredicate andPredicate = new AndPredicate(leftOfAnd, rightOfAnd);
    // The AND side is flattened into its children; the OR side stays intact.
    AndPredicate flattenedCompoundAnd = SqlPredicate.flattenCompound(orPredicate, andPredicate, AndPredicate.class);
    assertSame(orPredicate, flattenedCompoundAnd.getPredicates()[0]);
    assertSame(leftOfAnd, flattenedCompoundAnd.getPredicates()[1]);
    assertSame(rightOfAnd, flattenedCompoundAnd.getPredicates()[2]);
}
|
/**
 * Resolves the storage account key by decrypting the encrypted key envelope
 * with an external script configured via fs.azure.shellkeyprovider.script.
 *
 * @throws KeyProviderException if configuration cannot be loaded, the script
 *         path is missing, or running the script fails
 */
@Override
public String getStorageAccountKey(String accountName, Configuration rawConfig)
    throws KeyProviderException {
    // The base provider returns the (still encrypted) key envelope.
    String envelope = super.getStorageAccountKey(accountName, rawConfig);
    AbfsConfiguration abfsConfig;
    try {
        abfsConfig = new AbfsConfiguration(rawConfig, accountName);
    } catch(IllegalAccessException | IOException e) {
        throw new KeyProviderException("Unable to get key from credential providers.", e);
    }
    final String command = abfsConfig.get(ConfigurationKeys.AZURE_KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT);
    if (command == null) {
        throw new KeyProviderException(
            "Script path is not specified via fs.azure.shellkeyprovider.script");
    }
    // Append the envelope as the final argument of the decryption command line.
    String[] cmd = command.split(" ");
    String[] cmdWithEnvelope = Arrays.copyOf(cmd, cmd.length + 1);
    cmdWithEnvelope[cmdWithEnvelope.length - 1] = envelope;
    String decryptedKey = null;
    try {
        decryptedKey = Shell.execCommand(cmdWithEnvelope);
    } catch (IOException ex) {
        throw new KeyProviderException(ex);
    }
    // trim any whitespace
    return decryptedKey.trim();
}
|
@Test
public void testScriptPathNotSpecified() throws Exception {
    // The shell key provider is only exercised on Windows; skip elsewhere.
    if (!Shell.WINDOWS) {
        return;
    }
    ShellDecryptionKeyProvider provider = new ShellDecryptionKeyProvider();
    Configuration conf = new Configuration();
    String account = "testacct";
    String key = "key";
    conf.set(ConfigurationKeys.FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME + account, key);
    // No script path configured => KeyProviderException is expected.
    try {
        provider.getStorageAccountKey(account, conf);
        Assert
            .fail("fs.azure.shellkeyprovider.script is not specified, we should throw");
    } catch (KeyProviderException e) {
        LOG.info("Received an expected exception: " + e.getMessage());
    }
}
|
/**
 * Re-reads NameNode (and lifeline) RPC addresses from the configuration and
 * reconciles the block-pool services against them.
 *
 * @throws IOException if no NameNode addresses could be determined
 */
void refreshNamenodes(Configuration conf)
    throws IOException {
    LOG.info("Refresh request received for nameservices: " +
        conf.get(DFSConfigKeys.DFS_NAMESERVICES));
    Map<String, Map<String, InetSocketAddress>> newAddressMap = null;
    Map<String, Map<String, InetSocketAddress>> newLifelineAddressMap = null;
    try {
        newAddressMap =
            DFSUtil.getNNServiceRpcAddressesForCluster(conf);
        newLifelineAddressMap =
            DFSUtil.getNNLifelineRpcAddressesForCluster(conf);
    } catch (IOException ioe) {
        // Logged here; surfaced below as the "no services" error.
        LOG.warn("Unable to get NameNode addresses.", ioe);
    }
    if (newAddressMap == null || newAddressMap.isEmpty()) {
        throw new IOException("No services to connect, missing NameNode " +
            "address.");
    }
    // Serialize refreshes so concurrent requests cannot interleave.
    synchronized (refreshNamenodesLock) {
        doRefreshNamenodes(newAddressMap, newLifelineAddressMap);
    }
}
|
@Test
public void testFederationRefresh() throws Exception {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
        "ns1,ns2");
    addNN(conf, "ns1", "mock1:8020");
    addNN(conf, "ns2", "mock1:8020");
    bpm.refreshNamenodes(conf);
    // Both nameservices get a fresh block-pool service on first refresh.
    assertEquals(
        "create #1\n" +
        "create #2\n", log.toString());
    log.setLength(0);
    // Remove the first NS
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
        "ns2");
    bpm.refreshNamenodes(conf);
    assertEquals(
        "stop #1\n" +
        "refresh #2\n", log.toString());
    log.setLength(0);
    // Add back an NS -- this creates a new BPOS since the old
    // one for ns2 should have been previously retired
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
        "ns1,ns2");
    bpm.refreshNamenodes(conf);
    assertEquals(
        "create #3\n" +
        "refresh #2\n", log.toString());
}
|
@ApiOperation(value = "Get a process definition", tags = { "Process Definitions" })
@ApiResponses(value = {
    @ApiResponse(code = 200, message = "Indicates request was successful and the process-definitions are returned"),
    @ApiResponse(code = 404, message = "Indicates the requested process definition was not found.")
})
@GetMapping(value = "/repository/process-definitions/{processDefinitionId}", produces = "application/json")
public ProcessDefinitionResponse getProcessDefinition(@ApiParam(name = "processDefinitionId") @PathVariable String processDefinitionId) {
    // Lookup is expected to surface a not-found error for unknown ids
    // (see the 404 ApiResponse above) — behavior lives in the helper.
    ProcessDefinition processDefinition = getProcessDefinitionFromRequest(processDefinitionId);
    return restResponseFactory.createProcessDefinitionResponse(processDefinition);
}
|
@Test
@Deployment(resources = { "org/flowable/rest/service/api/repository/oneTaskProcess.bpmn20.xml" })
public void testGetProcessDefinition() throws Exception {
    ProcessDefinition processDefinition = repositoryService.createProcessDefinitionQuery().singleResult();
    HttpGet httpGet = new HttpGet(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_PROCESS_DEFINITION, processDefinition.getId()));
    CloseableHttpResponse response = executeRequest(httpGet, HttpStatus.SC_OK);
    JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
    closeResponse(response);
    // Response JSON must mirror the deployed definition's fields and URLs.
    assertThatJson(responseNode)
        .when(Option.IGNORING_EXTRA_FIELDS)
        .isEqualTo("{"
            + "id: '" + processDefinition.getId() + "',"
            + "name: '" + processDefinition.getName() + "',"
            + "key: '" + processDefinition.getKey() + "',"
            + "category: '" + processDefinition.getCategory() + "',"
            + "version: " + processDefinition.getVersion() + ","
            + "description: '" + processDefinition.getDescription() + "',"
            + "url: '" + httpGet.getURI().toString() + "',"
            + "deploymentId: '" + processDefinition.getDeploymentId() + "',"
            + "deploymentUrl: '" + SERVER_URL_PREFIX + RestUrls
                .createRelativeResourceUrl(RestUrls.URL_DEPLOYMENT, processDefinition.getDeploymentId()) + "',"
            + "resource: '" + SERVER_URL_PREFIX + RestUrls
                .createRelativeResourceUrl(RestUrls.URL_DEPLOYMENT_RESOURCE, processDefinition.getDeploymentId(), processDefinition.getResourceName())
            + "',"
            + "graphicalNotationDefined: false,"
            + "diagramResource: null"
            + "}");
}
|
/**
 * Routes the statement: broadcast routing when no logic tables are involved,
 * binding-table routing when all involved tables are bound together, and
 * otherwise a cartesian product of the per-table route results.
 */
@Override
public RouteContext route(final ShardingRule shardingRule) {
    RouteContext result = new RouteContext();
    Collection<String> logicTableNames = getLogicTableNames();
    if (logicTableNames.isEmpty()) {
        result.getRouteUnits().addAll(getBroadcastTableRouteUnits(shardingRule, ""));
        return result;
    }
    if (logicTableNames.size() > 1 && shardingRule.isAllBindingTables(logicTableNames)) {
        result.getRouteUnits().addAll(getBindingTableRouteUnits(shardingRule, logicTableNames));
    } else {
        // Unrelated tables: combine each table's route result cartesian-style.
        Collection<RouteContext> routeContexts = getRouteContexts(shardingRule, logicTableNames);
        RouteContext routeContext = new ShardingCartesianRoutingEngine(routeContexts).route(shardingRule);
        result.getOriginalDataNodes().addAll(routeContext.getOriginalDataNodes());
        result.getRouteUnits().addAll(routeContext.getRouteUnits());
    }
    return result;
}
|
@Test
void assertRouteForNormalTable() {
    Collection<String> tableNames = Collections.singletonList("t_order");
    ShardingTableBroadcastRoutingEngine shardingTableBroadcastRoutingEngine =
        new ShardingTableBroadcastRoutingEngine(mock(ShardingSphereDatabase.class), createSQLStatementContext(tableNames), tableNames);
    RouteContext routeContext = shardingTableBroadcastRoutingEngine.route(createShardingRule());
    // Broadcast covers every actual table on every data source: 2 x 2 units.
    assertThat(routeContext.getActualDataSourceNames().size(), is(2));
    assertThat(routeContext.getRouteUnits().size(), is(4));
    Iterator<RouteUnit> routeUnits = routeContext.getRouteUnits().iterator();
    assertRouteUnit(routeUnits.next(), "ds0", "t_order_0");
    assertRouteUnit(routeUnits.next(), "ds0", "t_order_1");
    assertRouteUnit(routeUnits.next(), "ds1", "t_order_0");
    assertRouteUnit(routeUnits.next(), "ds1", "t_order_1");
}
|
/**
 * Child-first class loading: try this loader's own URLs before delegating to
 * the parent, inverting the default parent-first behavior.
 */
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
    // has the class loaded already?
    Class<?> loadedClass = findLoadedClass(name);
    if (loadedClass == null) {
        try {
            // find the class from given jar urls as in first constructor parameter.
            loadedClass = findClass(name);
        } catch (ClassNotFoundException ignored) {
            // ignore class not found
        }
        if (loadedClass == null) {
            // Parent fallback; note ClassLoader.loadClass throws
            // ClassNotFoundException rather than returning null, so the
            // null check below is purely defensive.
            loadedClass = getParent().loadClass(name);
        }
        if (loadedClass == null) {
            throw new ClassNotFoundException("Could not find class " + name + " in classloader nor in parent classloader");
        }
    }
    if (resolve) {
        resolveClass(loadedClass);
    }
    return loadedClass;
}
|
@Test
public void canLoadClassFromChildClassLoaderWhenPresentInParentClassloader() throws Exception {
    URL testClassesUrl = new File("./target/test-classes").toURI().toURL();
    cl = new ChildFirstClassLoader(new URL[]{testClassesUrl}, ChildFirstClassLoader.class.getClassLoader());
    String className = ChildFirstClassLoaderTest.class.getName();
    Class<?> clazz = cl.loadClass(className);
    assertThat(clazz).isNotNull();
    assertThat(clazz.getName()).isEqualTo(className);
    // Child-first: the class must come from the child loader even though the
    // parent could also have supplied it.
    assertThat(clazz.getClassLoader()).isEqualTo(cl);
}
|
/**
 * Closes the connection's input stream without propagating any failure.
 * A null connection is a no-op.
 */
public static void closeQuietly(HttpURLConnection connection) {
    if (connection == null) {
        return;
    }
    try {
        // Closing the input stream releases the underlying stream resources.
        closeQuietly(connection.getInputStream());
    } catch (Exception ignore) {
        // Best effort: suppress any failure while obtaining the stream.
    }
}
|
@Test
public void testCloseQuietly() throws IOException {
    Closeable closeable = new BrokenInputStream();
    // NOTE(review): opening a live connection makes this test depend on
    // external network availability — consider a local stub server instead.
    URL url = new URL("https://www.baidu.com");
    HttpURLConnection httpURLConnection = (HttpURLConnection) url.openConnection();
    // All overloads must swallow failures, including a broken stream.
    Assertions.assertDoesNotThrow(() -> IoUtil.closeQuietly(closeable));
    Assertions.assertDoesNotThrow(() -> IoUtil.closeQuietly(closeable, closeable, closeable));
    Assertions.assertDoesNotThrow(() -> IoUtil.closeQuietly(httpURLConnection));
}
|
/**
 * Renames/moves a file or folder via the Box API. An existing target is
 * deleted first; the cached file-id mappings are updated for both paths.
 */
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
    try {
        if(status.isExists()) {
            if(log.isWarnEnabled()) {
                log.warn(String.format("Delete file %s to be replaced with %s", renamed, file));
            }
            new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(renamed), callback, delete);
        }
        final String id = fileid.getFileId(file);
        if(file.isDirectory()) {
            // Folders use the folders endpoint; name and parent change in one call.
            final Folder result = new FoldersApi(new BoxApiClient(session.getClient())).putFoldersId(
                id, new FoldersFolderIdBody()
                    .name(renamed.getName())
                    .parent(new FoldersfolderIdParent()
                        .id(fileid.getFileId(renamed.getParent()))),
                null, BoxAttributesFinderFeature.DEFAULT_FIELDS);
            // The Box id follows the moved node: drop the old path mapping and
            // cache the same id under the new path.
            fileid.cache(file, null);
            fileid.cache(renamed, id);
            return renamed.withAttributes(new BoxAttributesFinderFeature(session, fileid).toAttributes(result));
        }
        // Files use the files endpoint with the analogous rename+reparent body.
        final File result = new FilesApi(new BoxApiClient(session.getClient())).putFilesId(
            id, new FilesFileIdBody()
                .name(renamed.getName())
                .parent(new FilesfileIdParent()
                    .id(fileid.getFileId(renamed.getParent()))),
            null, BoxAttributesFinderFeature.DEFAULT_FIELDS);
        fileid.cache(file, null);
        fileid.cache(renamed, id);
        return renamed.withAttributes(new BoxAttributesFinderFeature(session, fileid).toAttributes(result));
    }
    catch(ApiException e) {
        throw new BoxExceptionMappingService(fileid).map("Cannot rename {0}", e, file);
    }
}
|
@Test
public void testMoveDirectory() throws Exception {
    final BoxFileidProvider fileid = new BoxFileidProvider(session);
    final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    new BoxDirectoryFeature(session, fileid).mkdir(test, new TransferStatus());
    final Path target = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    new BoxMoveFeature(session, fileid).move(test, target, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    // Source must be gone and target present after the move.
    assertFalse(new BoxFindFeature(session, fileid).find(test.withAttributes(PathAttributes.EMPTY)));
    assertTrue(new BoxFindFeature(session, fileid).find(target));
    new BoxDeleteFeature(session, fileid).delete(Collections.<Path>singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Refreshes the cached resilience4j handle for the given rule: the previous
 * registry entry is removed, then the rule's JSON handle (if any) is parsed
 * and cached under the rule's key.
 */
@Override
public void handlerRule(final RuleData ruleData) {
    String key = CacheKeyUtils.INST.getKey(ruleData);
    Resilience4JRegistryFactory.remove(key);
    String handle = ruleData.getHandle();
    if (handle != null) {
        Resilience4JHandle resilience4JHandle = GsonUtils.getInstance().fromJson(handle, Resilience4JHandle.class);
        CACHED_HANDLE.get().cachedHandle(key, resilience4JHandle);
    }
}
|
@Test
public void testHandlerRule() {
    ruleData.setSelectorId("1");
    ruleData.setHandle("{\"urlPath\":\"test\"}");
    ruleData.setId("test");
    resilience4JHandler.handlerRule(ruleData);
    // The parsed handle must be cached under the "<selectorId>_<id>" key.
    Supplier<CommonHandleCache<String, Resilience4JHandle>> cache = Resilience4JHandler.CACHED_HANDLE;
    Assertions.assertNotNull(cache.get().obtainHandle("1_test"));
}
|
/**
 * Sandbox class loading: classes matched by the config are (possibly)
 * instrumented by this loader; everything else delegates to the parent.
 *
 * @throws ClassNotFoundException if the loader has been closed or the class
 *         cannot be found
 */
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
    // Standard per-name lock prevents concurrent duplicate definitions.
    synchronized (getClassLoadingLock(name)) {
        Class<?> loadedClass = findLoadedClass(name);
        if (loadedClass != null) {
            return loadedClass;
        }
        if (isClosed) {
            throw new ClassNotFoundException("This ClassLoader is closed");
        }
        if (config.shouldAcquire(name)) {
            // Instrumentation time is recorded in the perf statistics.
            loadedClass =
                PerfStatsCollector.getInstance()
                    .measure("load sandboxed class", () -> maybeInstrumentClass(name));
        } else {
            loadedClass = getParent().loadClass(name);
        }
        if (resolve) {
            resolveClass(loadedClass);
        }
        return loadedClass;
    }
}
|
@Test
public void callingNativeMethodShouldInvokeClassHandler() throws Exception {
    Class<?> exampleClass = loadClass(AClassWithNativeMethod.class);
    Method normalMethod = exampleClass.getDeclaredMethod("nativeMethod", String.class, int.class);
    Object exampleInstance = exampleClass.getDeclaredConstructor().newInstance();
    // The instrumented native method must be routed through the class handler.
    assertEquals(
        "response from methodInvoked:"
            + " AClassWithNativeMethod.nativeMethod(java.lang.String value1, int 123)",
        normalMethod.invoke(exampleInstance, "value1", 123));
    // Constructor and method invocations are both recorded.
    assertThat(transcript)
        .containsExactly(
            "methodInvoked: AClassWithNativeMethod.__constructor__()",
            "methodInvoked: AClassWithNativeMethod.nativeMethod(java.lang.String value1, int 123)");
}
|
/**
 * Schedules a retry when a job transitions to FAILED — unless the failure is
 * a job-not-found, the exception type must not be retried, or the retry
 * budget is exhausted.
 */
@Override
public void onStateElection(Job job, JobState newState) {
    if (isNotFailed(newState) || isJobNotFoundException(newState) || isProblematicExceptionAndMustNotRetry(newState) || maxAmountOfRetriesReached(job))
        return;
    // Delay is derived from the job's failure count (see getSecondsToAdd).
    job.scheduleAt(now().plusSeconds(getSecondsToAdd(job)), String.format("Retry %d of %d", getFailureCount(job), getMaxNumberOfRetries(job)));
}
|
@Test
void retryFilterSchedulesJobAgainIfStateIsFailedButMaxNumberOfRetriesIsNotReached() {
    final Job job = aJob()
        .<TestService>withJobDetails(ts -> ts.doWorkThatFails())
        .withState(new FailedState("a message", new RuntimeException("boem")))
        .build();
    applyDefaultJobFilter(job);
    int beforeVersion = job.getJobStates().size();
    retryFilter.onStateElection(job, job.getJobState());
    int afterVersion = job.getJobStates().size();
    // Exactly one new state (SCHEDULED) must have been appended by the filter.
    assertThat(afterVersion).isEqualTo(beforeVersion + 1);
    assertThat(job.getState()).isEqualTo(SCHEDULED);
}
|
/**
 * Builds a KafkaUserModel from the KafkaUser custom resource: validates the
 * TLS username and desired password, then copies authentication,
 * authorization (simple ACLs, when supported), quotas and the secret
 * template metadata.
 *
 * @throws InvalidResourceException if simple ACL rules are configured but
 *         ACL management is not supported by the cluster configuration
 */
public static KafkaUserModel fromCrd(KafkaUser kafkaUser,
                                     String secretPrefix,
                                     boolean aclsAdminApiSupported) {
    KafkaUserModel result = new KafkaUserModel(kafkaUser.getMetadata().getNamespace(),
        kafkaUser.getMetadata().getName(),
        Labels.fromResource(kafkaUser).withStrimziKind(kafkaUser.getKind()),
        secretPrefix);
    validateTlsUsername(kafkaUser);
    validateDesiredPassword(kafkaUser);
    result.setOwnerReference(kafkaUser);
    result.setAuthentication(kafkaUser.getSpec().getAuthentication());
    if (kafkaUser.getSpec().getAuthorization() != null && kafkaUser.getSpec().getAuthorization().getType().equals(KafkaUserAuthorizationSimple.TYPE_SIMPLE)) {
        if (aclsAdminApiSupported) {
            KafkaUserAuthorizationSimple simple = (KafkaUserAuthorizationSimple) kafkaUser.getSpec().getAuthorization();
            result.setSimpleAclRules(simple.getAcls());
        } else {
            throw new InvalidResourceException("Simple authorization ACL rules are configured but not supported in the Kafka cluster configuration.");
        }
    }
    result.setQuotas(kafkaUser.getSpec().getQuotas());
    // Copy optional labels/annotations for the generated secret, if templated.
    if (kafkaUser.getSpec().getTemplate() != null
        && kafkaUser.getSpec().getTemplate().getSecret() != null
        && kafkaUser.getSpec().getTemplate().getSecret().getMetadata() != null) {
        result.templateSecretLabels = kafkaUser.getSpec().getTemplate().getSecret().getMetadata().getLabels();
        result.templateSecretAnnotations = kafkaUser.getSpec().getTemplate().getSecret().getMetadata().getAnnotations();
    }
    return result;
}
|
@Test
public void testFromCrdTlsUserWith65CharTlsUsernameThrows() {
    // Name exceeding the TLS username length limit must be rejected.
    KafkaUser tooLong = new KafkaUserBuilder(tlsUser)
        .editMetadata()
            .withName("User-123456789012345678901234567890123456789012345678901234567890")
        .endMetadata()
        .build();
    assertThrows(InvalidResourceException.class, () -> {
        // 65 characters => Should throw exception with TLS
        KafkaUserModel.fromCrd(tooLong, UserOperatorConfig.SECRET_PREFIX.defaultValue(), Boolean.parseBoolean(UserOperatorConfig.ACLS_ADMIN_API_SUPPORTED.defaultValue()));
    });
}
|
@Override
public void processItems(List<IntentData> items) {
    // Mark the accumulator as busy, then hand the reduced batch to the delegate.
    // reduce() collapses the batch — presumably to one entry per intent;
    // confirm against reduce()'s implementation.
    ready = false;
    delegate.execute(reduce(items));
}
|
@Test
public void checkAccumulator() {
    MockIntentBatchDelegate delegate = new MockIntentBatchDelegate();
    IntentAccumulator accumulator = new IntentAccumulator(delegate);
    // Multiple updates for the same intents within one batch; processItems
    // should reduce them before invoking the delegate.
    List<IntentData> intentDataItems = ImmutableList.of(
        new IntentData(intent1, IntentState.INSTALLING,
            new MockTimestamp(1)),
        new IntentData(intent2, IntentState.INSTALLING,
            new MockTimestamp(1)),
        new IntentData(intent3, IntentState.INSTALLED,
            new MockTimestamp(1)),
        new IntentData(intent2, IntentState.INSTALLED,
            new MockTimestamp(1)),
        new IntentData(intent2, IntentState.INSTALLED,
            new MockTimestamp(1)),
        new IntentData(intent1, IntentState.INSTALLED,
            new MockTimestamp(1)));
    accumulator.processItems(intentDataItems);
}
|
// Returns the configured provider id; fails fast when the setting is absent.
String getProviderId() {
    return configuration.get(PROVIDER_ID).orElseThrow(() -> new IllegalArgumentException("Provider ID is missing"));
}
|
@Test
public void fail_to_get_provider_id_when_null() {
    // A missing provider id must surface as an IllegalArgumentException.
    assertThatThrownBy(() -> underTest.getProviderId())
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Provider ID is missing");
}
|
/**
 * Sends an SMPP replace_sm command built from the exchange headers and body,
 * then stores the message id on the result message.
 *
 * @throws SmppException wrapping any failure from the SMPP session
 */
@Override
public void execute(Exchange exchange) throws SmppException {
    byte[] message = getShortMessage(exchange.getIn());
    ReplaceSm replaceSm = createReplaceSmTempate(exchange);
    replaceSm.setShortMessage(message);
    if (log.isDebugEnabled()) {
        log.debug("Sending replacement command for a short message for exchange id '{}' and message id '{}'",
            exchange.getExchangeId(), replaceSm.getMessageId());
    }
    try {
        session.replaceShortMessage(
            replaceSm.getMessageId(),
            TypeOfNumber.valueOf(replaceSm.getSourceAddrTon()),
            NumberingPlanIndicator.valueOf(replaceSm.getSourceAddrNpi()),
            replaceSm.getSourceAddr(),
            replaceSm.getScheduleDeliveryTime(),
            replaceSm.getValidityPeriod(),
            new RegisteredDelivery(replaceSm.getRegisteredDelivery()),
            replaceSm.getSmDefaultMsgId(),
            replaceSm.getShortMessage());
    } catch (Exception e) {
        // Normalize all session failures to the component's exception type.
        throw new SmppException(e);
    }
    if (log.isDebugEnabled()) {
        log.debug("Sent replacement command for a short message for exchange id '{}' and message id '{}'",
            exchange.getExchangeId(), replaceSm.getMessageId());
    }
    Message rspMsg = ExchangeHelper.getResultMessage(exchange);
    rspMsg.setHeader(SmppConstants.ID, replaceSm.getMessageId());
}
|
@Test
public void execute() throws Exception {
    Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
    exchange.getIn().setHeader(SmppConstants.COMMAND, "ReplaceSm");
    exchange.getIn().setHeader(SmppConstants.ID, "1");
    exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_TON, TypeOfNumber.NATIONAL.value());
    exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_NPI, NumberingPlanIndicator.NATIONAL.value());
    exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR, "1818");
    exchange.getIn().setHeader(SmppConstants.SCHEDULE_DELIVERY_TIME, new Date(1111111));
    exchange.getIn().setHeader(SmppConstants.VALIDITY_PERIOD, new Date(2222222));
    exchange.getIn().setHeader(SmppConstants.REGISTERED_DELIVERY,
        new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE).value());
    exchange.getIn().setBody("new short message body");
    command.execute(exchange);
    // Dates are rendered as SMPP time strings; the message id header must be
    // propagated to the out message.
    verify(session).replaceShortMessage(eq("1"), eq(TypeOfNumber.NATIONAL), eq(NumberingPlanIndicator.NATIONAL), eq("1818"),
        eq("-300101001831100+"), eq("-300101003702200+"),
        eq(new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE)), eq((byte) 0), eq("new short message body".getBytes()));
    assertEquals("1", exchange.getMessage().getHeader(SmppConstants.ID));
}
|
/**
 * Access-style request logging: registers an end handler that, once the
 * response has completed, emits one combined-log-format line (subject to the
 * rate limiter and the optional endpoint filter).
 */
@Override
public void handle(final RoutingContext routingContext) {
    routingContext.addEndHandler(ar -> {
        // After the response is complete, log results here.
        final int status = routingContext.request().response().getStatusCode();
        if (!loggingRateLimiter.shouldLog(logger, routingContext.request().path(), status)) {
            return;
        }
        final long contentLength = routingContext.request().response().bytesWritten();
        final HttpVersion version = routingContext.request().version();
        final HttpMethod method = routingContext.request().method();
        // Query strings are only included when explicitly enabled.
        final String uri = enableQueryLogging
            ? routingContext.request().uri()
            : routingContext.request().path();
        if (endpointFilter.isPresent() && endpointFilter.get().matcher(uri).matches()) {
            return;
        }
        final long requestBodyLength = routingContext.request().bytesRead();
        final String versionFormatted;
        switch (version) {
            case HTTP_1_0:
                versionFormatted = "HTTP/1.0";
                break;
            case HTTP_1_1:
                versionFormatted = "HTTP/1.1";
                break;
            case HTTP_2:
                versionFormatted = "HTTP/2.0";
                break;
            default:
                versionFormatted = "-";
        }
        // "-" placeholders follow common/combined access log conventions.
        final String name = Optional.ofNullable((ApiUser) routingContext.user())
            .map(u -> u.getPrincipal().getName())
            .orElse("-");
        final String userAgent = Optional.ofNullable(
            routingContext.request().getHeader(HTTP_HEADER_USER_AGENT)).orElse("-");
        final String timestamp = Utils.formatRFC1123DateTime(clock.millis());
        final SocketAddress socketAddress = routingContext.request().remoteAddress();
        final String message = String.format(
            "%s - %s [%s] \"%s %s %s\" %d %d \"-\" \"%s\" %d",
            socketAddress == null ? "null" : socketAddress.host(),
            name,
            timestamp,
            method,
            uri,
            versionFormatted,
            status,
            contentLength,
            userAgent,
            requestBodyLength);
        doLog(status, message);
    });
    // Continue the handler chain immediately; logging happens at response end.
    routingContext.next();
}
|
@Test
public void shouldProduceLogWithQuery() {
    // Given: query logging explicitly enabled in the server config.
    when(response.getStatusCode()).thenReturn(200);
    config = new KsqlRestConfig(
        ImmutableMap.of(KsqlRestConfig.KSQL_ENDPOINT_LOGGING_LOG_QUERIES_CONFIG, true)
    );
    when(server.getConfig()).thenReturn(config);
    loggingHandler = new LoggingHandler(server, loggingRateLimiter, logger, clock);
    // When: the registered end handler fires after the response completes.
    loggingHandler.handle(routingContext);
    verify(routingContext).addEndHandler(endCallback.capture());
    endCallback.getValue().handle(null);
    // Then: the full URI including the query string (?foo=bar) is logged.
    verify(logger).info(logStringCaptor.capture());
    assertThat(logStringCaptor.getValue(),
        is("123.111.222.333 - - [Sun, 12 Nov 2023 18:23:54 GMT] "
            + "\"POST /query?foo=bar HTTP/1.1\" 200 5678 \"-\" \"bot\" 3456"));
}
|
/**
 * Starts the embedded database server, creating the data directory first if
 * it does not exist yet.
 *
 * @throws IllegalStateException if the data directory cannot be created
 */
@Override
public void start() {
    File dbHome = new File(getRequiredSetting(PATH_DATA.getKey()));
    // mkdirs() returns false both on failure and when another thread created
    // the directory concurrently, so re-check existence before failing.
    if (!dbHome.exists() && !dbHome.mkdirs() && !dbHome.exists()) {
        throw new IllegalStateException("Fail to create database home directory: " + dbHome);
    }
    startServer(dbHome);
}
|
@Test
public void start_supports_in_memory_H2_JDBC_URL() throws IOException {
    int port = NetworkUtilsImpl.INSTANCE.getNextLoopbackAvailablePort();
    settings
        .setProperty(PATH_DATA.getKey(), temporaryFolder.newFolder().getAbsolutePath())
        .setProperty(JDBC_URL.getKey(), "jdbc:h2:mem:sonar")
        .setProperty(JDBC_EMBEDDED_PORT.getKey(), "" + port)
        .setProperty(JDBC_USERNAME.getKey(), "foo")
        .setProperty(JDBC_PASSWORD.getKey(), "bar");
    underTest.start();
    // The embedded server must accept TCP connections with the given credentials.
    checkDbIsUp(port, "foo", "bar");
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.