| focal_method (string, lengths 13–60.9k) | test_case (string, lengths 25–109k) |
|---|---|
/**
 * Removes the mapping for {@code key} and returns the previously cached value,
 * or {@code null} if none was present (delegates to the cache's atomic getAndRemove).
 */
@Override
public V remove(K key) {
    return cache.getAndRemove(key);
}
|
/** Verifies that the adapter's remove() returns the old value and evicts the entry. */
@Test
public void testRemove() {
    cache.put(23, "value-23");
    assertTrue(cache.containsKey(23));
    // remove() must return the value that was mapped before removal.
    assertEquals("value-23", adapter.remove(23));
    assertFalse(cache.containsKey(23));
}
|
/**
 * Notifies the producer that all records of {@code inputChannel} have been processed,
 * by enqueueing an acknowledgement message on the netty channel.
 */
@Override
public void acknowledgeAllRecordsProcessed(RemoteInputChannel inputChannel) {
    sendToChannel(new AcknowledgeAllRecordsProcessedMessage(inputChannel));
}
|
/**
 * Verifies that acknowledging all processed records on a remote input channel sends an
 * {@code AckAllUserRecordsProcessed} message carrying the channel's id, right after the
 * initial {@code PartitionRequest}, and nothing else.
 */
@TestTemplate
void testAcknowledgeAllRecordsProcessed() throws Exception {
    CreditBasedPartitionRequestClientHandler handler =
            new CreditBasedPartitionRequestClientHandler();
    EmbeddedChannel channel = new EmbeddedChannel(handler);
    PartitionRequestClient client =
            createPartitionRequestClient(channel, handler, connectionReuseEnabled);
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(10, 32);
    SingleInputGate inputGate = createSingleInputGate(1, networkBufferPool);
    RemoteInputChannel inputChannel = createRemoteInputChannel(inputGate, client);
    try {
        BufferPool bufferPool = networkBufferPool.createBufferPool(6, 6);
        inputGate.setBufferPool(bufferPool);
        inputGate.setupChannels();
        inputChannel.requestSubpartitions();
        inputChannel.acknowledgeAllRecordsProcessed();
        // Outbound messages are written asynchronously; flush the embedded channel's tasks.
        channel.runPendingTasks();
        // First outbound message is the subpartition request itself ...
        Object readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound).isInstanceOf(PartitionRequest.class);
        // ... followed by the acknowledgement addressed to this input channel.
        readFromOutbound = channel.readOutbound();
        assertThat(readFromOutbound)
                .isInstanceOf(NettyMessage.AckAllUserRecordsProcessed.class);
        assertThat(((NettyMessage.AckAllUserRecordsProcessed) readFromOutbound).receiverId)
                .isEqualTo(inputChannel.getInputChannelId());
        // No further messages should have been written.
        assertThat((Object) channel.readOutbound()).isNull();
    } finally {
        // Release all the buffer resources
        inputGate.close();
        networkBufferPool.destroyAllBufferPools();
        networkBufferPool.destroy();
    }
}
|
/**
 * Asks the ResourceManager admin protocol to refresh the cluster's maximum
 * application priority, optionally scoped to a sub-cluster.
 *
 * @param subClusterId target sub-cluster; blank means the whole cluster
 * @return 0 on success (CLI-style exit code)
 */
private int refreshClusterMaxPriority(String subClusterId) throws IOException, YarnException {
    // Refresh cluster max priority
    ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol();
    RefreshClusterMaxPriorityRequest request =
        recordFactory.newRecordInstance(RefreshClusterMaxPriorityRequest.class);
    if (StringUtils.isNotBlank(subClusterId)) {
        request.setSubClusterId(subClusterId);
    }
    adminProtocol.refreshClusterMaxPriority(request);
    return 0;
}
|
/** The CLI flag must succeed (exit code 0) and invoke the admin protocol exactly once. */
@Test
public void testRefreshClusterMaxPriority() throws Exception {
    String[] args = { "-refreshClusterMaxPriority" };
    assertEquals(0, rmAdminCLI.run(args));
    verify(admin).refreshClusterMaxPriority(
        any(RefreshClusterMaxPriorityRequest.class));
}
|
/**
 * Sends {@code request} to the bot API synchronously and returns the typed response.
 *
 * @param request the API request to send
 * @return the response type paired with the request via its generic parameters
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
|
/**
 * Exercises EditMessageLiveLocation both by chat/message id and by inline-message id.
 * The first call may legitimately fail with 400 when the target message can no longer
 * be edited; the inline-id call must succeed.
 */
@Test
public void editMessageLiveLocation() {
    BaseResponse response = bot.execute(new EditMessageLiveLocation(chatId, 10009, 21, 105)
        .replyMarkup(new InlineKeyboardMarkup()));
    if (!response.isOk()) {
        // Telegram rejects edits of messages past the live-location window.
        assertEquals(400, response.errorCode());
        assertEquals("Bad Request: message can't be edited", response.description());
    }
    // Unique button text so repeated runs do not trip "message is not modified".
    String buttonText = "btn_" + System.currentTimeMillis();
    response = bot.execute(
        new EditMessageLiveLocation("AgAAAPrwAQCj_Q4D2s-51_8jsuU", 21, 102)
            .horizontalAccuracy(1f)
            .heading(10)
            .proximityAlertRadius(100)
            .replyMarkup(new InlineKeyboardMarkup(new InlineKeyboardButton(buttonText).callbackGame(buttonText)))
    );
    assertTrue(response.isOk());
}
|
/**
 * {@inheritDoc}
 *
 * <p>Exactly one column is exposed, so {@code column} must be {@code 1}; the column is
 * never searchable.
 *
 * @throws IllegalArgumentException if {@code column} is not {@code 1}
 */
@Override
public boolean isSearchable(final int column) {
    // Include the offending index in the failure message instead of a bare
    // IllegalArgumentException with no context.
    Preconditions.checkArgument(1 == column, "Column index must be 1, but was: %s", column);
    return false;
}
|
/** The single valid column (index 1) must report itself as not searchable. */
@Test
void assertIsSearchable() throws SQLException {
    assertFalse(actualMetaData.isSearchable(1));
}
|
/**
 * Builds {@link CreateSourceProperties} from WITH-clause literals, translating any
 * {@code ConfigException} into a {@code KsqlException} whose message uses the
 * user-facing term "property" instead of "configuration".
 *
 * @param literals property name to literal value map from the WITH clause
 * @throws KsqlException if the underlying properties are invalid
 */
public static CreateSourceProperties from(final Map<String, Literal> literals) {
    try {
        return new CreateSourceProperties(literals, DurationParser::parse, false);
    } catch (final ConfigException e) {
        // Guard against a null detail message so the re-branding itself cannot NPE;
        // the original cause is preserved either way.
        final String message = e.getMessage() == null
            ? ""
            : e.getMessage().replace(
                "configuration",
                "property"
            );
        throw new KsqlException(message, e);
    }
}
|
/** A HOPPING window without WINDOW_SIZE must be rejected with a helpful message. */
@Test
public void shouldThrowOnHoppingWindowWithOutSize() {
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> CreateSourceProperties.from(
            ImmutableMap.<String, Literal>builder()
                .putAll(MINIMUM_VALID_PROPS)
                .put(WINDOW_TYPE_PROPERTY, new StringLiteral("hopping"))
                .build())
    );
    // Then:
    assertThat(e.getMessage(), containsString("HOPPING windows require 'WINDOW_SIZE' to be provided in the WITH clause. "
        + "For example: 'WINDOW_SIZE'='10 SECONDS'"));
}
|
/**
 * Returns one page of project tags in alphabetical order, optionally filtered by a
 * substring match on {@code textQuery}.
 *
 * <p>Paging is emulated on top of a terms aggregation: the first {@code size * page}
 * buckets are fetched, then {@code (page - 1) * size} of them are skipped client-side.
 *
 * @param textQuery optional substring filter; {@code null} means no filtering
 * @param page      1-based page number, at most 20
 * @param size      page size, at most 100; non-positive sizes yield an empty list
 * @throws IllegalArgumentException if {@code page} or {@code size} is out of bounds
 */
public List<String> searchTags(@Nullable String textQuery, int page, int size) {
    int maxPageSize = 100;
    int maxPage = 20;
    checkArgument(size <= maxPageSize, "Page size must be lower than or equals to " + maxPageSize);
    // The check rejects page == 0, so the message must state the real lower bound of 1.
    checkArgument(page > 0 && page <= maxPage, "Page must be between 1 and " + maxPage);
    if (size <= 0) {
        return emptyList();
    }
    TermsAggregationBuilder tagFacet = AggregationBuilders.terms(FIELD_TAGS)
        .field(FIELD_TAGS)
        // Fetch enough buckets to cover all pages up to the requested one.
        .size(size * page)
        .minDocCount(1)
        .order(BucketOrder.key(true));
    if (textQuery != null) {
        tagFacet.includeExclude(new IncludeExclude(".*" + escapeSpecialRegexChars(textQuery) + ".*", null));
    }
    SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder()
        .query(authorizationTypeSupport.createQueryFilter())
        .fetchSource(false)
        .aggregation(tagFacet);
    SearchResponse response = client.search(EsClient.prepareSearch(TYPE_PROJECT_MEASURES.getMainType())
        .source(searchSourceBuilder));
    Terms aggregation = response.getAggregations().get(FIELD_TAGS);
    return aggregation.getBuckets().stream()
        .skip((page - 1) * size)
        .map(Bucket::getKeyAsString)
        .toList();
}
|
/** Paging must return alphabetically ordered, non-overlapping slices of the tag set. */
@Test
public void search_tags_follows_paging() {
    index(
        newDoc().setTags(newArrayList("finance", "offshore", "java")),
        newDoc().setTags(newArrayList("official", "javascript")),
        newDoc().setTags(newArrayList("marketing", "official")),
        newDoc().setTags(newArrayList("marketing", "Madhoff")),
        newDoc().setTags(newArrayList("finance", "offshore")),
        newDoc().setTags(newArrayList("offshore")));
    List<String> result = underTest.searchTags(null, 1, 3);
    assertThat(result).containsExactly("Madhoff", "finance", "java");
    result = underTest.searchTags(null, 2, 3);
    assertThat(result).containsExactly("javascript", "marketing", "official");
    result = underTest.searchTags(null, 3, 3);
    assertThat(result).containsExactly("offshore");
    // A page past the end of the tag set is empty, not an error.
    result = underTest.searchTags(null, 3, 4);
    assertThat(result).isEmpty();
}
|
/**
 * Parses the token list into a node tree.
 *
 * @return the root node, or {@code null} when there are no tokens to parse
 * @throws ScanException if the token stream is malformed
 */
public Node parse() throws ScanException {
    final boolean nothingToParse = (tokenList == null) || tokenList.isEmpty();
    return nothingToParse ? null : E();
}
|
/** A bare "{}" input parses into two chained literal nodes: "{" followed by "}". */
@Test
public void literalWithAccolade0() throws ScanException {
    Tokenizer tokenizer = new Tokenizer("{}");
    Parser parser = new Parser(tokenizer.tokenize());
    Node node = parser.parse();
    Node witness = new Node(Node.Type.LITERAL, "{");
    witness.next = new Node(Node.Type.LITERAL, "}");
    assertEquals(witness, node);
}
|
/**
 * Extracts the split's watermark as the minimum lower-bound value of the event-time
 * column across all files in the split, converted to milliseconds.
 *
 * @throws IllegalArgumentException if any file lacks lower-bound statistics for the column
 */
@Override
public long extractWatermark(IcebergSourceSplit split) {
    return split.task().files().stream()
        .map(
            scanTask -> {
              // Fail fast when the column statistics needed for the watermark are absent.
              // Note: the message template has exactly two placeholders, so only the
              // column name and the file are passed (the id previously occupied the
              // "file" slot and the file was appended as a stray trailing argument).
              Preconditions.checkArgument(
                  scanTask.file().lowerBounds() != null
                      && scanTask.file().lowerBounds().get(eventTimeFieldId) != null,
                  "Missing statistics for column name = %s in file = %s",
                  eventTimeFieldName,
                  scanTask.file());
              return timeUnit.toMillis(
                  Conversions.fromByteBuffer(
                      Types.LongType.get(), scanTask.file().lowerBounds().get(eventTimeFieldId)));
            })
        .min(Comparator.comparingLong(l -> l))
        .get();
}
|
/** MICROSECONDS statistics must be divided by 1000 when converted to a millis watermark. */
@TestTemplate
public void testTimeUnit() throws IOException {
    assumeThat(columnName).isEqualTo("long_column");
    ColumnStatsWatermarkExtractor extractor =
        new ColumnStatsWatermarkExtractor(SCHEMA, columnName, TimeUnit.MICROSECONDS);
    assertThat(extractor.extractWatermark(split(0)))
        .isEqualTo(MIN_VALUES.get(0).get(columnName) / 1000L);
}
|
/**
 * Intercepts an outgoing Feign request before execution: if the target host/service is
 * allowed to be re-routed, delegates the call to the invoker service and, on success,
 * skips the original invocation with the substituted result.
 *
 * @param context the interception context; argument 0 is the Feign {@link Request}
 * @return the (possibly modified) context
 */
@Override
protected ExecuteContext doBefore(ExecuteContext context) throws Exception {
    LogUtils.printHttpRequestBeforePoint(context);
    final InvokerService invokerService = PluginServiceManager.getPluginService(InvokerService.class);
    Request request = (Request) context.getArguments()[0];
    Map<String, String> urlInfo = RequestInterceptorUtils.recoverUrl(request.url());
    // Bail out untouched when the host/service is not covered by the white/black-list policy.
    if (!PlugEffectWhiteBlackUtils.isAllowRun(urlInfo.get(HttpConstants.HTTP_URI_HOST),
            urlInfo.get(HttpConstants.HTTP_URI_SERVICE))) {
        return context;
    }
    RequestInterceptorUtils.printRequestLog("feign", urlInfo);
    Optional<Object> result = invokerService.invoke(
        buildInvokerFunc(context, request, urlInfo),
        ex -> ex,
        urlInfo.get(HttpConstants.HTTP_URI_SERVICE));
    if (result.isPresent()) {
        Object obj = result.get();
        if (obj instanceof Exception) {
            // The invoker surfaced a failure: record it on the context instead of skipping.
            LOGGER.log(Level.SEVERE, "request is error, uri is " + request.url(), (Exception) obj);
            context.setThrowableOut((Exception) obj);
            return context;
        }
        // Replace the original call's result with the invoker's and skip execution.
        context.skip(obj);
    }
    return context;
}
|
/**
 * Drives the interceptor through every white/black-list policy combination and asserts
 * that the request URL is left unchanged in each case (the interceptor must never
 * rewrite the visible request, only optionally substitute the invocation result).
 */
@Test
public void testFeignInvokeInterceptor() throws Exception {
    ExecuteContext context = ExecuteContext.forMemberMethod(new Object(), null, arguments, null, null);
    Request request = createRequest(HttpMethod.GET, url);
    arguments[0] = request;
    // No domain name is configured in the environment
    interceptor.doBefore(context);
    Request temp = (Request) context.getArguments()[0];
    Assert.assertEquals(url, temp.url());
    // Contains domain names, sets a single domain name, and does not set a blacklist or whitelist
    discoveryPluginConfig.setRealmName(realmName);
    interceptor.doBefore(context);
    temp = (Request) context.getArguments()[0];
    Assert.assertEquals(url, temp.url());
    // Contains domain names, sets multiple domain names, and does not set blacklist or whitelist
    discoveryPluginConfig.setRealmName(realmNames);
    interceptor.doBefore(context);
    temp = (Request) context.getArguments()[0];
    Assert.assertEquals(url, temp.url());
    // Contains domain names, and sets all do not pass the policy
    initStrategy(PlugEffectWhiteBlackConstants.STRATEGY_NONE, "");
    interceptor.doBefore(context);
    temp = (Request) context.getArguments()[0];
    Assert.assertEquals(url, temp.url());
    // Contains domain names, and sets the blacklist to include the corresponding service name
    initStrategy(PlugEffectWhiteBlackConstants.STRATEGY_BLACK, "zookeeper-provider-demo");
    interceptor.doBefore(context);
    temp = (Request) context.getArguments()[0];
    Assert.assertEquals(url, temp.url());
    // Contains domain names, and the blacklist does not contain the corresponding service name
    initStrategy(PlugEffectWhiteBlackConstants.STRATEGY_BLACK, "service1");
    Mockito.when(invokerService.invoke(null, null, "zookeeper-provider-demo"))
        .thenReturn(Optional.ofNullable(new Object()));
    interceptor.doBefore(context);
    temp = (Request) context.getArguments()[0];
    Assert.assertEquals(url, temp.url());
    // Contains the domain name, and sets the whitelist to include the corresponding service name
    initStrategy(PlugEffectWhiteBlackConstants.STRATEGY_WHITE, "zookeeper-provider-demo");
    Mockito.when(invokerService.invoke(null, null, "zookeeper-provider-demo"))
        .thenReturn(Optional.ofNullable(new Object()));
    interceptor.doBefore(context);
    temp = (Request) context.getArguments()[0];
    Assert.assertEquals(url, temp.url());
    // If the domain name is included, the whitelist does not contain the corresponding service name
    initStrategy(PlugEffectWhiteBlackConstants.STRATEGY_WHITE, "service1");
    interceptor.doBefore(context);
    temp = (Request) context.getArguments()[0];
    Assert.assertEquals(url, temp.url());
    // Contains domain names, and sets all through policies
    initStrategy(PlugEffectWhiteBlackConstants.STRATEGY_ALL, "service1");
    Mockito.when(invokerService.invoke(null, null, "zookeeper-provider-demo"))
        .thenReturn(Optional.ofNullable(new Object()));
    interceptor.doBefore(context);
    temp = (Request) context.getArguments()[0];
    Assert.assertEquals(url, temp.url());
}
|
/**
 * Evaluates {@code luaScript} with no keys; convenience overload delegating to the
 * full {@code eval} variant with an empty key list.
 */
@Override
public <R> R eval(Mode mode, String luaScript, ReturnType returnType) {
    return eval(mode, luaScript, returnType, Collections.emptyList());
}
|
/**
 * Builds a lex-sorted set of 100 tab-separated entries and runs a Lua script doing
 * three ZREVRANGEBYLEX queries with different LIMITs; the MULTI return type must yield
 * one list per query with the requested sizes.
 */
@Test
public void testMulti() {
    RLexSortedSet idx2 = redisson.getLexSortedSet("ABCD17436");
    Long l = Long.valueOf("1506524856000");
    for (int i = 0; i < 100; i++) {
        String s = "DENY" + "\t" + "TESTREDISSON" + "\t"
            + Long.valueOf(l) + "\t" + "helloworld_hongqin";
        idx2.add(s);
        l = l + 1;
    }
    // '[' prefix marks inclusive lex bounds; quotes are required inside the Lua source.
    String max = "'[DENY" + "\t" + "TESTREDISSON" + "\t" + "1506524856099'";
    String min = "'[DENY" + "\t" + "TESTREDISSON" + "\t" + "1506524856000'";
    String luaScript1= "local d = {}; d[1] = redis.call('zrevrangebylex','ABCD17436'," +max+","+min+",'LIMIT',0,5); ";
    luaScript1= luaScript1 + " d[2] = redis.call('zrevrangebylex','ABCD17436'," +max+","+min+",'LIMIT',0,15); ";
    luaScript1= luaScript1 + " d[3] = redis.call('zrevrangebylex','ABCD17436'," +max+","+min+",'LIMIT',0,25); ";
    luaScript1 = luaScript1 + " return d;";
    List<List<Object>> objs = redisson.getScript(StringCodec.INSTANCE).eval(RScript.Mode.READ_ONLY,
        luaScript1,
        RScript.ReturnType.MULTI, Collections.emptyList());
    assertThat(objs).hasSize(3);
    assertThat(objs.get(0)).hasSize(5);
    assertThat(objs.get(1)).hasSize(15);
    assertThat(objs.get(2)).hasSize(25);
}
|
/**
 * Executes CREATE TABLE LIKE through the catalog's connector metadata.
 *
 * <p>If the target table already exists the statement is a no-op under IF NOT EXISTS,
 * otherwise a "table exists" DDL error is reported.
 *
 * @throws DdlException if the catalog is unknown or the table already exists
 */
public void createTableLike(CreateTableLikeStmt stmt) throws DdlException {
    String catalogName = stmt.getCatalogName();
    Optional<ConnectorMetadata> connectorMetadata = getOptionalMetadata(catalogName);
    if (connectorMetadata.isPresent()) {
        String dbName = stmt.getDbName();
        String tableName = stmt.getTableName();
        if (tableExists(catalogName, dbName, tableName)) {
            if (stmt.isSetIfNotExists()) {
                LOG.info("create table[{}] which already exists", tableName);
                return;
            } else {
                ErrorReport.reportDdlException(ErrorCode.ERR_TABLE_EXISTS_ERROR, tableName);
            }
        }
        connectorMetadata.get().createTableLike(stmt);
    } else {
        throw new DdlException("Invalid catalog " + catalogName + " , ConnectorMetadata doesn't exist");
    }
}
|
/**
 * End-to-end check of CREATE TABLE LIKE against a mocked Hive metadata manager:
 * unknown catalogs fail, existing tables fail without IF NOT EXISTS, and both the
 * plain and IF-NOT-EXISTS success paths complete without error. The global metadata
 * manager is swapped in for the duration of the test and restored at the end.
 */
@Test
public void testHiveCreateTableLike() throws Exception {
    // Local mock that pretends hive_tbl and hive_tbl_1 exist in hive_catalog.hive_db.
    class MockedHiveMetadataMgr extends MockedMetadataMgr {
        public MockedHiveMetadataMgr(LocalMetastore localMetastore, ConnectorMgr connectorMgr) {
            super(localMetastore, connectorMgr);
        }
        @Override
        public com.starrocks.catalog.Database getDb(String catalogName, String dbName) {
            return new com.starrocks.catalog.Database(0, "hive_db", "s3://test-db/");
        }
        @Override
        public com.starrocks.catalog.Table getTable(String catalogName, String dbName, String tblName) {
            List<FieldSchema> partKeys = Lists.newArrayList(new FieldSchema("col1", "INT", ""));
            List<FieldSchema> unPartKeys = Lists.newArrayList(new FieldSchema("col2", "INT", ""));
            String hdfsPath = "hdfs://127.0.0.1:10000/hive";
            StorageDescriptor sd = new StorageDescriptor();
            sd.setInputFormat(MAPRED_PARQUET_INPUT_FORMAT_CLASS);
            sd.setCols(unPartKeys);
            sd.setLocation(hdfsPath);
            Table msTable = new Table();
            msTable.setPartitionKeys(partKeys);
            msTable.setSd(sd);
            msTable.setTableType("MANAGED_TABLE");
            msTable.setTableName("hive_tbl");
            msTable.setDbName("hive_db");
            // NOTE(review): millis truncated to int — fine for fixture data, not a real timestamp.
            int createTime = (int) System.currentTimeMillis();
            msTable.setCreateTime(createTime);
            return HiveMetastoreApiConverter.toHiveTable(msTable, "hive_catalog");
        }
        @Override
        public boolean tableExists(String catalogName, String dbName, String tblName) {
            return (catalogName.equals("hive_catalog") && dbName.equals("hive_db") && tblName.equals("hive_tbl")) ||
                (catalogName.equals("hive_catalog") && dbName.equals("hive_db") && tblName.equals("hive_tbl_1"));
        }
    }
    ConnectContext connectContext = AnalyzeTestUtil.getConnectContext();
    MetadataMgr metadataMgr = GlobalStateMgr.getCurrentState().getMetadataMgr();
    MockedHiveMetadataMgr mockedHiveMetadataMgr = new MockedHiveMetadataMgr(
        connectContext.getGlobalStateMgr().getLocalMetastore(),
        connectContext.getGlobalStateMgr().getConnectorMgr());
    // set to mockedHiveMetadataMgr to pass Analyzer check
    GlobalStateMgr.getCurrentState().setMetadataMgr(mockedHiveMetadataMgr);
    MockedHiveMetadata mockedHiveMetadata = new MockedHiveMetadata();
    mockedHiveMetadataMgr.registerMockedMetadata("hive_catalog", mockedHiveMetadata);
    // Unknown catalog -> DdlException
    String stmt = "create external table hive_catalog_1.hive_db.hive_tbl_1 like hive_catalog.hive_db.hive_tbl";
    CreateTableLikeStmt createTableLikeStmt =
        (CreateTableLikeStmt) UtFrameUtils.parseStmtWithNewParser(stmt, AnalyzeTestUtil.getConnectContext());
    try {
        mockedHiveMetadataMgr.createTableLike(createTableLikeStmt);
    } catch (Exception e) {
        Assert.assertTrue(e instanceof DdlException);
        Assert.assertTrue(e.getMessage().contains("Invalid catalog hive_catalog_1"));
    }
    // Existing target without IF NOT EXISTS -> "already exists" error
    stmt = "create external table hive_catalog.hive_db.hive_tbl_1 like hive_catalog.hive_db.hive_table";
    createTableLikeStmt =
        (CreateTableLikeStmt) UtFrameUtils.parseStmtWithNewParser(stmt, AnalyzeTestUtil.getConnectContext());
    try {
        mockedHiveMetadataMgr.createTableLike(createTableLikeStmt);
    } catch (Exception e) {
        Assert.assertTrue(e instanceof DdlException);
        Assert.assertTrue(e.getMessage().contains("Table 'hive_tbl_1' already exists"));
    }
    // Fresh target -> success, no exception expected
    stmt = "create external table hive_catalog.hive_db.hive_tbl_2 like hive_catalog.hive_db.hive_tbl";
    createTableLikeStmt =
        (CreateTableLikeStmt) UtFrameUtils.parseStmtWithNewParser(stmt, AnalyzeTestUtil.getConnectContext());
    try {
        mockedHiveMetadataMgr.createTableLike(createTableLikeStmt);
    } catch (Exception e) {
        Assert.assertNull(e);
    }
    // Existing target with IF NOT EXISTS -> silent no-op
    stmt = "create external table if not exists hive_catalog.hive_db.hive_tbl_1 like hive_catalog.hive_db.hive_table";
    createTableLikeStmt =
        (CreateTableLikeStmt) UtFrameUtils.parseStmtWithNewParser(stmt, AnalyzeTestUtil.getConnectContext());
    try {
        mockedHiveMetadataMgr.createTableLike(createTableLikeStmt);
    } catch (Exception e) {
        Assert.assertNull(e);
    }
    // Fresh target with IF NOT EXISTS -> success
    stmt = "create external table if not exists hive_catalog.hive_db.hive_tbl_2 like hive_catalog.hive_db.hive_table";
    createTableLikeStmt =
        (CreateTableLikeStmt) UtFrameUtils.parseStmtWithNewParser(stmt, AnalyzeTestUtil.getConnectContext());
    try {
        mockedHiveMetadataMgr.createTableLike(createTableLikeStmt);
    } catch (Exception e) {
        Assert.assertNull(e);
    }
    // set back to original metadataMrg
    GlobalStateMgr.getCurrentState().setMetadataMgr(metadataMgr);
}
|
/**
 * Resolves compatibility against a previous serializer snapshot: any non-Avro snapshot
 * is incompatible; otherwise the decision is delegated to the Avro schema comparison.
 */
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
        TypeSerializerSnapshot<T> oldSerializerSnapshot) {
    if (oldSerializerSnapshot instanceof AvroSerializerSnapshot) {
        final AvroSerializerSnapshot<?> previousSnapshot =
                (AvroSerializerSnapshot<?>) oldSerializerSnapshot;
        return resolveSchemaCompatibility(previousSnapshot.schema, schema);
    }
    return TypeSerializerSchemaCompatibility.incompatible();
}
|
/**
 * Reads serializer snapshots written by past Flink versions from disk and asserts the
 * current serializer is compatible as-is with each of them.
 */
@Test
void restorePastSnapshots() throws IOException {
    for (int pastVersion : PAST_VERSIONS) {
        AvroSerializer<GenericRecord> currentSerializer =
            new AvroSerializer<>(GenericRecord.class, Address.getClassSchema());
        DataInputView in =
            new DataInputDeserializer(
                Files.readAllBytes(getSerializerSnapshotFilePath(pastVersion)));
        TypeSerializerSnapshot<GenericRecord> restored =
            TypeSerializerSnapshotSerializationUtil.readSerializerSnapshot(
                in, AvroSerializer.class.getClassLoader());
        assertThat(
                currentSerializer
                    .snapshotConfiguration()
                    .resolveSchemaCompatibility(restored))
            .is(isCompatibleAsIs());
    }
}
|
/**
 * Parses a connection URI into a {@link Path}.
 *
 * <p>A blank default path maps to the root directory; a container-style default path
 * becomes a volume directory; anything else has its path kind detected from the
 * trailing delimiter.
 *
 * @throws HostParserException if the URI cannot be parsed into a host
 */
public Path parse(final String uri) throws HostParserException {
    final Host host = new HostParser(factory).get(uri);
    if(StringUtils.isBlank(host.getDefaultPath())) {
        return new Path(String.valueOf(Path.DELIMITER), EnumSet.of((Path.Type.directory)));
    }
    switch(new ContainerPathKindDetector(host.getProtocol().getFeature(PathContainerService.class)).detect(host.getDefaultPath())) {
        case directory:
            // Container roots are both directory and volume.
            return new Path(PathNormalizer.normalize(host.getDefaultPath()), EnumSet.of(Path.Type.directory,
                Path.Type.volume));
    }
    return new Path(PathNormalizer.normalize(host.getDefaultPath()), EnumSet.of(
        new DelimiterPathKindDetector().detect(host.getDefaultPath())));
}
|
/**
 * Parses container URIs using a registered Rackspace profile: a container path yields
 * a volume directory; an empty path yields the root.
 */
@Test
public void testParseProfile() throws Exception {
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new SwiftProtocol())));
    final ProfilePlistReader reader = new ProfilePlistReader(factory);
    final Profile profile = reader.read(
        this.getClass().getResourceAsStream("/Rackspace US.cyberduckprofile")
    );
    assertNotNull(profile);
    factory.register(profile);
    final CommandLineParser parser = new PosixParser();
    final CommandLine input = parser.parse(new Options(), new String[]{});
    assertEquals(new Path("/cdn.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)),
        new CommandLinePathParser(input, factory).parse("rackspace://u@cdn.cyberduck.ch/"));
    assertEquals(new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)),
        new CommandLinePathParser(input, factory).parse("rackspace:///"));
}
|
/**
 * Returns the literal value of the value expression at {@code index}: a bound
 * parameter for parameter markers, the literal itself for literal segments, or
 * empty for any other expression kind. A {@code null} value yields empty.
 */
public Optional<Object> getLiteralValue(final int index) {
    final ExpressionSegment expression = valueExpressions.get(index);
    if (expression instanceof ParameterMarkerExpressionSegment) {
        final Object boundValue = parameters.get(getParameterIndex((ParameterMarkerExpressionSegment) expression));
        return Optional.ofNullable(boundValue);
    }
    return expression instanceof LiteralExpressionSegment
            ? Optional.ofNullable(((LiteralExpressionSegment) expression).getLiterals())
            : Optional.empty();
}
|
/** A parameter marker bound to null must resolve to Optional.empty(), not NPE. */
@Test
void assertGetLiteralValueWhenParameterIsNull() {
    Collection<ExpressionSegment> assignments = makeParameterMarkerExpressionSegment();
    int parametersOffset = 0;
    InsertValueContext insertValueContext = new InsertValueContext(assignments, Collections.singletonList(null), parametersOffset);
    Optional<Object> literalValue = insertValueContext.getLiteralValue(0);
    assertThat(false, is(literalValue.isPresent()));
}
|
/**
 * Streams all documents matching {@code query} in {@code sort} order.
 *
 * <p>The stream wraps an open database cursor; callers MUST close it
 * (e.g. try-with-resources) so the cursor is released.
 */
protected Stream<DTO> streamQueryWithSort(Bson query, Bson sort) {
    final DBCursor<DTO> cursor = db.find(query).sort(sort);
    return Streams.stream((Iterable<DTO>) cursor).onClose(cursor::close);
}
|
/** Matching documents must come back filtered by the IN query and sorted descending. */
@Test
public void streamQueryWithSort() {
    dbService.save(newDto("hello1"));
    dbService.save(newDto("hello2"));
    dbService.save(newDto("hello3"));
    dbService.save(newDto("hello4"));
    dbService.save(newDto("hello5"));
    final DBQuery.Query query = DBQuery.in("title", "hello5", "hello3", "hello1");
    final DBSort.SortBuilder sort = DBSort.desc("title");
    // try-with-resources closes the stream and thus the underlying cursor.
    try (final Stream<TestDTO> cursor = dbService.streamQueryWithSort(query, sort)) {
        final List<TestDTO> list = cursor.collect(Collectors.toList());
        assertThat(list)
            .hasSize(3)
            .extracting("title")
            .containsExactly("hello5", "hello3", "hello1");
    }
}
|
/**
 * Rebalance callback: inspects the assignor error code set during assignment and either
 * fails the rebalance with the matching exception, triggers shutdown, or transitions the
 * stream thread to PARTITIONS_ASSIGNED on success.
 *
 * @throws MissingSourceTopicException if source topic metadata was incomplete
 * @throws TaskAssignmentException     on assignment errors or unknown error codes
 */
@Override
public void onPartitionsAssigned(final Collection<TopicPartition> partitions) {
    // NB: all task management is already handled by:
    // org.apache.kafka.streams.processor.internals.StreamsPartitionAssignor.onAssignment
    // Read the (potentially concurrently updated) error code once so every branch below
    // acts on a single consistent value instead of re-reading it per comparison.
    final int errorCode = assignmentErrorCode.get();
    if (errorCode == AssignorError.INCOMPLETE_SOURCE_TOPIC_METADATA.code()) {
        log.error("Received error code {}. {}",
            AssignorError.INCOMPLETE_SOURCE_TOPIC_METADATA.codeName(),
            AssignorError.INCOMPLETE_SOURCE_TOPIC_METADATA.description());
        taskManager.handleRebalanceComplete();
        throw new MissingSourceTopicException("One or more source topics were missing during rebalance");
    } else if (errorCode == AssignorError.VERSION_PROBING.code()) {
        log.info("Received version probing code {}", AssignorError.VERSION_PROBING);
    } else if (errorCode == AssignorError.ASSIGNMENT_ERROR.code()) {
        log.error("Received error code {}", AssignorError.ASSIGNMENT_ERROR);
        taskManager.handleRebalanceComplete();
        throw new TaskAssignmentException("Hit an unexpected exception during task assignment phase of rebalance");
    } else if (errorCode == AssignorError.SHUTDOWN_REQUESTED.code()) {
        log.error("A Kafka Streams client in this Kafka Streams application is requesting to shutdown the application");
        taskManager.handleRebalanceComplete();
        streamThread.shutdownToError();
        return;
    } else if (errorCode != AssignorError.NONE.code()) {
        log.error("Received unknown error code {}", errorCode);
        throw new TaskAssignmentException("Hit an unrecognized exception during rebalance");
    }
    streamThread.setState(State.PARTITIONS_ASSIGNED);
    streamThread.setPartitionAssignedTime(time.milliseconds());
    taskManager.handleRebalanceComplete();
}
|
/** ASSIGNMENT_ERROR must surface as TaskAssignmentException after completing the rebalance. */
@Test
public void shouldThrowTaskAssignmentException() {
    assignmentErrorCode.set(AssignorError.ASSIGNMENT_ERROR.code());
    final TaskAssignmentException exception = assertThrows(
        TaskAssignmentException.class,
        () -> streamsRebalanceListener.onPartitionsAssigned(Collections.emptyList())
    );
    assertThat(exception.getMessage(), is("Hit an unexpected exception during task assignment phase of rebalance"));
    verify(taskManager).handleRebalanceComplete();
}
|
/**
 * Attempts to claim offset {@code i}. Claims must be strictly increasing and at or
 * after the range start; offsets at or beyond the range end are recorded as attempted
 * but not claimed, and the method returns {@code false}.
 *
 * @throws IllegalArgumentException on non-monotonic claims or offsets before the range
 */
@Override
public boolean tryClaim(Long i) {
    checkArgument(
        lastAttemptedOffset == null || i > lastAttemptedOffset,
        "Trying to claim offset %s while last attempted was %s",
        i,
        lastAttemptedOffset);
    checkArgument(
        i >= range.getFrom(), "Trying to claim offset %s before start of the range %s", i, range);
    lastAttemptedOffset = i;
    // No respective checkArgument for i < range.to() - it's ok to try claiming offsets beyond it.
    final boolean withinRange = i < range.getTo();
    if (withinRange) {
        lastClaimedOffset = i;
    }
    return withinRange;
}
|
/** Offsets inside [100, 200) are claimable; the exclusive end offset 200 is not. */
@Test
public void testTryClaim() throws Exception {
    OffsetRange range = new OffsetRange(100, 200);
    OffsetRangeTracker tracker = new OffsetRangeTracker(range);
    assertEquals(range, tracker.currentRestriction());
    assertTrue(tracker.tryClaim(100L));
    assertTrue(tracker.tryClaim(150L));
    assertTrue(tracker.tryClaim(199L));
    assertFalse(tracker.tryClaim(200L));
}
|
/**
 * Evaluates the arithmetic {@code expression} and returns its numeric result.
 * A fresh Calculator is created per call, so the method is stateless.
 */
public static double conversion(String expression) {
    return (new Calculator()).calculate(expression);
}
|
/** Regression for issue #2964: "(11+2)12" must be read as implicit multiplication, 13*12. */
@Test
public void issue2964Test() {
    // https://github.com/dromara/hutool/issues/2964
    final double calcValue = Calculator.conversion("(11+2)12");
    assertEquals(156D, calcValue, 0.001);
}
|
/**
 * Builds a {@code ProxyProvider} from system-style properties: HTTP/HTTPS proxy host
 * settings win over SOCKS; {@code null} when no proxy host property is present.
 *
 * @throws NullPointerException if {@code properties} is null
 */
@Nullable
static ProxyProvider createFrom(Properties properties) {
    Objects.requireNonNull(properties, "properties");
    final boolean httpProxyConfigured =
            properties.containsKey(HTTP_PROXY_HOST) || properties.containsKey(HTTPS_PROXY_HOST);
    if (httpProxyConfigured) {
        return createHttpProxyFrom(properties);
    }
    return properties.containsKey(SOCKS_PROXY_HOST) ? createSocksProxyFrom(properties) : null;
}
|
/** An empty https.proxyPort must be rejected with a descriptive IllegalArgumentException. */
@Test
void proxyFromSystemProperties_errorWhenHttpsPortIsEmptyString() {
    Properties properties = new Properties();
    properties.setProperty(ProxyProvider.HTTPS_PROXY_HOST, "host");
    properties.setProperty(ProxyProvider.HTTPS_PROXY_PORT, "");
    assertThatIllegalArgumentException()
        .isThrownBy(() -> ProxyProvider.createFrom(properties))
        .withMessage("expected system property https.proxyPort to be a number but got empty string");
}
|
/**
 * Returns an ordered {@link IntStream} of {@code 0} (inclusive) to {@code size}
 * (exclusive) — every element of this range set in ascending order.
 */
@Override
public IntStream intStream() {
    return IntStream.range(0, size);
}
|
/** A RangeSet of 5 streams 0..4, whose sum is 10. */
@Test
public void intStream() throws Exception {
    IntSet rs = new RangeSet(5);
    assertEquals(10, rs.intStream().sum());
}
|
/**
 * Converts a {@code BranchRegisterResponse} model into its protobuf representation,
 * nesting message type, result code, exception code and branch id; a null message is
 * mapped to the empty string because protobuf fields cannot hold null.
 */
@Override
public BranchRegisterResponseProto convert2Proto(BranchRegisterResponse branchRegisterResponse) {
    final short typeCode = branchRegisterResponse.getTypeCode();
    final AbstractMessageProto messageProto = AbstractMessageProto.newBuilder()
        .setMessageType(MessageTypeProto.forNumber(typeCode))
        .build();
    final String msg = branchRegisterResponse.getMsg();
    final String safeMsg = (msg == null) ? "" : msg;
    final AbstractResultMessageProto resultMessageProto = AbstractResultMessageProto.newBuilder()
        .setMsg(safeMsg)
        .setResultCode(ResultCodeProto.valueOf(branchRegisterResponse.getResultCode().name()))
        .setAbstractMessage(messageProto)
        .build();
    final AbstractTransactionResponseProto transactionResponseProto =
        AbstractTransactionResponseProto.newBuilder()
            .setAbstractResultMessage(resultMessageProto)
            .setTransactionExceptionCode(
                TransactionExceptionCodeProto.valueOf(
                    branchRegisterResponse.getTransactionExceptionCode().name()))
            .build();
    return BranchRegisterResponseProto.newBuilder()
        .setAbstractTransactionResponse(transactionResponseProto)
        .setBranchId(branchRegisterResponse.getBranchId())
        .build();
}
|
/** Round-trip model -> proto -> model must preserve every field. */
@Test
public void convert2Proto() {
    BranchRegisterResponse branchRegisterResponse = new BranchRegisterResponse();
    branchRegisterResponse.setTransactionExceptionCode(TransactionExceptionCode.GlobalTransactionNotActive);
    branchRegisterResponse.setResultCode(ResultCode.Failed);
    branchRegisterResponse.setMsg("msg");
    branchRegisterResponse.setBranchId(123);
    BranchRegisterResponseConvertor convertor = new BranchRegisterResponseConvertor();
    BranchRegisterResponseProto proto = convertor.convert2Proto(
        branchRegisterResponse);
    BranchRegisterResponse real = convertor.convert2Model(proto);
    assertThat(real.getTransactionExceptionCode()).isEqualTo(branchRegisterResponse.getTransactionExceptionCode());
    assertThat(real.getResultCode()).isEqualTo(branchRegisterResponse.getResultCode());
    assertThat(real.getMsg()).isEqualTo(branchRegisterResponse.getMsg());
    assertThat(real.getBranchId()).isEqualTo(branchRegisterResponse.getBranchId());
}
|
/**
 * Closes the underlying stream, if still open. Synchronized and idempotent:
 * subsequent calls are no-ops.
 */
@Override
synchronized public void close() {
    if (stream == null) {
        return;
    }
    IOUtils.cleanupWithLogger(LOG, stream);
    stream = null;
}
|
/**
 * Two successive nextInt() draws must eventually differ; the timeout guards against a
 * broken source that keeps returning the same value.
 */
@Test(timeout=120000)
public void testRandomInt() throws Exception {
    OsSecureRandom random = getOsSecureRandom();
    int rand1 = random.nextInt();
    int rand2 = random.nextInt();
    // Re-draw until the values diverge (bounded in practice by the test timeout).
    while (rand1 == rand2) {
        rand2 = random.nextInt();
    }
    random.close();
}
|
/**
 * Resolves a symbol table by its composite name: checks the default table, then the
 * local cache, and finally — for remote tables only — fetches it from the serving node
 * and caches the result.
 *
 * @param symbolTableName encoded name carrying server URI, table name and locality
 * @return the resolved symbol table
 * @throws IllegalStateException if the table cannot be resolved (unknown local table,
 *         remote fetch failure, or malformed server URI)
 */
@Override
public SymbolTable getSymbolTable(String symbolTableName)
{
    try
    {
        SymbolTableMetadata metadata = _symbolTableNameHandler.extractMetadata(symbolTableName);
        String serverNodeUri = metadata.getServerNodeUri();
        String tableName = metadata.getSymbolTableName();
        boolean isRemote = metadata.isRemote();
        // Check if it's the default table name.
        if (tableName.equals(_defaultResponseSymbolTableName))
        {
            return _defaultResponseSymbolTable;
        }
        // First check the cache.
        SymbolTable symbolTable = _symbolTableNameToSymbolTableCache.getIfPresent(tableName);
        if (symbolTable != null)
        {
            return symbolTable;
        }
        // If this is a local table, and we didn't find it in the cache, cry foul.
        if (!isRemote)
        {
            throw new IllegalStateException("Unable to fetch symbol table with name: " + symbolTableName);
        }
        // Ok, we didn't find it in the cache, let's go query the service the table was served from.
        URI symbolTableUri = new URI(serverNodeUri + "/" + RestLiSymbolTableRequestHandler.SYMBOL_TABLE_URI_PATH + "/" + tableName);
        symbolTable = fetchRemoteSymbolTable(symbolTableUri, Collections.emptyMap(), false);
        if (symbolTable != null)
        {
            // Cache the retrieved table.
            _symbolTableNameToSymbolTableCache.put(tableName, symbolTable);
            return symbolTable;
        }
        // Fall through: a null remote fetch result reaches the trailing throw below.
    }
    catch (URISyntaxException ex)
    {
        LOGGER.error("Failed to construct symbol table URI from symbol table name: " + symbolTableName, ex);
    }
    throw new IllegalStateException("Unable to fetch symbol table with name: " + symbolTableName);
}
|
/**
 * A successful remote fetch must deserialize the table and cache it; the second lookup
 * must be served from the cache (the mocked client would throw on any further request).
 */
@Test
public void testGetRemoteSymbolTableFetchSuccess() throws IOException
{
    RestResponseBuilder builder = new RestResponseBuilder();
    builder.setStatus(200);
    SymbolTable symbolTable = new InMemorySymbolTable("https://OtherHost:100/service|Test--332004310",
        Collections.unmodifiableList(Arrays.asList("Haha", "Hehe")));
    builder.setEntity(SymbolTableSerializer.toByteString(ContentType.PROTOBUF2.getCodec(), symbolTable));
    builder.setHeader(RestConstants.HEADER_CONTENT_TYPE, ContentType.PROTOBUF2.getHeaderKey());
    when(_client.restRequest(eq(new RestRequestBuilder(
        URI.create("https://OtherHost:100/service/symbolTable/Test--332004310"))
        .setHeaders(Collections.singletonMap(RestConstants.HEADER_FETCH_SYMBOL_TABLE, Boolean.TRUE.toString()))
        .build()))).thenReturn(CompletableFuture.completedFuture(builder.build()));
    SymbolTable remoteSymbolTable = _provider.getSymbolTable("https://OtherHost:100/service|Test--332004310");
    Assert.assertNotNull(remoteSymbolTable);
    // NOTE(review): expected name uses "Host" while the request used "OtherHost" —
    // presumably the provider renames tables to the local host; confirm against the provider.
    Assert.assertEquals("https://Host:100/service|Test--332004310", remoteSymbolTable.getName());
    Assert.assertEquals(2, remoteSymbolTable.size());
    // Subsequent fetch should not trigger network fetch and get the table from the cache.
    when(_client.restRequest(any(RestRequest.class))).thenThrow(new IllegalStateException());
    SymbolTable cachedSymbolTable = _provider.getSymbolTable("https://OtherHost:100/service|Test--332004310");
    Assert.assertSame(remoteSymbolTable, cachedSymbolTable);
}
|
/**
 * Resolves the partitioning strategy for {@code mapName} with the precedence:
 * attribute-based strategy (cached per map), explicit strategy instance from config,
 * configured strategy class (instantiated once and cached), or {@code null}.
 *
 * @return the resolved strategy, or {@code null} when nothing is configured
 */
@SuppressWarnings("checkstyle:NestedIfDepth")
@Nullable
public PartitioningStrategy getPartitioningStrategy(
    String mapName,
    PartitioningStrategyConfig config,
    final List<PartitioningAttributeConfig> attributeConfigs
) {
    if (attributeConfigs != null && !attributeConfigs.isEmpty()) {
        return cache.computeIfAbsent(mapName, k -> createAttributePartitionStrategy(attributeConfigs));
    }
    if (config != null && config.getPartitioningStrategy() != null) {
        return config.getPartitioningStrategy();
    }
    if (config != null && config.getPartitioningStrategyClass() != null) {
        PartitioningStrategy<?> strategy = cache.get(mapName);
        if (strategy != null) {
            return strategy;
        }
        try {
            // We don't use computeIfAbsent intentionally so that the map isn't blocked if the instantiation takes a
            // long time - it's user code
            strategy = ClassLoaderUtil.newInstance(configClassLoader, config.getPartitioningStrategyClass());
        } catch (Exception e) {
            throw ExceptionUtil.rethrow(e);
        }
        // NOTE(review): under a concurrent race two instances may be created and the
        // loser's instance returned here while the winner's stays cached — presumably
        // acceptable for stateless strategies; confirm.
        cache.putIfAbsent(mapName, strategy);
        return strategy;
    }
    return null;
}
|
// Verifies the per-map-name caching contract: two lookups with the same map
// name and class-based config must return the identical strategy instance.
@Test
public void whenStrategyForMapAlreadyDefined_getPartitioningStrategy_returnsSameInstance() {
    PartitioningStrategyConfig cfg = new PartitioningStrategyConfig();
    cfg.setPartitioningStrategyClass("com.hazelcast.partition.strategy.StringPartitioningStrategy");
    // when we have already obtained the partitioning strategy for a given map name
    PartitioningStrategy partitioningStrategy = partitioningStrategyFactory.getPartitioningStrategy(mapName, cfg, null);
    // then once we get it again with the same arguments, we retrieve the same instance
    PartitioningStrategy cachedPartitioningStrategy = partitioningStrategyFactory.getPartitioningStrategy(mapName, cfg, null);
    assertSame(partitioningStrategy, cachedPartitioningStrategy);
}
|
/**
 * Fans a getNodesToAttributes call out to every sub-cluster and merges the
 * per-cluster responses into one host-to-attributes view. Failures are
 * recorded in router metrics and the audit log before being rethrown.
 */
@Override
public GetNodesToAttributesResponse getNodesToAttributes(
    GetNodesToAttributesRequest request) throws YarnException, IOException {
  // Guard clause: a request without host names cannot be routed.
  if (request == null || request.getHostNames() == null) {
    routerMetrics.incrGetNodesToAttributesFailedRetrieved();
    String msg = "Missing getNodesToAttributes request or hostNames.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_NODESTOATTRIBUTES, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    RouterServerUtil.logAndThrowException(msg, null);
  }
  long beginTime = clock.getTime();
  ClientMethod method = new ClientMethod("getNodesToAttributes",
      new Class[] {GetNodesToAttributesRequest.class}, new Object[] {request});
  // Invoke the same method concurrently against all sub-clusters.
  Collection<GetNodesToAttributesResponse> responses = null;
  try {
    responses = invokeConcurrent(method, GetNodesToAttributesResponse.class);
  } catch (Exception ex) {
    routerMetrics.incrGetNodesToAttributesFailedRetrieved();
    String msg = "Unable to get nodes to attributes due to exception.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_NODESTOATTRIBUTES, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    RouterServerUtil.logAndThrowException(msg, ex);
  }
  routerMetrics.succeededGetNodesToAttributesRetrieved(clock.getTime() - beginTime);
  RouterAuditLogger.logSuccess(user.getShortUserName(), GET_NODESTOATTRIBUTES,
      TARGET_CLIENT_RM_SERVICE);
  // Collapse all sub-cluster maps into a single response.
  return RouterYarnClientUtils.mergeNodesToAttributesResponse(responses);
}
|
// Exercises getNodesToAttributes: a null request must be rejected, and a
// valid single-host request must return that host's centralized attributes.
@Test
public void testNodesToAttributes() throws Exception {
  LOG.info("Test FederationClientInterceptor : Get NodesToAttributes request.");
  // null request
  LambdaTestUtils.intercept(YarnException.class,
      "Missing getNodesToAttributes request or hostNames.",
      () -> interceptor.getNodesToAttributes(null));
  // normal request
  Set<String> hostNames = Collections.singleton("0-host1");
  GetNodesToAttributesResponse response =
      interceptor.getNodesToAttributes(GetNodesToAttributesRequest.newInstance(hostNames));
  Assert.assertNotNull(response);
  Map<String, Set<NodeAttribute>> nodeAttributeMap = response.getNodeToAttributes();
  Assert.assertNotNull(nodeAttributeMap);
  Assert.assertEquals(1, nodeAttributeMap.size());
  // The fixture cluster tags "0-host1" with this centralized GPU attribute.
  NodeAttribute gpu = NodeAttribute.newInstance(NodeAttribute.PREFIX_CENTRALIZED, "GPU",
      NodeAttributeType.STRING, "nvida");
  Assert.assertTrue(nodeAttributeMap.get("0-host1").contains(gpu));
}
|
/**
 * Two TransMeta objects are equal when {@code compare} ranks them identically;
 * any non-TransMeta object is never equal.
 */
@Override
public boolean equals( Object obj ) {
  if ( obj instanceof TransMeta ) {
    // Delegate the field-by-field comparison to compare().
    return compare( this, (TransMeta) obj ) == 0;
  }
  return false;
}
|
// equals() must reject unrelated types and treat two TransMeta instances
// built from the same name/filename arguments as equal.
@Test
public void testEquals() {
  TransMeta transMeta = new TransMeta( "1", "2" );
  assertNotEquals( "somethingelse", transMeta );
  assertEquals( transMeta, new TransMeta( "1", "2" ) );
}
|
/**
 * Sends the given consumer offsets as part of the current transaction and
 * commits the transaction. Requires exactly-once processing to be enabled.
 *
 * @param offsets               per-partition offsets to attach to the transaction
 * @param consumerGroupMetadata group metadata passed to the broker (downgraded
 *                              to group-id-only under EOS-v1)
 * @throws IllegalStateException  if exactly-once is not enabled
 * @throws TaskMigratedException  if the producer was fenced while committing
 * @throws TimeoutException       rethrown so the caller can apply task.timeout.ms
 * @throws StreamsException       for any other Kafka error during commit
 */
protected void commitTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets,
                                 final ConsumerGroupMetadata consumerGroupMetadata) {
    if (!eosEnabled()) {
        throw new IllegalStateException(formatException("Exactly-once is not enabled"));
    }
    // Lazily open a transaction if one is not already in flight.
    maybeBeginTransaction();
    try {
        // EOS-v2 assumes brokers are on version 2.5+ and thus can understand the full set of consumer group metadata
        // Thus if we are using EOS-v1 and can't make this assumption, we must downgrade the request to include only the group id metadata
        final ConsumerGroupMetadata maybeDowngradedGroupMetadata = processingMode == EXACTLY_ONCE_V2 ? consumerGroupMetadata : new ConsumerGroupMetadata(consumerGroupMetadata.groupId());
        producer.sendOffsetsToTransaction(offsets, maybeDowngradedGroupMetadata);
        producer.commitTransaction();
        // Only mark the transaction closed after a successful commit.
        transactionInFlight = false;
    } catch (final ProducerFencedException | InvalidProducerEpochException | CommitFailedException | InvalidPidMappingException error) {
        // Fencing-style errors mean another instance took over this task.
        throw new TaskMigratedException(
            formatException("Producer got fenced trying to commit a transaction"),
            error
        );
    } catch (final TimeoutException timeoutException) {
        // re-throw to trigger `task.timeout.ms`
        throw timeoutException;
    } catch (final KafkaException error) {
        throw new StreamsException(
            formatException("Error encountered trying to commit a transaction"),
            error
        );
    }
}
|
// A generic KafkaException from commitTransaction must surface as a
// StreamsException that wraps the original cause, after offsets were sent.
@Test
public void shouldThrowStreamsExceptionOnEosCommitTxError() {
  eosAlphaMockProducer.commitTransactionException = new KafkaException("KABOOM!");
  final StreamsException thrown = assertThrows(
      StreamsException.class,
      () -> eosAlphaStreamsProducer.commitTransaction(offsetsAndMetadata, new ConsumerGroupMetadata("appId"))
  );
  // Offsets are sent before the commit is attempted, so they must be recorded.
  assertThat(eosAlphaMockProducer.sentOffsets(), is(true));
  assertThat(thrown.getCause(), is(eosAlphaMockProducer.commitTransactionException));
  assertThat(
      thrown.getMessage(),
      is("Error encountered trying to commit a transaction [test]")
  );
}
|
/**
 * Translates a Connect schema into a Protobuf parsed schema, injecting the
 * configured schema full name first.
 */
@Override
public ParsedSchema fromConnectSchema(final Schema schema) {
  // Bug in ProtobufData means `fromConnectSchema` throws on the second invocation if using
  // default naming, so a fresh ProtobufData instance is built for every call.
  final ProtobufData protobufData = new ProtobufData(new ProtobufDataConfig(updatedConfigs));
  return protobufData.fromConnectSchema(injectSchemaFullName(schema));
}
|
// With the nullable-as-wrapper option enabled, optional primitives must be
// rendered as google.protobuf wrapper types in the generated proto3 schema.
@Test
public void shouldApplyNullableAsWrapper() {
  // Given:
  givenNullableAsWrapper();
  // When:
  final ParsedSchema schema = schemaTranslator.fromConnectSchema(CONNECT_SCHEMA_WITH_NULLABLE_PRIMITIVES);
  // Then:
  assertThat(schema.canonicalString(), is("syntax = \"proto3\";\n"
      + "\n"
      + "import \"google/protobuf/wrappers.proto\";\n"
      + "\n"
      + "message ConnectDefault1 {\n"
      + "  google.protobuf.Int32Value optional_int32 = 1;\n"
      + "  google.protobuf.BoolValue optional_boolean = 2;\n"
      + "  google.protobuf.StringValue optional_string = 3;\n"
      + "}\n"));
}
|
/**
 * Chooses the representation (serialized Data vs. deserialized object) that
 * query extractors should operate on for either the key or the value.
 *
 * @param key {@code true} to resolve the key side, {@code false} for the value side
 * @return the object the query Getters should be applied to
 */
@Override
protected Object getTargetObject(boolean key) {
    Object targetObject;
    if (key) {
        // keyData is never null
        // Portable/JSON/Compact keys are queried directly on their serialized form.
        if (keyData.isPortable() || keyData.isJson() || keyData.isCompact()) {
            targetObject = keyData;
        } else {
            targetObject = getKey();
        }
    } else {
        if (valueObject == null) {
            // No deserialized value available yet - resolve from serialized data.
            targetObject = getTargetObjectFromData();
        } else {
            if (valueObject instanceof PortableGenericRecord
                    || valueObject instanceof CompactGenericRecord) {
                // These two classes should be able to be handled by respective Getters
                // see PortableGetter and CompactGetter
                // We get into this branch when in memory format is Object and
                // - the cluster does not have PortableFactory configuration for Portable
                // - the cluster does not related classes for Compact
                targetObject = getValue();
            } else if (valueObject instanceof Portable
                    || serializationService.isCompactSerializable(valueObject)) {
                // Portable/Compact objects are queried on their serialized Data form.
                targetObject = getValueData();
            } else {
                // Note that targetObject can be PortableGenericRecord
                // and it will be handled with PortableGetter for query.
                // We get PortableGenericRecord here when in-memory format is OBJECT and
                // the cluster does not have PortableFactory configuration for the object's factory ID
                targetObject = getValue();
            }
        }
    }
    return targetObject;
}
|
// When the value is stored as serialized Data and the key flag is false,
// getTargetObject must resolve to the deserialized value object.
@Test
public void testGetTargetObject_givenValueIsData_whenKeyFlagIsFalse_thenReturnValueObject() {
  Data key = serializationService.toData("key");
  Data value = serializationService.toData("value");
  QueryableEntry entry = createEntry(key, value, newExtractor());
  Object targetObject = entry.getTargetObject(false);
  assertEquals("value", targetObject);
}
|
/**
 * Builds a HiveServer2 endpoint from the validated endpoint options.
 */
@Override
public SqlGatewayEndpoint createSqlGatewayEndpoint(Context context) {
    SqlGatewayEndpointFactoryUtils.EndpointFactoryHelper helper =
            SqlGatewayEndpointFactoryUtils.createEndpointFactoryHelper(this, context);
    ReadableConfig config = helper.getOptions();
    // Fail fast on invalid option combinations before constructing the endpoint.
    validate(config);
    // Thrift timeouts are configured as Durations but consumed as millis.
    int loginTimeoutMs = (int) config.get(THRIFT_LOGIN_TIMEOUT).toMillis();
    int backOffSlotLengthMs = (int) config.get(THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH).toMillis();
    return new HiveServer2Endpoint(
            context.getSqlGatewayService(),
            getInetSocketAddress(config),
            checkNotNull(config.get(THRIFT_MAX_MESSAGE_SIZE)),
            loginTimeoutMs,
            backOffSlotLengthMs,
            config.get(THRIFT_WORKER_THREADS_MIN),
            config.get(THRIFT_WORKER_THREADS_MAX),
            config.get(THRIFT_WORKER_KEEPALIVE_TIME),
            config.get(CATALOG_NAME),
            HiveCatalog.createHiveConf(config.get(CATALOG_HIVE_CONF_DIR), null),
            config.get(CATALOG_DEFAULT_DATABASE),
            config.get(MODULE_NAME));
}
|
// The factory must materialize exactly one HiveServer2Endpoint whose settings
// mirror the default configuration map.
@Test
public void testCreateHiveServer2Endpoint() throws Exception {
  assertThat(
      SqlGatewayEndpointFactoryUtils.createSqlGatewayEndpoint(
          service, Configuration.fromMap(getDefaultConfig())))
      .isEqualTo(
          Collections.singletonList(
              new HiveServer2Endpoint(
                  service,
                  new InetSocketAddress("localhost", port),
                  maxMessageSize,
                  (int) loginTimeout.toMillis(),
                  (int) backOffSlotLength.toMillis(),
                  minWorkerThreads,
                  maxWorkerThreads,
                  workerAliveDuration,
                  catalogName,
                  HiveTestUtils.createHiveConf(),
                  defaultDatabase,
                  moduleName)));
}
|
/**
 * Converts the outgoing protobuf message to its JSON representation (wrapped
 * in a DynamicMessage) before forwarding it to the delegate call.
 */
@SuppressWarnings("unchecked")
@Override
public void sendMessage(final P message) {
    // Nulls are forwarded untouched - there is nothing to convert.
    if (message == null) {
        delegate().sendMessage(null);
        return;
    }
    try {
        final String json = JsonFormat.printer()
                .includingDefaultValueFields()
                .preservingProtoFieldNames()
                .print((MessageOrBuilder) message);
        final DynamicMessage converted = JsonMessage.buildJsonMessage(json);
        LOG.debug("begin send json response");
        delegate().sendMessage((P) converted);
    } catch (InvalidProtocolBufferException e) {
        throw Status.INTERNAL.withDescription(e.getMessage()).asRuntimeException();
    }
}
|
// Smoke test: forwarding a response through the JSON-converting call
// must complete without throwing.
@Test
public void sentMsgTest() {
  testJsonForwardingServerCall.sendMessage(new TestResponse("test-response"));
}
|
/**
 * Incrementally decodes SPDY frames from the supplied buffer and dispatches
 * decode events to the delegate. The decoder is a resumable state machine
 * driven by {@code state}: whenever the buffer does not yet hold enough bytes
 * for the current state, the method returns and resumes on the next call.
 *
 * @param buffer accumulated network bytes; consumed as frames are decoded
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch(state) {
            case READ_COMMON_HEADER:
                // Every frame starts with an 8-byte common header.
                if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                    return;
                }
                int frameOffset = buffer.readerIndex();
                int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
                int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
                buffer.skipBytes(SPDY_HEADER_SIZE);
                // The top bit of the first byte distinguishes control from data frames.
                boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
                int version;
                int type;
                if (control) {
                    // Decode control frame common header
                    version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                    type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                    streamId = 0; // Default to session Stream-ID
                } else {
                    // Decode data frame common header
                    version = spdyVersion; // Default to expected version
                    type = SPDY_DATA_FRAME;
                    streamId = getUnsignedInt(buffer, frameOffset);
                }
                flags = buffer.getByte(flagsOffset);
                length = getUnsignedMedium(buffer, lengthOffset);
                // Check version first then validity
                if (version != spdyVersion) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SPDY Version");
                } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid Frame Error");
                } else {
                    state = getNextState(type, length);
                }
                break;
            case READ_DATA_FRAME:
                if (length == 0) {
                    // Zero-length data frame: emit an empty buffer immediately.
                    state = State.READ_COMMON_HEADER;
                    delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                    break;
                }
                // Generate data frames that do not exceed maxChunkSize
                int dataLength = Math.min(maxChunkSize, length);
                // Wait until entire frame is readable
                if (buffer.readableBytes() < dataLength) {
                    return;
                }
                ByteBuf data = buffer.alloc().buffer(dataLength);
                data.writeBytes(buffer, dataLength);
                length -= dataLength;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                }
                // FIN is only meaningful on the final chunk of the frame.
                last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
                delegate.readDataFrame(streamId, last, data);
                break;
            case READ_SYN_STREAM_FRAME:
                // SYN_STREAM carries a 10-byte fixed part before the header block.
                if (buffer.readableBytes() < 10) {
                    return;
                }
                int offset = buffer.readerIndex();
                streamId = getUnsignedInt(buffer, offset);
                int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
                // Priority is the top 3 bits of the 9th byte.
                byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
                last = hasFlag(flags, SPDY_FLAG_FIN);
                boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
                buffer.skipBytes(10);
                length -= 10;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_STREAM Frame");
                } else {
                    // Remaining bytes of the frame are the compressed header block.
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
                }
                break;
            case READ_SYN_REPLY_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_REPLY Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynReplyFrame(streamId, last);
                }
                break;
            case READ_RST_STREAM_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                // Both the stream id and the status code must be non-zero.
                if (streamId == 0 || statusCode == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid RST_STREAM Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readRstStreamFrame(streamId, statusCode);
                }
                break;
            case READ_SETTINGS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
                numSettings = getUnsignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                length -= 4;
                // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
                if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SETTINGS Frame");
                } else {
                    state = State.READ_SETTING;
                    delegate.readSettingsFrame(clear);
                }
                break;
            case READ_SETTING:
                if (numSettings == 0) {
                    // All entries consumed - signal the end of the settings frame.
                    state = State.READ_COMMON_HEADER;
                    delegate.readSettingsEnd();
                    break;
                }
                if (buffer.readableBytes() < 8) {
                    return;
                }
                byte settingsFlags = buffer.getByte(buffer.readerIndex());
                int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
                int value = getSignedInt(buffer, buffer.readerIndex() + 4);
                boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
                boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
                buffer.skipBytes(8);
                --numSettings;
                delegate.readSetting(id, value, persistValue, persisted);
                break;
            case READ_PING_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                int pingId = getSignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                state = State.READ_COMMON_HEADER;
                delegate.readPingFrame(pingId);
                break;
            case READ_GOAWAY_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                state = State.READ_COMMON_HEADER;
                delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
                break;
            case READ_HEADERS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid HEADERS Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readHeadersFrame(streamId, last);
                }
                break;
            case READ_WINDOW_UPDATE_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                // A zero delta is illegal per the SPDY spec.
                if (deltaWindowSize == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
                }
                break;
            case READ_HEADER_BLOCK:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readHeaderBlockEnd();
                    break;
                }
                if (!buffer.isReadable()) {
                    return;
                }
                // Stream the compressed header block out in whatever chunks are available.
                int compressedBytes = Math.min(buffer.readableBytes(), length);
                ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
                headerBlock.writeBytes(buffer, compressedBytes);
                length -= compressedBytes;
                delegate.readHeaderBlock(headerBlock);
                break;
            case DISCARD_FRAME:
                // Skip the remainder of an unwanted frame's payload.
                int numBytes = Math.min(buffer.readableBytes(), length);
                buffer.skipBytes(numBytes);
                length -= numBytes;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    break;
                }
                return;
            case FRAME_ERROR:
                // After a framing error the rest of the input is unusable - drop it.
                buffer.skipBytes(buffer.readableBytes());
                return;
            default:
                throw new Error("Shouldn't reach here.");
        }
    }
}
|
// A SYN_STREAM frame with FLAG_FIN set must be reported as "last" with an
// immediate header-block end, leaving no unread bytes behind.
@Test
public void testLastSpdySynStreamFrame() throws Exception {
  short type = 1;
  byte flags = 0x01; // FLAG_FIN
  int length = 10;
  // Stream ids are 31-bit; force odd (client-initiated) ids.
  int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
  int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF;
  byte priority = (byte) (RANDOM.nextInt() & 0x07);
  ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
  encodeControlFrameHeader(buf, type, flags, length);
  buf.writeInt(streamId);
  buf.writeInt(associatedToStreamId);
  // Priority occupies the top 3 bits of this byte.
  buf.writeByte(priority << 5);
  buf.writeByte(0);
  decoder.decode(buf);
  verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, true, false);
  verify(delegate).readHeaderBlockEnd();
  assertFalse(buf.isReadable());
  buf.release();
}
|
/**
 * Converts a map of KafkaFutures into a Mono of a map, dropping entries whose
 * future completed empty or failed with one of the suppressible exception
 * types. Any other failure propagates as an error.
 *
 * @param values  per-key futures to await
 * @param classes exception types (including their subclasses) to suppress
 * @return a Mono emitting the successfully completed, non-empty entries
 */
@SafeVarargs
static <K, V> Mono<Map<K, V>> toMonoWithExceptionFilter(Map<K, KafkaFuture<V>> values,
                                                        Class<? extends KafkaException>... classes) {
    if (values.isEmpty()) {
        return Mono.just(Map.of());
    }
    List<Mono<Tuple2<K, Optional<V>>>> monos = values.entrySet().stream()
        .map(e ->
            toMono(e.getValue())
                .map(r -> Tuples.of(e.getKey(), Optional.of(r)))
                .defaultIfEmpty(Tuples.of(e.getKey(), Optional.empty())) //tracking empty Monos
                .onErrorResume(
                    // tracking Monos with suppressible error: suppress when the thrown
                    // exception is an instance (or subclass) of one of the given classes.
                    // The previous check (th.getClass().isAssignableFrom(clazz)) was
                    // inverted: it matched supertypes of the filter class and missed subclasses.
                    th -> Stream.of(classes).anyMatch(clazz -> clazz.isInstance(th)),
                    th -> Mono.just(Tuples.of(e.getKey(), Optional.empty()))))
        .toList();
    return Mono.zip(
        monos,
        resultsArr -> Stream.of(resultsArr)
            .map(obj -> (Tuple2<K, Optional<V>>) obj)
            .filter(t -> t.getT2().isPresent()) //skipping empty & suppressible-errors
            .collect(Collectors.toMap(Tuple2::getT1, t -> t.getT2().get()))
    );
}
|
// Only the successfully completed, non-null future survives: the suppressed
// UnknownTopicOrPartitionException entry and the empty (null) entry are dropped.
@Test
void testToMonoWithExceptionFilter() {
  var failedFuture = new KafkaFutureImpl<String>();
  failedFuture.completeExceptionally(new UnknownTopicOrPartitionException());
  var okFuture = new KafkaFutureImpl<String>();
  okFuture.complete("done");
  // Completing with null models an "empty" result.
  var emptyFuture = new KafkaFutureImpl<String>();
  emptyFuture.complete(null);
  Map<String, KafkaFuture<String>> arg = Map.of(
      "failure", failedFuture,
      "ok", okFuture,
      "empty", emptyFuture
  );
  StepVerifier.create(toMonoWithExceptionFilter(arg, UnknownTopicOrPartitionException.class))
      .assertNext(result -> assertThat(result).hasSize(1).containsEntry("ok", "done"))
      .verifyComplete();
}
|
/**
 * Returns the indicator value at the given index, serving it from the
 * bounded results cache when possible and computing (and caching) it
 * otherwise. The last bar's value is never cached since it may still change.
 *
 * @param index the bar index
 * @return the (possibly cached) indicator value at {@code index}
 */
@Override
public synchronized T getValue(int index) {
    BarSeries series = getBarSeries();
    if (series == null) {
        // Series is null; the indicator doesn't need cache.
        // (e.g. simple computation of the value)
        // --> Calculating the value
        T result = calculate(index);
        if (log.isTraceEnabled()) {
            log.trace("{}({}): {}", this, index, result);
        }
        return result;
    }
    // Series is not null
    final int removedBarsCount = series.getRemovedBarsCount();
    final int maximumResultCount = series.getMaximumBarCount();
    T result;
    if (index < removedBarsCount) {
        // Result already removed from cache
        if (log.isTraceEnabled()) {
            log.trace("{}: result from bar {} already removed from cache, use {}-th instead",
                    getClass().getSimpleName(), index, removedBarsCount);
        }
        increaseLengthTo(removedBarsCount, maximumResultCount);
        highestResultIndex = removedBarsCount;
        result = results.get(0);
        if (result == null) {
            // It should be "result = calculate(removedBarsCount);".
            // We use "result = calculate(0);" as a workaround
            // to fix issue #120 (https://github.com/mdeverdelhan/ta4j/issues/120).
            result = calculate(0);
            results.set(0, result);
        }
    } else {
        if (index == series.getEndIndex()) {
            // Don't cache result if last bar
            result = calculate(index);
        } else {
            increaseLengthTo(index, maximumResultCount);
            if (index > highestResultIndex) {
                // Result not calculated yet
                highestResultIndex = index;
                result = calculate(index);
                // The newest value always lives at the end of the ring-like cache.
                results.set(results.size() - 1, result);
            } else {
                // Result covered by current cache
                // Translate the bar index into a position inside the cache list.
                int resultInnerIndex = results.size() - 1 - (highestResultIndex - index);
                result = results.get(resultInnerIndex);
                if (result == null) {
                    result = calculate(index);
                    results.set(resultInnerIndex, result);
                }
            }
        }
    }
    if (log.isTraceEnabled()) {
        log.trace("{}({}): {}", this, index, result);
    }
    return result;
}
|
// Requesting an index beyond the initial cache length must grow the cache
// and still return the correct SMA value (constant series of 10s -> SMA 10).
@Test
public void getValueWithCacheLengthIncrease() {
  double[] data = new double[200];
  Arrays.fill(data, 10);
  SMAIndicator sma = new SMAIndicator(new ClosePriceIndicator(new MockBarSeries(numFunction, data)), 100);
  assertNumEquals(10, sma.getValue(105));
}
|
/**
 * Looks up a value (non-key) column by name.
 *
 * @param columnName the name of the column to find
 * @return the matching VALUE-namespace column, or empty if none exists
 */
public Optional<Column> findValueColumn(final ColumnName columnName) {
  return findColumnMatching(withNamespace(VALUE).and(withName(columnName)));
}
|
// findValueColumn must resolve a known value column by name, including its
// namespace and ordinal position.
@Test
public void shouldGetColumnByName() {
  // When:
  final Optional<Column> result = SOME_SCHEMA.findValueColumn(F0);
  // Then:
  assertThat(result, is(Optional.of(
      Column.of(F0, STRING, Namespace.VALUE, 0))
  ));
}
|
/**
 * Dispatches a method call on this SEL map value. Supported methods:
 * size(), get(key), containsKey(key), put(key, value), getOrDefault(key, default).
 * A null backing map is treated as empty for all read operations.
 *
 * @param methodName the method to invoke
 * @param args       the evaluated arguments
 * @return the SEL result of the call (NULL where Java would return null)
 * @throws UnsupportedOperationException for any other method/arity combination
 */
@Override
public SelType call(String methodName, SelType[] args) {
  if (args.length == 0 && "size".equals(methodName)) {
    return SelLong.of(val == null ? 0 : val.size());
  } else if (args.length == 1 && "get".equals(methodName)) {
    // Treat a null backing map as empty, consistent with size()/containsKey().
    if (val == null || !val.containsKey((SelString) args[0])) {
      return NULL;
    }
    return val.get((SelString) args[0]);
  } else if (args.length == 1 && "containsKey".equals(methodName)) {
    return SelBoolean.of(val != null && val.containsKey((SelString) args[0]));
  } else if (args.length == 2 && "put".equals(methodName)) {
    // Map NULL sentinel onto Java null arguments so the map never stores null.
    SelType value = args[1] == null ? NULL : args[1];
    SelType res = val.put((SelString) args[0], value);
    if (res == null) {
      return NULL;
    }
    return res;
  } else if (args.length == 2 && "getOrDefault".equals(methodName)) {
    // Treat a null backing map as empty: always fall back to the default.
    if (val == null || !val.containsKey((SelString) args[0])) {
      return args[1];
    }
    return val.get((SelString) args[0]);
  }
  throw new UnsupportedOperationException(
      type()
          + " DO NOT support calling method: "
          + methodName
          + " with args: "
          + Arrays.toString(args));
}
|
// Calling "get" with the wrong arity (no arguments) must be rejected with
// UnsupportedOperationException.
@Test(expected = UnsupportedOperationException.class)
public void testCallGetInvalidArgs() {
  orig.call("get", new SelType[] {});
}
|
/**
 * Builds a HashMap from a flat varargs sequence of alternating keys and values:
 * {@code toMap(k1, v1, k2, v2, ...)}.
 *
 * @param pairs alternating keys and values; null or empty yields an empty map
 * @return a mutable map with each key mapped to its following value
 * @throws IllegalArgumentException if an odd number of arguments is supplied
 */
@SuppressWarnings("unchecked")
public static <K, V> Map<K, V> toMap(Object... pairs) {
    if (pairs == null || pairs.length == 0) {
        return new HashMap<>();
    }
    if (pairs.length % 2 != 0) {
        throw new IllegalArgumentException("Map pairs can not be odd number.");
    }
    // Presize to avoid rehashing; pairs.length / 2 entries will be inserted.
    Map<K, V> ret = new HashMap<>(pairs.length);
    for (int i = 0; i < pairs.length; i += 2) {
        ret.put((K) pairs[i], (V) pairs[i + 1]);
    }
    return ret;
}
|
// An odd number of varargs cannot form key/value pairs and must be rejected.
@Test
void testToMap2() {
  Assertions.assertThrows(IllegalArgumentException.class, () -> toMap("a", "b", "c"));
}
|
/**
 * Fits classical multidimensional scaling on the proximity matrix using
 * default hyper-parameters.
 */
public static MDS of(double[][] proximity) {
    // Delegate to the full factory with an empty (all-defaults) property set.
    Properties defaults = new Properties();
    return of(proximity, defaults);
}
|
// Classical MDS on the Eurodist distances must reproduce R's cmdscale
// eigenvalues and 2-D coordinates (up to a per-axis sign flip).
@Test
public void testPositive() {
  System.out.println("MDS positive = true");
  double[] eigs = {42274973.753, 31666186.428};
  double[][] points = {
      {-2716.561820, -3549.216493},
      { 1453.753109, -455.895291},
      { -217.426476, 1073.442137},
      { -1.682974, 1135.742860},
      { 461.875781, 871.913389},
      { -594.256798, 1029.818088},
      {-1271.216005, 1622.039302},
      { 88.721376, -4.068005},
      { 3059.180990, -836.535103},
      {-1056.316198, 1350.037932},
      { -445.663432, 1304.392098},
      { 2866.160085, -211.043554},
      { 436.147722, 140.147975},
      { 2300.753691, -234.863677},
      { 586.877042, -217.428075},
      { -336.906562, -350.948939},
      { -928.407679, 112.132182},
      { 193.653844, 847.157498},
      { -908.682100, -1742.395923},
      {-1499.140467, 1897.522969},
      {-1319.918808, -295.010834}
  };
  MDS mds = MDS.of(Eurodist.x, 2, true);
  assertArrayEquals(eigs, mds.scores, 1E-2);
  // Eigenvector signs are arbitrary; align the reference points with the
  // computed coordinates before comparing.
  double sign0 = Math.signum(points[0][0] * mds.coordinates[0][0]);
  double sign1 = Math.signum(points[0][1] * mds.coordinates[0][1]);
  for (int i = 0; i < points.length; i++) {
    points[i][0] *= sign0;
    points[i][1] *= sign1;
    assertArrayEquals(points[i], mds.coordinates[i], 1E-2);
  }
}
|
/**
 * Sets the entity UUID after length validation; supports fluent chaining.
 *
 * @param s the entity UUID, may be {@code null}
 * @return this DTO for chaining
 */
public CeActivityDto setEntityUuid(@Nullable String s) {
  // Fail fast on oversized values before mutating state.
  validateUuid(s, "ENTITY_UUID");
  entityUuid = s;
  return this;
}
|
// Values longer than the 40-character column limit must be rejected with an
// IllegalArgumentException naming the column.
@Test
void seEntityUuid_throws_IAE_if_value_is_41_chars() {
  String str_41_chars = STR_40_CHARS + "a";
  assertThatThrownBy(() -> underTest.setEntityUuid(str_41_chars))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("Value is too long for column CE_ACTIVITY.ENTITY_UUID: " + str_41_chars);
}
|
/**
 * Materializes the DirectGraph once the Pipeline has been fully traversed.
 *
 * @throws IllegalStateException if traversal has not completed yet
 */
public DirectGraph getGraph() {
  // Graph construction is only meaningful after the visitor has seen the whole pipeline.
  checkState(finalized, "Can't get a graph before the Pipeline has been completely traversed");
  return DirectGraph.create(producers, viewWriters, perElementConsumers, rootTransforms, stepNames);
}
|
// Asking for the graph before the pipeline has been traversed must fail with
// an IllegalStateException whose message explains both conditions.
@Test
public void getGraphWithoutVisitingThrows() {
  thrown.expect(IllegalStateException.class);
  thrown.expectMessage("completely traversed");
  thrown.expectMessage("get a graph");
  visitor.getGraph();
}
|
/**
 * {@inheritDoc}
 *
 * <p>Delegates SQL NULL tracking to the underlying merged result; masking
 * does not alter it.
 */
@Override
public boolean wasNull() throws SQLException {
    return mergedResult.wasNull();
}
|
// With a non-null underlying value, the mask-wrapped result must report
// wasNull() as false.
@Test
void assertWasNull() throws SQLException {
  assertFalse(new MaskMergedResult(mock(MaskRule.class), mock(SelectStatementContext.class), mergedResult).wasNull());
}
|
@SuppressWarnings("unchecked")
@Override
public RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException,
IOException {
NodeId nodeId = request.getNodeId();
String host = nodeId.getHost();
int cmPort = nodeId.getPort();
int httpPort = request.getHttpPort();
Resource capability = request.getResource();
String nodeManagerVersion = request.getNMVersion();
Resource physicalResource = request.getPhysicalResource();
NodeStatus nodeStatus = request.getNodeStatus();
RegisterNodeManagerResponse response = recordFactory
.newRecordInstance(RegisterNodeManagerResponse.class);
if (!minimumNodeManagerVersion.equals("NONE")) {
if (minimumNodeManagerVersion.equals("EqualToRM")) {
minimumNodeManagerVersion = YarnVersionInfo.getVersion();
}
if ((nodeManagerVersion == null) ||
(VersionUtil.compareVersions(nodeManagerVersion,minimumNodeManagerVersion)) < 0) {
String message =
"Disallowed NodeManager Version " + nodeManagerVersion
+ ", is less than the minimum version "
+ minimumNodeManagerVersion + " sending SHUTDOWN signal to "
+ "NodeManager.";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
}
if (checkIpHostnameInRegistration) {
InetSocketAddress nmAddress =
NetUtils.createSocketAddrForHost(host, cmPort);
InetAddress inetAddress = Server.getRemoteIp();
if (inetAddress != null && nmAddress.isUnresolved()) {
// Reject registration of unresolved nm to prevent resourcemanager
// getting stuck at allocations.
final String message =
"hostname cannot be resolved (ip=" + inetAddress.getHostAddress()
+ ", hostname=" + host + ")";
LOG.warn("Unresolved nodemanager registration: " + message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
}
// Check if this node is a 'valid' node
if (!this.nodesListManager.isValidNode(host) &&
!isNodeInDecommissioning(nodeId)) {
String message =
"Disallowed NodeManager from " + host
+ ", Sending SHUTDOWN signal to the NodeManager.";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
// check if node's capacity is load from dynamic-resources.xml
String nid = nodeId.toString();
Resource dynamicLoadCapability = loadNodeResourceFromDRConfiguration(nid);
if (dynamicLoadCapability != null) {
LOG.debug("Resource for node: {} is adjusted from: {} to: {} due to"
+ " settings in dynamic-resources.xml.", nid, capability,
dynamicLoadCapability);
capability = dynamicLoadCapability;
// sync back with new resource.
response.setResource(capability);
}
// Check if this node has minimum allocations
if (capability.getMemorySize() < minAllocMb
|| capability.getVirtualCores() < minAllocVcores) {
String message = "NodeManager from " + host
+ " doesn't satisfy minimum allocations, Sending SHUTDOWN"
+ " signal to the NodeManager. Node capabilities are " + capability
+ "; minimums are " + minAllocMb + "mb and " + minAllocVcores
+ " vcores";
LOG.info(message);
response.setDiagnosticsMessage(message);
response.setNodeAction(NodeAction.SHUTDOWN);
return response;
}
response.setContainerTokenMasterKey(containerTokenSecretManager
.getCurrentKey());
response.setNMTokenMasterKey(nmTokenSecretManager
.getCurrentKey());
RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort,
resolve(host), capability, nodeManagerVersion, physicalResource);
RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);
if (oldNode == null) {
RMNodeStartedEvent startEvent = new RMNodeStartedEvent(nodeId,
request.getNMContainerStatuses(),
request.getRunningApplications(), nodeStatus);
if (request.getLogAggregationReportsForApps() != null
&& !request.getLogAggregationReportsForApps().isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Found the number of previous cached log aggregation "
+ "status from nodemanager:" + nodeId + " is :"
+ request.getLogAggregationReportsForApps().size());
}
startEvent.setLogAggregationReportsForApps(request
.getLogAggregationReportsForApps());
}
this.rmContext.getDispatcher().getEventHandler().handle(
startEvent);
} else {
LOG.info("Reconnect from the node at: " + host);
this.nmLivelinessMonitor.unregister(nodeId);
if (CollectionUtils.isEmpty(request.getRunningApplications())
&& rmNode.getState() != NodeState.DECOMMISSIONING
&& rmNode.getHttpPort() != oldNode.getHttpPort()) {
// Reconnected node differs, so replace old node and start new node
switch (rmNode.getState()) {
case RUNNING:
ClusterMetrics.getMetrics().decrNumActiveNodes();
break;
case UNHEALTHY:
ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
break;
default:
LOG.debug("Unexpected Rmnode state");
}
this.rmContext.getDispatcher().getEventHandler()
.handle(new NodeRemovedSchedulerEvent(rmNode));
this.rmContext.getRMNodes().put(nodeId, rmNode);
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMNodeStartedEvent(nodeId, null, null, nodeStatus));
} else {
// Reset heartbeat ID since node just restarted.
oldNode.resetLastNodeHeartBeatResponse();
this.rmContext.getDispatcher().getEventHandler()
.handle(new RMNodeReconnectEvent(nodeId, rmNode,
request.getRunningApplications(),
request.getNMContainerStatuses()));
}
}
// On every node manager register we will be clearing NMToken keys if
// present for any running application.
this.nmTokenSecretManager.removeNodeKey(nodeId);
this.nmLivelinessMonitor.register(nodeId);
// Handle received container status, this should be processed after new
// RMNode inserted
if (!rmContext.isWorkPreservingRecoveryEnabled()) {
if (!request.getNMContainerStatuses().isEmpty()) {
LOG.info("received container statuses on node manager register :"
+ request.getNMContainerStatuses());
for (NMContainerStatus status : request.getNMContainerStatuses()) {
handleNMContainerStatus(status, nodeId);
}
}
}
// Update node's labels to RM's NodeLabelManager.
Set<String> nodeLabels = NodeLabelsUtils.convertToStringSet(
request.getNodeLabels());
if (isDistributedNodeLabelsConf && nodeLabels != null) {
try {
updateNodeLabelsFromNMReport(nodeLabels, nodeId);
response.setAreNodeLabelsAcceptedByRM(true);
} catch (IOException ex) {
// Ensure the exception is captured in the response
response.setDiagnosticsMessage(ex.getMessage());
response.setAreNodeLabelsAcceptedByRM(false);
}
} else if (isDelegatedCentralizedNodeLabelsConf) {
this.rmContext.getRMDelegatedNodeLabelsUpdater().updateNodeLabels(nodeId);
}
// Update node's attributes to RM's NodeAttributesManager.
if (request.getNodeAttributes() != null) {
try {
// update node attributes if necessary then update heartbeat response
updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
response.setAreNodeAttributesAcceptedByRM(true);
} catch (IOException ex) {
//ensure the error message is captured and sent across in response
String errorMsg = response.getDiagnosticsMessage() == null ?
ex.getMessage() :
response.getDiagnosticsMessage() + "\n" + ex.getMessage();
response.setDiagnosticsMessage(errorMsg);
response.setAreNodeAttributesAcceptedByRM(false);
}
}
StringBuilder message = new StringBuilder();
message.append("NodeManager from node ").append(host).append("(cmPort: ")
.append(cmPort).append(" httpPort: ");
message.append(httpPort).append(") ")
.append("registered with capability: ").append(capability);
message.append(", assigned nodeId ").append(nodeId);
if (response.getAreNodeLabelsAcceptedByRM()) {
message.append(", node labels { ").append(
StringUtils.join(",", nodeLabels) + " } ");
}
if (response.getAreNodeAttributesAcceptedByRM()) {
message.append(", node attributes { ")
.append(request.getNodeAttributes() + " } ");
}
LOG.info(message.toString());
response.setNodeAction(NodeAction.NORMAL);
response.setRMIdentifier(ResourceManager.getClusterTimeStamp());
response.setRMVersion(YarnVersionInfo.getVersion());
return response;
}
|
/**
 * Registering a NodeManager that reports a syntactically invalid node label
 * ("#Y") must still return a NORMAL node action, but the labels must be
 * rejected by the RM and a diagnostic message must be set on the response.
 */
@Test
public void testNodeRegistrationWithInvalidLabelsSyntax() throws Exception {
    writeToHostsFile("host2");
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
        hostFile.getAbsolutePath());
    conf.set(YarnConfiguration.NODELABEL_CONFIGURATION_TYPE,
        YarnConfiguration.DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE);
    final RMNodeLabelsManager nodeLabelsMgr = new NullRMNodeLabelsManager();
    rm = new MockRM(conf) {
        @Override
        protected RMNodeLabelsManager createNodeLabelManager() {
            return nodeLabelsMgr;
        }
    };
    rm.start();
    try {
        nodeLabelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("X", "Y", "Z"));
    } catch (IOException e) {
        // Print the trace BEFORE failing: Assert.fail() throws AssertionError,
        // so in the original ordering (fail first) the printStackTrace() call
        // was unreachable and the root cause of the failure was lost.
        e.printStackTrace();
        Assert.fail("Caught Exception while initializing");
    }
    ResourceTrackerService resourceTrackerService =
        rm.getResourceTrackerService();
    RegisterNodeManagerRequest req =
        Records.newRecord(RegisterNodeManagerRequest.class);
    NodeId nodeId = NodeId.newInstance("host2", 1234);
    Resource capability = Resources.createResource(1024);
    req.setResource(capability);
    req.setNodeId(nodeId);
    req.setHttpPort(1234);
    req.setNMVersion(YarnVersionInfo.getVersion());
    // "#Y" is not a valid label name; the RM should refuse it.
    req.setNodeLabels(toNodeLabelSet("#Y"));
    RegisterNodeManagerResponse response =
        resourceTrackerService.registerNodeManager(req);
    Assert.assertEquals(
        "On Invalid Node Labels action is expected to be normal",
        NodeAction.NORMAL, response.getNodeAction());
    Assert.assertNull(nodeLabelsMgr.getNodeLabels().get(nodeId));
    Assert.assertNotNull(response.getDiagnosticsMessage());
    Assert.assertFalse("Node Labels should not accepted by RM If Invalid",
        response.getAreNodeLabelsAcceptedByRM());
    if (rm != null) {
        rm.stop();
    }
}
|
/**
 * Reads whether S3 transfer acceleration is enabled for the bucket that
 * contains the given file.
 *
 * @param file file or folder inside the bucket to query
 * @return {@code true} if the accelerate configuration is enabled
 * @throws BackgroundException when the S3 accelerate configuration cannot be read
 */
@Override
public boolean getStatus(final Path file) throws BackgroundException {
    final Path bucket = containerService.getContainer(file);
    // Root container maps to the empty bucket name in the S3 API call.
    final String bucketName = bucket.isRoot() ? StringUtils.EMPTY : bucket.getName();
    try {
        return session.getClient().getAccelerateConfig(bucketName).isEnabled();
    }
    catch(S3ServiceException failure) {
        throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", failure, bucket);
    }
}
|
@Test
public void getStatus() throws Exception {
    // Smoke test: querying the transfer-acceleration status of an existing
    // bucket must complete without throwing. The returned flag itself is not
    // asserted here.
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory));
    final S3TransferAccelerationService service = new S3TransferAccelerationService(session);
    service.getStatus(container);
}
|
/**
 * Creates a new portal server config entry or updates the existing one with
 * the same key.
 *
 * @param serverConfig validated key/value configuration payload from the request body
 * @return the persisted {@link ServerConfig}
 */
@PostMapping("/server/config")
public ServerConfig createOrUpdatePortalDBConfig(@Valid @RequestBody ServerConfig serverConfig) {
    return serverConfigService.createOrUpdateConfig(serverConfig);
}
|
@Test
@Sql(scripts = "/controller/test-server-config.sql", executionPhase = ExecutionPhase.BEFORE_TEST_METHOD)
@Sql(scripts = "/controller/cleanup.sql", executionPhase = ExecutionPhase.AFTER_TEST_METHOD)
void createOrUpdatePortalDBConfig() {
    // Create the first config entry and verify it is the only one persisted.
    ServerConfig serverConfig = new ServerConfig();
    serverConfig.setKey("name");
    serverConfig.setValue("ckl");
    ServerConfig response = restTemplate.postForObject(url("/server/config"), serverConfig, ServerConfig.class);
    assertNotNull(response);
    ServerConfig[] serverConfigs = restTemplate.getForObject(url("/server/config/find-all-config"), ServerConfig[].class);
    assertNotNull(serverConfigs);
    assertEquals(1, serverConfigs.length);
    assertEquals("name", serverConfigs[0].getKey());
    assertEquals("ckl", serverConfigs[0].getValue());
    // Posting a config with a different key must create a second entry
    // (create path) rather than overwrite the first one (update path).
    serverConfig = new ServerConfig();
    serverConfig.setKey("age");
    serverConfig.setValue("30");
    response = restTemplate.postForObject(url("/server/config"), serverConfig, ServerConfig.class);
    assertNotNull(response);
    serverConfigs = restTemplate.getForObject(url("/server/config/find-all-config"), ServerConfig[].class);
    assertNotNull(serverConfigs);
    assertEquals(2, serverConfigs.length);
}
|
/**
 * Converts HTTP/2 request headers to an HTTP/1.1 {@link HttpRequest}.
 *
 * @param streamId the stream the headers were received on (used in error reporting)
 * @param http2Headers source headers; must contain a {@code :method} pseudo-header
 * @param validateHttpHeaders whether the resulting HTTP/1.x headers are validated
 * @return the equivalent HTTP/1.1 request (headers only, no body)
 * @throws Http2Exception if the method is missing or header conversion fails
 */
public static HttpRequest toHttpRequest(int streamId, Http2Headers http2Headers, boolean validateHttpHeaders)
        throws Http2Exception {
    // HTTP/2 does not define a way to carry the version identifier that is included in the HTTP/1.1 request line.
    final CharSequence method = checkNotNull(http2Headers.method(),
            "method header cannot be null in conversion to HTTP/1.x");
    final CharSequence path = extractPath(method, http2Headers);
    final HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1,
            HttpMethod.valueOf(method.toString()), path.toString(), validateHttpHeaders);
    try {
        addHttp2ToHttpHeaders(streamId, http2Headers, request.headers(), request.protocolVersion(), false, true);
    } catch (Http2Exception e) {
        // Already an HTTP/2 error with stream context; propagate untouched.
        throw e;
    } catch (Throwable t) {
        // Wrap any other failure as a protocol error on this stream.
        throw streamError(streamId, PROTOCOL_ERROR, t, "HTTP/2 to HTTP/1.x headers conversion error");
    }
    return request;
}
|
@Test
public void connectNoPath() throws Exception {
    // A CONNECT request carries no :path pseudo-header; the HTTP/1.x request
    // URI must instead be derived from the :authority pseudo-header.
    String authority = "netty.io:80";
    Http2Headers headers = new DefaultHttp2Headers();
    headers.authority(authority);
    headers.method(HttpMethod.CONNECT.asciiName());
    HttpRequest request = HttpConversionUtil.toHttpRequest(0, headers, true);
    assertNotNull(request);
    assertEquals(authority, request.uri());
    assertEquals(authority, request.headers().get(HOST));
}
|
/**
 * Heuristically decides whether this config field represents a time value.
 *
 * <p>The config metadata carries no explicit time flag, so a field is treated
 * as a time field when its name looks like a milliseconds field AND its type
 * is INT or LONG. Not pretty, but it is the only feasible workaround here.
 *
 * @return {@code true} if the field is (heuristically) a time field
 */
public boolean isTimeField() {
    final boolean numericType =
            fieldDef.type == ConfigDef.Type.INT || fieldDef.type == ConfigDef.Type.LONG;
    return numericType && isMillSecondsInTheFieldName(fieldDef.name);
}
|
@Test
void testIfDiscoversDurationFieldCorrectly() {
    // Name ending in "Ms" with type LONG -> recognized as a time field.
    final ConfigDef.ConfigKey configKey = new ConfigDef.ConfigKey(
        "field.test_underscore.Ms", ConfigDef.Type.LONG, "100",
        null, ConfigDef.Importance.MEDIUM, "testing", "testGroup", 1, ConfigDef.Width.MEDIUM, "displayName",
        Collections.emptyList(),
        null, false);
    final ConnectorConfigField connectorConfigField = new ConnectorConfigField(configKey, false, true, null);
    assertTrue(connectorConfigField.isTimeField());
    // Lower-case "ms" suffix with type INT -> also a time field.
    final ConfigDef.ConfigKey configKey2 = new ConfigDef.ConfigKey(
        "field.test_underscore.ms", ConfigDef.Type.INT, "100",
        null, ConfigDef.Importance.MEDIUM, "testing", "testGroup", 1, ConfigDef.Width.MEDIUM, "displayName",
        Collections.emptyList(),
        null, false);
    final ConnectorConfigField connectorConfigField2 = new ConnectorConfigField(configKey2, false, true, null);
    assertTrue(connectorConfigField2.isTimeField());
    // No "ms" suffix at all -> not a time field, even though the type is INT.
    final ConfigDef.ConfigKey configKey3 = new ConfigDef.ConfigKey(
        "field", ConfigDef.Type.INT, "100",
        null, ConfigDef.Importance.MEDIUM, "testing", "testGroup", 1, ConfigDef.Width.MEDIUM, "displayName",
        Collections.emptyList(),
        null, false);
    final ConnectorConfigField connectorConfigField3 = new ConnectorConfigField(configKey3, false, true, null);
    assertFalse(connectorConfigField3.isTimeField());
    // "ms" appearing in the middle of the name (not as a suffix) must not match.
    final ConfigDef.ConfigKey configKey4 = new ConfigDef.ConfigKey(
        "field.ms.field", ConfigDef.Type.LONG, "100",
        null, ConfigDef.Importance.MEDIUM, "testing", "testGroup", 1, ConfigDef.Width.MEDIUM, "displayName",
        Collections.emptyList(),
        null, false);
    final ConnectorConfigField connectorConfigField4 = new ConnectorConfigField(configKey4, false, true, null);
    assertFalse(connectorConfigField4.isTimeField());
}
|
/**
 * Loads a class, instrumenting ("sandboxing") it when the sandbox config says
 * this loader should acquire it; all other classes are delegated to the
 * parent loader.
 *
 * @param name binary name of the class to load
 * @param resolve whether to link the class after loading
 * @throws ClassNotFoundException if this loader has been closed or the class
 *         cannot be found
 */
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
    synchronized (getClassLoadingLock(name)) {
        // Fast path: the class was already defined by this loader.
        Class<?> loadedClass = findLoadedClass(name);
        if (loadedClass != null) {
            return loadedClass;
        }
        // Refuse to define new classes once the loader is closed.
        if (isClosed) {
            throw new ClassNotFoundException("This ClassLoader is closed");
        }
        if (config.shouldAcquire(name)) {
            // Sandboxed classes may be instrumented; the work is timed for
            // perf statistics.
            loadedClass =
                PerfStatsCollector.getInstance()
                    .measure("load sandboxed class", () -> maybeInstrumentClass(name));
        } else {
            // Non-acquired classes are resolved by the parent loader.
            loadedClass = getParent().loadClass(name);
        }
        if (resolve) {
            resolveClass(loadedClass);
        }
        return loadedClass;
    }
}
|
@Test
public void shouldInterceptFilteredStaticMethodInvocations() throws Exception {
    // Configure the sandbox loader to intercept the forgettable static method,
    // so the call is routed through the interceptor instead of the real class.
    setClassLoader(
        new SandboxClassLoader(
            configureBuilder()
                .addInterceptedMethod(
                    new MethodRef(AClassToForget.class, "forgettableStaticMethod"))
                .build()));
    Class<?> theClass = loadClass(AClassThatRefersToAForgettableClass.class);
    Object instance = theClass.getDeclaredConstructor().newInstance();
    Object output =
        theClass
            .getMethod("interactWithForgettableStaticMethod")
            .invoke(shadow.directlyOn(instance, (Class<Object>) theClass));
    // The intercepted method yields null, which the caller stitches into its
    // result string.
    assertEquals("yess? forget this: null", output);
}
|
/**
 * Applies a flow-rule Intent operation: computes the flow rules to add,
 * modify and remove from the install/uninstall sides of the context, then
 * submits them as one FlowRuleOperations batch. Success/failure is reported
 * back through the intentInstallCoordinator.
 */
@Override
public void apply(IntentOperationContext<FlowRuleIntent> context) {
    Optional<IntentData> toUninstall = context.toUninstall();
    Optional<IntentData> toInstall = context.toInstall();
    if (toInstall.isPresent() && toUninstall.isPresent()) {
        Intent intentToInstall = toInstall.get().intent();
        // Make-before-break path: a non-disruptive replacement of an already
        // INSTALLED intent is handled by reallocate() instead of the normal
        // add/remove batch below.
        if (requireNonDisruptive(intentToInstall) && INSTALLED.equals(toUninstall.get().state())) {
            reallocate(context);
            return;
        }
    }
    if (!toInstall.isPresent() && !toUninstall.isPresent()) {
        // Nothing to do.
        intentInstallCoordinator.intentInstallSuccess(context);
        return;
    }
    List<FlowRuleIntent> uninstallIntents = context.intentsToUninstall();
    List<FlowRuleIntent> installIntents = context.intentsToInstall();
    List<FlowRule> flowRulesToUninstall;
    List<FlowRule> flowRulesToInstall;
    if (toUninstall.isPresent()) {
        // Remove tracked resource from both Intent and installable Intents.
        trackIntentResources(toUninstall.get(), uninstallIntents, REMOVE);
        // Retrieves all flow rules from all flow rule Intents.
        // Only rules that actually exist in the flow rule store are removed.
        flowRulesToUninstall = uninstallIntents.stream()
                .map(FlowRuleIntent::flowRules)
                .flatMap(Collection::stream)
                .filter(flowRule -> flowRuleService.getFlowEntry(flowRule) != null)
                .collect(Collectors.toList());
    } else {
        // No flow rules to be uninstalled.
        flowRulesToUninstall = Collections.emptyList();
    }
    if (toInstall.isPresent()) {
        // Track resource from both Intent and installable Intents.
        trackIntentResources(toInstall.get(), installIntents, ADD);
        // Retrieves all flow rules from all flow rule Intents.
        flowRulesToInstall = installIntents.stream()
                .map(FlowRuleIntent::flowRules)
                .flatMap(Collection::stream)
                .collect(Collectors.toList());
    } else {
        // No flow rules to be installed.
        flowRulesToInstall = Collections.emptyList();
    }
    List<FlowRule> flowRuleToModify;
    List<FlowRule> dontTouch;
    // If both uninstall/install list contained equal (=match conditions are equal) FlowRules,
    // omit it from remove list, since it will/should be overwritten by install
    flowRuleToModify = flowRulesToInstall.stream()
            .filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::equals))
            .collect(Collectors.toList());
    // If both contained exactMatch-ing FlowRules, remove from both list,
    // since it will result in no-op.
    dontTouch = flowRulesToInstall.stream()
            .filter(flowRule -> flowRulesToUninstall.stream().anyMatch(flowRule::exactMatch))
            .collect(Collectors.toList());
    // Partition the three lists so each rule appears in at most one of
    // add/modify/remove.
    flowRulesToUninstall.removeAll(flowRuleToModify);
    flowRulesToUninstall.removeAll(dontTouch);
    flowRulesToInstall.removeAll(flowRuleToModify);
    flowRulesToInstall.removeAll(dontTouch);
    flowRuleToModify.removeAll(dontTouch);
    if (flowRulesToInstall.isEmpty() && flowRulesToUninstall.isEmpty() && flowRuleToModify.isEmpty()) {
        // There is no flow rules to install/uninstall
        intentInstallCoordinator.intentInstallSuccess(context);
        return;
    }
    FlowRuleOperations.Builder builder = FlowRuleOperations.builder();
    // Add flows
    flowRulesToInstall.forEach(builder::add);
    // Modify flows
    flowRuleToModify.forEach(builder::modify);
    // Remove flows
    flowRulesToUninstall.forEach(builder::remove);
    // Report install success/failure once the whole batch completes.
    FlowRuleOperationsContext flowRuleOperationsContext = new FlowRuleOperationsContext() {
        @Override
        public void onSuccess(FlowRuleOperations ops) {
            intentInstallCoordinator.intentInstallSuccess(context);
        }
        @Override
        public void onError(FlowRuleOperations ops) {
            intentInstallCoordinator.intentInstallFailed(context);
        }
    };
    FlowRuleOperations operations = builder.build(flowRuleOperationsContext);
    log.debug("applying intent {} -> {} with {} rules: {}",
            toUninstall.map(x -> x.key().toString()).orElse("<empty>"),
            toInstall.map(x -> x.key().toString()).orElse("<empty>"),
            operations.stages().stream().mapToLong(Set::size).sum(),
            operations.stages());
    flowRuleService.apply(operations);
}
|
@Test
public void testRuleModifyMissing() {
    // Uninstall side has rules with the SAME match as the install side, but
    // those rules are not present in the flow rule store. They must therefore
    // be re-added (not modified, not removed).
    List<Intent> intentsToInstall = createFlowRuleIntents();
    List<Intent> intentsToUninstall = createFlowRuleIntentsWithSameMatch();
    IntentData toInstall = new IntentData(createP2PIntent(),
                                          IntentState.INSTALLING,
                                          new WallClockTimestamp());
    toInstall = IntentData.compiled(toInstall, intentsToInstall);
    IntentData toUninstall = new IntentData(createP2PIntent(),
                                            IntentState.INSTALLED,
                                            new WallClockTimestamp());
    toUninstall = IntentData.compiled(toUninstall, intentsToUninstall);
    IntentOperationContext<FlowRuleIntent> operationContext;
    IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall);
    operationContext = new IntentOperationContext(intentsToUninstall, intentsToInstall, context);
    installer.apply(operationContext);
    IntentOperationContext successContext = intentInstallCoordinator.successContext;
    assertEquals(successContext, operationContext);
    // Missing rules go through the "add" path only.
    assertEquals(0, flowRuleService.flowRulesRemove.size());
    assertEquals(1, flowRuleService.flowRulesAdd.size());
    assertEquals(0, flowRuleService.flowRulesModify.size());
    FlowRuleIntent installedIntent = (FlowRuleIntent) intentsToInstall.get(0);
    assertEquals(flowRuleService.flowRulesAdd.size(), installedIntent.flowRules().size());
    assertTrue(flowRuleService.flowRulesAdd.containsAll(installedIntent.flowRules()));
}
|
/**
 * Returns the comparator configured for the given column.
 *
 * @param column zero-based column index
 * @return the column's value comparator, or {@code null} if none was set
 */
public Comparator<?> getValueComparator(int column) {
    return valueComparators[column];
}
|
@Test
public void getDefaultComparatorForNullClass() {
    // A column whose class is null gets no default comparator.
    ObjectTableSorter sorter = new ObjectTableSorter(createTableModel("null", null));
    assertThat(sorter.getValueComparator(0), is(nullValue()));
}
|
/**
 * Trims every non-null element of the given array in place.
 *
 * <p>A {@code null} array is a no-op; {@code null} elements are left untouched.
 *
 * @param strs array whose elements are trimmed in place, may be {@code null}
 */
public static void trim(String[] strs) {
    if (null == strs) {
        return;
    }
    for (int i = 0; i < strs.length; i++) {
        final String current = strs[i];
        if (null != current) {
            strs[i] = trim(current);
        }
    }
}
|
@Test
public void trimTabTest() {
    // A leading tab must be stripped like any other whitespace character.
    final String str = "\taaa";
    assertEquals("aaa", StrUtil.trim(str));
}
|
/**
 * Transforms a single {@link Transformation} into stream-graph node ids,
 * memoizing the result so shared transformations are translated only once.
 *
 * <p>Also applies the job-wide max parallelism when the transformation has
 * none set, and records/validates the resource spec of its slot sharing group.
 *
 * @param transform the transformation to translate
 * @return the ids of the stream-graph nodes produced for this transformation
 */
private Collection<Integer> transform(Transformation<?> transform) {
    // Memoization: a transformation reachable from several sinks is
    // translated exactly once.
    if (alreadyTransformed.containsKey(transform)) {
        return alreadyTransformed.get(transform);
    }
    LOG.debug("Transforming " + transform);
    if (transform.getMaxParallelism() <= 0) {
        // if the max parallelism hasn't been set, then first use the job wide max parallelism
        // from the ExecutionConfig.
        int globalMaxParallelismFromConfig = executionConfig.getMaxParallelism();
        if (globalMaxParallelismFromConfig > 0) {
            transform.setMaxParallelism(globalMaxParallelismFromConfig);
        }
    }
    // Record the resource spec of the slot sharing group; the same group may
    // only ever be configured with one resource spec.
    transform
            .getSlotSharingGroup()
            .ifPresent(
                    slotSharingGroup -> {
                        final ResourceSpec resourceSpec =
                                SlotSharingGroupUtils.extractResourceSpec(slotSharingGroup);
                        if (!resourceSpec.equals(ResourceSpec.UNKNOWN)) {
                            slotSharingGroupResources.compute(
                                    slotSharingGroup.getName(),
                                    (name, profile) -> {
                                        if (profile == null) {
                                            return ResourceProfile.fromResourceSpec(
                                                    resourceSpec, MemorySize.ZERO);
                                        } else if (!ResourceProfile.fromResourceSpec(
                                                        resourceSpec, MemorySize.ZERO)
                                                .equals(profile)) {
                                            throw new IllegalArgumentException(
                                                    "The slot sharing group "
                                                            + slotSharingGroup.getName()
                                                            + " has been configured with two different resource spec.");
                                        } else {
                                            return profile;
                                        }
                                    });
                        }
                    });
    // call at least once to trigger exceptions about MissingTypeInfo
    transform.getOutputType();
    // Prefer a registered translator; otherwise fall back to the legacy path.
    @SuppressWarnings("unchecked")
    final TransformationTranslator<?, Transformation<?>> translator =
            (TransformationTranslator<?, Transformation<?>>)
                    translatorMap.get(transform.getClass());
    Collection<Integer> transformedIds;
    if (translator != null) {
        transformedIds = translate(translator, transform);
    } else {
        transformedIds = legacyTransform(transform);
    }
    // need this check because the iterate transformation adds itself before
    // transforming the feedback edges
    if (!alreadyTransformed.containsKey(transform)) {
        alreadyTransformed.put(transform, transformedIds);
    }
    return transformedIds;
}
|
@Test
void testOutputTypeConfigurationWithOneInputTransformation() {
    // Building the stream graph must push the declared output type
    // (INT_TYPE_INFO) into the output-type-configurable operator.
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Integer> source = env.fromData(1, 10);
    OutputTypeConfigurableOperationWithOneInput outputTypeConfigurableOperation =
            new OutputTypeConfigurableOperationWithOneInput();
    DataStream<Integer> result =
            source.transform(
                    "Single input and output type configurable operation",
                    BasicTypeInfo.INT_TYPE_INFO,
                    outputTypeConfigurableOperation);
    result.sinkTo(new DiscardingSink<>());
    // Triggers translation of the pipeline, which performs the configuration.
    env.getStreamGraph();
    assertThat(outputTypeConfigurableOperation.getTypeInformation())
            .isEqualTo(BasicTypeInfo.INT_TYPE_INFO);
}
|
/**
 * Decodes a MySQL binlog JSON value into its textual JSON representation.
 *
 * @param byteBuf buffer positioned at the start of the JSON value; the first
 *                byte is the value-type marker, the payload starts at offset 1
 * @return the decoded JSON document as a {@code String}
 */
public static Serializable decode(final ByteBuf byteBuf) {
    // readUnsignedByte() already yields a value in [0, 255], so the former
    // "& 0xff" mask was redundant and has been dropped.
    int valueType = byteBuf.readUnsignedByte();
    StringBuilder result = new StringBuilder();
    decodeValue(valueType, 1, byteBuf, result);
    return result.toString();
}
|
@Test
void assertDecodeSmallJsonObjectWithString() {
    // String lengths are chosen around the variable-length-integer encoding
    // boundaries: empty, max one-byte length (2^7-1), first two-byte length
    // (2^7), and max two-byte length (2^14-1).
    List<JsonEntry> jsonEntries = new LinkedList<>();
    String value1 = "";
    String value2 = Strings.repeat("1", (int) (Math.pow(2D, 7D) - 1D));
    String value3 = Strings.repeat("1", (int) (Math.pow(2D, 7D) - 1D + 1D));
    String value4 = Strings.repeat("1", (int) (Math.pow(2D, 14D) - 1D));
    jsonEntries.add(new JsonEntry(JsonValueTypes.STRING, "key1", value1));
    jsonEntries.add(new JsonEntry(JsonValueTypes.STRING, "key2", value2));
    jsonEntries.add(new JsonEntry(JsonValueTypes.STRING, "key3", value3));
    jsonEntries.add(new JsonEntry(JsonValueTypes.STRING, "key4", value4));
    ByteBuf payload = mockJsonObjectByteBuf(jsonEntries, true);
    String actual = (String) MySQLJsonValueDecoder.decode(payload);
    assertThat(actual, is(String.format("{\"key1\":\"%s\",\"key2\":\"%s\",\"key3\":\"%s\",\"key4\":\"%s\"}", value1, value2, value3, value4)));
}
|
/**
 * Starts a new {@link Builder} for finding files in the given table.
 *
 * @param table table to scan
 * @return a builder scoped to {@code table}
 */
public static Builder in(Table table) {
    return new Builder(table);
}
|
@TestTemplate
public void testInPartitions() {
    // Append one file per bucket partition, then filter by two of them.
    table
        .newAppend()
        .appendFile(FILE_A) // bucket 0
        .appendFile(FILE_B) // bucket 1
        .appendFile(FILE_C) // bucket 2
        .appendFile(FILE_D) // bucket 3
        .commit();
    // Only files in buckets 1 and 2 should be returned.
    Iterable<DataFile> files =
        FindFiles.in(table)
            .inPartitions(table.spec(), StaticDataTask.Row.of(1), StaticDataTask.Row.of(2))
            .collect();
    assertThat(pathSet(files)).isEqualTo(pathSet(FILE_B, FILE_C));
}
|
/**
 * Rebuilds the cached match keys from the FLOW_MATCH_KIND rules of the given
 * router configuration. Any previously cached keys are discarded first.
 *
 * @param configuration router configuration holding route and global rules
 */
public static void initMatchKeys(RouterConfiguration configuration) {
    MATCH_KEYS.clear();
    if (RouterConfiguration.isInValid(configuration, RouterConstant.FLOW_MATCH_KIND)) {
        // Invalid configuration contributes no match keys.
        return;
    }
    Map<String, List<Rule>> routeRules = configuration.getRouteRule().get(RouterConstant.FLOW_MATCH_KIND);
    if (!CollectionUtils.isEmpty(routeRules)) {
        routeRules.values().forEach(rules -> addKeys(rules));
    }
    List<Rule> globalRules = configuration.getGlobalRule().get(RouterConstant.FLOW_MATCH_KIND);
    if (!CollectionUtils.isEmpty(globalRules)) {
        addKeys(globalRules);
    }
}
|
@Test
public void testInitMatchKeys() {
    // An empty configuration must leave the match-key cache empty.
    RouterConfiguration configuration = new RouterConfiguration();
    RuleUtils.initMatchKeys(configuration);
    Assert.assertTrue(RuleUtils.getMatchKeys().isEmpty());
    // After loading FLOW_MATCH_KIND route rules, the keys of those rules
    // must be cached.
    Map<String, List<EntireRule>> map = new HashMap<>();
    EntireRule entireRule = new EntireRule();
    entireRule.setRules(list);
    entireRule.setKind(RouterConstant.FLOW_MATCH_KIND);
    map.put("test", Collections.singletonList(entireRule));
    configuration.resetRouteRule(map);
    RuleUtils.initMatchKeys(configuration);
    Set<String> keys = RuleUtils.getMatchKeys();
    Assert.assertEquals(3, keys.size());
}
|
/**
 * Extracts the top-level fields of a JSON object into a map.
 *
 * <p>String values are copied verbatim; any other value (number, boolean,
 * nested object/array, null) is rendered as its JSON text.
 *
 * @param jsonObj JSON object text; {@code null}, non-JSON or non-object input
 *                yields {@code null}
 * @return map of field name to string value, or {@code null}
 */
@Udf
public Map<String, String> records(@UdfParameter final String jsonObj) {
    if (jsonObj == null) {
        return null;
    }
    final JsonNode node = UdfJsonMapper.parseJson(jsonObj);
    if (node.isMissingNode() || !node.isObject()) {
        return null;
    }
    final Map<String, String> fields = new HashMap<>(node.size());
    node.fieldNames().forEachRemaining(name -> {
        final JsonNode value = node.get(name);
        fields.put(name, value instanceof TextNode ? value.textValue() : value.toString());
    });
    return fields;
}
|
@Test(expected = KsqlFunctionException.class)
public void shouldThrowForInvalidJson() {
    // "abc" is not valid JSON; parsing must surface a KsqlFunctionException.
    udf.records("abc");
}
|
/**
 * Identifies the plugin this data handler serves.
 *
 * @return the rewrite plugin name
 */
@Override
public String pluginNamed() {
    return PluginEnum.REWRITE.getName();
}
|
@Test
public void testPluginNamed() {
    // JUnit convention is assertEquals(expected, actual); the original had
    // the arguments reversed, which produces misleading failure messages.
    Assertions.assertEquals("rewrite", rewritePluginDataHandler.pluginNamed());
}
|
/**
 * Probability mass function of the hypergeometric distribution.
 *
 * @param k the number of observed successes
 * @return P(X = k); zero outside the support [max(0, m + n - N), min(m, n)]
 */
@Override
public double p(int k) {
    final int lowerBound = Math.max(0, m + n - N);
    final int upperBound = Math.min(m, n);
    if (k < lowerBound || k > upperBound) {
        // Outside the support the probability mass is zero.
        return 0.0;
    }
    return Math.exp(logp(k));
}
|
@Test
public void testP() {
    System.out.println("p");
    // Hypergeometric(N=100, m=30, n=70): reference values computed with R's
    // dhyper; values outside the support [0, 30] must be exactly zero.
    HyperGeometricDistribution instance = new HyperGeometricDistribution(100, 30, 70);
    instance.rand();
    assertEquals(0.0, instance.p(-1), 1E-6);
    assertEquals(3.404564e-26, instance.p(0), 1E-30);
    assertEquals(7.149584e-23, instance.p(1), 1E-27);
    assertEquals(3.576579e-20, instance.p(2), 1E-25);
    assertEquals(0.1655920, instance.p(20), 1E-7);
    assertEquals(0.1877461, instance.p(21), 1E-7);
    assertEquals(0.00041413, instance.p(28), 1E-8);
    assertEquals(4.136376e-05, instance.p(29), 1E-10);
    assertEquals(1.884349e-06, instance.p(30), 1E-12);
    assertEquals(0.0, instance.p(31), 1E-6);
}
|
/**
 * Decodes a JSON-RPC quantity string into a {@link BigInteger}.
 *
 * <p>Plain decimal strings that fit in a {@code long} are accepted directly;
 * anything else must be a well-formed "0x"-prefixed hexadecimal quantity.
 *
 * @param value decimal long or 0x-prefixed hexadecimal string
 * @return the decoded quantity
 * @throws MessageDecodingException if the value is neither a decimal long nor
 *         a valid hex quantity, or the hex payload cannot be parsed
 */
public static BigInteger decodeQuantity(String value) {
    if (isLongValue(value)) {
        return BigInteger.valueOf(Long.parseLong(value));
    }
    if (!isValidHexQuantity(value)) {
        throw new MessageDecodingException("Value must be in format 0x[0-9a-fA-F]+");
    }
    try {
        return parsePaddedNumberHex(value);
    } catch (NumberFormatException e) {
        throw new MessageDecodingException("Negative ", e);
    }
}
|
@Test
public void testQuantityDecode() {
    // JUnit convention is assertEquals(expected, actual); the original passed
    // the decoded (actual) value first, which makes failure messages report
    // the values the wrong way round.
    assertEquals(BigInteger.valueOf(0L), Numeric.decodeQuantity("0x0"));
    assertEquals(BigInteger.valueOf(1024L), Numeric.decodeQuantity("0x400"));
    assertEquals(BigInteger.valueOf(65L), Numeric.decodeQuantity("0x41"));
    assertEquals(
            BigInteger.valueOf(Long.MAX_VALUE),
            Numeric.decodeQuantity("0x7fffffffffffffff"));
    // A quantity wider than 64 bits must round-trip through BigInteger.
    assertEquals(
            new BigInteger("204516877000845695339750056077105398031"),
            Numeric.decodeQuantity("0x99dc848b94efc27edfad28def049810f"));
}
|
/**
 * Decides whether a failed request should be retried.
 *
 * <p>A request is NOT retried when the retry budget is exhausted, the failure
 * is (a subclass of) a configured non-retriable exception, or the request was
 * cancelled. Otherwise retry is allowed only for idempotent HTTP methods.
 *
 * @param request the failed request
 * @param exception the I/O failure that occurred
 * @param execCount how many times the request has been executed so far
 * @param context the execution context (unused)
 * @return {@code true} if the request should be retried
 */
@Override
public boolean retryRequest(
        HttpRequest request, IOException exception, int execCount, HttpContext context) {
    // Do not retry once the maximum number of attempts has been used up.
    if (execCount > maxRetries) {
        return false;
    }
    // Reject the exact classes and any subclasses of the configured
    // non-retriable exceptions (isInstance covers the exact-match case too).
    for (Class<? extends IOException> nonRetriable : nonRetriableExceptions) {
        if (nonRetriable.isInstance(exception)) {
            return false;
        }
    }
    // A cancelled request must never be replayed.
    if (request instanceof CancellableDependency
            && ((CancellableDependency) request).isCancelled()) {
        return false;
    }
    // Retry if the request is considered idempotent
    return Method.isIdempotent(request.getMethod());
}
|
@Test
public void noRetryOnConnectTimeout() {
    // SocketTimeoutException is configured as non-retriable, so even an
    // idempotent GET must not be retried.
    HttpGet request = new HttpGet("/");
    assertThat(retryStrategy.retryRequest(request, new SocketTimeoutException(), 1, null))
        .isFalse();
}
|
/**
 * Builds {@link ResourceHints} from the {@code --resourceHints} pipeline
 * options.
 *
 * <p>Each hint has the form {@code name=value}, where {@code name} is either
 * a known short hint name or a full {@code beam:resources:} URN. Unknown
 * short names are rejected to catch typos.
 *
 * @param options pipeline options carrying {@link ResourceHintsOptions}
 * @return the parsed resource hints
 * @throws IllegalArgumentException on an unparsable or unknown hint
 */
public static ResourceHints fromOptions(PipelineOptions options) {
    ResourceHintsOptions hintsOptions = options.as(ResourceHintsOptions.class);
    ResourceHints hints = create();
    Splitter keyValueSplitter = Splitter.on('=').limit(2);
    for (String rawHint : hintsOptions.getResourceHints()) {
        List<String> keyAndValue = keyValueSplitter.splitToList(rawHint);
        if (keyAndValue.size() != 2) {
            throw new IllegalArgumentException("Unparsable resource hint: " + rawHint);
        }
        String nameOrUrn = keyAndValue.get(0);
        String stringValue = keyAndValue.get(1);
        String urn = hintNameToUrn.get(nameOrUrn);
        if (urn == null) {
            // Allow unknown hints to be passed, but validate a little bit to prevent typos.
            if (!nameOrUrn.startsWith("beam:resources:")) {
                throw new IllegalArgumentException("Unknown resource hint: " + rawHint);
            }
            urn = nameOrUrn;
        }
        // Unknown URNs fall back to a plain string-valued hint.
        ResourceHint value =
            Preconditions.checkNotNull(parsers.getOrDefault(urn, StringHint::new)).apply(stringValue);
        hints = hints.withHint(urn, value);
    }
    return hints;
}
|
@Test
public void testFromOptions() {
    // Short hint names ("minRam") and raw beam:resources: URNs may be mixed.
    ResourceHintsOptions options =
        PipelineOptionsFactory.fromArgs(
                "--resourceHints=minRam=1KB", "--resourceHints=beam:resources:bar=foo")
            .as(ResourceHintsOptions.class);
    assertEquals(
        ResourceHints.fromOptions(options),
        ResourceHints.create()
            .withMinRam(1000)
            .withHint("beam:resources:bar", new ResourceHints.StringHint("foo")));
    // snake_case aliases of the known hint names must also be accepted.
    options =
        PipelineOptionsFactory.fromArgs(
                "--resourceHints=min_ram=1KB",
                "--resourceHints=accelerator=foo",
                "--resourceHints=cpu_count=4")
            .as(ResourceHintsOptions.class);
    ResourceHints fromOptions = ResourceHints.fromOptions(options);
    ResourceHints expect =
        ResourceHints.create().withMinRam(1000).withAccelerator("foo").withCPUCount(4);
    assertEquals(fromOptions, expect);
}
|
/**
 * Reports whether the given content node may still have merges pending in a
 * bucket space.
 *
 * <p>Conservative by design: when distributor stats are incomplete or the
 * node is unknown, the answer is {@code true} (merges may be pending).
 *
 * @param bucketSpace the bucket space to check
 * @param contentNodeIndex index of the content node
 * @return {@code true} unless the stats prove no merges are pending
 */
@Override
public boolean mayHaveMergesPending(String bucketSpace, int contentNodeIndex) {
    if (!stats.hasUpdatesFromAllDistributors()) {
        // Without reports from every distributor we cannot rule merges out.
        return true;
    }
    ContentNodeStats nodeStats = stats.getStats().getNodeStats(contentNodeIndex);
    if (nodeStats == null) {
        // Unknown node: assume merges may be pending.
        return true;
    }
    ContentNodeStats.BucketSpaceStats spaceStats = nodeStats.getBucketSpace(bucketSpace);
    return spaceStats != null && spaceStats.mayHaveBucketsPending(minMergeCompletionRatio);
}
|
@Test
void unknown_bucket_space_has_no_merges_pending() {
    // Buckets are pending in the fixture's known space only; an unknown
    // bucket space ("global") yields no pending merges.
    Fixture f = Fixture.fromBucketsPending(1);
    assertFalse(f.mayHaveMergesPending("global", 1));
}
|
/**
 * Rack-aware standby task assignment: for each stateful task, standbys are
 * placed on clients whose configured tag values differ from the client that
 * owns the active task. Standbys that cannot be placed this way are assigned
 * to the least-loaded clients afterwards.
 *
 * @param clients all client states, keyed by process id
 * @param allTaskIds all task ids (unused here beyond the interface contract)
 * @param statefulTaskIds tasks that need standby replicas
 * @param configs assignment configuration (standby count, rack-aware tags)
 * @return always {@code false}: standby assignment never requires a
 *         follow-up probing rebalance
 */
@Override
public boolean assign(final Map<ProcessId, ClientState> clients,
                      final Set<TaskId> allTaskIds,
                      final Set<TaskId> statefulTaskIds,
                      final AssignmentConfigs configs) {
    final int numStandbyReplicas = configs.numStandbyReplicas();
    final Set<String> rackAwareAssignmentTags = new HashSet<>(tagsFunction.apply(configs));
    // Tracks how many standbys each stateful task still needs.
    final Map<TaskId, Integer> tasksToRemainingStandbys = computeTasksToRemainingStandbys(
        numStandbyReplicas,
        statefulTaskIds
    );
    final Map<String, Set<String>> tagKeyToValues = new HashMap<>();
    final Map<TagEntry, Set<ProcessId>> tagEntryToClients = new HashMap<>();
    fillClientsTagStatistics(clients, tagEntryToClients, tagKeyToValues);
    final ConstrainedPrioritySet standbyTaskClientsByTaskLoad = createLeastLoadedPrioritySetConstrainedByAssignedTask(clients);
    final Map<TaskId, ProcessId> pendingStandbyTasksToClientId = new HashMap<>();
    for (final TaskId statefulTaskId : statefulTaskIds) {
        for (final Map.Entry<ProcessId, ClientState> entry : clients.entrySet()) {
            final ProcessId clientId = entry.getKey();
            final ClientState clientState = entry.getValue();
            // Place standbys relative to the client owning the active task,
            // preferring clients with different rack-awareness tag values.
            if (clientState.activeTasks().contains(statefulTaskId)) {
                assignStandbyTasksToClientsWithDifferentTags(
                    numStandbyReplicas,
                    standbyTaskClientsByTaskLoad,
                    statefulTaskId,
                    clientId,
                    rackAwareAssignmentTags,
                    clients,
                    tasksToRemainingStandbys,
                    tagKeyToValues,
                    tagEntryToClients,
                    pendingStandbyTasksToClientId
                );
            }
        }
    }
    // Fallback: any standbys that could not honor the tag constraints go to
    // the least-loaded clients.
    if (!tasksToRemainingStandbys.isEmpty()) {
        assignPendingStandbyTasksToLeastLoadedClients(clients,
                                                      numStandbyReplicas,
                                                      standbyTaskClientsByTaskLoad,
                                                      tasksToRemainingStandbys);
    }
    // returning false, because standby task assignment will never require a follow-up probing rebalance.
    return false;
}
|
/**
 * All active tasks live on CLUSTER_1; with 2 standby replicas per task and
 * cluster+zone rack-awareness tags, every standby must land on a different
 * cluster AND a different zone than its active (and the other standby).
 */
@Test
public void shouldDistributeStandbyTasksWhenActiveTasksAreLocatedOnSameCluster() {
    // Clients PID_1..PID_3 hold the actives (CLUSTER_1, zones 1-3); the other
    // six clients on CLUSTER_2/CLUSTER_3 are empty candidates for standbys.
    final Map<ProcessId, ClientState> clientStates = mkMap(
        mkEntry(PID_1, createClientStateWithCapacity(PID_1, 2, mkMap(mkEntry(ZONE_TAG, ZONE_1), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_0_0, TASK_1_0)),
        mkEntry(PID_2, createClientStateWithCapacity(PID_2, 2, mkMap(mkEntry(ZONE_TAG, ZONE_2), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_0_1, TASK_1_1)),
        mkEntry(PID_3, createClientStateWithCapacity(PID_3, 2, mkMap(mkEntry(ZONE_TAG, ZONE_3), mkEntry(CLUSTER_TAG, CLUSTER_1)), TASK_0_2, TASK_1_2)),
        mkEntry(PID_4, createClientStateWithCapacity(PID_4, 2, mkMap(mkEntry(ZONE_TAG, ZONE_1), mkEntry(CLUSTER_TAG, CLUSTER_2)))),
        mkEntry(PID_5, createClientStateWithCapacity(PID_5, 2, mkMap(mkEntry(ZONE_TAG, ZONE_2), mkEntry(CLUSTER_TAG, CLUSTER_2)))),
        mkEntry(PID_6, createClientStateWithCapacity(PID_6, 2, mkMap(mkEntry(ZONE_TAG, ZONE_3), mkEntry(CLUSTER_TAG, CLUSTER_2)))),
        mkEntry(PID_7, createClientStateWithCapacity(PID_7, 2, mkMap(mkEntry(ZONE_TAG, ZONE_1), mkEntry(CLUSTER_TAG, CLUSTER_3)))),
        mkEntry(PID_8, createClientStateWithCapacity(PID_8, 2, mkMap(mkEntry(ZONE_TAG, ZONE_2), mkEntry(CLUSTER_TAG, CLUSTER_3)))),
        mkEntry(PID_9, createClientStateWithCapacity(PID_9, 2, mkMap(mkEntry(ZONE_TAG, ZONE_3), mkEntry(CLUSTER_TAG, CLUSTER_3))))
    );
    final Set<TaskId> allActiveTasks = findAllActiveTasks(clientStates);
    final AssignmentConfigs assignmentConfigs = newAssignmentConfigs(2, ZONE_TAG, CLUSTER_TAG);
    standbyTaskAssignor.assign(clientStates, allActiveTasks, allActiveTasks, assignmentConfigs);
    // 6 tasks * 2 standbys = 12 standbys spread over the six empty clients:
    // active owners get none, every other client exactly two.
    assertTrue(clientStates.values().stream().allMatch(ClientState::reachedCapacity));
    Stream.of(PID_1, PID_2, PID_3).forEach(client -> assertStandbyTaskCountForClientEqualsTo(clientStates, client, 0));
    Stream.of(PID_4, PID_5, PID_6, PID_7, PID_8, PID_9).forEach(client -> assertStandbyTaskCountForClientEqualsTo(clientStates, client, 2));
    assertTotalNumberOfStandbyTasksEqualsTo(clientStates, 12);
    // For each task the two standbys must form one of the valid
    // cluster/zone-disjoint pairs relative to the active's location.
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_0_0,
            clientStates,
            asList(
                mkSet(PID_9, PID_5), mkSet(PID_6, PID_8)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_1_0,
            clientStates,
            asList(
                mkSet(PID_9, PID_5), mkSet(PID_6, PID_8)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_0_1,
            clientStates,
            asList(
                mkSet(PID_4, PID_9), mkSet(PID_6, PID_7)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_1_1,
            clientStates,
            asList(
                mkSet(PID_4, PID_9), mkSet(PID_6, PID_7)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_0_2,
            clientStates,
            asList(
                mkSet(PID_5, PID_7), mkSet(PID_4, PID_8)
            )
        )
    );
    assertTrue(
        standbyClientsHonorRackAwareness(
            TASK_1_2,
            clientStates,
            asList(
                mkSet(PID_5, PID_7), mkSet(PID_4, PID_8)
            )
        )
    );
}
|
/**
 * FEEL {@code contains(string, match)}: tests whether {@code string}
 * contains {@code match} as a substring.
 *
 * @param string the string to search in; {@code null} is an error
 * @param match the substring to look for; {@code null} is an error
 * @return a boolean result, or an {@link InvalidParametersEvent} error when
 *         either argument is {@code null}
 */
public FEELFnResult<Boolean> invoke(@ParameterName("string") String string, @ParameterName("match") String match) {
    if ( string == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null"));
    }
    if ( match == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "match", "cannot be null"));
    }
    return FEELFnResult.ofResult(string.contains(match));
}
|
@Test
void invokeParamsNull() {
    // A null in either (or both) parameters must yield an
    // InvalidParametersEvent error rather than a boolean result.
    FunctionTestUtil.assertResultError(containsFunction.invoke((String) null, null), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(containsFunction.invoke(null, "test"), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(containsFunction.invoke("test", null), InvalidParametersEvent.class);
}
|
public static String processPattern(String pattern, TbMsg tbMsg) {
try {
String result = processPattern(pattern, tbMsg.getMetaData());
JsonNode json = JacksonUtil.toJsonNode(tbMsg.getData());
if (json.isObject()) {
Matcher matcher = DATA_PATTERN.matcher(result);
while (matcher.find()) {
String group = matcher.group(2);
String[] keys = group.split("\\.");
JsonNode jsonNode = json;
for (String key : keys) {
if (!StringUtils.isEmpty(key) && jsonNode != null) {
jsonNode = jsonNode.get(key);
} else {
jsonNode = null;
break;
}
}
if (jsonNode != null && jsonNode.isValueNode()) {
result = result.replace(formatDataVarTemplate(group), jsonNode.asText());
}
}
}
return result;
} catch (Exception e) {
throw new RuntimeException("Failed to process pattern!", e);
}
}
|
@Test
public void testSameKeysReplacement() {
String pattern = "ABC ${key} $[key]";
TbMsgMetaData md = new TbMsgMetaData();
md.putValue("key", "metadata_value");
ObjectNode node = JacksonUtil.newObjectNode();
node.put("key", "data_value");
TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, TenantId.SYS_TENANT_ID, md, JacksonUtil.toString(node));
String result = TbNodeUtils.processPattern(pattern, msg);
Assertions.assertEquals("ABC metadata_value data_value", result);
}
|
public byte[] readBytes()
{
byte[] bytes = slice.getBytes();
offset = slice.length();
return bytes;
}
|
@Test
public void testReadBytes()
{
int numElements = 100;
Slice slice = Slices.allocate(2 * numElements);
byte[] expected = new byte[2 * numElements];
int offset = 0;
for (int i = 0; i < numElements; i++) {
String str = "" + i;
slice.setBytes(offset, str.getBytes());
int length = str.getBytes().length;
System.arraycopy(str.getBytes(), 0, expected, offset, length);
offset += length;
}
SimpleSliceInputStream simpleSliceInputStream = new SimpleSliceInputStream(slice);
byte[] actual = simpleSliceInputStream.readBytes();
assertEquals(actual, expected);
}
|
  /**
   * Returns the pre-computed result metadata (data type / single-value flag) for this transform.
   */
  @Override
  public TransformResultMetadata getResultMetadata() {
    return _resultMetadata;
  }
|
@Test
public void testArrayIndexOfAllInt() {
ExpressionContext expression = RequestContextUtils.getExpression(
String.format("array_indexes_of_int(%s, 0)", INT_MONO_INCREASING_MV_1));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
assertTrue(transformFunction instanceof ScalarTransformFunctionWrapper);
assertEquals(transformFunction.getResultMetadata().getDataType(), DataType.INT);
assertFalse(transformFunction.getResultMetadata().isSingleValue());
int[][] expectedValues = new int[NUM_ROWS][];
for (int i = 0; i < NUM_ROWS; i++) {
int[] expectedValue = {0};
expectedValues[i] = expectedValue;
}
testTransformFunctionMV(transformFunction, expectedValues);
}
|
@Override
public RedisClusterNode clusterGetNodeForKey(byte[] key) {
int slot = executorService.getConnectionManager().calcSlot(key);
return clusterGetNodeForSlot(slot);
}
|
@Test
public void testClusterGetNodeForKey() {
RedisClusterNode node = connection.clusterGetNodeForKey("123".getBytes());
assertThat(node).isNotNull();
}
|
    /**
     * Moves to the next result row by delegating to the underlying proxy backend handler.
     *
     * @return whether another row is available
     * @throws SQLException SQL exception propagated from the backend handler
     */
    @Override
    public boolean next() throws SQLException {
        return proxyBackendHandler.next();
    }
|
    /** {@code next()} must mirror the backend handler: first {@code true}, then {@code false}. */
    @Test
    void assertNext() throws SQLException {
        when(proxyBackendHandler.next()).thenReturn(true, false);
        assertTrue(queryExecutor.next());
        assertFalse(queryExecutor.next());
    }
|
static boolean objectIsAcyclic(Object object)
{
if (object == null)
{
return true;
}
Class<?> klass = object.getClass();
if (isPrimitiveClass(klass))
{
return true;
}
else if (isComplexClass(klass))
{
DataComplex complex = (DataComplex) object;
try
{
Data.traverse(complex, new TraverseCallback() {});
return true;
}
catch (IOException e)
{
return false;
}
}
else
{
throw new IllegalStateException("Object of unknown type: " + object);
}
}
|
  /**
   * Exercises cycle detection on DataMap/DataList graphs: building a DAG (with
   * shared nodes) succeeds, while every add/put that would introduce a reference
   * cycle must throw IllegalArgumentException and leave the graph acyclic.
   */
  @Test
  public void testNoCyclesOnAddAndPut()
  {
    assertTrue(Data.objectIsAcyclic(true));
    assertTrue(Data.objectIsAcyclic(1));
    assertTrue(Data.objectIsAcyclic(1L));
    assertTrue(Data.objectIsAcyclic(1.0f));
    assertTrue(Data.objectIsAcyclic(1.0));
    assertTrue(Data.objectIsAcyclic("string"));
    assertTrue(Data.objectIsAcyclic(new DataMap()));
    assertTrue(Data.objectIsAcyclic(new DataList()));
    // Build a DAG: a -> {b, c}
    DataMap a = new DataMap();
    DataList b = new DataList();
    DataMap c = new DataMap();
    a.put("b", b);
    a.put("c", c);
    assertTrue(Data.objectIsAcyclic(a));
    assertTrue(Data.objectIsAcyclic(b));
    assertTrue(Data.objectIsAcyclic(c));
    // dm is shared by b and c (still a DAG, not a cycle)
    DataMap dm = new DataMap();
    b.add(dm);
    c.put("d", dm);
    assertTrue(Data.objectIsAcyclic(a));
    assertTrue(Data.objectIsAcyclic(b));
    assertTrue(Data.objectIsAcyclic(c));
    assertTrue(Data.objectIsAcyclic(dm));
    DataList e = new DataList();
    DataMap f = new DataMap();
    dm.put("e", e);
    dm.put("f", f);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to e
    addAndExpectIllegalArgumentException(e, e);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to dm
    addAndExpectIllegalArgumentException(e, dm);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to c
    addAndExpectIllegalArgumentException(e, c);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to b
    addAndExpectIllegalArgumentException(e, b);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to a
    addAndExpectIllegalArgumentException(e, a);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to f
    putAndExpectIllegalArgumentException(f, "f", f);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to dm
    putAndExpectIllegalArgumentException(f, "d", dm);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to c
    putAndExpectIllegalArgumentException(f, "c", c);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to b
    putAndExpectIllegalArgumentException(f, "b", b);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to a
    putAndExpectIllegalArgumentException(f, "a", a);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from dm to dm
    putAndExpectIllegalArgumentException(dm, "d", dm);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from dm to c
    putAndExpectIllegalArgumentException(dm, "c", c);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from dm to b
    putAndExpectIllegalArgumentException(dm, "b", b);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from dm to a
    putAndExpectIllegalArgumentException(dm, "a", a);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from c to c
    putAndExpectIllegalArgumentException(c, "c", c);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from c to a
    putAndExpectIllegalArgumentException(c, "a", a);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from b to b
    addAndExpectIllegalArgumentException(b, b);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from b to a
    addAndExpectIllegalArgumentException(b, a);
    assertTrue(Data.objectIsAcyclic(a));
    // Rewire: replace dm with a shared DataList dl and repeat the same checks.
    DataList dl = new DataList();
    b.clear();
    b.add(dl);
    c.put("d", dl);
    assertTrue(Data.objectIsAcyclic(a));
    assertTrue(Data.objectIsAcyclic(b));
    assertTrue(Data.objectIsAcyclic(c));
    assertTrue(Data.objectIsAcyclic(dl));
    dl.add(e);
    dl.add(f);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to e
    addAndExpectIllegalArgumentException(e, e);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to dl
    addAndExpectIllegalArgumentException(e, dl);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to c
    addAndExpectIllegalArgumentException(e, c);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to b
    addAndExpectIllegalArgumentException(e, b);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from e to a
    addAndExpectIllegalArgumentException(e, a);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to f
    putAndExpectIllegalArgumentException(f, "f", f);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to dl
    putAndExpectIllegalArgumentException(f, "d", dl);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to c
    putAndExpectIllegalArgumentException(f, "c", c);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to b
    putAndExpectIllegalArgumentException(f, "b", b);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from f to a
    putAndExpectIllegalArgumentException(f, "a", a);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from dl to dl
    addAndExpectIllegalArgumentException(dl, dl);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from dl to c
    addAndExpectIllegalArgumentException(dl, c);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from dl to b
    addAndExpectIllegalArgumentException(dl, b);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from dl to a
    addAndExpectIllegalArgumentException(dl, a);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from c to c
    putAndExpectIllegalArgumentException(c, "c", c);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from c to a
    putAndExpectIllegalArgumentException(c, "a", a);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from b to b
    addAndExpectIllegalArgumentException(b, b);
    assertTrue(Data.objectIsAcyclic(a));
    // loop from b to a
    addAndExpectIllegalArgumentException(b, a);
    assertTrue(Data.objectIsAcyclic(a));
  }
|
    /** Returns the operator certificates supplied with the prepare request. */
    public List<X509Certificate> operatorCertificates() {
        return operatorCertificates;
    }
|
@Test
public void testOperatorCertificates() throws IOException {
Slime slime = SlimeUtils.jsonToSlime(json);
Cursor cursor = slime.get();
Cursor array = cursor.setArray(PrepareParams.OPERATOR_CERTIFICATES);
X509Certificate certificate = X509CertificateUtils.createSelfSigned("cn=myservice", Duration.ofDays(1)).certificate();
array.addString(X509CertificateUtils.toPem(certificate));
PrepareParams prepareParams = PrepareParams.fromJson(SlimeUtils.toJsonBytes(slime), TenantName.from("foo"), Duration.ofSeconds(60));
assertEquals(1, prepareParams.operatorCertificates().size());
assertEquals(certificate, prepareParams.operatorCertificates().get(0));
}
|
public static String asString(Duration duration) {
long numDays = duration.toDays();
long numHours = duration.toHours() % 24;
long numMinutes = duration.toMinutes() % 60;
long numSeconds = duration.getSeconds() % 60;
String output = String.format("%d:%02d:%02d", numHours, numMinutes, numSeconds);
if (numDays > 0L) {
return numDays + " days, " + output;
} else {
return output;
}
}
|
@Test
public void testDurationFormatting_2() {
//2 days, 13 hours, 22 minutes, and 15 seconds
int SECONDS_PER_DAY = 24 * 60 * 60;
long numSeconds = 2 * SECONDS_PER_DAY + 13 * 3600 + 22 * 60 + 15;
Duration dur = Duration.ofSeconds(numSeconds);
assertEquals(
"2 days, 13:22:15",
TimeUtils.asString(dur)
);
}
|
static Object parseCell(String cell, Schema.Field field) {
Schema.FieldType fieldType = field.getType();
try {
switch (fieldType.getTypeName()) {
case STRING:
return cell;
case INT16:
return Short.parseShort(cell);
case INT32:
return Integer.parseInt(cell);
case INT64:
return Long.parseLong(cell);
case BOOLEAN:
return Boolean.parseBoolean(cell);
case BYTE:
return Byte.parseByte(cell);
case DECIMAL:
return new BigDecimal(cell);
case DOUBLE:
return Double.parseDouble(cell);
case FLOAT:
return Float.parseFloat(cell);
case DATETIME:
return Instant.parse(cell);
default:
throw new UnsupportedOperationException(
"Unsupported type: " + fieldType + ", consider using withCustomRecordParsing");
}
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(
e.getMessage() + " field " + field.getName() + " was received -- type mismatch");
}
}
|
@Test
public void givenValidShortCell_parses() {
Short shortNum = Short.parseShort("36");
DefaultMapEntry cellToExpectedValue = new DefaultMapEntry("36", shortNum);
Schema schema =
Schema.builder()
.addInt32Field("an_integer")
.addInt64Field("a_long")
.addInt16Field("a_short")
.build();
assertEquals(
cellToExpectedValue.getValue(),
CsvIOParseHelpers.parseCell(
cellToExpectedValue.getKey().toString(), schema.getField("a_short")));
}
|
  /**
   * Deserializes the given bytes by delegating to {@link #tryDeserialize} and
   * unwrapping its result, so any failure recorded there is rethrown to the caller.
   */
  @Override
  public T deserialize(final String topic, final byte[] bytes) {
    return tryDeserialize(topic, bytes).get();
  }
|
  /** The deserializer must propagate the delegate's exception unchanged (same instance). */
  @Test
  public void shouldThrowIfDelegateThrows() {
    // Given:
    when(delegate.deserialize(any(), any())).thenThrow(ERROR);
    // When:
    final RuntimeException e = assertThrows(
        RuntimeException.class,
        () -> deserializer.deserialize("t", SOME_BYTES)
    );
    // Then:
    assertThat(e, is(ERROR));
  }
|
  /**
   * Confirms a previously reserved username hash for the given account.
   *
   * <p>Idempotent: if {@code reservedUsernameHash} is already the account's confirmed
   * username hash, the account is returned unchanged. If no matching reservation
   * exists, the future fails with {@link UsernameReservationNotFoundException}.
   *
   * @param account the account confirming its reservation
   * @param reservedUsernameHash the hash that was previously reserved
   * @param encryptedUsername optional ciphertext stored alongside the confirmed hash
   * @return a future yielding the updated account
   */
  public CompletableFuture<Account> confirmReservedUsernameHash(final Account account, final byte[] reservedUsernameHash, @Nullable final byte[] encryptedUsername) {
    if (account.getUsernameHash().map(currentUsernameHash -> Arrays.equals(currentUsernameHash, reservedUsernameHash)).orElse(false)) {
      // the client likely already succeeded and is retrying
      return CompletableFuture.completedFuture(account);
    }
    if (!account.getReservedUsernameHash().map(oldHash -> Arrays.equals(oldHash, reservedUsernameHash)).orElse(false)) {
      // no such reservation existed, either there was no previous call to reserveUsername
      // or the reservation changed
      return CompletableFuture.failedFuture(new UsernameReservationNotFoundException());
    }
    // Drop the cached account record before applying the update (see the post-update cleanup below)
    return redisDeleteAsync(account)
        .thenCompose(ignored -> updateWithRetriesAsync(
            account,
            a -> true,
            a -> accounts.confirmUsernameHash(a, reservedUsernameHash, encryptedUsername),
            () -> accounts.getByAccountIdentifierAsync(account.getUuid()).thenApply(Optional::orElseThrow),
            AccountChangeValidator.USERNAME_CHANGE_VALIDATOR,
            MAX_UPDATE_ATTEMPTS
        ))
        .whenComplete((updatedAccount, throwable) -> {
          if (throwable == null) {
            // Make a best effort to clear any stale data that may have been cached while this operation was in progress
            redisDeleteAsync(updatedAccount);
          }
        });
  }
|
  /** Confirming a hash that already is the account's current username hash must be a no-op retry. */
  @Test
  void testConfirmReservedRetry() throws UsernameHashNotAvailableException, UsernameReservationNotFoundException {
    final Account account = AccountsHelper.generateTestAccount("+18005551234", UUID.randomUUID(), UUID.randomUUID(), new ArrayList<>(), new byte[UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH]);
    account.setUsernameHash(USERNAME_HASH_1);
    // reserved username already set, should be treated as a replay
    accountsManager.confirmReservedUsernameHash(account, USERNAME_HASH_1, ENCRYPTED_USERNAME_1).join();
    verifyNoInteractions(accounts);
  }
|
@Override
public List<String> findRolesLikeRoleName(String role) {
String sql = "SELECT role FROM roles WHERE role LIKE ?";
List<String> users = this.jt.queryForList(sql, new String[] {String.format("%%%s%%", role)}, String.class);
return users;
}
|
@Test
void testFindRolesLikeRoleName() {
List<String> role = externalRolePersistService.findRolesLikeRoleName("role");
assertEquals(0, role.size());
}
|
  /**
   * Configures the Tomcat instance: silences Tomcat's own logging, installs a
   * lifecycle logger on the service, and wires up Logback-based access logging.
   */
  void configure(Tomcat tomcat, Props props) {
    tomcat.setSilent(true);
    tomcat.getService().addLifecycleListener(new LifecycleLogger(LoggerFactory.getLogger(TomcatAccessLog.class)));
    configureLogbackAccess(tomcat, props);
  }
|
@Test
public void enable_access_logs_by_Default() throws Exception {
Tomcat tomcat = mock(Tomcat.class, Mockito.RETURNS_DEEP_STUBS);
Props props = new Props(new Properties());
props.set(PATH_LOGS.getKey(), temp.newFolder().getAbsolutePath());
underTest.configure(tomcat, props);
verify(tomcat.getHost().getPipeline()).addValve(any(ProgrammaticLogbackValve.class));
}
|
public static String toJavaCode(
final String argName,
final Class<?> argType,
final String lambdaBody
) {
return toJavaCode(ImmutableList.of(new Pair<>(argName, argType)), lambdaBody);
}
|
@Test
public void shouldGenerateBiFunction() {
// Given:
final Pair<String, Class<?>> argName1 = new Pair<>("fred", Long.class);
final Pair<String, Class<?>> argName2 = new Pair<>("bob", Long.class);
final List<Pair<String, Class<?>>> argList = ImmutableList.of(argName1, argName2);
// When:
final String javaCode = LambdaUtil.toJavaCode(argList, "fred + bob + 2");
// Then:
final Object result = CodeGenTestUtil.cookAndEval(javaCode, BiFunction.class);
assertThat(result, is(instanceOf(BiFunction.class)));
assertThat(((BiFunction<Object, Object, Object>) result).apply(10L, 15L), is(27L));
}
|
@Override
public URL select(List<URL> urls, String serviceId, String tag, String requestKey) {
String key = tag == null ? serviceId : serviceId + "|" + tag;
// search for a URL in the same ip first
List<URL> localUrls = searchLocalUrls(urls, ip);
if(localUrls.size() > 0) {
if(localUrls.size() == 1) {
return localUrls.get(0);
} else {
// round robin within localUrls
return doSelect(localUrls, key);
}
} else {
// round robin within urls
return doSelect(urls, key);
}
}
|
@Test
public void testSelectFirstThenRoundRobin() throws Exception{
List<URL> urls = new ArrayList<>();
urls.add(new URLImpl("http", "127.0.0.10", 8081, "v1", new HashMap<String, String>()));
urls.add(new URLImpl("http", "127.0.0.10", 8082, "v1", new HashMap<String, String>()));
urls.add(new URLImpl("http", "127.0.0.10", 8083, "v1", new HashMap<String, String>()));
urls.add(new URLImpl("http", "127.0.0.10", 8084, "v1", new HashMap<String, String>()));
// no local host URL available, go round-robin
URL url = loadBalance.select(urls, "serviceId", "tag", null);
Assert.assertTrue(urls.contains(url));
}
|
  /**
   * Resolves the given expression to a concrete value using a fresh visitor.
   */
  public Object resolve(final Expression expression) {
    return new Visitor().process(expression, null);
  }
|
  /** Coercing an INTEGER literal into an ARRAY&lt;INTEGER&gt; field must fail with a descriptive error. */
  @Test
  public void shouldThrowIfCannotCoerce() {
    // Given:
    final SqlType type = SqlTypes.array(SqlTypes.INTEGER);
    final Expression exp = new IntegerLiteral(1);
    // When:
    final KsqlException e = assertThrows(
        KsqlException.class,
        () -> new GenericExpressionResolver(type, FIELD_NAME, registry, config,
            "insert value", false).resolve(exp));
    // Then:
    assertThat(e.getMessage(), containsString("Expected type ARRAY<INTEGER> for field `FOO` but got INTEGER(1)"));
  }
|
@Override
public String[] listFiles(URI fileUri, boolean recursive)
throws IOException {
ImmutableList.Builder<String> builder = ImmutableList.builder();
visitFiles(fileUri, recursive, s3Object -> {
// TODO: Looks like S3PinotFS filters out directories, inconsistent with the other implementations.
// Only add files and not directories
if (!s3Object.key().equals(fileUri.getPath()) && !s3Object.key().endsWith(DELIMITER)) {
builder.add(S3_SCHEME + fileUri.getHost() + DELIMITER + getNormalizedFileKey(s3Object));
}
});
String[] listedFiles = builder.build().toArray(new String[0]);
LOGGER.info("Listed {} files from URI: {}, is recursive: {}", listedFiles.length, fileUri, recursive);
return listedFiles;
}
|
@Test
public void testListFilesInFolderNonRecursive()
throws Exception {
String folder = "list-files";
String[] originalFiles = new String[]{"a-list-2.txt", "b-list-2.txt", "c-list-2.txt"};
for (String fileName : originalFiles) {
createEmptyFile(folder, fileName);
}
// Files in sub folders should be skipped.
createEmptyFile(folder + DELIMITER + "subfolder1", "a-sub-file.txt");
createEmptyFile(folder + DELIMITER + "subfolder2", "a-sub-file.txt");
String[] actualFiles = _s3PinotFS.listFiles(URI.create(String.format(FILE_FORMAT, SCHEME, BUCKET, folder)), false);
Assert.assertEquals(actualFiles.length, originalFiles.length);
actualFiles = Arrays.stream(actualFiles).filter(x -> x.contains("list-2")).toArray(String[]::new);
Assert.assertEquals(actualFiles.length, originalFiles.length);
Assert.assertTrue(Arrays.equals(Arrays.stream(originalFiles)
.map(fileName -> String.format(FILE_FORMAT, SCHEME, BUCKET, folder + DELIMITER + fileName)).toArray(),
actualFiles));
}
|
  /** Stops this node by delegating to {@code stop(true)}; the flag's meaning is defined by that overload. */
  @Override // NameNode
  public void stop() {
    stop(true);
  }
|
  /**
   * Starting a BackupNode with kerberos authentication configured but an empty
   * keytab must fail with an IOException mentioning secure mode, and the failed
   * BackupNode must not hold a namesystem. The security settings are restored in
   * the finally block so later tests are unaffected.
   */
  @Test
  public void startBackupNodeWithIncorrectAuthentication() throws IOException {
    Configuration c = new HdfsConfiguration();
    StartupOption startupOpt = StartupOption.CHECKPOINT;
    String dirs = getBackupNodeDir(startupOpt, 1);
    c.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,
        "hdfs://127.0.0.1:" + ServerSocketUtil.getPort(0, 100));
    c.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    c.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
    c.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
        -1); // disable block scanner
    c.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
    c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
        "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
        "127.0.0.1:0");
    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
        "127.0.0.1:0");
    NameNode nn;
    try {
      Configuration nnconf = new HdfsConfiguration(c);
      DFSTestUtil.formatNameNode(nnconf);
      nn = NameNode.createNameNode(new String[] {}, nnconf);
    } catch (IOException e) {
      LOG.info("IOException is thrown creating name node");
      throw e;
    }
    // Switch to kerberos with no keytab: BackupNode startup must now fail.
    c.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    c.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, "");
    BackupNode bn = null;
    try {
      bn = (BackupNode)NameNode.createNameNode(
          new String[] {startupOpt.getName()}, c);
      assertTrue("Namesystem in BackupNode should be null",
          bn.getNamesystem() == null);
      fail("Incorrect authentication setting should throw IOException");
    } catch (IOException e) {
      LOG.info("IOException thrown.", e);
      assertTrue(e.getMessage().contains("Running in secure mode"));
    } finally {
      if (nn != null) {
        nn.stop();
      }
      if (bn != null) {
        bn.stop();
      }
      SecurityUtil.setAuthenticationMethod(
          UserGroupInformation.AuthenticationMethod.SIMPLE, c);
      // reset security authentication
      UserGroupInformation.setConfiguration(c);
    }
  }
|
    /**
     * Computes, for every logical column, either its physical index in the table
     * source's produced data type or — for proctime/rowtime attributes — the
     * corresponding time-indicator marker (stream vs. batch markers selected via
     * {@code streamMarkers}).
     *
     * @param tableSource source providing the physical type and time attributes
     * @param logicalColumns logical schema columns to map
     * @param streamMarkers whether to emit stream (vs. batch) time attribute markers
     * @param nameRemapping maps logical column names to physical field names
     * @return per-column physical index or time-attribute marker
     */
    public static int[] computePhysicalIndicesOrTimeAttributeMarkers(
            TableSource<?> tableSource,
            List<TableColumn> logicalColumns,
            boolean streamMarkers,
            Function<String, String> nameRemapping) {
        Optional<String> proctimeAttribute = getProctimeAttribute(tableSource);
        List<String> rowtimeAttributes = getRowtimeAttributes(tableSource);
        // Time attributes have no physical counterpart: exclude them before computing physical indices.
        List<TableColumn> columnsWithoutTimeAttributes =
                logicalColumns.stream()
                        .filter(
                                col ->
                                        !rowtimeAttributes.contains(col.getName())
                                                && proctimeAttribute
                                                        .map(attr -> !attr.equals(col.getName()))
                                                        .orElse(true))
                        .collect(Collectors.toList());
        Map<TableColumn, Integer> columnsToPhysicalIndices =
                TypeMappingUtils.computePhysicalIndices(
                        columnsWithoutTimeAttributes.stream(),
                        tableSource.getProducedDataType(),
                        nameRemapping);
        return logicalColumns.stream()
                .mapToInt(
                        logicalColumn -> {
                            if (proctimeAttribute
                                    .map(attr -> attr.equals(logicalColumn.getName()))
                                    .orElse(false)) {
                                verifyTimeAttributeType(logicalColumn, "Proctime");
                                if (streamMarkers) {
                                    return TimeIndicatorTypeInfo.PROCTIME_STREAM_MARKER;
                                } else {
                                    return TimeIndicatorTypeInfo.PROCTIME_BATCH_MARKER;
                                }
                            } else if (rowtimeAttributes.contains(logicalColumn.getName())) {
                                verifyTimeAttributeType(logicalColumn, "Rowtime");
                                if (streamMarkers) {
                                    return TimeIndicatorTypeInfo.ROWTIME_STREAM_MARKER;
                                } else {
                                    return TimeIndicatorTypeInfo.ROWTIME_BATCH_MARKER;
                                }
                            } else {
                                return columnsToPhysicalIndices.get(logicalColumn);
                            }
                        })
                .toArray();
    }
|
    /**
     * With stream markers disabled, rowtime/proctime columns map to the batch
     * time-indicator markers (here -3 and -4) while the plain column keeps its
     * physical index (0).
     */
    @Test
    void testMappingWithBatchTimeAttributes() {
        TestTableSource tableSource =
                new TestTableSource(
                        DataTypes.BIGINT(), Collections.singletonList("rowtime"), "proctime");
        int[] indices =
                TypeMappingUtils.computePhysicalIndicesOrTimeAttributeMarkers(
                        tableSource,
                        TableSchema.builder()
                                .field("a", Types.LONG)
                                .field("rowtime", Types.SQL_TIMESTAMP)
                                .field("proctime", Types.SQL_TIMESTAMP)
                                .build()
                                .getTableColumns(),
                        false,
                        Function.identity());
        assertThat(indices).isEqualTo(new int[] {0, -3, -4});
    }
|
    /** Returns whether the underlying job has finished. */
    public boolean isFinished() {
        return job.isFinished();
    }
|
    /** A freshly constructed InsertOverwriteJobRunner must not report the job as finished. */
    @Test
    public void testInsertOverwrite() throws Exception {
        String sql = "insert overwrite t1 select * from t2";
        InsertStmt insertStmt = (InsertStmt) UtFrameUtils.parseStmtWithNewParser(sql, connectContext);
        StmtExecutor executor = new StmtExecutor(connectContext, insertStmt);
        Database database = GlobalStateMgr.getCurrentState().getDb("insert_overwrite_test");
        Table table = database.getTable("t1");
        Assert.assertTrue(table instanceof OlapTable);
        OlapTable olapTable = (OlapTable) table;
        InsertOverwriteJob insertOverwriteJob = new InsertOverwriteJob(100L, insertStmt, database.getId(), olapTable.getId(),
                WarehouseManager.DEFAULT_WAREHOUSE_ID);
        InsertOverwriteJobRunner runner = new InsertOverwriteJobRunner(insertOverwriteJob, connectContext, executor);
        Assert.assertFalse(runner.isFinished());
    }
|
    /**
     * Kicks off execution of the given tasklets: partitions them into cooperative
     * and blocking groups (each tasklet's {@code isCooperative()} is queried under
     * the job classloader) and submits each group to the corresponding executor.
     * Any failure during partitioning or submission completes the returned future
     * exceptionally.
     *
     * @param tasklets tasklets to execute
     * @param cancellationFuture future used to signal cancellation to the tracker
     * @param jobClassLoader classloader active while interrogating the tasklets
     * @return future tracking completion of the whole execution
     */
    CompletableFuture<Void> beginExecute(
            @Nonnull List<? extends Tasklet> tasklets,
            @Nonnull CompletableFuture<Void> cancellationFuture,
            @Nonnull ClassLoader jobClassLoader
    ) {
        final ExecutionTracker executionTracker = new ExecutionTracker(tasklets.size(), cancellationFuture);
        try {
            final Map<Boolean, List<Tasklet>> byCooperation =
                    tasklets.stream().collect(partitioningBy(
                            tasklet -> doWithClassLoader(jobClassLoader, tasklet::isCooperative)
                    ));
            submitCooperativeTasklets(executionTracker, jobClassLoader, byCooperation.get(true));
            submitBlockingTasklets(executionTracker, jobClassLoader, byCooperation.get(false));
        } catch (Throwable t) {
            executionTracker.future.internalCompleteExceptionally(t);
        }
        return executionTracker.future;
    }
|
    /** Cancelling the shared cancellation future must end execution early, with no tasklet completing. */
    @Test
    public void when_nonBlockingTaskletIsCancelled_then_completesEarly() {
        // Given
        final List<MockTasklet> tasklets =
                Stream.generate(() -> new MockTasklet().callsBeforeDone(Integer.MAX_VALUE))
                      .limit(100).collect(toList());
        // When
        CompletableFuture<Void> f = tes.beginExecute(tasklets, cancellationFuture, classLoader);
        cancellationFuture.cancel(true);
        // Then
        tasklets.forEach(MockTasklet::assertNotDone);
        assertThrows(CancellationException.class, f::get);
    }
|
    /** Delegates the warn-level check to the wrapped logger. */
    @Override
    public boolean isWarnEnabled() {
        return logger.isWarnEnabled();
    }
|
    /** isWarnEnabled() must be forwarded to the wrapped SLF4J logger. */
    @Test
    void isWarnEnabled() {
        jobRunrDashboardLogger.isWarnEnabled();
        verify(slfLogger).isWarnEnabled();
    }
|
    /**
     * Sets the file used by the registry.
     *
     * @param file the file path to use
     * @return this builder, for chaining
     */
    public RegistryBuilder file(String file) {
        this.file = file;
        return getThis();
    }
|
@Test
void file() {
RegistryBuilder builder = new RegistryBuilder();
builder.file("file");
Assertions.assertEquals("file", builder.build().getFile());
}
|
    /**
     * Builds (and, unless caching is disabled, caches) a javassist-generated proxy
     * class for the given interface and binds the supplied invoker to the new
     * instance's public {@code proxyInvoker} field.
     *
     * @param interfaceClass the interface to proxy; a non-interface class is rejected
     * @param proxyInvoker the invoker that receives the proxied calls
     * @return a proxy instance implementing {@code interfaceClass}
     */
    @Override
    @SuppressWarnings("unchecked")
    public <T> T getProxy(Class<T> interfaceClass, Invoker proxyInvoker) {
        StringBuilder debug = null;
        if (LOGGER.isDebugEnabled()) {
            debug = new StringBuilder();
        }
        try {
            Class clazz = null;
            if (!disableCache) {
                clazz = PROXY_CLASS_MAP.get(interfaceClass);
            }
            if (clazz == null) {
                // Generate the proxy class
                String interfaceName = ClassTypeUtils.getTypeStr(interfaceClass);
                ClassPool mPool = ClassPool.getDefault();
                mPool.appendClassPath(new LoaderClassPath(ClassLoaderUtils.getClassLoader(JavassistProxy.class)));
                CtClass mCtc = mPool.makeClass(interfaceName + "_proxy_" + counter.getAndIncrement());
                if (interfaceClass.isInterface()) {
                    mCtc.addInterface(mPool.get(interfaceName));
                } else {
                    throw new IllegalArgumentException(interfaceClass.getName() + " is not an interface");
                }
                // Extend java.lang.reflect.Proxy
                mCtc.setSuperclass(mPool.get(java.lang.reflect.Proxy.class.getName()));
                CtConstructor constructor = new CtConstructor(null, mCtc);
                constructor.setModifiers(Modifier.PUBLIC);
                constructor.setBody("{super(new " + UselessInvocationHandler.class.getName() + "());}");
                mCtc.addConstructor(constructor);
                List<String> fieldList = new ArrayList<String>();
                List<String> methodList = new ArrayList<String>();
                fieldList.add("public " + Invoker.class.getCanonicalName() + " proxyInvoker = null;");
                createMethod(interfaceClass, fieldList, methodList);
                for (String fieldStr : fieldList) {
                    if (LOGGER.isDebugEnabled()) {
                        debug.append(fieldStr).append("\n");
                    }
                    mCtc.addField(CtField.make(fieldStr, mCtc));
                }
                for (String methodStr : methodList) {
                    if (LOGGER.isDebugEnabled()) {
                        debug.append(methodStr).append("\n");
                    }
                    mCtc.addMethod(CtMethod.make(methodStr, mCtc));
                }
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("javassist proxy of interface: {} \r\n{}", interfaceClass,
                        debug != null ? debug.toString() : "");
                }
                // Under jdk 11+, `neighbour` param is required to apply LookUp.defineClass
                // and avoid reflect calling to Classloader.defineClass(), as calling
                // Classloader.defineClass() from unnamed module is prohibited, which may
                // provoke InaccessibleObjectException.
                clazz = mPool.toClass(mCtc, interfaceClass,
                    // use tccl as former
                    Thread.currentThread().getContextClassLoader(),
                    interfaceClass.getProtectionDomain());
                PROXY_CLASS_MAP.put(interfaceClass, clazz);
            }
            Object instance = clazz.newInstance();
            clazz.getField("proxyInvoker").set(instance, proxyInvoker);
            return (T) instance;
        } catch (Exception e) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("javassist proxy of interface: {} \r\n{}", interfaceClass,
                    debug != null ? debug.toString() : "");
            }
            throw new SofaRpcRuntimeException(LogCodes.getLog(LogCodes.ERROR_PROXY_CONSTRUCT, "javassist"), e);
        }
    }
|
    /**
     * End-to-end checks of the javassist proxy: abstract classes are rejected;
     * generated proxies implement the target interface (extending
     * java.lang.reflect.Proxy but not being a JDK dynamic proxy); toString,
     * hashCode, equals and method calls delegate to the bound invoker; and
     * business exceptions are swallowed while the RPC exception propagates.
     */
    @Test
    public void getProxy() throws Exception {
        JavassistProxy proxy = new JavassistProxy();
        AbstractTestClass testClass = null;
        try {
            testClass = proxy.getProxy(AbstractTestClass.class, new TestInvoker());
        } catch (Exception e) {
            LOGGER.info(e.getMessage());
        }
        Assert.assertNull(testClass);
        TestInvoker invoker = new TestInvoker();
        TestInterface testInterface = proxy.getProxy(TestInterface.class, invoker);
        Assert.assertNotNull(testInterface);
        Class clazz = testInterface.getClass().getInterfaces()[0];
        Assert.assertEquals(TestInterface.class, clazz);
        Assert.assertTrue(Proxy.class.isAssignableFrom(testInterface.getClass()));
        Assert.assertFalse(Proxy.isProxyClass(testInterface.getClass()));
        Assert.assertEquals(proxy.getInvoker(testInterface).getClass(), TestInvoker.class);
        Assert.assertEquals(testInterface.toString(), invoker.toString());
        Assert.assertEquals(testInterface.hashCode(), invoker.hashCode());
        TestInterface another1 = proxy.getProxy(TestInterface.class, invoker);
        TestInterface another2 = proxy.getProxy(TestInterface.class, new TestInvoker());
        Assert.assertFalse(testInterface.equals(invoker));
        Assert.assertFalse(testInterface.equals(another2));
        Assert.assertEquals(testInterface, another1);
        Assert.assertEquals(678, another1.sayNum(true));
        // The invoker must have observed the full method signature and arguments.
        SofaRequest request = invoker.getRequest();
        Assert.assertEquals(TestInterface.class.getCanonicalName(), request.getInterfaceName());
        Assert.assertEquals("sayNum", request.getMethodName());
        Assert.assertEquals("boolean", request.getMethodArgSigs()[0]);
        Assert.assertEquals(true, request.getMethodArgs()[0]);
        Assert.assertNotNull(request.getMethod());
        Assert.assertEquals("sayHello", another1.sayHello("xxxx"));
        another1.sayNoting();
        Assert.assertArrayEquals(new int[] { 6, 7, 8 }, another1.sayNums(null, new HashMap()));
        Assert.assertNull(another1.sayNum2(1.2D));
        boolean error = false;
        try {
            another1.throwbiz1();
        } catch (Throwable e) {
            error = true;
        }
        Assert.assertFalse(error);
        error = false;
        try {
            another1.throwbiz2();
        } catch (Throwable e) {
            error = true;
        }
        Assert.assertFalse(error);
        try {
            another1.throwRPC();
        } catch (Throwable e) {
            error = true;
        }
        Assert.assertTrue(error);
    }
|
public static List<Transformation<?>> optimize(List<Transformation<?>> transformations) {
final Map<Transformation<?>, Set<Transformation<?>>> outputMap =
buildOutputMap(transformations);
final LinkedHashSet<Transformation<?>> chainedTransformations = new LinkedHashSet<>();
final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet();
final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque(transformations);
while (!toTransformQueue.isEmpty()) {
final Transformation<?> transformation = toTransformQueue.poll();
if (!alreadyTransformed.contains(transformation)) {
alreadyTransformed.add(transformation);
final ChainInfo chainInfo = chainWithInputIfPossible(transformation, outputMap);
chainedTransformations.add(chainInfo.newTransformation);
chainedTransformations.removeAll(chainInfo.oldTransformations);
alreadyTransformed.addAll(chainInfo.oldTransformations);
// Add the chained transformation and its inputs to the to-optimize list
toTransformQueue.add(chainInfo.newTransformation);
toTransformQueue.addAll(chainInfo.newTransformation.getInputs());
}
}
return new ArrayList<>(chainedTransformations);
}
|
@Test
void testChainingTwoInputOperators() {
    // Builds a pipeline of five Python operators:
    //   (source1, source2) -> f1 (keyed co-process) -> f2 -> f3 -> f4 (keyed) -> f5
    // and verifies the optimizer chains f1+f2+f3 into one two-input
    // transformation and f4+f5 into one one-input transformation.
    ExternalPythonKeyedCoProcessOperator<?> keyedCoProcessOperator1 =
            createCoKeyedProcessOperator(
                    "f1",
                    new RowTypeInfo(Types.INT(), Types.STRING()),
                    new RowTypeInfo(Types.INT(), Types.INT()),
                    Types.STRING());
    ExternalPythonProcessOperator<?, ?> processOperator1 =
            createProcessOperator(
                    "f2", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING());
    ExternalPythonProcessOperator<?, ?> processOperator2 =
            createProcessOperator(
                    "f3", new RowTypeInfo(Types.INT(), Types.INT()), Types.LONG());
    ExternalPythonKeyedProcessOperator<?> keyedProcessOperator2 =
            createKeyedProcessOperator(
                    "f4", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING());
    ExternalPythonProcessOperator<?, ?> processOperator3 =
            createProcessOperator(
                    "f5", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING());

    Transformation<?> sourceTransformation1 = mock(SourceTransformation.class);
    Transformation<?> sourceTransformation2 = mock(SourceTransformation.class);
    TwoInputTransformation<?, ?, ?> keyedCoProcessTransformation =
            new TwoInputTransformation(
                    sourceTransformation1,
                    sourceTransformation2,
                    "keyedCoProcess",
                    keyedCoProcessOperator1,
                    keyedCoProcessOperator1.getProducedType(),
                    2);
    Transformation<?> processTransformation1 =
            new OneInputTransformation(
                    keyedCoProcessTransformation,
                    "process",
                    processOperator1,
                    processOperator1.getProducedType(),
                    2);
    Transformation<?> processTransformation2 =
            new OneInputTransformation(
                    processTransformation1,
                    "process",
                    processOperator2,
                    processOperator2.getProducedType(),
                    2);
    OneInputTransformation<?, ?> keyedProcessTransformation =
            new OneInputTransformation(
                    processTransformation2,
                    "keyedProcess",
                    keyedProcessOperator2,
                    keyedProcessOperator2.getProducedType(),
                    2);
    Transformation<?> processTransformation3 =
            new OneInputTransformation(
                    keyedProcessTransformation,
                    "process",
                    processOperator3,
                    processOperator3.getProducedType(),
                    2);

    List<Transformation<?>> transformations = new ArrayList<>();
    transformations.add(sourceTransformation1);
    transformations.add(sourceTransformation2);
    transformations.add(keyedCoProcessTransformation);
    transformations.add(processTransformation1);
    transformations.add(processTransformation2);
    transformations.add(keyedProcessTransformation);
    transformations.add(processTransformation3);

    List<Transformation<?>> optimized =
            PythonOperatorChainingOptimizer.optimize(transformations);
    // 2 sources + 2 chained transformations remain out of the original 7.
    assertThat(optimized).hasSize(4);

    // First chain: f1+f2+f3 — keeps the two source input types, produces f3's type.
    TwoInputTransformation<?, ?, ?> chainedTransformation1 =
            (TwoInputTransformation<?, ?, ?>) optimized.get(2);
    assertThat(sourceTransformation1.getOutputType())
            .isEqualTo(chainedTransformation1.getInputType1());
    assertThat(sourceTransformation2.getOutputType())
            .isEqualTo(chainedTransformation1.getInputType2());
    assertThat(processOperator2.getProducedType())
            .isEqualTo(chainedTransformation1.getOutputType());

    // Second chain: f4+f5 — consumes f3's output, produces f5's type.
    OneInputTransformation<?, ?> chainedTransformation2 =
            (OneInputTransformation<?, ?>) optimized.get(3);
    assertThat(processOperator2.getProducedType())
            .isEqualTo(chainedTransformation2.getInputType());
    assertThat(processOperator3.getProducedType())
            .isEqualTo(chainedTransformation2.getOutputType());

    // The chained operators must wrap the Python functions in reverse order
    // (outermost function first).
    TwoInputStreamOperator<?, ?, ?> chainedOperator1 = chainedTransformation1.getOperator();
    assertThat(chainedOperator1).isInstanceOf(ExternalPythonKeyedCoProcessOperator.class);
    validateChainedPythonFunctions(
            ((ExternalPythonKeyedCoProcessOperator<?>) chainedOperator1)
                    .getPythonFunctionInfo(),
            "f3",
            "f2",
            "f1");

    OneInputStreamOperator<?, ?> chainedOperator2 = chainedTransformation2.getOperator();
    assertThat(chainedOperator2).isInstanceOf(ExternalPythonKeyedProcessOperator.class);
    validateChainedPythonFunctions(
            ((ExternalPythonKeyedProcessOperator<?>) chainedOperator2).getPythonFunctionInfo(),
            "f5",
            "f4");
}
|
/**
 * Runs {@code f} until it succeeds or the retry policy is exhausted.
 *
 * @param action human-readable description of the action, used in log/error messages
 * @param f the action to run
 * @param policy decides how many attempts are made
 * @throws IOException the last failure if every attempt failed, or a generic
 *         failure if the policy allowed no attempts at all
 */
public static void retry(String action, RunnableThrowsIOException f, RetryPolicy policy)
    throws IOException {
  IOException lastFailure = null;
  while (policy.attempt()) {
    try {
      f.run();
      return;
    } catch (IOException ioe) {
      // Remember the most recent failure so it can be rethrown verbatim.
      lastFailure = ioe;
      LOG.debug("Failed to {} (attempt {}): {}", action, policy.getAttemptCount(), lastFailure.toString());
    }
  }
  if (lastFailure == null) {
    // The policy never granted an attempt, so there is no cause to rethrow.
    throw new IOException(String.format("Failed to run action %s after %d attempts",
        action, policy.getAttemptCount()));
  }
  throw lastFailure;
}
|
@Test
public void success() throws IOException {
  // The action fails on the first four attempts and succeeds on the fifth;
  // the budget of 10 attempts is large enough that retry() must not throw.
  AtomicInteger attempts = new AtomicInteger(0);
  RetryUtils.retry("success test", () -> {
    if (attempts.incrementAndGet() < 5) {
      throw new IOException("Fail");
    }
  }, new CountingRetry(10));
  // Exactly five invocations: four failures plus the final success.
  assertEquals(5, attempts.get());
}
|
/**
 * Returns the shared, lazily-initialized {@link ObjectMapper}, configured with
 * custom serializers that render {@link Instant} and {@link ZonedDateTime}
 * through this class's dedicated formatters.
 *
 * <p>Declared {@code synchronized} to fix the check-then-act race on the static
 * {@code MAPPER} field: without it, two threads calling {@code of()} concurrently
 * could each observe {@code MAPPER == null}, build separate mappers, and one of
 * them could even publish a mapper before its module was registered.
 *
 * @return the singleton mapper instance
 */
public static synchronized ObjectMapper of() {
    if (MAPPER == null) {
        // Start from a copy so the shared JacksonMapper template is not mutated.
        MAPPER = JacksonMapper.ofJson(false).copy();

        final SimpleModule module = new SimpleModule();
        module.addSerializer(Instant.class, new JsonSerializer<>() {
            @Override
            public void serialize(Instant instant, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) throws IOException {
                jsonGenerator.writeString(INSTANT_FORMATTER.format(instant));
            }
        });
        module.addSerializer(ZonedDateTime.class, new JsonSerializer<>() {
            @Override
            public void serialize(ZonedDateTime instant, JsonGenerator jsonGenerator, SerializerProvider serializerProvider) throws IOException {
                jsonGenerator.writeString(ZONED_DATE_TIME_FORMATTER.format(instant));
            }
        });
        MAPPER.registerModule(module);
    }
    return MAPPER;
}
|
@Test
void zoneDateTime() throws JsonProcessingException {
    // Serializing a ZonedDateTime field must go through the custom serializer,
    // which truncates to millisecond precision and keeps the zone offset
    // (input has 6 fractional digits, output must have 3).
    String serialize = JdbcMapper.of().writeValueAsString(MultipleConditionWindow.builder()
        .start(ZonedDateTime.parse("2013-09-08T16:19:12.000000+02:00"))
        .build()
    );
    assertThat(serialize, containsString("2013-09-08T16:19:12.000+02:00"));
}
|
/**
 * Returns the host name of this location, as resolved by the configured
 * {@code hostNameSupplier}.
 *
 * @return the host name
 */
public String getHostname() {
    return hostNameSupplier.getHostName();
}
|
@Test
void testGetHostname1() throws Exception {
    // The host name reported by the InetAddress should be surfaced unchanged
    // by TaskManagerLocation#getHostname().
    InetAddress address = mock(InetAddress.class);
    when(address.getCanonicalHostName()).thenReturn("worker10");
    when(address.getHostName()).thenReturn("worker10");
    when(address.getHostAddress()).thenReturn("127.0.0.1");

    TaskManagerLocation info =
            new TaskManagerLocation(ResourceID.generate(), address, 19871);
    // AssertJ takes the ACTUAL value in assertThat(...) and the EXPECTED value
    // in isEqualTo(...); the original call had them swapped. Declaring
    // `throws Exception` replaces the catch/printStackTrace/fail anti-pattern —
    // an unexpected exception now fails the test with its full stack trace.
    assertThat(info.getHostname()).isEqualTo("worker10");
}
|
/**
 * Applies the default attribute values, then the caller-provided attributes,
 * to a newly created file. Caller-provided attributes win because they are
 * applied last.
 *
 * @param file the file being created
 * @param attrs user-supplied attributes, validated as create-time settable
 */
public void setInitialAttributes(File file, FileAttribute<?>... attrs) {
    // Default values should already be sanitized by their providers, so they
    // are applied directly without re-validation.
    for (FileAttribute<?> defaultAttribute : defaultValues) {
        String qualifiedName = defaultAttribute.name();
        int colon = qualifiedName.indexOf(':');
        // Names are of the form "view:attribute"; split on the first colon.
        file.setAttribute(
            qualifiedName.substring(0, colon),
            qualifiedName.substring(colon + 1),
            defaultAttribute.value());
    }
    // User attributes go through the validating path (create=true).
    for (FileAttribute<?> attribute : attrs) {
        setAttribute(file, attribute.name(), attribute.value(), true);
    }
}
|
@Test
public void testSetAttribute_onCreate_failsForAttributeThatIsNotSettableOnCreate() {
    File file = createFile();
    // "test:foo" is a valid attribute but is not settable at create time.
    try {
      service.setInitialAttributes(file, new BasicFileAttribute<>("test:foo", "world"));
      fail();
    } catch (UnsupportedOperationException expected) {
      // it turns out that UOE should be thrown on create even if the attribute isn't settable
      // under any circumstances
    }

    // "test:bar" is never settable, so the same exception is expected.
    try {
      service.setInitialAttributes(file, new BasicFileAttribute<>("test:bar", 5));
      fail();
    } catch (UnsupportedOperationException expected) {
    }
}
|
/**
 * Merges an ACL spec into an existing ACL: entries in the spec replace existing
 * entries with the same key, and spec entries with no existing counterpart are
 * added. Mask entries are tracked separately per scope so masks can be
 * recalculated afterwards.
 *
 * @param existingAcl the current, ordered ACL entries
 * @param inAclSpec the ACL spec to merge in
 * @return the merged, validated ACL
 * @throws AclException if the spec is invalid or the result exceeds limits
 */
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
    List<AclEntry> inAclSpec) throws AclException {
  ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
  ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
  // Spec entries that replaced an existing entry; used below to detect which
  // spec entries are brand-new additions.
  List<AclEntry> foundAclSpecEntries =
      Lists.newArrayListWithCapacity(MAX_ENTRIES);
  // Per-scope mask entries (explicit or pre-existing).
  EnumMap<AclEntryScope, AclEntry> providedMask =
      Maps.newEnumMap(AclEntryScope.class);
  // Scopes whose mask was explicitly provided by the spec.
  EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
  // Scopes touched by the spec at all (mask may need recalculation).
  EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
  // First pass: walk existing entries, substituting spec entries where keys match.
  for (AclEntry existingEntry: existingAcl) {
    AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
    if (aclSpecEntry != null) {
      foundAclSpecEntries.add(aclSpecEntry);
      scopeDirty.add(aclSpecEntry.getScope());
      if (aclSpecEntry.getType() == MASK) {
        providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
        maskDirty.add(aclSpecEntry.getScope());
      } else {
        aclBuilder.add(aclSpecEntry);
      }
    } else {
      // No replacement: keep the existing entry (masks are held aside).
      if (existingEntry.getType() == MASK) {
        providedMask.put(existingEntry.getScope(), existingEntry);
      } else {
        aclBuilder.add(existingEntry);
      }
    }
  }
  // ACL spec entries that were not replacements are new additions.
  for (AclEntry newEntry: aclSpec) {
    if (Collections.binarySearch(foundAclSpecEntries, newEntry,
        ACL_ENTRY_COMPARATOR) < 0) {
      scopeDirty.add(newEntry.getScope());
      if (newEntry.getType() == MASK) {
        providedMask.put(newEntry.getScope(), newEntry);
        maskDirty.add(newEntry.getScope());
      } else {
        aclBuilder.add(newEntry);
      }
    }
  }
  // Fill in missing default entries, recompute masks, then validate and sort.
  copyDefaultsIfNeeded(aclBuilder);
  calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
  return buildAndValidateAcl(aclBuilder);
}
|
@Test(expected=AclException.class)
public void testMergeAclDefaultEntriesInputTooLarge() throws AclException {
  // A minimal valid default ACL; merging a spec that exceeds the entry limit
  // must be rejected with AclException.
  List<AclEntry> existing = ImmutableList.of(
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ),
      aclEntry(DEFAULT, OTHER, NONE));
  mergeAclEntries(existing, ACL_SPEC_DEFAULT_TOO_LARGE);
}
|
/**
 * Reads a value from the namespace KV store.
 *
 * @param runContext the task run context
 * @return the stored value, or {@code null} when the key is absent and
 *         {@code errorOnMissing} is {@code false}
 * @throws Exception if rendering fails, the namespace is not allowed, or the
 *         key is missing while {@code errorOnMissing} is {@code true}
 */
@Override
public Output run(RunContext runContext) throws Exception {
    String ns = runContext.render(this.namespace);

    // Verify the current flow may read from the target namespace before
    // touching the store.
    DefaultRunContext defaultRunContext = (DefaultRunContext) runContext;
    FlowService flowService = defaultRunContext.getApplicationContext().getBean(FlowService.class);
    flowService.checkAllowedNamespace(runContext.tenantId(), ns, runContext.tenantId(), runContext.flowInfo().namespace());

    String kvKey = runContext.render(this.key);
    Optional<KVValue> storedValue = runContext.namespaceKv(ns).getValue(kvKey);
    if (storedValue.isEmpty() && this.errorOnMissing) {
        throw new NoSuchElementException("No value found for key '" + kvKey + "' in namespace '" + ns + "' and `errorOnMissing` is set to true");
    }

    return Output.builder()
        .value(storedValue.map(KVValue::value).orElse(null))
        .build();
}
|
@Test
void shouldGetGivenExistingKey() throws Exception {
    // Given: namespace and key are provided as templated inputs, so the task
    // must render them before hitting the KV store.
    String namespaceId = "io.kestra." + IdUtils.create();
    RunContext runContext = this.runContextFactory.of(Map.of(
        "flow", Map.of("namespace", namespaceId),
        "inputs", Map.of(
            "key", TEST_KV_KEY,
            "namespace", namespaceId
        )
    ));
    // Truncate to millis so the value survives serialization round-trips intact.
    var value = Map.of("date", Instant.now().truncatedTo(ChronoUnit.MILLIS), "int", 1, "string", "string");

    Get get = Get.builder()
        .id(Get.class.getSimpleName())
        .type(Get.class.getName())
        .namespace("{{ inputs.namespace }}")
        .key("{{ inputs.key }}")
        .build();

    final KVStore kv = runContext.namespaceKv(namespaceId);
    // When: the key exists in the store
    kv.put(TEST_KV_KEY, new KVValueAndMetadata(null, value));
    // Then: the task returns the stored value unchanged
    Get.Output run = get.run(runContext);
    assertThat(run.getValue(), is(value));
}
|
/**
 * Performs a database-backed extension search: filters the active extensions by
 * excluded namespaces, target platform, category and free-text query, sorts by
 * the requested criterion, and returns one page of results wrapped as
 * {@link SearchHits}. The client is expected to use only the extension data of
 * each hit.
 *
 * @param options the search options (filters, sorting, paging)
 * @return a page of matching extensions
 */
@Transactional
@Cacheable(CACHE_DATABASE_SEARCH)
@CacheEvict(value = CACHE_AVERAGE_REVIEW_RATING, allEntries = true)
public SearchHits<ExtensionSearch> search(ISearchService.Options options) {
    // grab all extensions
    var matchingExtensions = repositories.findAllActiveExtensions();

    // no extensions in the database
    if (matchingExtensions.isEmpty()) {
        return new SearchHitsImpl<>(0,TotalHitsRelation.OFF, 0f, null, null, Collections.emptyList(), null, null);
    }

    // exclude namespaces
    if(options.namespacesToExclude != null) {
        for(var namespaceToExclude : options.namespacesToExclude) {
            matchingExtensions = matchingExtensions.filter(extension -> !extension.getNamespace().getName().equals(namespaceToExclude));
        }
    }

    // filter target platform
    if(TargetPlatform.isValid(options.targetPlatform)) {
        matchingExtensions = matchingExtensions.filter(extension -> extension.getVersions().stream().anyMatch(ev -> ev.getTargetPlatform().equals(options.targetPlatform)));
    }

    // filter category (case-insensitive, against the latest version)
    if (options.category != null) {
        matchingExtensions = matchingExtensions.filter(extension -> {
            var latest = repositories.findLatestVersion(extension, null, false, true);
            return latest.getCategories().stream().anyMatch(category -> category.equalsIgnoreCase(options.category));
        });
    }

    // filter text: match name, namespace, description or display name
    if (options.queryString != null) {
        // Lower-case the query once instead of once per extension and field.
        var query = options.queryString.toLowerCase();
        matchingExtensions = matchingExtensions.filter(extension -> {
            var latest = repositories.findLatestVersion(extension, null, false, true);
            // The namespace name is lower-cased too: previously only the query
            // was lower-cased, which made the namespace match accidentally
            // case-sensitive (unlike every other field).
            return extension.getName().toLowerCase().contains(query)
                    || extension.getNamespace().getName().toLowerCase().contains(query)
                    || (latest.getDescription() != null && latest.getDescription()
                            .toLowerCase().contains(query))
                    || (latest.getDisplayName() != null && latest.getDisplayName()
                            .toLowerCase().contains(query));
        });
    }

    // need to perform the sortBy ()
    // 'relevance' | 'timestamp' | 'rating' | 'downloadCount';
    Stream<ExtensionSearch> searchEntries;
    if("relevance".equals(options.sortBy) || "rating".equals(options.sortBy)) {
        var searchStats = new SearchStats(repositories);
        searchEntries = matchingExtensions.stream().map(extension -> relevanceService.toSearchEntry(extension, searchStats));
    } else {
        searchEntries = matchingExtensions.stream().map(extension -> {
            var latest = repositories.findLatestVersion(extension, null, false, true);
            var targetPlatforms = repositories.findExtensionTargetPlatforms(extension);
            return extension.toSearch(latest, targetPlatforms);
        });
    }

    // The HashMap wrapper is required: Map.of() maps are null-hostile and
    // options.sortBy may be null, which must simply yield no comparator.
    var comparators = new HashMap<>(Map.of(
            "relevance", new RelevanceComparator(),
            "timestamp", new TimestampComparator(),
            "rating", new RatingComparator(),
            "downloadCount", new DownloadedCountComparator()
    ));

    var comparator = comparators.get(options.sortBy);
    if(comparator != null) {
        searchEntries = searchEntries.sorted(comparator);
    }

    var sortedExtensions = searchEntries.collect(Collectors.toList());

    // need to do sortOrder
    // 'asc' | 'desc';
    if ("desc".equals(options.sortOrder)) {
        // reverse the order
        Collections.reverse(sortedExtensions);
    }

    // Paging: clamp the window to the available results.
    var totalHits = sortedExtensions.size();
    var endIndex = Math.min(sortedExtensions.size(), options.requestedOffset + options.requestedSize);
    var startIndex = Math.min(endIndex, options.requestedOffset);
    sortedExtensions = sortedExtensions.subList(startIndex, endIndex);

    List<SearchHit<ExtensionSearch>> searchHits;
    if (sortedExtensions.isEmpty()) {
        searchHits = Collections.emptyList();
    } else {
        // client is interested only in the extension IDs
        searchHits = sortedExtensions.stream().map(extensionSearch -> new SearchHit<>(null, null, null, 0.0f, null, null, null, null, null, null, extensionSearch)).collect(Collectors.toList());
    }

    return new SearchHitsImpl<>(totalHits, TotalHitsRelation.OFF, 0f, null, null, searchHits, null, null);
}
|
@Test
public void testCategory() {
    // Three extensions, only two of which list the "Programming Languages" category.
    var ext1 = mockExtension("yaml", 3.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages"));
    var ext2 = mockExtension("java", 4.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages"));
    var ext3 = mockExtension("openshift", 4.0, 100, 0, "redhat", List.of("Snippets", "Other"));
    Mockito.when(repositories.findAllActiveExtensions()).thenReturn(Streamable.of(List.of(ext1, ext2, ext3)));

    // Search filtered by category only (no query string, universal platform).
    var searchOptions = new ISearchService.Options(null, "Programming Languages", TargetPlatform.NAME_UNIVERSAL, 50, 0, null, null, false);
    var result = search.search(searchOptions);

    // should find two extensions
    assertThat(result.getTotalHits()).isEqualTo(2);
}
|
/**
 * Creates a converter from a Hadoop input type to the corresponding Avro datum
 * type. Avro wrappers use the job's configured writer schema (preferring the
 * map-output schema for map-only jobs); Writable types map to fixed converters.
 *
 * @param inputClass the Hadoop input class to convert from
 * @return a converter producing Avro data for the given input class
 * @throws IllegalStateException if an Avro wrapper is requested but no writer
 *         schema was configured
 * @throws UnsupportedOperationException if the input class is not supported
 */
@SuppressWarnings("unchecked")
public <IN, OUT> AvroDatumConverter<IN, OUT> create(Class<IN> inputClass) {
    boolean isMapOnly = ((JobConf) getConf()).getNumReduceTasks() == 0;

    if (AvroKey.class.isAssignableFrom(inputClass)) {
        // Map-only jobs prefer the map-output key schema, falling back to the
        // job-output key schema when none is set.
        Schema writerSchema = isMapOnly ? AvroJob.getMapOutputKeySchema(getConf()) : null;
        if (writerSchema == null) {
            writerSchema = AvroJob.getOutputKeySchema(getConf());
        }
        if (writerSchema == null) {
            throw new IllegalStateException("Writer schema for output key was not set. Use AvroJob.setOutputKeySchema().");
        }
        return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(writerSchema);
    }

    if (AvroValue.class.isAssignableFrom(inputClass)) {
        // Same fallback logic as for keys, but with the value schemas.
        Schema writerSchema = isMapOnly ? AvroJob.getMapOutputValueSchema(getConf()) : null;
        if (writerSchema == null) {
            writerSchema = AvroJob.getOutputValueSchema(getConf());
        }
        if (writerSchema == null) {
            throw new IllegalStateException(
                "Writer schema for output value was not set. Use AvroJob.setOutputValueSchema().");
        }
        return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(writerSchema);
    }

    // Fixed mappings for the supported Writable types.
    if (BooleanWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new BooleanWritableConverter();
    }
    if (BytesWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new BytesWritableConverter();
    }
    if (ByteWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new ByteWritableConverter();
    }
    if (DoubleWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new DoubleWritableConverter();
    }
    if (FloatWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new FloatWritableConverter();
    }
    if (IntWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new IntWritableConverter();
    }
    if (LongWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new LongWritableConverter();
    }
    if (NullWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new NullWritableConverter();
    }
    if (Text.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new TextConverter();
    }

    throw new UnsupportedOperationException("Unsupported input type: " + inputClass.getName());
}
|
@Test
void convertAvroValue() throws IOException {
    // With the output value schema configured, an AvroValue input class must
    // yield a converter that unwraps the datum unchanged.
    AvroJob.setOutputValueSchema(mJob, Schema.create(Schema.Type.INT));
    AvroValue<Integer> avroValue = new AvroValue<>(42);
    @SuppressWarnings("unchecked")
    AvroDatumConverter<AvroValue<Integer>, Integer> converter = mFactory
        .create((Class<AvroValue<Integer>>) avroValue.getClass());
    assertEquals(42, converter.convert(avroValue).intValue());
}
|
/**
 * Creates a new directory inode initialized from the given creation context.
 *
 * @param id the inode id
 * @param parentId the id of the parent inode
 * @param name the directory name
 * @param context the creation options (ttl, owner, group, mode, ACLs, ...)
 * @return the new directory inode
 */
public static MutableInodeDirectory create(long id, long parentId, String name,
    CreateDirectoryContext context) {
  // Owner/group are interned since the same principal strings recur across
  // many inodes.
  return new MutableInodeDirectory(id)
      .setParentId(parentId)
      .setName(name)
      .setTtl(context.getTtl())
      .setTtlAction(context.getTtlAction())
      .setOwner(context.getOwner().intern())
      .setGroup(context.getGroup().intern())
      .setMode(context.getMode().toShort())
      .setAcl(context.getAcl())
      // SetAcl call is also setting default AclEntries
      .setAcl(context.getDefaultAcl())
      .setMountPoint(context.isMountPoint())
      .setXAttr(context.getXAttr());
}
|
@Test
public void equalsTest() throws Exception {
    // Inode equality is based on id: same id => equal, even with different names.
    MutableInodeDirectory inode1 =
        MutableInodeDirectory.create(1, 0, "test1", CreateDirectoryContext.defaults());
    MutableInodeDirectory inode2 =
        MutableInodeDirectory.create(1, 0, "test2", CreateDirectoryContext.defaults());
    MutableInodeDirectory inode3 =
        MutableInodeDirectory.create(3, 0, "test3", CreateDirectoryContext.defaults());

    // assertEquals/assertNotEquals give descriptive failure messages, unlike
    // assertTrue(a.equals(b)).
    Assert.assertEquals(inode1, inode2);
    // equals contract: reflexive and symmetric.
    Assert.assertEquals(inode1, inode1);
    Assert.assertEquals(inode2, inode1);
    // Different id => not equal; also never equal to null.
    Assert.assertNotEquals(inode1, inode3);
    Assert.assertNotEquals(inode1, null);
}
|
/**
 * Notifies the diff helper that the model list changed so it can compute and
 * dispatch the item change notifications.
 *
 * @throws IllegalStateException if diffing was never enabled (no diff helper)
 */
protected void notifyModelsChanged() {
    // Diffing must have been enabled first; otherwise there is no helper to
    // compute the changes.
    if (diffHelper != null) {
        diffHelper.notifyModelChanges();
        return;
    }
    throw new IllegalStateException("You must enable diffing before notifying models changed");
}
|
@Test
public void testThrowIfChangeModelIdAfterDiff() {
    // Once a model has been diffed by the adapter, its id is locked in;
    // changing it afterwards must fail loudly.
    TestModel testModel = new TestModel();
    testModel.id(100);
    testAdapter.models.add(testModel);
    testAdapter.notifyModelsChanged();

    thrown.expect(IllegalEpoxyUsage.class);
    thrown.expectMessage("Cannot change a model's id after it has been added to the adapter");

    // This id change must trigger the expected exception.
    testModel.id(200);
}
|
/**
 * Creates a new CommitWorkStream against the Windmill service, wired to the
 * commit-work throttle timer for backpressure accounting.
 *
 * @return the new commit stream
 */
@Override
public CommitWorkStream commitWorkStream() {
    return windmillStreamFactory.createCommitWorkStream(
        dispatcherClient.getWindmillServiceStub(), throttleTimers.commitWorkThrottleTimer());
}
|
@Test
public void testStreamingCommitManyThreads() throws Exception {
    // Exercises a single CommitWorkStream shared by 10 threads, each issuing
    // 50 commits with disjoint request-id ranges, against a fake Windmill
    // service that records and acknowledges every request.
    ConcurrentHashMap<Long, WorkItemCommitRequest> commitRequests = new ConcurrentHashMap<>();
    serviceRegistry.addService(
        new CloudWindmillServiceV1Alpha1ImplBase() {
          @Override
          public StreamObserver<StreamingCommitWorkRequest> commitWorkStream(
              StreamObserver<StreamingCommitResponse> responseObserver) {
            return getTestCommitStreamObserver(responseObserver, commitRequests);
          }
        });
    ScheduledExecutorService executor = Executors.newScheduledThreadPool(10);
    // Make the commit requests, waiting for each of them to be verified and acknowledged.
    CommitWorkStream stream = client.commitWorkStream();
    List<Future<?>> futures = new ArrayList<>();
    for (int i = 0; i < 10; ++i) {
      // Disjoint id range per thread so requests never collide in the map.
      final int startRequestId = i * 50;
      futures.add(
          executor.submit(
              () -> {
                try {
                  commitWorkTestHelper(stream, commitRequests, startRequestId, 50);
                } catch (InterruptedException e) {
                  throw new RuntimeException(e);
                }
              }));
    }
    // Surface any exceptions that might be thrown by submitting by blocking on the future.
    // NOTE(review): if a future fails, the stream/executor below are not cleaned
    // up — consider a try/finally; left as-is here to keep behavior unchanged.
    for (Future<?> f : futures) {
      f.get();
    }
    // Close our half of the stream and wait for the server side to drain.
    stream.halfClose();
    assertTrue(stream.awaitTermination(30, TimeUnit.SECONDS));
    executor.shutdown();
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.