| focal_method (string, length 13–60.9k) | test_case (string, length 25–109k) |
|---|---|
/**
 * Returns the collection of group-administrator JIDs.
 *
 * <p>Note: this exposes the internal mutable collection directly — callers
 * (including the paired test) add members through it. Presumably the backing
 * implementation persists such mutations; verify against the Group contract.
 *
 * @return the backing collection of administrator JIDs (not a copy)
 */
public Collection<JID> getAdmins() {
    return administrators;
}
|
/**
 * Verifies that deleting a group is reflected in the group cache: after
 * {@code deleteGroup}, the cached entry for the group name must hold null.
 */
@Test
public void testReflectedInCacheGroupDeleted() throws Exception
{
    // Setup test fixture: a group with one admin, so there is real state to evict.
    final String groupName = "unit-test-group-p";
    final Group group = groupManager.createGroup(groupName);
    final JID testUser = new JID("unit-test-user-p", "example.org", null);
    group.getAdmins().add(testUser);

    // Execute system under test.
    groupManager.deleteGroup(group);
    final CacheableOptional<Group> cachedGroup = groupCache.get(groupName);

    // Verify results: the cache entry exists but wraps no group.
    assertNull(cachedGroup.get());
}
|
/**
 * Formats a TIMESTAMP using a DateTimeFormatter pattern, defaulting the time
 * zone to GMT.
 *
 * <p>Fix: the previous @Udf description claimed "the system default time zone
 * is used when no time zone is explicitly provided", but the implementation
 * hard-codes GMT; the description now matches the code.
 */
@Udf(description = "Converts a TIMESTAMP value into the"
    + " string representation of the timestamp in the given format. Single quotes in the"
    + " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'"
    + " GMT is used when no time zone is explicitly provided."
    + " The format pattern should be in the format expected"
    + " by java.time.format.DateTimeFormatter")
public String formatTimestamp(
    @UdfParameter(
        description = "TIMESTAMP value.") final Timestamp timestamp,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
    // Delegate to the three-argument overload with the GMT zone id.
    return formatTimestamp(timestamp, formatPattern, ZoneId.of("GMT").getId());
}
|
@Test
public void shouldThrowIfFormatInvalid() {
    // When: an invalid DateTimeFormatter pattern is supplied.
    // NOTE(review): the thrown KsqlFunctionException is assigned to KsqlException,
    // so the former is presumably a subtype of the latter — confirm.
    final KsqlException e = assertThrows(
        KsqlFunctionException.class,
        () -> udf.formatTimestamp(new Timestamp(1638360611123L), "invalid")
    );

    // Then: the formatter's own parse error surfaces in the exception message.
    assertThat(e.getMessage(), containsString("Unknown pattern letter: i"));
}
|
/**
 * Removes all entries from the backing tree map.
 *
 * <p>{@code complete(...)} presumably awaits/unwraps the asynchronous result
 * of the underlying map operation — confirm against the base class contract.
 */
@Override
public void clear() {
    complete(treeMap.clear());
}
|
@Test
public void testClear() {
    // Clearing must leave the map empty regardless of prior contents.
    treeMap.clear();
    assertThat(treeMap.size(), is(0));
}
|
/**
 * Validates that, if the schema declares the attributes field, that field has
 * the expected attributes type.
 *
 * <p>A schema without the attributes field is valid and passes trivially.
 *
 * @param schema the schema to validate
 * @throws IllegalArgumentException if the attributes field exists but its
 *     type does not match {@code ATTRIBUTES_FIELD_TYPE}
 */
void validateAttributesField(Schema schema) {
    String attributesKeyName = getAttributesKeyName();
    if (!schema.hasField(attributesKeyName)) {
        // No attributes field declared: nothing to validate.
        return;
    }
    // Fix: attach a descriptive message instead of throwing a bare
    // IllegalArgumentException with no context.
    checkArgument(
        SchemaReflection.of(schema)
            .matchesAll(FieldMatcher.of(attributesKeyName, ATTRIBUTES_FIELD_TYPE)),
        "schema field '%s' does not match the expected attributes field type %s",
        attributesKeyName,
        ATTRIBUTES_FIELD_TYPE);
}
|
/**
 * Exercises attribute-field validation: schemas without the attributes key or
 * with the correct attributes type pass; any mismatched field type (wrong
 * value type, wrong map value, wrong map key) must fail.
 */
@Test
public void testValidateAttributesField() {
    PubsubRowToMessage pubsubRowToMessage = PubsubRowToMessage.builder().build();
    // No attributes field at all: validation is a no-op.
    pubsubRowToMessage.validateAttributesField(ALL_DATA_TYPES_SCHEMA);
    // Exact expected type: passes.
    pubsubRowToMessage.validateAttributesField(
        Schema.of(Field.of(DEFAULT_ATTRIBUTES_KEY_NAME, ATTRIBUTES_FIELD_TYPE)));
    // Plain STRING instead of the attributes map type: rejected.
    assertThrows(
        IllegalArgumentException.class,
        () ->
            pubsubRowToMessage.validateAttributesField(
                Schema.of(Field.of(DEFAULT_ATTRIBUTES_KEY_NAME, FieldType.STRING))));
    // Map with wrong value type (BYTES): rejected.
    assertThrows(
        IllegalArgumentException.class,
        () ->
            pubsubRowToMessage.validateAttributesField(
                Schema.of(
                    Field.of(
                        DEFAULT_ATTRIBUTES_KEY_NAME,
                        FieldType.map(FieldType.STRING, FieldType.BYTES)))));
    // Map with wrong key type (BYTES): rejected.
    assertThrows(
        IllegalArgumentException.class,
        () ->
            pubsubRowToMessage.validateAttributesField(
                Schema.of(
                    Field.of(
                        DEFAULT_ATTRIBUTES_KEY_NAME,
                        FieldType.map(FieldType.BYTES, FieldType.STRING)))));
    // Related check: serializable-field validation rejects the mixed-type schema.
    assertThrows(
        IllegalArgumentException.class,
        () ->
            PubsubRowToMessage.builder().build().validateSerializableFields(ALL_DATA_TYPES_SCHEMA));
}
|
/**
 * Creates the config when no row exists for its key, otherwise updates the
 * stored row's comment, last-modified-by and value in place.
 *
 * @param serverConfig the incoming config; its key is used for lookup
 * @return the persisted entity
 */
@Transactional
public ServerConfig createOrUpdateConfig(ServerConfig serverConfig) {
    ServerConfig storedConfig = serverConfigRepository.findByKey(serverConfig.getKey());
    if (Objects.isNull(storedConfig)) { // create
        // Set the ID to 0 so JPA performs an insert rather than a merge.
        serverConfig.setId(0L);
        if (Objects.isNull(serverConfig.getCluster())) {
            // Fall back to the default cluster when none was supplied.
            serverConfig.setCluster("default");
        }
        return serverConfigRepository.save(serverConfig);
    }
    // update: only comment, modifier and value are overwritten; the stored
    // key and cluster are kept as-is.
    storedConfig.setComment(serverConfig.getComment());
    storedConfig.setDataChangeLastModifiedBy(serverConfig.getDataChangeLastModifiedBy());
    storedConfig.setValue(serverConfig.getValue());
    return serverConfigRepository.save(storedConfig);
}
|
/**
 * End-to-end create/update flow: a new key gets inserted with the default
 * cluster, saving the same key again updates in place (row count stays 1),
 * and a second key produces a second row.
 */
@Test
public void createOrUpdateConfig() {
    // Insert a fresh config.
    ServerConfig serverConfig = new ServerConfig();
    serverConfig.setKey("name");
    serverConfig.setValue("kl");
    serverConfigService.createOrUpdateConfig(serverConfig);
    List<ServerConfig> serverConfigs = serverConfigService.findAll();
    assertThat(serverConfigs).isNotNull();
    assertThat(serverConfigs.get(0).getValue()).isEqualTo("kl");
    // The cluster defaults to "default" when not supplied.
    assertThat(serverConfigs.get(0).getCluster()).isEqualTo("default");
    assertThat(serverConfigs.get(0).getKey()).isEqualTo("name");
    // Same key again: updates the row instead of inserting a new one.
    serverConfig.setValue("kl2");
    serverConfigService.createOrUpdateConfig(serverConfig);
    serverConfigs = serverConfigService.findAll();
    assertThat(serverConfigs).isNotNull();
    assertThat(serverConfigs.size()).isEqualTo(1);
    assertThat(serverConfigs.get(0).getValue()).isEqualTo("kl2");
    assertThat(serverConfigs.get(0).getKey()).isEqualTo("name");
    // Different key: a second row is created.
    serverConfig = new ServerConfig();
    serverConfig.setKey("name2");
    serverConfig.setValue("kl2");
    serverConfigService.createOrUpdateConfig(serverConfig);
    serverConfigs = serverConfigService.findAll();
    assertThat(serverConfigs).isNotNull();
    assertThat(serverConfigs.size()).isEqualTo(2);
}
|
/**
 * Replaces the topology configuration with one derived from the given
 * application config.
 *
 * @param applicationConfig the Streams application configuration; must not be null
 * @throws NullPointerException if {@code applicationConfig} is null
 */
public final synchronized void setStreamsConfig(final StreamsConfig applicationConfig) {
    Objects.requireNonNull(applicationConfig, "config can't be null");
    topologyConfigs = new TopologyConfig(applicationConfig);
}
|
@Test
public void shouldNotSetStreamsConfigToNull() {
    // Null configs must be rejected eagerly with a NullPointerException.
    assertThrows(NullPointerException.class, () -> builder.setStreamsConfig(null));
}
|
/**
 * @return the task's signature, an identifier that is carried through into
 *     the generated thrift request (see the paired toThrift test)
 */
public long getSignature() {
    return this.signature;
}
|
/**
 * Invokes the private {@code toAgentTaskRequest} conversion via reflection for
 * each task kind and checks that the request carries the matching task type,
 * the task's signature, and the kind-specific payload struct.
 */
@Test
public void toThriftTest() throws Exception {
    // Access the private conversion method reflectively.
    Class<? extends AgentBatchTask> agentBatchTaskClass = agentBatchTask.getClass();
    Class[] typeParams = new Class[] {AgentTask.class};
    Method toAgentTaskRequest = agentBatchTaskClass.getDeclaredMethod("toAgentTaskRequest", typeParams);
    toAgentTaskRequest.setAccessible(true);
    // create
    TAgentTaskRequest request = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, createReplicaTask);
    Assert.assertEquals(TTaskType.CREATE, request.getTask_type());
    Assert.assertEquals(createReplicaTask.getSignature(), request.getSignature());
    Assert.assertNotNull(request.getCreate_tablet_req());
    // drop
    TAgentTaskRequest request2 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, dropTask);
    Assert.assertEquals(TTaskType.DROP, request2.getTask_type());
    Assert.assertEquals(dropTask.getSignature(), request2.getSignature());
    Assert.assertNotNull(request2.getDrop_tablet_req());
    // clone
    TAgentTaskRequest request4 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, cloneTask);
    Assert.assertEquals(TTaskType.CLONE, request4.getTask_type());
    Assert.assertEquals(cloneTask.getSignature(), request4.getSignature());
    Assert.assertNotNull(request4.getClone_req());
    // modify enable_persistent_index (both variants map to UPDATE_TABLET_META_INFO)
    TAgentTaskRequest request7 =
            (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, modifyEnablePersistentIndexTask1);
    Assert.assertEquals(TTaskType.UPDATE_TABLET_META_INFO, request7.getTask_type());
    Assert.assertEquals(modifyEnablePersistentIndexTask1.getSignature(), request7.getSignature());
    Assert.assertNotNull(request7.getUpdate_tablet_meta_info_req());
    TAgentTaskRequest request8 =
            (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, modifyEnablePersistentIndexTask2);
    Assert.assertEquals(TTaskType.UPDATE_TABLET_META_INFO, request8.getTask_type());
    Assert.assertEquals(modifyEnablePersistentIndexTask2.getSignature(), request8.getSignature());
    Assert.assertNotNull(request8.getUpdate_tablet_meta_info_req());
    // modify in_memory
    TAgentTaskRequest request9 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, modifyInMemoryTask);
    Assert.assertEquals(TTaskType.UPDATE_TABLET_META_INFO, request9.getTask_type());
    Assert.assertEquals(modifyInMemoryTask.getSignature(), request9.getSignature());
    Assert.assertNotNull(request9.getUpdate_tablet_meta_info_req());
    // modify primary index cache
    TAgentTaskRequest request10 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask,
            modifyPrimaryIndexCacheExpireSecTask1);
    Assert.assertEquals(TTaskType.UPDATE_TABLET_META_INFO, request10.getTask_type());
    Assert.assertEquals(modifyPrimaryIndexCacheExpireSecTask1.getSignature(), request10.getSignature());
    Assert.assertNotNull(request10.getUpdate_tablet_meta_info_req());
    TAgentTaskRequest request11 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask,
            modifyPrimaryIndexCacheExpireSecTask2);
    Assert.assertEquals(TTaskType.UPDATE_TABLET_META_INFO, request11.getTask_type());
    Assert.assertEquals(modifyPrimaryIndexCacheExpireSecTask2.getSignature(), request11.getSignature());
    Assert.assertNotNull(request11.getUpdate_tablet_meta_info_req());
}
|
/**
 * Copies the request headers and, for gRPC requests, ensures the
 * {@code te: trailers} header required by HTTP/2 is present.
 *
 * @see <a href="https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2">RFC 7540 §8.1.2.2</a>
 */
@Override
public HttpHeaders filter(HttpHeaders headers, ServerWebExchange exchange) {
    // Start from a fresh copy of every incoming header entry.
    final HttpHeaders result = new HttpHeaders();
    headers.forEach(result::addAll);
    if (isGRPC(headers.getFirst(HttpHeaders.CONTENT_TYPE))) {
        result.add("te", "trailers");
    }
    return result;
}
|
@Test
public void shouldNotIncludeTrailersHeaderIfNotGRPC() {
    // A plain JSON request must pass through without the gRPC-only "te" header.
    MockServerHttpRequest request = MockServerHttpRequest.get("http://localhost:8080/get")
        .header(HttpHeaders.CONTENT_TYPE, "application/json")
        .build();
    GRPCRequestHeadersFilter filter = new GRPCRequestHeadersFilter();
    HttpHeaders headers = filter.filter(request.getHeaders(), MockServerWebExchange.from(request));
    assertThat(headers).doesNotContainKeys("te");
}
|
/**
 * Returns the stream contents as a byte array by delegating to
 * {@link #toByteArray(int)} with argument 0.
 *
 * <p>NOTE(review): the paired test expects UnsupportedOperationException, so
 * the delegate presumably rejects this operation for this stream type — confirm.
 */
@Override
public byte[] toByteArray() {
    return toByteArray(0);
}
|
@Test(expected = UnsupportedOperationException.class)
public void testToByteArray() throws Exception {
    // Byte-array extraction is unsupported for this stream implementation.
    dataOutputStream.toByteArray();
}
|
/** This wizard step is complete once this keyboard is the device's default IME. */
@Override
protected boolean isStepCompleted(@NonNull Context context) {
    return SetupSupport.isThisKeyboardSetAsDefaultIME(context);
}
|
/**
 * With the keyboard both enabled and set as the default IME, the wizard step
 * reports completed and shows the non-clickable "switch on" state icon.
 */
@Test
public void testKeyboardEnabledAndDefault() {
    final String flatASKComponent =
        new ComponentName(BuildConfig.APPLICATION_ID, SoftKeyboard.class.getName())
            .flattenToString();
    // Mark the keyboard as enabled...
    Settings.Secure.putString(
        getApplicationContext().getContentResolver(),
        Settings.Secure.ENABLED_INPUT_METHODS,
        flatASKComponent);
    // ...and as the default IME.
    Settings.Secure.putString(
        getApplicationContext().getContentResolver(),
        Settings.Secure.DEFAULT_INPUT_METHOD,
        flatASKComponent);
    WizardPageSwitchToKeyboardFragment fragment = startFragment();
    Assert.assertTrue(fragment.isStepCompleted(getApplicationContext()));
    // The state icon reflects completion and is not interactive.
    ImageView stateIcon = fragment.getView().findViewById(R.id.step_state_icon);
    Assert.assertNotNull(stateIcon);
    Assert.assertEquals(
        R.drawable.ic_wizard_switch_on,
        Shadows.shadowOf(stateIcon.getDrawable()).getCreatedFromResId());
    Assert.assertFalse(stateIcon.isClickable());
}
|
/**
 * Loads parameters from the properties file at the given path.
 *
 * @param path path of the properties file to read
 * @return a ParameterTool backed by the file's properties
 * @throws IOException if the file cannot be read
 */
public static ParameterTool fromPropertiesFile(String path) throws IOException {
    return fromPropertiesFile(new File(path));
}
|
/**
 * Writes a temporary properties file and loads it three ways (by File twice,
 * then by InputStream), checking the parameter count and contents each time.
 */
@Test
void testFromPropertiesFile(@TempDir File propertiesFile) throws IOException {
    Properties props = new Properties();
    props.setProperty("input", "myInput");
    props.setProperty("expectedCount", "15");
    // Store the properties under a random file name inside the temp dir.
    Path path = new File(propertiesFile, UUID.randomUUID().toString()).toPath();
    try (final OutputStream out = Files.newOutputStream(path)) {
        props.store(out, "Test properties");
    }
    // Load via File.
    ParameterTool parameter = ParameterTool.fromPropertiesFile(path.toFile());
    assertThat(parameter.getNumberOfParameters()).isEqualTo(2);
    validate(parameter);
    // Loading again must be repeatable.
    parameter = ParameterTool.fromPropertiesFile(path.toFile());
    assertThat(parameter.getNumberOfParameters()).isEqualTo(2);
    validate(parameter);
    // Load via InputStream.
    try (FileInputStream fis = new FileInputStream(path.toFile())) {
        parameter = ParameterTool.fromPropertiesFile(fis);
    }
    assertThat(parameter.getNumberOfParameters()).isEqualTo(2);
    validate(parameter);
}
|
/**
 * Returns true when the given object equals a member that the selector
 * accepts; members rejected by the selector are invisible to this view.
 */
@Override
public boolean contains(Object o) {
    for (M candidate : members) {
        // A match requires both selection and equality.
        if (selector.select(candidate) && o.equals(candidate)) {
            return true;
        }
    }
    return false;
}
|
@Test
public void testDoesNotContainNonMatchingMemberWhenLiteMembersSelectedAndNoLocalMember() {
    // With a lite-and-non-local selector, a data member must not be visible.
    Collection<MemberImpl> collection = new MemberSelectingCollection<>(members,
        and(LITE_MEMBER_SELECTOR, NON_LOCAL_MEMBER_SELECTOR));
    assertFalse(collection.contains(dataMember));
}
|
/**
 * Updates an existing dictionary type after validating its existence and the
 * uniqueness of its name and type value.
 */
@Override
public void updateDictType(DictTypeSaveReqVO updateReqVO) {
    // Validate that the record itself exists.
    validateDictTypeExists(updateReqVO.getId());
    // Validate that the dictionary type's name is unique.
    validateDictTypeNameUnique(updateReqVO.getId(), updateReqVO.getName());
    // Validate that the dictionary type's type value is unique.
    validateDictTypeUnique(updateReqVO.getId(), updateReqVO.getType());
    // Perform the update.
    DictTypeDO updateObj = BeanUtils.toBean(updateReqVO, DictTypeDO.class);
    dictTypeMapper.updateById(updateObj);
}
|
/**
 * Happy-path update: an existing record updated with a valid request must be
 * persisted with exactly the request's field values.
 */
@Test
public void testUpdateDictType_success() {
    // Mock data.
    DictTypeDO dbDictType = randomDictTypeDO();
    dictTypeMapper.insert(dbDictType); // insert an existing record first
    // Prepare parameters.
    DictTypeSaveReqVO reqVO = randomPojo(DictTypeSaveReqVO.class, o -> {
        o.setId(dbDictType.getId()); // target the existing record's ID
        o.setStatus(randomEle(CommonStatusEnum.values()).getStatus());
    });
    // Invoke.
    dictTypeService.updateDictType(reqVO);
    // Verify the update was applied correctly.
    DictTypeDO dictType = dictTypeMapper.selectById(reqVO.getId()); // fetch the latest record
    assertPojoEquals(reqVO, dictType);
}
|
/**
 * Processes every configured SARIF report path. Imported results are
 * collected and displayed at the end; a missing report file aborts the
 * analysis with a MessageException, while any other per-report failure is
 * logged as a warning and the remaining reports are still processed.
 */
@Override
public void execute(SensorContext context) {
    Set<String> reportPaths = loadReportPaths();
    Map<String, SarifImportResults> filePathToImportResults = new HashMap<>();
    for (String reportPath : reportPaths) {
        try {
            SarifImportResults sarifImportResults = processReport(context, reportPath);
            filePathToImportResults.put(reportPath, sarifImportResults);
        } catch (NoSuchFileException e) {
            // A missing report file is treated as a hard configuration error.
            throw MessageException.of(format("SARIF report file not found: %s", e.getFile()));
        } catch (Exception exception) {
            // Any other failure is non-fatal: skip this report and continue.
            LOG.warn("Failed to process SARIF report from file '{}', error: '{}'", reportPath, exception.getMessage());
        }
    }
    filePathToImportResults.forEach(SarifIssuesImportSensor::displayResults);
}
|
/**
 * A failing import of one report must be logged as a warning and skipped,
 * while the other report is still imported and its summary displayed.
 */
@Test
public void execute_whenImportFails_shouldSkipReport() throws NoSuchFileException {
    sensorSettings.setProperty("sonar.sarifReportPaths", SARIF_REPORT_PATHS_PARAM);
    ReportAndResults reportAndResults1 = mockFailedReportAndResults(FILE_1);
    ReportAndResults reportAndResults2 = mockSuccessfulReportAndResults(FILE_2);
    // Make the first report's import blow up.
    doThrow(new NullPointerException("import failed")).when(sarifImporter).importSarif(reportAndResults1.getSarifReport());
    SarifIssuesImportSensor sensor = new SarifIssuesImportSensor(sarifSerializer, sarifImporter, sensorSettings.asConfig());
    sensor.execute(sensorContext);
    // The second report is still imported; the first produced only a warning.
    verify(sarifImporter).importSarif(reportAndResults2.getSarifReport());
    assertThat(logTester.logs(Level.WARN)).contains("Failed to process SARIF report from file 'path/to/sarif/file.sarif', error: 'import failed'");
    assertSummaryIsCorrectlyDisplayedForSuccessfulFile(FILE_2, reportAndResults2.getSarifImportResults());
}
|
/**
 * Returns the registered service id as the application name, falling back to
 * the parent implementation when no service id is set.
 *
 * <p>Fix: replaced the deprecated {@code StringUtils.isEmpty} (and its
 * {@code @SuppressWarnings("deprecation")}) with the supported
 * {@code StringUtils.hasLength}, which performs the same null-or-empty check.
 */
@Override
protected String getAppName() {
    String appName = registration.getServiceId();
    return StringUtils.hasLength(appName) ? appName : super.getAppName();
}
|
@Test
public void testGetAppName() {
    // No service id registered: the environment-derived name is used.
    doReturn("application").when(environment).getProperty(anyString(), anyString());
    assertThat(polarisAutoServiceRegistration.getAppName()).isEqualTo("application");
    // With a service id, that id wins over the fallback.
    doReturn(SERVICE_PROVIDER).when(registration).getServiceId();
    assertThat(polarisAutoServiceRegistration.getAppName()).isEqualTo(SERVICE_PROVIDER);
}
|
/**
 * @return the encoder's memory footprint — presumably in bytes, equal to
 *     {@code 1 << precision} for the dense encoder per the paired test;
 *     confirm the unit against the field's initialization
 */
@Override
public int getMemoryFootprint() {
    return m;
}
|
@Test
public void testGetMemoryFootprint() {
    // Dense HLL footprint is expected to be 2^precision.
    DenseHyperLogLogEncoder encoder = getDenseHyperLogLogEncoder();
    int memoryFootprint = encoder.getMemoryFootprint();
    assertEquals(1 << precision(), memoryFootprint);
}
|
/**
 * Stops the first {@code numOfServicesStarted} child services in reverse
 * order of start. When {@code stopOnlyStartedServices} is false, services
 * still in the INITED state are stopped too. All services are swept even if
 * some fail; the first exception raised is rethrown afterwards.
 */
private void stop(int numOfServicesStarted, boolean stopOnlyStartedServices) {
    // stop in reverse order of start
    Exception firstException = null;
    List<Service> services = getServices();
    for (int i = numOfServicesStarted - 1; i >= 0; i--) {
        Service service = services.get(i);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Stopping service #" + i + ": " + service);
        }
        STATE state = service.getServiceState();
        // depending on the stop policy, INITED services may also be stopped
        if (state == STATE.STARTED
            || (!stopOnlyStartedServices && state == STATE.INITED)) {
            // stopQuietly returns rather than throws; remember only the first failure
            Exception ex = ServiceOperations.stopQuietly(LOG, service);
            if (ex != null && firstException == null) {
                firstException = ex;
            }
        }
    }
    // after stopping all services, rethrow the first exception raised
    if (firstException != null) {
        throw ServiceStateException.convert(firstException);
    }
}
|
/**
 * Adding an already-stopped child to a started composite and then stopping
 * the composite must complete without error within the timeout.
 */
@Test(timeout = 10000)
public void testAddStoppedChildInStart() throws Throwable {
    CompositeService parent = new CompositeService("parent");
    // Drive the child through its full lifecycle before it joins the parent.
    BreakableService child = new BreakableService();
    child.init(new Configuration());
    child.start();
    child.stop();
    parent.init(new Configuration());
    parent.start();
    AddSiblingService.addChildToService(parent, child);
    parent.stop();
}
|
/**
 * Returns true when the object is a container — either a {@link Collection}
 * or a {@link Map}. {@code Map} is included deliberately even though it does
 * not implement {@code java.util.Collection}; a null argument yields false.
 */
public static boolean isCollection(Object obj) {
    if (obj instanceof Collection) {
        return true;
    }
    return obj instanceof Map;
}
|
@Test
public void isCollection() {
    // Collections and Maps both count as "collections" here.
    assertTrue(CommonUtils.isCollection(Sets.newHashSet()));
    assertTrue(CommonUtils.isCollection(Maps.newHashMap()));
    assertTrue(CommonUtils.isCollection(Lists.newArrayList()));
    // null and scalars do not.
    assertFalse(CommonUtils.isCollection(null));
    assertFalse(CommonUtils.isCollection(1));
}
|
/**
 * Filters the candidate instances through the routing rules for the target
 * service, then delegates the (possibly reduced) list to the next handler in
 * the chain. Lists this handler should not process are passed through as-is.
 */
@Override
public List<Object> handle(String targetName, List<Object> instances, RequestData requestData) {
    if (!shouldHandle(instances)) {
        // Not applicable: pass the list through untouched.
        return instances;
    }
    List<Object> result = getTargetInstancesByRules(targetName, instances);
    return super.handle(targetName, result, requestData);
}
|
/**
 * With an AZ tag-match threshold-policy rule and local zone az1, routing must
 * keep the two matching instances out of three; the rule is reset afterwards.
 */
@Test
public void testGetTargetInstancesByTagRulesWithPolicySceneOne() {
    RuleInitializationUtils.initAZTagMatchTriggerThresholdPolicyRule();
    List<Object> instances = new ArrayList<>();
    ServiceInstance instance1 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0", "az1");
    ServiceInstance instance2 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0", "az2");
    ServiceInstance instance3 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.1", "az1");
    instances.add(instance1);
    instances.add(instance2);
    instances.add(instance3);
    // The local application reports zone az1.
    Map<String, String> metadata = new HashMap<>();
    metadata.put("zone", "az1");
    AppCache.INSTANCE.setMetadata(metadata);
    List<Object> targetInstances = tagRouteHandler.handle("foo", instances,
        new RequestData(null, null, null));
    Assert.assertEquals(2, targetInstances.size());
    // Clean up the route rule so other tests are unaffected.
    ConfigCache.getLabel(RouterConstant.SPRING_CACHE_NAME).resetRouteRule(Collections.emptyMap());
}
|
/**
 * Sets the name on the wrapped model object.
 *
 * @param name the name to set
 * @return this handler, for method chaining
 */
public OpenConfigConfigOfComponentHandler addName(String name) {
    modelObject.name(name);
    return this;
}
|
/**
 * addName must leave the handler's model object equal to a reference
 * DefaultConfig carrying the same name.
 */
@Test
public void testAddName() {
    // test Handler
    OpenConfigConfigOfComponentHandler config = new OpenConfigConfigOfComponentHandler(parent);
    // call addName
    config.addName("name");
    // expected ModelObject
    DefaultConfig modelObject = new DefaultConfig();
    modelObject.name("name");
    assertEquals("[NG]addName:ModelObject(Name added) is not an expected one.\n",
        modelObject, config.getModelObject());
}
|
/**
 * Satisfied once at least {@code numberOfBars} bars have elapsed since the
 * last trade of the configured type. Without a trading record, or without a
 * prior trade of that type, the rule is not satisfied.
 */
@Override
public boolean isSatisfied(int index, TradingRecord tradingRecord) {
    boolean satisfied = false;
    if (tradingRecord != null) {
        final Trade previousTrade = tradingRecord.getLastTrade(tradeType);
        if (previousTrade != null) {
            // Bars elapsed since the reference trade.
            final int barsSinceTrade = index - previousTrade.getIndex();
            satisfied = barsSinceTrade >= numberOfBars;
        }
    }
    traceIsSatisfied(index, satisfied);
    return satisfied;
}
|
/**
 * The rule must become satisfied exactly 2 bars after each SELL (exit) trade
 * and stay unsatisfied while no sell has happened yet.
 */
@Test
public void waitForSinceLastSellRuleIsSatisfied() {
    // Waits for 2 bars since last sell trade
    rule = new WaitForRule(Trade.TradeType.SELL, 2);
    // No record / no trades yet: never satisfied.
    assertFalse(rule.isSatisfied(0, null));
    assertFalse(rule.isSatisfied(1, tradingRecord));
    // A buy (enter) alone does not satisfy a SELL-based wait.
    tradingRecord.enter(10);
    assertFalse(rule.isSatisfied(10, tradingRecord));
    assertFalse(rule.isSatisfied(11, tradingRecord));
    assertFalse(rule.isSatisfied(12, tradingRecord));
    assertFalse(rule.isSatisfied(13, tradingRecord));
    // Sell at 15: satisfied from bar 17 (= 15 + 2) onwards.
    tradingRecord.exit(15);
    assertFalse(rule.isSatisfied(15, tradingRecord));
    assertFalse(rule.isSatisfied(16, tradingRecord));
    assertTrue(rule.isSatisfied(17, tradingRecord));
    // Another buy does not reset the sell-based wait.
    tradingRecord.enter(17);
    assertTrue(rule.isSatisfied(17, tradingRecord));
    assertTrue(rule.isSatisfied(18, tradingRecord));
    // New sell at 20: the 2-bar wait restarts.
    tradingRecord.exit(20);
    assertFalse(rule.isSatisfied(20, tradingRecord));
    assertFalse(rule.isSatisfied(21, tradingRecord));
    assertTrue(rule.isSatisfied(22, tradingRecord));
}
|
/**
 * Produces the dashboard web-server configuration when the dashboard is
 * enabled at build time; returns null otherwise (with {@code @DefaultBean} a
 * null producer result presumably suppresses the bean — confirm against the
 * Quarkus CDI docs).
 */
@Produces
@DefaultBean
@Singleton
JobRunrDashboardWebServerConfiguration dashboardWebServerConfiguration() {
    if (jobRunrBuildTimeConfiguration.dashboard().enabled()) {
        final JobRunrDashboardWebServerConfiguration dashboardWebServerConfiguration = usingStandardDashboardConfiguration();
        // Optional runtime overrides: port, then basic-auth (only when BOTH
        // username and password are configured).
        jobRunrRuntimeConfiguration.dashboard().port().ifPresent(dashboardWebServerConfiguration::andPort);
        if (jobRunrRuntimeConfiguration.dashboard().username().isPresent() && jobRunrRuntimeConfiguration.dashboard().password().isPresent()) {
            dashboardWebServerConfiguration.andBasicAuthentication(jobRunrRuntimeConfiguration.dashboard().username().get(), jobRunrRuntimeConfiguration.dashboard().password().get());
        }
        dashboardWebServerConfiguration.andAllowAnonymousDataUsage(jobRunrRuntimeConfiguration.miscellaneous().allowAnonymousDataUsage());
        return dashboardWebServerConfiguration;
    }
    return null;
}
|
@Test
void dashboardWebServerConfigurationIsNotSetupWhenNotConfigured() {
    // When the dashboard is disabled at build time, no configuration is produced.
    when(dashboardBuildTimeConfiguration.enabled()).thenReturn(false);
    assertThat(jobRunrProducer.dashboardWebServerConfiguration()).isNull();
}
|
/**
 * Returns the smallest recorded value, or 0 for an empty snapshot.
 * Assumes {@code values} is sorted ascending so index 0 is the minimum —
 * TODO confirm against the snapshot's construction.
 */
@Override
public long getMin() {
    return values.length == 0 ? 0 : values[0];
}
|
@Test
public void calculatesTheMinimumValue() throws Exception {
    // The fixture snapshot's smallest recorded value is 1.
    assertThat(snapshot.getMin())
        .isEqualTo(1);
}
|
/**
 * Chains a callable with a result-mapping function: the returned callable
 * invokes {@code callable} and applies {@code resultHandler} to its result.
 *
 * @param callable      the source of the intermediate value
 * @param resultHandler maps the intermediate value to the final result
 * @return a lazy callable producing the mapped result
 */
public static <T, R> Callable<R> andThen(Callable<T> callable, Function<T, R> resultHandler) {
    return () -> {
        final T intermediate = callable.call();
        return resultHandler.apply(intermediate);
    };
}
|
/**
 * The chained callable must return the handler's output, not the source
 * callable's raw result.
 *
 * <p>Fix: the local was named {@code Callable}, which shadowed the type name
 * and violated lowerCamelCase; renamed to {@code callable}.
 */
@Test
public void shouldChainCallableAndResultHandler() throws Exception {
    Callable<String> callable = () -> "BLA";
    Callable<String> callableWithRecovery = CallableUtils.andThen(callable, result -> "Bla");

    String result = callableWithRecovery.call();

    assertThat(result).isEqualTo("Bla");
}
|
/**
 * Builds the message body from the wrapped SMPP command. For message requests
 * the short message is decoded to a String; 8-bit alphabets are returned as
 * raw bytes. Charset priority: exchange charset, then the configured
 * encoding, then the system default. Returns null for non-message commands
 * or empty payloads.
 */
@Override
protected Object createBody() {
    if (command instanceof MessageRequest) {
        MessageRequest msgRequest = (MessageRequest) command;
        byte[] shortMessage = msgRequest.getShortMessage();
        if (shortMessage == null || shortMessage.length == 0) {
            return null;
        }
        Alphabet alphabet = Alphabet.parseDataCoding(msgRequest.getDataCoding());
        if (SmppUtils.is8Bit(alphabet)) {
            // 8-bit payloads are binary: hand back the raw bytes untouched.
            return shortMessage;
        }
        // Prefer the exchange's charset; fall back to the configured one if
        // the exchange charset is missing or unsupported.
        String encoding = ExchangeHelper.getCharsetName(getExchange(), false);
        if (ObjectHelper.isEmpty(encoding) || !Charset.isSupported(encoding)) {
            encoding = configuration.getEncoding();
        }
        try {
            return new String(shortMessage, encoding);
        } catch (UnsupportedEncodingException e) {
            LOG.info("Unsupported encoding \"{}\". Using system default encoding.", encoding);
        }
        // Last resort: decode with the platform default charset.
        return new String(shortMessage);
    }
    return null;
}
|
@Test
public void createBodyShouldReturnNullIfTheCommandIsNull() {
    // With no wrapped command there is nothing to decode.
    message = new SmppMessage(camelContext, null, new SmppConfiguration());
    assertNull(message.createBody());
}
|
/**
 * Generates an HL7 acknowledgement into the buffer, delegating to the
 * four-argument overload with no additional detail (null last argument).
 *
 * @throws MllpAcknowledgementGenerationException if the acknowledgement
 *     cannot be generated (e.g. from an empty source message)
 */
public void generateAcknowledgementPayload(
    MllpSocketBuffer mllpSocketBuffer, byte[] hl7MessageBytes, String acknowledgementCode)
    throws MllpAcknowledgementGenerationException {
    generateAcknowledgementPayload(mllpSocketBuffer, hl7MessageBytes, acknowledgementCode, null);
}
|
@Test
public void testGenerateAcknowledgementPayloadFromEmptyMessage() {
    // An empty HL7 message cannot be acknowledged and must fail explicitly.
    MllpSocketBuffer mllpSocketBuffer = new MllpSocketBuffer(new MllpEndpointStub());
    assertThrows(MllpAcknowledgementGenerationException.class,
        () -> hl7util.generateAcknowledgementPayload(mllpSocketBuffer, new byte[0], "AA"));
}
|
/**
 * @param vertex the vertex to look up
 * @return true when the vertex has an adjacency entry in this graph
 */
public boolean containsVertex(V vertex) {
    return neighbors.containsKey(vertex);
}
|
@Test
void containsVertex() {
    // 'A' is part of the fixture graph.
    assertTrue(graph.containsVertex('A'));
}
|
/**
 * Splits a composite group key into its parts using the configured delimiter.
 *
 * <p>NOTE(review): a null {@code groupKey} throws NullPointerException here —
 * callers presumably guarantee non-null.
 *
 * @param groupKey the delimiter-joined group key
 * @return the key's component parts
 */
public static String[] parseKey(String groupKey) {
    return groupKey.split(Constants.GROUP_KEY_DELIMITER_TRANSLATION);
}
|
@Test
public void parseKey() {
    // A four-part key joined by '+' must split into exactly four components.
    String groupKey = "prescription+dynamic-threadpool-example+message-consume+12";
    String[] strings = GroupKey.parseKey(groupKey);
    Assert.isTrue(strings.length == 4);
}
|
/**
 * Applies {@code update} to the named partition's current statistics and
 * writes the result back to the metastore. Only the common (table-parameter)
 * statistics are rewritten; column-level statistics are not yet updated (see
 * the TODO at the end).
 *
 * @throws StarRocksConnectorException if the metastore does not return
 *     exactly one partition for the given name
 */
public void updatePartitionStatistics(String dbName, String tableName, String partitionName,
                                      Function<HivePartitionStats, HivePartitionStats> update) {
    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = client.getPartitionsByNames(
            dbName, tableName, ImmutableList.of(partitionName));
    if (partitions.size() != 1) {
        // Fix: size() may be 0 here as well as >1; the old message claimed
        // "multiple partitions" even when none were returned.
        throw new StarRocksConnectorException(
                "Metastore returned " + partitions.size() + " partitions for name: " + partitionName);
    }

    org.apache.hadoop.hive.metastore.api.Partition originPartition = getOnlyElement(partitions);
    HiveCommonStats curCommonStats = toHiveCommonStats(originPartition.getParameters());
    HivePartitionStats curPartitionStats = new HivePartitionStats(curCommonStats, new HashMap<>());
    HivePartitionStats updatedStats = update.apply(curPartitionStats);

    // Work on a deep copy so the fetched partition object stays untouched.
    org.apache.hadoop.hive.metastore.api.Partition modifiedPartition = originPartition.deepCopy();
    HiveCommonStats commonStats = updatedStats.getCommonStats();
    Map<String, String> originParams = modifiedPartition.getParameters();
    // Refresh the DDL timestamp (seconds) alongside the statistics rewrite.
    originParams.put(TRANSIENT_LAST_DDL_TIME, String.valueOf(System.currentTimeMillis() / 1000));
    modifiedPartition.setParameters(updateStatisticsParameters(modifiedPartition.getParameters(), commonStats));
    client.alterPartition(dbName, tableName, modifiedPartition);
    // TODO(stephen): update partition column statistics
}
|
@Test
public void testUpdatePartitionStatistics() {
    // Smoke test: updating a single known partition completes without error.
    HiveMetaClient client = new MockedHiveMetaClient();
    HiveMetastore metastore = new HiveMetastore(client, "hive_catalog", MetastoreType.HMS);
    HivePartitionStats partitionStats = HivePartitionStats.empty();
    metastore.updatePartitionStatistics("db", "table", "p1=1", ignore -> partitionStats);
}
|
/**
 * Returns the expression with every ASCII space character removed; null or
 * empty input is returned unchanged.
 *
 * <p>Fix: replaced Guava's {@code Strings.isNullOrEmpty} +
 * {@code CharMatcher.anyOf(" ").removeFrom(...)} with equivalent standard
 * library calls, dropping the third-party dependency for this method.
 *
 * @param value the expression text, may be null
 * @return the space-free expression, or the original null/empty value
 */
public static String getExactlyExpression(final String value) {
    return null == value || value.isEmpty() ? value : value.replace(" ", "");
}
|
@Test
void assertGetExactlyExpressionUsingAndReturningNull() {
    // Null input is passed through unchanged.
    assertNull(SQLUtils.getExactlyExpression(null));
}
|
/**
 * Computes, for every node, the maximum distance (in edges) from any source
 * node (a node with no inputs) using a Kahn-style topological traversal: a
 * node is processed only after all of its inputs, its distance is
 * 1 + max(input distances), and sources sit at distance 0.
 */
Map<ExecNode<?>, Integer> calculateMaximumDistance() {
    Map<ExecNode<?>, Integer> result = new HashMap<>();
    // Tracks how many inputs of each node have been processed so far.
    Map<TopologyNode, Integer> inputsVisitedMap = new HashMap<>();

    // Seed the queue with source nodes (no inputs).
    Queue<TopologyNode> queue = new LinkedList<>();
    for (TopologyNode node : nodes.values()) {
        if (node.inputs.size() == 0) {
            queue.offer(node);
        }
    }

    while (!queue.isEmpty()) {
        TopologyNode node = queue.poll();
        // Starts at -1 so a source node lands at 0 after the increment below.
        int dist = -1;
        for (TopologyNode input : node.inputs) {
            dist =
                Math.max(
                    dist,
                    Preconditions.checkNotNull(
                        result.get(input.execNode),
                        "The distance of an input node is not calculated. This is a bug."));
        }
        dist++;
        result.put(node.execNode, dist);

        // Enqueue each output once all of its inputs have been processed.
        for (TopologyNode output : node.outputs) {
            int inputsVisited =
                inputsVisitedMap.compute(output, (k, v) -> v == null ? 1 : v + 1);
            if (inputsVisited == output.inputs.size()) {
                queue.offer(output);
            }
        }
    }
    return result;
}
|
/**
 * Validates the maximum-distance values of every node in the fixture graph
 * against hand-computed expectations.
 */
@Test
void testCalculateMaximumDistance() {
    Tuple2<TopologyGraph, TestingBatchExecNode[]> tuple2 = buildTopologyGraph();
    TopologyGraph graph = tuple2.f0;
    TestingBatchExecNode[] nodes = tuple2.f1;

    Map<ExecNode<?>, Integer> result = graph.calculateMaximumDistance();
    assertThat(result).hasSize(8);
    // Expected longest distance from a source for each fixture node.
    assertThat(result.get(nodes[0]).intValue()).isEqualTo(0);
    assertThat(result.get(nodes[1]).intValue()).isEqualTo(1);
    assertThat(result.get(nodes[2]).intValue()).isEqualTo(2);
    assertThat(result.get(nodes[3]).intValue()).isEqualTo(2);
    assertThat(result.get(nodes[4]).intValue()).isEqualTo(3);
    assertThat(result.get(nodes[6]).intValue()).isEqualTo(3);
    assertThat(result.get(nodes[5]).intValue()).isEqualTo(4);
    assertThat(result.get(nodes[7]).intValue()).isEqualTo(4);
}
|
/**
 * Returns this condition when the request satisfies every param expression,
 * or null as soon as any expression fails to match.
 */
public ParamRequestCondition getMatchingCondition(HttpServletRequest request) {
    for (ParamExpression expression : this.expressions) {
        if (!expression.match(request)) {
            return null;
        }
    }
    return this;
}
|
@Test
void testGetMatchingCondition() {
    // Without the required parameter the condition does not match...
    MockHttpServletRequest request = new MockHttpServletRequest();
    ParamRequestCondition paramRequestCondition1 = paramRequestCondition.getMatchingCondition(request);
    assertNull(paramRequestCondition1);
    // ...and with it present, it does.
    request.setParameter("test", "1244");
    ParamRequestCondition paramRequestCondition2 = paramRequestCondition.getMatchingCondition(request);
    assertNotNull(paramRequestCondition2);
}
|
/**
 * Builds an SbeSchema from the given IR: fields are generated per the
 * options, and a copy of the IR is stored rather than the caller's instance
 * (the paired test asserts the stored IR is not the same object).
 */
public static SbeSchema fromIr(Ir ir, IrOptions irOptions) {
    ImmutableList<SbeField> sbeFields = IrFieldGenerator.generateFields(ir, irOptions);
    Ir copy = createIrCopy(ir);
    return new SbeSchema(SerializableIr.fromIr(copy), irOptions, sbeFields);
}
|
@Test
public void testFromIr() throws Exception {
    Ir ir = getIr(OnlyPrimitives.RESOURCE_PATH);
    SbeSchema actual = SbeSchema.fromIr(ir, IrOptions.DEFAULT);
    // The schema must hold a defensive copy of the IR, not the input object.
    assertNotNull(actual.getIr());
    assertNotSame(ir, actual.getIr());
    assertEquals(IrOptions.DEFAULT, actual.getIrOptions());
    assertEquals(OnlyPrimitives.FIELDS, actual.getSbeFields());
}
|
/**
 * Validates the given attribute value; implementations throw a
 * RuntimeException when the value is not acceptable (see the paired
 * LongRangeAttribute test).
 *
 * @param value the raw attribute value to validate
 */
public abstract void verify(String value);
|
/**
 * A long-range attribute [10, 20] must reject non-numeric and out-of-range
 * values and accept the bounds and interior values.
 */
@Test
public void testLongRangeAttribute() {
    LongRangeAttribute longRangeAttribute = new LongRangeAttribute("long.range.key", true, 10, 20, 15);
    // Non-numeric inputs are rejected.
    Assert.assertThrows(RuntimeException.class, () -> longRangeAttribute.verify(""));
    Assert.assertThrows(RuntimeException.class, () -> longRangeAttribute.verify(","));
    Assert.assertThrows(RuntimeException.class, () -> longRangeAttribute.verify("a"));
    // Values outside [10, 20] are rejected.
    Assert.assertThrows(RuntimeException.class, () -> longRangeAttribute.verify("-1"));
    Assert.assertThrows(RuntimeException.class, () -> longRangeAttribute.verify("21"));
    // Interior value and both bounds are accepted.
    longRangeAttribute.verify("11");
    longRangeAttribute.verify("10");
    longRangeAttribute.verify("20");
}
|
@Override
public String[] split(String text) {
ArrayList<String> sentences = new ArrayList<>();
// The number of words in the sentence.
int len = 0;
// Remove any carriage returns etc.
text = REGEX_CARRIAGE_RETURN.matcher(text).replaceAll(" ");
// We will use oct 031 (hex 19) as a special character for missing
// space after punctuation. Oct 031 means "end of medium", which
// probably never appears in a string in real applications.
text = text.replace('\031', ' ');
// make sure there are always spaces following punctuation to enable
// splitter to work properly - covers such cases as "believe.I ...",
// where a space has forgotten to be.
text = REGEX_FORGOTTEN_SPACE.matcher(text).replaceAll("$1$2\031$3");
text = text + "\n";
// sentence ends with [.!?], followed by capital or number. Use base-line
// splitter and then use some heuristics to improve upon this e.g.
// dealing with Mr. and etc. In this rather large regex we allow for
// quotes, brackets etc.
// $1 = the complete sentence including beginning punctuation and brackets
// $2 = the punctuation mark - either [.!?:]
// $3 = the brackets or quotes after the [!?.:]. This is non-grouping i.e. does not consume.
// $4 = the next word after the [.?!:].This is non-grouping i.e. does not consume.
// $5 = rather than a next word, it may have been the last sentence in the file. Therefore, capture
// punctuation and brackets before end of file. This is non-grouping i.e. does not consume.
Matcher matcher = REGEX_SENTENCE.matcher(text);
StringBuilder currentSentence = new StringBuilder();
int end = 0; // The offset of the end of sentence
while (matcher.find()) {
end = matcher.end();
String sentence = matcher.group(1).trim();
String punctuation = matcher.group(2);
String stuffAfterPeriod = matcher.group(3);
if (stuffAfterPeriod == null) {
stuffAfterPeriod = matcher.group(5);
if (stuffAfterPeriod == null) {
stuffAfterPeriod = "";
} else {
end = matcher.end(5);
}
} else {
end = matcher.end(3);
}
String[] words = REGEX_WHITESPACE.split(sentence);
len += words.length;
String nextWord = matcher.group(4);
if (nextWord == null) {
nextWord = "";
}
if (punctuation.compareTo(".") == 0) {
// Consider the word before the period.
// Is it an abbreviation? (then not full-stop)
// Abbreviation if:
// 1) all consonants and not all capitalised (and contain no lower case y e.g. shy, sly
// 2) a span of single letters followed by periods
// 3) a single letter (except I).
// 4) in the known abbreviations list.
// In above cases, then the period is NOT a full stop.
// perhaps only one word e.g. P.S rather than a whole sentence
Matcher lastWordMatcher = REGEX_LAST_WORD.matcher(sentence);
String lastWord = "";
if (lastWordMatcher.find()) {
lastWord = lastWordMatcher.group();
}
if ((!lastWord.matches(".*[AEIOUaeiou]+.*") && lastWord.matches(".*[a-z]+.*") && !lastWord.matches(".*[y]+.*"))
|| lastWord.matches("([a-zA-Z][\\.])+")
|| (lastWord.matches("^[A-Za-z]$") && !lastWord.matches("^[I]$"))
|| EnglishAbbreviations.contains(lastWord.toLowerCase())) {
// We have an abbreviation, but this could come at the middle or end of a
// sentence. Therefore, we assume that the abbreviation is not at the end of
// a sentence if the next word is a common word and the abbreviation occurs
// less than 5 words from the start of the sentence.
if (EnglishDictionary.CONCISE.contains(nextWord) && len > 6) {
// a sentence break
currentSentence.append(sentence);
currentSentence.append(punctuation);
currentSentence.append(stuffAfterPeriod.trim());
sentences.add(currentSentence.toString());
currentSentence = new StringBuilder();
len = 0;
} else {
// not a sentence break
currentSentence.append(sentence);
currentSentence.append(punctuation);
if (stuffAfterPeriod.indexOf('\031') == -1) {
currentSentence.append(' ');
}
}
} else {
// a sentence break
currentSentence.append(sentence);
currentSentence.append(punctuation);
currentSentence.append(stuffAfterPeriod.trim());
sentences.add(currentSentence.toString());
currentSentence = new StringBuilder();
len = 0;
}
} else {
// only consider sentences if : comes after at least 6 words from start of sentence
if (punctuation.matches("[!?]") || (punctuation.compareTo(":") == 0 && len > 6)) {
// a sentence break
currentSentence.append(sentence);
currentSentence.append(punctuation);
currentSentence.append(stuffAfterPeriod.trim());
sentences.add(currentSentence.toString());
currentSentence = new StringBuilder();
len = 0;
} else {
// not a sentence break
currentSentence.append(sentence);
currentSentence.append(punctuation);
if (stuffAfterPeriod.indexOf('\031') == -1) {
currentSentence.append(' ');
}
}
}
}
if (end < text.length()) {
// There may be something after the last sentence.
String lastPart = text.substring(end);
if (!lastPart.isEmpty()) {
currentSentence.append(lastPart);
}
}
// If currentSentence is not empty (e.g. break at abbrev), add it to the results.
if (!currentSentence.isEmpty()) {
sentences.add(currentSentence.toString().trim());
}
String[] result = new String[sentences.size()];
for (int i = 0; i < result.length; i++) {
result[i] = sentences.get(i).replaceAll("\031", "");
}
return result;
}
|
@Test
public void testSplit() {
    System.out.println("split");
    // Fixture covering the tricky splitting cases: abbreviations (Mr., B., Jr., i.e.),
    // a domain name with dots, decimal numbers (1.5, .9), a missing space after a
    // period ("it.Did"), ellipsis, bracketed and quoted sentences, and a trailing
    // sentence without any terminating punctuation.
    String text = "THE BIG RIPOFF\n\n"
        + "Mr. John B. Smith bought www.cheap.com for 1.5 million dollars, "
        + "i.e. he paid far too much for it.Did he mind? "
        + "Adam Jones Jr. thinks he didn't. In any case, this isn't true..."
        + "Well, it isn't with a probability of .9.Right?"
        + "Again, it isn't with a probability of .9 .Right?"
        + "[This is bracketed sentence.] "
        + "\"This is quoted sentence.\" "
        + "This last sentence has no period";
    // Expected sentences after splitting; note the heading and the abbreviation-heavy
    // first sentence are kept together, and brackets/quotes stay with their sentence.
    String[] expResult = {
        "THE BIG RIPOFF Mr. John B. Smith bought www.cheap.com for 1.5 million dollars, i.e. he paid far too much for it.",
        "Did he mind?",
        "Adam Jones Jr. thinks he didn't.",
        "In any case, this isn't true...",
        "Well, it isn't with a probability of .9.",
        "Right?",
        "Again, it isn't with a probability of .9.",
        "Right?",
        "[This is bracketed sentence.]",
        "\"This is quoted sentence.\"",
        "This last sentence has no period"
    };
    SimpleSentenceSplitter instance = SimpleSentenceSplitter.getInstance();
    String[] result = instance.split(text);
    // Compare count first so a miscount fails fast, then each sentence in order.
    assertEquals(expResult.length, result.length);
    for (int i = 0; i < result.length; i++)
        assertEquals(expResult[i], result[i]);
}
|
/**
 * Persists the given mapping entry for the specified database type by delegating
 * to the backing mapping store.
 */
@Override
public void storeMappingEntry(Type type, MappingEntry entry) {
    store.storeMapping(type, entry);
}
|
@Test
public void storeMappingEntry() {
    // Three distinct mappings wrapped as entries.
    Mapping m1 = mapping(1, 1);
    Mapping m2 = mapping(2, 2);
    Mapping m3 = mapping(3, 3);
    MappingEntry me1 = new DefaultMappingEntry(m1);
    MappingEntry me2 = new DefaultMappingEntry(m2);
    MappingEntry me3 = new DefaultMappingEntry(m3);
    // Precondition: nothing stored yet for this database/device.
    assertTrue("store should be empty", Sets.newHashSet(
        service.getMappingEntries(MAP_DATABASE, LISP_DID)).isEmpty());
    adminService.storeMappingEntry(MAP_DATABASE, me1);
    adminService.storeMappingEntry(MAP_DATABASE, me2);
    adminService.storeMappingEntry(MAP_DATABASE, me3);
    // All three entries should now be visible through the service.
    assertEquals("3 mappings should exist", 3, mappingCount(MAP_DATABASE));
}
|
/**
 * Runs a single SQL migration and records it in the migrations table, all within one
 * transaction so the statement and the bookkeeping commit (or roll back) together.
 * Empty migrations are recorded without executing any statement. Any failure is
 * wrapped in an IllegalStateException via shouldNotHappenException, including the
 * migration file name for diagnosis.
 */
protected void runMigration(SqlMigration migration) {
    LOGGER.info("Running migration {}", migration);
    try (final Connection conn = getConnection(); final Transaction tran = new Transaction(conn)) {
        if (!isEmptyMigration(migration)) {
            runMigrationStatement(conn, migration);
        }
        updateMigrationsTable(conn, migration);
        tran.commit();
    } catch (Exception e) {
        throw shouldNotHappenException(new IllegalStateException("Error running database migration " + migration.getFileName(), e));
    }
}
|
@Test
void testMigrationIsNotDoneMoreThanOnce() {
    // In-memory H2 kept alive across connections so both runMigrations calls see the same DB.
    final JdbcDataSource dataSource = createH2DataSource("jdbc:h2:mem:/test;DB_CLOSE_DELAY=-1");
    final DatabaseCreator databaseCreator = Mockito.spy(new DatabaseCreator(dataSource, H2StorageProvider.class));
    assertThatCode(databaseCreator::runMigrations).doesNotThrowAnyException();
    // Simulate a migration having been applied twice (e.g. a killed process).
    insertExtraMigrationInDB(dataSource, databaseCreator);
    Mockito.reset(databaseCreator);
    // NOTE(review): assertThatThrownBy would be the clearer AssertJ idiom for an
    // expected exception; assertThatCode(...).isInstanceOf(...) behaves equivalently here.
    assertThatCode(databaseCreator::runMigrations)
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("A migration was applied multiple times (probably because it took too long and the process was killed). " +
            "Please cleanup the migrations_table and remove duplicate entries.");
    // The duplicate must be detected before any migration is (re-)executed.
    verify(databaseCreator, never()).runMigration(any());
}
|
/**
 * Parses {@code text} with the configured formatter and resolves it into a
 * ZonedDateTime. Starts from a default date-time in the parsed zone — or in the
 * supplied {@code zoneId} when the text carries no zone — then overrides every
 * ChronoField that was actually present in the parse. Day-of-year 366 (a leap day)
 * requires the year to be parsed too, since the default epoch year is not a leap year.
 *
 * @throws KsqlException if a parsed field cannot be applied, or a leap day is
 *         supplied without a year
 */
@VisibleForTesting
ZonedDateTime parseZoned(final String text, final ZoneId zoneId) {
    final TemporalAccessor parsed = formatter.parse(text);
    final ZoneId parsedZone = parsed.query(TemporalQueries.zone());
    ZonedDateTime resolved = DEFAULT_ZONED_DATE_TIME.apply(
        ObjectUtils.defaultIfNull(parsedZone, zoneId));
    for (final TemporalField override : ChronoField.values()) {
        if (parsed.isSupported(override)) {
            if (!resolved.isSupported(override)) {
                throw new KsqlException(
                    "Unsupported temporal field in timestamp: " + text + " (" + override + ")");
            }
            final long value = parsed.getLong(override);
            if (override == ChronoField.DAY_OF_YEAR && value == LEAP_DAY_OF_THE_YEAR) {
                if (!parsed.isSupported(ChronoField.YEAR)) {
                    throw new KsqlException("Leap day cannot be parsed without supplying the year field");
                }
                // eagerly override year, to avoid mismatch with epoch year, which is not a leap year
                resolved = resolved.withYear(parsed.get(ChronoField.YEAR));
            }
            resolved = resolved.with(override, value);
        }
    }
    return resolved;
}
|
@Test
public void shouldParseFullLocalDateWithPassedInTimeZone() {
    // Given: a pattern without any zone field, so the explicitly passed zone must apply
    final StringToTimestampParser parser = new StringToTimestampParser("yyyy-MM-dd HH");

    // When
    final ZonedDateTime result = parser.parseZoned("1605-11-05 10", GMT_3);

    // Then: same local date-time, interpreted in GMT+3
    assertThat(result, is(sameInstant(FIFTH_OF_NOVEMBER.withHour(10).withZoneSameLocal(GMT_3))));
}
|
/**
 * Lists the given directory using the default path delimiter, delegating to the
 * delimiter-aware overload (i.e. a non-recursive listing).
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener, String.valueOf(Path.DELIMITER));
}
|
@Test
public void testList() {
    final Path container = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final AttributedList<Path> list = new S3ObjectListService(session, new S3AccessControlListFeature(session)).list(container, new DisabledListProgressListener());
    for(Path p : list) {
        // Every listed entry must be a direct child of the bucket.
        assertEquals(container, p.getParent());
        if(p.isFile()) {
            // NOTE(review): assertSame after the assertEquals above is redundant
            // (but stronger — it pins reference identity of the parent).
            assertSame(container, p.getParent());
            // Files must carry basic metadata; version IDs are not expected
            // from a plain object listing.
            assertNotEquals(-1L, p.attributes().getModificationDate());
            assertNotEquals(-1L, p.attributes().getSize());
            assertNotNull(p.attributes().getETag());
            assertNotNull(p.attributes().getStorageClass());
            assertNull(p.attributes().getVersionId());
        }
    }
}
|
/**
 * POSTs a wanted-state change for one storage node to the cluster controller.
 * Returns true when the controller reports the state was modified; when it was not,
 * either returns false or (if throwOnFailure) throws with the controller's reason.
 * Transport-level failures are mapped to HostStateChangeDeniedException with a
 * constraint describing which stage failed.
 */
private boolean setNodeState(OrchestratorContext context, HostName host, int storageNodeIndex,
                             ClusterControllerNodeState wantedState, ContentService contentService,
                             Condition condition, boolean throwOnFailure) {
    try {
        ClusterControllerClientTimeouts timeouts = context.getClusterControllerTimeouts();
        Inspector response = client.send(strategy(hosts), Method.POST)
                                   .at("cluster", "v2", clusterName, contentService.nameInClusterController(),
                                       Integer.toString(storageNodeIndex))
                                   .deadline(timeouts.readBudget())
                                   .parameters(() -> deadline(timeouts))
                                   .body(stateChangeRequestBytes(wantedState, condition, context.isProbe()))
                                   .throwing(retryOnRedirect)
                                   .read(SlimeUtils::jsonToSlime).get();
        // The controller accepted the request but declined to change state.
        if ( ! response.field("wasModified").asBool()) {
            if (throwOnFailure)
                throw new HostStateChangeDeniedException(host,
                                                         HostedVespaPolicy.SET_NODE_STATE_CONSTRAINT,
                                                         "Failed to set state to " + wantedState +
                                                         " in cluster controller: " + response.field("reason").asString());
            return false;
        }
        return true;
    }
    catch (ResponseException e) {
        // Controller returned an error response.
        throw new HostStateChangeDeniedException(host,
                                                 HostedVespaPolicy.SET_NODE_STATE_CONSTRAINT,
                                                 "Failed setting node " + storageNodeIndex + " in cluster " +
                                                 clusterName + " to state " + wantedState + ": " + e.getMessage());
    }
    catch (UncheckedIOException e) {
        // I/O failure talking to the controller; retries (if any) are exhausted.
        throw new HostStateChangeDeniedException(host,
                                                 HostedVespaPolicy.CLUSTER_CONTROLLER_AVAILABLE_CONSTRAINT,
                                                 String.format("Giving up setting %s for storage node with index %d in cluster %s: %s",
                                                               wantedState,
                                                               storageNodeIndex,
                                                               clusterName,
                                                               e.getMessage()),
                                                 e.getCause());
    }
    catch (UncheckedTimeoutException e) {
        // Deadline exceeded before the controller answered.
        throw new HostStateChangeDeniedException(host,
                                                 HostedVespaPolicy.DEADLINE_CONSTRAINT,
                                                 "Timeout while waiting for setNodeState(" + storageNodeIndex + ", " + wantedState +
                                                 ") against " + hosts + ": " + e.getMessage(),
                                                 e);
    }
}
|
@Test
public void testRetriesUntilExhaustion() {
    OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
    // Queue a 503 reply from every cluster controller so each retry target fails.
    for (int i = 0; i < clusterControllers.size(); i++) {
        int j = i + 1;
        wire.expect((url, body) -> {
                        assertEquals("http://host" + j + ":19050/cluster/v2/cc/storage/2?timeout=9.6",
                                     url.asURI().toString());
                        return "";
                    },
                    503);
    }
    // All retries exhausted
    assertEquals("Changing the state of node would violate controller-set-node-state: Failed setting node 2 in cluster cc to state UP: " +
                 "got status code 503 for POST http://host1:19050/cluster/v2/cc/storage/2?timeout=9.6",
                 assertThrows(HostStateChangeDeniedException.class,
                              () -> client.setNodeState(context, host, 2, UP, ContentService.STORAGE_NODE, false))
                         .getMessage());
}
|
/**
 * Parses a colon-separated hex string (e.g. a datapath ID such as
 * "3e:1f:01:fc:72:8c:63:31") into a long, one byte per component.
 *
 * @param value colon-separated string of at most 8 components, each 1 or 2 hex digits
 * @return the parsed 64-bit value
 * @throws NumberFormatException if there are more than 8 components, a component has
 *         more than 2 characters, or a component is not valid hex
 */
public static long toLong(String value) {
    final String[] parts = value.split(":");
    if (parts.length > 8) {
        throw new NumberFormatException("Input string is too big to fit in long: " + value);
    }
    long result = 0;
    for (final String part : parts) {
        if (part.length() > 2) {
            throw new NumberFormatException(
                "Each colon-separated byte component must consist of 1 or 2 hex digits: " + value);
        }
        // Shift the accumulator one byte left and add this component's value.
        result = (result << 8) + Short.parseShort(part, 16);
    }
    return result;
}
|
@Test
public void testToLong() {
    // A full 8-byte datapath ID should round-trip into the expected long value.
    final String dpid = "3e:1f:01:fc:72:8c:63:31";
    final long expected = 0x3e1f01fc728c6331L;
    assertEquals(expected, HexString.toLong(dpid));
}
|
/**
 * Executes the given void function with retries by adapting it to a Callable that
 * returns null and delegating to the Callable-based overload.
 *
 * @param function the operation to run
 * @param retryBehaviour decides which failures are retried
 * @throws Exception if the operation ultimately fails
 */
public static void executeWithRetries(
    final Function function,
    final RetryBehaviour retryBehaviour
) throws Exception {
    executeWithRetries(() -> {
        function.call();
        return null;
    }, retryBehaviour);
}
|
@Test
public void shouldRetryAndSucceed() throws Exception {
    // Given: a callable that fails four times and succeeds on the fifth attempt.
    // (The exception message "I will never succeed" is intentionally ironic.)
    final AtomicInteger counts = new AtomicInteger(5);
    final Callable<Object> eventuallySucceeds = () -> {
        if (counts.decrementAndGet() == 0) {
            return null;
        }
        throw new TestRetriableException("I will never succeed");
    };
    // When:
    ExecutorUtil.executeWithRetries(eventuallySucceeds, ON_RETRYABLE, () -> SMALL_RETRY_BACKOFF);
    // Then: Succeeded, i.e. did not throw.
}
|
/**
 * Authenticates the session using an OAuth access token obtained from the
 * authorization service, mapping Swift and I/O failures to BackgroundExceptions.
 */
@Override
public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    // Validates (and refreshes if needed) the stored OAuth tokens.
    final Credentials credentials = authorizationService.validate();
    try {
        if(log.isInfoEnabled()) {
            log.info(String.format("Attempt authentication with %s", credentials.getOauth()));
        }
        client.authenticate(new HubicAuthenticationRequest(credentials.getOauth().getAccessToken()), new HubicAuthenticationResponseHandler());
    }
    catch(GenericException e) {
        // Swift-level failure (e.g. rejected token).
        throw new SwiftExceptionMappingService().map(e);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
}
|
// With no valid OAuth token available and all prompts disabled, login must
// abort with LoginCanceledException rather than hang or authenticate.
@Test(expected = LoginCanceledException.class)
public void testConnectInvalidAccessToken() throws Exception {
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new HubicProtocol())));
    final Profile profile = new ProfilePlistReader(factory).read(
        this.getClass().getResourceAsStream("/hubiC.cyberduckprofile"));
    final HubicSession session = new HubicSession(new Host(profile,
        new HubicProtocol().getDefaultHostname(), new Credentials("u@domain")), new DisabledX509TrustManager(), new DefaultX509KeyManager());
    session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    session.close();
}
|
/**
 * Lazily creates the Runner on first call and returns the cached instance thereafter.
 * NOTE(review): not synchronized — assumes single-threaded access; confirm no
 * concurrent callers before relying on exactly-once creation.
 */
@Override
public Runner get() {
    if (runner == null) {
        runner = createRunner();
    }
    return runner;
}
|
// The supplier must lazily create a runner rather than return null.
@Test
void should_create_a_runner() {
    assertThat(runnerSupplier.get(), is(notNullValue()));
}
|
/**
 * Returns whether the named database exists, delegating to whichever connector
 * metadata is responsible for that database name.
 */
@Override
public boolean dbExists(String dbName) {
    ConnectorMetadata metadata = metadataOfDb(dbName);
    return metadata.dbExists(dbName);
}
|
@Test
void testDbExists(@Mocked ConnectorMetadata connectorMetadata) {
    // Expect exactly one delegation to the connector metadata for "test_db";
    // the information-schema lookup below is presumably routed to
    // informationSchemaMetadata instead — confirm against metadataOfDb.
    new Expectations() {
        {
            connectorMetadata.dbExists("test_db");
            result = true;
            times = 1;
        }
    };
    CatalogConnectorMetadata catalogConnectorMetadata = new CatalogConnectorMetadata(
        connectorMetadata,
        informationSchemaMetadata,
        metaMetadata
    );
    assertTrue(catalogConnectorMetadata.dbExists("test_db"));
    assertTrue(catalogConnectorMetadata.dbExists(InfoSchemaDb.DATABASE_NAME));
}
|
@ConstantFunction.List(list = {
        @ConstantFunction(name = "mod", argTypes = {DECIMALV2, DECIMALV2}, returnType = DECIMALV2),
        @ConstantFunction(name = "mod", argTypes = {DECIMAL32, DECIMAL32}, returnType = DECIMAL32),
        @ConstantFunction(name = "mod", argTypes = {DECIMAL64, DECIMAL64}, returnType = DECIMAL64),
        @ConstantFunction(name = "mod", argTypes = {DECIMAL128, DECIMAL128}, returnType = DECIMAL128)
})
public static ConstantOperator modDecimal(ConstantOperator first, ConstantOperator second) {
    final BigDecimal dividend = first.getDecimal();
    final BigDecimal divisor = second.getDecimal();
    // Modulo by zero yields a NULL constant of the dividend's type (SQL semantics).
    if (divisor.signum() == 0) {
        return ConstantOperator.createNull(first.getType());
    }
    return createDecimalConstant(dividend.remainder(divisor));
}
|
@Test
public void modDecimal() {
    // x mod x == 0 for every decimal precision/scale variant; also verify the
    // result stays a decimal-v3 type. (The divisor-zero NULL path is not covered here.)
    assertEquals("0", ScalarOperatorFunctions.modDecimal(O_DECIMAL_100, O_DECIMAL_100).getDecimal().toString());
    assertEquals("0",
            ScalarOperatorFunctions.modDecimal(O_DECIMAL32P7S2_100, O_DECIMAL32P7S2_100).getDecimal().toString());
    assertEquals("0",
            ScalarOperatorFunctions.modDecimal(O_DECIMAL32P9S0_100, O_DECIMAL32P9S0_100).getDecimal().toString());
    assertEquals("0",
            ScalarOperatorFunctions.modDecimal(O_DECIMAL64P15S10_100, O_DECIMAL64P15S10_100).getDecimal()
                    .toString());
    assertEquals("0",
            ScalarOperatorFunctions.modDecimal(O_DECIMAL64P18S15_100, O_DECIMAL64P18S15_100).getDecimal()
                    .toString());
    assertEquals("0",
            ScalarOperatorFunctions.modDecimal(O_DECIMAL128P30S2_100, O_DECIMAL128P30S2_100).getDecimal()
                    .toString());
    assertEquals("0",
            ScalarOperatorFunctions.modDecimal(O_DECIMAL128P38S20_100, O_DECIMAL128P38S20_100).getDecimal()
                    .toString());
    assertTrue(ScalarOperatorFunctions.modDecimal(O_DECIMAL128P38S20_100, O_DECIMAL128P38S20_100).getType()
            .isDecimalV3());
}
|
/**
 * GETs /admin/loggers/ from a Connect REST endpoint and returns a map of logger
 * name to level. The endpoint returns JSON of the shape
 * {"logger.name": {"level": "...", ...}, ...}; entries lacking a "level" field are
 * skipped. The future fails on non-200 responses, unparsable bodies, or
 * connection-level errors.
 */
@Override
public Future<Map<String, String>> listConnectLoggers(Reconciliation reconciliation, String host, int port) {
    String path = "/admin/loggers/";
    LOGGER.debugCr(reconciliation, "Making GET request to {}", path);
    return HttpClientUtils.withHttpClient(vertx, new HttpClientOptions().setLogActivity(true), (httpClient, result) ->
        httpClient.request(HttpMethod.GET, port, host, path, request -> {
            if (request.succeeded()) {
                request.result().setFollowRedirects(true)
                        .putHeader("Accept", "application/json");
                request.result().send(response -> {
                    if (response.succeeded()) {
                        if (response.result().statusCode() == 200) {
                            response.result().bodyHandler(buffer -> {
                                try {
                                    LOGGER.debugCr(reconciliation, "Got {} response to GET request to {}", response.result().statusCode(), path);
                                    // Flatten {logger -> {level -> value, ...}} to {logger -> level}.
                                    Map<String, Map<String, String>> fetchedLoggers = mapper.readValue(buffer.getBytes(), MAP_OF_MAP_OF_STRINGS);
                                    Map<String, String> loggerMap = new HashMap<>(fetchedLoggers.size());
                                    for (var loggerEntry : fetchedLoggers.entrySet()) {
                                        String level = loggerEntry.getValue().get("level");
                                        if (level != null) {
                                            loggerMap.put(loggerEntry.getKey(), level);
                                        }
                                    }
                                    result.tryComplete(loggerMap);
                                } catch (IOException e) {
                                    LOGGER.warnCr(reconciliation, "Failed to get list of connector loggers", e);
                                    result.fail(new ConnectRestException(response.result(), "Failed to get connector loggers", e));
                                }
                            });
                        } else {
                            result.fail(new ConnectRestException(response.result(), "Unexpected status code"));
                        }
                    } else {
                        result.tryFail(response.cause());
                    }
                });
            } else {
                result.tryFail(request.cause());
            }
        }));
}
|
@Test
public void testListConnectLoggersWithLevelAndLastModified(Vertx vertx, VertxTestContext context) throws Exception {
    // Server returns a logger entry carrying both "level" and the extra
    // "last_modified" field; only the level must survive the mapping.
    final HttpServer server = mockApi(vertx, 200, new ObjectMapper().writeValueAsString(
        Map.of(
            "org.apache.kafka.connect",
            Map.of(
                "level", "WARN",
                "last_modified", "2020-01-01T00:00:00.000Z"
            )
        )
    ));
    final KafkaConnectApi api = new KafkaConnectApiImpl(vertx);
    final Checkpoint async = context.checkpoint();
    api.listConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "127.0.0.1", server.actualPort())
        .onComplete(context.succeeding(res -> context.verify(() -> {
            assertThat(res, allOf(
                aMapWithSize(1),
                hasEntry("org.apache.kafka.connect", "WARN")
            ));
            server.close();
            async.flag();
        })));
}
|
/**
 * Walks the cause chain of {@code source} and returns the first throwable assignable
 * to any of the given exception classes, or {@code null} if no link matches (or
 * {@code source} is null). {@code source} itself is checked first.
 *
 * @param source  the throwable whose cause chain is inspected; may be null
 * @param clazzes the exception types to look for
 * @return the first matching throwable in the chain, or null
 */
@SafeVarargs // the varargs array is only read, never stored or exposed
public static Exception lookupExceptionInCause(Throwable source, Class<? extends Exception>... clazzes) {
    while (source != null) {
        for (Class<? extends Exception> clazz : clazzes) {
            if (clazz.isAssignableFrom(source.getClass())) {
                return (Exception) source;
            }
        }
        source = source.getCause();
    }
    return null;
}
|
// `cause` is a shared RuntimeException fixture: wrapping it in a plain Exception
// must still let the lookup find and return the very same instance.
@Test
void givenCause_whenLookupExceptionInCause_thenReturnCause() {
    assertThat(ExceptionUtil.lookupExceptionInCause(new Exception(cause), RuntimeException.class)).isSameAs(cause);
}
|
/**
 * Static factory returning OutStreamOptions populated from the given file system
 * context and Alluxio configuration defaults.
 */
public static OutStreamOptions defaults(FileSystemContext context,
    AlluxioConfiguration alluxioConf) {
  return new OutStreamOptions(context, alluxioConf);
}
|
@Test
public void defaults() throws IOException {
  // CACHE_THROUGH implies both Alluxio storage (STORE) and sync-persist to UFS.
  AlluxioStorageType alluxioType = AlluxioStorageType.STORE;
  UnderStorageType ufsType = UnderStorageType.SYNC_PERSIST;
  mConf.set(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT, "64MB");
  mConf.set(PropertyKey.USER_FILE_WRITE_TYPE_DEFAULT, WriteType.CACHE_THROUGH.toString());
  mConf.set(PropertyKey.USER_FILE_WRITE_TIER_DEFAULT, Constants.LAST_TIER);
  mConf.set(PropertyKey.SECURITY_GROUP_MAPPING_CLASS, FakeUserGroupsMapping.class.getName());
  // Authenticate as test_user so owner/group resolution can be asserted.
  Subject subject = new Subject();
  subject.getPrincipals().add(new User("test_user"));
  ClientContext clientContext = ClientContext.create(subject, mConf);
  OutStreamOptions options = OutStreamOptions.defaults(FileSystemContext.create(clientContext));
  assertEquals(alluxioType, options.getAlluxioStorageType());
  assertEquals(64 * Constants.MB, options.getBlockSizeBytes());
  assertEquals("test_user", options.getOwner());
  assertEquals("test_group", options.getGroup());
  assertEquals(ModeUtils.applyFileUMask(Mode.defaults(),
      mConf.getString(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK)), options.getMode());
  assertEquals(Constants.NO_TTL, options.getCommonOptions().getTtl());
  assertEquals(TtlAction.FREE, options.getCommonOptions().getTtlAction());
  assertEquals(ufsType, options.getUnderStorageType());
  assertEquals(WriteType.CACHE_THROUGH, options.getWriteType());
  assertEquals(Constants.LAST_TIER, options.getWriteTier());
}
|
/**
 * Converts one change-stream result row into change stream records, handling the
 * dialect difference: PostgreSQL returns a single JSONB document per row, while
 * GoogleSQL returns an array of structs that may expand to multiple records.
 */
public List<ChangeStreamRecord> toChangeStreamRecords(
    PartitionMetadata partition,
    ChangeStreamResultSet resultSet,
    ChangeStreamResultSetMetadata resultSetMetadata) {
  if (this.isPostgres()) {
    // In PostgresQL, change stream records are returned as JsonB.
    return Collections.singletonList(
        toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
  }
  // In GoogleSQL, change stream records are returned as an array of structs.
  return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
      .flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
      .collect(Collectors.toList());
}
|
@Test
public void testMappingJsonRowWithUnknownModTypeAndValueCaptureTypeToDataChangeRecord() {
    // Build a record with UNKNOWN mod/value-capture types, serialize it to JSON, and
    // verify the Postgres mapper round-trips it from the mocked JSONB column.
    final DataChangeRecord dataChangeRecord =
        new DataChangeRecord(
            "partitionToken",
            Timestamp.ofTimeSecondsAndNanos(10L, 20),
            "transactionId",
            false,
            "1",
            "tableName",
            Arrays.asList(
                new ColumnType("column1", new TypeCode("{\"code\":\"INT64\"}"), true, 1L),
                new ColumnType("column2", new TypeCode("{\"code\":\"BYTES\"}"), false, 2L)),
            Collections.singletonList(
                new Mod("{\"column1\":\"value1\"}", null, "{\"column2\":\"newValue2\"}")),
            ModType.UNKNOWN,
            ValueCaptureType.UNKNOWN,
            10L,
            2L,
            "transactionTag",
            true,
            null);
    final String jsonString = recordToJson(dataChangeRecord, true, true);
    assertNotNull(jsonString);
    ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
    when(resultSet.getPgJsonb(0)).thenReturn(jsonString);
    assertEquals(
        Collections.singletonList(dataChangeRecord),
        mapperPostgres.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
}
|
/**
 * Adds a reference to the cached resource mapped by {@code key} and refreshes its
 * access time. Returns the resource's file name, or null when the key is not cached.
 * NOTE(review): synchronization is on the interned key String, serializing all
 * operations per key — assumes every accessor of cachedResources locks the same
 * interned instance; confirm this store-wide convention.
 */
@Override
public String addResourceReference(String key,
    SharedCacheResourceReference ref) {
  String interned = intern(key);
  synchronized (interned) {
    SharedCacheResource resource = cachedResources.get(interned);
    if (resource == null) { // it's not mapped
      return null;
    }
    resource.addReference(ref);
    resource.updateAccessTime();
    return resource.getFileName();
  }
}
|
@Test
void testBootstrapping() throws Exception {
  // Seed the store with initial resources; keys are "0".."count-1" mapped to "<key>.jar".
  Map<String, String> initialCachedResources = startStoreWithResources();
  int count = initialCachedResources.size();
  ApplicationId id = createAppId(1, 1L);
  // the entries from the cached entries should now exist
  for (int i = 0; i < count; i++) {
    String key = String.valueOf(i);
    String fileName = key + ".jar";
    String result =
        store.addResourceReference(key, new SharedCacheResourceReference(id,
            "user"));
    // the value should not be null (i.e. it has the key) and the filename should match
    assertEquals(fileName, result);
    // the initial input should be emptied
    // NOTE(review): presumably the store drains initialCachedResources during
    // bootstrap (startStoreWithResources) — confirm, since nothing in this loop
    // removes entries from the map.
    assertTrue(initialCachedResources.isEmpty());
  }
}
|
/**
 * Merges an ACL spec into an existing ACL: spec entries replace existing entries
 * with the same key, remaining spec entries are added, and mask entries are
 * recalculated for every scope that changed unless the spec provided a mask for
 * that scope explicitly (tracked via maskDirty/scopeDirty).
 *
 * @param existingAcl the current ACL entries
 * @param inAclSpec the ACL spec to merge in
 * @return the merged, validated ACL
 * @throws AclException if the resulting ACL would be invalid
 */
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
    List<AclEntry> inAclSpec) throws AclException {
  ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
  ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
  List<AclEntry> foundAclSpecEntries =
      Lists.newArrayListWithCapacity(MAX_ENTRIES);
  EnumMap<AclEntryScope, AclEntry> providedMask =
      Maps.newEnumMap(AclEntryScope.class);
  EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
  EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
  // Walk the existing ACL: entries with a matching spec key are replaced; masks
  // are held aside in providedMask rather than added to the builder directly.
  for (AclEntry existingEntry: existingAcl) {
    AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
    if (aclSpecEntry != null) {
      foundAclSpecEntries.add(aclSpecEntry);
      scopeDirty.add(aclSpecEntry.getScope());
      if (aclSpecEntry.getType() == MASK) {
        providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
        maskDirty.add(aclSpecEntry.getScope());
      } else {
        aclBuilder.add(aclSpecEntry);
      }
    } else {
      if (existingEntry.getType() == MASK) {
        providedMask.put(existingEntry.getScope(), existingEntry);
      } else {
        aclBuilder.add(existingEntry);
      }
    }
  }
  // ACL spec entries that were not replacements are new additions.
  for (AclEntry newEntry: aclSpec) {
    if (Collections.binarySearch(foundAclSpecEntries, newEntry,
        ACL_ENTRY_COMPARATOR) < 0) {
      scopeDirty.add(newEntry.getScope());
      if (newEntry.getType() == MASK) {
        providedMask.put(newEntry.getScope(), newEntry);
        maskDirty.add(newEntry.getScope());
      } else {
        aclBuilder.add(newEntry);
      }
    }
  }
  copyDefaultsIfNeeded(aclBuilder);
  calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
  return buildAndValidateAcl(aclBuilder);
}
|
@Test
public void testMergeAclEntriesProvidedDefaultMask() throws AclException {
  // Existing ACL has only access entries; the spec adds default entries including
  // an explicit default mask, which must be kept verbatim (not recalculated).
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, ALL))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
|
/**
 * Returns all contained error messages joined with ", ", in iteration order.
 */
public String asString() {
    final StringBuilder joined = new StringBuilder();
    boolean first = true;
    for (ConfigErrors errors : this) {
        if (!first) {
            joined.append(", ");
        }
        joined.append(errors.asString());
        first = false;
    }
    return joined.toString();
}
|
@Test
public void shouldGetAConsolidatedListOfErrorsAsMessage() {
    // Three errors must be joined with ", " in insertion order.
    AllConfigErrors errors = new AllConfigErrors();
    errors.add(error("key1"));
    errors.add(error("key2"));
    errors.add(error("key3"));
    assertThat(errors.asString(), is("error on key1, error on key2, error on key3"));
}
|
/**
 * Allocates the first available IP of the given network by moving it from the
 * store's available pool to the allocated pool. Returns the allocated address,
 * or null (after logging a warning) when the pool is exhausted.
 */
@Override
public IpAddress allocateIp(String networkId) {
    IpAddress availableIp = availableIps(networkId).stream()
            .findFirst().orElse(null);
    if (availableIp != null) {
        // IPAM entry key combines network ID and the address itself.
        String ipamId = networkId + "-" + availableIp.toString();
        k8sIpamStore.removeAvailableIp(ipamId);
        k8sIpamStore.createAllocatedIp(
                new DefaultK8sIpam(ipamId, availableIp, networkId));
        log.info("Allocate a new IP {}", availableIp.toString());
        return availableIp;
    } else {
        log.warn("No IPs are available for allocating.");
    }
    return null;
}
|
@Test
public void testAllocateIp() {
    // Pool starts with 2 available and 0 allocated addresses.
    createBasicIpPool();
    assertEquals("Number of allocated IPs did not match", 0,
            target.allocatedIps(NETWORK_ID).size());
    assertEquals("Number of available IPs did not match", 2,
            target.availableIps(NETWORK_ID).size());
    // Allocating one should move exactly one address between the pools.
    IpAddress allocatedIp = target.allocateIp(NETWORK_ID);
    assertEquals("Number of allocated IPs did not match", 1,
            target.allocatedIps(NETWORK_ID).size());
    assertEquals("Number of available IPs did not match", 1,
            target.availableIps(NETWORK_ID).size());
    assertTrue("Allocated IP did not match",
            IP_ADDRESSES.contains(allocatedIp));
}
|
/**
 * Returns the current server group from the cache; freshness depends on the
 * cache supplier's refresh policy (not visible here).
 */
@Override
public ServerGroup servers() {
    return cache.get();
}
|
// When the nginx health output reports every upstream as up, the endpoint
// must be considered healthy.
@Test
public void all_up_endpoint_is_up() {
    NginxHealthClient service = createClient("nginx-health-output-all-up.json");
    assertTrue(service.servers().isHealthy("gateway.prod.music.vespa.us-east-2.prod"));
}
|
/**
 * Builds an encoder-decoder text generator: loads the SentencePiece tokenizer and
 * the ONNX encoder and decoder models, wiring up the configured input/output tensor
 * names and per-model execution mode and threading options, then validates that the
 * models match the expected interface.
 */
@Inject
public Generator(OnnxRuntime onnx, GeneratorConfig config) {
    // Set up tokenizer
    tokenizer = new SentencePieceEmbedder.Builder(config.tokenizerModel().toString()).build();
    tokenizerMaxTokens = config.tokenizerMaxTokens();
    // Set up encoder
    encoderInputIdsName = config.encoderModelInputIdsName();
    encoderAttentionMaskName = config.encoderModelAttentionMaskName();
    encoderOutputName = config.encoderModelOutputName();
    OnnxEvaluatorOptions encoderOptions = new OnnxEvaluatorOptions();
    encoderOptions.setExecutionMode(config.encoderOnnxExecutionMode().toString());
    encoderOptions.setThreads(config.encoderOnnxInterOpThreads(), config.encoderOnnxIntraOpThreads());
    encoder = onnx.evaluatorOf(config.encoderModel().toString(), encoderOptions);
    // Set up decoder
    decoderInputIdsName = config.decoderModelInputIdsName();
    decoderAttentionMaskName = config.decoderModelAttentionMaskName();
    decoderEncoderHiddenStateName = config.decoderModelEncoderHiddenStateName();
    decoderOutputName = config.decoderModelOutputName();
    OnnxEvaluatorOptions decoderOptions = new OnnxEvaluatorOptions();
    decoderOptions.setExecutionMode(config.decoderOnnxExecutionMode().toString());
    decoderOptions.setThreads(config.decoderOnnxInterOpThreads(), config.decoderOnnxIntraOpThreads());
    decoder = onnx.evaluatorOf(config.decoderModel().toString(), decoderOptions);
    validateModels();
}
|
@Test
public void testGenerator() {
    // Random (but fixed-seed) test models: the generated token sequence is
    // deterministic gibberish, so the exact string can be asserted.
    String vocabPath = "src/test/models/onnx/llm/en.wiki.bpe.vs10000.model";
    String encoderModelPath = "src/test/models/onnx/llm/random_encoder.onnx";
    String decoderModelPath = "src/test/models/onnx/llm/random_decoder.onnx";
    // Skip on platforms where the ONNX runtime is not available.
    assumeTrue(OnnxRuntime.isRuntimeAvailable(encoderModelPath));
    GeneratorConfig.Builder builder = new GeneratorConfig.Builder();
    builder.tokenizerModel(ModelReference.valueOf(vocabPath));
    builder.encoderModel(ModelReference.valueOf(encoderModelPath));
    builder.decoderModel(ModelReference.valueOf(decoderModelPath));
    Generator generator = newGenerator(builder.build());
    GeneratorOptions options = new GeneratorOptions();
    options.setSearchMethod(GeneratorOptions.SearchMethod.GREEDY);
    options.setMaxLength(10);
    String prompt = "generate some random text";
    String result = generator.generate(prompt, options);
    assertEquals("<unk> linear recruit latest sack annually institutions cert solid references", result);
}
|
/**
 * Builds and installs the index template for the given index set. When the template
 * build signals IgnoreIndexTemplate, creation is skipped; if the skipped template is
 * required (failOnMissingTemplate) and does not already exist, an
 * IndexTemplateNotFoundException is thrown.
 */
public void ensureIndexTemplate(IndexSet indexSet) {
    final IndexSetConfig indexSetConfig = indexSet.getConfig();
    final String templateName = indexSetConfig.indexTemplateName();
    try {
        var template = buildTemplate(indexSet, indexSetConfig);
        if (indicesAdapter.ensureIndexTemplate(templateName, template)) {
            LOG.info("Successfully ensured index template {}", templateName);
        } else {
            LOG.warn("Failed to create index template {}", templateName);
        }
    } catch (IgnoreIndexTemplate e) {
        LOG.warn(e.getMessage());
        if (e.isFailOnMissingTemplate() && !indicesAdapter.indexTemplateExists(templateName)) {
            throw new IndexTemplateNotFoundException(f("No index template with name '%s' (type - '%s') found in Elasticsearch",
                    templateName, indexSetConfig.indexTemplateType().orElse(null)));
        }
    }
}
|
@Test
public void ensureIndexTemplate_IfIndexTemplateDoesntExistOnIgnoreIndexTemplateAndFailOnMissingTemplateIsTrue_thenExceptionThrown() {
    // Mapping factory signals that template creation should be skipped, but marks
    // the template as required (failOnMissingTemplate = true).
    // (Fixed typo in the fixture reason string: "Reasom" -> "Reason".)
    when(indexMappingFactory.createIndexMapping(any()))
            .thenThrow(new IgnoreIndexTemplate(true,
                    "Reason", "test", "test-template", null));
    // The required template is absent, so the failure must surface as an exception.
    when(indicesAdapter.indexTemplateExists("test-template")).thenReturn(false);
    assertThatCode(() -> underTest.ensureIndexTemplate(indexSetConfig("test",
            "test-template", "custom")))
            .isExactlyInstanceOf(IndexTemplateNotFoundException.class)
            .hasMessage("No index template with name 'test-template' (type - 'custom') found in Elasticsearch");
}
|
/** Returns the configured exposed type of the Kubernetes REST service. */
public KubernetesConfigOptions.ServiceExposedType getRestServiceExposedType() {
    return flinkConfig.get(KubernetesConfigOptions.REST_SERVICE_EXPOSED_TYPE);
}
|
// The parameters object must surface exactly the exposed type set in the Flink config.
@Test
void testGetRestServiceExposedType() {
    flinkConfig.set(
            KubernetesConfigOptions.REST_SERVICE_EXPOSED_TYPE,
            KubernetesConfigOptions.ServiceExposedType.NodePort);
    assertThat(kubernetesJobManagerParameters.getRestServiceExposedType())
            .isEqualByComparingTo(KubernetesConfigOptions.ServiceExposedType.NodePort);
}
|
/**
 * Builds a {@code MapConfig} from the decoded client-message parameters.
 * <p>
 * Nullable/optional parameter groups (listeners, merkle tree, store configs, etc.) are
 * only applied when present; "...Exists" flags guard fields added in newer protocol
 * versions so older clients keep their defaults.
 */
@Override
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:MethodLength"})
protected IdentifiedDataSerializable getConfig() {
    MapConfig config = new MapConfig(parameters.name);
    config.setAsyncBackupCount(parameters.asyncBackupCount);
    config.setBackupCount(parameters.backupCount);
    config.setCacheDeserializedValues(CacheDeserializedValues.valueOf(parameters.cacheDeserializedValues));
    // Listener configs are transmitted as holders and adapted (possibly namespace-aware).
    if (parameters.listenerConfigs != null && !parameters.listenerConfigs.isEmpty()) {
        config.setEntryListenerConfigs(
                (List<EntryListenerConfig>) adaptListenerConfigs(parameters.listenerConfigs, parameters.userCodeNamespace));
    }
    if (parameters.merkleTreeConfig != null) {
        config.setMerkleTreeConfig(parameters.merkleTreeConfig);
    }
    if (parameters.eventJournalConfig != null) {
        config.setEventJournalConfig(parameters.eventJournalConfig);
    }
    if (parameters.hotRestartConfig != null) {
        config.setHotRestartConfig(parameters.hotRestartConfig);
    }
    config.setInMemoryFormat(InMemoryFormat.valueOf(parameters.inMemoryFormat));
    config.setAttributeConfigs(parameters.attributeConfigs);
    config.setReadBackupData(parameters.readBackupData);
    config.setStatisticsEnabled(parameters.statisticsEnabled);
    config.setPerEntryStatsEnabled(parameters.perEntryStatsEnabled);
    config.setIndexConfigs(parameters.indexConfigs);
    // Holder types need the serialization service to materialize user objects.
    if (parameters.mapStoreConfig != null) {
        config.setMapStoreConfig(parameters.mapStoreConfig.asMapStoreConfig(serializationService,
                parameters.userCodeNamespace));
    }
    config.setTimeToLiveSeconds(parameters.timeToLiveSeconds);
    config.setMaxIdleSeconds(parameters.maxIdleSeconds);
    if (parameters.evictionConfig != null) {
        config.setEvictionConfig(parameters.evictionConfig.asEvictionConfig(serializationService));
    }
    if (parameters.mergePolicy != null) {
        config.setMergePolicyConfig(mergePolicyConfig(parameters.mergePolicy, parameters.mergeBatchSize));
    }
    if (parameters.nearCacheConfig != null) {
        config.setNearCacheConfig(parameters.nearCacheConfig.asNearCacheConfig(serializationService));
    }
    config.setPartitioningStrategyConfig(getPartitioningStrategyConfig());
    if (parameters.partitionLostListenerConfigs != null && !parameters.partitionLostListenerConfigs.isEmpty()) {
        config.setPartitionLostListenerConfigs(
                (List<MapPartitionLostListenerConfig>) adaptListenerConfigs(parameters.partitionLostListenerConfigs,
                        parameters.userCodeNamespace));
    }
    config.setSplitBrainProtectionName(parameters.splitBrainProtectionName);
    if (parameters.queryCacheConfigs != null && !parameters.queryCacheConfigs.isEmpty()) {
        List<QueryCacheConfig> queryCacheConfigs = new ArrayList<>(parameters.queryCacheConfigs.size());
        for (QueryCacheConfigHolder holder : parameters.queryCacheConfigs) {
            queryCacheConfigs.add(holder.asQueryCacheConfig(serializationService, parameters.userCodeNamespace));
        }
        config.setQueryCacheConfigs(queryCacheConfigs);
    }
    config.setWanReplicationRef(parameters.wanReplicationRef);
    config.setMetadataPolicy(MetadataPolicy.getById(parameters.metadataPolicy));
    // "...Exists" flags distinguish "absent in message" from "explicitly null/default".
    if (parameters.isDataPersistenceConfigExists) {
        config.setDataPersistenceConfig(parameters.dataPersistenceConfig);
    }
    if (parameters.isTieredStoreConfigExists) {
        config.setTieredStoreConfig(parameters.tieredStoreConfig);
    }
    if (parameters.isPartitioningAttributeConfigsExists) {
        config.setPartitioningAttributeConfigs(parameters.partitioningAttributeConfigs);
    }
    if (parameters.isUserCodeNamespaceExists) {
        config.setUserCodeNamespace(parameters.userCodeNamespace);
    }
    return config;
}
|
/**
 * Round-trips a MapConfig with data-persistence and tiered-store settings through the
 * add-map-config codec + message task and asserts the reconstructed config is equal.
 */
@Test
public void testDataPersistenceAndTieredStoreConfigTransmittedCorrectly() {
    MapConfig mapConfig = new MapConfig("my-map");
    DataPersistenceConfig dataPersistenceConfig = new DataPersistenceConfig();
    dataPersistenceConfig.setEnabled(true);
    dataPersistenceConfig.setFsync(true);
    mapConfig.setDataPersistenceConfig(dataPersistenceConfig);
    TieredStoreConfig tieredStoreConfig = mapConfig.getTieredStoreConfig();
    tieredStoreConfig.setEnabled(true);
    tieredStoreConfig.getMemoryTierConfig().setCapacity(Capacity.of(1L, MemoryUnit.GIGABYTES));
    tieredStoreConfig.getDiskTierConfig().setEnabled(true).setDeviceName("null-device");
    // Nulls stand in for the optional parameters not exercised by this test.
    ClientMessage addMapConfigClientMessage = DynamicConfigAddMapConfigCodec.encodeRequest(
            mapConfig.getName(),
            mapConfig.getBackupCount(),
            mapConfig.getAsyncBackupCount(),
            mapConfig.getTimeToLiveSeconds(),
            mapConfig.getMaxIdleSeconds(),
            null,
            mapConfig.isReadBackupData(),
            mapConfig.getCacheDeserializedValues().name(),
            mapConfig.getMergePolicyConfig().getPolicy(),
            mapConfig.getMergePolicyConfig().getBatchSize(),
            mapConfig.getInMemoryFormat().name(),
            null,
            null,
            mapConfig.isStatisticsEnabled(),
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            mapConfig.getMetadataPolicy().getId(),
            mapConfig.isPerEntryStatsEnabled(),
            mapConfig.getDataPersistenceConfig(),
            mapConfig.getTieredStoreConfig(),
            null,
            mapConfig.getUserCodeNamespace()
    );
    AddMapConfigMessageTask addMapConfigMessageTask = createMessageTask(addMapConfigClientMessage);
    addMapConfigMessageTask.run();
    MapConfig transmittedMapConfig = (MapConfig) addMapConfigMessageTask.getConfig();
    assertEquals(mapConfig, transmittedMapConfig);
}
|
/** Looks up the geographic area for the given IP string by resolving its area id first. */
public static Area getArea(String ip) {
    return AreaUtils.getArea(getAreaId(ip));
}
|
/** Resolves a known IP range (120.203.123.0-120.203.133.255 -> area 360900) to its area name. */
@Test
public void testGetArea_long() throws Exception {
    // 120.203.123.0|120.203.133.255|360900
    long ip = Searcher.checkIP("120.203.123.252");
    Area area = IPUtils.getArea(ip);
    assertEquals("宜春市", area.getName());
}
|
/**
 * Loads Iceberg table metadata for a Snowflake TABLE identifier by calling the
 * SYSTEM$GET_ICEBERG_TABLE_INFORMATION function over the pooled JDBC connection.
 *
 * @throws IllegalArgumentException if the identifier is not of type TABLE
 */
@Override
public SnowflakeTableMetadata loadTableMetadata(SnowflakeIdentifier tableIdentifier) {
    Preconditions.checkArgument(
        tableIdentifier.type() == SnowflakeIdentifier.Type.TABLE,
        "loadTableMetadata requires a TABLE identifier, got '%s'",
        tableIdentifier);
    SnowflakeTableMetadata tableMeta;
    try {
      final String finalQuery = "SELECT SYSTEM$GET_ICEBERG_TABLE_INFORMATION(?) AS METADATA";
      tableMeta =
          connectionPool.run(
              conn ->
                  queryHarness.query(
                      conn,
                      finalQuery,
                      TABLE_METADATA_RESULT_SET_HANDLER,
                      tableIdentifier.toIdentifierString()));
    } catch (SQLException e) {
      // Map driver-level failures to Iceberg exception types.
      throw snowflakeExceptionToIcebergException(
          tableIdentifier,
          e,
          String.format("Failed to get table metadata for '%s'", tableIdentifier));
    } catch (InterruptedException e) {
      throw new UncheckedInterruptedException(
          e, "Interrupted while getting table metadata for '%s'", tableIdentifier);
    }
    return tableMeta;
}
|
/** Malformed JSON in the METADATA column must surface as IllegalArgumentException containing the raw payload. */
@Test
public void testGetTableMetadataMalformedJson() throws SQLException {
    when(mockResultSet.next()).thenReturn(true);
    when(mockResultSet.getString("METADATA")).thenReturn("{\"malformed_no_closing_bracket");
    assertThatExceptionOfType(IllegalArgumentException.class)
        .isThrownBy(
            () ->
                snowflakeClient.loadTableMetadata(
                    SnowflakeIdentifier.ofTable("DB_1", "SCHEMA_1", "TABLE_1")))
        .withMessageContaining("{\"malformed_no_closing_bracket");
}
|
/**
 * Records the first error reported; compareAndSet ensures subsequent errors
 * are ignored once a terminal state is set.
 */
@Override
public void onError(QueryException error) {
    assert error != null;
    done.compareAndSet(null, error);
}
|
/** Only the first reported error sticks; the second onError call must be ignored. */
@Test
@SuppressWarnings("ResultOfMethodCallIgnored")
public void when_onErrorCalledTwice_then_secondIgnored() {
    initProducer(false);
    producer.onError(QueryException.error("error1"));
    producer.onError(QueryException.error("error2"));
    assertThatThrownBy(iterator::hasNext)
            .hasMessageContaining("error1");
}
|
/**
 * Resolves the B2 file/bucket id ("version id") for a path.
 * Resolution order: path attributes, then the inherited cache, then a remote lookup
 * (bucket listing for containers, file-name listing for files). Successful remote
 * lookups are written back to the cache via {@code cache(...)}.
 *
 * @return the id, or {@code null} for a directory that only exists as a common prefix
 * @throws NotfoundException if neither a file nor a matching prefix is found
 */
@Override
public String getVersionId(final Path file) throws BackgroundException {
    // Fast path 1: id already attached to the path's attributes.
    if(StringUtils.isNotBlank(file.attributes().getVersionId())) {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Return version %s from attributes for file %s", file.attributes().getVersionId(), file));
        }
        return file.attributes().getVersionId();
    }
    // Fast path 2: id previously cached by this provider.
    final String cached = super.getVersionId(file);
    if(cached != null) {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Return cached versionid %s for file %s", cached, file));
        }
        return cached;
    }
    try {
        if(containerService.isContainer(file)) {
            final B2BucketResponse info = session.getClient().listBucket(file.getName());
            if(null == info) {
                throw new NotfoundException(file.getAbsolute());
            }
            // Cache in file attributes
            return this.cache(file, info.getBucketId());
        }
        // Files that have been hidden will not be returned
        final B2ListFilesResponse response = session.getClient().listFileNames(
                this.getVersionId(containerService.getContainer(file)), containerService.getKey(file), 1,
                new DirectoryDelimiterPathContainerService().getKey(file.getParent()),
                null);
        // Find for exact filename match (.bzEmpty file for directories)
        final Optional<B2FileInfoResponse> optional = response.getFiles().stream().filter(
                info -> StringUtils.equals(containerService.getKey(file), info.getFileName())).findFirst();
        if(optional.isPresent()) {
            // Cache in file attributes
            return this.cache(file, optional.get().getFileId());
        }
        if(file.isDirectory()) {
            // Search for common prefix returned when no placeholder file was found
            if(response.getFiles().stream().anyMatch(
                    info -> StringUtils.startsWith(info.getFileName(), new DirectoryDelimiterPathContainerService().getKey(file)))) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Common prefix found for %s but no placeholder file", file));
                }
                return null;
            }
            throw new NotfoundException(file.getAbsolute());
        }
        throw new NotfoundException(file.getAbsolute());
    }
    catch(B2ApiException e) {
        throw new B2ExceptionMappingService(this).map(e);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map(e);
    }
}
|
/**
 * Integration test: resolves the file id for an uploaded file, expects NotfoundException
 * for a nonexistent file, and checks that a cached duplicate marker does not override
 * the id already stored in the file's attributes.
 */
@Test
public void getFileIdFile() throws Exception {
    final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
    final Path bucket = new B2DirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path file = new B2TouchFeature(session, fileid).touch(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final String versionId = fileid.getVersionId(file);
    assertNotNull(versionId);
    assertEquals(file.attributes().getVersionId(), versionId);
    try {
        assertNull(fileid.getVersionId(new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file))));
        fail();
    }
    catch(NotfoundException e) {
        // Expected
    }
    final PathAttributes duplicate = new PathAttributes();
    duplicate.setVersionId("d");
    duplicate.setDuplicate(true);
    fileid.cache(new Path(file).withAttributes(duplicate), "d");
    assertEquals(versionId, fileid.getVersionId(file));
    new B2DeleteFeature(session, fileid).delete(Arrays.asList(bucket, file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Prints the given entity list in the configured output format: as one JSON document,
 * or as one table per entity (prefixing each table with its statement text when more
 * than one entity is printed).
 *
 * @throws RuntimeException if the output format is neither JSON nor TABULAR
 */
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
    switch (outputFormat) {
        case JSON:
            printAsJson(entityList);
            break;
        case TABULAR:
            // Only show statement text when disambiguation between tables is needed.
            final boolean showStatements = entityList.size() > 1;
            for (final KsqlEntity ksqlEntity : entityList) {
                writer().println();
                if (showStatements) {
                    writer().println(ksqlEntity.getStatementText());
                }
                printAsTable(ksqlEntity);
            }
            break;
        default:
            throw new RuntimeException(String.format(
                "Unexpected output format: '%s'",
                outputFormat.name()
            ));
    }
}
|
/** Approval test: console output for an assert-topic entity with exists=false. */
@Test
public void shouldPrintAssertNotExistsTopicResult() {
    // Given:
    final KsqlEntityList entities = new KsqlEntityList(ImmutableList.of(
        new AssertTopicEntity("statement", "name", false)
    ));
    // When:
    console.printKsqlEntityList(entities);
    // Then:
    final String output = terminal.getOutputString();
    Approvals.verify(output, approvalOptions);
}
|
/**
 * With no argument, prints the currently configured server address. With one argument,
 * switches the REST client to the new address, announces it, resets CLI state for the
 * new server, and validates connectivity.
 */
@Override
public void execute(final List<String> args, final PrintWriter terminal) {
    CliCmdUtil.ensureArgCountBounds(args, 0, 1, HELP);
    // Query-only mode: report the current address and stop (no validation).
    if (args.isEmpty()) {
        terminal.println(restClient.getServerAddress());
        return;
    }
    final String newAddress = args.get(0);
    restClient.setServerAddress(newAddress);
    terminal.println("Server now: " + newAddress);
    resetCliForNewServer.fire();
    validateClient(terminal, restClient);
}
|
/** Pointing the CLI at a Confluent Cloud address must flag the client as a CCloud server. */
@Test
public void shouldIdentifyCCloudServer() {
    // When:
    command.execute(ImmutableList.of(CCLOUD_SERVER_ADDRESS), terminal);
    // Then:
    verify(restClient).setIsCCloudServer(true);
}
|
/**
 * Creates an IPv4 prefix from a raw 32-bit address and a prefix length.
 * Prefix-length validation is delegated to the IpPrefix constructor.
 */
public static IpPrefix valueOf(int address, int prefixLength) {
    return new IpPrefix(IpAddress.valueOf(address), prefixLength);
}
|
/** A negative prefix length for an IPv6 address must raise IllegalArgumentException. */
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfAddressNegativePrefixLengthIPv6() {
    IpAddress ipAddress;
    IpPrefix ipPrefix;
    ipAddress =
        IpAddress.valueOf("1111:2222:3333:4444:5555:6666:7777:8888");
    // Assignment exists only to trigger the expected exception.
    ipPrefix = IpPrefix.valueOf(ipAddress, -1);
}
|
/**
 * Validates a value that must be a collection or a map of validatable elements.
 * Null is treated as valid (presence is enforced elsewhere); map values are
 * validated recursively; any other type is invalid.
 */
@Override
public boolean isValid(Object value, ConstraintValidatorContext context) {
    if (value == null) {
        return true;
    }
    if (value instanceof Collection) {
        return isValidCollection((Collection<?>) value);
    }
    if (value instanceof Map) {
        // Valid only if every map value is recursively valid.
        for (Object element : ((Map<?, ?>) value).values()) {
            if (!isValid(element, context)) {
                return false;
            }
        }
        return true;
    }
    return false;
}
|
/** An object that is neither a Collection nor a Map must be rejected. */
@Test
public void testInvalidWithNonMatchingObject() {
    boolean valid = target.isValid(new Object(), constraintValidatorContext);
    assertThat(valid).isFalse();
}
|
/**
 * Creates (or returns the cached) extension instance, keyed by classloader and class
 * name so the "same" class loaded by different classloaders yields distinct instances.
 * Only classes registered in {@code extensionClassNames} are cached as singletons;
 * an empty registration list means every class is cached.
 */
@Override
@SuppressWarnings("unchecked")
public <T> T create(Class<T> extensionClass) {
    String extensionClassName = extensionClass.getName();
    ClassLoader extensionClassLoader = extensionClass.getClassLoader();
    // computeIfAbsent replaces the original containsKey/put dance atomically per bucket.
    Map<String, Object> classLoaderBucket =
            cache.computeIfAbsent(extensionClassLoader, cl -> new HashMap<>());
    if (classLoaderBucket.containsKey(extensionClassName)) {
        return (T) classLoaderBucket.get(extensionClassName);
    }
    T extension = super.create(extensionClass);
    if (extensionClassNames.isEmpty() || extensionClassNames.contains(extensionClassName)) {
        classLoaderBucket.put(extensionClassName, extension);
    }
    return extension;
}
|
/**
 * Loading the same extension class through two distinct classloaders must produce
 * two distinct singleton instances (cache is keyed per classloader).
 */
@Test
@SuppressWarnings("unchecked")
public void createNewEachTimeFromDifferentClassLoaders() throws Exception {
    ExtensionFactory extensionFactory = new SingletonExtensionFactory(pluginManager);
    // Get classpath locations
    URL[] classpathReferences = getClasspathReferences();
    // Create different classloaders for the classpath references and load classes respectively
    ClassLoader klassLoaderOne = new URLClassLoader(classpathReferences, null);
    Class klassOne = klassLoaderOne.loadClass(TestExtension.class.getName());
    ClassLoader klassLoaderTwo = new URLClassLoader(classpathReferences, null);
    Class klassTwo = klassLoaderTwo.loadClass(TestExtension.class.getName());
    // create instances
    Object instanceOne = extensionFactory.create(klassOne);
    Object instanceTwo = extensionFactory.create(klassTwo);
    // assert that instances not same
    assertNotSame(instanceOne, instanceTwo);
}
|
/**
 * Runs the callables on a temporary cached thread pool with an overall timeout,
 * delegating to the executor-based overload. The pool is shut down unconditionally
 * (shutdownNow interrupts any still-running tasks).
 *
 * @throws TimeoutException   if the tasks do not complete within {@code timeoutMs}
 * @throws ExecutionException if any task throws
 */
public static <T> void invokeAll(List<Callable<T>> callables, long timeoutMs)
    throws TimeoutException, ExecutionException {
    ExecutorService service = Executors.newCachedThreadPool();
    try {
        invokeAll(service, callables, timeoutMs);
    } finally {
        service.shutdownNow();
    }
}
|
/**
 * If one task throws while another hangs, invokeAll must surface the task's exception
 * as the ExecutionException cause and return well before the hanging task's sleep.
 */
@Test
public void invokeAllExceptionAndHang() throws Exception {
    long start = System.currentTimeMillis();
    RuntimeException testException = new RuntimeException("failed");
    try {
        CommonUtils.invokeAll(Arrays.asList(
            () -> {
                Thread.sleep(10 * Constants.SECOND_MS);
                return null;
            },
            () -> {
                throw testException;
            }
        ), 5 * Constants.SECOND_MS);
        fail("Expected an exception to be thrown");
    } catch (ExecutionException e) {
        assertSame(testException, e.getCause());
    }
    assertThat("invokeAll should exit early if one of the tasks throws an exception",
        System.currentTimeMillis() - start, Matchers.lessThan(2L * Constants.SECOND_MS));
}
|
/**
 * Migrates one pod at a time from the legacy Deployment to the StrimziPodSet:
 * scales the Deployment down by one and the PodSet up by one (capped at the
 * connect cluster's desired replica count). A null Deployment means migration
 * has already completed and is a no-op.
 */
public Future<Void> migrateFromDeploymentToStrimziPodSets(Deployment deployment, StrimziPodSet podSet) {
    if (deployment == null) {
        // Deployment does not exist anymore => no migration needed
        return Future.succeededFuture();
    } else {
        int depReplicas = deployment.getSpec().getReplicas();
        // A null PodSet means no pods have been migrated yet.
        int podSetReplicas = podSet != null ? podSet.getSpec().getPods().size() : 0;
        return moveOnePodFromDeploymentToStrimziPodSet(depReplicas - 1, Math.min(podSetReplicas + 1, connect.getReplicas()));
    }
}
|
/** With no Deployment present, migration must succeed immediately without touching the PodSet. */
@Test
public void testNoMigrationToPodSets(VertxTestContext context) {
    KafkaConnectMigration migration = new KafkaConnectMigration(
            RECONCILIATION,
            CLUSTER,
            null,
            null,
            1_000L,
            false,
            null,
            null,
            null,
            null,
            null,
            null
    );
    Checkpoint async = context.checkpoint();
    migration.migrateFromDeploymentToStrimziPodSets(null, CLUSTER.generatePodSet(3, null, null, false, null, null, null))
            .onComplete(context.succeeding(v -> context.verify(async::flag)));
}
|
/**
 * Decides whether a path should be filtered out (excluded from processing).
 * Returns {@code true} to filter the path OUT: a path is kept only when it matches
 * at least one include pattern and, per {@code shouldExclude}, no exclude pattern.
 * With no patterns configured at all, nothing is filtered.
 */
@Override
public boolean filterPath(Path filePath) {
    if (getIncludeMatchers().isEmpty() && getExcludeMatchers().isEmpty()) {
        return false;
    }
    // compensate for the fact that Flink paths are slashed
    final String path =
            filePath.hasWindowsDrive() ? filePath.getPath().substring(1) : filePath.getPath();
    final java.nio.file.Path nioPath = Paths.get(path);
    for (PathMatcher matcher : getIncludeMatchers()) {
        if (matcher.matches(nioPath)) {
            // Included; only filtered out if an exclude pattern also matches.
            return shouldExclude(nioPath);
        }
    }
    // Not matched by any include pattern => filter out.
    return true;
}
|
/** A single "*" glob matches only paths without a separator, so nested paths are filtered out. */
@Test
void testSingleStarPattern() {
    GlobFilePathFilter matcher =
            new GlobFilePathFilter(Collections.singletonList("*"), Collections.emptyList());
    assertThat(matcher.filterPath(new Path("a"))).isFalse();
    assertThat(matcher.filterPath(new Path("a/b"))).isTrue();
    assertThat(matcher.filterPath(new Path("a/b/c"))).isTrue();
}
|
/**
 * Runs every registered message filter over every message, timing each filter
 * invocation per filter class. A filter returning {@code true} marks the message
 * as filtered out and acknowledges it immediately. Filter exceptions are logged,
 * recorded as processing errors on the message, and do not stop the chain.
 */
@Override
public Messages process(Messages messages) {
    for (final MessageFilter filter : filterRegistry) {
        for (Message msg : messages) {
            final String timerName = name(filter.getClass(), "executionTime");
            final Timer timer = metricRegistry.timer(timerName);
            final Timer.Context timerContext = timer.time();
            try {
                LOG.trace("Applying filter [{}] on message <{}>.", filter.getName(), msg.getId());
                if (filter.filter(msg)) {
                    LOG.debug("Filter [{}] marked message <{}> to be discarded. Dropping message.",
                            filter.getName(),
                            msg.getId());
                    msg.setFilterOut(true);
                    filteredOutMessages.mark();
                    messageQueueAcknowledger.acknowledge(msg);
                }
            } catch (Exception e) {
                final String shortError = String.format(Locale.US, "Could not apply filter [%s] on message <%s>",
                        filter.getName(), msg.getId());
                // Full stack trace only at debug level to keep error logs compact.
                if (LOG.isDebugEnabled()) {
                    LOG.error("{}:", shortError, e);
                } else {
                    LOG.error("{}:\n{}", shortError, ExceptionUtils.getShortenedStackTrace(e));
                }
                msg.addProcessingError(new Message.ProcessingError(ProcessingFailureCause.MessageFilterException,
                        shortError, ExceptionUtils.getRootCauseMessage(e)));
            } finally {
                // Timing is recorded even when the filter threw.
                final long elapsedNanos = timerContext.stop();
                msg.recordTiming(serverStatus, timerName, elapsedNanos);
            }
        }
    }
    return messages;
}
|
/** Every registered filter must be applied; each DummyFilter stamps a "prio-N" field on the message. */
@Test
public void testAllFiltersAreBeingRun() {
    final DummyFilter first = new DummyFilter(10);
    final DummyFilter second = new DummyFilter(20);
    final DummyFilter third = new DummyFilter(30);
    final Set<MessageFilter> filters = ImmutableSet.of(first, second, third);
    final MessageFilterChainProcessor processor = new MessageFilterChainProcessor(new MetricRegistry(),
            filters,
            acknowledger,
            serverStatus);
    final Message message = messageFactory.createMessage("message", "source", new DateTime(2016, 1, 1, 0, 0, DateTimeZone.UTC));
    final Message result = Iterables.getFirst(processor.process(message), null);
    assertThat(result).isNotNull();
    assertThat(result.getFields()).containsKeys("prio-10", "prio-20", "prio-30");
}
|
/**
 * Validates that a materialized view's partitioning is compatible with its base tables.
 * Requirements enforced (each violation raises a NOT_SUPPORTED PrestoException):
 * the view has at least one directly-mapped base column, the view is partitioned,
 * every base table shares at least one partition column with the view, and base
 * tables on the outer side of a join contribute at least one partition equality
 * constraint.
 *
 * @throws TableNotFoundException if a base table cannot be resolved in the metastore
 */
public static void validateMaterializedViewPartitionColumns(
        SemiTransactionalHiveMetastore metastore,
        MetastoreContext metastoreContext,
        Table viewTable,
        MaterializedViewDefinition viewDefinition)
{
    SchemaTableName viewName = new SchemaTableName(viewTable.getDatabaseName(), viewTable.getTableName());
    Map<String, Map<SchemaTableName, String>> viewToBaseDirectColumnMap = viewDefinition.getDirectColumnMappingsAsMap();
    if (viewToBaseDirectColumnMap.isEmpty()) {
        throw new PrestoException(
                NOT_SUPPORTED,
                format("Materialized view %s must have at least one column directly defined by a base table column.", viewName));
    }
    List<Column> viewPartitions = viewTable.getPartitionColumns();
    if (viewPartitions.isEmpty()) {
        throw new PrestoException(NOT_SUPPORTED, "Unpartitioned materialized view is not supported.");
    }
    // Resolve every base table up front; a missing one fails the whole validation.
    List<Table> baseTables = viewDefinition.getBaseTables().stream()
            .map(baseTableName -> metastore.getTable(metastoreContext, baseTableName.getSchemaName(), baseTableName.getTableName())
                    .orElseThrow(() -> new TableNotFoundException(baseTableName)))
            .collect(toImmutableList());
    Map<Table, List<Column>> baseTablePartitions = baseTables.stream()
            .collect(toImmutableMap(
                    table -> table,
                    Table::getPartitionColumns));
    for (Table baseTable : baseTablePartitions.keySet()) {
        SchemaTableName schemaBaseTable = new SchemaTableName(baseTable.getDatabaseName(), baseTable.getTableName());
        if (!isCommonPartitionFound(schemaBaseTable, baseTablePartitions.get(baseTable), viewPartitions, viewToBaseDirectColumnMap)) {
            throw new PrestoException(
                    NOT_SUPPORTED,
                    format("Materialized view %s must have at least one partition column that exists in %s as well", viewName, baseTable.getTableName()));
        }
        // Outer-join base tables additionally need an indirect partition mapping.
        if (viewDefinition.getBaseTablesOnOuterJoinSide().contains(schemaBaseTable) && viewToBaseTableOnOuterJoinSideIndirectMappedPartitions(viewDefinition, baseTable).get().isEmpty()) {
            throw new PrestoException(
                    NOT_SUPPORTED,
                    format("Outer join conditions in Materialized view %s must have at least one common partition equality constraint", viewName));
        }
    }
}
|
/** A single shared partition column ("ds") between view and base table must pass validation. */
@Test
public void testValidateMaterializedViewPartitionColumnsOneColumnMatch()
{
    TestingSemiTransactionalHiveMetastore testMetastore = TestingSemiTransactionalHiveMetastore.create();
    Column dsColumn = new Column("ds", HIVE_STRING, Optional.empty(), Optional.empty());
    Column shipmodeColumn = new Column("shipmode", HIVE_STRING, Optional.empty(), Optional.empty());
    List<Column> partitionColumns = ImmutableList.of(dsColumn, shipmodeColumn);
    SchemaTableName tableName = new SchemaTableName(SCHEMA_NAME, TABLE_NAME);
    Map<String, Map<SchemaTableName, String>> originalColumnMapping = ImmutableMap.of(dsColumn.getName(), ImmutableMap.of(tableName, dsColumn.getName()));
    testMetastore.addTable(SCHEMA_NAME, TABLE_NAME, getTable(partitionColumns), ImmutableList.of());
    List<Column> viewPartitionColumns = ImmutableList.of(dsColumn);
    validateMaterializedViewPartitionColumns(testMetastore, metastoreContext, getTable(viewPartitionColumns), getConnectorMaterializedViewDefinition(ImmutableList.of(tableName), originalColumnMapping));
}
|
/**
 * Returns the job information, either the already-materialized field or by
 * deserializing the non-offloaded payload.
 *
 * @throws IOException            if deserialization fails
 * @throws ClassNotFoundException if a serialized class cannot be resolved
 * @throws IllegalStateException  if the job information was offloaded to the BLOB store
 */
public JobInformation getJobInformation() throws IOException, ClassNotFoundException {
    if (jobInformation != null) {
        return jobInformation;
    }
    if (serializedJobInformation instanceof NonOffloaded) {
        // Local renamed from "jobInformation" to stop shadowing the field checked above.
        @SuppressWarnings("unchecked")
        NonOffloaded<JobInformation> nonOffloaded =
                (NonOffloaded<JobInformation>) serializedJobInformation;
        return nonOffloaded.serializedValue.deserializeValue(getClass().getClassLoader());
    }
    throw new IllegalStateException(
            "Trying to work with offloaded serialized job information.");
}
|
/**
 * A non-offloaded job information payload deserializes correctly, while accessing
 * offloaded task information must fail with IllegalStateException.
 */
@Test
void testOffLoadedAndNonOffLoadedPayload() throws IOException, ClassNotFoundException {
    final TaskDeploymentDescriptor taskDeploymentDescriptor =
            createTaskDeploymentDescriptor(
                    new TaskDeploymentDescriptor.NonOffloaded<>(serializedJobInformation),
                    new TaskDeploymentDescriptor.Offloaded<>(new PermanentBlobKey()));
    JobInformation actualJobInformation = taskDeploymentDescriptor.getJobInformation();
    assertThat(actualJobInformation).isEqualTo(jobInformation);
    assertThatThrownBy(taskDeploymentDescriptor::getTaskInformation)
            .isInstanceOf(IllegalStateException.class);
}
|
/**
 * Translates a Connect schema to a Protobuf schema, injecting the configured schema
 * full name first. A fresh ProtobufData is built per call to work around the bug
 * described below.
 */
@Override
public ParsedSchema fromConnectSchema(final Schema schema) {
    // Bug in ProtobufData means `fromConnectSchema` throws on the second invocation if using
    // default naming.
    return new ProtobufData(new ProtobufDataConfig(updatedConfigs))
        .fromConnectSchema(injectSchemaFullName(schema));
}
|
/** Without a nullable representation configured, nullable primitives map to plain proto3 scalars. */
@Test
public void shouldUsePlainPrimitivesIfNoNullableRepresentationIsSet() {
    // Given:
    givenNoNullableRepresentation();
    // When:
    final ParsedSchema schema = schemaTranslator.fromConnectSchema(CONNECT_SCHEMA_WITH_NULLABLE_PRIMITIVES);
    // Then:
    assertThat(schema.canonicalString(), is("syntax = \"proto3\";\n"
        + "\n"
        + "message ConnectDefault1 {\n"
        + "  int32 optional_int32 = 1;\n"
        + "  bool optional_boolean = 2;\n"
        + "  string optional_string = 3;\n"
        + "}\n"));
}
|
/**
 * Returns a lazy iterable over the children's measures for the given metric.
 * Children without a value for the metric are skipped (null-filtered). The metric
 * key is validated eagerly; the stream is rebuilt on every iteration.
 */
@Override
public Iterable<Measure> getChildrenMeasures(String metric) {
    validateInputMetric(metric);
    return () -> internalComponent.getChildren().stream()
        .map(new ComponentToMeasure(metricRepository.getByKey(metric)))
        .map(ToMeasureAPI.INSTANCE)
        .filter(Objects::nonNull)
        .iterator();
}
|
/** A child without a measure value must be skipped, leaving only the measured child's value. */
@Test
public void get_children_measures_when_one_child_has_no_value() {
    measureRepository.addRawMeasure(FILE_1_REF, NCLOC_KEY, newMeasureBuilder().create(10));
    // No data on file 2
    MeasureComputerContextImpl underTest = newContext(PROJECT_REF, NCLOC_KEY, COMMENT_LINES_KEY);
    assertThat(underTest.getChildrenMeasures(NCLOC_KEY)).extracting("intValue").containsOnly(10);
}
|
/**
 * Appends troubleshooting hints to known MySQL CDC error messages (server-id
 * conflicts and expired binlogs). Messages matching no known pattern, and null
 * input, are returned unchanged.
 */
public static String optimizeErrorMessage(String msg) {
    if (msg == null) {
        return null;
    }
    if (SERVER_ID_CONFLICT.matcher(msg).matches()) {
        // Optimize the error msg when server id conflict
        return msg
                + "\nThe 'server-id' in the mysql cdc connector should be globally unique, but conflicts happen now.\n"
                + "The server id conflict may happen in the following situations: \n"
                + "1. The server id has been used by other mysql cdc table in the current job.\n"
                + "2. The server id has been used by the mysql cdc table in other jobs.\n"
                + "3. The server id has been used by other sync tools like canal, debezium and so on.\n";
    }
    final boolean binlogUnavailable =
            MISSING_BINLOG_POSITION_WHEN_BINLOG_EXPIRE.matcher(msg).matches()
                    || MISSING_TRANSACTION_WHEN_BINLOG_EXPIRE.matcher(msg).matches();
    if (binlogUnavailable) {
        // Optimize the error msg when binlog is unavailable
        return msg
                + "\nThe required binary logs are no longer available on the server. This may happen in following situations:\n"
                + "1. The speed of CDC source reading is too slow to exceed the binlog expired period. You can consider increasing the binary log expiration period, you can also to check whether there is back pressure in the job and optimize your job.\n"
                + "2. The job runs normally, but something happens in the database and lead to the binlog cleanup. You can try to check why this cleanup happens from MySQL side.";
    }
    return msg;
}
|
/** A purged-binlog master error message must get the binlog-expiry troubleshooting hint appended. */
@Test
public void testOptimizeErrorMessageWhenMissingBinlogPositionInMaster() {
    assertEquals(
            "Cannot replicate because the master purged required binary logs. Replicate the missing transactions from elsewhere, or provision a new slave from backup. Consider increasing the master's binary log expiration period. The GTID set sent by the slave is 'b9d6f3df-79e7-11ed-9a81-0242ac110004:1-33', and the missing transactions are 'b9d6f3df-79e7-11ed-9a81-0242ac110004:34'"
                    + "\nThe required binary logs are no longer available on the server. This may happen in following situations:\n"
                    + "1. The speed of CDC source reading is too slow to exceed the binlog expired period. You can consider increasing the binary log expiration period, you can also to check whether there is back pressure in the job and optimize your job.\n"
                    + "2. The job runs normally, but something happens in the database and lead to the binlog cleanup. You can try to check why this cleanup happens from MySQL side.",
            ErrorMessageUtils.optimizeErrorMessage(
                    "Cannot replicate because the master purged required binary logs. Replicate the missing transactions from elsewhere, or provision a new slave from backup. Consider increasing the master's binary log expiration period. The GTID set sent by the slave is 'b9d6f3df-79e7-11ed-9a81-0242ac110004:1-33', and the missing transactions are 'b9d6f3df-79e7-11ed-9a81-0242ac110004:34'"));
}
|
/**
 * Reads the next 8 bytes as a long from the underlying input, dispatching on the
 * concrete input type.
 *
 * @throws UnsupportedOperationException if the input is neither a RandomAccessFile
 *         nor a DataInputStream
 */
public final long readLong() throws IOException {
    if (input instanceof RandomAccessFile) {
        RandomAccessFile file = (RandomAccessFile) input;
        return file.readLong();
    }
    if (input instanceof DataInputStream) {
        DataInputStream stream = (DataInputStream) input;
        return stream.readLong();
    }
    throw new UnsupportedOperationException("Unknown Hollow Blob Input type");
}
|
/** readLong must return the same value for both on-heap stream and shared-memory buffer inputs. */
@Test
public void testReadLong() throws IOException {
    HollowBlobInput inStream = HollowBlobInput.modeBasedSelector(MemoryMode.ON_HEAP, mockBlob);
    assertEquals(281479271743489l, inStream.readLong()); // first long
    HollowBlobInput inBuffer = HollowBlobInput.modeBasedSelector(MemoryMode.SHARED_MEMORY_LAZY, mockBlob);
    assertEquals(281479271743489l, inBuffer.readLong()); // first long
}
|
/**
 * Drops the pipe named by the statement under the write lock.
 * With IF EXISTS, an unknown pipe is a no-op; otherwise an unknown-pipe semantic
 * error is reported. Any failure is logged and rethrown.
 */
public void dropPipe(DropPipeStmt stmt) throws DdlException {
    Pipe pipe = null;
    // Acquire the lock OUTSIDE the try: if lock() itself failed inside the try, the
    // finally-unlock would throw IllegalMonitorStateException and mask the real error.
    lock.writeLock().lock();
    try {
        Pair<Long, String> dbAndName = resolvePipeNameUnlock(stmt.getPipeName());
        PipeId pipeId = nameToId.get(dbAndName);
        if (pipeId == null) {
            if (stmt.isIfExists()) {
                return;
            }
            // Throws; control never reaches the lookup below for unknown pipes.
            ErrorReport.reportSemanticException(ErrorCode.ERR_UNKNOWN_PIPE, stmt.getPipeName());
        }
        // Reuse pipeId instead of the original's redundant second nameToId lookup.
        pipe = pipeMap.get(pipeId);
        dropPipeImpl(pipe);
    } catch (Throwable e) {
        LOG.error("drop pipe {} failed", pipe, e);
        throw e;
    } finally {
        lock.writeLock().unlock();
    }
}
|
/**
 * Verifies failed-task retry behavior: with the task manager mocked to return a
 * FAILED future (first loop) or a cancelled future (second loop), retrying failed
 * tasks (all or individually) must leave the pipe with no running tasks.
 */
@Test
public void testExecuteFailed() throws Exception {
    TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
    mockRepoExecutor();
    // mock execution failed
    for (boolean retryAll : Lists.newArrayList(true, false)) {
        final String pipeName = "p3";
        Pipe p3 = preparePipe(pipeName);
        new mockit.Expectations(taskManager) {
            {
                taskManager.executeTaskAsync((Task) any, (ExecuteOption) any);
                SubmitResult submit = new SubmitResult("queryid", SubmitResult.SubmitStatus.SUBMITTED);
                FutureTask<Constants.TaskRunState> future = new FutureTask<>(() -> Constants.TaskRunState.FAILED);
                submit.setFuture(future);
                future.run();
                result = submit;
            }
        };
        Assert.assertEquals(0, p3.getRunningTasks().size());
        pipeRetryFailedTask(p3, retryAll);
        dropPipe(pipeName);
    }
    // mock execution cancelled
    for (boolean retryAll : Lists.newArrayList(true, false)) {
        final String pipeName = "p4";
        Pipe p4 = preparePipe(pipeName);
        new mockit.Expectations(taskManager) {
            {
                taskManager.executeTaskAsync((Task) any, (ExecuteOption) any);
                SubmitResult submit = new SubmitResult("queryid", SubmitResult.SubmitStatus.SUBMITTED);
                FutureTask<Constants.TaskRunState> future = new FutureTask<>(() -> Constants.TaskRunState.FAILED);
                submit.setFuture(future);
                future.cancel(true);
                result = submit;
            }
        };
        Assert.assertEquals(0, p4.getRunningTasks().size());
        pipeRetryFailedTask(p4, retryAll);
        dropPipe(pipeName);
    }
}
|
/**
 * Conditionally replaces the value for an identity-wrapped key: succeeds only if the
 * key is currently mapped to {@code v1} (identity-based key comparison).
 */
@Override
public boolean replace(K k, V v1, V v2) {
    return mInternalMap.replace(new IdentityObject<>(k), v1, v2);
}
|
/**
 * Identity semantics: an equal-but-distinct String key is treated as absent, and the
 * conditional replace only fires when the current value matches.
 */
@Test
public void replace() {
    String x = new String("x");
    String x2 = new String("x");
    assertNull(mMap.put(x, "x"));
    assertNull(mMap.replace(x2, "x"));
    assertEquals(1, mMap.size());
    assertEquals("x", mMap.replace(x, "y"));
    assertFalse(mMap.replace(x, "noreplace", "z")); // shouldn't replace
    assertEquals("y", mMap.get(x));
}
|
/**
 * Invokes the given method on {@code obj} with the supplied arguments, delegating to
 * the overload with its first flag set to {@code false}.
 */
public static <T> T invoke(Object obj, Method method, Object... args) {
    return invoke(false, obj, method, args);
}
|
/** Invokes Duck.getDuck(int) via method handle with a null receiver and checks the result. */
@Test
public void invokeStaticTest(){
    // 测试执行普通方法
    final String result = MethodHandleUtil.invoke(null,
            ReflectUtil.getMethod(Duck.class, "getDuck", int.class), 78);
    assertEquals("Duck 78", result);
}
|
/**
 * SQL scalar function: wraps the input varchar in an ANSI color escape sequence
 * for the given color and appends the reset code.
 * The output length constraint y = min(INT_MAX, x + 15) accounts for the 15
 * characters of color formatting (see note below).
 */
@ScalarFunction
@LiteralParameters({"x", "y"})
@Constraint(variable = "y", expression = "min(2147483647, x + 15)")
// Color formatting uses 15 characters. Note that if the ansiColorEscape function implementation
// changes, this value may be invalidated.
@SqlType("varchar(y)")
public static Slice render(@SqlType("varchar(x)") Slice value, @SqlType(ColorType.NAME) long color)
{
    StringBuilder builder = new StringBuilder(value.length());
    // color
    builder.append(ansiColorEscape(color))
            .append(value.toStringUtf8())
            .append(ANSI_RESET);
    return utf8Slice(builder.toString());
}
|
/** Booleans render as a green check mark (true) or a red cross (false) with ANSI reset. */
@Test
public void testRenderBoolean()
{
    assertEquals(render(true), toSlice("\u001b[38;5;2m✓\u001b[0m"));
    assertEquals(render(false), toSlice("\u001b[38;5;1m✗\u001b[0m"));
}
|
/**
 * Returns the table's cells as rows of column values.
 * NOTE(review): this returns the internal backing list directly, so callers can
 * mutate the table's state — confirm whether callers rely on that before wrapping
 * it in an unmodifiable view.
 */
public List<List<String>> cells() {
    return raw;
}
|
/** The simple fixture table must expose two rows of three columns each. */
@Test
void cells_should_have_three_columns_and_two_rows() {
    List<List<String>> raw = createSimpleTable().cells();
    assertEquals(2, raw.size(), "Rows size");
    for (List<String> list : raw) {
        assertEquals(3, list.size(), "Cols size: " + list);
    }
}
|
/**
 * Entry point: builds the sample Forest, picks a LOB serializer from the CLI args
 * (e.g. "CLOB"), and runs the serialization round trip.
 *
 * @throws SQLException if the database-backed serialization fails
 */
public static void main(String[] args) throws SQLException {
    Forest forest = createForest();
    LobSerializer serializer = createLobSerializer(args);
    executeSerializer(forest, serializer);
}
|
/** Running the app in CLOB mode must complete without throwing. */
@Test
void shouldExecuteWithoutExceptionClob() {
    assertDoesNotThrow(() -> App.main(new String[]{"CLOB"}));
}
|
/**
 * Parses a string into an {@code int}.
 *
 * <p>Blank input yields {@code 0}; a leading "0x"/"0X" marks a hexadecimal
 * literal; scientific notation is rejected outright. Any other format is
 * tried as plain decimal first, then handed to the generic number parser.
 *
 * @param number the text to parse; may be blank
 * @return the parsed int value (0 for blank input)
 * @throws NumberFormatException for scientific notation or unparseable text
 */
public static int parseInt(String number) throws NumberFormatException {
    // Blank input parses to zero by convention.
    if (StrUtil.isBlank(number)) {
        return 0;
    }
    // A "0x"/"0X" prefix marks a hexadecimal literal.
    final String hexPrefix = "0x";
    if (StrUtil.startWithIgnoreCase(number, hexPrefix)) {
        return Integer.parseInt(number.substring(hexPrefix.length()), 16);
    }
    // Scientific notation is not supported: such values are typically far
    // outside int range, so converting them would silently lose meaning.
    if (StrUtil.containsIgnoreCase(number, "E")) {
        throw new NumberFormatException(StrUtil.format("Unsupported int format: [{}]", number));
    }
    // Plain decimal first; fall back to the generic number parser for other
    // supported textual formats.
    try {
        return Integer.parseInt(number);
    } catch (NumberFormatException e) {
        return parseNumber(number).intValue();
    }
}
|
@Test
public void parseIntTest4() {
    // Unparseable text falls back to the supplied default (or null).
    Assertions.assertNull(NumberUtil.parseInt("abc", null));
    Assertions.assertEquals(456, NumberUtil.parseInt("abc", 456));
    // A valid leading integer part is parsed; the default is ignored.
    Assertions.assertEquals(123, NumberUtil.parseInt("123.abc", 789));
    Assertions.assertEquals(123, NumberUtil.parseInt("123.3", null));
}
|
/**
 * Asks the broker at {@code brokerAddr} to reset its master flush offset.
 *
 * @param brokerAddr        target broker address
 * @param masterFlushOffset the offset to reset to
 * @throws MQBrokerException if the broker responds with a non-success code
 */
public void resetMasterFlushOffset(final String brokerAddr, final long masterFlushOffset)
    throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException, MQBrokerException {
    // Build the request carrying the desired master flush offset.
    final ResetMasterFlushOffsetHeader requestHeader = new ResetMasterFlushOffsetHeader();
    requestHeader.setMasterFlushOffset(masterFlushOffset);
    final RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.RESET_MASTER_FLUSH_OFFSET, requestHeader);
    // Synchronous call with a 3 second timeout.
    final RemotingCommand response = this.remotingClient.invokeSync(brokerAddr, request, 3000);
    assert response != null;
    // Any code other than SUCCESS is surfaced as a broker-side failure.
    if (response.getCode() != ResponseCode.SUCCESS) {
        throw new MQBrokerException(response.getCode(), response.getRemark(), brokerAddr);
    }
}
|
@Test
public void testResetMasterFlushOffset() throws RemotingException, InterruptedException, MQBrokerException {
    // mockInvokeSync() presumably stubs the remoting client with a SUCCESS
    // response — TODO confirm; the call must then complete without throwing.
    mockInvokeSync();
    mqClientAPI.resetMasterFlushOffset(defaultBrokerAddr, 1L);
}
|
@Operation(summary = "force-success", description = "FORCE_TASK_SUCCESS")
@Parameters({
        @Parameter(name = "id", description = "TASK_INSTANCE_ID", required = true, schema = @Schema(implementation = int.class), example = "12")
})
@PostMapping(value = "/{id}/force-success", consumes = {"application/json"})
@ResponseStatus(HttpStatus.OK)
@ApiException(FORCE_TASK_SUCCESS_ERROR)
public TaskInstanceSuccessResponse forceTaskSuccess(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                                    @Parameter(name = "projectCode", description = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                                    @PathVariable(value = "id") Integer id) {
    // Delegate to the service layer and wrap its result in the V2 response type.
    return new TaskInstanceSuccessResponse(
            taskInstanceService.forceTaskSuccess(loginUser, projectCode, id));
}
|
@Test
public void testForceTaskSuccess() {
    // The service reports SUCCESS; the controller must propagate the code.
    final Result serviceResult = new Result();
    putMsg(serviceResult, Status.SUCCESS);
    when(taskInstanceService.forceTaskSuccess(any(), Mockito.anyLong(), Mockito.anyInt()))
            .thenReturn(serviceResult);
    final Result response = taskInstanceV2Controller.forceTaskSuccess(null, 1L, 1);
    Assertions.assertEquals(Integer.valueOf(Status.SUCCESS.getCode()), response.getCode());
}
|
/**
 * Resolves the OIDC scope: a per-request attribute wins, then the configured
 * scope, then the standard default.
 *
 * @return the resolved scope string
 */
public String getScope() {
    final var requestScope = context.getRequestAttribute(OidcConfiguration.SCOPE);
    final var resolvedScope = requestScope.or(() -> Optional.ofNullable(configuration.getScope()));
    return (String) resolvedScope.orElse("openid profile email");
}
|
@Test
public void shouldResolveScopeFromDefaultValues() {
    // With no scope set on the request or the configuration, the standard
    // OIDC default must be returned.
    var context = MockWebContext.create();
    var configuration = new OidcConfiguration();
    var configurationContext = new OidcConfigurationContext(context, configuration);
    assertEquals("openid profile email", configurationContext.getScope());
}
|
// Returns this task's display name, including its subtask designation, as
// supplied by the runtime environment's TaskInfo.
public final String getName() {
    return getEnvironment().getTaskInfo().getTaskNameWithSubtasks();
}
|
@Test
void testTaskAvoidHangingAfterSnapshotStateThrownException() throws Exception {
    // given: Configured SourceStreamTask with source which fails on checkpoint.
    Configuration taskManagerConfig = new Configuration();
    taskManagerConfig.set(STATE_BACKEND, TestMemoryStateBackendFactory.class.getName());
    StreamConfig cfg = new StreamConfig(new Configuration());
    cfg.setStateKeySerializer(mock(TypeSerializer.class));
    cfg.setOperatorID(new OperatorID(4712L, 43L));
    // FailedSource presumably throws from its snapshot/checkpoint path —
    // TODO confirm against its definition elsewhere in this file.
    FailedSource failedSource = new FailedSource();
    cfg.setStreamOperator(new TestStreamSource<String, FailedSource>(failedSource));
    cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    try (NettyShuffleEnvironment shuffleEnvironment =
            new NettyShuffleEnvironmentBuilder().build()) {
        Task task =
                createTask(
                        SourceStreamTask.class,
                        shuffleEnvironment,
                        cfg,
                        taskManagerConfig,
                        EXECUTOR_EXTENSION.getExecutor());
        // when: Task starts
        task.startTaskThread();
        // wait for the task starts doing the work.
        failedSource.awaitRunning();
        // and: Checkpoint is triggered which should lead to exception in Source.
        task.triggerCheckpointBarrier(
                42L, 1L, CheckpointOptions.forCheckpointWithDefaultLocation());
        // wait for clean termination.
        // join() returning at all is the regression check: a hanging task
        // would block here forever.
        task.getExecutingThread().join();
        // then: The task doesn't hang but finished with FAILED state.
        assertThat(task.getExecutionState()).isEqualTo(ExecutionState.FAILED);
    }
}
|
/**
 * Sets the worker pool size.
 *
 * @param poolSize the desired pool size; must be strictly positive
 * @return this config instance, for call chaining
 * @throws IllegalArgumentException if {@code poolSize} is zero or negative
 */
public ExecutorConfig setPoolSize(final int poolSize) {
    if (poolSize <= 0) {
        // Include the offending value so misconfiguration is easy to trace.
        throw new IllegalArgumentException("poolSize must be positive, but was " + poolSize);
    }
    this.poolSize = poolSize;
    return this;
}
|
// Zero is not a valid pool size; setPoolSize must reject it with
// IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void shouldNotAcceptZeroCorePoolSize() {
    new ExecutorConfig().setPoolSize(0);
}
|
// Returns whether the event bus is enabled (the cached EVENT_BUS_ENABLE flag).
public static boolean isEnable() {
    return EVENT_BUS_ENABLE;
}
|
@Test
public void isEnable() throws Exception {
    // The cached flag must mirror the RPC config value it was read from.
    Assert.assertEquals(EventBus.isEnable(), RpcConfigs.getBooleanValue(RpcOptions.EVENT_BUS_ENABLE));
}
|
@Override
public void createTimelineSchema(String[] args) {
  try {
    Configuration conf = new YarnConfiguration();
    LOG.info("Creating database and collections for DocumentStore : {}",
        DocumentStoreUtils.getStoreVendor(conf));
    try (DocumentStoreWriter documentStoreWriter = DocumentStoreFactory
        .createDocumentStoreWriter(conf)) {
      documentStoreWriter.createDatabase();
      // Create one collection per timeline entity family, in a fixed order.
      final CollectionType[] collections = {
          CollectionType.APPLICATION,
          CollectionType.ENTITY,
          CollectionType.FLOW_ACTIVITY,
          CollectionType.FLOW_RUN
      };
      for (CollectionType collection : collections) {
        documentStoreWriter.createCollection(collection.getCollectionName());
      }
    }
  } catch (Exception e) {
    // Best-effort setup: failures are logged, not propagated to the caller.
    LOG.error("Error while creating Timeline Collections", e);
  }
}
|
@Test
public void collectionCreatorTest() {
  // Schema creation with no CLI arguments must run without throwing.
  final DocumentStoreCollectionCreator creator = new DocumentStoreCollectionCreator();
  creator.createTimelineSchema(new String[0]);
}
|
/**
 * Returns the platform-specific default key directory, resolved from the
 * current operating system name.
 */
public static String getDefaultKeyDirectory() {
    final String osName = System.getProperty("os.name");
    return getDefaultKeyDirectory(osName);
}
|
@Test
public void testGetDefaultKeyDirectory() {
    // Each supported OS maps to its conventional Ethereum key directory.
    final String sep = File.separator;
    assertTrue(
            WalletUtils.getDefaultKeyDirectory("Mac OS X")
                    .endsWith(sep + "Library" + sep + "Ethereum"));
    assertTrue(
            WalletUtils.getDefaultKeyDirectory("Windows")
                    .endsWith(sep + "Ethereum"));
    assertTrue(
            WalletUtils.getDefaultKeyDirectory("Linux")
                    .endsWith(sep + ".ethereum"));
}
|
/**
 * Atomically increments the index and returns the previous value, masked to
 * be non-negative.
 */
public final int getAndIncrement() {
    final int previous = INDEX_UPDATER.getAndIncrement(this);
    // Clearing the sign bit makes the sequence wrap from Integer.MAX_VALUE
    // back to 0 instead of going negative on overflow.
    return previous & Integer.MAX_VALUE;
}
|
@Test
void testGetAndIncrement() {
    // Returns the previous value, then advances by one.
    assertEquals(0, i1.getAndIncrement());
    assertEquals(1, i1.get());
    assertEquals(127, i2.getAndIncrement());
    assertEquals(128, i2.get());
    // At Integer.MAX_VALUE the masked counter wraps back to 0.
    assertEquals(Integer.MAX_VALUE, i3.getAndIncrement());
    assertEquals(0, i3.get());
}
|
@Override
public FieldValueProvider decode(JsonNode value)
{
    // Wrap the raw JSON node in a provider that interprets it as milliseconds
    // since epoch for the bound column.
    return new MillisecondsSinceEpochJsonValueProvider(value, columnHandle);
}
|
@Test
public void testDecode()
{
    // Epoch-millis values are accepted both as JSON numbers and as strings.
    // TIME / TIME WITH TIME ZONE: millis since midnight.
    tester.assertDecodedAs("33701000", TIME, 33701000);
    tester.assertDecodedAs("\"33701000\"", TIME, 33701000);
    tester.assertDecodedAs("33701000", TIME_WITH_TIME_ZONE, packDateTimeWithZone(33701000, UTC_KEY));
    tester.assertDecodedAs("\"33701000\"", TIME_WITH_TIME_ZONE, packDateTimeWithZone(33701000, UTC_KEY));
    // TIMESTAMP / TIMESTAMP WITH TIME ZONE: millis since epoch (UTC for zoned).
    tester.assertDecodedAs("1519032101123", TIMESTAMP, 1519032101123L);
    tester.assertDecodedAs("\"1519032101123\"", TIMESTAMP, 1519032101123L);
    tester.assertDecodedAs("1519032101123", TIMESTAMP_WITH_TIME_ZONE, packDateTimeWithZone(1519032101123L, UTC_KEY));
    tester.assertDecodedAs("\"1519032101123\"", TIMESTAMP_WITH_TIME_ZONE, packDateTimeWithZone(1519032101123L, UTC_KEY));
}
|
/**
 * Allocates the collection field named {@code fieldName} on {@code object},
 * sizing it from the largest collection in the same injection group. Does
 * nothing when the field has no group or the group size cannot be determined.
 */
void allocateCollectionField( Object object, BeanInjectionInfo beanInjectionInfo, String fieldName ) {
  BeanInjectionInfo.Property property = getProperty( beanInjectionInfo, fieldName );
  if ( property == null ) {
    return;
  }
  String groupName = property.getGroupName();
  if ( groupName == null ) {
    return;
  }
  List<BeanInjectionInfo.Property> groupProperties = getGroupProperties( beanInjectionInfo, groupName );
  Integer maxGroupSize = getMaxSize( groupProperties, object );
  if ( maxGroupSize == null ) {
    // Not able to determine a numeric size for the group.
    return;
  }
  // Guarantee at least one element so constants still get a slot.
  allocateCollectionField( property, object, Math.max( 1, maxGroupSize ) );
}
|
@Test
public void allocateCollectionField_Array() {
  final BeanInjector injector = new BeanInjector( null );
  final BeanInjectionInfo info = new BeanInjectionInfo( MetaBeanLevel1.class );
  final MetaBeanLevel1 bean = new MetaBeanLevel1();
  bean.setSub( new MetaBeanLevel2() );
  // The sibling group field's size (2) should drive the allocation below.
  bean.getSub().setAscending( Arrays.asList( new Boolean[] { true, false } ) );
  assertNull( bean.getSub().getFilenames() );
  injector.allocateCollectionField( bean.getSub(), info, "FILENAME_ARRAY" );
  assertEquals( 2, bean.getSub().getFilenames().length );
}
|
/**
 * Creates a response command defaulting to SYSTEM_ERROR; the handler is
 * expected to overwrite the code and remark before sending.
 */
public static RemotingCommand createResponseCommand(Class<? extends CommandCustomHeader> classHeader) {
    final String defaultRemark = "not set any response code";
    return createResponseCommand(RemotingSysResponseCode.SYSTEM_ERROR, defaultRemark, classHeader);
}
|
@Test
public void testCreateResponseCommand_FailToCreateCommand() {
    // Passing the bare CommandCustomHeader interface as the header class is
    // expected to make command creation fail and return null — presumably
    // because the interface cannot be instantiated; TODO confirm.
    System.setProperty(RemotingCommand.REMOTING_VERSION_KEY, "2333");
    final RemotingCommand cmd = RemotingCommand.createResponseCommand(
        RemotingSysResponseCode.SUCCESS, "Sample remark", CommandCustomHeader.class);
    assertThat(cmd).isNull();
}
|
@Override
public void execute() {
    // Assemble the delete request from the exchange-derived settings, then
    // copy any returned (old) attributes back onto the result.
    DeleteItemRequest request = DeleteItemRequest.builder()
            .tableName(determineTableName())
            .key(determineKey())
            .returnValues(determineReturnValues())
            .expected(determineUpdateCondition())
            .build();
    DeleteItemResponse result = ddbClient.deleteItem(request);
    addAttributesToResult(result.attributes());
}
|
@Test
public void execute() {
    // Configure key, conditional-delete expectation and return-values headers.
    final Map<String, AttributeValue> keyAttrs = new HashMap<>();
    keyAttrs.put("1", AttributeValue.builder().s("Key_1").build());
    exchange.getIn().setHeader(Ddb2Constants.KEY, keyAttrs);
    final Map<String, ExpectedAttributeValue> condition = new HashMap<>();
    condition.put("name", ExpectedAttributeValue.builder()
            .attributeValueList(AttributeValue.builder().s("expected value").build()).build());
    exchange.getIn().setHeader(Ddb2Constants.UPDATE_CONDITION, condition);
    exchange.getIn().setHeader(Ddb2Constants.RETURN_VALUES, "ALL_OLD");
    command.execute();
    // The request must carry every configured setting, and the old attributes
    // must be copied back onto the exchange.
    assertEquals("DOMAIN1", ddbClient.deleteItemRequest.tableName());
    assertEquals(keyAttrs, ddbClient.deleteItemRequest.key());
    assertEquals(condition, ddbClient.deleteItemRequest.expected());
    assertEquals(ReturnValue.ALL_OLD, ddbClient.deleteItemRequest.returnValues());
    assertEquals(AttributeValue.builder().s("attrValue").build(),
            exchange.getIn().getHeader(Ddb2Constants.ATTRIBUTES, Map.class).get("attrName"));
}
|
/**
 * Replaces {@code configOption}'s value with {@code newValue}, but only when
 * the currently configured value references at least one local artifact
 * (per {@code hasLocal}); otherwise the configuration is left untouched.
 *
 * @param config       the configuration to update in place
 * @param configOption the list-valued option to rewrite
 * @param newValue     the replacement value (e.g. with uploaded artifact URIs)
 */
@VisibleForTesting
void updateConfig(
        Configuration config, ConfigOption<List<String>> configOption, List<String> newValue) {
    final List<String> originalValue =
            config.getOptional(configOption).orElse(Collections.emptyList());
    if (hasLocal(originalValue)) {
        // Fixed garbled log wording ("after to replace local artifact").
        LOG.info(
                "Updating configuration '{}' to replace local artifact(s): '{}'",
                configOption.key(),
                newValue);
        config.set(configOption, newValue);
    }
}
|
@Test
void testUpdateConfig() {
    // One local:// artifact plus one remote artifact.
    final List<String> artifacts =
            Arrays.asList("local:///tmp/artifact1.jar", "s3://my-bucket/artifact2.jar");
    final Configuration config = new Configuration();
    config.set(ArtifactFetchOptions.ARTIFACT_LIST, artifacts);
    // Simulate the upload: only the local entry is rewritten to the target dir.
    final List<String> uploaded = new ArrayList<>(artifacts);
    uploaded.set(0, getTargetDirUri() + "/artifact1.jar");
    artifactUploader.updateConfig(config, ArtifactFetchOptions.ARTIFACT_LIST, uploaded);
    assertThat(config.get(ArtifactFetchOptions.ARTIFACT_LIST)).isEqualTo(uploaded);
}
|
@Override
public String toString() {
    // Same output as the %s-formatted template: each field is rendered via
    // String.valueOf.
    return "The giant looks " + health + ", " + fatigue + " and " + nourishment + ".";
}
|
@Test
void testSetFatigue() {
    final var model = new GiantModel(Health.HEALTHY, Fatigue.ALERT, Nourishment.SATURATED);
    assertEquals(Fatigue.ALERT, model.getFatigue());
    // Every fatigue value must round-trip through the setter and appear in
    // the rendered description.
    final var expectedFormat = "The giant looks healthy, %s and saturated.";
    for (final var fatigue : Fatigue.values()) {
        model.setFatigue(fatigue);
        assertEquals(fatigue, model.getFatigue());
        assertEquals(String.format(expectedFormat, fatigue), model.toString());
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.