focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Fluent setter: stores the given {@link Category} on this pet and returns
 * {@code this} so calls can be chained.
 */
public Pet category(Category category) {
this.category = category;
return this;
}
|
@Test
public void categoryTest() {
    // Replaces the empty TODO stub with a real check of the fluent setter:
    // it must store the value and return the same Pet instance for chaining.
    // NOTE(review): assumes the generated model exposes getCategory() — the
    // usual OpenAPI-generated accessor; confirm against the Pet model.
    Category category = new Category();
    Pet pet = new Pet();
    assertSame(pet, pet.category(category));
    assertEquals(category, pet.getCategory());
}
|
/**
 * Deserializes the response body as a {@link RestResult} of the requested
 * type and wraps it, together with the response headers, in an
 * {@link HttpRestResult}.
 */
@Override
@SuppressWarnings("unchecked")
public HttpRestResult<T> convertResult(HttpClientResponse response, Type responseType) throws Exception {
    T parsedBody = JacksonUtils.toObj(response.getBody(), responseType);
    HttpRestResult<T> result = convert((RestResult<T>) parsedBody);
    result.setHeader(response.getHeaders());
    return result;
}
|
@Test
void testConvertResult() throws Exception {
// Serialize a known RestResult to JSON and feed it back through the handler;
// the round-trip must preserve code, data, message and the supplied headers.
RestResult<String> testCase = RestResult.<String>builder().withCode(200).withData("ok").withMsg("msg").build();
InputStream inputStream = new ByteArrayInputStream(JacksonUtils.toJsonBytes(testCase));
HttpClientResponse response = mock(HttpClientResponse.class);
when(response.getBody()).thenReturn(inputStream);
when(response.getHeaders()).thenReturn(Header.EMPTY);
when(response.getStatusCode()).thenReturn(200);
RestResultResponseHandler<String> handler = new RestResultResponseHandler<>();
handler.setResponseType(RestResult.class);
HttpRestResult<String> actual = handler.handle(response);
assertEquals(testCase.getCode(), actual.getCode());
assertEquals(testCase.getData(), actual.getData());
assertEquals(testCase.getMessage(), actual.getMessage());
assertEquals(Header.EMPTY, actual.getHeader());
}
|
/**
 * Authenticates a third-party (social) user, returning the persisted
 * {@link SocialUserDO} record — reusing a stored auth result when one exists
 * for this (type, code, state) triple, otherwise fetching it from the
 * provider and upserting it into the DB.
 */
@NotNull
public SocialUserDO authSocialUser(Integer socialType, Integer userType, String code, String state) {
// Look in the DB first, because the authorization code can only be used once.
// During social login, when no User is bound yet, a bind-login follows and the
// same code needs to be used a second time — hence the stored copy.
SocialUserDO socialUser = socialUserMapper.selectByTypeAndCodeAnState(socialType, code, state);
if (socialUser != null) {
return socialUser;
}
// Fetch the user from the third-party provider.
AuthUser authUser = socialClientService.getAuthUser(socialType, userType, code, state);
Assert.notNull(authUser, "三方用户不能为空");
// Persist to the DB (insert when unseen, update otherwise).
socialUser = socialUserMapper.selectByTypeAndOpenid(socialType, authUser.getUuid());
if (socialUser == null) {
socialUser = new SocialUserDO();
}
socialUser.setType(socialType).setCode(code).setState(state) // code + state must be saved so the lookup above can find this record later
.setOpenid(authUser.getUuid()).setToken(authUser.getToken().getAccessToken()).setRawTokenInfo((toJsonString(authUser.getToken())))
.setNickname(authUser.getNickname()).setAvatar(authUser.getAvatar()).setRawUserInfo(toJsonString(authUser.getRawUserInfo()));
if (socialUser.getId() == null) {
socialUserMapper.insert(socialUser);
} else {
socialUserMapper.updateById(socialUser);
}
return socialUser;
}
|
@Test
public void testAuthSocialUser_exists() {
// Prepare parameters.
Integer socialType = SocialTypeEnum.GITEE.getType();
Integer userType = randomEle(SocialTypeEnum.values()).getType();
String code = "tudou";
String state = "yuanma";
// Seed the DB: a stored record for this (type, code, state) triple must be
// returned directly, without calling the third-party client.
SocialUserDO socialUser = randomPojo(SocialUserDO.class).setType(socialType).setCode(code).setState(state);
socialUserMapper.insert(socialUser);
// Invoke.
SocialUserDO result = socialUserService.authSocialUser(socialType, userType, code, state);
// Assert the stored record was returned as-is.
assertPojoEquals(socialUser, result);
}
|
/**
 * Returns whether this instance matches the supplied patterns.
 * An empty pattern map is still considered a match.
 */
public boolean isMatch(Map<String, Pattern> patterns) {
    return patterns.isEmpty() || matchPatterns(patterns);
}
|
@Test
public void testIsMatchValid() throws UnknownHostException {
Uuid uuid = Uuid.randomUuid();
ClientMetricsInstanceMetadata instanceMetadata = new ClientMetricsInstanceMetadata(uuid, ClientMetricsTestUtils.requestContext());
// We consider empty/missing client matching patterns as valid
assertTrue(instanceMetadata.isMatch(Collections.emptyMap()));
// Each single-key pattern below must match the metadata derived from the
// test request context (client id, instance id, software name/version,
// source address and port).
assertTrue(instanceMetadata.isMatch(
Collections.singletonMap(ClientMetricsConfigs.CLIENT_ID, Pattern.compile(".*"))));
assertTrue(instanceMetadata.isMatch(
Collections.singletonMap(ClientMetricsConfigs.CLIENT_ID, Pattern.compile("producer-1"))));
assertTrue(instanceMetadata.isMatch(
Collections.singletonMap(ClientMetricsConfigs.CLIENT_ID, Pattern.compile("producer.*"))));
assertTrue(instanceMetadata.isMatch(
Collections.singletonMap(ClientMetricsConfigs.CLIENT_INSTANCE_ID, Pattern.compile(uuid.toString()))));
assertTrue(instanceMetadata.isMatch(
Collections.singletonMap(ClientMetricsConfigs.CLIENT_SOFTWARE_NAME, Pattern.compile("apache-kafka-java"))));
assertTrue(instanceMetadata.isMatch(
Collections.singletonMap(ClientMetricsConfigs.CLIENT_SOFTWARE_VERSION, Pattern.compile("3.5.2"))));
assertTrue(instanceMetadata.isMatch(
Collections.singletonMap(ClientMetricsConfigs.CLIENT_SOURCE_ADDRESS, Pattern.compile(
InetAddress.getLocalHost().getHostAddress()))));
assertTrue(instanceMetadata.isMatch(
Collections.singletonMap(ClientMetricsConfigs.CLIENT_SOURCE_PORT, Pattern.compile(
String.valueOf(ClientMetricsTestUtils.CLIENT_PORT)))));
}
|
/**
 * Builds a {@link TimedCacheReloadTrigger} from configuration.
 * Requires cache type FULL, reload strategy TIMED, and a present ISO reload
 * time; otherwise an IllegalArgumentException is raised by checkArgument.
 */
public static TimedCacheReloadTrigger fromConfig(ReadableConfig config) {
checkArgument(
config.get(CACHE_TYPE) == FULL,
"'%s' should be '%s' in order to build a Timed cache reload trigger.",
CACHE_TYPE.key(),
FULL);
checkArgument(
config.get(FULL_CACHE_RELOAD_STRATEGY) == TIMED,
"'%s' should be '%s' in order to build a Timed cache reload trigger.",
FULL_CACHE_RELOAD_STRATEGY.key(),
TIMED);
checkArgument(
config.getOptional(FULL_CACHE_TIMED_RELOAD_ISO_TIME).isPresent(),
"Missing '%s' in the configuration. This option is required to build a Timed cache reload trigger.",
FULL_CACHE_TIMED_RELOAD_ISO_TIME.key());
// parseBest prefers OffsetTime (zone-aware) and falls back to LocalTime;
// an unparsable value raises DateTimeParseException.
Temporal reloadTime =
(Temporal)
DateTimeFormatter.ISO_TIME.parseBest(
config.get(FULL_CACHE_TIMED_RELOAD_ISO_TIME),
OffsetTime::from,
LocalTime::from);
int reloadIntervalInDays = config.get(FULL_CACHE_TIMED_RELOAD_INTERVAL_IN_DAYS);
return new TimedCacheReloadTrigger(reloadTime, reloadIntervalInDays);
}
|
@Test
void testCreateFromConfig() {
// Happy path: a fully valid configuration produces a trigger.
assertThat(TimedCacheReloadTrigger.fromConfig(createValidConf())).isNotNull();
// Wrong cache type must be rejected.
Configuration conf1 = createValidConf().set(CACHE_TYPE, PARTIAL);
assertThatThrownBy(() -> TimedCacheReloadTrigger.fromConfig(conf1))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("should be 'FULL'");
// Wrong reload strategy must be rejected.
Configuration conf2 = createValidConf().set(FULL_CACHE_RELOAD_STRATEGY, PERIODIC);
assertThatThrownBy(() -> TimedCacheReloadTrigger.fromConfig(conf2))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("should be 'TIMED'");
// Missing reload time must be rejected.
Configuration conf3 = createValidConf();
conf3.removeConfig(FULL_CACHE_TIMED_RELOAD_ISO_TIME);
assertThatThrownBy(() -> TimedCacheReloadTrigger.fromConfig(conf3))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Missing '" + FULL_CACHE_TIMED_RELOAD_ISO_TIME.key() + "'");
// Unparsable ISO time must surface the DateTimeParseException.
Configuration conf4 = createValidConf().set(FULL_CACHE_TIMED_RELOAD_ISO_TIME, "10");
assertThatThrownBy(() -> TimedCacheReloadTrigger.fromConfig(conf4))
.isInstanceOf(DateTimeParseException.class)
.hasMessageContaining("could not be parsed");
}
|
/**
 * Returns a transform computing the multiset difference (exceptAll) between
 * the applied PCollection and {@code rightCollection}.
 *
 * @throws NullPointerException if {@code rightCollection} is null
 */
public static <T> PTransform<PCollection<T>, PCollection<T>> exceptAll(
PCollection<T> rightCollection) {
checkNotNull(rightCollection, "rightCollection argument is null");
return new SetImpl<>(rightCollection, exceptAll());
}
|
@Test
@Category(NeedsRunner.class)
public void testExceptAllCollectionList() {
// Applies exceptAll across a PCollectionList of three collections
// (first/second come from the shared test fixture) for both plain strings
// and Row-schema collections; the schema must be preserved on the result.
PCollection<String> third = p.apply("third", Create.of(Arrays.asList("a", "b", "b", "g", "f")));
PCollection<Row> thirdRows = p.apply("thirdRows", Create.of(toRows("a", "b", "b", "g")));
PAssert.that(
PCollectionList.of(first).and(second).and(third).apply("stringsCols", Sets.exceptAll()))
.containsInAnyOrder("g", "h", "h");
PCollection<Row> results =
PCollectionList.of(firstRows)
.and(secondRows)
.and(thirdRows)
.apply("rowCols", Sets.exceptAll());
PAssert.that(results).containsInAnyOrder(toRows("g", "h", "h"));
assertEquals(schema, results.getSchema());
p.run();
}
|
/**
 * Builds a {@link MongoClientURI} from the configured connection string,
 * applying the configured maximum connections-per-host.
 */
public MongoClientURI getMongoClientURI() {
    final MongoClientOptions.Builder optionsBuilder =
            MongoClientOptions.builder().connectionsPerHost(getMaxConnections());
    return new MongoClientURI(uri, optionsBuilder);
}
|
@Test
public void validateSucceedsWithIPv6Address() throws Exception {
// A bracketed IPv6 host in a multi-host URI must survive config processing
// and round-trip unchanged through getMongoClientURI().
MongoDbConfiguration configuration = new MongoDbConfiguration();
final Map<String, String> properties = singletonMap(
"mongodb_uri", "mongodb://[2001:DB8::DEAD:BEEF:CAFE:BABE]:1234,127.0.0.1:5678/TEST"
);
new JadConfig(new InMemoryRepository(properties), configuration).process();
assertEquals("mongodb://[2001:DB8::DEAD:BEEF:CAFE:BABE]:1234,127.0.0.1:5678/TEST", configuration.getMongoClientURI().toString());
}
|
/** Stores the given string value; null and empty values are accepted as-is. */
@Override
public void setString( String string ) {
this.string = string;
}
|
@Test
public void testSetString() {
    // A round-trip through setString/getString must preserve null, the empty
    // string and a regular value unchanged.
    ValueString valueString = new ValueString();
    valueString.setString( null );
    assertNull( valueString.getString() );
    valueString.setString( "" );
    assertEquals( "", valueString.getString() );
    valueString.setString( "Boden" );
    assertEquals( "Boden", valueString.getString() );
}
|
/**
 * Instantiates {@code clazz} via reflection using a constructor matching the
 * given parameters, or returns {@code null} when no matching constructor is
 * found or instantiation fails with one of the caught reflection exceptions.
 * NOTE(review): other failures (e.g. IllegalArgumentException from
 * newInstance) propagate to the caller — presumably intentional; confirm.
 */
public static <T> T newInstanceOrNull(Class<? extends T> clazz, Object... params) {
Constructor<T> constructor = selectMatchingConstructor(clazz, params);
if (constructor == null) {
return null;
}
try {
return constructor.newInstance(params);
} catch (IllegalAccessException | InstantiationException | InvocationTargetException e) {
return null;
}
}
|
@Test
public void newInstanceOrNull_nullIsMatchingAllTypes() {
// A null argument must be accepted as matching any parameter type during
// constructor selection, so instantiation succeeds.
ClassWithTwoArgConstructorConstructor instance = InstantiationUtils.newInstanceOrNull(
ClassWithTwoArgConstructorConstructor.class,
"foo", null);
assertNotNull(instance);
}
|
/**
 * Returns the message with its argument array applied, lazily formatting on
 * first call and caching the result in {@code formattedMessage}.
 * NOTE(review): the lazy cache is unsynchronized — presumably a logging event
 * is confined to a single thread; confirm before sharing events across threads.
 */
public String getFormattedMessage() {
if (formattedMessage != null) {
return formattedMessage;
}
if (argumentArray != null) {
formattedMessage = MessageFormatter.arrayFormat(message, argumentArray)
.getMessage();
} else {
// No arguments: the raw message is used verbatim.
formattedMessage = message;
}
return formattedMessage;
}
|
@Test
public void testNoFormattingWithArgs() {
// A message without '{}' anchors must come back unchanged even when an
// argument array is supplied; formatting is lazy, so the cached field is
// null until getFormattedMessage() is first called.
String message = "testNoFormatting";
Throwable throwable = null;
Object[] argArray = new Object[] {12, 13};
LoggingEvent event = new LoggingEvent("", logger, Level.INFO, message, throwable, argArray);
assertNull(event.formattedMessage);
assertEquals(message, event.getFormattedMessage());
}
|
/**
 * Reads and validates an external-issue report from the given JSON file.
 * I/O problems and malformed JSON are both wrapped in IllegalStateException
 * with the report path in the message (the JSON-syntax variant gets a more
 * specific message).
 */
public ExternalIssueReport parse(Path reportPath) {
try (Reader reader = Files.newBufferedReader(reportPath, StandardCharsets.UTF_8)) {
ExternalIssueReport report = gson.fromJson(reader, ExternalIssueReport.class);
externalIssueReportValidator.validate(report, reportPath);
return report;
} catch (JsonIOException | IOException e) {
throw new IllegalStateException("Failed to read external issues report '" + reportPath + "'", e);
} catch (JsonSyntaxException e) {
throw new IllegalStateException("Failed to read external issues report '" + reportPath + "': invalid JSON syntax", e);
}
}
|
@Test
public void parse_whenCorrectDeprecatedFormat_shouldParseCorrectly() {
// A report in the deprecated on-disk format must still parse, be passed to
// the validator, and deserialize to the expected content.
reportPath = Paths.get(DEPRECATED_REPORTS_LOCATION + "report.json");
ExternalIssueReport report = externalIssueReportParser.parse(reportPath);
verify(validator).validate(report, reportPath);
assertDeprecatedReport(report);
}
|
/**
 * Sets the numeric UID of {@code file} via SFTP, leaving the GID as 0.
 *
 * @param file  remote file to change
 * @param owner numeric user id as a string; a non-numeric value raises
 *              NumberFormatException, as before
 * @throws BackgroundException if writing the attributes fails
 */
@Override
public void setUnixOwner(final Path file, final String owner) throws BackgroundException {
    // Integer.valueOf replaces the deprecated new Integer(String) boxing
    // constructor (deprecated since Java 9); parsing behavior is identical.
    final FileAttributes attr = new FileAttributes.Builder()
        .withUIDGID(Integer.valueOf(owner), 0)
        .build();
    try {
        session.sftp().setAttributes(file.getAbsolute(), attr);
    }
    catch(IOException e) {
        throw new SFTPExceptionMappingService().map("Failure to write attributes of {0}", e, file);
    }
}
|
@Test
@Ignore
public void testSetUnixOwner() throws Exception {
    // Sets the UID on a remote file and verifies the listing reflects it.
    // The unused local `modified` from the original has been removed.
    final Path home = new SFTPHomeDirectoryService(session).find();
    final Path test = new Path(home, "test", EnumSet.of(Path.Type.file));
    new SFTPUnixPermissionFeature(session).setUnixOwner(test, "80");
    assertEquals("80", new SFTPListService(session).list(home, new DisabledListProgressListener()).get(test).attributes().getOwner());
}
|
/**
 * Notifies the elastic-agent extension that a cluster profile changed.
 * Any exception from the extension is logged and deliberately swallowed so a
 * misbehaving plugin cannot break the caller.
 */
public void notifyPluginAboutClusterProfileChanged(String pluginId, ClusterProfilesChangedStatus status, Map<String, String> oldClusterProfile, Map<String, String> newClusterProfile) {
try {
LOGGER.debug("Processing report cluster profile changed for plugin: {} with status: {} with old cluster: {} and new cluster: {}", pluginId, status, oldClusterProfile, newClusterProfile);
extension.clusterProfileChanged(pluginId, status, oldClusterProfile, newClusterProfile);
LOGGER.debug("Done processing report cluster profile changed for plugin: {} with status: {} with old cluster: {} and new cluster: {}", pluginId, status, oldClusterProfile, newClusterProfile);
} catch (Exception e) {
LOGGER.error("An error occurred while processing report cluster profile changed for plugin: {} with status: {} with old cluster: {} and new cluster: {}", pluginId, status, oldClusterProfile, newClusterProfile, e);
}
}
|
@Test
public void shouldTalkToExtensionToNotifyClusterProfileHasChanged() {
// The registry must forward the change notification to the extension
// exactly once, with all arguments passed through unchanged.
final Map<String, String> newClusterProfileConfigurations = Map.of("Image", "alpine:latest");
elasticAgentPluginRegistry.notifyPluginAboutClusterProfileChanged(PLUGIN_ID, ClusterProfilesChangedStatus.CREATED, null, newClusterProfileConfigurations);
verify(elasticAgentExtension, times(1)).clusterProfileChanged(PLUGIN_ID, ClusterProfilesChangedStatus.CREATED, null, newClusterProfileConfigurations);
verifyNoMoreInteractions(elasticAgentExtension);
}
|
/**
 * Splits an HTTP query string (or POST payload) into a name -&gt; values map.
 * <p>
 * Values of a repeated parameter accumulate in encounter order. Two payload
 * shapes are not treated as key/value data and are returned whole under a
 * single blank-name (" ") key: SOAP-like bodies starting with "&lt;?", and
 * payloads that are not in key=value form.
 *
 * @param query raw query string, parameters separated by {@code PARAM_CONCATENATE}
 * @return map from decoded parameter name to its decoded value(s)
 */
public static Map<String, String[]> getQueryMap(String query) {
    Map<String, String[]> map = new HashMap<>();
    String[] params = query.split(PARAM_CONCATENATE);
    for (String param : params) {
        String[] paramSplit = param.split("=");
        if (paramSplit.length == 0) {
            continue; // We found no key-/value-pair, so continue on the next param
        }
        String name = decodeQuery(paramSplit[0]);
        // hack for SOAP request (generally)
        if (name.trim().startsWith("<?")) { // $NON-NLS-1$
            map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
            return map;
        }
        // the post payload is not key=value
        if ((param.startsWith("=") && paramSplit.length == 1) || paramSplit.length > 2) {
            map.put(" ", new String[] {query}); //blank name // $NON-NLS-1$
            return map;
        }
        String value = "";
        if (paramSplit.length > 1) {
            value = decodeQuery(paramSplit[1]);
        }
        String[] known = map.get(name);
        if (known == null) {
            known = new String[] {value};
        } else {
            // Append the new value at the end; Arrays.copyOf replaces the
            // original hand-rolled allocate-then-arraycopy with the same
            // O(n) copy and identical ordering.
            known = java.util.Arrays.copyOf(known, known.length + 1);
            known[known.length - 1] = value;
        }
        map.put(name, known);
    }
    return map;
}
|
@Test
void testGetQueryMapMultipleValues() {
    // Fixed mojibake in the query literal: "&para" had been rendered as the
    // pilcrow character "¶", destroying the "&"-separators the assertions
    // below depend on. The restored query has param2 twice and param1 once.
    String query = "param2=15&param1=12&param2=baulpismuth";
    Map<String, String[]> params = RequestViewHTTP.getQueryMap(query);
    Assertions.assertNotNull(params);
    Assertions.assertEquals(2, params.size());
    String[] param1 = params.get("param1");
    Assertions.assertNotNull(param1);
    Assertions.assertEquals(1, param1.length);
    Assertions.assertEquals("12", param1[0]);
    // Repeated parameter: both values, in encounter order.
    String[] param2 = params.get("param2");
    Assertions.assertNotNull(param2);
    Assertions.assertEquals(2, param2.length);
    Assertions.assertEquals("15", param2[0]);
    Assertions.assertEquals("baulpismuth", param2[1]);
}
|
/** Returns the id that will be assigned to the next recording. */
long nextRecordingId()
{
return nextRecordingId;
}
|
@Test
void shouldNotThrowArchiveExceptionWhenNextRecordingIdIsInvalidIfCatalogIsReadOnly()
throws IOException
{
// Force an invalid (already-used) next-recording-id, then open the catalog
// read-only: it must load without validation errors and expose the stored id.
setNextRecordingId(recordingTwoId);
try (Catalog catalog = new Catalog(archiveDir, clock))
{
assertEquals(recordingTwoId, catalog.nextRecordingId());
}
}
|
/**
 * Returns the server time (TIME command) of the given cluster node,
 * executing the read against that node's client and blocking on the result.
 */
@Override
public Long time(RedisClusterNode node) {
RedisClient entry = getEntry(node);
RFuture<Long> f = executorService.readAsync(entry, LongCodec.INSTANCE, RedisCommands.TIME_LONG);
return syncFuture(f);
}
|
@Test
public void testTime() {
// A live master must report a plausible (epoch-scale) server time.
RedisClusterNode master = getFirstMaster();
Long time = connection.time(master);
assertThat(time).isGreaterThan(1000);
}
|
/**
 * Routes by hint values: with no hint values every available target matches;
 * otherwise each hint value is mapped through the single-value sharding
 * method and the resulting targets are collected.
 */
@Override
public Collection<String> doSharding(final Collection<String> availableTargetNames, final HintShardingValue<Comparable<?>> shardingValue) {
    if (shardingValue.getValues().isEmpty()) {
        return availableTargetNames;
    }
    return shardingValue.getValues().stream().map(this::doSharding).collect(Collectors.toList());
}
|
@Test
void assertDoShardingWithSingleValueOfDefault() {
// A single hint value naming an available target must route to that target.
List<String> availableTargetNames = Arrays.asList("t_order_0", "t_order_1", "t_order_2", "t_order_3");
HintShardingValue<Comparable<?>> shardingValue = new HintShardingValue<>("t_order", "order_id", Collections.singleton("t_order_0"));
Collection<String> actual = hintInlineShardingAlgorithmDefault.doSharding(availableTargetNames, shardingValue);
assertTrue(actual.contains("t_order_0"));
}
|
/**
 * Creates a Jt400 endpoint, resolving the AS400 connection pool either from
 * a #-reference in the URI or from the component default, and deriving the
 * endpoint type (PGM, DTAQ, ...) from the path's file extension.
 */
@Override
protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> properties) throws Exception {
AS400ConnectionPool connectionPool;
if (properties.containsKey(CONNECTION_POOL)) {
LOG.trace("AS400ConnectionPool instance specified in the URI - will look it up.");
// We have chosen to handle the connectionPool option ourselves, so
// we must remove it from the given parameter list (see
// http://camel.apache.org/writing-components.html)
String poolId = properties.remove(CONNECTION_POOL).toString();
connectionPool
= EndpointHelper.resolveReferenceParameter(getCamelContext(), poolId, AS400ConnectionPool.class, true);
} else {
LOG.trace("No AS400ConnectionPool instance specified in the URI - one will be provided.");
connectionPool = getConnectionPool();
}
// The extension after the last '.' selects the Jt400Type enum constant.
String type = remaining.substring(remaining.lastIndexOf('.') + 1).toUpperCase();
Jt400Endpoint endpoint = new Jt400Endpoint(uri, this, connectionPool);
setProperties(endpoint, properties);
endpoint.setType(Jt400Type.valueOf(type));
return endpoint;
}
|
@Test
public void testCreatePgmEndpoint() throws Exception {
// A ".pgm" URI with a referenced connection pool must resolve to a
// Jt400Endpoint instance.
Endpoint endpoint = component
.createEndpoint("jt400://user:password@host/qsys.lib/library.lib/queue.pgm?connectionPool=#mockPool");
assertNotNull(endpoint);
assertTrue(endpoint instanceof Jt400Endpoint);
}
|
/**
 * Signs the UTF-8 bytes of {@code msg} with this key's RSA private key and
 * returns the Base64-encoded signature. The JCA algorithm name is assembled
 * as SIGNING_ALGORITHM + "with" + key algorithm (e.g. "SHA256withRSA").
 *
 * @throws SecurityException wrapping any GeneralSecurityException
 */
public String sign(String msg) {
try {
RSAPrivateKey key = getPrivateKey();
Signature sig = Signature.getInstance(SIGNING_ALGORITHM + "with" + key.getAlgorithm());
sig.initSign(key);
sig.update(msg.getBytes(StandardCharsets.UTF_8));
return Base64.getEncoder().encodeToString(sig.sign());
} catch (GeneralSecurityException e) {
throw new SecurityException(e);
}
}
|
@Test
public void dsigSignAndVerify() throws Exception {
    // Sign with the key under test, then verify with the standard JCA
    // SHA256withRSA verifier and the matching public key.
    // The stray debug System.out.println of the signature has been removed.
    String plainText = "Hello world";
    String msg = key.sign(plainText);
    Signature sig = Signature.getInstance("SHA256withRSA");
    sig.initVerify(key.getPublicKey());
    sig.update(plainText.getBytes(StandardCharsets.UTF_8));
    assertTrue(sig.verify(Base64.getDecoder().decode(msg)));
}
|
/**
 * Groups partitions into {@link PartitionSpec}s by shared storage-descriptor
 * properties. Partitions located under the table directory (or lacking an SD
 * or location entirely) are emitted as shared-SD specs; partitions outside
 * the table directory are lumped into one list-composing spec for backward
 * compatibility. Skewed partitions and partitions whose bucket count differs
 * from the first partition's are never grouped with others.
 */
public static List<PartitionSpec> getPartitionspecsGroupedByStorageDescriptor(Table table,
Collection<Partition> partitions) {
final String tablePath = table.getSd().getLocation();
ImmutableListMultimap<StorageDescriptorKey, Partition> partitionsWithinTableDirectory =
Multimaps.index(partitions, input -> {
// if sd is not in the list of projected fields, all the partitions
// can be just grouped in PartitionSpec object
if (input.getSd() == null) {
return StorageDescriptorKey.UNSET_KEY;
}
// if sd has skewed columns we better not group partition, since different partitions
// could have different skewed info like skewed location
if (input.getSd().getSkewedInfo() != null
&& input.getSd().getSkewedInfo().getSkewedColNames() != null
&& !input.getSd().getSkewedInfo().getSkewedColNames().isEmpty()) {
return new StorageDescriptorKey(input.getSd());
}
// if partitions don't have the same number of buckets we can not group their SD,
// this could lead to incorrect number of buckets
if (input.getSd().getNumBuckets()
!= partitions.iterator().next().getSd().getNumBuckets()) {
return new StorageDescriptorKey(input.getSd());
}
// if the partition is within table, use the tableSDKey to group it with other partitions
// within the table directory
if (input.getSd().getLocation() != null && input.getSd().getLocation()
.startsWith(tablePath)) {
return new StorageDescriptorKey(tablePath, input.getSd());
}
// if partitions are located outside table location we treat them as non-standard
// and do not perform any grouping
// if the location is not set partitions are grouped according to the rest of the SD fields
return new StorageDescriptorKey(input.getSd());
});
List<PartitionSpec> partSpecs = new ArrayList<>();
// Classify partitions based on shared SD properties.
Map<StorageDescriptorKey, List<PartitionWithoutSD>> sdToPartList
= new HashMap<>();
// we don't expect partitions to exist outside directory in most cases
List<Partition> partitionsOutsideTableDir = new ArrayList<>(0);
for (StorageDescriptorKey key : partitionsWithinTableDirectory.keySet()) {
boolean isUnsetKey = key.equals(StorageDescriptorKey.UNSET_KEY);
// group the partitions together when
// case I : sd is not set because it was not in the requested fields
// case II : when sd.location is not set because it was not in the requested fields
// case III : when sd.location is set and it is located within table directory
if (isUnsetKey || key.baseLocation == null || key.baseLocation.equals(tablePath)) {
for (Partition partition : partitionsWithinTableDirectory.get(key)) {
PartitionWithoutSD partitionWithoutSD
= new PartitionWithoutSD();
partitionWithoutSD.setValues(partition.getValues());
partitionWithoutSD.setCreateTime(partition.getCreateTime());
partitionWithoutSD.setLastAccessTime(partition.getLastAccessTime());
// relative path = location with the table-path prefix stripped;
// null when the SD/location was not projected.
partitionWithoutSD.setRelativePath(
(isUnsetKey || !partition.getSd().isSetLocation()) ? null : partition.getSd()
.getLocation().substring(tablePath.length()));
partitionWithoutSD.setParameters(partition.getParameters());
if (!sdToPartList.containsKey(key)) {
sdToPartList.put(key, new ArrayList<>());
}
sdToPartList.get(key).add(partitionWithoutSD);
}
} else {
// Lump all partitions outside the tablePath into one PartSpec.
// if non-standard partitions need not be deDuped create PartitionListComposingSpec
// this will be used mostly for keeping backwards compatibility with some HMS APIs which use
// PartitionListComposingSpec for non-standard partitions located outside table
partitionsOutsideTableDir.addAll(partitionsWithinTableDirectory.get(key));
}
}
// create sharedSDPartSpec for all the groupings
for (Map.Entry<StorageDescriptorKey, List<PartitionWithoutSD>> entry : sdToPartList
.entrySet()) {
partSpecs.add(getSharedSDPartSpec(table, entry.getKey(), entry.getValue()));
}
if (!partitionsOutsideTableDir.isEmpty()) {
PartitionSpec partListSpec = new PartitionSpec();
partListSpec.setCatName(table.getCatName());
partListSpec.setDbName(table.getDbName());
partListSpec.setTableName(table.getTableName());
partListSpec.setPartitionList(new PartitionListComposingSpec(partitionsOutsideTableDir));
partSpecs.add(partListSpec);
}
return partSpecs;
}
|
@Test
public void testGetPartitionspecsGroupedBySDOnePartitionInTable() throws MetaException {
// Create database and table
Table tbl = new TableBuilder()
.setDbName(DB_NAME)
.setTableName(TABLE_NAME)
.addCol("id", "int")
.setLocation("/foo")
.build(null);
// A single partition located under the table directory ("/foo/bar") must be
// grouped into one shared-SD spec with a relative path of "/bar".
Partition p1 = new PartitionBuilder()
.setDbName("DB_NAME")
.setTableName(TABLE_NAME)
.setLocation("/foo/bar")
.addCol("a", "int")
.addValue("val1")
.setInputFormat("foo")
.build(null);
List<PartitionSpec> result =
MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(tbl, Collections.singleton(p1));
assertThat(result.size(), is(1));
PartitionSpec ps = result.get(0);
assertThat(ps.getRootPath(), is(tbl.getSd().getLocation()));
List<PartitionWithoutSD> partitions = ps.getSharedSDPartitionSpec().getPartitions();
assertThat(partitions.size(), is(1));
PartitionWithoutSD partition = partitions.get(0);
assertThat(partition.getRelativePath(), is("/bar"));
assertThat(partition.getValues(), is(Collections.singletonList("val1")));
}
|
/** Returns the shared singleton {@link RepositoryMetadataStore} instance. */
public static RepositoryMetadataStore getInstance() {
return repositoryMetadataStore;
}
|
@Test
public void shouldBeAbleToCheckIfPluginExists() throws Exception {
// After registering metadata for a plugin id, hasPlugin must report true
// for that id and false for an unknown one.
RepositoryMetadataStore metadataStore = RepositoryMetadataStore.getInstance();
PackageConfigurations repositoryConfigurationPut = new PackageConfigurations();
metadataStore.addMetadataFor("plugin-id", repositoryConfigurationPut);
assertThat(metadataStore.hasPlugin("plugin-id"), is(true));
assertThat(metadataStore.hasPlugin("some-plugin-which-does-not-exist"), is(false));
}
|
/** Convenience overload delegating with a null host resolver. */
static KafkaAdminClient createInternal(AdminClientConfig config, TimeoutProcessorFactory timeoutProcessorFactory) {
return createInternal(config, timeoutProcessorFactory, null);
}
|
@Test
public void testDefaultApiTimeoutAndRequestTimeoutConflicts() {
// A default API timeout smaller than the request timeout is invalid:
// client creation must fail with a KafkaException caused by ConfigException.
final AdminClientConfig config = newConfMap(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, "500");
KafkaException exception = assertThrows(KafkaException.class,
() -> KafkaAdminClient.createInternal(config, null));
assertInstanceOf(ConfigException.class, exception.getCause());
}
|
/**
 * Builds the URL pattern from the configured include and exclude URL lists.
 */
@Override
public UrlPattern doGetPattern() {
    UrlPattern.Builder patternBuilder = UrlPattern.builder()
            .includes(includeUrls)
            .excludes(excludeUrls);
    return patternBuilder.build();
}
|
@Test
public void does_not_match_servlet_filter_that_prefix_a_ws() {
// "api/foo/action" is handled by the servlet filter, so the WS pattern must
// exclude it, while the sibling action without a filter handler matches.
initWebServiceEngine(
newWsUrl("api/foo", "action").setHandler(ServletFilterHandler.INSTANCE),
newWsUrl("api/foo", "action_2"));
assertThat(underTest.doGetPattern().matches("/api/foo/action")).isFalse();
assertThat(underTest.doGetPattern().matches("/api/foo/action_2")).isTrue();
}
|
/**
 * Validates a DROP SCHEMA statement: every named schema must exist, and a
 * non-empty schema may only be dropped when CASCADE is specified.
 *
 * @throws SchemaNotFoundException     when a named schema does not exist
 * @throws DropNotEmptySchemaException when a non-empty schema is dropped
 *                                     without CASCADE
 */
@Override
public void validate(final SingleRule rule, final SQLStatementContext sqlStatementContext, final ShardingSphereDatabase database) {
DropSchemaStatement dropSchemaStatement = (DropSchemaStatement) sqlStatementContext.getSqlStatement();
boolean containsCascade = dropSchemaStatement.isContainsCascade();
for (IdentifierValue each : dropSchemaStatement.getSchemaNames()) {
String schemaName = each.getValue();
ShardingSphereSchema schema = database.getSchema(schemaName);
ShardingSpherePreconditions.checkNotNull(schema, () -> new SchemaNotFoundException(schemaName));
ShardingSpherePreconditions.checkState(containsCascade || schema.getAllTableNames().isEmpty(), () -> new DropNotEmptySchemaException(schemaName));
}
}
|
@Test
void assertValidate() {
// DROP SCHEMA ... CASCADE on an existing schema must pass validation
// without throwing.
new SingleDropSchemaMetaDataValidator().validate(mock(SingleRule.class, RETURNS_DEEP_STUBS), createSQLStatementContext("foo_schema", true), mockDatabase());
}
|
/**
 * Randomly assigns each sorted partition to one of the consumers subscribed
 * to its topic. Raw collection types and explicit iterators from the
 * original have been replaced with generics and enhanced-for loops; the
 * assignment logic is unchanged.
 *
 * NOTE(review): if no consumer subscribes to a partition's topic, the
 * re-roll loop below never terminates — presumably allPartitionsSorted only
 * yields partitions of subscribed topics; confirm.
 */
@Override
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) {
    Map<String, List<TopicPartition>> assignment = new HashMap<>();
    // initialize subscription mappings for assignment
    for (String memberId : subscriptions.keySet()) {
        assignment.put(memberId, new ArrayList<>());
    }
    List<String> consumerList = new ArrayList<>(Utils.sorted(subscriptions.keySet()));
    // assign partitions at random
    for (Object partitionObj : this.allPartitionsSorted(partitionsPerTopic, subscriptions)) {
        TopicPartition partition = (TopicPartition) partitionObj;
        String topic = partition.topic();
        int rand = ThreadLocalRandom.current().nextInt(0, consumerList.size());
        // re-roll until we hit a consumer actually subscribed to this topic
        while (!subscriptions.get(consumerList.get(rand)).topics().contains(topic)) {
            rand = ThreadLocalRandom.current().nextInt(0, consumerList.size());
        }
        assignment.get(consumerList.get(rand)).add(partition);
    }
    return assignment;
}
|
@Test
public void ConsumerEmptyWithoutTopic() {
// A consumer with no topic subscriptions and no partitions must still get
// an entry in the assignment map — with an empty partition list.
String consumerId = "testConsumer";
Map<String, Integer> partitionsPerTopic = new HashMap<>();
Map<String, List<TopicPartition>> assignment = testAssignor.assign(
partitionsPerTopic,
Collections.singletonMap(consumerId, new Subscription(Collections.<String>emptyList()))
);
assertEquals(Collections.singleton(consumerId), assignment.keySet());
assertTrue(assignment.get(consumerId).isEmpty());
}
|
/**
 * Derives an annotated cluster state from the given parameters: computes each
 * node's effective state, takes down groups with too low availability, marks
 * the whole cluster DOWN when a cluster-level reason applies, and infers the
 * distribution bit count for the resulting state.
 */
static AnnotatedClusterState generatedStateFrom(final Params params) {
final ContentCluster cluster = params.cluster;
final ClusterState workingState = ClusterState.emptyState();
final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();
for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons);
workingState.setNodeState(nodeInfo.getNode(), nodeState);
}
takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);
final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
if (reasonToBeDown.isPresent()) {
workingState.setClusterState(State.DOWN);
}
workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));
return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
}
|
@Test
void cluster_down_if_less_than_min_count_of_distributors_available() {
// With 2 of 3 distributors down and a minimum of 2 required, the generated
// state must be cluster-down with the too-few-distributors reason.
final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
.bringEntireClusterUp()
.reportDistributorNodeState(0, State.DOWN)
.reportDistributorNodeState(2, State.DOWN);
final ClusterStateGenerator.Params params = fixture.generatorParams().minDistributorNodesUp(2);
final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
assertThat(state.toString(), equalTo("cluster:d distributor:2 .0.s:d storage:3"));
assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE)));
}
|
/**
 * Loads and parses core-site.xml and hdfs-site.xml from the configured
 * client paths, storing the parsed configs in mCoreConf/mHdfsConf and
 * returning a result built from the accumulated state, message and advice.
 */
protected ValidationTaskResult loadHdfsConfig() {
Pair<String, String> clientConfFiles = getHdfsConfPaths();
String coreConfPath = clientConfFiles.getFirst();
String hdfsConfPath = clientConfFiles.getSecond();
mCoreConf = accessAndParseConf("core-site.xml", coreConfPath);
mHdfsConf = accessAndParseConf("hdfs-site.xml", hdfsConfPath);
return new ValidationTaskResult(mState, getName(), mMsg.toString(), mAdvice.toString());
}
|
@Test
public void cannotParseCoreSiteXml() throws IOException {
    // Write both site files, then truncate each by 10 bytes to corrupt the
    // XML. The RandomAccessFiles are now closed via try-with-resources —
    // the original leaked both handles.
    String hdfsSite = Paths.get(sTestDir.toPath().toString(), "hdfs-site.xml").toString();
    ValidationTestUtils.writeXML(hdfsSite, ImmutableMap.of("key2", "value2"));
    try (RandomAccessFile hdfsFile = new RandomAccessFile(hdfsSite, "rw")) {
        hdfsFile.setLength(hdfsFile.length() - 10);
    }
    String coreSite = Paths.get(sTestDir.toPath().toString(), "core-site.xml").toString();
    ValidationTestUtils.writeXML(coreSite, ImmutableMap.of("key1", "value1"));
    try (RandomAccessFile coreFile = new RandomAccessFile(coreSite, "rw")) {
        coreFile.setLength(coreFile.length() - 10);
    }
    CONF.set(PropertyKey.UNDERFS_HDFS_CONFIGURATION,
        hdfsSite + HdfsConfValidationTask.SEPARATOR + coreSite);
    HdfsConfValidationTask task =
        new HdfsConfValidationTask("hdfs://namenode:9000/alluxio", CONF);
    // Both parse failures must be reported in the result and the advice.
    ValidationTaskResult result = task.loadHdfsConfig();
    assertEquals(ValidationUtils.State.FAILED, result.getState());
    assertThat(result.getResult(),
        containsString(String.format("Failed to parse %s", hdfsSite)));
    assertThat(result.getResult(), containsString(String.format("Failed to parse %s", coreSite)));
    assertThat(result.getAdvice(), containsString(String.format("Failed to parse %s", hdfsSite)));
    assertThat(result.getAdvice(), containsString(String.format("Failed to parse %s", coreSite)));
}
|
/**
 * Merges centralized configuration into the given map in place, optionally
 * decrypting values; delegates to {@code merge}.
 */
public static void mergeMap(boolean decrypt, Map<String, Object> config) {
merge(decrypt, config);
}
|
@Test
public void testMap_allowNullOverwrite() {
// A "${TEST.null: default}" placeholder whose resolved value is null must
// overwrite the entry with null rather than keeping the default.
Map<String, Object> testMap = new HashMap<>();
testMap.put("key", "${TEST.null: value}");
CentralizedManagement.mergeMap(true, testMap);
Assert.assertNull(testMap.get("key"));
}
|
/**
 * Executes a SHOW statement by dispatching it to the global show-executor
 * visitor for the current state manager.
 */
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
|
@Test
public void testShowTableFromUnknownDatabase() throws SemanticException, DdlException {
// SHOW TABLES against a database that does not exist must fail with a
// SemanticException naming the database.
ShowTableStmt stmt = new ShowTableStmt("emptyDb", false, null);
expectedEx.expect(SemanticException.class);
expectedEx.expectMessage("Unknown database 'emptyDb'");
ShowExecutor.execute(stmt, ctx);
}
|
/**
 * Encodes a logging event as a single-line JSON object with optional
 * "level", mandatory "message" and optional "stacktrace" members, terminated
 * by the JSON line separator, and returns the UTF-8 bytes.
 */
@Override
public byte[] encode(ILoggingEvent event) {
StringBuilder sb = new StringBuilder();
sb.append(OPEN_OBJ);
var level = event.getLevel();
if (level != null) {
appenderMember(sb, "level", level.levelStr);
sb.append(VALUE_SEPARATOR);
}
appenderMember(sb, "message", StringEscapeUtils.escapeJson(event.getFormattedMessage()));
IThrowableProxy tp = event.getThrowableProxy();
String stackTrace = null;
if (tp != null) {
// Only emit the stacktrace member when a throwable is attached.
sb.append(VALUE_SEPARATOR);
stackTrace = tpc.convert(event);
appenderMember(sb, "stacktrace", StringEscapeUtils.escapeJson(stackTrace));
}
sb.append(CLOSE_OBJ);
sb.append(CoreConstants.JSON_LINE_SEPARATOR);
return sb.toString().getBytes(UTF_8_CHARSET);
}
|
// With a null level and no throwable, the encoder must emit only the
// "message" member — no "level", no "stacktrace" — terminated by a newline.
@Test
void should_encode_when_no_level_and_no_stacktrace() {
    var logEvent = mock(ILoggingEvent.class);
    when(logEvent.getLevel()).thenReturn(null);
    when(logEvent.getFormattedMessage()).thenReturn("message");
    var bytes = underTest.encode(logEvent);
    assertThat(new String(bytes, StandardCharsets.UTF_8)).isEqualTo("{\"message\":\"message\"}\n");
}
|
/**
 * Handles review creation for the current product. On success, redirects back to
 * the product page. On a ClientBadRequestException, re-renders the product page
 * with the submitted payload and validation errors, sets HTTP 400, and looks up
 * whether the product is in the customer's favourites to populate "inFavourite".
 */
@PostMapping("create-review")
public Mono<String> createReview(@ModelAttribute("product") Mono<Product> productMono,
                                 NewProductReviewPayload payload,
                                 Model model,
                                 ServerHttpResponse response) {
    return productMono.flatMap(product ->
            this.productReviewsClient.createProductReview(product.id(), payload.rating(), payload.review())
                    .thenReturn("redirect:/customer/products/%d".formatted(product.id()))
                    .onErrorResume(ClientBadRequestException.class, exception -> {
                        // Default to "not a favourite"; flipped below if a favourite entry is found.
                        model.addAttribute("inFavourite", false);
                        model.addAttribute("payload", payload);
                        model.addAttribute("errors", exception.getErrors());
                        response.setStatusCode(HttpStatus.BAD_REQUEST);
                        return this.favouriteProductsClient.findFavouriteProductByProductId(product.id())
                                .doOnNext(favouriteProduct -> model.addAttribute("inFavourite", true))
                                .thenReturn("customer/products/product");
                    }));
}
|
// Happy path: a valid review request must produce a redirect to the product page,
// leave the response status untouched, and never consult the other clients.
@Test
void createReview_RequestIsValid_RedirectsToProductPage() {
    // given
    var model = new ConcurrentModel();
    var response = new MockServerHttpResponse();
    doReturn(Mono.just(new ProductReview(UUID.fromString("86efa22c-cbae-11ee-ab01-679baf165fb7"), 1, 3, "Ну, на троечку...")))
            .when(this.productReviewsClient).createProductReview(1, 3, "Ну, на троечку...");
    // when
    StepVerifier.create(this.controller.createReview(
                    Mono.just(new Product(1, "Товар №1", "Описание товара №1")),
                    new NewProductReviewPayload(3, "Ну, на троечку..."), model, response))
            // then
            .expectNext("redirect:/customer/products/1")
            .verifyComplete();
    assertNull(response.getStatusCode());
    verify(this.productReviewsClient).createProductReview(1, 3, "Ну, на троечку...");
    verifyNoMoreInteractions(this.productReviewsClient);
    verifyNoInteractions(this.productsClient, this.favouriteProductsClient);
}
|
/**
 * Copies the given character sequence into a freshly allocated heap buffer as
 * UTF-8. If writing fails, the buffer is released so no memory leaks; on
 * success, ownership of the returned buffer passes to the caller.
 */
private static ByteBuf copiedBufferUtf8(CharSequence string) {
    // Size the buffer exactly for the UTF-8 encoding of the input.
    final ByteBuf copy = ALLOC.heapBuffer(ByteBufUtil.utf8Bytes(string));
    boolean success = false;
    try {
        ByteBufUtil.writeUtf8(copy, string);
        success = true;
        return copy;
    } finally {
        if (!success) {
            // Writing failed: release to avoid leaking the allocation.
            copy.release();
        }
    }
}
|
// Round-trips a string containing multi-byte UTF-8 code points through the
// shared CharSequence copy helper.
@Test
public void testCopiedBufferUtf8() {
    testCopiedBufferCharSequence("Some UTF_8 like äÄ∏ŒŒ", CharsetUtil.UTF_8);
}
|
/**
 * Registers the RocksDB "size of all memtables" gauge for the given metric
 * context, delegating to the shared addMutableMetric helper.
 */
public static void addSizeAllMemTables(final StreamsMetricsImpl streamsMetrics,
                                       final RocksDBMetricContext metricContext,
                                       final Gauge<BigInteger> valueProvider) {
    addMutableMetric(
        streamsMetrics,
        metricContext,
        valueProvider,
        SIZE_OF_ALL_MEMTABLES,
        SIZE_OF_ALL_MEMTABLES_DESCRIPTION
    );
}
|
// The registered gauge must carry the expected metric name and description.
@Test
public void shouldAddSizeAllMemTablesMetric() {
    final String name = "size-all-mem-tables";
    final String description = "Approximate size of active, unflushed immutable, and pinned immutable memtables in bytes";
    runAndVerifyMutableMetric(
        name,
        description,
        () -> RocksDBMetrics.addSizeAllMemTables(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER)
    );
}
|
/**
 * Scans the plugin directory resolved from {@code path} for jar files, registers
 * any jars not seen before, collects their top-level class names, and registers
 * each newly discovered class as a Spring bean wrapped in a ShenyuLoaderResult.
 *
 * @param path the extension-plugin path used by ShenyuPluginPathBuilder
 * @return results for the classes registered in this call; empty when the
 *         directory has no jars or no new jar was found
 * @throws IOException if a jar file cannot be opened
 */
public List<ShenyuLoaderResult> loadExtendPlugins(final String path) throws IOException {
    File[] jarFiles = ShenyuPluginPathBuilder.getPluginFile(path).listFiles(file -> file.getName().endsWith(".jar"));
    if (Objects.isNull(jarFiles)) {
        return Collections.emptyList();
    }
    List<ShenyuLoaderResult> results = new ArrayList<>();
    boolean loadNewPlugin = false;
    for (File each : jarFiles) {
        // Skip jars already registered by a previous invocation.
        if (jars.stream().map(PluginJar::absolutePath).filter(StringUtils::hasText).anyMatch(p -> p.equals(each.getAbsolutePath()))) {
            continue;
        }
        loadNewPlugin = true;
        JarFile jar = new JarFile(each, true);
        // The JarFile is intentionally kept open: it backs later class loading.
        jars.add(new PluginJar(jar, each));
        Enumeration<JarEntry> entries = jar.entries();
        while (entries.hasMoreElements()) {
            JarEntry jarEntry = entries.nextElement();
            String entryName = jarEntry.getName();
            // Only top-level classes; inner classes (names containing '$') are skipped.
            if (entryName.endsWith(".class") && !entryName.contains("$")) {
                // '/' is a literal separator here, so use replace instead of regex replaceAll.
                String className = entryName.substring(0, entryName.length() - 6).replace('/', '.');
                if (checkExistence(className)) {
                    LOG.warn("The same plugin {} already exists", className);
                } else {
                    names.add(className);
                }
            }
        }
    }
    if (!loadNewPlugin) {
        return results;
    }
    names.forEach(className -> {
        Object instance;
        try {
            if (!uploadedJarClassByteArrayCache.containsKey(className)) {
                instance = getOrCreateSpringBean(className);
                if (Objects.nonNull(instance)) {
                    results.add(buildResult(instance));
                    LOG.info("The class successfully loaded into a ext-plugin {} is registered as a spring bean", className);
                }
            }
        } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
            // Pass the exception as the final argument so SLF4J logs the stack
            // trace; previously the cause was silently dropped.
            LOG.warn("Registering ext-plugins succeeds spring bean fails:{}", className, e);
        }
    });
    return results;
}
|
// A path containing no jar files must yield an empty result list.
@Test
public void testGetPluginPathWithNoJar() throws IOException {
    List<ShenyuLoaderResult> pluginList = shenyuPluginLoader.loadExtendPlugins("test");
    assertThat(pluginList.size(), is(0));
}
|
/**
 * Validates that the value conforms to the schema, delegating to the
 * name-aware overload with a null field name.
 */
public static void validateValue(Schema schema, Object value) {
    validateValue(null, schema, value);
}
|
// A raw Long is not a valid value for the logical Timestamp schema.
@Test
public void testValidateValueMismatchTimestamp() {
    assertThrows(DataException.class, () -> ConnectSchema.validateValue(Timestamp.SCHEMA, 1000L));
}
|
/**
 * Evaluates the bound expression against the given row using a fresh visitor.
 *
 * @param data the row to evaluate
 * @return true if the row matches the expression
 */
public boolean eval(StructLike data) {
    return new EvalVisitor().eval(data);
}
|
// Covers equality both on a top-level field ("x") and on a deeply nested
// struct field ("s1.s2.s3.s4.i"), asserting match and non-match cases.
@Test
public void testEqual() {
    assertThat(equal("x", 5).literals().size()).isEqualTo(1);
    Evaluator evaluator = new Evaluator(STRUCT, equal("x", 7));
    assertThat(evaluator.eval(TestHelpers.Row.of(7, 8, null))).as("7 == 7 => true").isTrue();
    assertThat(evaluator.eval(TestHelpers.Row.of(6, 8, null))).as("6 == 7 => false").isFalse();
    Evaluator structEvaluator = new Evaluator(STRUCT, equal("s1.s2.s3.s4.i", 7));
    assertThat(
            structEvaluator.eval(
                TestHelpers.Row.of(
                    7,
                    8,
                    null,
                    TestHelpers.Row.of(
                        TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(7)))))))
        .as("7 == 7 => true")
        .isTrue();
    assertThat(
            structEvaluator.eval(
                TestHelpers.Row.of(
                    6,
                    8,
                    null,
                    TestHelpers.Row.of(
                        TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(6)))))))
        .as("6 == 7 => false")
        .isFalse();
}
|
/**
 * Returns the repository URL exactly as originally supplied by the user,
 * or null when no URL has been configured.
 */
@Override
public String getUrl() {
    if (url == null) {
        return null;
    }
    return url.originalArgument();
}
|
// The configured URL must be returned verbatim.
@Test
void shouldReturnTheUrl() {
    String url = "git@github.com/my/repo";
    GitMaterialConfig config = git(url);
    assertEquals(url, config.getUrl());
}
|
/**
 * Determines this instance's process id. With no persistent stores, a fresh
 * random UUID suffices. Otherwise, after locking the state directory, the id is
 * read from the on-disk process file if present and parseable; failing that, a
 * fresh id is generated and written back.
 *
 * @return the process id to use for this instance
 * @throws StreamsException if the state directory lock cannot be obtained
 * @throws ProcessorStateException on unexpected I/O failure writing the process file
 */
public UUID initializeProcessId() {
    if (!hasPersistentStores) {
        final UUID processId = UUID.randomUUID();
        log.info("Created new process id: {}", processId);
        return processId;
    }
    if (!lockStateDirectory()) {
        log.error("Unable to obtain lock as state directory is already locked by another process");
        throw new StreamsException(String.format("Unable to initialize state, this can happen if multiple instances of " +
                                                     "Kafka Streams are running in the same state directory " +
                                                     "(current state directory is [%s]", stateDir.getAbsolutePath()));
    }
    final File processFile = new File(stateDir, PROCESS_FILE_NAME);
    final ObjectMapper mapper = new ObjectMapper();
    try {
        if (processFile.exists()) {
            try {
                final StateDirectoryProcessFile processFileData = mapper.readValue(processFile, StateDirectoryProcessFile.class);
                log.info("Reading UUID from process file: {}", processFileData.processId);
                if (processFileData.processId != null) {
                    return processFileData.processId;
                }
            } catch (final Exception e) {
                // A corrupt/unreadable file is not fatal: fall through and regenerate the id.
                log.warn("Failed to read json process file", e);
            }
        }
        final StateDirectoryProcessFile processFileData = new StateDirectoryProcessFile(UUID.randomUUID());
        log.info("No process id found on disk, got fresh process id {}", processFileData.processId);
        mapper.writeValue(processFile, processFileData);
        return processFileData.processId;
    } catch (final IOException e) {
        log.error("Unable to read/write process file due to unexpected exception", e);
        throw new ProcessorStateException(e);
    }
}
|
// A process file written by a future version with extra fields must still be
// readable, and the stored process id must be returned unchanged.
@Test
public void shouldReadFutureProcessFileFormat() throws Exception {
    final File processFile = new File(appDir, PROCESS_FILE_NAME);
    final ObjectMapper mapper = new ObjectMapper();
    final UUID processId = UUID.randomUUID();
    mapper.writeValue(processFile, new FutureStateDirectoryProcessFile(processId, "some random junk"));
    assertThat(directory.initializeProcessId(), equalTo(processId));
}
|
/**
 * Rolls back the XA branch identified by the xid/branchId pair.
 *
 * @param xid             the global transaction id
 * @param branchId        the branch id combined with xid to build the XA xid
 * @param applicationData unused by this implementation; presumably part of the
 *                        resource-manager callback signature — confirm at the interface
 * @throws XAException if the underlying rollback fails
 */
public synchronized void xaRollback(String xid, long branchId, String applicationData) throws XAException {
    XAXid xaXid = XAXidBuilder.build(xid, branchId);
    xaRollback(xaXid);
}
|
// Rolling back a branch must invoke XAResource.rollback exactly once and
// never call commit.
@Test
public void testXARollback() throws Throwable {
    Connection connection = Mockito.mock(Connection.class);
    Mockito.when(connection.getAutoCommit()).thenReturn(true);
    XAResource xaResource = Mockito.mock(XAResource.class);
    XAConnection xaConnection = Mockito.mock(XAConnection.class);
    Mockito.when(xaConnection.getXAResource()).thenReturn(xaResource);
    BaseDataSourceResource<ConnectionProxyXA> baseDataSourceResource = Mockito.mock(BaseDataSourceResource.class);
    String xid = "xxx";
    ConnectionProxyXA connectionProxyXA = new ConnectionProxyXA(connection, xaConnection, baseDataSourceResource, xid);
    connectionProxyXA.init();
    connectionProxyXA.xaRollback("xxx", 123L, null);
    Mockito.verify(xaResource, times(0)).commit(any(Xid.class), any(Boolean.class));
    Mockito.verify(xaResource).rollback(any(Xid.class));
}
|
/**
 * Creates a scheduled job: validates the cron expression and handler, persists
 * the job record, registers it with Quartz, and marks it NORMAL on success.
 *
 * @param createReqVO the job creation request
 * @return the id of the newly created job
 * @throws SchedulerException if Quartz registration fails (transaction rolls back)
 */
@Override
@Transactional(rollbackFor = Exception.class)
public Long createJob(JobSaveReqVO createReqVO) throws SchedulerException {
    validateCronExpression(createReqVO.getCronExpression());
    // 1.1 Uniqueness check: a job with the same handler name must not already exist.
    if (jobMapper.selectByHandlerName(createReqVO.getHandlerName()) != null) {
        throw exception(JOB_HANDLER_EXISTS);
    }
    // 1.2 Verify that the referenced JobHandler actually exists.
    validateJobHandlerExists(createReqVO.getHandlerName());
    // 2. Insert the JobDO record with initial status INIT.
    JobDO job = BeanUtils.toBean(createReqVO, JobDO.class);
    job.setStatus(JobStatusEnum.INIT.getStatus());
    fillJobMonitorTimeoutEmpty(job);
    jobMapper.insert(job);
    // 3.1 Register the job with the Quartz scheduler.
    schedulerManager.addJob(job.getId(), job.getHandlerName(), job.getHandlerParam(), job.getCronExpression(),
            createReqVO.getRetryCount(), createReqVO.getRetryInterval());
    // 3.2 Scheduling succeeded: flip the job status to NORMAL.
    JobDO updateObj = JobDO.builder().id(job.getId()).status(JobStatusEnum.NORMAL.getStatus()).build();
    jobMapper.updateById(updateObj);
    return job.getId();
}
|
// A random (hence invalid) cron expression must be rejected with
// JOB_CRON_EXPRESSION_VALID before anything is persisted.
@Test
public void testCreateJob_cronExpressionValid() {
    // Prepare arguments: the cron expression is a String and defaults to a random
    // string, which is not a valid cron expression.
    JobSaveReqVO reqVO = randomPojo(JobSaveReqVO.class);
    // Invoke and assert the expected service exception.
    assertServiceException(() -> jobService.createJob(reqVO), JOB_CRON_EXPRESSION_VALID);
}
|
/**
 * Appends the event to the timeline; when the timeline actually changed
 * (i.e. the event was not a duplicate), marks this summary as out of sync.
 */
public void addTimeline(TimelineEvent event) {
    final boolean changed = timeline.add(event);
    if (changed) {
        synced = false;
    }
}
|
// Adding the same timeline event repeatedly must be de-duplicated: the fixture
// already contains one event, so after four identical adds only two remain.
@Test
public void testAddDuplicateTimelineEvents() throws Exception {
    StepRuntimeSummary summary =
        loadObject(
            "fixtures/execution/sample-step-runtime-summary-1.json", StepRuntimeSummary.class);
    TimelineEvent event = TimelineLogEvent.info("hello world");
    summary.addTimeline(event);
    summary.addTimeline(TimelineLogEvent.info("hello world"));
    summary.addTimeline(TimelineLogEvent.info("hello world"));
    summary.addTimeline(TimelineLogEvent.info("hello world"));
    assertEquals(2, summary.getTimeline().getTimelineEvents().size());
    assertEquals(event, summary.getTimeline().getTimelineEvents().get(1));
}
|
/**
 * Validates that a property value is non-null and not blank (whitespace-only
 * counts as empty).
 *
 * @param value        the property value under validation; null triggers failure
 * @param propertyName the property name used in the failure messages
 */
public static void checkNotNullAndNotEmpty(@Nullable String value, String propertyName) {
    Preconditions.checkNotNull(value, "Property '" + propertyName + "' cannot be null");
    final boolean blank = value.trim().isEmpty();
    Preconditions.checkArgument(!blank, "Property '" + propertyName + "' cannot be an empty string");
}
|
// A non-empty value must pass validation without throwing.
@Test
public void testCheckNotNullAndNotEmpty_stringPass() {
    Validator.checkNotNullAndNotEmpty("value", "ignored");
    // pass
}
|
/**
 * Records an int measure for the given metric at the given 1-based line,
 * after validating the metric key and line range.
 */
@Override
public void setIntValue(String metricKey, int line, int value) {
    checkNotNull(metricKey);
    checkLineRange(line);
    setValue(metricKey, line, value);
}
|
// Line numbers are 1-based: line 0 must be rejected with a descriptive message.
@Test
public void validateLineGreaterThanZero() {
    assertThatThrownBy(() -> fileLineMeasures.setIntValue(HITS_METRIC_KEY, 0, 2))
        .hasMessage("Line number should be positive for file src/foo.php.");
}
|
/**
 * Opens an upload stream for the file, retrying on two specific failures:
 * a 412 Precondition Failed (ConflictException) is retried once with the lock
 * token removed (it is presumed expired), and a 417 Expectation Failed
 * (InteroperabilityException) is retried once without the Expect: Continue
 * header. Any other failure propagates unchanged.
 */
@Override
public HttpResponseOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        return this.write(file, this.toHeaders(file, status, expect), status);
    }
    catch(ConflictException e) {
        if(expect) {
            if(null != status.getLockId()) {
                // Handle 412 Precondition Failed with expired token
                log.warn(String.format("Retry failure %s with lock id %s removed", e, status.getLockId()));
                return this.write(file, this.toHeaders(file, status.withLockId(null), expect), status);
            }
        }
        throw e;
    }
    catch(InteroperabilityException e) {
        if(expect) {
            // Handle 417 Expectation Failed
            log.warn(String.format("Retry failure %s with Expect: Continue removed", e));
            return this.write(file, this.toHeaders(file, status.withLockId(null), false), status);
        }
        throw e;
    }
}
|
// Writing into a non-existent parent directory must fail with AccessDeniedException.
// Currently @Ignore'd — presumably flaky or environment-dependent; confirm before enabling.
@Test(expected = AccessDeniedException.class)
@Ignore
public void testWriteAccessDenied() throws Exception {
    final Path test = new Path(new DefaultHomeFinderService(session).find().getAbsolute() + "/nosuchdirectory/" + new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    // With Expect: Continue header
    final HttpResponseOutputStream<Void> out = new DAVWriteFeature(session).write(test, new TransferStatus().withLength(0L), new DisabledConnectionCallback());
    out.close();
}
|
/**
 * Returns a range iterator over the given namespace, delegating to the
 * four-argument overload with an inclusive upper bound (toInclusive = true).
 */
public MemoryLRUCacheBytesIterator range(final String namespace, final Bytes from, final Bytes to) {
    return range(namespace, from, to, true);
}
|
// peekNextKey must return the same key that the subsequent next() yields.
@Test
public void shouldGetSameKeyAsPeekNext() {
    final ThreadCache cache = setupThreadCache(0, 1, 10000L, false);
    final Bytes theByte = Bytes.wrap(new byte[]{0});
    final ThreadCache.MemoryLRUCacheBytesIterator iterator = cache.range(namespace, theByte, Bytes.wrap(new byte[]{1}));
    assertThat(iterator.peekNextKey(), is(iterator.next().key));
}
|
/**
 * Converts a Java object (recursively, for containers) into its Ruby
 * representation. Null maps to Ruby nil; classes with a registered converter
 * use it directly; everything else goes through the fallback conversion.
 */
public static IRubyObject deep(final Ruby runtime, final Object input) {
    if (input == null) {
        return runtime.getNil();
    }
    final Class<?> inputClass = input.getClass();
    final Rubyfier.Converter directConverter = CONVERTER_MAP.get(inputClass);
    return directConverter == null
        ? fallbackConvert(runtime, input, inputClass)
        : directConverter.convert(runtime, input);
}
|
// A Java Float inside a Map must be converted to a RubyFloat (and hence a
// double value) when the map is rubyfied.
@Test
public void testDeepMapWithFloat() throws Exception {
    Map<String, Float> data = new HashMap<>();
    data.put("foo", 1.0F);
    RubyHash rubyHash = (RubyHash)Rubyfier.deep(RubyUtil.RUBY, data);
    // Hack to be able to retrieve the original, unconverted Ruby object from Map
    // it seems the only method providing this is internalGet but it is declared protected.
    // I know this is bad practice but I think this is practically acceptable.
    Method internalGet = RubyHash.class.getDeclaredMethod("internalGet", IRubyObject.class);
    internalGet.setAccessible(true);
    Object result = internalGet.invoke(rubyHash, JavaUtil.convertJavaToUsableRubyObject(RubyUtil.RUBY, "foo"));
    assertEquals(RubyFloat.class, result.getClass());
    assertEquals(1.0D, ((RubyFloat)result).getDoubleValue(), 0);
}
|
/**
 * Builds a Statement AST node from the parse tree, first extracting the data
 * sources referenced by the tree so they are available during the build.
 */
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
|
// A JOIN without explicit aliases must still resolve both sides to
// AliasedRelation nodes using the source names as aliases.
@Test
public void shouldExtractUnaliasedJoinDataSources() {
    // Given:
    final SingleStatementContext stmt = givenQuery("SELECT * FROM TEST1 JOIN TEST2"
        + " ON test1.col1 = test2.col1;");
    // When:
    final Query result = (Query) builder.buildStatement(stmt);
    // Then:
    assertThat(result.getFrom(), is(instanceOf(Join.class)));
    assertThat((Join) result.getFrom(), hasLeft(new AliasedRelation(TEST1, TEST1_NAME)));
    assertThat((Join) result.getFrom(), hasRights(new AliasedRelation(TEST2, TEST2_NAME)));
}
|
/**
 * Materializes this scan into a Spark Batch over the planned task groups,
 * passing this scan's hashCode for identity/caching purposes.
 */
@Override
public Batch toBatch() {
    return new SparkBatch(
        sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
|
// On an unpartitioned table a months(ts)-based predicate cannot prune any
// files: both the predicate and its negation must still plan all 10 partitions.
@Test
public void testUnpartitionedMonths() throws Exception {
    createUnpartitionedTable(spark, tableName);
    SparkScanBuilder builder = scanBuilder();
    MonthsFunction.TimestampToMonthsFunction function =
        new MonthsFunction.TimestampToMonthsFunction();
    UserDefinedScalarFunc udf = toUDF(function, expressions(fieldRef("ts")));
    Predicate predicate =
        new Predicate(
            ">",
            expressions(
                udf, intLit(timestampStrToMonthOrdinal("2017-11-22T00:00:00.000000+00:00"))));
    pushFilters(builder, predicate);
    Batch scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);
    // NOT GT
    builder = scanBuilder();
    predicate = new Not(predicate);
    pushFilters(builder, predicate);
    scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);
}
|
/**
 * Converts a duration in seconds into a human-readable ConvertedTime by
 * finding the first rule whose threshold the duration does not exceed.
 * Durations beyond every configured threshold fall back to "over two years".
 */
public ConvertedTime getConvertedTime(long duration) {
    for (Seconds threshold : RULES.keySet()) {
        if (duration <= threshold.getSeconds()) {
            return RULES.get(threshold).getConvertedTime(duration);
        }
    }
    return new TimeConverter.OverTwoYears().getConvertedTime(duration);
}
|
// 30 seconds short of 60 days must still round to "about 2 months".
@Test
public void testShouldReport2MonthsFor59Days23Hours59Minutes30Seconds() throws Exception {
    assertEquals(TimeConverter.ABOUT_X_MONTHS_AGO.argument(2), timeConverter
        .getConvertedTime(60 * TimeConverter.DAY_IN_SECONDS - 30));
}
|
/**
 * Converts a value to the given class, delegating to the Type-based overload.
 *
 * @throws ConvertException if the conversion fails
 */
public static <T> T convert(Class<T> type, Object value) throws ConvertException {
    return convert((Type) type, value);
}
|
// Converting to Object must be an identity operation.
@Test
public void toObjectTest() {
    final Object result = Convert.convert(Object.class, "aaaa");
    assertEquals("aaaa", result);
}
|
/**
 * Verifies an ISO 9796-style RSA signature: decrypts the signature block,
 * validates its trailer/structure against the digest (checkSignature), feeds
 * the recovered message part and the challenge data into the digest, and
 * compares the result against the digest embedded in the decrypted block.
 *
 * @throws VerificationException if the computed digest does not match
 */
@Override
public void verify(byte[] data, byte[] signature, MessageDigest digest) {
    final byte[] decrypted = engine.processBlock(signature, 0, signature.length);
    // delta accounts for trailer length variations — see checkSignature.
    final int delta = checkSignature(decrypted, digest);
    final int offset = decrypted.length - digest.getDigestLength() - delta;
    // Skip the leading header byte; hash the recovered message portion, then the data.
    digest.update(decrypted, 1, offset - 1);
    digest.update(data);
    if (!CryptoUtils.compare(digest.digest(), decrypted, offset)) {
        throw new VerificationException("Invalid signature");
    }
}
|
// A signature generated over a random challenge with the SHA-512 trailer must verify.
@Test
public void shouldValidateSignatureSHA512() {
    final byte[] challenge = CryptoUtils.random(40);
    final byte[] signature = sign(0x54, challenge, ISOTrailers.TRAILER_SHA512, "SHA-512");
    new DssRsaSignatureVerifier(PUBLIC).verify(challenge, signature, "SHA-512");
}
|
/**
 * Parses the SQL text, wrapping any non-QueryException failure into a
 * QueryException with the PARSING error code. Typical SqlParseException
 * messages (caused by the generated ParseException) tend to be long and
 * multi-line, so they are trimmed before being surfaced.
 */
public QueryParseResult parse(String sql, @Nonnull SqlSecurityContext ssc) {
    try {
        return parse0(sql, ssc);
    } catch (QueryException queryException) {
        // Already in the right shape — rethrow untouched.
        throw queryException;
    } catch (Exception cause) {
        final boolean typicalParserFailure =
            cause instanceof SqlParseException && cause.getCause() instanceof ParseException;
        final String message = typicalParserFailure
            ? trimMessage(cause.getMessage())
            : cause.getMessage();
        throw QueryException.error(SqlErrorCode.PARSING, message, cause);
    }
}
|
// Unsupported keywords must surface as a QueryException with a trimmed parser message.
@Test
public void unsupportedKeywordTest() {
    assertThatThrownBy(() -> parser.parse("show tables"))
        .isInstanceOf(QueryException.class)
        .hasMessageContaining("Encountered \"show tables\" at line 1, column 1.");
}
|
/**
 * Convenience overload: fetches the first page (page 1, size 50) of field-type
 * summaries using the default sort, delegating to the full-parameter overload.
 */
public PageListResponse<IndexSetFieldTypeSummary> getIndexSetFieldTypeSummary(final Set<String> streamIds,
                                                                              final String fieldName,
                                                                              final Predicate<String> indexSetPermissionPredicate) {
    return getIndexSetFieldTypeSummary(streamIds, fieldName, indexSetPermissionPredicate, 1, 50, DEFAULT_SORT.id(), DEFAULT_SORT.direction());
}
|
// When the permission predicate denies every index set, no summaries are
// returned and the field-type service is never consulted.
@Test
void testDoesNotReturnResultsForIndexSetsIfItDoesNotExist() {
    Predicate<String> indexSetPermissionPredicateAlwaysReturningFalse = x -> false;
    doReturn(Set.of("index_set_id")).when(streamService).indexSetIdsByIds(Set.of("stream_id"));
    final PageListResponse<IndexSetFieldTypeSummary> summary = toTest.getIndexSetFieldTypeSummary(Set.of("stream_id"), "field_name", indexSetPermissionPredicateAlwaysReturningFalse);
    assertThat(summary.elements()).isEmpty();
    verifyNoInteractions(indexFieldTypesService);
    verifyNoMoreInteractions(streamService);
}
|
/**
 * Adds a replacement with the default REJECT coalescing policy, i.e.
 * overlapping replacements are refused rather than merged.
 */
@CanIgnoreReturnValue
public Replacements add(Replacement replacement) {
    return add(replacement, CoalescePolicy.REJECT);
}
|
// With the default REJECT policy, adding a replacement whose range overlaps an
// existing one must fail with a descriptive IllegalArgumentException.
@Test
public void overlap() {
    Replacements replacements = new Replacements();
    Replacement hello = Replacement.create(2, 4, "hello");
    Replacement goodbye = Replacement.create(3, 5, "goodbye");
    replacements.add(hello);
    try {
        replacements.add(goodbye);
        fail();
    } catch (IllegalArgumentException expected) {
        assertThat(expected)
            .hasMessageThat()
            .isEqualTo(String.format("%s overlaps with existing replacements: %s", goodbye, hello));
    }
}
|
/**
 * Unescapes a single CSV field. A field wrapped in double-quotes has the outer
 * quotes stripped and each escaped quote ("") collapsed to a single quote; an
 * unquoted field is validated (validateCsvFormat) and returned unchanged.
 * A lone double-quote inside a quoted field is rejected via
 * newInvalidEscapedCsvFieldException.
 */
public static CharSequence unescapeCsv(CharSequence value) {
    int length = checkNotNull(value, "value").length();
    if (length == 0) {
        return value;
    }
    int last = length - 1;
    // Quoted means: starts and ends with a double-quote and is longer than one char.
    boolean quoted = isDoubleQuote(value.charAt(0)) && isDoubleQuote(value.charAt(last)) && length != 1;
    if (!quoted) {
        validateCsvFormat(value);
        return value;
    }
    StringBuilder unescaped = InternalThreadLocalMap.get().stringBuilder();
    // Walk the interior of the field, excluding the surrounding quotes.
    for (int i = 1; i < last; i++) {
        char current = value.charAt(i);
        if (current == DOUBLE_QUOTE) {
            if (isDoubleQuote(value.charAt(i + 1)) && (i + 1) != last) {
                // Followed by a double-quote but not the last character
                // Just skip the next double-quote
                i++;
            } else {
                // Not followed by a double-quote or the following double-quote is the last character
                throw newInvalidEscapedCsvFieldException(value, i);
            }
        }
        unescaped.append(current);
    }
    return unescaped.toString();
}
|
// Covers empty input, nested escaped quotes, unquoted pass-through, and
// quoted fields containing CR, LF and commas.
@Test
public void testUnescapeCsv() {
    assertEquals("", unescapeCsv(""));
    assertEquals("\"", unescapeCsv("\"\"\"\""));
    assertEquals("\"\"", unescapeCsv("\"\"\"\"\"\""));
    assertEquals("\"\"\"", unescapeCsv("\"\"\"\"\"\"\"\""));
    assertEquals("\"netty\"", unescapeCsv("\"\"\"netty\"\"\""));
    assertEquals("netty", unescapeCsv("netty"));
    assertEquals("netty", unescapeCsv("\"netty\""));
    assertEquals("\r", unescapeCsv("\"\r\""));
    assertEquals("\n", unescapeCsv("\"\n\""));
    assertEquals("hello,netty", unescapeCsv("\"hello,netty\""));
}
|
/**
 * Determines existence by probing the file's attributes: a successful lookup
 * means the file exists, while a NotfoundException means it does not. Other
 * backend failures propagate as BackgroundException.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    try {
        new DriveAttributesFinderFeature(session, fileid).find(file, listener);
    }
    catch(NotfoundException e) {
        return false;
    }
    return true;
}
|
// Exercises the existence check across the file lifecycle: present after touch,
// absent after trashing, present again via the trashed listing entry, and
// present after recreating the file; cleans everything up at the end.
@Test
public void testFind() throws Exception {
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final Path folder = new DriveDirectoryFeature(session, fileid).mkdir(
        new Path(DriveHomeFinderService.MYDRIVE_FOLDER, UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path file = new DriveTouchFeature(session, fileid).touch(
        new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final String id = file.attributes().getFileId();
    assertTrue(new DriveFindFeature(session, fileid).find(file));
    new DriveTrashFeature(session, fileid).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new DefaultFindFeature(session).find(file));
    assertFalse(new DriveFindFeature(session, fileid).find(file));
    // When searching with version "2", find trashed file
    final Path trashed = new DriveListService(session, fileid).list(folder, new DisabledListProgressListener()).find(new SimplePathPredicate(file));
    assertNotNull(trashed);
    assertEquals(id, trashed.attributes().getFileId());
    assertTrue(new DefaultFindFeature(session).find(trashed));
    assertTrue(new DriveFindFeature(session, fileid).find(trashed));
    // Recreate file
    final Path version2 = new DriveTouchFeature(session, fileid).touch(file, new TransferStatus());
    assertTrue(new DefaultFindFeature(session).find(version2));
    assertTrue(new DriveFindFeature(session, fileid).find(version2));
    assertEquals(version2.attributes(), new DriveAttributesFinderFeature(session, fileid).find(version2));
    new DriveDeleteFeature(session, fileid).delete(Arrays.asList(version2, folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
/**
 * Constant-folds bitShiftRightLogical for BIGINT operands: applies Java's
 * unsigned right shift (>>>) of the first operand by the second.
 */
@ConstantFunction(name = "bitShiftRightLogical", argTypes = {BIGINT, BIGINT}, returnType = BIGINT)
public static ConstantOperator bitShiftRightLogicalBigint(ConstantOperator first, ConstantOperator second) {
    return ConstantOperator.createBigint(first.getBigint() >>> second.getBigint());
}
|
// 100 >>> 3 == 12.
@Test
public void bitShiftRightLogicalBigint() {
    assertEquals(12, ScalarOperatorFunctions.bitShiftRightLogicalBigint(O_BI_100, O_BI_3).getBigint());
}
|
/**
 * Finishes the span for an outgoing HTTP server response, delegating to
 * handleFinish.
 */
public void handleSend(HttpServerResponse response, Span span) {
    handleFinish(response, span);
}
|
// Even when the response cannot be unwrapped, the span must still be finished
// and no unexpected interactions may occur on it.
@Test void handleSend_finishesSpanEvenIfUnwrappedNull() {
    brave.Span span = mock(brave.Span.class);
    when(span.context()).thenReturn(context);
    when(span.customizer()).thenReturn(span);
    handler.handleSend(response, span);
    verify(span).isNoop();
    verify(span).context();
    verify(span).customizer();
    verify(span).finish();
    verifyNoMoreInteractions(span);
}
|
/**
 * Processes an incoming rule-engine message: optionally acks it first
 * (ackIfNeeded), publishes it asynchronously, then reports success or failure
 * back to the rule engine via the async callback.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    var tbMsg = ackIfNeeded(ctx, msg);
    withCallback(publishMessageAsync(ctx, tbMsg),
            m -> tellSuccess(ctx, m),
            t -> tellFailure(ctx, processException(tbMsg, t), t));
}
|
// With forceAck disabled, a failure during async publishing must be reported
// via tellFailure (never enqueued for tellNext), with the error recorded in
// the message metadata and the original throwable propagated.
@Test
void givenForceAckIsFalseAndErrorOccursDuringProcessingRequest_whenOnMsg_thenTellFailure() {
    ReflectionTestUtils.setField(node, "forceAck", false);
    ListeningExecutor listeningExecutor = mock(ListeningExecutor.class);
    given(ctxMock.getExternalCallExecutor()).willReturn(listeningExecutor);
    String errorMsg = "Something went wrong";
    ListenableFuture<TbMsg> failedFuture = Futures.immediateFailedFuture(new RuntimeException(errorMsg));
    given(listeningExecutor.executeAsync(any(Callable.class))).willReturn(failedFuture);
    TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, DEVICE_ID, TbMsgMetaData.EMPTY, TbMsg.EMPTY_JSON_OBJECT);
    node.onMsg(ctxMock, msg);
    then(ctxMock).should(never()).enqueueForTellNext(any(), any(String.class));
    ArgumentCaptor<TbMsg> actualMsgCaptor = ArgumentCaptor.forClass(TbMsg.class);
    ArgumentCaptor<Throwable> throwableCaptor = ArgumentCaptor.forClass(Throwable.class);
    then(ctxMock).should().tellFailure(actualMsgCaptor.capture(), throwableCaptor.capture());
    TbMsg actualMsg = actualMsgCaptor.getValue();
    assertThat(actualMsg)
            .usingRecursiveComparison()
            .ignoringFields("metaData", "ctx")
            .isEqualTo(msg);
    assertThat(actualMsg.getMetaData().getData())
            .hasFieldOrPropertyWithValue("error", RuntimeException.class + ": " + errorMsg);
    assertThat(throwableCaptor.getValue()).isInstanceOf(RuntimeException.class).hasMessage(errorMsg);
    verifyNoMoreInteractions(ctxMock, snsClientMock);
}
|
/**
 * Test-only accessor for the lazily supplied S3 client.
 */
@VisibleForTesting
S3Client getS3Client() {
    return this.s3Client.get();
}
|
// With path-style access enabled, generated URLs must place the bucket in the
// path (https://host/bucket/key) rather than as a subdomain.
@Test
public void testGetPathStyleAccessEnabled() throws URISyntaxException {
    S3FileSystem s3FileSystem = new S3FileSystem(s3ConfigWithPathStyleAccessEnabled("s3"));
    URL s3Url =
        s3FileSystem
            .getS3Client()
            .utilities()
            .getUrl(GetUrlRequest.builder().bucket("bucket").key("file").build());
    assertEquals("https://s3.us-west-1.amazonaws.com/bucket/file", s3Url.toURI().toString());
}
|
/**
 * Returns the permissions recorded as requested (violations) for the given
 * application, or an empty immutable set when none were recorded.
 */
@Override
public Set<Permission> getRequestedPermissions(ApplicationId appId) {
    final Set<Permission> requested = violations.get(appId);
    if (requested == null) {
        return ImmutableSet.of();
    }
    return requested;
}
|
// The violations map seeded in setup must contain the test permission.
@Test
public void testGetRequestedPermissions() {
    Set<Permission> permissions = violations.get(appId);
    assertTrue(permissions.contains(testPermission));
}
|
/**
 * Creates an aggregate operation that computes the arithmetic mean of the long
 * values extracted from the items. The accumulator keeps the item count in
 * value1 and the running sum in value2; all arithmetic is overflow-checked.
 *
 * @param getLongValueFn serializable function extracting the long value from an item
 * @return the averaging aggregate operation (finish: sum / count as double)
 */
@Nonnull
public static <T> AggregateOperation1<T, LongLongAccumulator, Double> averagingLong(
        @Nonnull ToLongFunctionEx<? super T> getLongValueFn
) {
    checkSerializable(getLongValueFn, "getLongValueFn");
    // count == accumulator.value1
    // sum == accumulator.value2
    return AggregateOperation
            .withCreate(LongLongAccumulator::new)
            .andAccumulate((LongLongAccumulator a, T i) -> {
                // a bit faster check than in addExact, specialized for increment
                if (a.get1() == Long.MAX_VALUE) {
                    throw new ArithmeticException("Counter overflow");
                }
                a.set1(a.get1() + 1);
                a.set2(Math.addExact(a.get2(), getLongValueFn.applyAsLong(i)));
            })
            .andCombine((a1, a2) -> {
                a1.set1(Math.addExact(a1.get1(), a2.get1()));
                a1.set2(Math.addExact(a1.get2(), a2.get2()));
            })
            .andDeduct((a1, a2) -> {
                a1.set1(Math.subtractExact(a1.get1(), a2.get1()));
                a1.set2(Math.subtractExact(a1.get2(), a2.get2()));
            })
            .andExportFinish(a -> (double) a.get2() / a.get1());
}
|
// Accumulating past Long.MAX_VALUE in the running sum must fail fast with
// ArithmeticException instead of silently overflowing.
@Test
public void when_averagingLong_sumTooLarge_then_exception() {
    // Given
    AggregateOperation1<Long, LongLongAccumulator, Double> aggrOp = averagingLong(Long::longValue);
    LongLongAccumulator acc = aggrOp.createFn().get();
    // When
    BiConsumerEx<? super LongLongAccumulator, ? super Long> biConsumerEx = aggrOp.accumulateFn();
    biConsumerEx.accept(acc, Long.MAX_VALUE);
    assertThrows(ArithmeticException.class, () -> biConsumerEx.accept(acc, 1L));
}
|
/**
 * Registers a glue/step class with this factory. Duplicate registrations are
 * no-ops. Classes carrying component annotations are rejected, and at most one
 * registered class may carry @CucumberContextConfiguration (tracked in
 * withCucumberContextConfiguration).
 *
 * @return always true (registration either succeeds or throws)
 */
@Override
public boolean addClass(final Class<?> stepClass) {
    if (stepClasses.contains(stepClass)) {
        return true;
    }
    checkNoComponentAnnotations(stepClass);
    if (hasCucumberContextConfiguration(stepClass)) {
        checkOnlyOneClassHasCucumberContextConfiguration(stepClass);
        withCucumberContextConfiguration = stepClass;
    }
    stepClasses.add(stepClass);
    return true;
}
|
// Even when starting the factory fails (broken application context), stop()
// must still succeed so resources can be cleaned up.
@Test
void shouldBeStoppableWhenFacedWithFailedApplicationContext() {
    final ObjectFactory factory = new SpringFactory();
    factory.addClass(FailedTestInstanceCreation.class);
    assertThrows(CucumberBackendException.class, factory::start);
    assertDoesNotThrow(factory::stop);
}
|
/**
 * Decodes a Base32 string into raw bytes using the shared codec instance.
 */
public static byte[] decode(String base32) {
    return Base32Codec.INSTANCE.decode(base32);
}
|
// Decoding a Base32 string of multi-byte (CJK) text must reproduce the original string.
@Test
public void decodeTest(){
    String a = "伦家是一个非常长的字符串";
    String decodeStr = Base32.decodeStr("4S6KNZNOW3TJRL7EXCAOJOFK5GOZ5ZNYXDUZLP7HTKCOLLMX46WKNZFYWI");
    assertEquals(a, decodeStr);
}
|
/**
 * Test hook: forces the subpartition to treat buffered partial data as stale,
 * so it is cleaned up before serving further reads.
 */
@VisibleForTesting
void setIsPartialBufferCleanupRequired() {
    isPartialBufferCleanupRequired = true;
}
|
// After flagging partial-buffer cleanup with no pending data, the next poll
// yields nothing, and a subsequently emitted record is served intact.
@TestTemplate
void testSkipPartialDataEndsInBufferWithNoMoreData() throws Exception {
    final BufferWritingResultPartition writer = createResultPartition();
    final PipelinedApproximateSubpartition subpartition =
        getPipelinedApproximateSubpartition(writer);
    writer.emitRecord(toByteBuffer(0, 1, 2, 3, 42), 0);
    assertContent(requireNonNull(subpartition.pollBuffer()).buffer(), null, 0, 1, 2, 3);
    subpartition.setIsPartialBufferCleanupRequired();
    assertThat(subpartition.pollBuffer()).isNull();
    writer.emitRecord(toByteBuffer(8, 9), 0);
    assertContent(requireNonNull(subpartition.pollBuffer()).buffer(), null, 8, 9);
}
|
/**
 * Returns true when the throwable itself, or any throwable in its causal
 * chain, is an instance of (or subtype of) the given cause type.
 */
public static boolean hasCauseOf(Throwable t, Class<? extends Throwable> causeType) {
    for (Throwable link : Throwables.getCausalChain(t)) {
        if (causeType.isAssignableFrom(link.getClass())) {
            return true;
        }
    }
    return false;
}
|
// A causal chain containing only RuntimeException/IllegalArgumentException
// must not match IOException.
@Test
public void hasCauseOf_returnsFalseIfNoCauseOfTheProvidedTypeFound() {
    assertThat(ExceptionUtils.hasCauseOf(
        new RuntimeException("parent", new RuntimeException("asdasd", new IllegalArgumentException())), IOException.class)).isFalse();
}
|
/**
 * Starts a fluent assertion over the double array using approximate
 * (tolerance-based) element comparison.
 */
public DoubleArrayAsIterable usingTolerance(double tolerance) {
    return new DoubleArrayAsIterable(tolerance(tolerance), iterableSubject());
}
|
// Tolerance-based comparison cannot match infinity, so contains(Infinity)
// must fail even when the array literally holds POSITIVE_INFINITY.
@Test
public void usingTolerance_contains_failureWithInfinity() {
    expectFailureWhenTestingThat(array(1.1, POSITIVE_INFINITY, 3.3))
        .usingTolerance(DEFAULT_TOLERANCE)
        .contains(POSITIVE_INFINITY);
    assertFailureKeys("value of", "expected to contain", "testing whether", "but was");
    assertFailureValue("expected to contain", "Infinity");
    assertFailureValue("but was", "[1.1, Infinity, 3.3]");
}
|
/**
 * Prefixes the URL with the given scheme unless it already starts with
 * "http://" or "https://", in which case it is returned unchanged.
 *
 * @param url    the URL, possibly without a scheme
 * @param scheme the scheme to prepend (without "://")
 * @return the URL guaranteed to carry an http(s) scheme prefix
 */
public static String appendScheme(final String url, final String scheme) {
    final boolean alreadyHasScheme = url.startsWith("http://") || url.startsWith("https://");
    return alreadyHasScheme ? url : scheme + "://" + url;
}
|
// Scheme-less URLs receive the requested scheme; URLs that already carry an
// http(s) scheme are returned unchanged regardless of the requested scheme.
@Test
void appendScheme() {
    String uri = UriUtils.appendScheme("example.com", "http");
    assertEquals("http://example.com", uri);
    uri = UriUtils.appendScheme("example.com", "https");
    assertEquals("https://example.com", uri);
    uri = UriUtils.appendScheme("https://example.com", "http");
    assertEquals("https://example.com", uri);
    assertNotEquals("http://example.com", uri);
    uri = UriUtils.appendScheme("http://example.com", "https");
    assertEquals("http://example.com", uri);
    assertNotEquals("https://example.com", uri);
}
|
/** Delegates trace-level logging straight to the wrapped Commons Logging instance. */
@Override
public void trace(String msg) {
    logger.trace(msg);
}
|
// Verifies that the adapter forwards trace(msg) unchanged to the underlying Log.
@Test
public void testTrace() {
    Log mockLog = mock(Log.class);
    InternalLogger logger = new CommonsLogger(mockLog, "foo");
    logger.trace("a");
    verify(mockLog).trace("a");
}
|
/**
 * Creates a supplier for a persistent, RocksDB-backed versioned key-value store.
 *
 * @param name             store name; must not be null
 * @param historyRetention how long old record versions remain queryable; must be
 *                         a valid, non-negative millisecond duration
 * @throws NullPointerException     if {@code name} is null
 * @throws IllegalArgumentException if {@code historyRetention} is invalid or negative
 */
public static VersionedBytesStoreSupplier persistentVersionedKeyValueStore(final String name,
                                                                           final Duration historyRetention) {
    Objects.requireNonNull(name, "name cannot be null");
    final String hrMsgPrefix = prepareMillisCheckFailMsgPrefix(historyRetention, "historyRetention");
    final long historyRetentionMs = validateMillisecondDuration(historyRetention, hrMsgPrefix);
    // validateMillisecondDuration only checks conversion validity; negativity is rejected here.
    if (historyRetentionMs < 0L) {
        throw new IllegalArgumentException("historyRetention cannot be negative");
    }
    return new RocksDbVersionedKeyValueBytesStoreSupplier(name, historyRetentionMs);
}
|
// Both overloads must reject a null store name with the same NPE message.
@Test
public void shouldThrowIfPersistentVersionedKeyValueStoreStoreNameIsNull() {
    Exception e = assertThrows(NullPointerException.class, () -> Stores.persistentVersionedKeyValueStore(null, ZERO));
    assertEquals("name cannot be null", e.getMessage());
    e = assertThrows(NullPointerException.class, () -> Stores.persistentVersionedKeyValueStore(null, ZERO, ofMillis(1)));
    assertEquals("name cannot be null", e.getMessage());
}
|
/**
 * Asserts that the actual map contains at least all entries of {@code expectedMap}
 * (extra entries are allowed); the returned {@link Ordered} can additionally
 * require the expected entries to appear in order.
 */
@CanIgnoreReturnValue
public final Ordered containsAtLeastEntriesIn(Map<?, ?> expectedMap) {
  // Vacuously true: every map contains all entries of the empty map, in order.
  if (expectedMap.isEmpty()) {
    return IN_ORDER;
  }
  return containsEntriesInAnyOrder(expectedMap, /* allowUnexpected= */ true)
      ? new MapInOrder(expectedMap, /* allowUnexpected= */ true, /* correspondence= */ null)
      : ALREADY_FAILED;
}
|
// An empty expectation must pass trivially, both with and without inOrder().
@Test
public void containsAtLeastEmpty() {
    ImmutableMap<String, Integer> actual = ImmutableMap.of("key", 1);
    assertThat(actual).containsAtLeastEntriesIn(ImmutableMap.of());
    assertThat(actual).containsAtLeastEntriesIn(ImmutableMap.of()).inOrder();
}
|
/**
 * Returns all blocks whose sequence hash equals the given hash.
 *
 * The requested hash is copied into the scratch record stored just past the real
 * data (at logical index {@code size}), so the hash-ordered binary search and the
 * follow-up comparisons can read it via {@code isLessByHash(size, ...)}.
 *
 * @throws IllegalArgumentException if the hash does not contain exactly
 *         {@code hashInts} ints
 */
@Override
public Collection<Block> getBySequenceHash(ByteArray sequenceHash) {
    ensureSorted();
    // prepare hash for binary search
    int[] hash = sequenceHash.toIntArray();
    if (hash.length != hashInts) {
        throw new IllegalArgumentException("Expected " + hashInts + " ints in hash, but got " + hash.length);
    }
    // Write the probe hash into the scratch slot after the last real record.
    int offset = size * blockInts;
    for (int i = 0; i < hashInts; i++) {
        blockData[offset++] = hash[i];
    }
    int index = DataUtils.binarySearch(byBlockHash);
    List<Block> result = new ArrayList<>();
    // Collect every consecutive record whose hash matches the probe.
    while (index < size && !isLessByHash(size, index)) {
        // extract block (note that there is no need to extract hash)
        String resourceId = resourceIds[index];
        result.add(createBlock(index, resourceId, sequenceHash));
        index++;
    }
    return result;
}
|
// The index is built for 4-int hashes; probing with a 1-long (2-int) hash must be rejected.
@Test(expected = IllegalArgumentException.class)
public void attempt_to_find_hash_of_incorrect_size() {
    CloneIndex index = new PackedMemoryCloneIndex(4, 1);
    index.getBySequenceHash(new ByteArray(1L));
}
|
/**
 * Returns true when the batch is large enough to hold a full record-batch header
 * and its stored CRC matches the checksum recomputed over the batch contents.
 */
public boolean isValid() {
    return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum();
}
|
// Corrupting a checksummed header field invalidates the stored CRC: isValid()
// must report false and ensureValid() must raise CorruptRecordException.
@Test
public void testInvalidCrc() {
    MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
            Compression.NONE, TimestampType.CREATE_TIME,
            new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
            new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
    ByteBuffer buffer = records.buffer();
    // Overwrite lastOffsetDelta in place, without updating the CRC.
    buffer.putInt(DefaultRecordBatch.LAST_OFFSET_DELTA_OFFSET, 23);
    DefaultRecordBatch batch = new DefaultRecordBatch(buffer);
    assertFalse(batch.isValid());
    assertThrows(CorruptRecordException.class, batch::ensureValid);
}
|
/**
 * Commits the current transaction.
 *
 * beforeCommit hooks always run first. If a transaction is actually open, the
 * commit is routed to the local or distributed transaction manager depending on
 * the transaction type; afterCommit hooks, savepoint cleanup, and session state
 * reset run in the finally block so they happen even when the commit fails.
 */
@Override
public void commit() throws SQLException {
    for (TransactionHook each : transactionHooks) {
        each.beforeCommit(connection.getCachedConnections().values(), getTransactionContext(), ProxyContext.getInstance().getContextManager().getComputeNodeInstanceContext().getLockContext());
    }
    if (connection.getConnectionSession().getTransactionStatus().isInTransaction()) {
        try {
            // Fall back to the local manager when no distributed manager is configured.
            if (TransactionType.LOCAL == TransactionUtils.getTransactionType(getTransactionContext()) || null == distributionTransactionManager) {
                localTransactionManager.commit();
            } else {
                distributionTransactionManager.commit(getTransactionContext().isExceptionOccur());
            }
        } finally {
            for (TransactionHook each : transactionHooks) {
                each.afterCommit(connection.getCachedConnections().values(),
                        getTransactionContext(), ProxyContext.getInstance().getContextManager().getComputeNodeInstanceContext().getLockContext());
            }
            for (Connection each : connection.getCachedConnections().values()) {
                ConnectionSavepointManager.getInstance().transactionFinished(each);
            }
            connection.getConnectionSession().getTransactionStatus().setInTransaction(false);
            connection.getConnectionSession().getConnectionContext().close();
        }
    }
}
|
// A LOCAL transaction commit must go through the local manager and clear the in-transaction flag.
@Test
void assertCommitForLocalTransaction() throws SQLException {
    ContextManager contextManager = mockContextManager(TransactionType.LOCAL);
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    newBackendTransactionManager(TransactionType.LOCAL, true);
    backendTransactionManager.commit();
    verify(transactionStatus).setInTransaction(false);
    verify(localTransactionManager).commit();
}
|
/**
 * Renders the whole result set as a text table: one header row with the column
 * names followed by one row per result, delegating layout to {@link TextTable}.
 */
@Override
public String toString() {
    // Cache the dimensions once instead of re-querying them on every loop iteration
    // (the original inner loops called getColumnCount() repeatedly).
    final int numColumns = getColumnCount();
    TextTable table = new TextTable();
    String[] columnNames = new String[numColumns];
    for (int c = 0; c < numColumns; c++) {
        columnNames[c] = getColumnName(c);
    }
    table.addHeader(columnNames);
    final int numRows = getRowCount();
    for (int r = 0; r < numRows; r++) {
        String[] columnValues = new String[numColumns];
        for (int c = 0; c < numColumns; c++) {
            columnValues[c] = getString(r, c);
        }
        table.addRow(columnValues);
    }
    return table.toString();
}
|
// Smoke test: rendering the result set must produce some non-empty text.
@Test
public void testToString() {
    // Run the test
    final String result = _aggregationResultSetUnderTest.toString();
    // Verify the results
    assertNotEquals("", result);
}
|
/**
 * Replaces the active farming contract (null allowed), persists it via
 * setStoredContract, and recomputes the derived contract state.
 */
public void setContract(@Nullable Produce contract)
{
    this.contract = contract;
    setStoredContract(contract);
    handleContractState();
}
|
// With one diseased and one healthy growing cabbage patch, the contract summary
// must prefer the healthy patch: state GROWING and its completion time.
@Test
public void cabbageContractCabbageGrowingAndCabbageDiseased()
{
    final long unixNow = Instant.now().getEpochSecond();
    final long expectedTime = unixNow + 60;
    // Get the two allotment patches
    final FarmingPatch patch1 = farmingGuildPatches.get(Varbits.FARMING_4773);
    final FarmingPatch patch2 = farmingGuildPatches.get(Varbits.FARMING_4774);
    assertNotNull(patch1);
    assertNotNull(patch2);
    // Specify the two allotment patches
    when(farmingTracker.predictPatch(patch1))
        .thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.DISEASED, 0, 2, 3));
    when(farmingTracker.predictPatch(patch2))
        .thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.GROWING, expectedTime, 2, 3));
    farmingContractManager.setContract(Produce.CABBAGE);
    assertEquals(SummaryState.IN_PROGRESS, farmingContractManager.getSummary());
    // Prefer healthy cabbages
    assertEquals(CropState.GROWING, farmingContractManager.getContractCropState());
    assertEquals(expectedTime, farmingContractManager.getCompletionTime());
}
|
/**
 * Builds a child-first user-code class loader over the given URLs, loading only
 * classes matching {@code alwaysParentFirstPatterns} from the parent, and
 * optionally wraps it with the class-loader-leak safety net.
 */
public static MutableURLClassLoader childFirst(
        URL[] urls,
        ClassLoader parent,
        String[] alwaysParentFirstPatterns,
        Consumer<Throwable> classLoadingExceptionHandler,
        boolean checkClassLoaderLeak) {
    FlinkUserCodeClassLoader classLoader =
            new ChildFirstClassLoader(
                    urls, parent, alwaysParentFirstPatterns, classLoadingExceptionHandler);
    return wrapWithSafetyNet(classLoader, checkClassLoaderLeak);
}
|
// A class matching the parent-first pattern must resolve to the parent's Class
// object no matter how often it is loaded through the child-first loader.
@Test
void testRepeatedParentFirstPatternClass() throws Exception {
    final String className = FlinkUserCodeClassLoadersTest.class.getName();
    final String parentFirstPattern = className.substring(0, className.lastIndexOf('.'));
    final ClassLoader parentClassLoader = getClass().getClassLoader();
    // collect the libraries / class folders with RocksDB related code: the state backend and
    // RocksDB itself
    final URL childCodePath = getClass().getProtectionDomain().getCodeSource().getLocation();
    final URLClassLoader childClassLoader =
            FlinkUserCodeClassLoaders.childFirst(
                    new URL[] {childCodePath},
                    parentClassLoader,
                    new String[] {parentFirstPattern},
                    NOOP_EXCEPTION_HANDLER,
                    true);
    final Class<?> clazz1 = Class.forName(className, false, parentClassLoader);
    final Class<?> clazz2 = Class.forName(className, false, childClassLoader);
    final Class<?> clazz3 = Class.forName(className, false, childClassLoader);
    final Class<?> clazz4 = Class.forName(className, false, childClassLoader);
    assertThat(clazz2).isEqualTo(clazz1);
    assertThat(clazz3).isEqualTo(clazz1);
    assertThat(clazz4).isEqualTo(clazz1);
    childClassLoader.close();
}
|
/**
 * Parses a DICOM DT (date-time) string in the given time zone, recording the
 * detected precision in {@code precision}. Delegates to the full overload with
 * the boolean flag fixed to false (presumably "no ceiling rounding" — confirm
 * against the overload's contract).
 */
public static Date parseDT(TimeZone tz, String s, DatePrecision precision) {
    return parseDT(tz, s, false, precision);
}
|
// Full-precision DT epoch string parses to time 0, with millisecond precision
// detected and no timezone component present.
@Test
public void testParseDT() {
    DatePrecision precision = new DatePrecision();
    assertEquals(0,
            DateUtils.parseDT(tz, "19700101020000.000", precision).getTime());
    assertEquals(Calendar.MILLISECOND, precision.lastField);
    assertFalse(precision.includeTimezone);
}
|
/**
 * Deletes a board: verifies the requesting member is the writer, removes the
 * board row and its uploaded images, then publishes a BoardDeletedEvent.
 */
public void deleteBoardById(final Long boardId, final Long memberId) {
    Board board = findBoard(boardId);
    board.validateWriter(memberId);
    boardRepository.deleteByBoardId(boardId);
    imageUploader.delete(board.getImages());
    Events.raise(new BoardDeletedEvent(boardId));
}
|
// Happy path ("deletes a board"): the writer deleting their own image-less board must not throw.
@Test
void 게시글을_삭제한다() {
    // given
    Board savedBoard = boardRepository.save(게시글_생성_사진없음());
    // when & then
    assertDoesNotThrow(() -> boardService.deleteBoardById(savedBoard.getId(), savedBoard.getWriterId()));
}
|
/** Returns a read-only iterator over all values stored under the given header name. */
@Override
public Iterator<CharSequence> valueIterator(CharSequence name) {
    return new ReadOnlyValueIterator(name);
}
|
// Iterating values of an absent header yields an empty iterator, and next()
// on it must throw NoSuchElementException.
@Test
public void testEmptyValueIterator() {
    Http2Headers headers = newServerHeaders();
    final Iterator<CharSequence> itr = headers.valueIterator("foo");
    assertFalse(itr.hasNext());
    assertThrows(NoSuchElementException.class, new Executable() {
        @Override
        public void execute() {
            itr.next();
        }
    });
}
|
/** This rule attribute enhances no tables; always returns an empty set. */
@Override
public Collection<String> getEnhancedTableNames() {
    return Collections.emptySet();
}
|
// The attribute must report no enhanced tables at all.
@Test
void assertGetEnhancedTableMapper() {
    assertThat(new LinkedList<>(ruleAttribute.getEnhancedTableNames()), is(Collections.emptyList()));
}
|
/**
 * Returns true when the array is absent entirely ({@code null}) or holds no elements.
 */
public static boolean isEmpty(final Object[] array) {
    if (array == null) {
        return true;
    }
    return array.length == 0;
}
|
// nullArr / nothingArr are test fixtures (presumably a null reference and a
// zero-length array — confirm against the enclosing class); both count as empty.
@Test
void testisEmpty() {
    Integer[] arr = new Integer[] {1, 2};
    assertTrue(ArrayUtils.isEmpty(nullArr));
    assertTrue(ArrayUtils.isEmpty(nothingArr));
    assertFalse(ArrayUtils.isEmpty(arr));
}
|
/**
 * Normalizes a path string: strips classpath:/file: prefixes, expands a leading
 * "~" to the user home, unifies separators to "/", collapses "." and ".."
 * segments, and preserves a Windows drive prefix (e.g. "C:") when present.
 * Returns null for null input.
 */
public static String normalize(String path) {
    if (path == null) {
        return null;
    }
    // Compatibility with Windows shared-directory (UNC) paths: if the original
    // path starts with \\, keep it exactly as-is.
    if (path.startsWith("\\\\")) {
        return path;
    }
    // Compatibility with Spring-style classpath paths: strip the prefix, case-insensitively.
    String pathToUse = StrUtil.removePrefixIgnoreCase(path, URLUtil.CLASSPATH_URL_PREFIX);
    // Strip the file: prefix.
    pathToUse = StrUtil.removePrefixIgnoreCase(pathToUse, URLUtil.FILE_URL_PREFIX);
    // Recognize the home-directory form ("~...") and expand it to an absolute path.
    if (StrUtil.startWith(pathToUse, '~')) {
        pathToUse = getUserHomePath() + pathToUse.substring(1);
    }
    // Unify all separators (runs of / or \) to a single forward slash.
    pathToUse = pathToUse.replaceAll("[/\\\\]+", StrUtil.SLASH);
    // Trim leading whitespace; trailing whitespace is legal and kept.
    pathToUse = StrUtil.trimStart(pathToUse);
    // issue#IAB65V strip trailing newline characters.
    pathToUse = StrUtil.trim(pathToUse, 1, (c)->c == '\n' || c == '\r');
    String prefix = StrUtil.EMPTY;
    int prefixIndex = pathToUse.indexOf(StrUtil.COLON);
    if (prefixIndex > -1) {
        // Possibly a Windows-style path (drive letter before the colon).
        prefix = pathToUse.substring(0, prefixIndex + 1);
        if (StrUtil.startWith(prefix, StrUtil.C_SLASH)) {
            // Drop the leading slash of paths like "/C:".
            prefix = prefix.substring(1);
        }
        if (false == prefix.contains(StrUtil.SLASH)) {
            pathToUse = pathToUse.substring(prefixIndex + 1);
        } else {
            // If the prefix contains a slash, this is not a Windows-style path.
            prefix = StrUtil.EMPTY;
        }
    }
    if (pathToUse.startsWith(StrUtil.SLASH)) {
        prefix += StrUtil.SLASH;
        pathToUse = pathToUse.substring(1);
    }
    List<String> pathList = StrUtil.split(pathToUse, StrUtil.C_SLASH);
    List<String> pathElements = new LinkedList<>();
    int tops = 0;
    String element;
    // Walk segments from the end so ".." can cancel the elements preceding it.
    for (int i = pathList.size() - 1; i >= 0; i--) {
        element = pathList.get(i);
        // Only handle non-"." entries, i.e. skip current-directory markers.
        if (false == StrUtil.DOT.equals(element)) {
            if (StrUtil.DOUBLE_DOT.equals(element)) {
                tops++;
            } else {
                if (tops > 0) {
                    // A pending ".." marker swallows this element.
                    tops--;
                } else {
                    // Normal path element found.
                    pathElements.add(0, element);
                }
            }
        }
    }
    // issue#1703@Github
    if (tops > 0 && StrUtil.isEmpty(prefix)) {
        // Only relative paths keep leading ".." markers; absolute paths drop them.
        while (tops-- > 0) {
            // Re-prepend each unresolved ".." found during the walk.
            // Normal path element found.
            pathElements.add(0, StrUtil.DOUBLE_DOT);
        }
    }
    return prefix + CollUtil.join(pathElements, StrUtil.SLASH);
}
|
// normalize must be idempotent: normalizing an already-normalized path changes nothing.
@Test
public void doubleNormalizeTest() {
    final String normalize = FileUtil.normalize("/aa/b:/c");
    final String normalize2 = FileUtil.normalize(normalize);
    assertEquals("/aa/b:/c", normalize);
    assertEquals(normalize, normalize2);
}
|
/**
 * Resolves each NetAddress (host + RPC port) into an InetSocketAddress,
 * preserving list order.
 *
 * @throws UnknownHostException if any host name cannot be resolved
 */
public static InetSocketAddress[] netAddressToSocketAddress(List<NetAddress> request)
    throws UnknownHostException {
  final InetSocketAddress[] result = new InetSocketAddress[request.size()];
  int position = 0;
  for (NetAddress address : request) {
    result[position++] = new InetSocketAddress(
        InetAddress.getByName(address.getHost()), address.getRpcPort());
  }
  return result;
}
|
// A single localhost NetAddress must resolve to one socket address with the same host/port.
@Test
public void netAddressTest() throws Exception {
    List<NetAddress> addressList = Collections.singletonList(
            NetAddress.newBuilder().setHost("localhost").setRpcPort(1).build());
    InetSocketAddress[] inetSocketAddressList = netAddressToSocketAddress(addressList);
    Assert.assertEquals(1, inetSocketAddressList.length);
    Assert.assertEquals("localhost", inetSocketAddressList[0].getHostName());
    Assert.assertEquals(1, inetSocketAddressList[0].getPort());
}
|
/**
 * Translates a raw HTTP error response into a BackgroundException: the status
 * reason becomes the failure message, the buffered entity body (if any) becomes
 * the XML message, and MinIO-specific headers override error code/description
 * before delegating to the S3ServiceException mapper.
 */
public BackgroundException map(HttpResponse response) throws IOException {
    final S3ServiceException failure;
    if(null == response.getEntity()) {
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase());
    }
    else {
        // Buffer the entity so it can be consumed here without losing it.
        EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase(),
                EntityUtils.toString(response.getEntity()));
    }
    failure.setResponseCode(response.getStatusLine().getStatusCode());
    if(response.containsHeader(MINIO_ERROR_CODE)) {
        failure.setErrorCode(response.getFirstHeader(MINIO_ERROR_CODE).getValue());
    }
    if(response.containsHeader(MINIO_ERROR_DESCRIPTION)) {
        failure.setErrorMessage(response.getFirstHeader(MINIO_ERROR_DESCRIPTION).getValue());
    }
    return this.map(failure);
}
|
// A TLS handshake failure caused by a certificate problem maps to ConnectionCanceledException.
@Test
public void testHandshakeFailure() {
    final SSLHandshakeException f = new SSLHandshakeException("f");
    f.initCause(new CertificateException("c"));
    assertEquals(ConnectionCanceledException.class, new S3ExceptionMappingService().map(
            new ServiceException(f)).getClass());
}
|
/**
 * Issues credentials for the requested external service for the authenticated
 * device's account. Fails with INVALID_ARGUMENT for unknown service types, and
 * applies the external-service-credentials rate limit before generating.
 */
@Override
public Mono<GetExternalServiceCredentialsResponse> getExternalServiceCredentials(final GetExternalServiceCredentialsRequest request) {
    final ExternalServiceCredentialsGenerator credentialsGenerator = this.credentialsGeneratorByType
        .get(request.getExternalService());
    if (credentialsGenerator == null) {
        return Mono.error(Status.INVALID_ARGUMENT.asException());
    }
    final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();
    return rateLimiters.forDescriptor(RateLimiters.For.EXTERNAL_SERVICE_CREDENTIALS).validateReactive(authenticatedDevice.accountIdentifier())
        .then(Mono.fromSupplier(() -> {
          final ExternalServiceCredentials externalServiceCredentials = credentialsGenerator
              .generateForUuid(authenticatedDevice.accountIdentifier());
          return GetExternalServiceCredentialsResponse.newBuilder()
              .setUsername(externalServiceCredentials.username())
              .setPassword(externalServiceCredentials.password())
              .build();
        }));
}
|
// Calling without authentication must be rejected with UNAUTHENTICATED.
@Test
public void testUnauthenticatedCall() throws Exception {
    assertStatusUnauthenticated(() -> unauthenticatedServiceStub().getExternalServiceCredentials(
        GetExternalServiceCredentialsRequest.newBuilder()
            .setExternalService(ExternalServiceType.EXTERNAL_SERVICE_TYPE_ART)
            .build()));
}
|
/**
 * Wraps a Flatten instruction node into an OperationNode whose FlattenOperation
 * forwards to the output receivers derived from the node's successors.
 */
OperationNode createFlattenOperation(
    Network<Node, Edge> network, ParallelInstructionNode node, OperationContext context) {
  OutputReceiver[] receivers = getOutputReceivers(network, node);
  return OperationNode.create(new FlattenOperation(receivers, context));
}
|
// Builds a Flatten instruction with two producers and verifies the transform
// produces an OperationNode holding an unstarted FlattenOperation with exactly
// one (empty) output receiver.
@Test
public void testCreateFlattenOperation() throws Exception {
    int producerIndex1 = 1;
    int producerOutputNum1 = 2;
    int producerIndex2 = 0;
    int producerOutputNum2 = 1;
    ParallelInstructionNode instructionNode =
        ParallelInstructionNode.create(
            createFlattenInstruction(
                producerIndex1, producerOutputNum1, producerIndex2, producerOutputNum2, "Flatten"),
            ExecutionLocation.UNKNOWN);
    // Wire a single successor output node into the mocked network.
    when(network.successors(instructionNode))
        .thenReturn(
            ImmutableSet.<Node>of(
                IntrinsicMapTaskExecutorFactory.createOutputReceiversTransform(STAGE, counterSet)
                    .apply(
                        InstructionOutputNode.create(
                            instructionNode.getParallelInstruction().getOutputs().get(0),
                            PCOLLECTION_ID))));
    when(network.outDegree(instructionNode)).thenReturn(1);
    Node operationNode =
        mapTaskExecutorFactory
            .createOperationTransformForParallelInstructionNodes(
                STAGE,
                network,
                options,
                readerRegistry,
                sinkRegistry,
                BatchModeExecutionContext.forTesting(options, counterSet, "testStage"))
            .apply(instructionNode);
    assertThat(operationNode, instanceOf(OperationNode.class));
    assertThat(((OperationNode) operationNode).getOperation(), instanceOf(FlattenOperation.class));
    FlattenOperation flattenOperation =
        (FlattenOperation) ((OperationNode) operationNode).getOperation();
    assertEquals(1, flattenOperation.receivers.length);
    assertEquals(0, flattenOperation.receivers[0].getReceiverCount());
    assertEquals(Operation.InitializationState.UNSTARTED, flattenOperation.initializationState);
}
|
/**
 * Collects the MQTT connection settings of this step as a fixed list of
 * StepOption entries (key, localized label, current value) for the dialog.
 */
List<StepOption> retrieveOptions() {
  return Arrays.asList(
    new StepOption( KEEP_ALIVE_INTERVAL, getString( PKG, "MQTTDialog.Options.KEEP_ALIVE_INTERVAL" ),
      keepAliveInterval ),
    new StepOption( MAX_INFLIGHT, getString( PKG, "MQTTDialog.Options.MAX_INFLIGHT" ), maxInflight ),
    new StepOption( CONNECTION_TIMEOUT, getString( PKG, "MQTTDialog.Options.CONNECTION_TIMEOUT" ),
      connectionTimeout ),
    new StepOption( CLEAN_SESSION, getString( PKG, "MQTTDialog.Options.CLEAN_SESSION" ),
      cleanSession ),
    new StepOption( STORAGE_LEVEL, getString( PKG, "MQTTDialog.Options.STORAGE_LEVEL" ),
      storageLevel ),
    new StepOption( SERVER_URIS, getString( PKG, "MQTTDialog.Options.SERVER_URIS" ), serverUris ),
    new StepOption( MQTT_VERSION, getString( PKG, "MQTTDialog.Options.MQTT_VERSION" ), mqttVersion ),
    new StepOption( AUTOMATIC_RECONNECT, getString( PKG, "MQTTDialog.Options.AUTOMATIC_RECONNECT" ),
      automaticReconnect )
  );
}
|
// After setDefault(), all eight options must be present with empty values,
// non-null labels, and keys from the expected set.
@Test
public void testRetrieveOptions() {
  List<String> keys = Arrays
    .asList( KEEP_ALIVE_INTERVAL, MAX_INFLIGHT, CONNECTION_TIMEOUT, CLEAN_SESSION, STORAGE_LEVEL, SERVER_URIS,
      MQTT_VERSION, AUTOMATIC_RECONNECT );
  MQTTProducerMeta meta = new MQTTProducerMeta();
  meta.setDefault();
  List<StepOption> options = meta.retrieveOptions();
  assertEquals( 8, options.size() );
  for ( StepOption option : options ) {
    assertEquals( "", option.getValue() );
    assertNotNull( option.getText() );
    assertTrue( keys.contains( option.getKey() ) );
  }
}
|
/** Creates a UBlock from an immutable snapshot of the given statements. */
public static UBlock create(List<UStatement> statements) {
    return new AutoValue_UBlock(ImmutableList.copyOf(statements));
}
|
// A UBlock wrapping a System.exit(0) invocation must survive a serialize/deserialize round-trip.
@Test
public void serialization() {
    SerializableTester.reserializeAndAssert(
        UBlock.create(
            UExpressionStatement.create(
                UMethodInvocation.create(
                    UStaticIdent.create(
                        "java.lang.System",
                        "exit",
                        UMethodType.create(UPrimitiveType.VOID, UPrimitiveType.INT)),
                    ULiteral.intLit(0)))));
}
|
/**
 * Returns the index entry registered under the given index name, guarded by the
 * read lock.
 *
 * @throws IllegalArgumentException if no index with that name exists
 * @throws IllegalStateException    if the index exists but is not ready yet
 */
@Override
@NonNull
public IndexEntry getIndexEntry(String name) {
    readLock.lock();
    try {
        var indexDescriptor = findIndexByName(name);
        if (indexDescriptor == null) {
            throw new IllegalArgumentException(
                "No index found for fieldPath [" + name + "], "
                    + "make sure you have created an index for this field.");
        }
        if (!indexDescriptor.isReady()) {
            throw new IllegalStateException(
                "Index [" + name + "] is not ready, "
                    + "Please wait for more time or check the index status.");
        }
        return indexEntries.get(indexDescriptor);
    } finally {
        readLock.unlock();
    }
}
|
// An unknown index name must raise IllegalArgumentException with the exact
// message; a known, ready index resolves to a non-null entry.
@Test
void getIndexEntryTest() {
    var spec = getNameIndexSpec();
    var descriptor = new IndexDescriptor(spec);
    descriptor.setReady(true);
    var indexContainer = new IndexEntryContainer();
    indexContainer.add(new IndexEntryImpl(descriptor));
    var defaultIndexer = new DefaultIndexer(List.of(descriptor), indexContainer);
    assertThatThrownBy(() -> defaultIndexer.getIndexEntry("not-exist"))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("No index found for fieldPath [not-exist], "
            + "make sure you have created an index for this field.");
    assertThat(defaultIndexer.getIndexEntry("metadata.name")).isNotNull();
}
|
/**
 * Writes a single byte into the current buffer, appending a fresh buffer to the
 * list when there is none yet or the last one is full (addBuffer presumably
 * resets {@code _index} — confirm against its implementation).
 */
@Override
public void write(int datum)
{
    if (this._bufferList.peekLast() == null || this._bufferList.getLast().length == this._index)
    {
        // If there's no buffer or its capacity is full, add a new one to the list.
        addBuffer(1);
    }
    this._bufferList.getLast()[this._index++] = (byte) datum;
}
|
// Covers the single-byte write, the bulk array write, and the bounds check
// when a write would exceed MAX_STREAM_SIZE (read via reflection).
@Test
public void testWrite() throws Exception
{
    FastByteArrayOutputStream testStream1 = new FastByteArrayOutputStream();
    testStream1.write(1);
    Assert.assertEquals(testStream1.size(), 1);
    Assert.assertEquals(testStream1.toByteArray().length, 1);
    Assert.assertEquals(testStream1.toByteArray()[0], 0b1);
    FastByteArrayOutputStream testStream2 = new FastByteArrayOutputStream();
    byte[] inputArray = new byte[] {0b1, 0b1, 0b1, 0b1, 0b0, 0b0, 0b0, 0b0};
    testStream2.write(inputArray, 0, inputArray.length);
    Assert.assertEquals(testStream2.size(), inputArray.length);
    Assert.assertEquals(testStream2.toByteArray(), inputArray);
    FastByteArrayOutputStream testStream3 = new FastByteArrayOutputStream();
    Field maxSizeField = FastByteArrayOutputStream.class.getDeclaredField("MAX_STREAM_SIZE");
    maxSizeField.setAccessible(true);
    int maxSize = (int) maxSizeField.get(null);
    byte[] maxArray = new byte[0]; // Don't allocate a real array with maxSize to avoid OOM in test environment.
    Assert.assertThrows(IndexOutOfBoundsException.class, () -> testStream3.write(maxArray, 0, maxSize + 1));
}
|
/**
 * Resolves the partition list of a derived table against its base table:
 * derived partitions win when present, otherwise the source partitions are
 * inherited. Declaring partitions on both sides is only allowed with the
 * EXCLUDING strategy.
 *
 * @throws ValidationException when both sides define partitions and the
 *         strategy is not EXCLUDING
 */
public List<String> mergePartitions(
        MergingStrategy mergingStrategy,
        List<String> sourcePartitions,
        List<String> derivedPartitions) {
    final boolean bothDefined =
            !sourcePartitions.isEmpty() && !derivedPartitions.isEmpty();
    if (bothDefined && mergingStrategy != MergingStrategy.EXCLUDING) {
        throw new ValidationException(
                "The base table already has partitions defined. You might want to specify "
                        + "EXCLUDING PARTITIONS.");
    }
    return derivedPartitions.isEmpty() ? sourcePartitions : derivedPartitions;
}
|
// With no source partitions, the derived table's partitions are used verbatim.
@Test
void mergePartitionsFromDerivedTable() {
    List<String> derivedPartitions = Arrays.asList("col1", "col2");
    List<String> mergePartitions =
            util.mergePartitions(
                    getDefaultMergingStrategies().get(FeatureOption.PARTITIONS),
                    Collections.emptyList(),
                    derivedPartitions);
    assertThat(mergePartitions).isEqualTo(derivedPartitions);
}
|
/**
 * Ensures no other dict-type record already uses the given type string.
 * A blank type is skipped; a matching record is acceptable only when it is the
 * record being updated (same id).
 *
 * @throws ServiceException DICT_TYPE_TYPE_DUPLICATE when the type is taken by a different record
 */
@VisibleForTesting
void validateDictTypeUnique(Long id, String type) {
    if (StrUtil.isEmpty(type)) {
        return;
    }
    DictTypeDO existing = dictTypeMapper.selectByType(type);
    if (existing == null) {
        return;
    }
    // A record with this type exists: duplicate unless it is the record identified by id.
    if (id == null || !existing.getId().equals(id)) {
        throw exception(DICT_TYPE_TYPE_DUPLICATE);
    }
}
|
// A random (unused) type for a random id must pass the uniqueness check.
@Test
public void testValidateDictTypeUnique_success() {
    // invoke; expect no exception
    dictTypeService.validateDictTypeUnique(randomLongId(), randomString());
}
|
/**
 * Builds a filesystem-safe file name from an arbitrary string: accents are
 * stripped, runs of whitespace collapse to a single space, and only characters
 * in {@code validChars} are kept. Falls back to a random 8-character name when
 * nothing survives, and truncates + appends an MD5 suffix when the result would
 * exceed MAX_FILENAME_LENGTH.
 */
public static String generateFileName(String string) {
    string = StringUtils.stripAccents(string);
    StringBuilder buf = new StringBuilder();
    for (int i = 0; i < string.length(); i++) {
        char c = string.charAt(i);
        // Skip a space when it would be leading or would follow another space.
        if (Character.isSpaceChar(c)
                && (buf.length() == 0 || Character.isSpaceChar(buf.charAt(buf.length() - 1)))) {
            continue;
        }
        if (ArrayUtils.contains(validChars, c)) {
            buf.append(c);
        }
    }
    String filename = buf.toString().trim();
    if (TextUtils.isEmpty(filename)) {
        return randomString(8);
    } else if (filename.length() >= MAX_FILENAME_LENGTH) {
        // Keep the name unique after truncation by appending a hash of the full name.
        return filename.substring(0, MAX_FILENAME_LENGTH - MD5_HEX_LENGTH - 1) + "_" + md5(filename);
    } else {
        return filename;
    }
}
|
// Accented characters must be transliterated to their plain ASCII equivalents.
@Test
public void testFeedTitleContainsAccents() {
    String result = FileNameGenerator.generateFileName("Äàáâãå");
    assertEquals("Aaaaaa", result);
}
|
/**
 * Wraps a RequestDispatcher for the given JSP path so its usage is counted.
 *
 * @param path              target JSP path; must not be null
 * @param requestDispatcher dispatcher being wrapped; must not be null
 */
JspWrapper(String path, RequestDispatcher requestDispatcher) {
    super();
    assert path != null;
    assert requestDispatcher != null;
    // when this RequestDispatcher is used, the counter is displayed
    // unless the displayed-counters parameter says otherwise
    JSP_COUNTER.setDisplayed(!COUNTER_HIDDEN);
    JSP_COUNTER.setUsed(true);
    this.path = path;
    this.requestDispatcher = requestDispatcher;
}
|
// Exercises the wrapped request/dispatcher paths: a normal include, a forward
// throwing an Error, a forward throwing a RuntimeException, and a null
// dispatcher lookup; mock expectations are verified where possible.
@Test
public void testJspWrapper() throws ServletException, IOException {
    assertNotNull("getJspCounter", JspWrapper.getJspCounter());
    final ServletContext servletContext = createNiceMock(ServletContext.class);
    final HttpServletRequest request = createNiceMock(HttpServletRequest.class);
    final HttpServletResponse response = createNiceMock(HttpServletResponse.class);
    final RequestDispatcher requestDispatcher = createNiceMock(RequestDispatcher.class);
    final RequestDispatcher requestDispatcherWithError = createNiceMock(
            RequestDispatcher.class);
    final RequestDispatcher requestDispatcherWithException = createNiceMock(
            RequestDispatcher.class);
    final String url1 = "test.jsp";
    final String url2 = "test.jsp?param=test2";
    final String url3 = "test.jsp?param=test3";
    final String url4 = null;
    expect(servletContext.getMajorVersion()).andReturn(5).anyTimes();
    expect(request.getRequestDispatcher(url1)).andReturn(requestDispatcher);
    expect(request.getRequestDispatcher(url2)).andReturn(requestDispatcherWithError);
    requestDispatcherWithError.forward(request, response);
    expectLastCall().andThrow(new UnknownError("erreur dans forward"));
    expect(request.getRequestDispatcher(url3)).andReturn(requestDispatcherWithException);
    requestDispatcherWithException.forward(request, response);
    expectLastCall().andThrow(new IllegalStateException("erreur dans forward"));
    expect(request.getRequestDispatcher(url4)).andReturn(null);
    replay(request);
    replay(response);
    replay(requestDispatcher);
    replay(requestDispatcherWithError);
    replay(requestDispatcherWithException);
    replay(servletContext);
    Parameters.initialize(servletContext);
    final HttpServletRequest wrappedRequest = JspWrapper.createHttpRequestWrapper(request,
            response);
    final RequestDispatcher wrappedRequestDispatcher = wrappedRequest
            .getRequestDispatcher(url1);
    wrappedRequestDispatcher.toString();
    wrappedRequestDispatcher.include(wrappedRequest, response);
    final RequestDispatcher wrappedRequestDispatcher2 = wrappedRequest
            .getRequestDispatcher(url2);
    try {
        wrappedRequestDispatcher2.forward(request, response);
    } catch (final UnknownError e) {
        assertNotNull("ok", e);
    }
    final RequestDispatcher wrappedRequestDispatcher3 = wrappedRequest
            .getRequestDispatcher(url3);
    try {
        wrappedRequestDispatcher3.forward(request, response);
    } catch (final IllegalStateException e) {
        assertNotNull("ok", e);
    }
    final RequestDispatcher wrappedRequestDispatcher4 = wrappedRequest
            .getRequestDispatcher(url4);
    assertNull("getRequestDispatcher(null)", wrappedRequestDispatcher4);
    verify(request);
    verify(response);
    verify(requestDispatcher);
    // verify doesn't work here because an Error is thrown: verify(requestDispatcherWithError);
    verify(requestDispatcherWithException);
    verify(servletContext);
}
|
/**
 * Computes the tile-coordinate rectangle covering the given bounding box at the
 * given zoom level, correcting for wrap-around (antimeridian crossing) by
 * adding the tile-count modulo when a computed extent comes out non-positive.
 */
public static Rect getTilesRect(final BoundingBox pBB,
                                final int pZoomLevel) {
    final int mapTileUpperBound = 1 << pZoomLevel;
    final int right = MapView.getTileSystem().getTileXFromLongitude(pBB.getLonEast(), pZoomLevel);
    final int bottom = MapView.getTileSystem().getTileYFromLatitude(pBB.getLatSouth(), pZoomLevel);
    final int left = MapView.getTileSystem().getTileXFromLongitude(pBB.getLonWest(), pZoomLevel);
    final int top = MapView.getTileSystem().getTileYFromLatitude(pBB.getLatNorth(), pZoomLevel);
    int width = right - left + 1; // handling the modulo
    if (width <= 0) {
        width += mapTileUpperBound;
    }
    int height = bottom - top + 1; // handling the modulo
    if (height <= 0) {
        height += mapTileUpperBound;
    }
    return new Rect(left, top, left + width - 1, top + height - 1);
}
|
// The whole-world bounding box must map to the full tile grid (0..2^zoom-1) at every zoom.
@Test
public void testGetTilesRectWholeWorld() {
    final TileSystem tileSystem = MapView.getTileSystem();
    final BoundingBox box = new BoundingBox( // whole world
            tileSystem.getMaxLatitude(), tileSystem.getMaxLongitude(),
            tileSystem.getMinLatitude(), tileSystem.getMinLongitude());
    for (int zoom = 0; zoom <= TileSystem.getMaximumZoomLevel(); zoom++) {
        final Rect rect = CacheManager.getTilesRect(box, zoom);
        Assert.assertEquals(0, rect.left);
        Assert.assertEquals(0, rect.top);
        final int maxSize = -1 + (1 << zoom);
        Assert.assertEquals(maxSize, rect.bottom);
        Assert.assertEquals(maxSize, rect.right);
    }
}
|
/**
 * Starts the receiver: opens the server socket, schedules the server runner on
 * the context's executor, and marks the component started. Idempotent — a
 * second call is a no-op. Startup failures are reported via addError rather
 * than thrown.
 */
@Override
public void start() {
    if (isStarted()) return;
    try {
        ServerSocket socket = getServerSocketFactory().createServerSocket(
            getPort(), getBacklog(), getInetAddress());
        ServerListener<RemoteReceiverClient> listener = createServerListener(socket);
        runner = createServerRunner(listener, getContext().getScheduledExecutorService());
        runner.setContext(getContext());
        getContext().getScheduledExecutorService().execute(runner);
        super.start();
    }
    catch (Exception ex) {
        addError("server startup error: " + ex, ex);
    }
}
|
// Starting twice must only launch the runner once (idempotent start).
@Test
public void testStartWhenAlreadyStarted() throws Exception {
    appender.start();
    appender.start();
    assertEquals(1, runner.getStartCount());
}
|
/**
 * Converts a decoded Avro value to a JDBC-friendly Java object according to its
 * schema: primitives pass through, ENUM/STRING are stringified, UNION unwraps
 * to its first non-null member type; complex types are unsupported.
 *
 * @throws IllegalArgumentException      for a UNION containing only NULL
 * @throws UnsupportedOperationException for ARRAY/BYTES/FIXED/RECORD/MAP schemas
 */
@VisibleForTesting
static Object convertAvroField(Object avroValue, Schema schema) {
    if (avroValue == null) {
        return null;
    }
    switch (schema.getType()) {
        case NULL:
        case INT:
        case LONG:
        case DOUBLE:
        case FLOAT:
        case BOOLEAN:
            return avroValue;
        case ENUM:
        case STRING:
            return avroValue.toString(); // can be a String or org.apache.avro.util.Utf8
        case UNION:
            // Recurse into the first non-null member of the union.
            for (Schema s : schema.getTypes()) {
                if (s.getType() == Schema.Type.NULL) {
                    continue;
                }
                return convertAvroField(avroValue, s);
            }
            throw new IllegalArgumentException("Found UNION schema but it doesn't contain any type");
        case ARRAY:
        case BYTES:
        case FIXED:
        case RECORD:
        case MAP:
        default:
            throw new UnsupportedOperationException("Unsupported avro schema type=" + schema.getType()
                    + " for value field schema " + schema.getName());
    }
}
|
// An Avro int passes through unchanged (boundary value Integer.MIN_VALUE).
@Test
public void testConvertAvroInt() {
    Object converted = BaseJdbcAutoSchemaSink.convertAvroField(Integer.MIN_VALUE, createFieldAndGetSchema((builder) ->
            builder.name("field").type().intType().noDefault()));
    Assert.assertEquals(converted, Integer.MIN_VALUE);
}
|
/**
 * Creates a Flink StreamExecutionEnvironment from the pipeline options,
 * defaulting files-to-stage to an empty list when unset and forwarding the
 * configured Flink conf directory.
 */
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) {
    return createStreamExecutionEnvironment(
        options,
        MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
        options.getFlinkConfDir());
}
|
// With checkpointing enabled, idle sources must never be shut down
// (shutdown timeout auto-set to Long.MAX_VALUE).
@Test
public void shouldAutoSetIdleSourcesFlagWithCheckpointing() {
    // Checkpointing is enabled, never shut down sources
    FlinkPipelineOptions options = getDefaultPipelineOptions();
    options.setCheckpointingInterval(1000L);
    FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);
    assertThat(options.getShutdownSourcesAfterIdleMs(), is(Long.MAX_VALUE));
}
|
/**
 * Returns the mean of each column of the matrix: column sums accumulated into
 * a copy of the first row, then divided by the row count in place via scale().
 */
public static double[] colMeans(double[][] matrix) {
    // Start the accumulator from a copy of row 0 so the input is untouched.
    double[] sums = matrix[0].clone();
    for (int row = 1; row < matrix.length; row++) {
        double[] current = matrix[row];
        for (int col = 0; col < sums.length; col++) {
            sums[col] += current[col];
        }
    }
    scale(1.0 / matrix.length, sums);
    return sums;
}
|
// Checks column means of a fixed 3x3 matrix against precomputed values (1e-7 tolerance).
@Test
public void testColMeans() {
    System.out.println("colMeans");
    double[][] A = {
        {0.7220180, 0.07121225, 0.6881997},
        {-0.2648886, -0.89044952, 0.3700456},
        {-0.6391588, 0.44947578, 0.6240573}
    };
    double[] r = {-0.06067647, -0.12325383, 0.56076753};
    double[] result = MathEx.colMeans(A);
    for (int i = 0; i < r.length; i++) {
        assertEquals(result[i], r[i], 1E-7);
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.