focal_method (string, length 13–60.9k) | test_case (string, length 25–109k)
|---|---|
@LiteralParameters("x")
@ScalarOperator(INDETERMINATE)
@SqlType(StandardTypes.BOOLEAN)
public static boolean indeterminate(@SqlType("char(x)") Slice value, @IsNull boolean isNull)
{
return isNull;
}
|
@Test
public void testIndeterminate()
{
// A NULL CHAR is indeterminate; a concrete CHAR literal is not.
assertOperator(INDETERMINATE, "CAST(null AS CHAR(3))", BOOLEAN, true);
assertOperator(INDETERMINATE, "CHAR '123'", BOOLEAN, false);
}
|
/**
 * Deletes a named service (within a group) from the naming server via an HTTP DELETE.
 * Returns true only when the server replies with the literal body "ok".
 */
@Override
public boolean deleteService(String serviceName, String groupName) throws NacosException {
NAMING_LOGGER.info("[DELETE-SERVICE] {} deleting service : {} with groupName : {}", namespaceId, serviceName,
groupName);
// 16 = initial capacity hint; only 3 entries are added.
final Map<String, String> params = new HashMap<>(16);
params.put(CommonParams.NAMESPACE_ID, namespaceId);
params.put(CommonParams.SERVICE_NAME, serviceName);
params.put(CommonParams.GROUP_NAME, groupName);
String result = reqApi(UtilAndComs.nacosUrlService, params, HttpMethod.DELETE);
return "ok".equals(result);
}
|
@Test
void testDeleteService() throws Exception {
//given: a mocked REST template injected via reflection so no real HTTP call is made
NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
HttpRestResult<Object> a = new HttpRestResult<Object>();
a.setData("{\"name\":\"service1\",\"groupName\":\"group1\"}");
a.setCode(200);
when(nacosRestTemplate.exchangeForm(any(), any(), any(), any(), any(), any())).thenReturn(a);
final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
nacosRestTemplateField.setAccessible(true);
nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
String serviceName = "service1";
String groupName = "group1";
//when
clientProxy.deleteService(serviceName, groupName);
//then: exactly one DELETE against the service endpoint
verify(nacosRestTemplate, times(1)).exchangeForm(endsWith(UtilAndComs.nacosUrlService), any(), any(), any(),
eq(HttpMethod.DELETE), any());
}
|
/**
 * Deletes a DIY page by id. Throws (via validateDiyPageExists) when the id does not exist.
 */
@Override
public void deleteDiyPage(Long id) {
// Validate existence first so callers get a domain error rather than a silent no-op.
validateDiyPageExists(id);
// Delete the row.
diyPageMapper.deleteById(id);
}
|
@Test
public void testDeleteDiyPage_success() {
// mock data: insert one existing row to delete
DiyPageDO dbDiyPage = randomPojo(DiyPageDO.class);
diyPageMapper.insert(dbDiyPage);// @Sql: insert an existing record first
// prepare the argument
Long id = dbDiyPage.getId();
// call
diyPageService.deleteDiyPage(id);
// verify the record is gone
assertNull(diyPageMapper.selectById(id));
}
|
/** REST endpoint: returns the organization with the given path id as JSON. */
@Operation(summary = "Get single organization")
@GetMapping(value = "{id}", produces = "application/json")
@ResponseBody
public Organization getById(@PathVariable("id") Long id) {
// Delegates entirely to the service layer; not-found handling is the service's concern.
return organizationService.getOrganizationById(id);
}
|
@Test
public void organizationIdNotFound() {
// The controller must propagate the service's NotFoundException unchanged.
when(organizationServiceMock.getOrganizationById(anyLong())).thenThrow(NotFoundException.class);
assertThrows(NotFoundException.class, () -> {
controllerMock.getById(1L);
});
}
|
/** Wraps the inner materialization's non-windowed table in a KsqlMaterializedTable. */
@Override
public MaterializedTable nonWindowed() {
return new KsqlMaterializedTable(inner.nonWindowed());
}
|
@Test
public void shouldCallFilterWithCorrectValuesOnNonWindowedGet() {
// Given: a non-windowed table with a pass-through filter and a stubbed projection
final MaterializedTable table = materialization.nonWindowed();
givenNoopFilter();
when(project.apply(any(), any(), any())).thenReturn(Optional.of(transformed));
// When: the first row is pulled
table.get(aKey, partition).next();
// Then: the filter saw the key, value, and a context carrying the row time
verify(filter).apply(aKey, aValue, new PullProcessingContext(aRowtime));
}
|
/**
 * Manually terminates a step in the latest run of a workflow instance.
 * Rejects the request with MaestroInvalidStatusException when the instance is
 * already in a terminal state; otherwise delegates to the action DAO.
 */
public StepInstanceActionResponse terminate(
String workflowId,
long workflowInstanceId,
String stepId,
User user,
Actions.StepInstanceAction action,
boolean blocking) {
WorkflowInstance instance =
instanceDao.getLatestWorkflowInstanceRun(workflowId, workflowInstanceId);
if (instance.getStatus().isTerminal()) {
throw new MaestroInvalidStatusException(
"Cannot manually %s the step [%s] as the workflow instance %s is in a terminal state [%s]",
action.name(), stepId, instance.getIdentity(), instance.getStatus());
}
return actionDao.terminate(instance, stepId, user, action, blocking);
}
|
@Test
public void testTerminate() {
// In-progress instance: the terminate call is forwarded to the DAO.
when(instance.getStatus()).thenReturn(WorkflowInstance.Status.IN_PROGRESS);
stepActionHandler.terminate("sample-minimal-wf", 1, "job1", user, STOP, true);
verify(actionDao, times(1)).terminate(instance, "job1", user, STOP, true);
// Terminal (FAILED) instance: the call must be rejected.
when(instance.getStatus()).thenReturn(WorkflowInstance.Status.FAILED);
AssertHelper.assertThrows(
"Cannot manually terminate the step",
MaestroInvalidStatusException.class,
"Cannot manually STOP the step [job1] as the workflow instance",
() -> stepActionHandler.terminate("sample-minimal-wf", 1, "job1", user, STOP, true));
}
|
/**
 * Resizes the consume executor's core pool.
 * Silently ignores values outside (0, Short.MAX_VALUE] or not strictly below the
 * configured consume-thread maximum.
 */
@Override
public void updateCorePoolSize(int corePoolSize) {
    // Cheap range checks first; only then consult the consumer configuration.
    if (corePoolSize > 0 && corePoolSize <= Short.MAX_VALUE) {
        if (corePoolSize < this.defaultMQPushConsumer.getConsumeThreadMax()) {
            this.consumeExecutor.setCorePoolSize(corePoolSize);
        }
    }
}
|
@Test
public void testUpdateCorePoolSize() {
// Set to 2, then increment and decrement: the net size should be back at 2.
popService.updateCorePoolSize(2);
popService.incCorePoolSize();
popService.decCorePoolSize();
assertEquals(2, popService.getCorePoolSize());
}
|
/**
 * Returns the values for {@code key}: persisted values (unless the map was cleared
 * or the key was removed in this bundle) followed by values added in this bundle.
 * Fails with IllegalStateException once the state has been closed.
 */
public PrefetchableIterable<V> get(K key) {
checkState(
!isClosed,
"Multimap user state is no longer usable because it is closed for %s",
keysStateRequest.getStateKey());
// Structural value gives a stable, equality-comparable form of the key.
Object structuralKey = mapKeyCoder.structuralValue(key);
KV<K, List<V>> pendingAddValues = pendingAdds.get(structuralKey);
// Limit to the current size so values appended to the same list later are not observed
// by an iterable handed out now.
PrefetchableIterable<V> pendingValues =
pendingAddValues == null
? PrefetchableIterables.fromArray()
: PrefetchableIterables.limit(
pendingAddValues.getValue(), pendingAddValues.getValue().size());
// A clear or per-key remove hides anything persisted; only pending adds remain visible.
if (isCleared || pendingRemoves.containsKey(structuralKey)) {
return pendingValues;
}
return PrefetchableIterables.concat(getPersistedValues(structuralKey, key), pendingValues);
}
|
@Test
public void testGet() throws Exception {
// Back the state with one key (A1) holding persisted values V1, V2.
FakeBeamFnStateClient fakeClient =
new FakeBeamFnStateClient(
ImmutableMap.of(
createMultimapKeyStateKey(),
KV.of(ByteArrayCoder.of(), singletonList(A1)),
createMultimapValueStateKey(A1),
KV.of(StringUtf8Coder.of(), asList("V1", "V2"))));
MultimapUserState<byte[], String> userState =
new MultimapUserState<>(
Caches.noop(),
fakeClient,
"instructionId",
createMultimapKeyStateKey(),
ByteArrayCoder.of(),
StringUtf8Coder.of());
Iterable<String> initValues = userState.get(A1);
userState.put(A1, "V3");
// An iterable obtained before the put must not observe the later addition.
assertArrayEquals(new String[] {"V1", "V2"}, Iterables.toArray(initValues, String.class));
assertArrayEquals(
new String[] {"V1", "V2", "V3"}, Iterables.toArray(userState.get(A1), String.class));
// Unknown key yields an empty iterable, not null.
assertArrayEquals(new String[] {}, Iterables.toArray(userState.get(A2), String.class));
userState.asyncClose();
// After close, any access must fail fast.
assertThrows(IllegalStateException.class, () -> userState.get(A1));
}
|
/** Static factory: creates a Write transform with all-default builder settings. */
public static Write write() {
return new AutoValue_SnsIO_Write.Builder().build();
}
|
@Test
public void testCustomCoder() throws Exception {
final PublishRequest request1 = createSampleMessage("my_first_message");
final TupleTag<PublishResult> results = new TupleTag<>();
final AmazonSNS amazonSnsSuccess = getAmazonSnsMockSuccess();
// MockCoder records what it encodes so we can verify it was actually used.
final MockCoder mockCoder = new MockCoder();
final PCollectionTuple snsWrites =
p.apply(Create.of(request1))
.apply(
SnsIO.write()
.withTopicName(topicName)
.withAWSClientsProvider(new Provider(amazonSnsSuccess))
.withResultOutputTag(results)
.withCoder(mockCoder));
final PCollection<Long> publishedResultsSize =
snsWrites
.get(results)
.apply(MapElements.into(TypeDescriptors.strings()).via(result -> result.getMessageId()))
.apply(Count.globally());
// Exactly one message should have been published successfully.
PAssert.that(publishedResultsSize).containsInAnyOrder(ImmutableList.of(1L));
p.run().waitUntilFinish();
// The custom coder must have been exercised by the pipeline.
assertThat(mockCoder.captured).isNotNull();
}
|
/**
 * Builds an InitWriterConfig from a raw string map (e.g. environment variables).
 * Unrecognized keys are dropped; the remaining entries are parsed and validated
 * against CONFIG_VALUES.
 */
static InitWriterConfig fromMap(Map<String, String> map) {
    // Work on a copy and keep only entries whose keys are known configuration names.
    Map<String, String> recognized = new HashMap<>(map);
    recognized.keySet().retainAll(InitWriterConfig.keyNames());
    // Convert the raw strings into typed, validated configuration values.
    Map<String, Object> parsed = ConfigParameter.define(recognized, CONFIG_VALUES);
    return new InitWriterConfig(parsed);
}
|
@Test
public void testFromMapEmptyEnvVarsThrows() {
// An empty map cannot satisfy the required configuration values.
assertThrows(InvalidConfigurationException.class, () -> InitWriterConfig.fromMap(Map.of()));
}
|
/**
 * Checks every indexer node's max-open-files limit and raises an URGENT
 * ES_OPEN_FILES notification for each node below MINIMUM_OPEN_FILES_LIMIT.
 * When all nodes are fine, any existing ES_OPEN_FILES notification is marked fixed.
 */
@VisibleForTesting
void checkOpenFiles() {
// Short-circuit: if the notification is already active, avoid flooding.
if (notificationExists(Notification.Type.ES_OPEN_FILES)) {
return;
}
boolean allHigher = true;
final Set<NodeFileDescriptorStats> fileDescriptorStats = cluster.getFileDescriptorStats();
for (NodeFileDescriptorStats nodeFileDescriptorStats : fileDescriptorStats) {
final String name = nodeFileDescriptorStats.name();
final String ip = nodeFileDescriptorStats.ip();
final String host = nodeFileDescriptorStats.host();
// -1 acts as a sentinel for "limit unknown"; such nodes are skipped below.
final long maxFileDescriptors = nodeFileDescriptorStats.fileDescriptorMax().orElse(-1L);
if (maxFileDescriptors != -1L && maxFileDescriptors < MINIMUM_OPEN_FILES_LIMIT) {
// Write notification.
final String ipOrHostName = firstNonNull(host, ip);
final Notification notification = notificationService.buildNow()
.addType(Notification.Type.ES_OPEN_FILES)
.addSeverity(Notification.Severity.URGENT)
.addDetail("hostname", ipOrHostName)
.addDetail("max_file_descriptors", maxFileDescriptors);
// publishIfFirst suppresses duplicates; only the first publish logs the warning.
if (notificationService.publishIfFirst(notification)) {
LOG.warn("Indexer node <{}> ({}) open file limit is too low: [{}]. Set it to at least {}.",
name,
ipOrHostName,
maxFileDescriptors,
MINIMUM_OPEN_FILES_LIMIT);
}
allHigher = false;
}
}
// Every node is at or above the limit: clear the notification if one exists.
if (allHigher) {
Notification notification = notificationService.build().addType(Notification.Type.ES_OPEN_FILES);
notificationService.fixed(notification);
}
}
|
@Test
public void preventOpenFilesNotificationFlood() {
// When the notification already exists, the check must return immediately:
// no further notification-service calls and no cluster stats lookup.
when(notificationService.isFirst(Notification.Type.ES_OPEN_FILES)).thenReturn(false);
indexerClusterCheckerThread.checkOpenFiles();
verify(notificationService, times(1)).isFirst(Notification.Type.ES_OPEN_FILES);
verifyNoMoreInteractions(notificationService);
verifyNoMoreInteractions(cluster);
}
|
/**
 * Refreshes the credential fields from the given properties.
 * A field is updated (with surrounding whitespace trimmed) only when the
 * corresponding property is present; absent properties leave the field as-is.
 *
 * The original triplicated block-scoped lookup/trim logic is factored into a
 * single helper to remove the copy-paste duplication.
 */
public void updateContent(Properties prop) {
    String accessKeyValue = trimmedProperty(prop, ACCESS_KEY);
    if (accessKeyValue != null) {
        this.accessKey = accessKeyValue;
    }
    String secretKeyValue = trimmedProperty(prop, SECRET_KEY);
    if (secretKeyValue != null) {
        this.secretKey = secretKeyValue;
    }
    String securityTokenValue = trimmedProperty(prop, SECURITY_TOKEN);
    if (securityTokenValue != null) {
        this.securityToken = securityTokenValue;
    }
}

/** Returns the trimmed value of {@code key} in {@code prop}, or null when absent. */
private static String trimmedProperty(Properties prop, String key) {
    String value = prop.getProperty(key);
    return value == null ? null : value.trim();
}
|
@Test
public void updateContentTest() {
// NOTE(review): this test only exercises updateContent for absence of exceptions;
// it asserts nothing about the resulting field values — consider adding getters checks.
SessionCredentials sessionCredentials = new SessionCredentials();
Properties properties = new Properties();
properties.setProperty(SessionCredentials.ACCESS_KEY,"RocketMQ");
properties.setProperty(SessionCredentials.SECRET_KEY,"12345678");
properties.setProperty(SessionCredentials.SECURITY_TOKEN,"abcd");
sessionCredentials.updateContent(properties);
}
|
/** Builds a match criterion on the IPv4 source address for the given prefix. */
public static Criterion matchIPSrc(IpPrefix ip) {
return new IPCriterion(ip, Type.IPV4_SRC);
}
|
@Test
public void testMatchIPSrcMethod() {
// The factory must produce an IPCriterion of type IPV4_SRC carrying the same prefix.
Criterion matchIpSrc = Criteria.matchIPSrc(ip1);
IPCriterion ipCriterion =
checkAndConvert(matchIpSrc,
Criterion.Type.IPV4_SRC,
IPCriterion.class);
assertThat(ipCriterion.ip(), is(ip1));
}
|
/**
 * Validates that the range [index, index + length) fits within [0, capacity).
 *
 * @throws IndexOutOfBoundsException when any argument is negative or the range
 *         exceeds the capacity
 */
public static void boundsCheck(int capacity, int index, int length) {
    // Written as index > capacity - length (not index + length > capacity) so a huge
    // length cannot overflow into a false pass.
    boolean outOfRange = capacity < 0 || index < 0 || length < 0 || index > capacity - length;
    if (outOfRange) {
        throw new IndexOutOfBoundsException(String.format("index=%d, length=%d, capacity=%d", index, length, capacity));
    }
}
|
@Test(expected = IndexOutOfBoundsException.class)
public void boundsCheck_whenLengthIntegerMax() {
//Testing wrapping does not cause false check: index + length would overflow int,
//so the check must be performed without computing that sum.
ArrayUtils.boundsCheck(0, 10, Integer.MAX_VALUE);
}
|
/**
 * Looks up a service registered for the generic form "Interface&lt;Type&gt;".
 * Returns null when nothing is registered; when an array of candidates was
 * registered, the first element is returned.
 */
public static <T> T getBean(Class<T> interfaceClass, Class typeClass) {
    // Parameterized services are keyed by "InterfaceName<TypeName>".
    String key = interfaceClass.getName() + "<" + typeClass.getName() + ">";
    Object registration = serviceMap.get(key);
    if (registration == null) {
        return null;
    }
    if (registration instanceof Object[]) {
        // Multiple candidates registered: first one wins.
        return (T) Array.get(registration, 0);
    }
    return (T) registration;
}
|
@Test
public void testConstructorWithParameters() {
// The factory should have constructed MImpl with its configured parameter value 5.
MImpl m = (MImpl)SingletonServiceFactory.getBean(M.class);
Assert.assertEquals(5, m.getValue());
}
|
/**
 * Resolves a dependency's local artifact URL from the properties file
 * (paths are resolved relative to baseDir). Falls back to the delegate resolver
 * when the dependency is not listed; fails when neither knows the dependency or
 * when the property lists multiple path-separated files.
 */
@Override
public URL getLocalArtifactUrl(DependencyJar dependency) {
String depShortName = dependency.getShortName();
String pathStr = properties.getProperty(depShortName);
if (pathStr != null) {
// Multiple files per dependency are not supported by this resolver.
if (pathStr.indexOf(File.pathSeparatorChar) != -1) {
throw new IllegalArgumentException(
"didn't expect multiple files for " + dependency + ": " + pathStr);
}
Path path = baseDir.resolve(Paths.get(pathStr));
try {
return path.toUri().toURL();
} catch (MalformedURLException e) {
// Should not happen for a filesystem path; surface as unchecked.
throw new RuntimeException(e);
}
} else {
if (delegate != null) {
return delegate.getLocalArtifactUrl(dependency);
}
}
throw new RuntimeException("no artifacts found for " + dependency);
}
|
@Test
public void whenRelativePathIsProvidedInProperties_shouldReturnFileUrl() throws Exception {
// A relative path in the properties file must be resolved against the base directory.
DependencyResolver resolver =
new PropertiesDependencyResolver(
propsFile("com.group:example:1.3", new File("path", "1")), mock);
URL url = resolver.getLocalArtifactUrl(exampleDep);
assertThat(url)
.isEqualTo(temporaryFolder.getRoot().toPath().resolve("path").resolve("1").toUri().toURL());
}
|
/**
 * Verifies the current principal owns the transaction. When not the owner and both
 * authentication and authorization are enabled, falls back to a super-user check;
 * otherwise resolves to false. Must be called from the channel's event loop; the
 * continuation is also scheduled back onto it.
 */
private CompletableFuture<Boolean> verifyTxnOwnership(TxnID txnID) {
assert ctx.executor().inEventLoop();
return service.pulsar().getTransactionMetadataStoreService()
.verifyTxnOwnership(txnID, getPrincipal())
.thenComposeAsync(isOwner -> {
if (isOwner) {
return CompletableFuture.completedFuture(true);
}
// Non-owner: only a super-user may proceed, and only when auth is fully enabled.
if (service.isAuthenticationEnabled() && service.isAuthorizationEnabled()) {
return isSuperUser();
} else {
return CompletableFuture.completedFuture(false);
}
}, ctx.executor());
}
|
@Test(timeOut = 30000)
public void sendAddPartitionToTxnResponse() throws Exception {
// Stub the metadata store so ownership and the add-partition call both succeed.
final TransactionMetadataStoreService txnStore = mock(TransactionMetadataStoreService.class);
when(txnStore.getTxnMeta(any())).thenReturn(CompletableFuture.completedFuture(mock(TxnMeta.class)));
when(txnStore.verifyTxnOwnership(any(), any())).thenReturn(CompletableFuture.completedFuture(true));
when(txnStore.addProducedPartitionToTxn(any(TxnID.class), any()))
.thenReturn(CompletableFuture.completedFuture(null));
when(pulsar.getTransactionMetadataStoreService()).thenReturn(txnStore);
svcConfig.setTransactionCoordinatorEnabled(true);
resetChannel();
setChannelConnected();
// requestId=89, txnidLeastBits=1, txnidMostBits=12 — echoed back in the response.
ByteBuf clientCommand = Commands.newAddPartitionToTxn(89L, 1L, 12L,
List.of("tenant/ns/topic1"));
channel.writeInbound(clientCommand);
CommandAddPartitionToTxnResponse response = (CommandAddPartitionToTxnResponse) getResponse();
assertEquals(response.getRequestId(), 89L);
assertEquals(response.getTxnidLeastBits(), 1L);
assertEquals(response.getTxnidMostBits(), 12L);
assertFalse(response.hasError());
assertFalse(response.hasMessage());
channel.finish();
}
|
/**
 * Writes one byte at the current position (or at end-of-file in append mode),
 * advancing the position and refreshing the file's last-modified time.
 * Holds the file's write lock for the duration of the write.
 */
@Override
public synchronized void write(int b) throws IOException {
checkNotClosed();
file.writeLock().lock();
try {
// In append mode, re-read the size each write: another writer may have grown the file.
if (append) {
pos = file.sizeWithoutLocking();
}
file.write(pos++, (byte) b);
file.setLastModifiedTime(fileSystemState.now());
} finally {
file.writeLock().unlock();
}
}
|
@Test
public void testWrite_wholeArray_overwriting() throws IOException {
// Non-append stream: the array write overwrites from position 0,
// leaving the trailing original bytes intact.
JimfsOutputStream out = newOutputStream(false);
addBytesToStore(out, 9, 8, 7, 6, 5, 4, 3);
out.write(new byte[] {1, 2, 3, 4});
assertStoreContains(out, 1, 2, 3, 4, 5, 4, 3);
}
|
/**
 * Test helper: generates a JobGraph from the given StreamGraph using the current
 * thread's context class loader, no job ID (null), and a same-thread executor.
 */
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
return new StreamingJobGraphGenerator(
Thread.currentThread().getContextClassLoader(),
streamGraph,
null,
Runnable::run)
.createJobGraph();
}
|
@Test
void testPartitionTypesInBatchMode() {
// In BATCH mode every exchange — forward, rescale, rebalance, keyBy — must be BLOCKING.
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setRuntimeMode(RuntimeExecutionMode.BATCH);
env.setParallelism(4);
env.disableOperatorChaining();
DataStream<Integer> source = env.fromData(1);
source
// set the same parallelism as the source to make it a FORWARD exchange
.map(value -> value)
.setParallelism(1)
.rescale()
.map(value -> value)
.rebalance()
.map(value -> value)
.keyBy(value -> value)
.map(value -> value)
.sinkTo(new DiscardingSink<>());
JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
assertHasOutputPartitionType(
verticesSorted.get(0) /* source - forward */, ResultPartitionType.BLOCKING);
assertHasOutputPartitionType(
verticesSorted.get(1) /* rescale */, ResultPartitionType.BLOCKING);
assertHasOutputPartitionType(
verticesSorted.get(2) /* rebalance */, ResultPartitionType.BLOCKING);
assertHasOutputPartitionType(
verticesSorted.get(3) /* keyBy */, ResultPartitionType.BLOCKING);
assertHasOutputPartitionType(
verticesSorted.get(4) /* forward - sink */, ResultPartitionType.BLOCKING);
}
|
/** Factory for the "item already exists" error; the key is interpolated into the message. */
public static BadRequestException itemAlreadyExists(String itemKey) {
return new BadRequestException("item already exists for itemKey:%s", itemKey);
}
|
@Test
public void testItemAlreadyExists() {
// The %s placeholder must be replaced with the supplied key.
BadRequestException itemAlreadyExists = BadRequestException.itemAlreadyExists("itemKey");
assertEquals("item already exists for itemKey:itemKey", itemAlreadyExists.getMessage());
}
|
/**
 * Post-transaction cleanup. Fails fast when no global transaction context exists;
 * transaction hooks are cleared only by the Launcher role (the transaction initiator),
 * never by participants.
 */
@Override
public void cleanUp(GlobalTransaction tx) {
if (tx == null) {
throw new EngineExecutionException("Global transaction does not exist. Unable to proceed without a valid global transaction context.",
FrameworkErrorCode.ObjectNotExists);
}
if (tx.getGlobalTransactionRole() == GlobalTransactionRole.Launcher) {
TransactionHookManager.clear();
}
}
|
@Test
public void testCleanUp() {
// NOTE(review): only verifies cleanUp does not throw for a valid transaction;
// it makes no assertion about hook clearing.
MockGlobalTransaction mockGlobalTransaction = new MockGlobalTransaction();
sagaTransactionalTemplate.cleanUp(mockGlobalTransaction);
}
|
/**
 * Expands a URI pattern using the given parameters.
 * Template parameters may use the WADL form "{name}" or the Swagger form "/:name";
 * both are substituted into the path (path-encoded). Any parameter matching neither
 * form is appended as a query parameter (value-encoded). Returns the pattern
 * unchanged when {@code parameters} is null.
 */
public static String buildURIFromPattern(String pattern, List<Parameter> parameters) {
    if (parameters != null) {
        // Browse parameters and choose between template or query one.
        for (Parameter parameter : parameters) {
            String wadlTemplate = "{" + parameter.getName() + "}";
            String swaggerTemplate = "/:" + parameter.getName();
            if (pattern.contains(wadlTemplate)) {
                // WADL-style template parameter.
                pattern = pattern.replace(wadlTemplate, encodePath(parameter.getValue()));
            } else if (pattern.contains(swaggerTemplate)) {
                // Swagger-style template parameter. Replace the slash-anchored form
                // "/:name" rather than the bare ":name": the original unanchored replace
                // corrupted any other token that merely contained ":name".
                // NOTE(review): "/:year" still matches the prefix of "/:yearly"; full
                // boundary matching would need a regex — confirm whether such
                // prefix-colliding names occur in practice.
                pattern = pattern.replace(swaggerTemplate, "/" + encodePath(parameter.getValue()));
            } else {
                // Query parameter: "?" starts the query string, "&" separates later
                // entries. Keying on the presence of "?" (instead of the original
                // contains("=") heuristic) avoids emitting "?&..." when the path
                // itself happens to contain an "=".
                String separator = pattern.contains("?") ? "&" : "?";
                pattern += separator + parameter.getName() + "=" + encodeValue(parameter.getValue());
            }
        }
    }
    return pattern;
}
|
@Test
void testBuildURIFromPattern() {
// Prepare a bunch of parameters: two path templates (year, month)
// and two that should become query parameters (status, page).
Parameter yearParam = new Parameter();
yearParam.setName("year");
yearParam.setValue("2017");
Parameter monthParam = new Parameter();
monthParam.setName("month");
monthParam.setValue("08");
Parameter statusParam = new Parameter();
statusParam.setName("status");
statusParam.setValue("published");
Parameter pageParam = new Parameter();
pageParam.setName("page");
pageParam.setValue("0");
List<Parameter> parameters = new ArrayList<>();
parameters.add(yearParam);
parameters.add(monthParam);
parameters.add(statusParam);
parameters.add(pageParam);
// Test with old wadl like template format.
String pattern = "http://localhost:8080/blog/{year}/{month}";
String uri = URIBuilder.buildURIFromPattern(pattern, parameters);
// Query parameter order is not guaranteed, so accept either ordering.
assertTrue("http://localhost:8080/blog/2017/08?page=0&status=published".equals(uri)
|| "http://localhost:8080/blog/2017/08?status=published&page=0".equals(uri));
// Test with new swagger like template format.
pattern = "http://localhost:8080/blog/:year/:month";
uri = URIBuilder.buildURIFromPattern(pattern, parameters);
assertTrue("http://localhost:8080/blog/2017/08?page=0&status=published".equals(uri)
|| "http://localhost:8080/blog/2017/08?status=published&page=0".equals(uri));
}
|
/**
 * Loads the collection, then — when the "bookmarks.folder.monitor" preference is on —
 * registers a filtered filesystem watcher on the bookmarks folder.
 * Monitor registration failure is surfaced as LocalAccessDeniedException.
 */
@Override
public void load() throws AccessDeniedException {
super.load();
if(preferences.getBoolean("bookmarks.folder.monitor")) {
try {
monitor.register(folder, FILE_FILTER, this);
}
catch(IOException e) {
throw new LocalAccessDeniedException(String.format("Failure monitoring directory %s", folder.getName()), e);
}
}
}
|
@Test
public void testLoad() throws Exception {
// Load an empty collection from a fresh temp directory, then verify adds and
// item-change notifications do not alter the collection size.
final Local source = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
final MonitorFolderHostCollection c = new MonitorFolderHostCollection(source);
c.load();
final Host bookmark = new Host(new TestProtocol());
c.add(bookmark);
assertEquals(1, c.size());
bookmark.setLabels(Collections.singleton("l"));
c.collectionItemChanged(bookmark);
assertEquals(1, c.size());
}
|
/** Returns the (memoized) parsed signature for the given DoFn class. */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
// computeIfAbsent caches per class, so each DoFn class is parsed at most once.
return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
|
@Test
public void testUsageOfTimerDeclaredInSuperclass() throws Exception {
// Referencing a timer declared in a superclass from a subclass @ProcessElement
// must be rejected, with an error that mentions timers but not state.
thrown.expect(IllegalArgumentException.class);
thrown.expectMessage("process");
thrown.expectMessage("declared in a different class");
thrown.expectMessage(DoFnDeclaringTimerAndCallback.TIMER_ID);
thrown.expectMessage(not(mentionsState()));
thrown.expectMessage(mentionsTimers());
DoFnSignatures.getSignature(
new DoFnDeclaringTimerAndCallback() {
@ProcessElement
public void process(
ProcessContext context,
@TimerId(DoFnDeclaringTimerAndCallback.TIMER_ID) Timer timer) {}
}.getClass());
}
|
/**
 * Deployment-phase processor that activates OIDC for a web deployment.
 * Skips non-war units. When the subsystem secures the deployment, its config is
 * attached; otherwise, for deployments declaring the OIDC auth method, the
 * deployment's own WEB-INF/oidc.json is validated (rejecting unsupported
 * attributes) and attached. OIDC deployments additionally get the configuration
 * servlet listener and the virtual-mechanism-factory markers.
 */
@Override
public void deploy(DeploymentPhaseContext phaseContext) throws DeploymentUnitProcessingException {
DeploymentUnit deploymentUnit = phaseContext.getDeploymentUnit();
WarMetaData warMetaData = deploymentUnit.getAttachment(WarMetaData.ATTACHMENT_KEY);
// Not a web deployment: nothing to do.
if (warMetaData == null) {
return;
}
JBossWebMetaData webMetaData = warMetaData.getMergedJBossWebMetaData();
if (webMetaData == null) {
webMetaData = new JBossWebMetaData();
warMetaData.setMergedJBossWebMetaData(webMetaData);
}
OidcConfigService configService = OidcConfigService.getInstance();
boolean subsystemConfigured = configService.isSecureDeployment(deploymentUnit) && configService.isDeploymentConfigured(deploymentUnit);
if (subsystemConfigured) {
addOidcAuthDataAndConfig(phaseContext, configService, webMetaData);
}
LoginConfigMetaData loginConfig = webMetaData.getLoginConfig();
if (loginConfig != null && OIDC_AUTH_METHOD.equals(loginConfig.getAuthMethod())) {
if (! subsystemConfigured) {
// check for unsupported attributes in the deployment's oidc.json file
if (deploymentUnit.getAttachment(Attachments.DEPLOYMENT_ROOT) != null) {
VirtualFile oidcConfigurationFile = deploymentUnit.getAttachment(Attachments.DEPLOYMENT_ROOT).getRoot().getChild("WEB-INF/oidc.json");
if (oidcConfigurationFile.exists()) {
try (InputStream is = oidcConfigurationFile.openStream()) {
String oidcConfigString = readFromInputStream(is);
// NOTE(review): a plain substring match — an attribute name appearing in
// a JSON string value would also trigger the rejection; confirm intended.
for (SimpleAttributeDefinition attribute : SecureDeploymentDefinition.NON_DEFAULT_ATTRIBUTES) {
if ((!deploymentUnit.enables(attribute)) && oidcConfigString.contains(attribute.getName())) {
throw ROOT_LOGGER.unsupportedAttribute(attribute.getName());
}
}
addJSONDataAsContextParam(oidcConfigString, webMetaData);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
}
ListenerMetaData listenerMetaData = new ListenerMetaData();
listenerMetaData.setListenerClass(OidcConfigurationServletListener.class.getName());
webMetaData.getListeners().add(listenerMetaData);
ROOT_LOGGER.tracef("Activating OIDC for deployment %s.", deploymentUnit.getName());
OidcDeploymentMarker.mark(deploymentUnit);
VirtualHttpServerMechanismFactoryMarkerUtility.virtualMechanismFactoryRequired(deploymentUnit);
}
}
|
@Test
public void testDeployLoginConfigWithRealmAndNullAuthMethod() throws Exception {
// A login-config with a realm but no auth method must not trip an NPE in deploy().
DeploymentUnit unit = mock(DeploymentUnit.class);
doReturn(true).when(unit).hasAttachment(WarMetaData.ATTACHMENT_KEY);
doReturn(new NonNullRealmNullEverythingElseWarMetaData()).when(unit).getAttachment(WarMetaData.ATTACHMENT_KEY);
DeploymentPhaseContext context = mock(DeploymentPhaseContext.class);
doReturn(unit).when(context).getDeploymentUnit();
new OidcActivationProcessor().deploy(context);
assertTrue("Expect to succeed and reach this point", true);
}
|
/**
 * Returns the current map of shardId -> shard iterator for the stream.
 * On first use (or after the cache empties), describes the stream, rebuilds the
 * shard tree and resolves fresh iterators. Afterwards, active leaf shards keep
 * their cached iterator, while shards that have split/merged are replaced by
 * TRIM_HORIZON iterators for their children so no records are skipped.
 */
Map<String, String> getShardIterators() {
if (streamArn == null) {
streamArn = getStreamArn();
}
// Either return cached ones or get new ones via GetShardIterator requests.
if (currentShardIterators.isEmpty()) {
DescribeStreamResponse streamDescriptionResult
= getClient().describeStream(DescribeStreamRequest.builder().streamArn(streamArn).build());
shardTree.populate(streamDescriptionResult.streamDescription().shards());
StreamIteratorType streamIteratorType = getEndpoint().getConfiguration().getStreamIteratorType();
currentShardIterators = getCurrentShardIterators(streamIteratorType);
} else {
Map<String, String> childShardIterators = new HashMap<>();
for (Entry<String, String> currentShardIterator : currentShardIterators.entrySet()) {
List<Shard> children = shardTree.getChildren(currentShardIterator.getKey());
if (children.isEmpty()) { // This is still an active leaf shard, reuse it.
childShardIterators.put(currentShardIterator.getKey(), currentShardIterator.getValue());
} else {
for (Shard child : children) { // Inactive shard, move down to its children.
// TRIM_HORIZON starts at the child's oldest record to avoid data loss.
String shardIterator = getShardIterator(child.shardId(), ShardIteratorType.TRIM_HORIZON);
childShardIterators.put(child.shardId(), shardIterator);
}
}
}
currentShardIterators = childShardIterators;
}
LOG.trace("Shard Iterators are: {}", currentShardIterators);
return currentShardIterators;
}
|
@Test
void shouldReturnRootShardIterator() throws Exception {
// FROM_START should resolve a single iterator for the root shard.
component.getConfiguration().setStreamIteratorType(StreamIteratorType.FROM_START);
Ddb2StreamEndpoint endpoint = (Ddb2StreamEndpoint) component.createEndpoint("aws2-ddbstreams://myTable");
ShardIteratorHandler underTest = new ShardIteratorHandler(endpoint);
endpoint.doStart();
assertEquals(Collections.singletonMap(SHARD_0.shardId(), SHARD_ITERATOR_0), underTest.getShardIterators());
}
|
/**
 * Decodes a little-endian float of the given format starting at {@code offset}.
 * Reserved bit patterns are mapped to their special values (+Infinity, NaN,
 * -Infinity) before normal decoding — these patterns match the IEEE-11073
 * medical-float reserved values (presumably; confirm against the spec).
 * Returns null when the value does not fit in the buffer or the format is unknown.
 */
@Nullable
public Float getFloatValue(@FloatFormat final int formatType,
@IntRange(from = 0) final int offset) {
if ((offset + getTypeLen(formatType)) > size()) return null;
switch (formatType) {
case FORMAT_SFLOAT -> {
// 16-bit: high byte then low byte checked against the reserved patterns
// 0x07FE (+INF), 0x07FF/0x0800/0x0801 (NaN/NRes), 0x0802 (-INF).
if (mValue[offset + 1] == 0x07 && mValue[offset] == (byte) 0xFE)
return Float.POSITIVE_INFINITY;
if ((mValue[offset + 1] == 0x07 && mValue[offset] == (byte) 0xFF) ||
(mValue[offset + 1] == 0x08 && mValue[offset] == 0x00) ||
(mValue[offset + 1] == 0x08 && mValue[offset] == 0x01))
return Float.NaN;
if (mValue[offset + 1] == 0x08 && mValue[offset] == 0x02)
return Float.NEGATIVE_INFINITY;
return bytesToFloat(mValue[offset], mValue[offset + 1]);
}
case FORMAT_FLOAT -> {
// 32-bit: reserved patterns live in the top three bytes (exponent byte 0x00).
if (mValue[offset + 3] == 0x00) {
if (mValue[offset + 2] == 0x7F && mValue[offset + 1] == (byte) 0xFF) {
if (mValue[offset] == (byte) 0xFE)
return Float.POSITIVE_INFINITY;
if (mValue[offset] == (byte) 0xFF)
return Float.NaN;
} else if (mValue[offset + 2] == (byte) 0x80 && mValue[offset + 1] == 0x00) {
if (mValue[offset] == 0x00 || mValue[offset] == 0x01)
return Float.NaN;
if (mValue[offset] == 0x02)
return Float.NEGATIVE_INFINITY;
}
}
return bytesToFloat(mValue[offset], mValue[offset + 1],
mValue[offset + 2], mValue[offset + 3]);
}
}
// Unknown format type.
return null;
}
|
@Test
public void setValue_FLOAT_nan() {
// NaN must round-trip through the 32-bit FLOAT encoding.
final MutableData data = new MutableData(new byte[4]);
data.setValue(Float.NaN, Data.FORMAT_FLOAT, 0);
final float value = data.getFloatValue(Data.FORMAT_FLOAT, 0);
assertEquals(Float.NaN, value, 0.00);
}
|
/**
 * Sets (or clears, when null) the active farming contract, persists it,
 * and refreshes the derived contract state.
 */
public void setContract(@Nullable Produce contract)
{
this.contract = contract;
setStoredContract(contract);
handleContractState();
}
|
@Test
public void cabbageContractOnionHarvestableAndCabbageDiseased()
{
final long unixNow = Instant.now().getEpochSecond();
// Get the two allotment patches
final FarmingPatch patch1 = farmingGuildPatches.get(Varbits.FARMING_4773);
final FarmingPatch patch2 = farmingGuildPatches.get(Varbits.FARMING_4774);
assertNotNull(patch1);
assertNotNull(patch2);
// Specify the two allotment patches: one holds harvestable onions (wrong crop),
// the other holds the contracted cabbage, but diseased.
when(farmingTracker.predictPatch(patch1))
.thenReturn(new PatchPrediction(Produce.ONION, CropState.HARVESTABLE, unixNow, 3, 3));
when(farmingTracker.predictPatch(patch2))
.thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.DISEASED, 0, 2, 3));
farmingContractManager.setContract(Produce.CABBAGE);
// The contract is still in progress and reports the diseased state of the cabbage patch.
assertEquals(SummaryState.IN_PROGRESS, farmingContractManager.getSummary());
assertEquals(CropState.DISEASED, farmingContractManager.getContractCropState());
}
|
/**
 * Maps a SeaTunnel column definition back to a DB2 type definition.
 * Numeric, temporal, string and binary types are clamped to DB2's limits
 * (precision/scale/length), logging a warning whenever a value is adjusted.
 * Unsupported SQL types raise a convert-to-connector-type error.
 */
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(DB2_BOOLEAN);
builder.dataType(DB2_BOOLEAN);
break;
// DB2 has no TINYINT; both map to SMALLINT.
case TINYINT:
case SMALLINT:
builder.columnType(DB2_SMALLINT);
builder.dataType(DB2_SMALLINT);
break;
case INT:
builder.columnType(DB2_INT);
builder.dataType(DB2_INT);
break;
case BIGINT:
builder.columnType(DB2_BIGINT);
builder.dataType(DB2_BIGINT);
break;
case FLOAT:
builder.columnType(DB2_REAL);
builder.dataType(DB2_REAL);
break;
case DOUBLE:
builder.columnType(DB2_DOUBLE);
builder.dataType(DB2_DOUBLE);
break;
case DECIMAL:
// Clamp precision to (0, MAX_PRECISION] and scale to [0, MAX_SCALE],
// shrinking scale in step with any precision reduction.
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", DB2_DECIMAL, precision, scale));
builder.dataType(DB2_DECIMAL);
builder.precision(precision);
builder.scale(scale);
break;
case BYTES:
// Length buckets: unknown -> VARBINARY(max), small -> BINARY,
// medium -> VARBINARY, large -> BLOB (clamped).
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(
String.format("%s(%s)", DB2_VARBINARY, MAX_VARBINARY_LENGTH));
builder.dataType(DB2_VARBINARY);
// NOTE(review): sets length to the original (null/non-positive) value while
// the column type says MAX_VARBINARY_LENGTH — confirm this mismatch is intended.
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_BINARY, column.getColumnLength()));
builder.dataType(DB2_BINARY);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_VARBINARY_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_VARBINARY, column.getColumnLength()));
builder.dataType(DB2_VARBINARY);
builder.length(column.getColumnLength());
} else {
long length = column.getColumnLength();
if (length > MAX_BLOB_LENGTH) {
length = MAX_BLOB_LENGTH;
log.warn(
"The length of blob type {} is out of range, "
+ "it will be converted to {}({})",
column.getName(),
DB2_BLOB,
length);
}
builder.columnType(String.format("%s(%s)", DB2_BLOB, length));
builder.dataType(DB2_BLOB);
builder.length(length);
}
break;
case STRING:
// Length buckets: unknown -> VARCHAR(max), small -> CHAR,
// medium -> VARCHAR, large -> CLOB (clamped).
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(String.format("%s(%s)", DB2_VARCHAR, MAX_VARCHAR_LENGTH));
builder.dataType(DB2_VARCHAR);
// NOTE(review): same length/columnType mismatch as the BYTES branch above.
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_CHAR_LENGTH) {
builder.columnType(String.format("%s(%s)", DB2_CHAR, column.getColumnLength()));
builder.dataType(DB2_CHAR);
builder.length(column.getColumnLength());
} else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) {
builder.columnType(
String.format("%s(%s)", DB2_VARCHAR, column.getColumnLength()));
builder.dataType(DB2_VARCHAR);
builder.length(column.getColumnLength());
} else {
long length = column.getColumnLength();
if (length > MAX_CLOB_LENGTH) {
length = MAX_CLOB_LENGTH;
log.warn(
"The length of clob type {} is out of range, "
+ "it will be converted to {}({})",
column.getName(),
DB2_CLOB,
length);
}
builder.columnType(String.format("%s(%s)", DB2_CLOB, length));
builder.dataType(DB2_CLOB);
builder.length(length);
}
break;
case DATE:
builder.columnType(DB2_DATE);
builder.dataType(DB2_DATE);
break;
case TIME:
builder.columnType(DB2_TIME);
builder.dataType(DB2_TIME);
break;
case TIMESTAMP:
// Positive scales are clamped to MAX_TIMESTAMP_SCALE; otherwise the
// scale-less TIMESTAMP form is emitted.
if (column.getScale() != null && column.getScale() > 0) {
int timestampScale = column.getScale();
if (column.getScale() > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(String.format("%s(%s)", DB2_TIMESTAMP, timestampScale));
builder.scale(timestampScale);
} else {
builder.columnType(DB2_TIMESTAMP);
}
builder.dataType(DB2_TIMESTAMP);
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.DB_2,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
}
|
@Test
public void testReconvertLong() {
    // A LONG column must be mapped to DB2's BIGINT for both column type and data type.
    Column longColumn = PhysicalColumn.builder().name("test").dataType(BasicType.LONG_TYPE).build();
    BasicTypeDefine reconverted = DB2TypeConverter.INSTANCE.reconvert(longColumn);
    // The column name is carried over unchanged.
    Assertions.assertEquals(longColumn.getName(), reconverted.getName());
    Assertions.assertEquals(DB2TypeConverter.DB2_BIGINT, reconverted.getColumnType());
    Assertions.assertEquals(DB2TypeConverter.DB2_BIGINT, reconverted.getDataType());
}
|
/**
 * Returns the extra dependencies this transform requires for the given configuration.
 *
 * @param configuration the transform configuration object
 * @param options the pipeline options in effect
 * @return {@code Optional.empty()} by default, i.e. no additional dependencies
 */
Optional<List<String>> dependencies(ConfigT configuration, PipelineOptions options) {
  return Optional.empty();
}
|
@Test
public void testDependencies() {
  SchemaTransformProvider provider = new FakeTypedSchemaIOProvider();
  // Build a configuration row with one string field and one integer field.
  Row config =
      Row.withSchema(provider.configurationSchema())
          .withFieldValue("string_field", "field1")
          .withFieldValue("integer_field", Integer.valueOf(13))
          .build();
  // The fake provider reports its configuration values back as dependencies.
  assertEquals(Arrays.asList("field1", "13"), provider.dependencies(config, null).get());
}
|
@SuppressWarnings({"dereference.of.nullable", "argument"})
public static PipelineResult run(DataTokenizationOptions options) {
SchemasUtils schema = null;
try {
schema = new SchemasUtils(options.getDataSchemaPath(), StandardCharsets.UTF_8);
} catch (IOException e) {
LOG.error("Failed to retrieve schema for data.", e);
}
checkArgument(schema != null, "Data schema is mandatory.");
// Create the pipeline
Pipeline pipeline = Pipeline.create(options);
// Register the coder for pipeline
CoderRegistry coderRegistry = pipeline.getCoderRegistry();
coderRegistry.registerCoderForType(
FAILSAFE_ELEMENT_CODER.getEncodedTypeDescriptor(), FAILSAFE_ELEMENT_CODER);
coderRegistry.registerCoderForType(
RowCoder.of(schema.getBeamSchema()).getEncodedTypeDescriptor(),
RowCoder.of(schema.getBeamSchema()));
/*
* Row/Row Coder for FailsafeElement.
*/
FailsafeElementCoder<Row, Row> coder =
FailsafeElementCoder.of(
RowCoder.of(schema.getBeamSchema()), RowCoder.of(schema.getBeamSchema()));
coderRegistry.registerCoderForType(coder.getEncodedTypeDescriptor(), coder);
PCollection<Row> rows;
if (options.getInputFilePattern() != null) {
rows = new TokenizationFileSystemIO(options).read(pipeline, schema);
} else if (options.getPubsubTopic() != null) {
rows =
pipeline
.apply(
"ReadMessagesFromPubsub",
PubsubIO.readStrings().fromTopic(options.getPubsubTopic()))
.apply(
"TransformToBeamRow",
new JsonToBeamRow(options.getNonTokenizedDeadLetterPath(), schema));
if (options.getOutputDirectory() != null) {
rows = rows.apply(Window.into(FixedWindows.of(parseDuration(options.getWindowDuration()))));
}
} else {
throw new IllegalStateException(
"No source is provided, please configure File System or Pub/Sub");
}
/*
Tokenize data using remote API call
*/
PCollectionTuple tokenizedRows =
rows.setRowSchema(schema.getBeamSchema())
.apply(
MapElements.into(
TypeDescriptors.kvs(TypeDescriptors.integers(), TypeDescriptors.rows()))
.via((Row row) -> KV.of(0, row)))
.setCoder(KvCoder.of(VarIntCoder.of(), RowCoder.of(schema.getBeamSchema())))
.apply(
"DsgTokenization",
RowToTokenizedRow.newBuilder()
.setBatchSize(options.getBatchSize())
.setRpcURI(options.getRpcUri())
.setSchema(schema.getBeamSchema())
.setSuccessTag(TOKENIZATION_OUT)
.setFailureTag(TOKENIZATION_DEADLETTER_OUT)
.build());
String csvDelimiter = options.getCsvDelimiter();
if (options.getNonTokenizedDeadLetterPath() != null) {
/*
Write tokenization errors to dead-letter sink
*/
tokenizedRows
.get(TOKENIZATION_DEADLETTER_OUT)
.apply(
"ConvertToCSV",
MapElements.into(FAILSAFE_ELEMENT_CODER.getEncodedTypeDescriptor())
.via(
(FailsafeElement<Row, Row> fse) ->
FailsafeElement.of(
new RowToCsv(csvDelimiter).getCsvFromRow(fse.getOriginalPayload()),
new RowToCsv(csvDelimiter).getCsvFromRow(fse.getPayload()))))
.apply(
"WriteTokenizationErrorsToFS",
ErrorConverters.WriteErrorsToTextIO.<String, String>newBuilder()
.setErrorWritePath(options.getNonTokenizedDeadLetterPath())
.setTranslateFunction(SerializableFunctions.getCsvErrorConverter())
.build());
}
if (options.getOutputDirectory() != null) {
new TokenizationFileSystemIO(options)
.write(tokenizedRows.get(TOKENIZATION_OUT), schema.getBeamSchema());
} else if (options.getBigQueryTableName() != null) {
WriteResult writeResult =
TokenizationBigQueryIO.write(
tokenizedRows.get(TOKENIZATION_OUT),
options.getBigQueryTableName(),
schema.getBigQuerySchema());
writeResult
.getFailedInsertsWithErr()
.apply(
"WrapInsertionErrors",
MapElements.into(FAILSAFE_ELEMENT_CODER.getEncodedTypeDescriptor())
.via(TokenizationBigQueryIO::wrapBigQueryInsertError))
.setCoder(FAILSAFE_ELEMENT_CODER)
.apply(
"WriteInsertionFailedRecords",
ErrorConverters.WriteStringMessageErrors.newBuilder()
.setErrorRecordsTable(
options.getBigQueryTableName() + DEFAULT_DEADLETTER_TABLE_SUFFIX)
.setErrorRecordsTableSchema(DEADLETTER_SCHEMA)
.build());
} else if (options.getBigTableInstanceId() != null) {
new TokenizationBigTableIO(options)
.write(tokenizedRows.get(TOKENIZATION_OUT), schema.getBeamSchema());
} else {
throw new IllegalStateException(
"No sink is provided, please configure BigQuery or BigTable.");
}
return pipeline.run();
}
|
@Test
public void testFileSystemIOReadCSV() throws IOException {
  // Read the CSV fixture through the file-system IO and verify the produced rows.
  PCollection<Row> rows = fileSystemIORead(CSV_FILE_PATH, FORMAT.CSV);
  assertRows(rows);
  testPipeline.run();
}
|
/**
 * Creates a {@link MutationDetector} for the given value, using the coder to take the
 * snapshot that later mutations are checked against. A {@code null} value needs no
 * detection, so a no-op detector is returned for it.
 */
public static <T> MutationDetector forValueWithCoder(T value, Coder<T> coder)
    throws CoderException {
  if (value != null) {
    return new CodedValueMutationDetector<>(value, coder);
  }
  return noopMutationDetector();
}
|
@Test
public void testImmutableList() throws Exception {
  // A value that is never touched after the snapshot must pass verification.
  List<Integer> untouched = Lists.newLinkedList(Arrays.asList(1, 2, 3, 4));
  MutationDetector detector =
      MutationDetectors.forValueWithCoder(untouched, IterableCoder.of(VarIntCoder.of()));
  detector.verifyUnmodified();
}
|
/**
 * Pre-commit hook for a lake-table transaction.
 *
 * <p>Collects the dirty partitions and dictionary-cache column information from the
 * reported tablets, optionally applies ingestion slowdown, and verifies that every
 * tablet of every dirty partition has reported a commit.
 *
 * @param txnState        the transaction being committed (must not be COMMITTED yet)
 * @param finishedTablets commit info of tablets that finished writing
 * @param failedTablets   tablets that failed writing (not inspected here)
 * @throws TransactionException if the table is in RESTORE state, the commit rate
 *         limiter rejects the commit, or some tablets have not reported
 */
@Override
public void preCommit(TransactionState txnState, List<TabletCommitInfo> finishedTablets,
                      List<TabletFailInfo> failedTablets) throws TransactionException {
    Preconditions.checkState(txnState.getTransactionStatus() != TransactionStatus.COMMITTED);
    txnState.clearAutomaticPartitionSnapshot();
    if (!finishedTablets.isEmpty()) {
        txnState.setTabletCommitInfos(finishedTablets);
    }
    if (table.getState() == OlapTable.OlapTableState.RESTORE) {
        throw new TransactionCommitFailedException("Cannot write RESTORE state table \"" + table.getName() + "\"");
    }
    dirtyPartitionSet = Sets.newHashSet();
    invalidDictCacheColumns = Sets.newHashSet();
    validDictCacheColumns = Maps.newHashMap();

    Set<Long> finishedTabletsOfThisTable = Sets.newHashSet();
    TabletInvertedIndex tabletInvertedIndex = dbTxnMgr.getGlobalStateMgr().getTabletInvertedIndex();
    List<Long> tabletIds = finishedTablets.stream().map(TabletCommitInfo::getTabletId).collect(Collectors.toList());
    List<TabletMeta> tabletMetaList = tabletInvertedIndex.getTabletMetaList(tabletIds);
    for (int i = 0; i < tabletMetaList.size(); i++) {
        TabletMeta tabletMeta = tabletMetaList.get(i);
        if (tabletMeta == TabletInvertedIndex.NOT_EXIST_TABLET_META) {
            continue;
        }
        if (tabletMeta.getTableId() != table.getId()) {
            continue;
        }
        if (table.getPhysicalPartition(tabletMeta.getPartitionId()) == null) {
            // this can happen when partitionId == -1 (tablet being dropping) or partition really not exist.
            continue;
        }
        dirtyPartitionSet.add(tabletMeta.getPartitionId());

        // Invalid column set should union
        invalidDictCacheColumns.addAll(finishedTablets.get(i).getInvalidDictCacheColumns());
        // Valid column set should intersect and remove all invalid columns
        // Only need to add valid column set once
        if (validDictCacheColumns.isEmpty() &&
                !finishedTablets.get(i).getValidDictCacheColumns().isEmpty()) {
            TabletCommitInfo tabletCommitInfo = finishedTablets.get(i);
            List<Long> validDictCollectedVersions = tabletCommitInfo.getValidDictCollectedVersions();
            List<ColumnId> validDictCacheColumns = tabletCommitInfo.getValidDictCacheColumns();
            for (int j = 0; j < validDictCacheColumns.size(); j++) {
                long version = 0;
                // validDictCollectedVersions != validDictCacheColumns means be has not upgrade
                if (validDictCollectedVersions.size() == validDictCacheColumns.size()) {
                    version = validDictCollectedVersions.get(j);
                }
                // BUGFIX: index with the inner column index 'j'; the previous code used the
                // outer tablet index 'i', which paired versions with the wrong columns and
                // could throw IndexOutOfBoundsException when i >= validDictCacheColumns.size().
                this.validDictCacheColumns.put(validDictCacheColumns.get(j), version);
            }
        }
        if (i == tabletMetaList.size() - 1) {
            validDictCacheColumns.entrySet().removeIf(entry -> invalidDictCacheColumns.contains(entry.getKey()));
        }
        finishedTabletsOfThisTable.add(finishedTablets.get(i).getTabletId());
    }

    if (enableIngestSlowdown()) {
        long currentTimeMs = System.currentTimeMillis();
        new CommitRateLimiter(compactionMgr, txnState, table.getId()).check(dirtyPartitionSet, currentTimeMs);
    }

    // Every tablet of every dirty partition must have reported; otherwise fail the commit.
    List<Long> unfinishedTablets = null;
    for (Long partitionId : dirtyPartitionSet) {
        PhysicalPartition partition = table.getPhysicalPartition(partitionId);
        List<MaterializedIndex> allIndices = txnState.getPartitionLoadedTblIndexes(table.getId(), partition);
        for (MaterializedIndex index : allIndices) {
            Optional<Tablet> unfinishedTablet =
                    index.getTablets().stream().filter(t -> !finishedTabletsOfThisTable.contains(t.getId()))
                            .findAny();
            if (!unfinishedTablet.isPresent()) {
                continue;
            }
            if (unfinishedTablets == null) {
                unfinishedTablets = Lists.newArrayList();
            }
            unfinishedTablets.add(unfinishedTablet.get().getId());
        }
    }
    if (unfinishedTablets != null && !unfinishedTablets.isEmpty()) {
        // Quote style fixed: the message previously opened with ' and closed with ".
        throw new TransactionCommitFailedException(
                "table \"" + table.getName() + "\" has unfinished tablets: " + unfinishedTablets);
    }
}
|
@Test
public void testHasUnfinishedTablet() {
    // Committing with only a subset of tablets reported must fail the transaction.
    LakeTable table = buildLakeTable();
    DatabaseTransactionMgr txnMgr = addDatabaseTransactionMgr();
    LakeTableTxnStateListener listener = new LakeTableTxnStateListener(txnMgr, table);
    TransactionCommitFailedException e = Assert.assertThrows(
            TransactionCommitFailedException.class,
            () -> listener.preCommit(newTransactionState(), buildPartialTabletCommitInfo(),
                    Collections.emptyList()));
    Assert.assertTrue(e.getMessage().contains("has unfinished tablets"));
}
|
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) {
  // The helper writes the error response itself when it cannot resolve the provider,
  // in which case there is nothing left to do here.
  IdentityProvider identityProvider = resolveProviderOrHandleResponse(request, response, CALLBACK_PATH);
  if (identityProvider == null) {
    return;
  }
  handleProvider(request, response, identityProvider);
}
|
@Test
public void fail_on_disabled_provider() throws Exception {
  // Register the provider as disabled, then request its callback URL.
  when(request.getRequestURI()).thenReturn("/oauth2/callback/" + OAUTH2_PROVIDER_KEY);
  identityProviderRepository.addIdentityProvider(new FakeOAuth2IdentityProvider(OAUTH2_PROVIDER_KEY, false));

  underTest.doFilter(request, response, chain);

  // A disabled provider cannot be resolved: an error is reported and no
  // authentication event is emitted.
  assertError("Failed to retrieve IdentityProvider for key 'github'");
  verifyNoInteractions(authenticationEvent);
}
|
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
                  ParseContext context) throws IOException, SAXException, TikaException {
    // Detect the character encoding automatically; the shield keeps the caller's
    // stream open when the reader is closed.
    try (AutoDetectReader reader = new AutoDetectReader(CloseShieldInputStream.wrap(stream),
            metadata, getEncodingDetector(context))) {
        // Honour a previously detected content type (possibly a text/plain subtype
        // such as vcal); otherwise fall back to plain text.
        MediaType mediaType = MediaType.TEXT_PLAIN;
        String incomingMime = metadata.get(Metadata.CONTENT_TYPE);
        if (incomingMime != null) {
            MediaType parsed = MediaType.parse(incomingMime);
            if (parsed != null) {
                mediaType = parsed;
            }
        }
        Charset charset = reader.getCharset();
        metadata.set(Metadata.CONTENT_TYPE, new MediaType(mediaType, charset).toString());
        // deprecated, see TIKA-431
        metadata.set(Metadata.CONTENT_ENCODING, charset.name());

        // Stream the decoded text into a single XHTML paragraph.
        XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
        xhtml.startDocument();
        xhtml.startElement("p");
        char[] buffer = new char[4096];
        for (int n = reader.read(buffer); n != -1; n = reader.read(buffer)) {
            xhtml.characters(buffer, 0, n);
        }
        xhtml.endElement("p");
        xhtml.endDocument();
    }
}
|
@Test
public void testEBCDIC_CP500() throws Exception {
    // The CP500-encoded sample must be detected as charset IBM500.
    Metadata metadata = new Metadata();
    StringWriter writer = new StringWriter();
    parser.parse(getResourceAsStream("/test-documents/english.cp500.txt"),
            new WriteOutContentHandler(writer), metadata, new ParseContext());
    assertEquals("text/plain; charset=IBM500", metadata.get(Metadata.CONTENT_TYPE));

    // Additional check that it isn't too eager on short blocks of text
    Metadata shortMetadata = new Metadata();
    StringWriter shortWriter = new StringWriter();
    parser.parse(new ByteArrayInputStream(
                    "<html><body>hello world</body></html>".getBytes(ISO_8859_1)),
            new WriteOutContentHandler(shortWriter), shortMetadata, new ParseContext());
    assertEquals("text/plain; charset=ISO-8859-1", shortMetadata.get(Metadata.CONTENT_TYPE));
}
|
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
PointList pointList = way.getTag("point_list", null);
Double edgeDistance = way.getTag("edge_distance", null);
if (pointList != null && edgeDistance != null && !pointList.isEmpty()) {
double beeline = DistanceCalcEarth.DIST_EARTH.calcDist(pointList.getLat(0), pointList.getLon(0),
pointList.getLat(pointList.size() - 1), pointList.getLon(pointList.size() - 1));
// For now keep the formula simple. Maybe later use quadratic value as it might improve the "resolution"
double curvature = beeline / edgeDistance;
curvatureEnc.setDecimal(false, edgeId, edgeIntAccess, Math.max(curvatureEnc.getMinStorableDecimal(), Math.min(curvatureEnc.getMaxStorableDecimal(),
curvature)));
} else {
curvatureEnc.setDecimal(false, edgeId, edgeIntAccess, 1.0);
}
}
|
@Test
public void testCurvature() {
    CurvatureCalculator calculator = new CurvatureCalculator(em.getDecimalEncodedValue(Curvature.KEY));
    int edgeId = 0;

    // Encode and read back the curvature of a straight way.
    ArrayEdgeIntAccess straightAccess = ArrayEdgeIntAccess.createFromBytes(em.getBytesForFlags());
    calculator.handleWayTags(edgeId, straightAccess, getStraightWay(), null);
    double straight = em.getDecimalEncodedValue(Curvature.KEY).getDecimal(false, edgeId, straightAccess);

    // Same for a curvy way.
    ArrayEdgeIntAccess curvyAccess = ArrayEdgeIntAccess.createFromBytes(em.getBytesForFlags());
    calculator.handleWayTags(edgeId, curvyAccess, getCurvyWay(), null);
    double curvy = em.getDecimalEncodedValue(Curvature.KEY).getDecimal(false, edgeId, curvyAccess);

    assertTrue(curvy < straight, "The bendiness of the straight road is smaller than the one of the curvy road");
}
|
/**
 * Compiles the given filter expression into a {@link PageFilter} supplier.
 *
 * <p>Convenience overload that delegates to the main implementation with an empty
 * session-function map.
 *
 * @param sqlFunctionProperties properties used when resolving SQL functions
 * @param filter the row expression to compile
 * @param isOptimizeCommonSubExpression whether to optimize common sub-expressions
 * @param classNameSuffix optional suffix for the generated class name
 */
@VisibleForTesting
public Supplier<PageFilter> compileFilter(
        SqlFunctionProperties sqlFunctionProperties,
        RowExpression filter,
        boolean isOptimizeCommonSubExpression,
        Optional<String> classNameSuffix)
{
    return compileFilter(sqlFunctionProperties, emptyMap(), filter, isOptimizeCommonSubExpression, classNameSuffix);
}
|
@Test
public void testCommonSubExpressionInFilter()
{
    // Filter: (x + y > 2) AND (x + y < 10) — "x + y" is the common sub-expression.
    PageFunctionCompiler functionCompiler = new PageFunctionCompiler(createTestMetadataManager(), 0);
    RowExpression conjunction = new SpecialFormExpression(AND, BIGINT, ADD_X_Y_GREATER_THAN_2, ADD_X_Y_LESS_THAN_10);
    Supplier<PageFilter> pageFilter = functionCompiler.compileFilter(
            SESSION.getSqlFunctionProperties(), conjunction, true, Optional.empty());

    Page input = createLongBlockPage(2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
    SelectedPositions positions = filter(pageFilter.get(), input);

    // Exactly positions 2, 3 and 4 satisfy both bounds.
    assertEquals(positions.size(), 3);
    assertEquals(positions.getPositions(), new int[] {2, 3, 4});
}
|
/**
 * Rewrites a webjar resource path of the form {@code <webjar>/<resource...>} to
 * {@code <webjar>/<version>/<resource...>} using the configured Swagger UI version.
 * Returns {@code null} when the path has fewer than two segments or no version is set.
 */
@Nullable
protected String findWebJarResourcePath(String pathStr) {
    Path path = Paths.get(pathStr);
    if (path.getNameCount() < 2) {
        return null;
    }
    String version = swaggerUiConfigProperties.getVersion();
    if (version == null) {
        return null;
    }
    Path webjar = path.getName(0);
    Path resource = path.subpath(1, path.getNameCount());
    return webjar.resolve(version).resolve(resource).toString();
}
|
@Test
// Fixed typo in the test-method name ("returNull" -> "returnNull"); test methods are
// invoked reflectively by the framework, so the rename is safe.
void returnNullWhenVersionIsNull() {
    // Without a configured Swagger UI version no webjar path can be resolved.
    String path = "swagger-ui/swagger-initializer.js";
    swaggerUiConfigProperties.setVersion(null);
    String actual = abstractSwaggerResourceResolver.findWebJarResourcePath(path);
    assertTrue(Objects.isNull(actual));
}
|
@SuppressWarnings("OptionalGetWithoutIsPresent") // Enforced by type
@Override
public StreamsMaterializedWindowedTable windowed() {
if (!windowInfo.isPresent()) {
throw new UnsupportedOperationException("Table has non-windowed key");
}
final WindowInfo wndInfo = windowInfo.get();
final WindowType wndType = wndInfo.getType();
switch (wndType) {
case SESSION:
return new KsMaterializedSessionTable(stateStore,
SessionStoreCacheBypass::fetch, SessionStoreCacheBypass::fetchRange);
case HOPPING:
case TUMBLING:
return new KsMaterializedWindowTable(stateStore, wndInfo.getSize().get(),
WindowStoreCacheBypass::fetch,
WindowStoreCacheBypass::fetchAll,
WindowStoreCacheBypass::fetchRange);
default:
throw new UnsupportedOperationException("Unknown window type: " + wndInfo);
}
}
|
@Test
public void shouldReturnWindowedForSession() {
  // Given:
  givenWindowType(Optional.of(WindowType.SESSION));

  // When:
  final StreamsMaterializedWindowedTable result = materialization.windowed();

  // Then: session windows must be served by the session-table implementation.
  assertThat(result, is(instanceOf(KsMaterializedSessionTable.class)));
}
|
/**
 * Asserts that the subject is not equal to the given object, delegating directly to
 * the superclass implementation.
 */
@Override
public void isNotEqualTo(@Nullable Object expected) {
  super.isNotEqualTo(expected);
}
|
@Test
public void isNotEqualTo_WithoutToleranceParameter_FailEquals() {
  // Two arrays with identical contents — including infinities, NaN and signed
  // zeros — must make isNotEqualTo fail.
  expectFailureWhenTestingThat(
          array(2.2f, 5.4f, POSITIVE_INFINITY, NEGATIVE_INFINITY, NaN, 0.0f, -0.0f))
      .isNotEqualTo(
          array(2.2f, 5.4f, POSITIVE_INFINITY, NEGATIVE_INFINITY, NaN, 0.0f, -0.0f));
}
|
@Override
public Space get() throws BackgroundException {
    try {
        final SpaceUsage usage = new DbxUserUsersRequests(session.getClient()).getSpaceUsage();
        final SpaceAllocation allocation = usage.getAllocation();
        // Quota may be accounted per individual user or per team; anything else is unknown.
        Long allocated = null;
        if(allocation.isIndividual()) {
            allocated = allocation.getIndividualValue().getAllocated();
        }
        else if(allocation.isTeam()) {
            allocated = allocation.getTeamValue().getAllocated();
        }
        if(allocated == null) {
            return unknown;
        }
        return new Space(usage.getUsed(), allocated - usage.getUsed());
    }
    catch(DbxException e) {
        throw new DropboxExceptionMappingService().map("Failure to read attributes of {0}", e,
                new Path(String.valueOf(Path.DELIMITER), EnumSet.of(Path.Type.volume, Path.Type.directory)));
    }
}
|
@Test
public void testGet() throws Exception {
    // The feature must report a concrete (non-sentinel) quota for the connected account.
    final Quota.Space space = new DropboxQuotaFeature(session).get();
    assertNotNull(space);
    assertNotEquals(-1, space.available, 0L);
    assertNotEquals(-1, space.used, 0L);
}
|
/**
 * Returns the job status represented by this state.
 *
 * @return always {@link JobStatus#FAILING}
 */
@Override
public JobStatus getJobStatus() {
    return JobStatus.FAILING;
}
|
@Test
void testStateDoesNotExposeGloballyTerminalExecutionGraph() throws Exception {
    try (MockFailingContext ctx = new MockFailingContext()) {
        StateTrackingMockExecutionGraph executionGraph = new StateTrackingMockExecutionGraph();
        Failing failing = createFailingState(ctx, executionGraph);

        // ideally we'd delay the async call to #onGloballyTerminalState instead, but the
        // context does not support that
        ctx.setExpectFinished(eg -> {});

        executionGraph.completeTerminationFuture(JobStatus.FAILED);

        // this is just a sanity check for the test
        assertThat(executionGraph.getState()).isEqualTo(JobStatus.FAILED);

        // the state keeps reporting FAILING even though the underlying graph already
        // reached the terminal FAILED status
        assertThat(failing.getJobStatus()).isEqualTo(JobStatus.FAILING);
        assertThat(failing.getJob().getState()).isEqualTo(JobStatus.FAILING);
        assertThat(failing.getJob().getStatusTimestamp(JobStatus.FAILED)).isZero();
    }
}
|
/**
 * Collects the message field names that may hold IP addresses. Internal Graylog fields
 * are always excluded; when schema enforcement is on, only fields listed in
 * {@code ipAddressFields} qualify.
 */
@VisibleForTesting
List<String> getIpAddressFields(Message message) {
    return message.getFieldNames()
            .stream()
            .filter(fieldName -> !fieldName.startsWith(Message.INTERNAL_FIELD_PREFIX))
            .filter(fieldName -> !enforceGraylogSchema || ipAddressFields.containsKey(fieldName))
            .collect(Collectors.toList());
}
|
@Test
public void testGetIpAddressFieldsEnforceGraylogSchema() {
    GeoIpResolverConfig conf = config.toBuilder().enforceGraylogSchema(true).build();
    final GeoIpResolverEngine engine = new GeoIpResolverEngine(geoIpVendorResolverService, conf, s3GeoIpFileService, metricRegistry);

    // Mix schema fields, non-schema fields and an internal (gl2_) field.
    Map<String, Object> fields = new HashMap<>();
    fields.put("_id", java.util.UUID.randomUUID().toString());
    fields.put("source_ip", "127.0.0.1");
    fields.put("src_ip", "127.0.0.1");
    fields.put("destination_ip", "127.0.0.1");
    fields.put("dest_ip", "127.0.0.1");
    fields.put("gl2_test", "127.0.0.1");

    List<String> ipFields = engine.getIpAddressFields(messageFactory.createMessage(fields));

    //with the Graylog Schema enforced, only the source_ip and destination_ip should be returned
    Assertions.assertEquals(2, ipFields.size());
    Assertions.assertTrue(ipFields.contains("source_ip"));
    Assertions.assertTrue(ipFields.contains("destination_ip"));
}
|
/**
 * Removes all segments from the list, handing each one to {@code RecycleUtil} for
 * reuse, then resets both the logical size and the offset of the first element.
 */
public void clear() {
    while (!this.segments.isEmpty()) {
        RecycleUtil.recycle(this.segments.pollLast());
    }
    // Reset logical size and the read offset within the first segment.
    this.size = this.firstOffset = 0;
}
|
@Test
public void simpleBenchmark() {
    int warmupRepeats = 10_0000;
    int repeats = 100_0000;
    double arrayDequeOps = 0;
    double segListOps = 0;

    // Benchmark ArrayDeque as the baseline.
    {
        ArrayDeque<Integer> deque = new ArrayDeque<>();
        System.gc();
        // warm up the JIT before measuring
        benchArrayDequeue(warmupRepeats, deque);
        deque.clear();
        System.gc();
        long startNs = System.nanoTime();
        benchArrayDequeue(repeats, deque);
        // elapsed nanoseconds per repetition (the original local was misleadingly named costMs)
        long nsPerOp = (System.nanoTime() - startNs) / repeats;
        arrayDequeOps = repeats * 3.0 / nsPerOp * 1000;
        System.out.println("ArrayDeque, cost:" + nsPerOp + ", ops: " + arrayDequeOps);
    }

    // Benchmark SegmentList with the same workload.
    {
        System.gc();
        // warm up the JIT before measuring
        benchSegmentList(warmupRepeats);
        this.list.clear();
        System.gc();
        long startNs = System.nanoTime();
        benchSegmentList(repeats);
        long nsPerOp = (System.nanoTime() - startNs) / repeats;
        segListOps = repeats * 3.0 / nsPerOp * 1000;
        System.out.println("SegmentList, cost:" + nsPerOp + ", ops: " + segListOps);
        this.list.clear();
    }

    System.out.println("Improvement:" + Math.round((segListOps - arrayDequeOps) / arrayDequeOps * 100) + "%");
}
|
@Override
public Object getInternalProperty(String key) {
    // Look up the key verbatim first, then fall back to its OS-style form
    // (see StringUtils.toOSStyleKey).
    String value = getenv(key);
    if (!StringUtils.isEmpty(value)) {
        return value;
    }
    return getenv(StringUtils.toOSStyleKey(key));
}
|
@Test
void testGetInternalProperty() {
    Map<String, String> env = new HashMap<>();
    env.put(MOCK_KEY, MOCK_VALUE);
    // Stub the environment lookup so the test does not depend on the host platform.
    EnvironmentConfiguration configuration = new EnvironmentConfiguration() {
        @Override
        protected String getenv(String key) {
            return env.get(key);
        }
    };
    // this UT maybe only works on particular platform, assert only when value is not null.
    Assertions.assertEquals(MOCK_VALUE, configuration.getInternalProperty("dubbo.key"));
    Assertions.assertEquals(MOCK_VALUE, configuration.getInternalProperty("key"));
    Assertions.assertEquals(MOCK_VALUE, configuration.getInternalProperty("dubbo_key"));
    Assertions.assertEquals(MOCK_VALUE, configuration.getInternalProperty(MOCK_KEY));
}
|
/**
 * Uploads a file using the SDS direct-S3 multipart upload channel.
 *
 * <p>Opens an upload channel, splits the input into parts uploaded concurrently via
 * presigned URLs, completes the upload and polls until the node is available. When the
 * target container is encrypted, part contents are encrypted to temporary files first
 * and the file key is attached to the completion request.
 *
 * @param file     remote file to create
 * @param local    local source file
 * @param throttle bandwidth throttle applied to part transfers
 * @param listener stream progress listener
 * @param status   transfer status carrying length, offset, timestamps and file key
 * @param callback prompt callback for connection handling
 * @return the created node after server-side processing completed
 * @throws BackgroundException on crypto, API or I/O failures (mapped accordingly)
 */
@Override
public Node upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
                   final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        // Wrap the source stream with the encryptor when the container is encrypted.
        final InputStream in;
        if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
            in = new SDSTripleCryptEncryptorFeature(session, nodeid).encrypt(file, local.getInputStream(), status);
        }
        else {
            in = local.getInputStream();
        }
        // Open the direct S3 upload channel for the target parent node.
        final CreateFileUploadRequest createFileUploadRequest = new CreateFileUploadRequest()
                .directS3Upload(true)
                .timestampModification(status.getModified() != null ? new DateTime(status.getModified()) : null)
                .timestampCreation(status.getCreated() != null ? new DateTime(status.getCreated()) : null)
                .size(TransferStatus.UNKNOWN_LENGTH == status.getLength() ? null : status.getLength())
                .parentId(Long.parseLong(nodeid.getVersionId(file.getParent())))
                .name(file.getName());
        final CreateFileUploadResponse createFileUploadResponse = new NodesApi(session.getClient())
                .createFileUploadChannel(createFileUploadRequest, StringUtils.EMPTY);
        if(log.isDebugEnabled()) {
            log.debug(String.format("upload started for %s with response %s", file, createFileUploadResponse));
        }
        final Map<Integer, TransferStatus> etags = new HashMap<>();
        final List<PresignedUrl> presignedUrls = this.retrievePresignedUrls(createFileUploadResponse, status);
        final List<Future<TransferStatus>> parts = new ArrayList<>();
        try {
            final String random = new UUIDRandomStringService().random();
            // Full size of file
            final long size = status.getLength() + status.getOffset();
            long offset = 0;
            long remaining = status.getLength();
            // Submit one task per part; part sizes are chosen between the configured
            // part size and an even split over the maximum part count.
            for(int partNumber = 1; remaining >= 0; partNumber++) {
                final long length = Math.min(Math.max((size / (MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
                final PresignedUrl presignedUrl = presignedUrls.get(partNumber - 1);
                if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
                    // Encrypted uploads buffer each part to a temporary file first, so
                    // each task reads from offset 0 of its own buffer.
                    final Local temporary = temp.create(String.format("%s-%d", random, partNumber));
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Encrypted contents for part %d to %s", partNumber, temporary));
                    }
                    final FileBuffer buffer = new FileBuffer(temporary);
                    new StreamCopier(status, StreamProgress.noop).withAutoclose(false).withLimit(length)
                            .transfer(in, new BufferOutputStream(buffer));
                    parts.add(this.submit(pool, file, temporary, buffer, throttle, listener, status,
                            presignedUrl.getUrl(), presignedUrl.getPartNumber(), 0L, length, callback));
                }
                else {
                    parts.add(this.submit(pool, file, local, Buffer.noop, throttle, listener, status,
                            presignedUrl.getUrl(), presignedUrl.getPartNumber(), offset, length, callback));
                }
                remaining -= length;
                offset += length;
                if(0L == remaining) {
                    break;
                }
            }
        }
        finally {
            in.close();
        }
        // Wait for all part uploads and collect their results keyed by part number.
        Interruptibles.awaitAll(parts)
                .forEach(part -> etags.put(part.getPart(), part));
        final CompleteS3FileUploadRequest completeS3FileUploadRequest = new CompleteS3FileUploadRequest()
                .keepShareLinks(new HostPreferences(session.getHost()).getBoolean("sds.upload.sharelinks.keep"))
                .resolutionStrategy(CompleteS3FileUploadRequest.ResolutionStrategyEnum.OVERWRITE);
        // Attach the file key, encrypted with the account's public key, for encrypted rooms.
        if(status.getFilekey() != null) {
            final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
            final FileKey fileKey = reader.readValue(status.getFilekey().array());
            final EncryptedFileKey encryptFileKey = Crypto.encryptFileKey(
                    TripleCryptConverter.toCryptoPlainFileKey(fileKey),
                    TripleCryptConverter.toCryptoUserPublicKey(session.keyPair().getPublicKeyContainer())
            );
            completeS3FileUploadRequest.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptFileKey));
        }
        etags.forEach((key, value) -> completeS3FileUploadRequest.addPartsItem(
                new S3FileUploadPart().partEtag(value.getChecksum().hash).partNumber(key)));
        if(log.isDebugEnabled()) {
            log.debug(String.format("Complete file upload with %s for %s", completeS3FileUploadRequest, file));
        }
        new NodesApi(session.getClient()).completeS3FileUpload(completeS3FileUploadRequest, createFileUploadResponse.getUploadId(), StringUtils.EMPTY);
        // Polling
        return new SDSUploadService(session, nodeid).await(file, status, createFileUploadResponse.getUploadId()).getNode();
    }
    catch(CryptoSystemException | InvalidFileKeyException | InvalidKeyPairException | UnknownVersionException e) {
        throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
    }
    catch(ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
    finally {
        temp.shutdown();
        // Cancel future tasks
        pool.shutdown(false);
    }
}
|
/**
 * Uploads a small (sub-part-size) file into an encrypted room, then reads it back
 * through the Triple-Crypt read feature and verifies content and attributes round-trip.
 */
@Test
public void testTripleCryptUploadBelowMultipartSize() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final SDSDirectS3UploadFeature feature = new SDSDirectS3UploadFeature(session, nodeid, new SDSDirectS3WriteFeature(session, nodeid));
    // Encrypted room (second argument true) as upload target.
    final Path room = new SDSDirectoryFeature(session, nodeid).createRoom(
            new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), true);
    final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    // Write 578 random bytes — well below the multipart part size — to a local temp file.
    final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    final byte[] random = RandomUtils.nextBytes(578);
    final OutputStream out = local.getOutputStream(false);
    IOUtils.write(random, out);
    out.close();
    final TransferStatus status = new TransferStatus();
    status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());
    status.setLength(random.length);
    final SDSEncryptionBulkFeature bulk = new SDSEncryptionBulkFeature(session, nodeid);
    bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test, local), status), new DisabledConnectionCallback());
    final Node node = feature.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
            new DisabledStreamListener(), status, new DisabledLoginCallback());
    assertTrue(status.isComplete());
    assertNotSame(PathAttributes.EMPTY, status.getResponse());
    assertTrue(new SDSFindFeature(session, nodeid).find(test));
    final PathAttributes attributes = new SDSAttributesFinderFeature(session, nodeid).find(test);
    assertEquals(random.length, attributes.getSize());
    assertEquals(new SDSAttributesAdapter(session).toAttributes(node), attributes);
    // Read back through the decrypting read feature, supplying the vault passphrase.
    final byte[] compare = new byte[random.length];
    final InputStream stream = new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(test, new TransferStatus(), new DisabledConnectionCallback() {
        @Override
        public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
            return new VaultCredentials("eth[oh8uv4Eesij");
        }
    });
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(random, compare);
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    local.delete();
}
|
/**
 * Saves a revision (checkpoint) of the given note using the commit message carried
 * in the JSON request body.
 */
@POST
@Path("{noteId}/revision")
@ZeppelinApi
public Response checkpointNote(String message,
                               @PathParam("noteId") String noteId) throws IOException {
  LOGGER.info("Commit note by JSON {}", message);
  CheckpointNoteRequest request = GSON.fromJson(message, CheckpointNoteRequest.class);
  // A commit message is mandatory.
  boolean missingCommitMessage = request == null || StringUtils.isEmpty(request.getCommitMessage());
  if (missingCommitMessage) {
    LOGGER.warn("Trying to commit notebook {} with empty commitMessage", noteId);
    throw new BadRequestException("commitMessage can not be empty");
  }
  NotebookRepoWithVersionControl.Revision revision =
      notebookService.checkpointNote(noteId, request.getCommitMessage(), getServiceContext(), new RestServiceCallback<>());
  // Some storage backends do not support versioning, or there may be nothing to commit.
  boolean checkpointFailed = revision == null || StringUtils.isEmpty(revision.id);
  if (checkpointFailed) {
    return new JsonResponse<>(Status.OK, "Couldn't checkpoint note revision: possibly no changes found or storage doesn't support versioning. "
        + "Please check the logs for more details.").build();
  }
  return new JsonResponse<>(Status.OK, "", revision.id).build();
}
|
/**
 * Creates a note with one paragraph, checkpoints it through the REST API and verifies
 * the stored revision can be loaded back with the expected content.
 */
@Test
void testCheckpointNote() throws IOException {
  LOG.info("Running testCheckpointNote");
  String note1Id = null;
  try {
    String notePath = "note1";
    note1Id = notebook.createNote(notePath, anonymous);
    //Add a paragraph
    notebook.processNote(note1Id, note -> {
      Paragraph p1 = note.addNewParagraph(anonymous);
      p1.setText("text1");
      notebook.saveNote(note, AuthenticationInfo.ANONYMOUS);
      return null;
    });

    // Call restful api to save a revision and verify
    String commitMessage = "first commit";
    CloseableHttpResponse post = httpPost("/notebook/" + note1Id + "/revision", "{\"commitMessage\" : \"" + commitMessage + "\"}");
    assertThat(post, isAllowed());

    Map<String, Object> resp = gson.fromJson(EntityUtils.toString(post.getEntity(), StandardCharsets.UTF_8),
        new TypeToken<Map<String, Object>>() {
        }.getType());
    assertEquals("OK", resp.get("status"));
    // The response body carries the new revision id.
    String revisionId = (String) resp.get("body");
    notebook.processNote(note1Id, note -> {
      // Loading the note at that revision must reproduce the single paragraph.
      Note revisionOfNote = notebook.getNoteByRevision(note.getId(), note.getPath(), revisionId, anonymous);
      assertEquals(1, notebook.listRevisionHistory(note.getId(), note.getPath(), anonymous).size());
      assertEquals(1, revisionOfNote.getParagraphs().size());
      assertEquals("text1", revisionOfNote.getParagraph(0).getText());
      return null;
    });
    post.close();
  } finally {
    // cleanup
    if (null != note1Id) {
      notebook.removeNote(note1Id, anonymous);
    }
  }
}
|
/**
 * Runs the version-change detection pipeline: query the controller version,
 * list the pods, detect the from/to Kafka versions, then prepare the
 * resulting {@code KafkaVersionChange}.
 */
public Future<KafkaVersionChange> reconcile() {
    return getVersionFromController()
            .compose(ignored -> getPods())
            .compose(pods -> detectToAndFromVersions(pods))
            .compose(ignored -> prepareVersionChange());
}
|
/**
 * Upgrade scenario: the StrimziPodSet already advertises the new Kafka
 * version but the pods still run the previous version. The reconcile must
 * report an upgrade from the old to the default version while keeping the
 * old inter-broker protocol and log message format versions.
 */
@Test
public void testUpgradeWithOldPodsAndNewSps(VertxTestContext context) {
    String oldKafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION;
    String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION;
    String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION;
    String kafkaVersion = VERSIONS.defaultVersion().version();
    String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion();
    String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion();
    // SPS carries the new version, pods still carry the old one.
    VersionChangeCreator vcc = mockVersionChangeCreator(
            mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion),
            mockNewCluster(
                    null,
                    mockSps(kafkaVersion),
                    mockUniformPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion)
            )
    );
    Checkpoint async = context.checkpoint();
    vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
        assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion)));
        assertThat(c.to(), is(VERSIONS.defaultVersion()));
        assertThat(c.interBrokerProtocolVersion(), is(oldInterBrokerProtocolVersion));
        assertThat(c.logMessageFormatVersion(), is(oldLogMessageFormatVersion));
        async.flag();
    })));
}
|
/**
 * FEEL "substring after" function: returns the part of {@code string} that
 * follows the first occurrence of {@code match}, an empty string when there
 * is no occurrence, and an invalid-parameters error when either argument is
 * {@code null}.
 */
public FEELFnResult<String> invoke(@ParameterName( "string" ) String string, @ParameterName( "match" ) String match) {
    if ( string == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null"));
    }
    if ( match == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "match", "cannot be null"));
    }
    final int matchStart = string.indexOf( match );
    return matchStart < 0
            ? FEELFnResult.ofResult( "" )
            : FEELFnResult.ofResult( string.substring( matchStart + match.length() ) );
}
|
/**
 * Every combination of null arguments must yield an
 * {@link InvalidParametersEvent} error rather than a result.
 */
@Test
void invokeNull() {
    FunctionTestUtil.assertResultError(substringAfterFunction.invoke((String) null, null),
                                       InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(substringAfterFunction.invoke(null, "test"), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(substringAfterFunction.invoke("test", null), InvalidParametersEvent.class);
}
|
/**
 * Returns {@code true} when the given endpoint fully matches the
 * instance-endpoint pattern {@code INST_ENDPOINT_PATTERN}.
 */
public static boolean validateInstanceEndpoint(String endpoint) {
    return INST_ENDPOINT_PATTERN.matcher(endpoint).matches();
}
|
/**
 * Checks that invalid endpoint fixtures (endpoint1/2) are rejected while
 * valid ones (endpoint3/4) are accepted by the pattern match.
 */
@Test
public void testValidateInstanceEndpoint() {
    assertThat(NameServerAddressUtils.validateInstanceEndpoint(endpoint1)).isEqualTo(false);
    assertThat(NameServerAddressUtils.validateInstanceEndpoint(endpoint2)).isEqualTo(false);
    assertThat(NameServerAddressUtils.validateInstanceEndpoint(endpoint3)).isEqualTo(true);
    assertThat(NameServerAddressUtils.validateInstanceEndpoint(endpoint4)).isEqualTo(true);
}
|
/**
 * Drops the stored-procedure package identified by the request's catalog,
 * database and package name. A no-op when the package does not exist.
 * The transaction is rolled back if the commit does not succeed.
 */
public void dropPackage(DropPackageRequest request) {
  boolean success = false;
  try {
    openTransaction();
    MPackage proc = findMPackage(request.getCatName(), request.getDbName(), request.getPackageName());
    if (proc != null) {
      // Fix: only retrieve when the package was actually found. The previous
      // code called pm.retrieve(proc) before the null check, handing a null
      // handle to JDO when the package was missing.
      pm.retrieve(proc);
      pm.deletePersistentAll(proc);
    }
    success = commitTransaction();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
}
|
/**
 * Round-trip: create a database, add a package, verify it is findable,
 * drop it, and verify the lookup then returns null.
 */
@Test
public void testDropPackage() throws Exception {
  objectStore.createDatabase(new DatabaseBuilder()
      .setName(DB1)
      .setDescription("description")
      .setLocation("locationurl")
      .build(conf));
  AddPackageRequest pkg = new AddPackageRequest("hive", DB1, "pkg1", "user1", "header", "body");
  objectStore.addPackage(pkg);
  Assert.assertNotNull(objectStore.findPackage(new GetPackageRequest("hive", DB1, "pkg1")));
  objectStore.dropPackage(new DropPackageRequest("hive", DB1, "pkg1"));
  Assert.assertNull(objectStore.findPackage(new GetPackageRequest("hive", DB1, "pkg1")));
}
|
/**
 * Resolves the function overload matching the given argument types.
 * An exact match (no implicit casts) is preferred; implicit casts are only
 * tried when the index supports them. Throws when no candidate matches.
 */
T getFunction(final List<SqlArgument> arguments) {
    // First attempt: require an exact signature match.
    final Optional<T> exactMatch = findMatchingCandidate(arguments, false);
    if (exactMatch.isPresent()) {
        return exactMatch.get();
    }
    if (!supportsImplicitCasts) {
        throw createNoMatchingFunctionException(arguments);
    }
    // Second attempt: allow implicit casting of the arguments.
    return findMatchingCandidate(arguments, true)
        .orElseThrow(() -> createNoMatchingFunctionException(arguments));
}
|
/**
 * A function declared with two independent generic parameters (A, B) must
 * match a call site whose two arguments have different concrete types.
 */
@Test
public void shouldMatchGenericMethodWithMultipleGenerics() {
    // Given:
    final GenericType genericA = GenericType.of("A");
    final GenericType genericB = GenericType.of("B");
    givenFunctions(
        function(EXPECTED, -1, genericA, genericB)
    );
    // When:
    final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(SqlArgument.of(INTEGER), SqlArgument.of(SqlTypes.STRING)));
    // Then:
    assertThat(fun.name(), equalTo(EXPECTED));
}
|
/**
 * Serializes the delta-binary-packed page: flushes any buffered values so
 * the final (possibly partial) block lands in {@code baos}, then emits the
 * header (config, total value count as unsigned varint, first value as
 * zig-zag varint) followed by the packed blocks.
 */
@Override
public BytesInput getBytes() {
    // The Page Header should include: blockSizeInValues, numberOfMiniBlocks, totalValueCount
    if (deltaValuesToFlush != 0) {
        flushBlockBuffer();
    }
    return BytesInput.concat(
        config.toBytesInput(),
        BytesInput.fromUnsignedVarInt(totalValueCount),
        BytesInput.fromZigZagVarInt(firstValue),
        BytesInput.from(baos));
}
|
/**
 * Interleaves skip() and readInteger() across multiple blocks (5 blocks + 1
 * value) and checks that skipped values do not disturb subsequent reads.
 */
@Test
public void shouldSkip() throws IOException {
    int[] data = new int[5 * blockSize + 1];
    for (int i = 0; i < data.length; i++) {
        data[i] = i * 32;
    }
    writeData(data);
    reader = new DeltaBinaryPackingValuesReader();
    reader.initFromPage(100, writer.getBytes().toInputStream());
    for (int i = 0; i < data.length; i++) {
        // Skip every third value; the rest must still read back correctly.
        if (i % 3 == 0) {
            reader.skip();
        } else {
            assertEquals(i * 32, reader.readInteger());
        }
    }
}
|
/**
 * Varargs convenience overload that delegates to the list-based parse.
 */
public RuntimeOptionsBuilder parse(String... args) {
    return parse(Arrays.asList(args));
}
|
/**
 * Strict-aware formatter plugins must see strict mode enabled after the
 * runtime options are parsed and the plugin is instantiated.
 */
@Test
void set_strict_on_strict_aware_formatters() {
    RuntimeOptions options = parser
            .parse("--plugin", AwareFormatter.class.getName())
            .build();
    Plugins plugins = new Plugins(new PluginFactory(), options);
    plugins.setEventBusOnEventListenerPlugins(new TimeServiceEventBus(Clock.systemUTC(), UUID::randomUUID));
    AwareFormatter formatter = (AwareFormatter) plugins.getPlugins().get(0);
    assertThat(formatter.isStrict(), is(true));
}
|
/**
 * Wraps the raw string into a PostgreSQL {@code json}-typed {@link PGobject},
 * rethrowing any {@link SQLException} as an unchecked {@code SQLWrapperException}.
 */
@Override
public PGobject parse(final String value) {
    try {
        final PGobject jsonObject = new PGobject();
        jsonObject.setType("json");
        jsonObject.setValue(value);
        return jsonObject;
    } catch (final SQLException ex) {
        throw new SQLWrapperException(ex);
    }
}
|
/**
 * The parser must produce a PGobject typed "json" whose value is the raw
 * input string, unmodified.
 */
@Test
void assertParse() {
    PGobject actual = new PostgreSQLJsonValueParser().parse("['input']");
    assertThat(actual.getType(), is("json"));
    assertThat(actual.getValue(), is("['input']"));
}
|
/**
 * Static factory for the AutoValue-generated conditional expression
 * {@code conditionExpr ? trueExpr : falseExpr}.
 */
public static UConditional create(
    UExpression conditionExpr, UExpression trueExpr, UExpression falseExpr) {
  return new AutoValue_UConditional(conditionExpr, trueExpr, falseExpr);
}
|
/**
 * equals/hashCode contract: conditionals differing in any of the three
 * sub-expressions must fall into distinct equality groups.
 */
@Test
public void equality() {
    ULiteral trueLit = ULiteral.booleanLit(true);
    ULiteral falseLit = ULiteral.booleanLit(false);
    ULiteral negOneLit = ULiteral.intLit(-1);
    ULiteral oneLit = ULiteral.intLit(1);
    new EqualsTester()
        .addEqualityGroup(UConditional.create(trueLit, negOneLit, oneLit))
        .addEqualityGroup(UConditional.create(trueLit, oneLit, oneLit))
        .addEqualityGroup(UConditional.create(trueLit, negOneLit, negOneLit))
        .addEqualityGroup(UConditional.create(falseLit, negOneLit, oneLit))
        .testEquals();
}
|
/**
 * Validates externally supplied offsets for this connector before they are
 * written. Requires a configured file (stdin offsets are untracked), allows
 * null (tombstone) offsets, and otherwise requires each offset to carry a
 * non-negative Long under POSITION_FIELD and each partition to carry
 * FILENAME_FIELD. Returns true when everything is acceptable; the task
 * itself re-checks the position against the real file on startup.
 *
 * @throws ConnectException on any invalid partition or offset
 */
@Override
public boolean alterOffsets(Map<String, String> connectorConfig, Map<Map<String, ?>, Map<String, ?>> offsets) {
    AbstractConfig config = new AbstractConfig(CONFIG_DEF, connectorConfig);
    String filename = config.getString(FILE_CONFIG);
    if (filename == null || filename.isEmpty()) {
        throw new ConnectException("Offsets cannot be modified if the '" + FILE_CONFIG + "' configuration is unspecified. " +
            "This is because stdin is used for input and offsets are not tracked.");
    }
    // This connector makes use of a single source partition at a time which represents the file that it is configured to read from.
    // However, there could also be source partitions from previous configurations of the connector.
    for (Map.Entry<Map<String, ?>, Map<String, ?>> partitionOffset : offsets.entrySet()) {
        Map<String, ?> offset = partitionOffset.getValue();
        if (offset == null) {
            // We allow tombstones for anything; if there's garbage in the offsets for the connector, we don't
            // want to prevent users from being able to clean it up using the REST API
            continue;
        }
        if (!offset.containsKey(POSITION_FIELD)) {
            throw new ConnectException("Offset objects should either be null or contain the key '" + POSITION_FIELD + "'");
        }
        // The 'position' in the offset represents the position in the file's byte stream and should be a non-negative long value
        if (!(offset.get(POSITION_FIELD) instanceof Long)) {
            throw new ConnectException("The value for the '" + POSITION_FIELD + "' key in the offset is expected to be a Long value");
        }
        long offsetPosition = (Long) offset.get(POSITION_FIELD);
        if (offsetPosition < 0) {
            throw new ConnectException("The value for the '" + POSITION_FIELD + "' key in the offset should be a non-negative value");
        }
        // Partition checks run after the offset checks; null partitions are rejected.
        Map<String, ?> partition = partitionOffset.getKey();
        if (partition == null) {
            throw new ConnectException("Partition objects cannot be null");
        }
        if (!partition.containsKey(FILENAME_FIELD)) {
            throw new ConnectException("Partition objects should contain the key '" + FILENAME_FIELD + "'");
        }
    }
    // Let the task check whether the actual value for the offset position is valid for the configured file on startup
    return true;
}
|
/**
 * alterOffsets must reject a partition map missing FILENAME_FIELD and a
 * null partition key, both with ConnectException.
 */
@Test
public void testAlterOffsetsIncorrectPartitionKey() {
    assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, Collections.singletonMap(
        Collections.singletonMap("other_partition_key", FILENAME),
        Collections.singletonMap(POSITION_FIELD, 0L)
    )));
    // null partitions are invalid
    assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, Collections.singletonMap(
        null,
        Collections.singletonMap(POSITION_FIELD, 0L)
    )));
}
|
/**
 * Resolves candidate users from a comma-separated list of department ids:
 * looks the departments up and collects each department's leader user id.
 */
@Override
public Set<Long> calculateUsers(DelegateExecution execution, String param) {
    final Set<Long> deptIds = StrUtils.splitToLongSet(param);
    final List<DeptRespDTO> deptList = deptApi.getDeptList(deptIds);
    return convertSet(deptList, dept -> dept.getLeaderUserId());
}
|
/**
 * Departments 1 and 2 are mocked to have leaders 11 and 22; the strategy
 * must return exactly that leader set for param "1,2".
 */
@Test
public void testCalculateUsers() {
    // Prepare the input parameter
    String param = "1,2";
    // Mock the dept API: dept 1 -> leader 11, dept 2 -> leader 22
    DeptRespDTO dept1 = randomPojo(DeptRespDTO.class, o -> o.setLeaderUserId(11L));
    DeptRespDTO dept2 = randomPojo(DeptRespDTO.class, o -> o.setLeaderUserId(22L));
    when(deptApi.getDeptList(eq(asSet(1L, 2L)))).thenReturn(asList(dept1, dept2));
    // Invoke
    Set<Long> results = strategy.calculateUsers(null, param);
    // Assert
    assertEquals(asSet(11L, 22L), results);
}
|
/**
 * Compares two IP addresses under a CIDR prefix: returns true when the
 * first {@code prefixLength} bits match. Supports 4-byte (IPv4) and
 * 16-byte (IPv6) addresses; addresses of differing length never match.
 *
 * @throws IllegalArgumentException for any other address length
 */
public static boolean isMatchWithPrefix(final byte[] candidate, final byte[] expected, final int prefixLength)
{
    if (candidate.length != expected.length)
    {
        return false;
    }
    if (candidate.length == 4)
    {
        // IPv4: a single 32-bit mask covers the whole address.
        final int mask = prefixLengthToIpV4Mask(prefixLength);
        return (toInt(candidate) & mask) == (toInt(expected) & mask);
    }
    else if (candidate.length == 16)
    {
        // IPv6: split into two 64-bit halves; the upper mask covers the first
        // 64 prefix bits, the lower mask covers whatever extends beyond 64.
        final long upperMask = prefixLengthToIpV6Mask(min(prefixLength, 64));
        final long lowerMask = prefixLengthToIpV6Mask(max(prefixLength - 64, 0));
        return
            (upperMask & toLong(candidate, 0)) == (upperMask & toLong(expected, 0)) &&
            (lowerMask & toLong(candidate, 8)) == (lowerMask & toLong(expected, 8));
    }
    throw new IllegalArgumentException("how many bytes does an IP address have again?");
}
|
/**
 * A /24 prefix covers the first three bytes; the arrays differ in byte
 * three ('c' vs 'd'), so the match must fail.
 */
@Test
void shouldNotMatchIfNotAllBytesWithPrefixMatch()
{
    final byte[] a = { 'a', 'b', 'c', 'd' };
    final byte[] b = { 'a', 'b', 'd', 'd' };
    assertFalse(isMatchWithPrefix(a, b, 24));
}
|
/**
 * Delegates to {@code toString(false)} (the non-verbose form).
 */
@Override
public String toString() {
    return toString(false);
}
|
/**
 * With an empty version collection every client API must render as
 * "NAME(id): UNSUPPORTED" in the parenthesised, comma-separated listing.
 */
@Test
public void testUnsupportedVersionsToString() {
    NodeApiVersions versions = new NodeApiVersions(new ApiVersionCollection(), Collections.emptyList(), false);
    // Build the expected string in the same order ApiKeys.clientApis() yields.
    StringBuilder bld = new StringBuilder();
    String prefix = "(";
    for (ApiKeys apiKey : ApiKeys.clientApis()) {
        bld.append(prefix).append(apiKey.name).
            append("(").append(apiKey.id).append("): UNSUPPORTED");
        prefix = ", ";
    }
    bld.append(")");
    assertEquals(bld.toString(), versions.toString());
}
|
/**
 * Translates a raw HTTP error response into a BackgroundException via an
 * S3ServiceException carrying the status line, entity body, status code
 * and any Minio-specific error headers.
 */
public BackgroundException map(HttpResponse response) throws IOException {
    final S3ServiceException failure;
    if(null == response.getEntity()) {
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase());
    }
    else {
        // Buffer the entity first so it can be consumed here and remain
        // repeatable for any later reader of the response.
        EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
        failure = new S3ServiceException(response.getStatusLine().getReasonPhrase(),
            EntityUtils.toString(response.getEntity()));
    }
    failure.setResponseCode(response.getStatusLine().getStatusCode());
    // Minio reports error details in custom headers rather than the body.
    if(response.containsHeader(MINIO_ERROR_CODE)) {
        failure.setErrorCode(response.getFirstHeader(MINIO_ERROR_CODE).getValue());
    }
    if(response.containsHeader(MINIO_ERROR_DESCRIPTION)) {
        failure.setErrorMessage(response.getFirstHeader(MINIO_ERROR_DESCRIPTION).getValue());
    }
    return this.map(failure);
}
|
/**
 * A bare ServiceException with no detail must still produce the formatted
 * context message ("Listing directory / failed.").
 */
@Test
public void testEmpty() {
    assertEquals("Listing directory / failed.",
        new S3ExceptionMappingService().map("Listing directory {0} failed", new ServiceException(), new Path("/", EnumSet.of(Path.Type.directory))).getMessage());
}
|
/**
 * Returns the configured Elasticsearch hosts.
 * A defensive copy of the internal array is returned so callers cannot
 * mutate this config's state through the getter.
 */
public HttpHost[] getHttpHosts() {
    return httpHosts == null ? null : httpHosts.clone();
}
|
/**
 * A single URL with explicit scheme, host and port must be parsed into
 * exactly one HttpHost carrying all three components.
 */
@Test
public void setsSchemePortAndHost() {
    EsConfig esConfig = new EsConfig("https://somehost:1234");
    HttpHost[] httpHosts = esConfig.getHttpHosts();
    assertEquals(1, httpHosts.length);
    assertEquals("https", httpHosts[0].getSchemeName());
    assertEquals(1234, httpHosts[0].getPort());
    assertEquals("somehost", httpHosts[0].getHostName());
}
|
/**
 * Federated getContainers: validates the id formats, fans the request out
 * to every active sub-cluster concurrently, and merges all returned
 * containers into a single ContainersInfo. Failures are audited, counted in
 * the router metrics, and rethrown as runtime exceptions.
 *
 * @throws IllegalArgumentException on malformed appId/appAttemptId
 */
@Override
public ContainersInfo getContainers(HttpServletRequest req,
    HttpServletResponse res, String appId, String appAttemptId) {
  // Check that the appId/appAttemptId format is accurate
  try {
    RouterServerUtil.validateApplicationId(appId);
    RouterServerUtil.validateApplicationAttemptId(appAttemptId);
  } catch (IllegalArgumentException e) {
    RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINERS,
        UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
    routerMetrics.incrGetContainersFailedRetrieved();
    throw e;
  }
  try {
    long startTime = clock.getTime();
    ContainersInfo containersInfo = new ContainersInfo();
    Collection<SubClusterInfo> subClustersActive = federationFacade.getActiveSubClusters();
    Class[] argsClasses = new Class[]{
        HttpServletRequest.class, HttpServletResponse.class, String.class, String.class};
    Object[] args = new Object[]{req, res, appId, appAttemptId};
    ClientMethod remoteMethod = new ClientMethod("getContainers", argsClasses, args);
    // Fan out to all active sub-clusters and merge the per-cluster results.
    Map<SubClusterInfo, ContainersInfo> containersInfoMap =
        invokeConcurrent(subClustersActive, remoteMethod, ContainersInfo.class);
    if (containersInfoMap != null && !containersInfoMap.isEmpty()) {
      containersInfoMap.values().forEach(containers ->
          containersInfo.addAll(containers.getContainers()));
    }
    // NOTE(review): containersInfo is constructed above and can never be null
    // here, so this branch always executes when no exception was thrown.
    if (containersInfo != null) {
      long stopTime = clock.getTime();
      RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_CONTAINERS,
          TARGET_WEB_SERVICE);
      routerMetrics.succeededGetContainersRetrieved(stopTime - startTime);
      return containersInfo;
    }
  } catch (NotFoundException e) {
    routerMetrics.incrGetContainersFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINERS,
        UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowRunTimeException(e, "getContainers error, appId = %s, " +
        " appAttemptId = %s, Probably getActiveSubclusters error.", appId, appAttemptId);
  } catch (IOException | YarnException e) {
    routerMetrics.incrGetContainersFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINERS,
        UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowRunTimeException(e, "getContainers error, appId = %s, " +
        " appAttemptId = %s.", appId, appAttemptId);
  }
  routerMetrics.incrGetContainersFailedRetrieved();
  RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINERS,
      UNKNOWN, TARGET_WEB_SERVICE, "getContainers failed.");
  throw RouterServerUtil.logAndReturnRunTimeException(
      "getContainers failed, appId: %s, appAttemptId: %s.", appId, appAttemptId);
}
|
/**
 * Malformed application or attempt ids must fail fast with
 * IllegalArgumentException carrying the expected message.
 */
@Test
public void testGetContainersWrongFormat() throws Exception {
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationAttemptId appAttempt = ApplicationAttemptId.newInstance(appId, 1);
    // Test Case 1: appId is wrong format, appAttemptId is accurate.
    LambdaTestUtils.intercept(IllegalArgumentException.class,
        "Invalid ApplicationId prefix: Application_wrong_id. " +
        "The valid ApplicationId should start with prefix application",
        () -> interceptor.getContainers(null, null, "Application_wrong_id", appAttempt.toString()));
    // Test Case2: appId is accurate, appAttemptId is wrong format.
    LambdaTestUtils.intercept(IllegalArgumentException.class,
        "Invalid AppAttemptId prefix: AppAttempt_wrong_id",
        () -> interceptor.getContainers(null, null, appId.toString(), "AppAttempt_wrong_id"));
}
|
/**
 * Finds the index of the n-th occurrence of byte {@code b} in
 * {@code utf[start..]} (bounded by {@code length}), by repeatedly applying
 * {@code findByte} past each previous hit.
 *
 * @return index of the n-th occurrence, or the negative result of
 *         {@code findByte} when fewer than n occurrences exist
 */
public static int findNthByte(byte [] utf, int start, int length, byte b, int n) {
    int searchFrom = start;
    int found = -1;
    for (int occurrence = 0; occurrence < n; occurrence++) {
        found = findByte(utf, searchFrom, length, b);
        if (found < 0) {
            return found;
        }
        searchFrom = found + 1;
    }
    return found;
}
|
/**
 * 'l' occurs at indices 2, 3 and 10 in "Hello, world!": the 2nd occurrence
 * is at 3, a 4th does not exist (-1), and the whole-array overload finds
 * the 3rd at 10.
 */
@Test
public void testFindNthByte() {
    byte[] data = "Hello, world!".getBytes();
    assertEquals("Did not find 2nd occurrence of character 'l'", 3,
        UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 2));
    assertEquals("4th occurrence of character 'l' does not exist", -1,
        UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 4));
    assertEquals("Did not find 3rd occurrence of character 'l'", 10,
        UTF8ByteArrayUtils.findNthByte(data, (byte) 'l', 3));
}
|
/**
 * Example entry point: loads Apple Inc. close prices from CSV, computes a
 * 14-period EMA/standard deviation and the derived Bollinger bands, and
 * renders close price plus the upper/lower bands as a time-series chart.
 */
public static void main(String[] args) {
    /*
     * Getting bar series
     */
    BarSeries series = CsvBarsLoader.loadAppleIncSeries();
    /*
     * Creating indicators
     */
    // Close price
    ClosePriceIndicator closePrice = new ClosePriceIndicator(series);
    EMAIndicator avg14 = new EMAIndicator(closePrice, 14);
    StandardDeviationIndicator sd14 = new StandardDeviationIndicator(closePrice, 14);
    // Bollinger bands built on the 14-period EMA and standard deviation
    BollingerBandsMiddleIndicator middleBBand = new BollingerBandsMiddleIndicator(avg14);
    BollingerBandsLowerIndicator lowBBand = new BollingerBandsLowerIndicator(middleBBand, sd14);
    BollingerBandsUpperIndicator upBBand = new BollingerBandsUpperIndicator(middleBBand, sd14);
    /*
     * Building chart dataset
     */
    TimeSeriesCollection dataset = new TimeSeriesCollection();
    dataset.addSeries(buildChartBarSeries(series, closePrice, "Apple Inc. (AAPL) - NASDAQ GS"));
    dataset.addSeries(buildChartBarSeries(series, lowBBand, "Low Bollinger Band"));
    dataset.addSeries(buildChartBarSeries(series, upBBand, "High Bollinger Band"));
    /*
     * Creating the chart
     */
    JFreeChart chart = ChartFactory.createTimeSeriesChart("Apple Inc. 2013 Close Prices", // title
        "Date", // x-axis label
        "Price Per Unit", // y-axis label
        dataset, // data
        true, // create legend?
        true, // generate tooltips?
        false // generate URLs?
    );
    XYPlot plot = (XYPlot) chart.getPlot();
    DateAxis axis = (DateAxis) plot.getDomainAxis();
    axis.setDateFormatOverride(new SimpleDateFormat("yyyy-MM-dd"));
    /*
     * Displaying the chart
     */
    displayChart(chart);
}
|
/**
 * Smoke test: the example's main must run end to end without throwing.
 */
@Test
public void test() {
    IndicatorsToChart.main(null);
}
|
/**
 * Assigns a new predicate value: accepts {@code null}, a raw
 * {@link Predicate}, or another {@code PredicateFieldValue} (whose wrapped
 * predicate is taken). Any other type is rejected.
 *
 * @throws IllegalArgumentException for unsupported argument types
 */
@Override
public void assign(Object o) {
    if (o == null) {
        predicate = null;
        return;
    }
    if (o instanceof Predicate) {
        predicate = (Predicate) o;
        return;
    }
    if (o instanceof PredicateFieldValue) {
        predicate = ((PredicateFieldValue) o).predicate;
        return;
    }
    throw new IllegalArgumentException("Expected " + getClass().getName() + ", got " +
                                       o.getClass().getName() + ".");
}
|
/**
 * Assigning a plain Object must fail with an IllegalArgumentException
 * naming both the expected and the actual type.
 */
@Test
public void requireThatBadAssignThrows() {
    try {
        new PredicateFieldValue().assign(new Object());
        fail();
    } catch (IllegalArgumentException e) {
        assertEquals("Expected com.yahoo.document.datatypes.PredicateFieldValue, got java.lang.Object.",
                     e.getMessage());
    }
}
|
/**
 * Closes the stream: delegates to the superclass first, marks this stream
 * closed, then closes the underlying channel if one was opened.
 */
@Override
public void close() throws IOException {
    super.close();
    closed = true;
    if (channel != null) {
        channel.close();
    }
}
|
/**
 * Seeking on a closed stream must fail with IllegalStateException.
 */
@Test
public void testClose() throws Exception {
    BlobId blobId = BlobId.fromGsUtilUri("gs://bucket/path/to/closed.dat");
    SeekableInputStream closed =
        new GCSInputStream(storage, blobId, null, gcpProperties, MetricsContext.nullMetrics());
    closed.close();
    assertThatThrownBy(() -> closed.seek(0)).isInstanceOf(IllegalStateException.class);
}
|
/**
 * Accepts a pair of points only when BOTH the time gap and the distance
 * between them are small. Pairs with a large time delta are rejected
 * outright because the implied-speed computation would be numerically
 * unstable.
 */
@Override
public boolean test(Pair<Point, Point> pair) {
    final boolean smallTimeDelta = timeDeltaIsSmall(pair.first().time(), pair.second().time());
    return smallTimeDelta && distIsSmall(pair);
}
|
/**
 * Two points within the allowed time window but three times farther apart
 * than MAX_DISTANCE_IN_FEET must be rejected, in either pair order.
 */
@Test
public void testCase3() {
    DistanceFilter filter = newTestFilter();
    LatLong position1 = new LatLong(0.0, 0.0);
    // Distance (in NM) that is 3x over the filter's feet limit.
    double tooFarInNm = MAX_DISTANCE_IN_FEET * 3.0 / Spherical.feetPerNM();
    Point p1 = new PointBuilder()
        .latLong(position1)
        .time(Instant.EPOCH)
        .altitude(Distance.ofFeet(500.0))
        .build();
    Point p2 = new PointBuilder()
        .latLong(position1.projectOut(90.0, tooFarInNm)) //move the position
        .time(Instant.EPOCH.plusMillis(MAX_TIME_DELTA_IN_MILLISEC / 2))
        .altitude(Distance.ofFeet(500.0))
        .build();
    assertFalse(filter.test(Pair.of(p1, p2)));
    assertFalse(filter.test(Pair.of(p2, p1)));
}
|
/**
 * Maps a Flink logical type to its Python data converter via the
 * {@code LogicalTypeToDataConverter} visitor.
 */
public static DataConverter toDataConverter(LogicalType logicalType) {
    return logicalType.accept(new LogicalTypeToDataConverter());
}
|
/**
 * The INT converter must widen an internal int field to an external Long.
 */
@Test
void testLogicalTypeToDataConverter() {
    PythonTypeUtils.DataConverter converter = PythonTypeUtils.toDataConverter(new IntType());
    GenericRowData data = new GenericRowData(1);
    data.setField(0, 10);
    Object externalData = converter.toExternal(data, 0);
    assertThat(externalData).isInstanceOf(Long.class);
    assertThat(externalData).isEqualTo(10L);
}
|
/**
 * Computes the upload destination URL for {@code file} relative to
 * {@code rootPath}, using this plan's configured src and dest.
 */
public String destinationURL(File rootPath, File file) {
    return destinationURL(rootPath, file, getSrc(), getDest());
}
|
/**
 * With a "**" glob the matched sub-path (test/a/b) must be appended to the
 * configured destination folder.
 */
@Test
public void shouldProvideAppendFilePathToDestWhenUsingDoubleStart() {
    ArtifactPlan artifactPlan = new ArtifactPlan(ArtifactPlanType.file, "**/*/a.log", "logs");
    assertThat(artifactPlan.destinationURL(new File("pipelines/pipelineA"),
        new File("pipelines/pipelineA/test/a/b/a.log"))).isEqualTo("logs/test/a/b");
}
|
/**
 * Records an error on the span's mutable state under the state lock and
 * returns this span for chaining.
 */
@Override public Span error(Throwable throwable) {
    synchronized (state) {
        state.error(throwable);
    }
    return this;
}
|
/**
 * The reported span must carry the throwable itself and must NOT add an
 * "error" tag (tagging is left to the zipkin reporter/converter).
 */
@Test void error() {
    RuntimeException error = new RuntimeException("this cake is a lie");
    span.error(error);
    span.flush();
    assertThat(spans.get(0).error())
        .isSameAs(error);
    assertThat(spans.get(0).tags())
        .doesNotContainKey("error");
}
|
/**
 * Handles the PostgreSQL Close ('C') message: removes a prepared statement
 * from the session registry or closes a portal, then replies with
 * CloseComplete. Unknown close types are rejected.
 *
 * @throws UnsupportedSQLOperationException for an unrecognized close type
 */
@Override
public Collection<DatabasePacket> execute() throws SQLException {
    switch (packet.getType()) {
        case PREPARED_STATEMENT:
            connectionSession.getServerPreparedStatementRegistry().removePreparedStatement(packet.getName());
            break;
        case PORTAL:
            portalContext.close(packet.getName());
            break;
        default:
            throw new UnsupportedSQLOperationException(packet.getType().name());
    }
    return Collections.singleton(new PostgreSQLCloseCompletePacket());
}
|
/**
 * Closing a portal must delegate to the portal context and answer with a
 * single CloseComplete packet.
 */
@Test
void assertExecuteClosePortal() throws SQLException {
    when(packet.getType()).thenReturn(PostgreSQLComClosePacket.Type.PORTAL);
    String portalName = "C_1";
    when(packet.getName()).thenReturn(portalName);
    PostgreSQLComCloseExecutor closeExecutor = new PostgreSQLComCloseExecutor(portalContext, packet, connectionSession);
    Collection<DatabasePacket> actual = closeExecutor.execute();
    assertThat(actual.size(), is(1));
    assertThat(actual.iterator().next(), is(instanceOf(PostgreSQLCloseCompletePacket.class)));
    verify(portalContext).close(portalName);
}
|
/**
 * Extracts the data source name from a data-source-unit metadata node path
 * (e.g. {@code /metadata/<db>/data_sources/units/<name>}).
 *
 * @param path registry node path
 * @return the data source name, or empty when the path does not match
 */
public static Optional<String> getDataSourceNameByDataSourceUnitNode(final String path) {
    // NOTE(review): the pattern is recompiled on every call; it could be cached
    // only if getMetaDataNode() is constant for the process — verify before hoisting.
    Pattern pattern = Pattern.compile(getMetaDataNode() + DATABASE_DATA_SOURCES_NODE + DATA_SOURCE_UNITS_NODE + DATA_SOURCE_SUFFIX, Pattern.CASE_INSENSITIVE);
    Matcher matcher = pattern.matcher(path);
    // group(2) is the data source name; the group numbering comes from the
    // capture groups inside the node-path constants.
    return matcher.find() ? Optional.of(matcher.group(2)) : Optional.empty();
}
|
/**
 * A well-formed units path must yield the trailing data source name.
 */
@Test
void assertGetDataSourceNameByDataSourceUnitNode() {
    Optional<String> actual = DataSourceMetaDataNode.getDataSourceNameByDataSourceUnitNode("/metadata/logic_db/data_sources/units/foo_ds");
    assertTrue(actual.isPresent());
    assertThat(actual.get(), is("foo_ds"));
}
|
/**
 * Transforms the key and value through their respective delegates and pairs
 * the results. Fails with IllegalStateException if the processor context
 * was never initialized.
 */
@Override
public KeyValue<KOutT, GenericRow> transform(final KInT key, final GenericRow value) {
    // NOTE(review): the context Optional is unwrapped twice; hoisting it into a
    // local would avoid the duplication — needs the context element type name.
    return KeyValue.pair(
        keyDelegate.transform(
            key,
            value,
            context.orElseThrow(() -> new IllegalStateException("Not initialized"))
        ),
        valueDelegate.transform(
            key,
            value,
            context.orElseThrow(() -> new IllegalStateException("Not initialized"))
        )
    );
}
|
/**
 * The composite transformer must pair the key and value produced by the
 * inner delegates.
 */
@Test
public void shouldReturnValueFromInnerTransformer() {
    // When:
    final KeyValue<String, GenericRow> result = ksTransformer.transform(KEY, VALUE);
    // Then:
    assertThat(result, is(KeyValue.pair(RESULT_KEY, RESULT_VALUE)));
}
|
/**
 * Creates the next processor span from the trace context extracted from the
 * JMS message (clearing the trace id properties as a side effect). When no
 * upstream context was present and the span is sampled, the queue/topic is
 * tagged so the span remains searchable.
 */
public Span nextSpan(Message message) {
    TraceContextOrSamplingFlags extracted =
        extractAndClearTraceIdProperties(processorExtractor, message, message);
    Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler.
    // When an upstream context was not present, lookup keys are unlikely added
    if (extracted.context() == null && !result.isNoop()) {
        // simplify code by re-using an existing MessagingRequest impl
        tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result);
    }
    return result;
}
|
/**
 * Baggage properties must survive nextSpan(): only trace-id properties are
 * cleared from the message, not baggage.
 */
@Test void nextSpan_should_retain_baggage_headers() throws JMSException {
    message.setStringProperty(BAGGAGE_FIELD_KEY, "");
    jmsTracing.nextSpan(message);
    assertThat(message.getStringProperty(BAGGAGE_FIELD_KEY)).isEmpty();
}
|
/**
 * Builds an InitWriterConfig from environment variables: keeps only the
 * keys this config recognizes, then resolves them against the declared
 * config values.
 */
static InitWriterConfig fromMap(Map<String, String> map) {
    Map<String, String> supported = new HashMap<>(map);
    // Drop anything that is not a recognized InitWriter configuration key.
    supported.keySet().retainAll(InitWriterConfig.keyNames());
    return new InitWriterConfig(ConfigParameter.define(supported, CONFIG_VALUES));
}
|
/**
 * The node name is mandatory: removing it from the env map must make
 * fromMap fail with InvalidConfigurationException.
 */
@Test
public void testFromMapMissingNodeNameThrows() {
    Map<String, String> envVars = new HashMap<>(ENV_VARS);
    envVars.remove(InitWriterConfig.NODE_NAME.key());
    assertThrows(InvalidConfigurationException.class, () -> InitWriterConfig.fromMap(envVars));
}
|
/**
 * Returns the element at {@code index} from an arbitrary container:
 * {@link Map} (entry at iteration position), {@link List}, {@code Object[]},
 * {@link Iterator}, {@link Collection}, {@link Enumeration}, or any array
 * type via reflection.
 *
 * @throws IndexOutOfBoundsException if {@code index} is negative or past the end
 * @throws IllegalArgumentException  if {@code object} is {@code null} or not a supported container
 */
public static Object get(Object object, int index) {
    if (index < 0) {
        throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
    }
    // null fails every instanceof check, so rejecting it up front is equivalent.
    if (object == null) {
        throw new IllegalArgumentException("Unsupported object type: null");
    }
    if (object instanceof Map) {
        // A map's "index" addresses its entry iteration order.
        return get(((Map) object).entrySet().iterator(), index);
    }
    if (object instanceof List) {
        return ((List) object).get(index);
    }
    if (object instanceof Object[]) {
        return ((Object[]) object)[index];
    }
    if (object instanceof Iterator) {
        Iterator iterator = (Iterator) object;
        int remaining = index;
        while (iterator.hasNext()) {
            Object element = iterator.next();
            if (remaining == 0) {
                return element;
            }
            remaining--;
        }
        // Keep the historical message: index minus the number of elements consumed.
        throw new IndexOutOfBoundsException("Entry does not exist: " + remaining);
    }
    if (object instanceof Collection) {
        return get(((Collection) object).iterator(), index);
    }
    if (object instanceof Enumeration) {
        Enumeration enumeration = (Enumeration) object;
        int remaining = index;
        while (enumeration.hasMoreElements()) {
            Object element = enumeration.nextElement();
            if (remaining == 0) {
                return element;
            }
            remaining--;
        }
        throw new IndexOutOfBoundsException("Entry does not exist: " + remaining);
    }
    // Fall back to reflective array access for primitive arrays.
    try {
        return Array.get(object, index);
    } catch (IllegalArgumentException ex) {
        throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
    }
}
|
/**
 * Primitive int arrays must be readable through the reflective fallback.
 */
@Test
void testGetArray6() {
    assertEquals(1, CollectionUtils.get(new int[] {1, 2}, 0));
    assertEquals(2, CollectionUtils.get(new int[] {1, 2}, 1));
}
|
/**
 * Builds the config-module state snapshot: datasource platform, datasource
 * log flag, notification timeouts, health-check settings, and the capacity
 * management quotas/limits read from PropertyUtil.
 */
@Override
public ModuleState build() {
    ModuleState moduleState = new ModuleState(com.alibaba.nacos.api.common.Constants.Config.CONFIG_MODULE);
    moduleState.newState(Constants.DATASOURCE_PLATFORM_PROPERTY_STATE, DatasourcePlatformUtil.getDatasourcePlatform(""));
    moduleState.newState(Constants.NACOS_PLUGIN_DATASOURCE_LOG_STATE,
        EnvUtil.getProperty(CommonConstant.NACOS_PLUGIN_DATASOURCE_LOG, Boolean.class, false));
    moduleState.newState(PropertiesConstant.NOTIFY_CONNECT_TIMEOUT, PropertyUtil.getNotifyConnectTimeout());
    moduleState.newState(PropertiesConstant.NOTIFY_SOCKET_TIMEOUT, PropertyUtil.getNotifySocketTimeout());
    moduleState.newState(PropertiesConstant.IS_HEALTH_CHECK, PropertyUtil.isHealthCheck());
    moduleState.newState(PropertiesConstant.MAX_HEALTH_CHECK_FAIL_COUNT, PropertyUtil.getMaxHealthCheckFailCount());
    moduleState.newState(PropertiesConstant.MAX_CONTENT, PropertyUtil.getMaxContent());
    // Capacity management related states
    moduleState.newState(PropertiesConstant.IS_MANAGE_CAPACITY, PropertyUtil.isManageCapacity());
    moduleState.newState(PropertiesConstant.IS_CAPACITY_LIMIT_CHECK, PropertyUtil.isCapacityLimitCheck());
    moduleState.newState(PropertiesConstant.DEFAULT_CLUSTER_QUOTA, PropertyUtil.getDefaultClusterQuota());
    moduleState.newState(PropertiesConstant.DEFAULT_GROUP_QUOTA, PropertyUtil.getDefaultGroupQuota());
    moduleState.newState(PropertiesConstant.DEFAULT_MAX_SIZE, PropertyUtil.getDefaultMaxSize());
    moduleState.newState(PropertiesConstant.DEFAULT_MAX_AGGR_COUNT, PropertyUtil.getDefaultMaxAggrCount());
    moduleState.newState(PropertiesConstant.DEFAULT_MAX_AGGR_SIZE, PropertyUtil.getDefaultMaxAggrSize());
    return moduleState;
}
|
/**
 * Every state key produced by the builder must mirror the current value in
 * PropertyUtil (and the derby/datasource-log defaults of the test env).
 */
@Test
void testBuild() {
    ModuleState actual = new ConfigModuleStateBuilder().build();
    Map<String, Object> states = actual.getStates();
    assertEquals(PersistenceConstant.DERBY, states.get(Constants.DATASOURCE_PLATFORM_PROPERTY_STATE));
    assertTrue((Boolean) states.get(Constants.NACOS_PLUGIN_DATASOURCE_LOG_STATE));
    assertEquals(PropertyUtil.getNotifyConnectTimeout(), states.get(PropertiesConstant.NOTIFY_CONNECT_TIMEOUT));
    assertEquals(PropertyUtil.getNotifySocketTimeout(), states.get(PropertiesConstant.NOTIFY_SOCKET_TIMEOUT));
    assertEquals(PropertyUtil.isHealthCheck(), states.get(PropertiesConstant.IS_HEALTH_CHECK));
    assertEquals(PropertyUtil.getMaxHealthCheckFailCount(), states.get(PropertiesConstant.MAX_HEALTH_CHECK_FAIL_COUNT));
    assertEquals(PropertyUtil.getMaxContent(), states.get(PropertiesConstant.MAX_CONTENT));
    assertEquals(PropertyUtil.isManageCapacity(), states.get(PropertiesConstant.IS_MANAGE_CAPACITY));
    assertEquals(PropertyUtil.isCapacityLimitCheck(), states.get(PropertiesConstant.IS_CAPACITY_LIMIT_CHECK));
    assertEquals(PropertyUtil.getDefaultClusterQuota(), states.get(PropertiesConstant.DEFAULT_CLUSTER_QUOTA));
    assertEquals(PropertyUtil.getDefaultGroupQuota(), states.get(PropertiesConstant.DEFAULT_GROUP_QUOTA));
    assertEquals(PropertyUtil.getDefaultMaxSize(), states.get(PropertiesConstant.DEFAULT_MAX_SIZE));
    assertEquals(PropertyUtil.getDefaultMaxAggrCount(), states.get(PropertiesConstant.DEFAULT_MAX_AGGR_COUNT));
    assertEquals(PropertyUtil.getDefaultMaxAggrSize(), states.get(PropertiesConstant.DEFAULT_MAX_AGGR_SIZE));
}
|
/**
 * Rewrites DROP ... DELETE TOPIC statements: deletes the Kafka topic and the
 * key/value schema-registry subjects for the dropped source, then returns
 * the statement with the DELETE TOPIC clause stripped so downstream
 * execution only performs the metadata drop. Read-only sources are
 * protected; a missing source is only an error without IF EXISTS.
 */
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement) {
  if (!(statement.getStatement() instanceof DropStatement)) {
    return statement;
  }
  final DropStatement dropStatement = (DropStatement) statement.getStatement();
  if (!dropStatement.isDeleteTopic()) {
    return statement;
  }
  final SourceName sourceName = dropStatement.getName();
  final DataSource source = metastore.getSource(sourceName);
  if (source != null) {
    if (source.isSource()) {
      throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
    }
    checkTopicRefs(source);
    deleteTopic(source);
    // Closer runs both subject deletions even if the first one throws,
    // collecting failures into a single exception.
    final Closer closer = Closer.create();
    closer.register(() -> deleteKeySubject(source));
    closer.register(() -> deleteValueSubject(source));
    try {
      closer.close();
    } catch (final KsqlException e) {
      throw e;
    } catch (final Exception e) {
      throw new KsqlException(e);
    }
  } else if (!dropStatement.getIfExists()) {
    throw new KsqlException("Could not find source to delete topic for: " + statement);
  }
  // Re-emit the statement without the DELETE TOPIC clause.
  final T withoutDelete = (T) dropStatement.withoutDeleteClause();
  final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";
  return statement.withStatement(withoutDeleteText, withoutDelete);
}
|
@Test
public void shouldNotThrowIfSchemaIsMissing() throws IOException, RestClientException {
    // Given: an Avro key/value topic whose schema-registry subjects are absent —
    // deleteSubject answers 404 / error code 40401 ("Subject not found.").
    when(topic.getKeyFormat())
        .thenReturn(KeyFormat.of(FormatInfo.of(
            FormatFactory.AVRO.name(), ImmutableMap.of(ConnectProperties.FULL_SCHEMA_NAME, "foo")),
            SerdeFeatures.of(),
            Optional.empty()));
    when(topic.getValueFormat())
        .thenReturn(ValueFormat.of(FormatInfo.of(
            FormatFactory.AVRO.name(), ImmutableMap.of(ConnectProperties.FULL_SCHEMA_NAME, "foo")),
            SerdeFeatures.of()));
    doThrow(new RestClientException("Subject not found.", 404, 40401))
        .when(registryClient).deleteSubject(KsqlConstants.getSRSubject("something", true));
    doThrow(new RestClientException("Subject not found.", 404, 40401))
        .when(registryClient).deleteSubject(KsqlConstants.getSRSubject("something", false));
    // When / Then: injecting DROP ... DELETE TOPIC must not propagate the 404.
    deleteInjector.inject(DROP_WITH_DELETE_TOPIC);
}
|
/**
 * Reads an 8-byte long at the given index after validating bounds and
 * reference count via {@code checkIndex}.
 */
@Override
public long getLong(int index) {
    checkIndex(index, 8);
    return _getLong(index);
}
|
@Test
public void testGetLongAfterRelease() {
    // Reading from a released buffer must fail with IllegalReferenceCountException.
    // Lambda replaces the verbose anonymous Executable implementation.
    assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getLong(0));
}
|
/**
 * Parses the given command-line arguments into runtime options.
 * Convenience varargs overload delegating to the {@code List}-based variant.
 */
public RuntimeOptionsBuilder parse(String... args) {
    return parse(Arrays.asList(args));
}
|
@Test
void assigns_glue() {
    // "--glue somewhere" should be resolved to a classpath URI.
    RuntimeOptions options = parser
        .parse("--glue", "somewhere")
        .build();
    assertThat(options.getGlue(), contains(uri("classpath:/somewhere")));
}
|
/**
 * Merges {@code overrides} on top of {@code props}, dropping any override
 * whose key is black-listed (e.g. system-property prefixes).
 *
 * @return an immutable merged map
 */
public static Map<String, String> applyOverrides(
    final Map<String, String> props,
    final Properties overrides
) {
    final Map<String, String> result = new HashMap<>(props);
    result.putAll(filterByKey(asMap(overrides), NOT_BLACKLISTED));
    return ImmutableMap.copyOf(result);
}
|
@Test
public void shouldFilterBlackListedFromOverrides() {
    // For every black-listed prefix, an override starting with it must be
    // dropped while ordinary overrides survive the merge.
    Stream.of("java.", "os.", "sun.", "user.", "line.separator", "path.separator", "file.separator")
        .forEach(blackListed -> {
            // Given:
            final Properties overrides = properties(
                blackListed + "props.should.be.filtered", "unexpected",
                "should.not.be.filtered", "value"
            );
            // When:
            final Map<String, ?> result = PropertiesUtil.applyOverrides(emptyMap(), overrides);
            // Then:
            assertThat(result.keySet(), hasItem("should.not.be.filtered"));
            assertThat(result.keySet(), not(hasItem("props.should.be.filtered")));
        });
}
|
/**
 * Copies the metadata's enabled flag and weight onto the instance, then
 * stringifies every extend-data entry into the instance's metadata map.
 */
public static void updateInstanceMetadata(Instance instance, InstanceMetadata metadata) {
    instance.setEnabled(metadata.isEnabled());
    instance.setWeight(metadata.getWeight());
    metadata.getExtendData()
        .forEach((key, value) -> instance.getMetadata().put(key, value.toString()));
}
|
@Test
void testUpdateInstanceMetadata() {
    InstanceMetadata metaData = new InstanceMetadata();
    Map<String, Object> extendData = new ConcurrentHashMap<>(2);
    extendData.put("k1", "v1");
    extendData.put("k2", "v2");
    metaData.setExtendData(extendData);
    metaData.setEnabled(true);
    metaData.setWeight(1);
    Instance instance = InstanceUtil.parseToApiInstance(service, instancePublishInfo);
    InstanceUtil.updateInstanceMetadata(instance, metaData);
    // Assert on the UPDATED INSTANCE, not on the input metadata (the original
    // test only re-checked metaData.getExtendData().size(), which is trivially
    // true and proves nothing about the copy).
    assertNotNull(instance.getMetadata());
    assertEquals("v1", instance.getMetadata().get("k1"));
    assertEquals("v2", instance.getMetadata().get("k2"));
    assertTrue(instance.isEnabled());
}
|
/**
 * Delegates ordering to the shared {@code COMPARATOR} so that all migrations
 * sort by one consistent rule.
 */
@Override
public int compareTo(Migration that) {
    return COMPARATOR.compare(this, that);
}
|
@Test
public void testCompareTo() {
    final MigrationA a = new MigrationA();
    final MigrationA aa = new MigrationA();
    final MigrationB b = new MigrationB(); // same timestamp as A
    final MigrationC c = new MigrationC(); // oldest timestamp
    assertThat(a.compareTo(b)).isLessThan(0);
    assertThat(a.compareTo(aa)).isEqualTo(0);
    // compareTo only guarantees the SIGN of the result, never its magnitude,
    // so assert > 0 rather than the original (incorrect) > 1.
    assertThat(a.compareTo(c)).isGreaterThan(0);
    final List<Migration> sorted = Stream.of(c, b, a).sorted().toList();
    assertThat(sorted).containsExactly(c, a, b);
    final ImmutableSortedSet<Migration> set = ImmutableSortedSet.of(a, aa, b, c);
    assertThat(set).containsExactly(c, a, b);
}
|
/**
 * Validates content for incremental publish/delete operations.
 *
 * @param content content to check
 * @throws IllegalArgumentException if the content is null or empty, contains
 *         a carriage return or line feed, or contains the word-separator char
 */
public static void verifyIncrementPubContent(String content) {
    // isEmpty() is the idiomatic form of length() == 0.
    if (content == null || content.isEmpty()) {
        throw new IllegalArgumentException("publish/delete content can not be null");
    }
    // Hoist the loop-invariant separator lookup out of the scan.
    final char wordSeparator = Constants.WORD_SEPARATOR.charAt(0);
    for (int i = 0; i < content.length(); i++) {
        char c = content.charAt(i);
        if (c == '\r' || c == '\n') {
            throw new IllegalArgumentException("publish/delete content can not contain return and linefeed");
        }
        if (c == wordSeparator) {
            throw new IllegalArgumentException("publish/delete content can not contain(char)2");
        }
    }
}
|
@Test
void testVerifyIncrementPubContent() {
    // Valid content passes without throwing.
    ContentUtils.verifyIncrementPubContent("aabbb");
    // The original test only covered the happy path; also pin the rejection
    // cases: null, empty, and CR/LF-containing content.
    assertThrows(IllegalArgumentException.class, () -> ContentUtils.verifyIncrementPubContent(null));
    assertThrows(IllegalArgumentException.class, () -> ContentUtils.verifyIncrementPubContent(""));
    assertThrows(IllegalArgumentException.class, () -> ContentUtils.verifyIncrementPubContent("a\rb"));
    assertThrows(IllegalArgumentException.class, () -> ContentUtils.verifyIncrementPubContent("a\nb"));
}
|
/**
 * Programs INT (in-band network telemetry) behavior on the given device.
 * Edge devices (with connected hosts) take the SOURCE_SINK role, others
 * TRANSIT. Applies report config, source/sink ports, and INT objectives
 * according to the device's supported functionalities.
 *
 * @param deviceId device to program
 * @return true if config was successful (or nothing needed doing),
 *         false if programming failed and a clean-up is needed
 */
protected boolean configDevice(DeviceId deviceId) {
    // Returns true if config was successful, false if not and a clean up is
    // needed.
    final Device device = deviceService.getDevice(deviceId);
    if (device == null || !device.is(IntProgrammable.class)) {
        // Not an INT-capable device: nothing to do, treat as success.
        return true;
    }
    if (isNotIntConfigured()) {
        log.warn("Missing INT config, aborting programming of INT device {}", deviceId);
        return true;
    }
    // Role is decided by whether any host is attached to the device.
    final boolean isEdge = !hostService.getConnectedHosts(deviceId).isEmpty();
    final IntDeviceRole intDeviceRole =
        isEdge ? IntDeviceRole.SOURCE_SINK : IntDeviceRole.TRANSIT;
    log.info("Started programming of INT device {} with role {}...",
        deviceId, intDeviceRole);
    final IntProgrammable intProg = device.as(IntProgrammable.class);
    if (!isIntStarted()) {
        // Leave device with no INT configuration.
        return true;
    }
    if (!intProg.init()) {
        log.warn("Unable to init INT pipeline on {}", deviceId);
        return false;
    }
    boolean supportSource = intProg.supportsFunctionality(IntProgrammable.IntFunctionality.SOURCE);
    boolean supportSink = intProg.supportsFunctionality(IntProgrammable.IntFunctionality.SINK);
    boolean supportPostcard = intProg.supportsFunctionality(IntProgrammable.IntFunctionality.POSTCARD);
    if (intDeviceRole != IntDeviceRole.SOURCE_SINK && !supportPostcard) {
        // Stop here, no more configuration needed for transit devices unless it support postcard.
        return true;
    }
    if (supportSink || supportPostcard) {
        // Report configuration is needed by any device that emits reports.
        if (!intProg.setupIntConfig(intConfig.get())) {
            log.warn("Unable to apply INT report config on {}", deviceId);
            return false;
        }
    }
    // Port configuration: every port with a connected host becomes a
    // source and/or sink port, depending on what the device supports.
    final Set<PortNumber> hostPorts = deviceService.getPorts(deviceId)
        .stream()
        .map(port -> new ConnectPoint(deviceId, port.number()))
        .filter(cp -> !hostService.getConnectedHosts(cp).isEmpty())
        .map(ConnectPoint::port)
        .collect(Collectors.toSet());
    for (PortNumber port : hostPorts) {
        if (supportSource) {
            log.info("Setting port {}/{} as INT source port...", deviceId, port);
            if (!intProg.setSourcePort(port)) {
                log.warn("Unable to set INT source port {} on {}", port, deviceId);
                return false;
            }
        }
        if (supportSink) {
            log.info("Setting port {}/{} as INT sink port...", deviceId, port);
            if (!intProg.setSinkPort(port)) {
                log.warn("Unable to set INT sink port {} on {}", port, deviceId);
                return false;
            }
        }
    }
    if (!supportSource && !supportPostcard) {
        // Stop here, no more configuration needed for sink devices unless
        // it supports postcard mode.
        return true;
    }
    // Apply intents.
    // This is a trivial implementation where we simply get the
    // corresponding INT objective from an intent and we apply to all
    // device which support reporting.
    int appliedCount = 0;
    for (Versioned<IntIntent> versionedIntent : intentMap.values()) {
        IntIntent intent = versionedIntent.value();
        IntObjective intObjective = getIntObjective(intent);
        if (intent.telemetryMode() == IntIntent.TelemetryMode.INBAND_TELEMETRY && supportSource) {
            intProg.addIntObjective(intObjective);
            appliedCount++;
        } else if (intent.telemetryMode() == IntIntent.TelemetryMode.POSTCARD && supportPostcard) {
            intProg.addIntObjective(intObjective);
            appliedCount++;
        } else {
            log.warn("Device {} does not support intent {}.", deviceId, intent);
        }
    }
    log.info("Completed programming of {}, applied {} INT objectives of {} total",
        deviceId, appliedCount, intentMap.size());
    return true;
}
|
@Test
public void testConfigSinkDevice() {
    // A device that only supports SINK functionality: configDevice should set
    // each host-facing port (1 and 2) as a sink port and succeed.
    reset(deviceService, hostService);
    Device device = getMockDevice(true, DEVICE_ID);
    IntProgrammable intProg = getMockIntProgrammable(false, false, true, false);
    setUpDeviceTest(device, intProg, true, true);
    expect(intProg.setSinkPort(PortNumber.portNumber(1))).andReturn(true).once();
    expect(intProg.setSinkPort(PortNumber.portNumber(2))).andReturn(true).once();
    replay(deviceService, hostService, device, intProg);
    installTestIntents();
    assertTrue(manager.configDevice(DEVICE_ID));
    // verify() checks that both setSinkPort expectations were exercised.
    verify(intProg);
}
|
/**
 * Looks up the partition assigned to the given facility; an unmapped
 * facility yields an empty Optional.
 */
@Override
public Optional<Integer> partitionFor(Facility facility) {
    final Integer partition = facilityToPartitionMapping.get(facility);
    return Optional.ofNullable(partition);
}
|
@Test
public void partitionForWorksForEveryFacility() {
    FacilityPartitionMapping mapping = sampleMapping();
    for (Facility facility : Facility.values()) {
        Optional<Integer> partition = mapping.partitionFor(facility);
        // Assert presence explicitly: the original called get() unchecked, so a
        // missing mapping died with a bare NoSuchElementException instead of a
        // descriptive assertion failure.
        assertTrue(partition.isPresent(), "Every facility should have a partition: " + facility);
        assertTrue(partition.get() >= 0, "All partition numbers should be non-negative");
    }
}
|
/**
 * Attempts to delete a cgroup directory, but only if its "tasks" file is
 * empty, i.e. no processes remain in the cgroup.
 *
 * @param cgf the cgroup directory to delete
 * @return true if the directory was actually deleted
 * @throws InterruptedException if interrupted while sleeping before deletion
 */
@VisibleForTesting
boolean checkAndDeleteCgroup(File cgf) throws InterruptedException {
    boolean deleted = false;
    // (removed a leftover commented-out declaration; try-with-resources owns the stream)
    try (FileInputStream in = new FileInputStream(cgf + "/tasks")) {
        if (in.read() == -1) {
            /*
             * "tasks" file is empty, sleep a bit more and then try to delete the
             * cgroup. Some versions of linux will occasionally panic due to a race
             * condition in this area, hence the paranoia.
             */
            Thread.sleep(deleteCgroupDelay);
            deleted = cgf.delete();
            if (!deleted) {
                LOG.warn("Failed attempt to delete cgroup: " + cgf);
            }
        } else {
            // Cgroup still has members; log one of them for diagnostics.
            logLineFromTasksFile(cgf);
        }
    } catch (IOException e) {
        LOG.warn("Failed to read cgroup tasks file. ", e);
    }
    return deleted;
}
|
@Test
public void testcheckAndDeleteCgroup() throws Exception {
    CgroupsLCEResourcesHandler handler = new CgroupsLCEResourcesHandler();
    handler.setConf(new YarnConfiguration());
    handler.initConfig();
    FileUtils.deleteQuietly(cgroupDir);
    // Test 0
    // tasks file not present, should return false
    Assert.assertFalse(handler.checkAndDeleteCgroup(cgroupDir));
    File tfile = new File(cgroupDir.getAbsolutePath(), "tasks");
    FileOutputStream fos = FileUtils.openOutputStream(tfile);
    // Spy lets us stub delete() without actually removing the directory.
    File fspy = Mockito.spy(cgroupDir);
    // Test 1, tasks file is empty
    // tasks file has no data, should return true
    Mockito.when(fspy.delete()).thenReturn(true);
    Assert.assertTrue(handler.checkAndDeleteCgroup(fspy));
    // Test 2, tasks file has data
    fos.write("1234".getBytes());
    fos.close();
    // tasks has data, would not be able to delete, should return false
    Assert.assertFalse(handler.checkAndDeleteCgroup(fspy));
    FileUtils.deleteQuietly(cgroupDir);
}
|
/**
 * Tells whether this task can consume from the given endpoint URL
 * (must match the task's endpoint pattern).
 */
public static boolean acceptEndpoint(String endpointUrl) {
    if (endpointUrl == null) {
        return false;
    }
    return endpointUrl.matches(ENDPOINT_PATTERN_STRING);
}
|
@Test
void testAcceptEndpoint() {
    // acceptEndpoint is static, so the original's unused specification/task
    // locals are gone, as is a stray empty statement (';').
    assertTrue(KafkaMessageConsumptionTask.acceptEndpoint("kafka://localhost/testTopic"));
    assertTrue(KafkaMessageConsumptionTask.acceptEndpoint("kafka://localhost:9092/testTopic"));
    assertTrue(
        KafkaMessageConsumptionTask.acceptEndpoint("kafka://localhost/testTopic?securityProtocol=SASL_PLAINTEXT"));
    assertTrue(KafkaMessageConsumptionTask
        .acceptEndpoint("kafka://localhost:9094/testTopic?securityProtocol=SASL_PLAINTEXT"));
    assertTrue(KafkaMessageConsumptionTask.acceptEndpoint(
        "kafka://my-cluster-kafka-bootstrap-kafka.apps.cluster-943b.943b.example.com:443/UsersignedupAPI_0.1.2_user-signedup"));
}
|
/**
 * Tallies one error occurrence per described group in the response.
 */
@Override
public Map<Errors, Integer> errorCounts() {
    final Map<Errors, Integer> counts = new HashMap<>();
    for (ConsumerGroupDescribeResponseData.DescribedGroup group : data.groups()) {
        updateErrorCounts(counts, Errors.forCode(group.errorCode()));
    }
    return counts;
}
|
@Test
void testErrorCounts() {
    // Two groups carrying the same error code must be counted as 2 for that
    // error, and absent errors must map to null (not zero).
    Errors e = Errors.INVALID_GROUP_ID;
    int errorCount = 2;
    ConsumerGroupDescribeResponseData data = new ConsumerGroupDescribeResponseData();
    for (int i = 0; i < errorCount; i++) {
        data.groups().add(
            new ConsumerGroupDescribeResponseData.DescribedGroup()
                .setErrorCode(e.code())
        );
    }
    ConsumerGroupDescribeResponse response = new ConsumerGroupDescribeResponse(data);
    Map<Errors, Integer> counts = response.errorCounts();
    assertEquals(errorCount, counts.get(e));
    assertNull(counts.get(Errors.COORDINATOR_NOT_AVAILABLE));
}
|
/**
 * Resolves the response format from the request's Accept header:
 * JSON only when the header explicitly mentions it, XML otherwise
 * (including when the header is absent).
 */
@VisibleForTesting
static String parseAcceptHeader(HttpServletRequest request) {
    final String accept = request.getHeader(HttpHeaders.ACCEPT);
    if (accept == null) {
        return FORMAT_XML;
    }
    return accept.contains(FORMAT_JSON) ? FORMAT_JSON : FORMAT_XML;
}
|
@Test
public void testParseHeaders() throws Exception {
    // Accept-header value -> expected format. Iterate entries instead of
    // keySet+get, and fix the 'contenTypeActual' typo.
    HashMap<String, String> verifyMap = new HashMap<String, String>();
    verifyMap.put("text/plain", ConfServlet.FORMAT_XML);
    verifyMap.put(null, ConfServlet.FORMAT_XML);
    verifyMap.put("text/xml", ConfServlet.FORMAT_XML);
    verifyMap.put("application/xml", ConfServlet.FORMAT_XML);
    verifyMap.put("application/json", ConfServlet.FORMAT_JSON);
    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
    for (Map.Entry<String, String> entry : verifyMap.entrySet()) {
        String acceptHeader = entry.getKey();
        String expectedFormat = entry.getValue();
        Mockito.when(request.getHeader(HttpHeaders.ACCEPT))
            .thenReturn(acceptHeader);
        assertEquals(expectedFormat,
            ConfServlet.parseAcceptHeader(request));
    }
}
|
/**
 * Returns the distinct metric names present in this bucket.
 */
public Collection<String> getAllMetricNames() {
    return values.keySet().stream()
        .map(Identifier::getName)
        .collect(Collectors.toSet());
}
|
@Test
final void testGetAllMetricNames() {
    // Two metrics with unique dimensions -> exactly their two names, no dupes.
    twoMetricsUniqueDimensions();
    Collection<String> names = bucket.getAllMetricNames();
    assertEquals(2, names.size());
    assertTrue(names.contains("nalle"));
    assertTrue(names.contains("nalle2"));
}
|
/**
 * Parses a colon-separated MAC address string ("aa:bb:cc:dd:ee:ff")
 * into a {@code MacAddress}.
 *
 * @throws IllegalArgumentException if the string is not 12 hex digits
 *         separated pairwise by colons
 */
public static MacAddress valueOf(final String address) {
    if (!isValid(address)) {
        throw new IllegalArgumentException(
            "Specified MAC Address must contain 12 hex digits"
                + " separated pairwise by :'s.");
    }
    // isValid guarantees exactly MAC_ADDRESS_LENGTH colon-separated octets.
    final byte[] bytes = new byte[MacAddress.MAC_ADDRESS_LENGTH];
    int index = 0;
    for (final String octet : address.split(":")) {
        bytes[index++] = (byte) Integer.parseInt(octet, 16);
    }
    return new MacAddress(bytes);
}
|
// A malformed address string must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testValueOfInvalidString() throws Exception {
    MacAddress.valueOf(INVALID_STR);
}
|
/**
 * Processes the messages under the execution timer, attaching a rule-metrics
 * listener only when the latest pipeline state has rule metrics enabled.
 */
@Override
public Messages process(Messages messages) {
    try (Timer.Context ignored = executionTime.time()) {
        final State state = stateUpdater.getLatestState();
        if (!state.enableRuleMetrics()) {
            return process(messages, new NoopInterpreterListener(), state);
        }
        return process(messages, new RuleMetricsListener(metricRegistry), state);
    }
}
|
@Test
public void testMatchEitherStopsIfNoRuleMatched() {
    // Stage 0 ("match either") contains only a rule that evaluates to false,
    // so the pipeline must stop before stage 1 ever runs add_foobar.
    final RuleService ruleService = mock(MongoDbRuleService.class);
    when(ruleService.loadAll()).thenReturn(ImmutableList.of(RULE_TRUE, RULE_FALSE, RULE_ADD_FOOBAR));
    final PipelineService pipelineService = mock(MongoDbPipelineService.class);
    when(pipelineService.loadAll()).thenReturn(Collections.singleton(
        PipelineDao.create("p1", "title", "description",
            "pipeline \"pipeline\"\n" +
                "stage 0 match either\n" +
                " rule \"false\";\n" +
                "stage 1 match either\n" +
                " rule \"add_foobar\";\n" +
                "end\n",
            Tools.nowUTC(),
            null)
    ));
    final Map<String, Function<?>> functions = ImmutableMap.of(SetField.NAME, new SetField());
    final PipelineInterpreter interpreter = createPipelineInterpreter(ruleService, pipelineService, functions);
    final Messages processed = interpreter.process(messageInDefaultStream("message", "test"));
    final List<Message> messages = ImmutableList.copyOf(processed);
    assertThat(messages).hasSize(1);
    final Message actualMessage = messages.get(0);
    // "foobar" absent proves stage 1 never executed.
    assertThat(actualMessage.hasField("foobar")).isFalse();
}
|
/**
 * Returns the cache entries keyed by entry key.
 * NOTE(review): this exposes the internal map directly, so callers can
 * mutate it — confirm whether an unmodifiable view is intended.
 */
public Map<String, ByteString> getEntries() {
    return map;
}
|
@Test
public void constructor_processes_entries() {
    // Building SensorCacheData from a list of entries must expose each
    // entry's key->data pair in getEntries().
    SensorCacheEntry entry1 = SensorCacheEntry.newBuilder().setKey("key1").setData(ByteString.copyFrom("data1", UTF_8)).build();
    SensorCacheEntry entry2 = SensorCacheEntry.newBuilder().setKey("key2").setData(ByteString.copyFrom("data2", UTF_8)).build();
    SensorCacheData data = new SensorCacheData(List.of(entry1, entry2));
    assertThat(data.getEntries()).containsExactly(entry(entry1.getKey(), entry1.getData()), entry(entry2.getKey(), entry2.getData()));
}
|
/**
 * Forwarding is not supported in this context.
 *
 * @throws StreamsException always, carrying the fixed explanation message
 */
@Override
public <K, V> void forward(final K key, final V value) {
    throw new StreamsException(EXPLANATION);
}
|
@Test
public void shouldThrowOnForwardWithTo() {
    // The To-qualified forward overload must also be rejected in this context.
    assertThrows(StreamsException.class, () -> context.forward("key", "value", To.all()));
}
|
/**
 * Evaluates the given authentication context via the configured strategy.
 * A null context is silently ignored.
 */
public void evaluate(AuthenticationContext context) {
    if (context != null) {
        this.authenticationStrategy.evaluate(context);
    }
}
|
@Test
public void evaluate3() {
    // NOTE(review): skipped on macOS — presumably an environment-specific
    // issue with the auth backend; confirm and document the actual reason.
    if (MixAll.isMac()) {
        return;
    }
    // A user exists, but the provided signature does not validate, so the
    // evaluator is expected to throw AuthenticationException.
    User user = User.of("test", "test");
    this.authenticationMetadataManager.createUser(user);
    DefaultAuthenticationContext context = new DefaultAuthenticationContext();
    context.setRpcCode("11");
    context.setUsername("test");
    context.setContent("test".getBytes(StandardCharsets.UTF_8));
    context.setSignature("test");
    Assert.assertThrows(AuthenticationException.class, () -> this.evaluator.evaluate(context));
}
|
/**
 * Updates a task row, but skips the write when the task content is unchanged
 * (same checksum as stored) AND the last update happened within
 * {@code maxTaskUpdateInterval}. The computed checksum is stashed in the
 * task's workerId field before delegating to the superclass update.
 */
@Override
public void updateTask(Task task) {
    // Stored (checksum, updateTime) pair for this task, or null if absent.
    Map.Entry<String, Long> taskInDb = getTaskChecksumAndUpdateTime(task.getTaskId());
    String taskCheckSum = computeChecksum(task);
    if (taskInDb != null) {
        long updateInterval = task.getUpdateTime() - taskInDb.getValue();
        // Throttle: identical content updated recently -> no-op.
        if (taskCheckSum.equals(taskInDb.getKey()) && updateInterval < maxTaskUpdateInterval) {
            LOG.debug(
                "task has the same checksum and update interval {} is less than max interval {} millis and skip update",
                updateInterval,
                maxTaskUpdateInterval);
            return;
        }
        LOG.info(
            "update task [{}] with checksum=[{}] with an update interval=[{}]",
            task.getTaskId(),
            taskCheckSum,
            updateInterval);
    }
    // workerId doubles as checksum storage for the next comparison.
    task.setWorkerId(taskCheckSum);
    super.updateTask(task);
}
|
@Test
public void testUpdateTaskMissingChecksum() {
    // Baseline: an update through the plain executionDao stores no checksum
    // (workerId stays null).
    executionDao.updateTask(task);
    Task actual = maestroExecutionDao.getTask(TEST_TASK_ID);
    assertNull(actual.getWorkerId());
    assertEquals(0, actual.getPollCount());
    assertEquals(0, actual.getUpdateTime());
    // should update DB if there is no checksum
    maestroExecutionDao.updateTask(task);
    actual = maestroExecutionDao.getTask(TEST_TASK_ID);
    // The maestro DAO writes the computed checksum into workerId and the
    // update bumps the poll count.
    assertEquals("b1a2db354f803423e990fad1b9265b6f", actual.getWorkerId());
    assertEquals(1, actual.getPollCount());
    assertEquals(0, actual.getUpdateTime());
}
|
/**
 * Creates a key via the next provider in the load-balancing rotation;
 * {@code doOp} handles provider selection and failover.
 */
@Override
public KeyVersion createKey(final String name, final byte[] material,
    final Options options) throws IOException {
    // Lambda form of the single-method ProviderCallable.
    return doOp(
        (KMSClientProvider provider) -> provider.createKey(name, material, options),
        nextIdx(), false);
}
|
@Test
public void testClientRetriesWithAuthenticationExceptionWrappedinIOException()
    throws Exception {
    // Both providers fail with an IOException wrapping AuthenticationException;
    // the load balancer must surface the auth failure and try each provider
    // exactly once (auth errors are not retried beyond the failover).
    Configuration conf = new Configuration();
    conf.setInt(
        CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
    KMSClientProvider p1 = mock(KMSClientProvider.class);
    when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
        .thenThrow(new IOException(new AuthenticationException("p1")));
    KMSClientProvider p2 = mock(KMSClientProvider.class);
    // NOTE(review): the message here says "p1" — likely a copy-paste; it is
    // never asserted, so behavior is unaffected.
    when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
        .thenThrow(new IOException(new AuthenticationException("p1")));
    when(p1.getKMSUrl()).thenReturn("p1");
    when(p2.getKMSUrl()).thenReturn("p2");
    LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
        new KMSClientProvider[] {p1, p2}, 0, conf);
    try {
        kp.createKey("test3", new Options(conf));
        fail("Should fail since provider p1 threw AuthenticationException");
    } catch (Exception e) {
        assertTrue(e.getCause() instanceof AuthenticationException);
    }
    verify(p1, Mockito.times(1)).createKey(Mockito.eq("test3"),
        Mockito.any(Options.class));
    verify(p2, Mockito.times(1)).createKey(Mockito.eq("test3"),
        Mockito.any(Options.class));
}
|
/**
 * Finds the attributes of the given file by listing its parent and matching
 * the file within the results. Roots and containers have no attributes of
 * their own and yield {@code PathAttributes.EMPTY}.
 *
 * @throws NotfoundException if the file does not appear in the parent listing
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if (file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if (new DefaultPathContainerService().isContainer(file)) {
        return PathAttributes.EMPTY;
    }
    // For placeholders, query by the name without its extension.
    final Path query;
    if (file.isPlaceholder()) {
        query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes());
    }
    else {
        query = file;
    }
    // Shared drives need the dedicated team-drive listing service.
    final AttributedList<Path> list;
    if (new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) {
        list = new DriveTeamDrivesListService(session, fileid).list(file.getParent(), listener);
    }
    else {
        list = new FileidDriveListService(session, fileid, query).list(file.getParent(), listener);
    }
    // Match honoring the session's case sensitivity.
    final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(session.getCaseSensitivity(), file));
    if (null == found) {
        throw new NotfoundException(file.getAbsolute());
    }
    return found.attributes();
}
|
@Test
public void testDuplicatesWithSameName() throws Exception {
    // Two files created with the same name (the first trashed in between)
    // must resolve to distinct attributes, and the trashed one must be
    // reported as hidden while the re-uploaded one is not.
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final Path folder = new DriveDirectoryFeature(session, fileid).mkdir(
        new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final String name = new AlphanumericRandomStringService().random();
    final Path version1 = new DriveTouchFeature(session, fileid).touch(
        new Path(folder, name, EnumSet.of(Path.Type.file)), new TransferStatus());
    final DriveAttributesFinderFeature f = new DriveAttributesFinderFeature(session, fileid);
    assertEquals(version1.attributes(), f.find(version1));
    final AttributedList<Path> listBeforeDelete = new DriveListService(session, fileid).list(folder, new DisabledListProgressListener());
    assertTrue(listBeforeDelete.contains(version1));
    assertFalse(listBeforeDelete.find(new DefaultPathPredicate(version1)).attributes().isHidden());
    // Trashing keeps the entry in the listing, but flagged hidden.
    new DriveTrashFeature(session, fileid).delete(Collections.singletonList(new Path(version1)), new DisabledLoginCallback(), new Delete.DisabledCallback());
    final AttributedList<Path> listAfterDelete = new DriveListService(session, fileid).list(folder, new DisabledListProgressListener());
    assertTrue(listAfterDelete.contains(version1));
    assertTrue(listAfterDelete.find(new DefaultPathPredicate(version1)).attributes().isHidden());
    // Re-create a file with the identical name: must be a distinct version.
    final Path version2 = new DriveTouchFeature(session, fileid).touch(
        new Path(folder, name, EnumSet.of(Path.Type.file)), new TransferStatus());
    assertNotEquals(f.find(version1), f.find(version2));
    final AttributedList<Path> listAfterReupload = new DriveListService(session, fileid).list(folder, new DisabledListProgressListener());
    assertTrue(listAfterReupload.contains(version2));
    assertFalse(listAfterReupload.find(new DefaultPathPredicate(version2)).attributes().isHidden());
    new DriveDeleteFeature(session, fileid).delete(Arrays.asList(version1, version2, folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.