| focal_method<br>(string, length 13–60.9k) | test_case<br>(string, length 25–109k) |
|---|---|
/**
 * Returns the dataset's schemas sorted so that a schema appears after the
 * schemas it depends on (e.g. a type referencing TypeB is listed after TypeB).
 *
 * @param dataset the dataset whose schemas are to be ordered
 * @return the dependency-ordered schema list
 */
public static List<HollowSchema> dependencyOrderedSchemaList(HollowDataset dataset) {
    List<HollowSchema> allSchemas = dataset.getSchemas();
    return dependencyOrderedSchemaList(allSchemas);
}
|
// Verifies dependency ordering still works when a referenced type (TypeC)
// has no schema definition of its own: the missing dependency is tolerated,
// and TypeB (depended on by TypeA) must still sort before TypeA.
@Test
public void sortsSchemasEvenIfDependencyTypesNotPresent() throws IOException {
String schemasText = "TypeA { TypeB b; }"
+ "TypeB { TypeC c; }";
List<HollowSchema> schemas = HollowSchemaParser.parseCollectionOfSchemas(schemasText);
List<HollowSchema> sortedSchemas = HollowSchemaSorter.dependencyOrderedSchemaList(schemas);
// Only the two defined schemas are returned; TypeC is not synthesized.
Assert.assertEquals(2, sortedSchemas.size());
Assert.assertEquals("TypeB", sortedSchemas.get(0).getName());
Assert.assertEquals("TypeA", sortedSchemas.get(1).getName());
}
|
// Applies retry behavior to the intercepted method's reactive return value:
// proceeds with the join point, then decorates the returned RxJava2 type
// with a RetryTransformer built from the given Retry configuration.
// NOTE(review): methodName is unused in this implementation — presumably
// required by the overridden interface signature; confirm against it.
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, Retry retry, String methodName)
throws Throwable {
RetryTransformer<?> retryTransformer = RetryTransformer.of(retry);
Object returnValue = proceedingJoinPoint.proceed();
return executeRxJava2Aspect(retryTransformer, returnValue);
}
|
// Checks that the aspect handles RxJava2 return types (Single and Flowable)
// without returning null.
// NOTE(review): the method is named testReactorTypes but exercises RxJava2
// types via rxJava2RetryAspectExt — the name looks like a copy/paste from a
// Reactor variant of this test; consider renaming upstream.
@Test
public void testReactorTypes() throws Throwable {
Retry retry = Retry.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
assertThat(rxJava2RetryAspectExt.handle(proceedingJoinPoint, retry, "testMethod"))
.isNotNull();
when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
assertThat(rxJava2RetryAspectExt.handle(proceedingJoinPoint, retry, "testMethod"))
.isNotNull();
}
|
// Sandbox-aware class loading. Under the per-name class-loading lock:
// 1) return the class if this loader already defined it;
// 2) refuse to load anything once the loader has been closed;
// 3) if config says this class should be acquired, instrument and define it
//    here (timed via PerfStatsCollector); otherwise delegate to the parent;
// 4) link the class only when the caller asked for resolution.
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
synchronized (getClassLoadingLock(name)) {
Class<?> loadedClass = findLoadedClass(name);
if (loadedClass != null) {
return loadedClass;
}
if (isClosed) {
throw new ClassNotFoundException("This ClassLoader is closed");
}
if (config.shouldAcquire(name)) {
// Classes under sandbox control are (possibly) instrumented before definition.
loadedClass =
PerfStatsCollector.getInstance()
.measure("load sandboxed class", () -> maybeInstrumentClass(name));
} else {
// Everything else is loaded untouched by the parent loader.
loadedClass = getParent().loadClass(name);
}
if (resolve) {
resolveClass(loadedClass);
}
return loadedClass;
}
}
|
// Verifies that an instrumented method with an array return type is routed
// through the class handler (which supplies the return value) and that both
// the constructor and the method invocation are recorded in the transcript.
@Test
public void shouldHandleMethodsReturningArray() throws Exception {
Class<?> exampleClass = loadClass(AClassWithMethodReturningArray.class);
classHandler.valueToReturn = new String[] {"miao, mieuw"};
Method directMethod = exampleClass.getMethod("normalMethodReturningArray");
directMethod.setAccessible(true);
Object exampleInstance = exampleClass.getDeclaredConstructor().newInstance();
// Instantiation alone should only record the instrumented constructor call.
assertThat(transcript)
.containsExactly("methodInvoked: AClassWithMethodReturningArray.__constructor__()");
transcript.clear();
// The handler-provided array is returned unchanged through reflection.
assertArrayEquals(
new String[] {"miao, mieuw"}, (String[]) directMethod.invoke(exampleInstance));
assertThat(transcript)
.containsExactly(
"methodInvoked: AClassWithMethodReturningArray.normalMethodReturningArray()");
}
|
// Translates a Beam pipeline into a Flink batch or streaming job.
// Chooses the execution mode (forcing streaming when any unbounded
// PCollection is detected), prepares files to stage, creates the matching
// Flink execution environment, applies transform overrides, then runs the
// pipeline through the selected translator.
public void translate(Pipeline pipeline) {
this.flinkBatchEnv = null;
this.flinkStreamEnv = null;
final boolean hasUnboundedOutput =
PipelineTranslationModeOptimizer.hasUnboundedOutput(pipeline);
if (hasUnboundedOutput) {
LOG.info("Found unbounded PCollection. Switching to streaming execution.");
options.setStreaming(true);
}
// Staged files need to be set before initializing the execution environments
prepareFilesToStageForRemoteClusterExecution(options);
FlinkPipelineTranslator translator;
if (options.isStreaming() || options.getUseDataStreamForBatch()) {
this.flinkStreamEnv = FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);
// Unbounded sources need checkpointing for correctness; warn rather than fail.
if (hasUnboundedOutput && !flinkStreamEnv.getCheckpointConfig().isCheckpointingEnabled()) {
LOG.warn(
"UnboundedSources present which rely on checkpointing, but checkpointing is disabled.");
}
translator =
new FlinkStreamingPipelineTranslator(flinkStreamEnv, options, options.isStreaming());
if (!options.isStreaming()) {
// DataStream API used for a bounded pipeline: run it in Flink's BATCH runtime mode.
flinkStreamEnv.setRuntimeMode(RuntimeExecutionMode.BATCH);
}
} else {
this.flinkBatchEnv = FlinkExecutionEnvironments.createBatchExecutionEnvironment(options);
translator = new FlinkBatchPipelineTranslator(flinkBatchEnv, options);
}
// Transform replacements need to receive the finalized PipelineOptions
// including execution mode (batch/streaming) and parallelism.
pipeline.replaceAll(FlinkTransformOverrides.getDefaultOverrides(options));
translator.translate(pipeline);
}
|
// With only a bounded source (GenerateSequence.from(0).to(10)), translation
// must NOT override the user's streaming setting — whichever mode was
// configured (true or false) should survive translate().
@Test
public void testTranslationModeNoOverrideWithoutUnboundedSources() {
boolean[] testArgs = new boolean[] {true, false};
for (boolean streaming : testArgs) {
FlinkPipelineOptions options = getDefaultPipelineOptions();
options.setRunner(FlinkRunner.class);
options.setStreaming(streaming);
FlinkPipelineExecutionEnvironment flinkEnv = new FlinkPipelineExecutionEnvironment(options);
Pipeline pipeline = Pipeline.create(options);
pipeline.apply(GenerateSequence.from(0).to(10));
flinkEnv.translate(pipeline);
assertThat(options.isStreaming(), Matchers.is(streaming));
}
}
|
/**
 * Unregisters all handler mappings previously registered for the given plugin.
 * Does nothing if the plugin has no registered mappings.
 *
 * @param pluginId the plugin identifier; must not be null
 */
public void unregister(String pluginId) {
    Assert.notNull(pluginId, "The pluginId must not be null.");
    // Remove in a single operation: the original containsKey() + remove()
    // pair performed two lookups and was a check-then-act race if the map is
    // shared between threads; a lone remove() with a null check is atomic
    // per-entry and equivalent for the absent-key case.
    var mappings = pluginMappingInfo.remove(pluginId);
    if (mappings != null) {
        mappings.forEach(this::unregisterMapping);
    }
}
|
// Registering a controller's handler methods for a plugin and then
// unregistering that plugin must leave the plugin with no mappings.
@Test
void unregister() {
UserController userController = mock(UserController.class);
// register handler methods first
handlerMapping.registerHandlerMethods("fakePlugin", userController);
assertThat(handlerMapping.getMappings("fakePlugin")).hasSize(1);
// unregister
handlerMapping.unregister("fakePlugin");
assertThat(handlerMapping.getMappings("fakePlugin")).isEmpty();
}
|
// Initializes the LDAP input step: resolves environment-substituted settings
// (multi-valued separator, static filter, search base), determines whether
// the search is dynamic, then attempts the LDAP connection.
// Returns true only when base init and the connection both succeed; any
// connection failure is logged, stops the transformation, and counts 1 error.
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
meta = (LDAPInputMeta) smi;
data = (LDAPInputData) sdi;
if ( super.init( smi, sdi ) ) {
data.rownr = 1L;
// Get multi valued field separator
data.multi_valuedFieldSeparator = environmentSubstitute( meta.getMultiValuedSeparator() );
data.nrfields = meta.getInputFields().length;
// Set the filter string
data.staticFilter = environmentSubstitute( meta.getFilterString() );
// Set the search base
data.staticSearchBase = environmentSubstitute( meta.getSearchBase() );
// Dynamic if either the search base or the filter comes from incoming rows.
data.dynamic = ( meta.isDynamicSearch() || meta.isDynamicFilter() );
try {
// Try to connect to LDAP server
connectServerLdap();
return true;
} catch ( Exception e ) {
logError( BaseMessages.getString( PKG, "LDAPInput.ErrorInit", e.toString() ) );
stopAll();
setErrors( 1 );
}
}
return false;
}
|
// Initializes an LDAPInput step against a mocked LDAP protocol and verifies
// that the field flagged as a sorted key is propagated to the connection's
// sorting attributes during init().
@Test
public void testRowProcessing() throws Exception {
//Setup step
LDAPInput ldapInput = new LDAPInput(
stepMockHelper.stepMeta, stepMockHelper.stepDataInterface,
0, stepMockHelper.transMeta, stepMockHelper.trans );
LDAPInputData data = new LDAPInputData();
LDAPInputMeta meta = mockMeta();
//Mock fields
LDAPInputField[] fields = new LDAPInputField[] {
new LDAPInputField( "dn" ),
new LDAPInputField( "cn" ),
new LDAPInputField( "role" )
};
// Mark one field ("cn") as the sorted key.
int sortedField = 1;
fields[sortedField].setSortedKey( true );
when( meta.getInputFields() ).thenReturn( fields );
//Mock LDAP Connection
when( meta.getProtocol() ).thenReturn( LdapMockProtocol.getName() );
when( meta.getHost() ).thenReturn( "host.mock" );
when( meta.getDerefAliases() ).thenReturn( "never" );
when( meta.getReferrals() ).thenReturn( "ignore" );
LdapMockProtocol.setup();
try {
//Run Initialization
assertTrue( "Input Initialization Failed", ldapInput.init( meta, data ) );
//Verify
assertEquals( "Field not marked as sorted", 1, data.connection.getSortingAttributes().size() );
assertEquals( "Field not marked as sorted", data.attrReturned[sortedField], data.connection.getSortingAttributes().get( 0 ) );
assertNotNull( data.attrReturned[sortedField] );
} finally {
// Always tear down the mock protocol, even if assertions fail.
LdapMockProtocol.cleanup();
}
}
|
/**
 * Persists a brand-new resource set.
 *
 * @param rs the resource set to save; must not already carry an ID and must
 *           have consistent scope claims
 * @return the saved resource set as returned by the repository
 * @throws IllegalArgumentException if an ID is already set or the scope
 *                                  claims are inconsistent
 */
@Override
public ResourceSet saveNew(ResourceSet rs) {
    if (rs.getId() != null) {
        throw new IllegalArgumentException("Can't save a new resource set with an ID already set to it.");
    }
    if (!checkScopeConsistency(rs)) {
        throw new IllegalArgumentException("Can't save a resource set with inconsistent claims.");
    }
    return repository.save(rs);
}
|
// saveNew() must reject a resource set that already has an ID.
@Test(expected = IllegalArgumentException.class)
public void testSaveNew_hasId() {
ResourceSet rs = new ResourceSet();
rs.setId(1L);
resourceSetService.saveNew(rs);
}
|
// Looks up the value for the composite (key1, key2) key by delegating to the
// superclass implementation.
@Override public long get(long key1, int key2) {
return super.get0(key1, key2);
}
|
// After switching to a fresh address space with gotoNew(), previously
// inserted keys must be invisible (NULL_ADDRESS); returning to the original
// base address with gotoAddress() must make them visible again.
@Test
public void testGotoAddress() {
final long addr1 = hsa.address();
final SlotAssignmentResult slot = insert(1, 2);
hsa.gotoNew();
assertEquals(NULL_ADDRESS, hsa.get(1, 2));
hsa.gotoAddress(addr1);
assertEquals(slot.address(), hsa.get(1, 2));
}
|
/**
 * Sets the application name held by this context.
 *
 * @param app the application name to store
 */
public void setApp(String app) {
this.app = app;
}
|
// setApp() replaces the context's default app name ("unknown") with the
// provided value, observable via getApp().
@Test
void testSetApp() {
assertEquals("unknown", basicContext.getApp());
basicContext.setApp("testApp");
assertEquals("testApp", basicContext.getApp());
}
|
/**
 * Creates an SSLFactory with auto-renewal for file-based key/trust stores,
 * delegating to the two-argument overload with an always-false supplier
 * (i.e. the insecure-mode flag disabled).
 *
 * @param tlsConfig the TLS configuration to build the factory from
 * @return the created SSLFactory
 */
public static SSLFactory createSSLFactoryAndEnableAutoRenewalWhenUsingFileStores(TlsConfig tlsConfig) {
return createSSLFactoryAndEnableAutoRenewalWhenUsingFileStores(tlsConfig, () -> false);
}
|
// With the insecure-mode supplier returning true, the created SSLFactory must
// use the unsafe trust manager — both initially and after the TLS files are
// rotated and the factory auto-renews.
// NOTE(review): the helper name "ensurSslFactory..." looks like a typo of
// "ensureSslFactory..." — it is defined elsewhere, so it cannot be fixed here.
@Test
public void createSSLFactoryAndEnableAutoRenewalWhenUsingFileStoresWithPinotInsecureMode()
throws IOException, URISyntaxException, InterruptedException {
TlsConfig tlsConfig = createTlsConfig();
SSLFactory sslFactory =
RenewableTlsUtils.createSSLFactoryAndEnableAutoRenewalWhenUsingFileStores(tlsConfig, () -> true);
ensurSslFactoryUseUnsafeTrustManager(sslFactory);
updateTlsFilesAndWaitForSslFactoryToBeRenewed();
// after tls file update, the ssl factory should still use UnsafeX509ExtendedTrustManager
ensurSslFactoryUseUnsafeTrustManager(sslFactory);
}
|
/**
 * Executes a Telegram Bot API request synchronously by sending it through
 * the underlying API client and returning its typed response.
 *
 * @param request the request to send
 * @return the response produced by the API client
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
}
|
// Exercises SendPhoto with three input flavors: an existing file ID, a local
// image file (with spoiler, caption, and an italic caption entity), and raw
// bytes sent to a channel with an HTML-parsed caption (bold entity). Verifies
// the returned message, its photos, and the caption entity offsets/lengths.
@Test
public void sendPhoto() {
Message message = bot.execute(new SendPhoto(chatId, photoFileId)).message();
MessageTest.checkMessage(message);
PhotoSizeTest.checkPhotos(false, message.photo());
message = bot.execute(new SendPhoto(chatId, imageFile).hasSpoiler(true)
.caption("caption").captionEntities(new MessageEntity(MessageEntity.Type.italic, 0, 7))).message();
MessageTest.checkMessage(message);
PhotoSizeTest.checkPhotos(message.photo());
MessageEntity captionEntity = message.captionEntities()[0];
assertEquals(MessageEntity.Type.italic, captionEntity.type());
assertEquals((Integer) 0, captionEntity.offset());
assertEquals((Integer) 7, captionEntity.length());
String caption = "caption <b>bold</b>";
message = bot.execute(new SendPhoto(channelName, imageBytes).caption(caption).parseMode(ParseMode.HTML)).message();
MessageTest.checkMessage(message);
// HTML tags are consumed by parse mode; the stored caption has them stripped.
assertEquals(caption.replace("<b>", "").replace("</b>", ""), message.caption());
PhotoSizeTest.checkPhotos(message.photo());
captionEntity = message.captionEntities()[0];
assertEquals(MessageEntity.Type.bold, captionEntity.type());
// "bold" starts after "caption " (offset 8) and is 4 characters long.
assertEquals((Integer) 8, captionEntity.offset());
assertEquals((Integer) 4, captionEntity.length());
}
|
/** Returns the number of partitions recorded for this topic. */
@Override
public int numPartitions() {
return numPartitions;
}
|
// numPartitions() must echo the value passed to the constructor.
@Test
public void testNumPartitions() {
TopicMetadataImpl metadata = new TopicMetadataImpl(1234);
assertEquals(1234, metadata.numPartitions());
}
|
// Builds the fully qualified class name from the base package, the subpackage
// derived from the class file's location under baseDir, and the simple class
// name — joining the non-empty parts with the package separator (empty parts
// are filtered out to handle the default package).
// NOTE(review): `of` and `joining` are statically imported (presumably
// Stream.of / Collectors.joining) — confirm against the file's imports.
static String determineFullyQualifiedClassName(Path baseDir, String basePackageName, Path classFile) {
String subpackageName = determineSubpackageName(baseDir, classFile);
String simpleClassName = determineSimpleClassName(classFile);
return of(basePackageName, subpackageName, simpleClassName)
.filter(value -> !value.isEmpty()) // default package
.collect(joining(PACKAGE_SEPARATOR_STRING));
}
|
// A class file directly in the base package directory (empty subpackage)
// yields basePackage + simple name with no extra separator.
@Test
void determineFullyQualifiedClassName() {
Path baseDir = Paths.get("path", "to", "com", "example", "app");
String basePackageName = "com.example.app";
Path classFile = Paths.get("path", "to", "com", "example", "app", "App.class");
String fqn = ClasspathSupport.determineFullyQualifiedClassName(baseDir, basePackageName, classFile);
assertEquals("com.example.app.App", fqn);
}
|
/**
 * Returns a new Page whose first block is {@code column}, followed by this
 * page's existing blocks. The underlying blocks are shared, not copied.
 *
 * @param column the block to prepend; must have this page's position count
 * @throws IllegalArgumentException if the position counts differ
 */
public Page prependColumn(Block column)
{
    if (column.getPositionCount() != positionCount) {
        throw new IllegalArgumentException(String.format("Column does not have same position count (%s) as page (%s)", column.getPositionCount(), positionCount));
    }
    Block[] newBlocks = new Block[1 + blocks.length];
    newBlocks[0] = column;
    System.arraycopy(blocks, 0, newBlocks, 1, blocks.length);
    return wrapBlocksWithoutCopy(positionCount, newBlocks);
}
|
// Prepending a column whose position count (5) differs from the page's (10)
// must throw IllegalArgumentException.
@Test(expectedExceptions = IllegalArgumentException.class)
public void testPrependColumnWrongNumberOfRows()
{
int entries = 10;
BlockBuilder blockBuilder = BIGINT.createBlockBuilder(null, entries);
for (int i = 0; i < entries; i++) {
BIGINT.writeLong(blockBuilder, i);
}
Block block = blockBuilder.build();
Page page = new Page(block, block);
// Build a shorter column (entries - 5 positions) to trigger the mismatch.
BlockBuilder newBlockBuilder = BIGINT.createBlockBuilder(null, entries - 5);
for (int i = 0; i < entries - 5; i++) {
BIGINT.writeLong(newBlockBuilder, -i);
}
Block newBlock = newBlockBuilder.build();
page.prependColumn(newBlock);
}
|
/**
 * Encodes the given byte array into this code, delegating to the
 * two-argument overload with the flag set to false.
 *
 * @param value the bytes to encode
 */
public void writeBytes(byte[] value) {
writeBytes(value, false);
}
|
// Round-trips byte arrays through OrderedCode: writes are readable back in
// order, encodings are concatenable (writing onto an existing encoding equals
// encoding all items at once), and a decoder seeded with the full encoding
// reads all items back in sequence.
@Test
public void testWriteBytes() {
byte[] first = {'a', 'b', 'c'};
byte[] second = {'d', 'e', 'f'};
byte[] last = {'x', 'y', 'z'};
OrderedCode orderedCode = new OrderedCode();
orderedCode.writeBytes(first);
byte[] firstEncoded = orderedCode.getEncodedBytes();
assertArrayEquals(orderedCode.readBytes(), first);
// Write three items and capture the combined encoding.
orderedCode.writeBytes(first);
orderedCode.writeBytes(second);
orderedCode.writeBytes(last);
byte[] allEncoded = orderedCode.getEncodedBytes();
assertArrayEquals(orderedCode.readBytes(), first);
assertArrayEquals(orderedCode.readBytes(), second);
assertArrayEquals(orderedCode.readBytes(), last);
// Appending to a pre-seeded encoder must reproduce the combined encoding.
orderedCode = new OrderedCode(firstEncoded);
orderedCode.writeBytes(second);
orderedCode.writeBytes(last);
assertArrayEquals(orderedCode.getEncodedBytes(), allEncoded);
assertArrayEquals(orderedCode.readBytes(), first);
assertArrayEquals(orderedCode.readBytes(), second);
assertArrayEquals(orderedCode.readBytes(), last);
// A fresh decoder over the combined encoding reads items in write order.
orderedCode = new OrderedCode(allEncoded);
assertArrayEquals(orderedCode.readBytes(), first);
assertArrayEquals(orderedCode.readBytes(), second);
assertArrayEquals(orderedCode.readBytes(), last);
}
|
// Forwards the REST request to the wrapped dispatcher, wrapping the callback
// so the request is finalized after the response callback completes.
@Override
public void handleRestRequest(RestRequest req, Map<String, String> wireAttrs, RequestContext requestContext,
TransportCallback<RestResponse> callback)
{
_transportDispatcher.handleRestRequest(req, wireAttrs, requestContext,
new RequestFinalizerTransportCallback<>(callback, requestContext, req));
}
|
// Verifies the end-to-end ordering of a REST request: outer dispatcher, inner
// dispatcher, inner callback, outer callback, user callback, and finally the
// request finalizer — regardless of whether the user callback throws.
@Test(dataProvider = "throwTransportCallbackException")
public void testHandleRestRequestOrdering(boolean throwTransportCallbackException)
{
when(_restTransportResponse.getResponse())
.thenReturn(_restResponse);
final TestTransportCallback<RestResponse> transportCallback = new TestTransportCallback<>(throwTransportCallbackException);
_outerDispatcher.handleRestRequest(null, null, new RequestContext(), transportCallback);
Assert.assertEquals(_outerDispatcher._executionOrder, 1);
Assert.assertEquals(_innerDispatcher._executionOrder, 2);
Assert.assertEquals(_innerDispatcher._transportCallback._executionOrder, 3);
Assert.assertEquals(_outerDispatcher._transportCallback._executionOrder, 4);
Assert.assertEquals(transportCallback._executionOrder, 5);
Assert.assertEquals(_requestFinalizer._executionOrder, 6, "Expected request to be finalized after the callback.");
}
|
/**
 * Sets the synchronous backup count for this queue, validating it against
 * the current async backup count (checkBackupCount rejects invalid values,
 * e.g. negatives).
 *
 * @param backupCount the number of synchronous backups
 * @return this config, for chaining
 */
public QueueConfig setBackupCount(int backupCount) {
this.backupCount = checkBackupCount(backupCount, asyncBackupCount);
return this;
}
|
// A negative backup count must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void setBackupCount_whenItsNegative() {
queueConfig.setBackupCount(-1);
}
|
// Installs the dynamic-config property source with highest precedence:
// builds a composite source containing the DynamicConfigPropertySource (plus
// an optional disable-config source) and adds it first in the environment so
// it wins over other property sources.
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
final CompositePropertySource compositePropertySource = new CompositePropertySource(
DynamicConstants.PROPERTY_NAME);
compositePropertySource
.addPropertySource(new DynamicConfigPropertySource(DynamicConstants.PROPERTY_NAME));
tryAddDisableConfigSource(compositePropertySource);
environment.getPropertySources().addFirst(compositePropertySource);
}
|
// After environment post-processing, the "Sermant-Dynamic-Config" property
// source must be present, and a resolved config event must become visible
// through the environment once asynchronous processing completes.
// NOTE(review): the fixed 1s sleep makes this test timing-dependent and
// potentially flaky on slow machines — an awaitility-style poll would be
// more robust.
@Test
public void locate() throws InterruptedException {
final SpringEnvironmentProcessor springEnvironmentProcessor = new SpringEnvironmentProcessor();
final MockEnvironment mockEnvironment = new MockEnvironment();
springEnvironmentProcessor.postProcessEnvironment(mockEnvironment, null);
final PropertySource<?> source = mockEnvironment.getPropertySources().get("Sermant-Dynamic-Config");
Assert.assertNotNull(source);
// Note the configSource injection test is performed. The spi file is viewed and sorted in the specified order
ConfigHolder.INSTANCE.resolve(event);
// Because this is an asynchronous execution, it waits for the asynchronous execution to complete
Thread.sleep(1000);
Assert.assertEquals(mockEnvironment.getProperty(KEY), VALUE);
}
|
// A service name conforms to the naming rules when it has exactly four
// separator-delimited segments. The -1 limit keeps trailing empty segments,
// so names with a trailing separator are counted correctly.
private boolean isConformRules(String serviceName) {
return serviceName.split(NAME_SEPARATOR, -1).length == 4;
}
|
// Registers two service URLs against a mocked Nacos naming service, then
// subscribes with both a wildcard-version URL (resolved via service-name
// listing) and an exact-version URL (resolved via instance lookup), checking
// the registered set and per-URL listener counts.
// NOTE(review): the literal "¬ify" in the URL strings looks like
// extraction-garbled "&notify" (HTML entity &not;) — verify against the
// upstream source before relying on these URLs.
// NOTE(review): the registered.size() assertion is repeated three times;
// presumably only the first was intended.
@Test
void testIsConformRules() {
NamingService namingService = mock(NacosNamingService.class);
URL serviceUrlWithoutCategory = URL.valueOf("nacos://127.0.0.1:3333/" + serviceInterface + "?interface="
+ serviceInterface + "¬ify=false&methods=test1,test2&version=1.0.0&group=default");
try {
String serviceName = "providers:org.apache.dubbo.registry.nacos.NacosService:1.0.0:default";
String category = this.serviceUrl.getParameter(CATEGORY_KEY, DEFAULT_CATEGORY);
URL newUrl = this.serviceUrl.addParameter(CATEGORY_KEY, category);
newUrl = newUrl.addParameter(PROTOCOL_KEY, this.serviceUrl.getProtocol());
newUrl = newUrl.addParameter(PATH_KEY, this.serviceUrl.getPath());
String ip = newUrl.getHost();
int port = newUrl.getPort();
Instance instance = new Instance();
instance.setIp(ip);
instance.setPort(port);
instance.setMetadata(new HashMap<>(newUrl.getParameters()));
List<Instance> instances = new ArrayList<>();
instances.add(instance);
// Exact-version lookups return the single mocked instance.
when(namingService.getAllInstances(
serviceName, this.registryUrl.getParameter(GROUP_KEY, Constants.DEFAULT_GROUP)))
.thenReturn(instances);
// Wildcard-version subscriptions list service names; one conforms to the
// 4-segment rule (with version), one does not (version missing).
String serviceNameWithoutVersion = "providers:org.apache.dubbo.registry.nacos.NacosService:default";
String serviceName1 = "providers:org.apache.dubbo.registry.nacos.NacosService:1.0.0:default";
List<String> serviceNames = new ArrayList<>();
serviceNames.add(serviceNameWithoutVersion);
serviceNames.add(serviceName1);
ListView<String> result = new ListView<>();
result.setData(serviceNames);
when(namingService.getServicesOfServer(
1, Integer.MAX_VALUE, registryUrl.getParameter(GROUP_KEY, Constants.DEFAULT_GROUP)))
.thenReturn(result);
} catch (NacosException e) {
// ignore
}
NacosNamingServiceWrapper nacosNamingServiceWrapper =
new NacosNamingServiceWrapper(new NacosConnectionManager(namingService), 0, 0);
nacosRegistry = new NacosRegistry(this.registryUrl, nacosNamingServiceWrapper);
Set<URL> registered;
nacosRegistry.register(this.serviceUrl);
nacosRegistry.register(serviceUrlWithoutCategory);
registered = nacosRegistry.getRegistered();
Assert.assertTrue(registered.contains(serviceUrl));
Assertions.assertTrue(registered.contains(serviceUrlWithoutCategory));
Assertions.assertEquals(2, registered.size());
URL serviceUrlWithWildcard = URL.valueOf("nacos://127.0.0.1:3333/" + serviceInterface
+ "?interface=org.apache.dubbo.registry.nacos.NacosService"
+ "¬ify=false&methods=test1,test2&category=providers&version=*&group=default");
URL serviceUrlWithOutWildcard = URL.valueOf("nacos://127.0.0.1:3333/" + serviceInterface
+ "?interface=org.apache.dubbo.registry.nacos.NacosService"
+ "¬ify=false&methods=test1,test2&category=providers&version=1.0.0&group=default");
NotifyListener listener = mock(NotifyListener.class);
nacosRegistry.subscribe(serviceUrlWithWildcard, listener);
nacosRegistry.subscribe(serviceUrlWithOutWildcard, listener);
Map<URL, Set<NotifyListener>> subscribed = nacosRegistry.getSubscribed();
Assertions.assertEquals(2, registered.size());
Assertions.assertEquals(1, subscribed.get(serviceUrlWithOutWildcard).size());
Assertions.assertEquals(2, registered.size());
Assertions.assertEquals(1, subscribed.get(serviceUrlWithWildcard).size());
}
|
// Reads the failover switch state from the on-disk switch file.
// Behavior: missing file => failover off; file modified since last read =>
// re-parse its lines for the on/off marker (turning failover on also reloads
// failover data via FailoverFileReader); unmodified file => return the cached
// state; any error => log, force failover off.
@Override
public FailoverSwitch getSwitch() {
try {
File switchFile = Paths.get(failoverDir, UtilAndComs.FAILOVER_SWITCH).toFile();
if (!switchFile.exists()) {
NAMING_LOGGER.debug("failover switch is not found, {}", switchFile.getName());
switchParams.put(FAILOVER_MODE_PARAM, Boolean.FALSE.toString());
return FAILOVER_SWITCH_FALSE;
}
long modified = switchFile.lastModified();
if (lastModifiedMillis < modified) {
// File changed since the last check: re-read and re-parse it.
lastModifiedMillis = modified;
String failover = ConcurrentDiskUtil.getFileContent(switchFile.getPath(), Charset.defaultCharset().toString());
if (!StringUtils.isEmpty(failover)) {
String[] lines = failover.split(DiskCache.getLineSeparator());
for (String line : lines) {
String line1 = line.trim();
if (IS_FAILOVER_MODE.equals(line1)) {
switchParams.put(FAILOVER_MODE_PARAM, Boolean.TRUE.toString());
NAMING_LOGGER.info("failover-mode is on");
// Load the failover data synchronously before reporting ON.
new FailoverFileReader().run();
return FAILOVER_SWITCH_TRUE;
} else if (NO_FAILOVER_MODE.equals(line1)) {
switchParams.put(FAILOVER_MODE_PARAM, Boolean.FALSE.toString());
NAMING_LOGGER.info("failover-mode is off");
return FAILOVER_SWITCH_FALSE;
}
}
}
}
// No change (or no recognizable marker): fall back to the cached state.
return switchParams.get(FAILOVER_MODE_PARAM).equals(Boolean.TRUE.toString()) ? FAILOVER_SWITCH_TRUE : FAILOVER_SWITCH_FALSE;
} catch (Throwable e) {
NAMING_LOGGER.error("[NA] failed to read failover switch.", e);
switchParams.put(FAILOVER_MODE_PARAM, Boolean.FALSE.toString());
return FAILOVER_SWITCH_FALSE;
}
}
|
// With a failover dir containing a "disabled" switch file, getSwitch() must
// report disabled — and keep reporting disabled on a second call (the cached
// state path), hence the deliberately repeated assertion.
@Test
void testGetSwitchForFailoverDisabledKeep() throws NoSuchFieldException, IllegalAccessException {
String dir = DiskFailoverDataSourceTest.class.getResource("/").getPath() + "/failover_test/disabled";
injectFailOverDir(dir);
assertFalse(dataSource.getSwitch().getEnabled());
assertFalse(dataSource.getSwitch().getEnabled());
}
|
/**
 * Builds the header row for the given CSV format by joining its header
 * column names with the format's delimiter character.
 *
 * @param csvFormat the format supplying the delimiter and header columns
 * @return the delimiter-joined header line
 */
static String headerLine(CSVFormat csvFormat) {
    String delimiter = Character.toString(csvFormat.getDelimiter());
    return String.join(delimiter, csvFormat.getHeader());
}
|
// With QuoteMode.ALL, fully quoted input lines must parse to the same
// unquoted field values (quoting is a no-op for parsing) and produce no
// errors.
@Test
public void givenQuoteModeAll_isNoop() {
CSVFormat csvFormat = csvFormat().withQuoteMode(QuoteMode.ALL);
PCollection<String> input =
pipeline.apply(
Create.of(
headerLine(csvFormat),
"\"a\",\"1\",\"1.1\"",
"\"b\",\"2\",\"2.2\"",
"\"c\",\"3\",\"3.3\""));
CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);
CsvIOParseResult<List<String>> result = input.apply(underTest);
PAssert.that(result.getOutput())
.containsInAnyOrder(
Arrays.asList(
Arrays.asList("a", "1", "1.1"),
Arrays.asList("b", "2", "2.2"),
Arrays.asList("c", "3", "3.3")));
PAssert.that(result.getErrors()).empty();
pipeline.run();
}
|
// A node is considered compatible only when its own version exactly equals
// the given cluster version (no cross-version tolerance here).
@Override
public boolean isNodeVersionCompatibleWith(Version clusterVersion) {
Preconditions.checkNotNull(clusterVersion);
return node.getVersion().asVersion().equals(clusterVersion);
}
|
// A node must always be compatible with a cluster running its own version.
@Test
public void test_nodeVersionCompatibleWith_ownClusterVersion() {
MemberVersion currentVersion = getNode(hazelcastInstance).getVersion();
assertTrue(nodeExtension.isNodeVersionCompatibleWith(currentVersion.asVersion()));
}
|
/**
 * Lists database tables for a data source, filtered by name/comment, and
 * excludes tables that already have a Codegen entry for that data source.
 *
 * @param dataSourceConfigId the data source configuration id
 * @param name table-name filter
 * @param comment table-comment filter
 * @return the remaining tables converted to response VOs
 */
@Override
public List<DatabaseTableRespVO> getDatabaseTableList(Long dataSourceConfigId, String name, String comment) {
List<TableInfo> tables = databaseTableService.getTableList(dataSourceConfigId, name, comment);
// Remove tables that already exist in Codegen
Set<String> existsTables = convertSet(
codegenTableMapper.selectListByDataSourceConfigId(dataSourceConfigId), CodegenTableDO::getTableName);
tables.removeIf(table -> existsTables.contains(table.getName()));
return BeanUtils.toBean(tables, DatabaseTableRespVO.class);
}
|
// Two tables exist in the database, one of which ("t_yunai_02") already has a
// Codegen record — the service must return only the remaining table with its
// name and comment intact.
@Test
public void testGetDatabaseTableList() {
// Prepare parameters
Long dataSourceConfigId = randomLongId();
String name = randomString();
String comment = randomString();
// Mock methods
TableInfo tableInfo01 = mock(TableInfo.class);
when(tableInfo01.getName()).thenReturn("t_yunai");
when(tableInfo01.getComment()).thenReturn("芋艿");
TableInfo tableInfo02 = mock(TableInfo.class);
when(tableInfo02.getName()).thenReturn("t_yunai_02");
when(tableInfo02.getComment()).thenReturn("芋艿_02");
when(databaseTableService.getTableList(eq(dataSourceConfigId), eq(name), eq(comment)))
.thenReturn(ListUtil.toList(tableInfo01, tableInfo02));
// Mock data: an existing Codegen record for "t_yunai_02" so it gets filtered out.
CodegenTableDO tableDO = randomPojo(CodegenTableDO.class,
o -> o.setScene(CodegenSceneEnum.ADMIN.getScene())
.setTableName("t_yunai_02")
.setDataSourceConfigId(dataSourceConfigId));
codegenTableMapper.insert(tableDO);
// Invoke
List<DatabaseTableRespVO> result = codegenService.getDatabaseTableList(dataSourceConfigId, name, comment);
// Assert
assertEquals(1, result.size());
assertEquals("t_yunai", result.get(0).getName());
assertEquals("芋艿", result.get(0).getComment());
}
|
/**
 * Computes {@code times! * prod} as a stack-safe Trampoline: each recursive
 * step is deferred via {@code Trampoline.more} instead of growing the call
 * stack, multiplying the accumulator by each value down to 1.
 *
 * @param times remaining multiplications; 0 terminates the recursion
 * @param prod the accumulated product so far
 * @return a trampoline step yielding the final product when run
 */
public static Trampoline<Integer> loop(int times, int prod) {
    return times == 0
            ? Trampoline.done(prod)
            : Trampoline.more(() -> loop(times - 1, prod * times));
}
|
// loop(10, 1) must evaluate to 10! = 3,628,800 when the trampoline is run.
@Test
void testTrampolineWithFactorialFunction() {
long result = TrampolineApp.loop(10, 1).result();
assertEquals(3_628_800, result);
}
|
// Creates a new edge between two distinct nodes in the (unfrozen) graph and
// returns an iterator state positioned on it.
// Rejects edge creation on a frozen graph and rejects loop edges (see the
// rationale in the inline comment below).
@Override
public EdgeIteratorState edge(int nodeA, int nodeB) {
if (isFrozen())
throw new IllegalStateException("Cannot create edge if graph is already frozen");
if (nodeA == nodeB)
// Loop edges would only make sense if their attributes were the same for both 'directions',
// because for routing algorithms (which ignore the way geometry) loop edges do not even
// have a well-defined 'direction'. So we either need to make sure the attributes
// are the same for both directions, or reject loop edges altogether. Since we currently
// don't know any use-case for loop edges in road networks (there is one for PT),
// we reject them here.
throw new IllegalArgumentException("Loop edges are not supported, got: " + nodeA + " - " + nodeB);
int edgeId = store.edge(nodeA, nodeB);
EdgeIteratorStateImpl edge = new EdgeIteratorStateImpl(this);
boolean valid = edge.init(edgeId, nodeB);
// init is expected to succeed for a just-created edge id.
assert valid;
return edge;
}
|
// An enum encoded value set on an edge must be readable back, both at edge
// creation time and after being overwritten later.
@Test
public void setGetFlags() {
BaseGraph graph = createGHStorage();
EnumEncodedValue<RoadClass> rcEnc = encodingManager.getEnumEncodedValue(RoadClass.KEY, RoadClass.class);
EdgeIteratorState edge = graph.edge(0, 1).set(rcEnc, RoadClass.BRIDLEWAY);
assertEquals(RoadClass.BRIDLEWAY, edge.get(rcEnc));
edge.set(rcEnc, RoadClass.CORRIDOR);
assertEquals(RoadClass.CORRIDOR, edge.get(rcEnc));
}
|
/**
 * Appends a minimally URL-encoded form of {@code str} to {@code sb}:
 * only '+' (to "%2B") and '%' (to "%25") are escaped; every other
 * character is appended unchanged.
 *
 * @param str the input text to encode
 * @param sb the builder the encoded text is appended to
 */
static void urlEncode(String str, StringBuilder sb) {
    for (char ch : str.toCharArray()) {
        switch (ch) {
            case '+':
                sb.append("%2B");
                break;
            case '%':
                sb.append("%25");
                break;
            default:
                sb.append(ch);
        }
    }
}
|
// Encoding "+" must append "%2B" to the builder while preserving its
// existing content.
// NOTE(review): the "????" prefix literals look like mis-encoded non-ASCII
// characters from extraction — verify against the upstream source.
@Test
void testUrlEncodePlus() {
// Arrange
final StringBuilder sb = new StringBuilder("????");
// Act
GroupKey.urlEncode("+", sb);
// Assert side effects
assertNotNull(sb);
assertEquals("????%2B", sb.toString());
}
|
/**
 * Compares versions by packing major.minor.patch into the three least
 * significant bytes of an int (major highest) and comparing the packed
 * values. Each component is masked to one byte before packing, so negative
 * or oversized components cannot sign-extend or bleed into neighbors.
 *
 * @return -1, 0, or 1 as this version is less than, equal to, or greater
 *         than {@code otherVersion}
 */
@Override
@SuppressWarnings("checkstyle:booleanexpressioncomplexity")
public int compareTo(MemberVersion otherVersion) {
    int packedThis = ((major & 0xff) << 16) | ((minor & 0xff) << 8) | (patch & 0xff);
    int packedThat = ((otherVersion.major & 0xff) << 16)
            | ((otherVersion.minor & 0xff) << 8)
            | (otherVersion.patch & 0xff);
    // Integer.compare is specified to return exactly -1, 0, or 1.
    return Integer.compare(packedThis, packedThat);
}
|
// Ordering sanity checks: equal versions compare to 0, patch and minor bumps
// compare greater, and the comparison is antisymmetric (3.9 > 3.8.x).
@Test
public void testCompareTo() {
assertTrue(VERSION_3_8.compareTo(VERSION_3_8) == 0);
assertTrue(VERSION_3_8.compareTo(VERSION_3_8_1) < 0);
assertTrue(VERSION_3_8.compareTo(VERSION_3_8_2) < 0);
assertTrue(VERSION_3_8.compareTo(VERSION_3_9) < 0);
assertTrue(VERSION_3_9.compareTo(VERSION_3_8) > 0);
assertTrue(VERSION_3_9.compareTo(VERSION_3_8_1) > 0);
assertTrue(VERSION_3_9.compareTo(VERSION_3_8_2) > 0);
}
|
// Synchronously commits the given offsets, retrying with backoff until the
// timer expires. Empty offsets only flush pending async-commit callbacks.
// Returns true on successful commit, false on coordinator-unready or timer
// expiry; rethrows non-retriable commit failures.
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, Timer timer) {
invokeCompletedOffsetCommitCallbacks();
if (offsets.isEmpty()) {
// We guarantee that the callbacks for all commitAsync() will be invoked when
// commitSync() completes, even if the user tries to commit empty offsets.
return invokePendingAsyncCommits(timer);
}
long attempts = 0L;
do {
if (coordinatorUnknownAndUnreadySync(timer)) {
return false;
}
RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
client.poll(future, timer);
// We may have had in-flight offset commits when the synchronous commit began. If so, ensure that
// the corresponding callbacks are invoked prior to returning in order to preserve the order that
// the offset commits were applied.
invokeCompletedOffsetCommitCallbacks();
if (future.succeeded()) {
if (interceptors != null)
interceptors.onCommit(offsets);
return true;
}
// Non-retriable failures propagate immediately to the caller.
if (future.failed() && !future.isRetriable())
throw future.exception();
// Retriable failure: back off (growing with attempt count) and try again
// while the timer permits.
timer.sleep(retryBackoff.backoff(attempts++));
} while (timer.notExpired());
return false;
}
|
// A sync commit that hits a disconnected coordinator must rediscover the
// coordinator and retry the commit request until it succeeds.
@Test
public void testCommitOffsetSyncCoordinatorDisconnected() {
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE));
// sync commit with coordinator disconnected (should connect, get metadata, and then submit the commit request)
prepareOffsetCommitRequestDisconnect(singletonMap(t1p, 100L));
client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE);
coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE));
}
|
// Maps a (possibly magic) path to its final destination path elements.
// Non-magic paths pass through unchanged. For magic paths, the destination is
// the magic directory's parents plus either everything under the __base
// marker (when present) or just the final filename. Empty magic/base subtrees
// are rejected as invalid arguments.
public static List<String> finalDestination(List<String> elements) {
if (isMagicPath(elements)) {
List<String> destDir = magicPathParents(elements);
List<String> children = magicPathChildren(elements);
checkArgument(!children.isEmpty(), "No path found under the prefix " +
MAGIC_PATH_PREFIX);
ArrayList<String> dest = new ArrayList<>(destDir);
if (containsBasePath(children)) {
// there's a base marker in the path
List<String> baseChildren = basePathChildren(children);
checkArgument(!baseChildren.isEmpty(),
"No path found under " + BASE);
dest.addAll(baseChildren);
} else {
// No base marker: only the final filename lands in the destination dir.
dest.add(filename(children));
}
return dest;
} else {
return elements;
}
}
|
// A path without any magic element must be returned unchanged.
@Test
public void testFinalDestinationNoMagic() {
assertEquals(l("first", "2"),
finalDestination(l("first", "2")));
}
|
// Executes an INSERT VALUES statement: resolves the target data source,
// validates the insert columns, builds the Kafka record, and produces it.
// Authorization failures (topic-level, cluster-level, or cluster-level
// wrapped in a KafkaException) are translated into KsqlExceptions with
// user-friendly root causes; any other failure is wrapped generically.
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
}
|
/**
 * A TopicAuthorizationException raised by the producer must be surfaced as a
 * KsqlException whose cause carries an ACL-style authorization message.
 */
@Test
public void shouldThrowOnTopicAuthorizationException() {
  // Arrange: an INSERT VALUES statement and a producer that denies the write on "t1".
  final ConfiguredStatement<InsertValues> insertStatement = givenInsertValues(
      allAndPseudoColumnNames(SCHEMA),
      ImmutableList.of(
          new LongLiteral(1L),
          new StringLiteral("str"),
          new StringLiteral("str"),
          new LongLiteral(2L))
  );
  doThrow(new TopicAuthorizationException(Collections.singleton("t1")))
      .when(producer).send(any());

  // Act: execute the statement and capture the failure.
  final Exception thrown = assertThrows(
      KsqlException.class,
      () -> executor.execute(insertStatement, mock(SessionProperties.class), engine, serviceContext)
  );

  // Assert: the cause names the denied operation and topic.
  assertThat(thrown.getCause(),
      hasMessage(containsString("Authorization denied to Write on topic(s): [t1]")));
}
|
/**
 * Asserts that the given class satisfies the immutability rules enforced by
 * {@code ImmutableClassChecker}; on violation an AssertionError describing
 * the expected and actual findings is thrown.
 *
 * @param clazz the class under test
 */
public static void assertThatClassIsImmutable(Class<?> clazz) {
    final ImmutableClassChecker checker = new ImmutableClassChecker();
    if (checker.isImmutableClass(clazz, false)) {
        return;
    }
    // Collect the checker's expectation and the observed mismatch for the message.
    final Description expected = new StringDescription();
    final Description actual = new StringDescription();
    checker.describeTo(expected);
    checker.describeMismatch(actual);
    throw new AssertionError("\n"
            + "Expected: is \"" + expected.toString() + "\"\n"
            + " but : was \"" + actual.toString() + "\"");
}
|
/**
 * A non-final class must be rejected by assertThatClassIsImmutable with a
 * message explaining that the class is not final.
 */
@Test
public void testNonFinalClass() throws Exception {
    boolean assertionSeen = false;
    try {
        assertThatClassIsImmutable(NonFinal.class);
    } catch (AssertionError expected) {
        // The failure message must name the actual violation.
        assertThat(expected.getMessage(), containsString("is not final"));
        assertionSeen = true;
    }
    assertThat(assertionSeen, is(true));
}
|
/**
 * Starts the ksql app: clears any state left over from a previous run, then
 * runs initialization with the given (port-resolved) config.
 */
@VisibleForTesting
void startKsql(final KsqlConfig ksqlConfigWithPort) {
  // Old state must be removed before initialize() runs.
  cleanupOldState();
  initialize(ksqlConfigWithPort);
}
|
/**
 * When processing-log stream auto-creation is disabled, starting ksql must
 * not submit the log-stream CREATE statement.
 */
@Test
public void shouldNotCreateLogStreamIfAutoCreateNotConfigured() {
  // Given: auto-create of the processing log stream is switched off.
  when(processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE))
      .thenReturn(false);
  // When:
  app.startKsql(ksqlConfig);
  // Then: the create statement was never handled.
  verify(ksqlResource, never()).handleKsqlStatements(
      securityContext,
      new KsqlRequest(logCreateStatement, Collections.emptyMap(), Collections.emptyMap(), null)
  );
}
|
/**
 * Hashes the value's long representation, keeping hashCode consistent for
 * objects whose {@code toLong()} values are equal.
 */
@Override
public int hashCode() {
    return Long.hashCode(toLong());
}
|
/**
 * hashCode() must equal the hash of the address's long representation.
 */
@Test
public void testHashCode() throws Exception {
    assertEquals(Long.hashCode(MAC_ONOS_LONG), MAC_ONOS.hashCode());
}
|
/**
 * Lowers an order value by {@code offset} (numerically increases it),
 * clamping at {@code Ordered.LOWEST_PRECEDENCE}. A null source is treated as
 * the lowest precedence.
 *
 * @param orderSource starting order, may be null
 * @param offset      positive amount to lower by
 * @return the lowered order value, never past the lowest precedence
 * @throws IllegalArgumentException if offset is not positive
 */
public static int lower(Integer orderSource, int offset) {
    if (offset <= 0) {
        throw new IllegalArgumentException("offset must be greater than 0");
    }
    final int base = (orderSource == null) ? Ordered.LOWEST_PRECEDENCE : orderSource;
    // Clamp rather than overflow past the lowest possible precedence.
    return (Ordered.LOWEST_PRECEDENCE - offset < base) ? Ordered.LOWEST_PRECEDENCE : base + offset;
}
|
/**
 * lower() adds the offset, clamps at LOWEST_PRECEDENCE, and rejects
 * non-positive offsets.
 */
@Test
public void test_lower() {
    assertThat(OrderUtil.lower(1, 1)).isEqualTo(2);
    // Results past the lowest precedence are clamped instead of overflowing.
    assertThat(OrderUtil.lower(Ordered.LOWEST_PRECEDENCE - 1, 2)).isEqualTo(Ordered.LOWEST_PRECEDENCE);
    assertThat(OrderUtil.lower(Ordered.LOWEST_PRECEDENCE, 1)).isEqualTo(Ordered.LOWEST_PRECEDENCE);
    // Offsets must be strictly positive.
    Assertions.assertThrows(IllegalArgumentException.class, () -> OrderUtil.lower(1, -1));
}
|
/**
 * Returns a sink that writes items into the list with the given name, backed
 * by the {@code writeListP} processor.
 *
 * @param listName name of the destination list
 */
@Nonnull
public static <T> Sink<T> list(@Nonnull String listName) {
    return fromProcessor("listSink(" + listName + ')', writeListP(listName));
}
|
/**
 * Setting local parallelism on a sink stage must be reflected on the
 * underlying transform.
 */
@Test
public void when_setLocalParallelism_then_sinkHasIt() {
    //Given
    String sinkName = randomName();
    int localParallelism = 5;
    SinkStage stage = p
            .readFrom(Sources.list(sinkName))
            .writeTo(Sinks.list(sinkName));
    //When
    stage.setLocalParallelism(localParallelism);
    //Then: the transform backing the stage carries the configured value.
    assertEquals(localParallelism, transformOf(stage).localParallelism());
}
|
/**
 * Builds the Java filter transform from the given configuration.
 */
@Override
protected SchemaTransform from(Configuration configuration) {
  return new JavaFilterTransform(configuration);
}
|
/**
 * Filters rows with a Java expression that throws for short strings, and
 * verifies that failing rows are routed to the configured error output with
 * the thrown exception's message.
 */
@Test
@Category(NeedsRunner.class)
public void testErrorHandling() {
  Schema inputSchema = Schema.of(Schema.Field.of("s", Schema.FieldType.STRING));
  PCollection<Row> input =
      pipeline
          .apply(
              Create.of(
                  Row.withSchema(inputSchema).addValues("short").build(),
                  Row.withSchema(inputSchema).addValues("looooooooooooong").build()))
          .setRowSchema(inputSchema);
  // The keep expression indexes char 7, which throws for the 5-char "short".
  PCollectionRowTuple result =
      PCollectionRowTuple.of(JavaFilterTransformProvider.INPUT_ROWS_TAG, input)
          .apply(
              new JavaFilterTransformProvider()
                  .from(
                      JavaFilterTransformProvider.Configuration.builder()
                          .setLanguage("java")
                          .setKeep(
                              JavaRowUdf.Configuration.builder()
                                  .setExpression("s.charAt(7) == 'o'")
                                  .build())
                          .setErrorHandling(ErrorHandling.builder().setOutput("errors").build())
                          .build()));
  // The long row passes the filter.
  PCollection<Row> good = result.get(JavaFilterTransformProvider.OUTPUT_ROWS_TAG);
  PAssert.that(good)
      .containsInAnyOrder(
          Row.withSchema(inputSchema).withFieldValue("s", "looooooooooooong").build());
  // The short row lands in the error output along with the exception message.
  PCollection<Row> errors = result.get("errors");
  Schema errorSchema = errors.getSchema();
  PAssert.that(errors)
      .containsInAnyOrder(
          Row.withSchema(errorSchema)
              .withFieldValue(
                  "failed_row", Row.withSchema(inputSchema).addValues("short").build())
              .withFieldValue("error_message", "String index out of range: 7")
              .build());
  pipeline.run();
}
|
/**
 * Converts SDF-based Read expansions in the pipeline to primitive Read
 * transforms when the options request the deprecated read path, or when they
 * do not opt into the SDF read path at all.
 */
public static void convertReadBasedSplittableDoFnsToPrimitiveReadsIfNecessary(Pipeline pipeline) {
  final boolean sdfReadRequested =
      ExperimentalOptions.hasExperiment(pipeline.getOptions(), "use_sdf_read")
          || ExperimentalOptions.hasExperiment(pipeline.getOptions(), "use_unbounded_sdf_wrapper");
  final boolean deprecatedReadRequested =
      ExperimentalOptions.hasExperiment(pipeline.getOptions(), "beam_fn_api_use_deprecated_read")
          || ExperimentalOptions.hasExperiment(pipeline.getOptions(), "use_deprecated_read");
  // Equivalent to: !(use_sdf_read || use_unbounded_sdf_wrapper) || deprecated flags.
  if (!sdfReadRequested || deprecatedReadRequested) {
    convertReadBasedSplittableDoFnsToPrimitiveReads(pipeline);
  }
}
|
/**
 * With "use_deprecated_read" set, both bounded and unbounded Read transforms
 * must be converted into their primitive counterparts, leaving no composite
 * Read transforms behind.
 */
@Test
public void testConvertToPrimitiveReadsHappen() {
  PipelineOptions deprecatedReadOptions = PipelineOptionsFactory.create();
  deprecatedReadOptions.setRunner(CrashingRunner.class);
  ExperimentalOptions.addExperiment(
      deprecatedReadOptions.as(ExperimentalOptions.class), "use_deprecated_read");
  Pipeline pipeline = Pipeline.create(deprecatedReadOptions);
  pipeline.apply(Read.from(new FakeBoundedSource()));
  pipeline.apply(Read.from(new BoundedToUnboundedSourceAdapter<>(new FakeBoundedSource())));
  SplittableParDo.convertReadBasedSplittableDoFnsToPrimitiveReadsIfNecessary(pipeline);
  AtomicBoolean sawPrimitiveBoundedRead = new AtomicBoolean();
  AtomicBoolean sawPrimitiveUnboundedRead = new AtomicBoolean();
  // Walk the converted pipeline: no composite Reads may remain, and both
  // primitive Read variants must be present.
  pipeline.traverseTopologically(
      new Defaults() {
        @Override
        public CompositeBehavior enterCompositeTransform(Node node) {
          assertThat(node.getTransform(), not(instanceOf(Read.Bounded.class)));
          assertThat(node.getTransform(), not(instanceOf(Read.Unbounded.class)));
          return super.enterCompositeTransform(node);
        }
        @Override
        public void visitPrimitiveTransform(Node node) {
          if (node.getTransform() instanceof SplittableParDo.PrimitiveBoundedRead) {
            sawPrimitiveBoundedRead.set(true);
          } else if (node.getTransform() instanceof SplittableParDo.PrimitiveUnboundedRead) {
            sawPrimitiveUnboundedRead.set(true);
          }
        }
      });
  assertTrue(sawPrimitiveBoundedRead.get());
  assertTrue(sawPrimitiveUnboundedRead.get());
}
|
/**
 * Saves the attribute groups of the given object under the default
 * attribute-groups node of the data node.
 *
 * @throws KettleException if saving the attributes fails
 */
public static final void saveAttributesMap( DataNode dataNode, AttributesInterface attributesInterface )
  throws KettleException {
  saveAttributesMap( dataNode, attributesInterface, NODE_ATTRIBUTE_GROUPS );
}
|
/**
 * Saving with a null AttributesInterface must create no attribute nodes,
 * neither under the default attribute-groups tag nor under the custom tag.
 */
@Test
public void testSaveAttributesMap_CustomTag_NullParameter() throws Exception {
  try ( MockedStatic<AttributesMapUtil> mockedAttributesMapUtil = mockStatic( AttributesMapUtil.class ) ) {
    // Only the three-argument overload runs for real; everything else is mocked.
    mockedAttributesMapUtil.when( () -> AttributesMapUtil.saveAttributesMap( any( DataNode.class ),
      any( AttributesInterface.class ), anyString() ) ).thenCallRealMethod();
    DataNode dataNode = new DataNode( CNST_DUMMY );
    AttributesMapUtil.saveAttributesMap( dataNode, null, CUSTOM_TAG );
    assertNull( dataNode.getNode( AttributesMapUtil.NODE_ATTRIBUTE_GROUPS ) );
    assertNull( dataNode.getNode( CUSTOM_TAG ) );
  }
}
|
/**
 * Builds the remote URL of the md5.checksum file under the cruise-output
 * folder for the given job path.
 */
@Override
public String url(String remoteHost, String workingUrl) {
    return remoteHost + "/remoting/files/" + workingUrl + "/"
            + ArtifactLogUtil.CRUISE_OUTPUT_FOLDER + "/" + ArtifactLogUtil.MD5_CHECKSUM_FILENAME;
}
|
/**
 * The checksum URL must point at cruise-output/md5.checksum beneath the job
 * path on the remote host.
 */
@Test
public void shouldGenerateChecksumFileUrl() throws IOException {
    String url = checksumFileHandler.url("http://foo/go", "cruise/1/stage/1/job");
    assertThat(url, is("http://foo/go/remoting/files/cruise/1/stage/1/job/cruise-output/md5.checksum"));
}
|
/**
 * Reads all row responses from the server, retrying retryable failures from
 * the offset of the last successfully read row, up to maxReadRowsRetries
 * times. On a non-retryable failure the client is closed before the
 * exception is rethrown.
 *
 * @return an iterator over the accumulated responses
 */
public Iterator<ReadRowsResponse> readRows()
{
    List<ReadRowsResponse> readRowResponses = new ArrayList<>();
    long readRowsCount = 0;
    int retries = 0;
    Iterator<ReadRowsResponse> serverResponses = fetchResponses(request);
    while (serverResponses.hasNext()) {
        try {
            ReadRowsResponse response = serverResponses.next();
            readRowsCount += response.getRowCount();
            readRowResponses.add(response);
        }
        catch (RuntimeException e) {
            // if relevant, retry the read, from the last read position
            if (BigQueryUtil.isRetryable(e) && retries < maxReadRowsRetries) {
                request.getReadPositionBuilder().setOffset(readRowsCount);
                serverResponses = fetchResponses(request);
                retries++;
            }
            else {
                // to safely close the client: try-with-resources closes it
                // while the original exception propagates.
                try (BigQueryStorageClient ignored = client) {
                    throw e;
                }
            }
        }
    }
    return readRowResponses.iterator();
}
|
/**
 * With no server failures, all responses of the single batch are returned
 * and their row counts add up.
 */
@Test
void testNoFailures()
{
    MockResponsesBatch batch1 = new MockResponsesBatch();
    batch1.addResponse(Storage.ReadRowsResponse.newBuilder().setRowCount(10).build());
    batch1.addResponse(Storage.ReadRowsResponse.newBuilder().setRowCount(11).build());
    // so we can run multiple tests
    ImmutableList<Storage.ReadRowsResponse> responses = ImmutableList.copyOf(
            new MockReadRowsHelper(client, request, 3, ImmutableList.of(batch1))
                    .readRows());
    assertThat(responses.size()).isEqualTo(2);
    assertThat(responses.stream().mapToLong(Storage.ReadRowsResponse::getRowCount).sum()).isEqualTo(21);
}
|
/**
 * Returns whether the node with the given order is a leaf in a tree of the
 * given depth (leaves occupy the last level).
 *
 * @throws IllegalArgumentException if depth is not positive or nodeOrder is
 *                                  outside the tree
 */
static boolean isLeaf(int nodeOrder, int depth) {
    checkTrue(depth > 0, "Invalid depth: " + depth);
    final int maxNodeOrder = getNumberOfNodes(depth) - 1;
    checkTrue(nodeOrder >= 0 && nodeOrder <= maxNodeOrder, "Invalid nodeOrder: " + nodeOrder + " in a tree with depth "
            + depth);
    // Any order at or past the left-most node of the last level is a leaf.
    return nodeOrder >= MerkleTreeUtil.getLeftMostNodeOrderOnLevel(depth - 1);
}
|
/**
 * A negative node order is outside any tree and must be rejected.
 */
@Test(expected = IllegalArgumentException.class)
public void testIsLeafThrowsOnNegativeNodeOrder() {
    MerkleTreeUtil.isLeaf(-1, 3);
}
|
/**
 * Writes the given bytes to the file at the given path, overwriting any
 * existing file.
 *
 * @param fs    the file system to write to
 * @param path  the destination path
 * @param bytes the bytes to write
 * @return the file system, to allow call chaining
 * @throws IOException if an I/O error occurs while writing
 * @throws NullPointerException if any argument is null
 */
public static FileSystem write(final FileSystem fs, final Path path,
    final byte[] bytes) throws IOException {
  // Validate every argument up front; previously only path and bytes were
  // checked, so a null fs failed later with a less obvious NPE.
  Objects.requireNonNull(fs);
  Objects.requireNonNull(path);
  Objects.requireNonNull(bytes);
  try (FSDataOutputStream out = fs.createFile(path).overwrite(true).build()) {
    out.write(bytes);
  }
  return fs;
}
|
/**
 * Bytes written through FileUtil.write must be readable back unchanged from
 * the local file system.
 */
@Test
public void testWriteBytesFileSystem() throws IOException {
  URI uri = tmp.toURI();
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(uri, conf);
  Path testPath = new Path(new Path(uri), "writebytes.out");
  byte[] write = new byte[] {0x00, 0x01, 0x02, 0x03};
  FileUtil.write(fs, testPath, write);
  // Read back via plain java.io to verify the round trip.
  byte[] read = FileUtils.readFileToByteArray(new File(testPath.toUri()));
  assertArrayEquals(write, read);
}
|
@GetMapping(
    path = "/api/-/search",
    produces = MediaType.APPLICATION_JSON_VALUE
)
@CrossOrigin
@Operation(summary = "Search extensions via text entered by a user")
@ApiResponses({
    @ApiResponse(
        responseCode = "200",
        description = "The search results are returned in JSON format"
    ),
    @ApiResponse(
        responseCode = "400",
        description = "The request contains an invalid parameter value",
        content = @Content(
            mediaType = MediaType.APPLICATION_JSON_VALUE,
            examples = @ExampleObject(value = "{\"error\": \"The parameter 'size' must not be negative.\"}")
        )
    ),
    @ApiResponse(
        responseCode = "429",
        description = "A client has sent too many requests in a given amount of time",
        content = @Content(),
        headers = {
            @Header(
                name = "X-Rate-Limit-Retry-After-Seconds",
                description = "Number of seconds to wait after receiving a 429 response",
                schema = @Schema(type = "integer", format = "int32")
            ),
            @Header(
                name = "X-Rate-Limit-Remaining",
                description = "Remaining number of requests left",
                schema = @Schema(type = "integer", format = "int32")
            )
        }
    )
})
public ResponseEntity<SearchResultJson> search(
        @RequestParam(required = false)
        @Parameter(description = "Query text for searching", example = "javascript")
        String query,
        @RequestParam(required = false)
        @Parameter(description = "Extension category as shown in the UI", example = "Programming Languages")
        String category,
        @RequestParam(required = false)
        @Parameter(
            description = "Target platform",
            example = TargetPlatform.NAME_LINUX_ARM64,
            schema = @Schema(type = "string", allowableValues = {
                NAME_WIN32_X64, NAME_WIN32_IA32, NAME_WIN32_ARM64,
                NAME_LINUX_X64, NAME_LINUX_ARM64, NAME_LINUX_ARMHF,
                NAME_ALPINE_X64, NAME_ALPINE_ARM64,
                NAME_DARWIN_X64, NAME_DARWIN_ARM64,
                NAME_WEB, NAME_UNIVERSAL
            })
        )
        String targetPlatform,
        @RequestParam(defaultValue = "18")
        @Parameter(description = "Maximal number of entries to return", schema = @Schema(type = "integer", minimum = "0", defaultValue = "18"))
        int size,
        @RequestParam(defaultValue = "0")
        @Parameter(description = "Number of entries to skip (usually a multiple of the page size)", schema = @Schema(type = "integer", minimum = "0", defaultValue = "0"))
        int offset,
        @RequestParam(defaultValue = "desc")
        @Parameter(description = "Descending or ascending sort order", schema = @Schema(type = "string", allowableValues = {"asc", "desc"}))
        String sortOrder,
        @RequestParam(defaultValue = "relevance")
        @Parameter(description = "Sort key (relevance is a weighted mix of various properties)", schema = @Schema(type = "string", allowableValues = {"relevance", "timestamp", "averageRating", "downloadCount"}))
        String sortBy,
        @RequestParam(required = false)
        @Parameter(description = "Whether to include information on all available versions for each returned entry")
        boolean includeAllVersions
) {
    // Reject negative paging parameters before touching any registry.
    if (size < 0) {
        var json = SearchResultJson.error("The parameter 'size' must not be negative.");
        return new ResponseEntity<>(json, HttpStatus.BAD_REQUEST);
    }
    if (offset < 0) {
        var json = SearchResultJson.error("The parameter 'offset' must not be negative.");
        return new ResponseEntity<>(json, HttpStatus.BAD_REQUEST);
    }

    var options = new ISearchService.Options(query, category, targetPlatform, size, offset, sortOrder, sortBy, includeAllVersions);
    var result = new SearchResultJson();
    result.extensions = new ArrayList<>(size);
    // Query each registry in turn, merging results until the page is full.
    for (var registry : getRegistries()) {
        if (result.extensions.size() >= size) {
            return ResponseEntity.ok(result);
        }
        try {
            var subResult = registry.search(options);
            if(result.extensions.isEmpty() && subResult.extensions != null) {
                // First registry with hits: take its page as-is.
                result.extensions.addAll(subResult.extensions);
            } else if (subResult.extensions != null && !subResult.extensions.isEmpty()) {
                // Later registries: merge up to the remaining page capacity.
                int limit = size - result.extensions.size();
                var subResultSize = mergeSearchResults(result, subResult.extensions, limit);
                // NOTE(review): this offset bookkeeping assumes each registry
                // reports the offset it actually applied — confirm against
                // ISearchService implementations.
                result.offset += subResult.offset;
                offset = Math.max(offset - subResult.offset - subResultSize, 0);
            }
            result.totalSize += subResult.totalSize;
        } catch (NotFoundException exc) {
            // Try the next registry
        } catch (ErrorResultException exc) {
            return exc.toResponseEntity(SearchResultJson.class);
        }
    }
    return ResponseEntity.ok()
        .cacheControl(CacheControl.noCache().cachePublic())
        .body(result);
}
|
/**
 * A basic search request must return the mocked extension as a single search
 * entry with its latest-version metadata.
 */
@Test
public void testSearch() throws Exception {
    var extVersions = mockSearch();
    extVersions.forEach(extVersion -> Mockito.when(repositories.findLatestVersion(extVersion.getExtension(), null, false, true)).thenReturn(extVersion));
    Mockito.when(repositories.findLatestVersions(extVersions.stream().map(ExtensionVersion::getExtension).map(Extension::getId).toList()))
            .thenReturn(extVersions);
    mockMvc.perform(get("/api/-/search?query={query}&size={size}&offset={offset}", "foo", "10", "0"))
            .andExpect(status().isOk())
            .andExpect(content().json(searchJson(s -> {
                s.offset = 0;
                s.totalSize = 1;
                var e1 = new SearchEntryJson();
                e1.namespace = "foo";
                e1.name = "bar";
                e1.version = "1.0.0";
                e1.timestamp = "2000-01-01T10:00Z";
                e1.displayName = "Foo Bar";
                s.extensions.add(e1);
            })));
}
|
/**
 * Replaces all occurrences of searchString within text with replacement.
 * Delegates to the four-argument overload with no replacement limit.
 */
public static String replace(final String text, final String searchString, final String replacement) {
    return replace(text, searchString, replacement, -1);
}
|
/**
 * replace() must be null/empty safe, replace all occurrences by default, and
 * honor the max-replacements limit in the four-argument overload.
 */
@Test
void testReplace() throws Exception {
    // Null/empty inputs and null/empty arguments are no-ops.
    assertThat(StringUtils.replace(null, "*", "*"), nullValue());
    assertThat(StringUtils.replace("", "*", "*"), equalTo(""));
    assertThat(StringUtils.replace("any", null, "*"), equalTo("any"));
    assertThat(StringUtils.replace("any", "*", null), equalTo("any"));
    assertThat(StringUtils.replace("any", "", "*"), equalTo("any"));
    assertThat(StringUtils.replace("aba", "a", null), equalTo("aba"));
    assertThat(StringUtils.replace("aba", "a", ""), equalTo("b"));
    assertThat(StringUtils.replace("aba", "a", "z"), equalTo("zbz"));
    // Four-argument overload with an explicit replacement limit.
    assertThat(StringUtils.replace(null, "*", "*", 64), nullValue());
    assertThat(StringUtils.replace("", "*", "*", 64), equalTo(""));
    assertThat(StringUtils.replace("any", null, "*", 64), equalTo("any"));
    assertThat(StringUtils.replace("any", "*", null, 64), equalTo("any"));
    assertThat(StringUtils.replace("any", "", "*", 64), equalTo("any"));
    assertThat(StringUtils.replace("any", "*", "*", 0), equalTo("any"));
    assertThat(StringUtils.replace("abaa", "a", null, -1), equalTo("abaa"));
    assertThat(StringUtils.replace("abaa", "a", "", -1), equalTo("b"));
    // The limit caps how many occurrences are replaced, left to right.
    assertThat(StringUtils.replace("abaa", "a", "z", 0), equalTo("abaa"));
    assertThat(StringUtils.replace("abaa", "a", "z", 1), equalTo("zbaa"));
    assertThat(StringUtils.replace("abaa", "a", "z", 2), equalTo("zbza"));
}
|
/**
 * Identity conversion: returns the source string unchanged (null included).
 */
@Override
public String convert(String source) {
    return source;
}
|
/**
 * The identity converter must return its input unchanged, including null.
 */
@Test
void testConvert() {
    assertEquals("1", converter.convert("1"));
    assertNull(converter.convert(null));
}
|
/**
 * Wraps rendered markdown HTML in a div carrying the "markdown-body" CSS
 * class.
 *
 * @param html the rendered HTML fragment
 * @return the fragment surrounded by the markdown-body div, with newlines
 *         separating the tags from the content
 */
public static String wrapWithMarkdownClassDiv(String html) {
  // Plain concatenation is clearer than a StringBuilder chain for a fixed
  // three-part string; the compiler produces equivalent code.
  return "<div class=\"markdown-body\">\n" + html + "\n</div>";
}
|
/**
 * Markdown headers H1-H6, plus the Setext (underline) forms of H1 and H2,
 * must render to the corresponding HTML heading tags wrapped in the
 * markdown-body div.
 */
@Test
void testHeader() {
    InterpreterResult r1 = md.interpret("# H1", null);
    assertEquals(wrapWithMarkdownClassDiv("<h1>H1</h1>\n"), r1.message().get(0).getData());
    InterpreterResult r2 = md.interpret("## H2", null);
    assertEquals(wrapWithMarkdownClassDiv("<h2>H2</h2>\n"), r2.message().get(0).getData());
    InterpreterResult r3 = md.interpret("### H3", null);
    assertEquals(wrapWithMarkdownClassDiv("<h3>H3</h3>\n"), r3.message().get(0).getData());
    InterpreterResult r4 = md.interpret("#### H4", null);
    assertEquals(wrapWithMarkdownClassDiv("<h4>H4</h4>\n"), r4.message().get(0).getData());
    InterpreterResult r5 = md.interpret("##### H5", null);
    assertEquals(wrapWithMarkdownClassDiv("<h5>H5</h5>\n"), r5.message().get(0).getData());
    InterpreterResult r6 = md.interpret("###### H6", null);
    assertEquals(wrapWithMarkdownClassDiv("<h6>H6</h6>\n"), r6.message().get(0).getData());
    // Setext style: a line underlined with "=" is H1, with "-" is H2.
    InterpreterResult r7 = md.interpret("Alt-H1\n" + "======", null);
    assertEquals(wrapWithMarkdownClassDiv("<h1>Alt-H1</h1>\n"), r7.message().get(0).getData());
    InterpreterResult r8 = md.interpret("Alt-H2\n" + "------", null);
    assertEquals(wrapWithMarkdownClassDiv("<h2>Alt-H2</h2>\n"), r8.message().get(0).getData());
}
|
/**
 * Returns the cached client transport for the given config, creating it via
 * the extension loader on first use. If two threads race to create the same
 * transport, the loser's instance is destroyed and the winner's is shared.
 */
@Override
public ClientTransport getClientTransport(ClientTransportConfig config) {
    ClientTransport transport = allTransports.get(config);
    if (transport == null) {
        transport = ExtensionLoaderFactory.getExtensionLoader(ClientTransport.class)
            .getExtension(config.getContainer(),
                new Class[] { ClientTransportConfig.class },
                new Object[] { config });
        ClientTransport old = allTransports.putIfAbsent(config, transport); // keep a single long-lived connection per config
        if (old != null) {
            if (LOGGER.isWarnEnabled()) {
                LOGGER.warn("Multiple threads init ClientTransport with same ClientTransportConfig!");
            }
            transport.destroy(); // another thread inserted first; discard ours and use theirs
            transport = old;
        }
    }
    return transport;
}
|
/**
 * A non-reusable holder must hand out a distinct transport per request (even
 * for equal configs), track them all, and support removal and full destroy.
 */
@Test
public void getClientTransport() {
    NotReusableClientTransportHolder holder = new NotReusableClientTransportHolder();
    ClientTransportConfig config = new ClientTransportConfig();
    config.setProviderInfo(new ProviderInfo().setHost("127.0.0.1").setPort(12222))
        .setContainer("test");
    TestClientTransport clientTransport = (TestClientTransport) holder.getClientTransport(config);
    // An equal config still yields a different transport instance.
    ClientTransportConfig config2 = new ClientTransportConfig();
    config2.setProviderInfo(new ProviderInfo().setHost("127.0.0.1").setPort(12222))
        .setContainer("test");
    TestClientTransport clientTransport2 = (TestClientTransport) holder.getClientTransport(config2);
    Assert.assertFalse(clientTransport == clientTransport2);
    ClientTransportConfig config3 = new ClientTransportConfig();
    config3.setProviderInfo(new ProviderInfo().setHost("127.0.0.1").setPort(12223))
        .setContainer("test");
    TestClientTransport clientTransport3 = (TestClientTransport) holder.getClientTransport(config3);
    Assert.assertFalse(clientTransport2 == clientTransport3);
    // Removing null is a no-op.
    Assert.assertFalse(holder.removeClientTransport(null));
    // Transports are removable regardless of in-flight request count.
    clientTransport.setRequest(4);
    Assert.assertTrue(holder.removeClientTransport(clientTransport));
    Assert.assertEquals(2, holder.size());
    clientTransport2.setRequest(0);
    Assert.assertTrue(holder.removeClientTransport(clientTransport2));
    Assert.assertEquals(1, holder.size());
    // destroy() clears everything that is left.
    holder.destroy();
    Assert.assertEquals(0, holder.size());
}
|
/**
 * Returns the display name of this provider: "CodeMagic".
 */
@Override
public String getName() {
    return "CodeMagic";
}
|
/**
 * The provider must report "CodeMagic" as its name.
 */
@Test
public void getName() {
    assertThat(underTest.getName()).isEqualTo("CodeMagic");
}
|
/**
 * Handles the "dir" command: lists field, method, static-member, or JVM-view
 * names for a target, depending on the sub-command read from the stream. The
 * names are returned newline-joined; an unknown sub-command produces an error
 * response, and any exception is reported as a generic error command.
 *
 * @throws IOException if reading from or writing to the stream fails
 */
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
        throws Py4JException, IOException {
    String subCommand = safeReadLine(reader);
    boolean unknownSubCommand = false;
    String param = reader.readLine();
    String returnCommand = null;
    try {
        final String[] names;
        if (subCommand.equals(DIR_FIELDS_SUBCOMMAND_NAME)) {
            Object targetObject = gateway.getObject(param);
            names = reflectionEngine.getPublicFieldNames(targetObject);
        } else if (subCommand.equals(DIR_METHODS_SUBCOMMAND_NAME)) {
            Object targetObject = gateway.getObject(param);
            names = reflectionEngine.getPublicMethodNames(targetObject);
        } else if (subCommand.equals(DIR_STATIC_SUBCOMMAND_NAME)) {
            Class<?> clazz = TypeUtil.forName(param);
            names = reflectionEngine.getPublicStaticNames(clazz);
        } else if (subCommand.equals(DIR_JVMVIEW_SUBCOMMAND_NAME)) {
            names = getJvmViewNames(param, reader);
        } else {
            names = null;
            unknownSubCommand = true;
        }

        // Read and discard end of command
        reader.readLine();

        if (unknownSubCommand) {
            returnCommand = Protocol.getOutputErrorCommand("Unknown Array SubCommand Name: " + subCommand);
        } else if (names == null) {
            ReturnObject returnObject = gateway.getReturnObject(null);
            returnCommand = Protocol.getOutputCommand(returnObject);
        } else {
            // String.join yields "" for an empty array, matching the previous
            // StringBuilder-append-and-trim implementation.
            ReturnObject returnObject = gateway.getReturnObject(String.join("\n", names));
            returnCommand = Protocol.getOutputCommand(returnObject);
        }
    } catch (Exception e) {
        logger.log(Level.FINEST, "Error in a dir subcommand", e);
        returnCommand = Protocol.getOutputErrorCommand();
    }
    logger.finest("Returning command: " + returnCommand);
    writer.write(returnCommand);
    writer.flush();
}
|
/**
 * The "s" (static) sub-command must list the public static members of the
 * named class.
 */
@Test
public void testDirStatics() throws Exception {
    // Protocol: sub-command line, class-name line, end-of-command marker.
    String inputCommand = "s\n" + ExampleClass.class.getName() + "\ne\n";
    assertTrue(gateway.getBindings().containsKey(target));
    command.execute("d", new BufferedReader(new StringReader(inputCommand)), writer);
    Set<String> methods = convertResponse(sWriter.toString());
    assertEquals(ExampleClassStatics, methods);
}
|
/**
 * Loads a pod template file and splits its containers into the main container
 * (matched by name) and the remaining containers. A template without a spec
 * gets an empty one, and a missing main container is replaced by an empty
 * container (logged).
 *
 * @param kubeClient        client used to parse the template file
 * @param podTemplateFile   the pod template to load
 * @param mainContainerName name identifying the main container in the template
 * @return the FlinkPod holding the pod (main container removed from its spec)
 *         and the extracted main container
 */
public static FlinkPod loadPodFromTemplateFile(
        FlinkKubeClient kubeClient, File podTemplateFile, String mainContainerName) {
    final KubernetesPod pod = kubeClient.loadPodFromTemplateFile(podTemplateFile);
    final List<Container> otherContainers = new ArrayList<>();
    Container mainContainer = null;
    if (null != pod.getInternalResource().getSpec()) {
        for (Container container : pod.getInternalResource().getSpec().getContainers()) {
            if (mainContainerName.equals(container.getName())) {
                mainContainer = container;
            } else {
                otherContainers.add(container);
            }
        }
        // The main container is carried separately; keep only the others in the spec.
        pod.getInternalResource().getSpec().setContainers(otherContainers);
    } else {
        // Set an empty spec for pod template
        pod.getInternalResource().setSpec(new PodSpecBuilder().build());
    }
    if (mainContainer == null) {
        LOG.info(
                "Could not find main container {} in pod template, using empty one to initialize.",
                mainContainerName);
        mainContainer = new ContainerBuilder().build();
    }
    return new FlinkPod(pod.getInternalResource(), mainContainer);
}
|
/**
 * Loading a pod template from a path that does not exist must fail with a
 * cause naming the missing file.
 */
@Test
void testLoadPodFromTemplateWithNonExistPathShouldFail() {
    final String nonExistFile = "/path/of/non-exist.yaml";
    final String msg = String.format("Pod template file %s does not exist.", nonExistFile);
    assertThatThrownBy(
            () ->
                    KubernetesUtils.loadPodFromTemplateFile(
                            flinkKubeClient,
                            new File(nonExistFile),
                            KubernetesPodTemplateTestUtils.TESTING_MAIN_CONTAINER_NAME),
            "Kubernetes client should fail when the pod template file does not exist.")
        .satisfies(FlinkAssertions.anyCauseMatches(msg));
}
|
/**
 * Readiness probe endpoint: returns "ok" when every registered module health
 * checker reports ready, otherwise a failure result carrying the reason.
 */
@GetMapping("/readiness")
public Result<String> readiness(HttpServletRequest request) {
    ReadinessResult result = ModuleHealthCheckerHolder.getInstance().checkReadiness();
    if (result.isSuccess()) {
        return Result.success("ok");
    }
    return Result.failure(result.getResultMessage());
}
|
/**
 * With the config store reachable and the server UP, the readiness endpoint
 * must return a success result.
 */
@Test
void testReadinessSuccess() throws Exception {
    Mockito.when(configInfoPersistService.configInfoCount(any(String.class))).thenReturn(0);
    Mockito.when(serverStatusManager.getServerStatus()).thenReturn(ServerStatus.UP);
    Result<String> result = healthControllerV2.readiness(null);
    assertEquals(0, result.getCode().intValue());
    assertEquals("success", result.getMessage());
}
|
/**
 * Returns a shuffled copy of the given endpoint list; the argument itself is
 * never modified. Lists with fewer than two elements are returned as an
 * (unshuffled) copy.
 *
 * @param list the endpoints to randomize
 * @return a new list containing the same endpoints in random order
 */
public static <T extends EurekaEndpoint> List<T> randomize(List<T> list) {
    List<T> shuffled = new ArrayList<>(list);
    if (shuffled.size() >= 2) {
        Collections.shuffle(shuffled, ThreadLocalRandom.current());
    }
    return shuffled;
}
|
/**
 * randomize() must return a new list instance, never the argument itself.
 */
@Test
public void testRandomizeReturnsACopyOfTheMethodParameter() throws Exception {
    List<AwsEndpoint> firstList = SampleCluster.UsEast1a.builder().withServerPool(1).build();
    List<AwsEndpoint> secondList = ResolverUtils.randomize(firstList);
    assertThat(firstList, is(not(sameInstance(secondList))));
}
|
/**
 * Hash derived from the wrapped instance and the scheme.
 * NOTE(review): presumably matches the fields compared in equals() — confirm
 * against that method, which is not visible here.
 */
@Override
public int hashCode() {
    return Objects.hash(instance, scheme);
}
|
/**
 * Two PolarisServiceInstance wrappers around equal underlying instances must
 * produce equal hash codes.
 */
@Test
@DisplayName("test hashCode().")
public void test3() {
    DefaultInstance first = new DefaultInstance();
    first.setId("test-1");
    first.setProtocol("http");
    DefaultInstance second = new DefaultInstance();
    second.setId("test-1");
    second.setProtocol("http");
    PolarisServiceInstance wrapperA = new PolarisServiceInstance(first);
    PolarisServiceInstance wrapperB = new PolarisServiceInstance(second);
    assertThat(wrapperA.hashCode()).isEqualTo(wrapperB.hashCode());
}
|
/**
 * Registers a subscriber for the given slow event type: adds it to the flat
 * subscriber set and to the per-event-type mapping. The mapping is mutated
 * under the lock so the lookup-then-insert cannot race.
 */
@Override
public void addSubscriber(Subscriber subscriber, Class<? extends Event> subscribeType) {
    // Actually, do a classification based on the slowEvent type.
    Class<? extends SlowEvent> subSlowEventType = (Class<? extends SlowEvent>) subscribeType;
    // For stop waiting subscriber, see {@link DefaultPublisher#openEventHandler}.
    subscribers.add(subscriber);
    lock.lock();
    try {
        // computeIfAbsent replaces the former get/null-check/put sequence.
        subMappings.computeIfAbsent(subSlowEventType, type -> new ConcurrentHashSet<>())
                .add(subscriber);
    } finally {
        lock.unlock();
    }
}
|
/**
 * After a subscriber opts into ignoring expired events, re-publishing an
 * already-delivered slow event must not invoke that subscriber again.
 */
@Test
void testIgnoreExpiredEvent() throws InterruptedException {
    MockSlowEvent1 mockSlowEvent1 = new MockSlowEvent1();
    MockSlowEvent2 mockSlowEvent2 = new MockSlowEvent2();
    defaultSharePublisher.addSubscriber(smartSubscriber1, MockSlowEvent1.class);
    defaultSharePublisher.addSubscriber(smartSubscriber2, MockSlowEvent2.class);
    // First publication: both subscribers receive their event type.
    defaultSharePublisher.publish(mockSlowEvent1);
    defaultSharePublisher.publish(mockSlowEvent2);
    // Allow the asynchronous delivery loop to run.
    TimeUnit.MILLISECONDS.sleep(1100);
    verify(smartSubscriber1).onEvent(mockSlowEvent1);
    verify(smartSubscriber2).onEvent(mockSlowEvent2);
    reset(smartSubscriber1);
    when(smartSubscriber1.ignoreExpireEvent()).thenReturn(true);
    // Re-publishing the same event instance must now be ignored.
    defaultSharePublisher.publish(mockSlowEvent1);
    TimeUnit.MILLISECONDS.sleep(100);
    verify(smartSubscriber1, never()).onEvent(mockSlowEvent1);
}
|
/**
 * Returns the permission as an octal mode string, e.g. "400".
 */
public String getMode() {
    return Integer.toString(toInteger(), 8);
}
|
/**
 * An owner-read-only permission must serialize to octal mode "400".
 */
@Test
public void testToMode() {
    final Permission permission = new Permission(Permission.Action.read,
            Permission.Action.none, Permission.Action.none);
    assertEquals("400", permission.getMode());
}
|
/**
 * Resolves the output codec factory for the job. An explicit Avro codec name
 * ({@code AvroJob.OUTPUT_CODEC}) wins; otherwise a configured Hadoop
 * compression codec class is mapped to its Avro equivalent, falling back to
 * deflate. Returns null when output compression is disabled.
 */
static CodecFactory getCodecFactory(JobConf job) {
  CodecFactory factory = null;
  if (FileOutputFormat.getCompressOutput(job)) {
    int deflateLevel = job.getInt(DEFLATE_LEVEL_KEY, DEFAULT_DEFLATE_LEVEL);
    int xzLevel = job.getInt(XZ_LEVEL_KEY, DEFAULT_XZ_LEVEL);
    int zstdLevel = job.getInt(ZSTD_LEVEL_KEY, DEFAULT_ZSTANDARD_LEVEL);
    boolean zstdBufferPool = job.getBoolean(ZSTD_BUFFERPOOL_KEY, DEFAULT_ZSTANDARD_BUFFERPOOL);
    String codecName = job.get(AvroJob.OUTPUT_CODEC);
    if (codecName == null) {
      // No Avro codec configured: try to translate the Hadoop codec class.
      String codecClassName = job.get("mapred.output.compression.codec", null);
      String avroCodecName = HadoopCodecFactory.getAvroCodecName(codecClassName);
      if (codecClassName != null && avroCodecName != null) {
        factory = HadoopCodecFactory.fromHadoopString(codecClassName);
        // Record the translated name so later lookups see the Avro codec.
        job.set(AvroJob.OUTPUT_CODEC, avroCodecName);
        return factory;
      } else {
        return CodecFactory.deflateCodec(deflateLevel);
      }
    } else {
      // Codecs with tunable levels are constructed explicitly; everything
      // else goes through the generic name lookup.
      if (codecName.equals(DEFLATE_CODEC)) {
        factory = CodecFactory.deflateCodec(deflateLevel);
      } else if (codecName.equals(XZ_CODEC)) {
        factory = CodecFactory.xzCodec(xzLevel);
      } else if (codecName.equals(ZSTANDARD_CODEC)) {
        factory = CodecFactory.zstandardCodec(zstdLevel, false, zstdBufferPool);
      } else {
        factory = CodecFactory.fromString(codecName);
      }
    }
  }
  return factory;
}
|
/**
 * A Hadoop GZip codec class configured for the job must be translated to
 * Avro's deflate codec factory.
 */
@Test
void gZipCodecUsingHadoopClass() {
  CodecFactory avroDeflateCodec = CodecFactory.fromString("deflate");
  JobConf job = new JobConf();
  job.set("mapred.output.compress", "true");
  job.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.GZipCodec");
  CodecFactory factory = AvroOutputFormat.getCodecFactory(job);
  assertNotNull(factory);
  assertEquals(factory.getClass(), avroDeflateCodec.getClass());
}
|
/**
 * Allocates containers for the outstanding OPPORTUNISTIC requests of the
 * given application attempt. Keeps looping over the outstanding scheduler
 * keys until a pass produces no new allocation, or the configured
 * max-allocations-per-AM-heartbeat cap is reached.
 *
 * @return the containers allocated in this call
 * @throws YarnException on allocation failure
 */
@Override
public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
    List<ResourceRequest> oppResourceReqs,
    ApplicationAttemptId applicationAttemptId,
    OpportunisticContainerContext opportContext, long rmIdentifier,
    String appSubmitter) throws YarnException {
  // Update black list.
  updateBlacklist(blackList, opportContext);

  // Add OPPORTUNISTIC requests to the outstanding ones.
  opportContext.addToOutstandingReqs(oppResourceReqs);
  Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist());
  Set<String> allocatedNodes = new HashSet<>();
  List<Container> allocatedContainers = new ArrayList<>();

  // Satisfy the outstanding OPPORTUNISTIC requests.
  boolean continueLoop = true;
  while (continueLoop) {
    continueLoop = false;
    List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>();
    for (SchedulerRequestKey schedulerKey :
        opportContext.getOutstandingOpReqs().descendingKeySet()) {
      // Allocated containers :
      //  Key = Requested Capability,
      //  Value = List of Containers of given cap (the actual container size
      //          might be different than what is requested, which is why
      //          we need the requested capability (key) to match against
      //          the outstanding reqs)
      int remAllocs = -1;
      int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat();
      if (maxAllocationsPerAMHeartbeat > 0) {
        // Budget left for this heartbeat: cap minus what is already
        // allocated in previous passes and in this pass so far.
        remAllocs =
            maxAllocationsPerAMHeartbeat - allocatedContainers.size()
                - getTotalAllocations(allocations);
        if (remAllocs <= 0) {
          LOG.info("Not allocating more containers as we have reached max "
                  + "allocations per AM heartbeat {}",
              maxAllocationsPerAMHeartbeat);
          break;
        }
      }
      Map<Resource, List<Allocation>> allocation = allocate(
          rmIdentifier, opportContext, schedulerKey, applicationAttemptId,
          appSubmitter, nodeBlackList, allocatedNodes, remAllocs);
      if (allocation.size() > 0) {
        allocations.add(allocation);
        // Progress was made, so run another pass over the scheduler keys.
        continueLoop = true;
      }
    }
    matchAllocation(allocations, allocatedContainers, opportContext);
  }
  return allocatedContainers;
}
|
/**
 * Verifies that one allocation-latency metric entry is recorded per
 * allocated OPPORTUNISTIC container.
 */
@Test
public void testAllocationLatencyMetrics() throws Exception {
// Spy the context so the metrics sink can be replaced with a mock.
oppCntxt = spy(oppCntxt);
OpportunisticSchedulerMetrics metrics =
mock(OpportunisticSchedulerMetrics.class);
when(oppCntxt.getOppSchedulerMetrics()).thenReturn(metrics);
ResourceBlacklistRequest blacklistRequest =
ResourceBlacklistRequest.newInstance(
Collections.emptyList(), Collections.emptyList());
// Three request shapes: ANY, a specific host (h6, not in the node list),
// and a rack (/r3, also not in the node list).
List<ResourceRequest> reqs = Arrays.asList(
ResourceRequest.newInstance(PRIORITY_NORMAL, "*", CAPABILITY_1GB, 2,
true, null, OPPORTUNISTIC_REQ),
ResourceRequest.newInstance(PRIORITY_NORMAL, "h6", CAPABILITY_1GB, 2,
true, null, OPPORTUNISTIC_REQ),
ResourceRequest.newInstance(PRIORITY_NORMAL, "/r3", CAPABILITY_1GB, 2,
true, null, OPPORTUNISTIC_REQ));
ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(
ApplicationId.newInstance(0L, 1), 1);
// Four candidate nodes on racks /r1 and /r2.
oppCntxt.updateNodeList(
Arrays.asList(
RemoteNode.newInstance(
NodeId.newInstance("h3", 1234), "h3:1234", "/r2"),
RemoteNode.newInstance(
NodeId.newInstance("h2", 1234), "h2:1234", "/r1"),
RemoteNode.newInstance(
NodeId.newInstance("h5", 1234), "h5:1234", "/r1"),
RemoteNode.newInstance(
NodeId.newInstance("h4", 1234), "h4:1234", "/r2")));
List<Container> containers = allocator.allocateContainers(
blacklistRequest, reqs, appAttId, oppCntxt, 1L, "luser");
LOG.info("Containers: {}", containers);
Assert.assertEquals(2, containers.size());
// for each allocated container, latency should be added.
verify(metrics, times(2)).addAllocateOLatencyEntry(anyLong());
}
|
/**
 * Filters the given providers through the selector, falling back to the full
 * provider list whenever the selection cannot be performed.
 *
 * @param selector   selector to apply; {@code null} means no filtering
 * @param consumerIp consumer ip used to build the selector context
 * @param providers  candidate instances
 * @param <T>        concrete instance type
 * @return the selected subset, or all providers if the selector is null, no
 *         context builder is registered for its context type, or selection throws
 */
@SuppressWarnings("unchecked")
public <T extends Instance> List<T> select(Selector selector, String consumerIp, List<T> providers) {
    if (Objects.isNull(selector)) {
        return providers;
    }
    // The registry is keyed by the selector's CONTEXT type, not its own type.
    SelectorContextBuilder selectorContextBuilder = contextBuilders.get(selector.getContextType());
    if (Objects.isNull(selectorContextBuilder)) {
        // BUG FIX: log the context type actually used for the lookup; the old
        // message logged selector.getType(), which is a different field and
        // made the warning misleading when diagnosing a missing builder.
        Loggers.SRV_LOG.info("[SelectorManager] cannot find the contextBuilder of type {}.", selector.getContextType());
        return providers;
    }
    try {
        Object context = selectorContextBuilder.build(consumerIp, providers);
        return (List<T>) selector.select(context);
    } catch (Exception e) {
        // Best effort: never fail the caller because of a selector error.
        Loggers.SRV_LOG.warn("[SelectorManager] execute select failed, will return all providers.", e);
        return providers;
    }
}
|
@Test
void testSelect() throws NacosException {
    // One provider instance that every selection round should return intact.
    Selector selector = selectorManager.parseSelector("mock", "key=value");
    Instance provider = new Instance();
    provider.setIp("2.2.2.2");
    List<Instance> providers = Collections.singletonList(provider);
    List<Instance> directResult = selectorManager.select(selector, "1.1.1.1", providers);
    assertEquals(1, directResult.size());
    assertEquals("2.2.2.2", directResult.get(0).getIp());
    // test json serial for Selector
    Serializer jsonSerializer = SerializeFactory.getSerializer("JSON");
    byte[] jsonBytes = jsonSerializer.serialize(selector);
    Selector jsonSelector = jsonSerializer.deserialize(jsonBytes, Selector.class);
    List<Instance> jsonResult = selectorManager.select(jsonSelector, "1.1.1.1", providers);
    assertEquals(1, jsonResult.size());
    assertEquals("2.2.2.2", jsonResult.get(0).getIp());
    // test hessian serial for Selector
    Serializer hessianSerializer = SerializeFactory.getDefault();
    byte[] hessianBytes = hessianSerializer.serialize(selector);
    Selector hessianSelector = hessianSerializer.deserialize(hessianBytes);
    List<Instance> hessianResult = selectorManager.select(hessianSelector, "1.1.1.1", providers);
    assertEquals(1, hessianResult.size());
    assertEquals("2.2.2.2", hessianResult.get(0).getIp());
}
|
/**
 * Deletes the social client with the given id.
 *
 * @param id id of the social client to delete
 */
@Override
public void deleteSocialClient(Long id) {
// Validate the record exists (throws SOCIAL_CLIENT_NOT_EXISTS otherwise).
validateSocialClientExists(id);
// Delete the record.
socialClientMapper.deleteById(id);
}
|
/** Deleting a non-existent social client must raise SOCIAL_CLIENT_NOT_EXISTS. */
@Test
public void testDeleteSocialClient_notExists() {
// Prepare a random id that has not been inserted.
Long id = randomLongId();
// Invoke and assert the service exception.
assertServiceException(() -> socialClientService.deleteSocialClient(id), SOCIAL_CLIENT_NOT_EXISTS);
}
|
/**
 * Translates a portable {@link RunnerApi.Trigger} proto into the equivalent
 * {@link TriggerStateMachine}, recursing into sub-triggers for composite cases.
 *
 * @param trigger portable trigger proto; its trigger case must be set
 * @return the corresponding state machine
 * @throws IllegalArgumentException if the trigger case is unset or unknown
 */
public static TriggerStateMachine stateMachineForTrigger(RunnerApi.Trigger trigger) {
switch (trigger.getTriggerCase()) {
// Composite triggers: translate each sub-trigger recursively.
case AFTER_ALL:
return AfterAllStateMachine.of(
stateMachinesForTriggers(trigger.getAfterAll().getSubtriggersList()));
case AFTER_ANY:
return AfterFirstStateMachine.of(
stateMachinesForTriggers(trigger.getAfterAny().getSubtriggersList()));
case AFTER_END_OF_WINDOW:
return stateMachineForAfterEndOfWindow(trigger.getAfterEndOfWindow());
case ELEMENT_COUNT:
return AfterPaneStateMachine.elementCountAtLeast(
trigger.getElementCount().getElementCount());
case AFTER_SYNCHRONIZED_PROCESSING_TIME:
return AfterSynchronizedProcessingTimeStateMachine.ofFirstElement();
case DEFAULT:
return DefaultTriggerStateMachine.of();
case NEVER:
return NeverStateMachine.ever();
case ALWAYS:
return ReshuffleTriggerStateMachine.create();
case OR_FINALLY:
return stateMachineForTrigger(trigger.getOrFinally().getMain())
.orFinally(stateMachineForTrigger(trigger.getOrFinally().getFinally()));
case REPEAT:
return RepeatedlyStateMachine.forever(
stateMachineForTrigger(trigger.getRepeat().getSubtrigger()));
case AFTER_EACH:
return AfterEachStateMachine.inOrder(
stateMachinesForTriggers(trigger.getAfterEach().getSubtriggersList()));
case AFTER_PROCESSING_TIME:
return stateMachineForAfterProcessingTime(trigger.getAfterProcessingTime());
case TRIGGER_NOT_SET:
throw new IllegalArgumentException(
String.format("Required field 'trigger' not set on %s", trigger));
default:
// New proto trigger cases added upstream land here until translated.
throw new IllegalArgumentException(String.format("Unknown trigger type %s", trigger));
}
}
|
@Test
public void testRepeatedlyTranslation() {
    // A REPEAT proto wrapping subtrigger1 must translate to a
    // RepeatedlyStateMachine wrapping the translated sub-machine.
    RunnerApi.Trigger repeatProto =
        RunnerApi.Trigger.newBuilder()
            .setRepeat(RunnerApi.Trigger.Repeat.newBuilder().setSubtrigger(subtrigger1))
            .build();
    TriggerStateMachine translated = TriggerStateMachines.stateMachineForTrigger(repeatProto);
    RepeatedlyStateMachine machine = (RepeatedlyStateMachine) translated;
    assertThat(machine, equalTo(RepeatedlyStateMachine.forever(submachine1)));
}
|
/**
 * Resolves, for every enabled external resource, the system config key it is
 * published under ("config-key" option with the given suffix).
 *
 * <p>Resources without a valid config key are skipped with a warning. If two
 * resources declare the SAME config key, only the later one survives: the
 * earlier resource is evicted from the result and a warning is logged.
 *
 * @param config configuration holding the external-resource options
 * @param suffix framework-specific config-key suffix (e.g. yarn/kubernetes)
 * @return map from resource name to its config key; empty if no resources enabled
 */
public static Map<String, String> getExternalResourceConfigurationKeys(
Configuration config, String suffix) {
final Set<String> resourceSet = getExternalResourceSet(config);
// Reverse index (configKey -> resourceName) used only to detect duplicates.
final Map<String, String> configKeysToResourceNameMap = new HashMap<>();
LOG.info("Enabled external resources: {}", resourceSet);
if (resourceSet.isEmpty()) {
return Collections.emptyMap();
}
final Map<String, String> externalResourceConfigs = new HashMap<>();
for (String resourceName : resourceSet) {
final ConfigOption<String> configKeyOption =
key(ExternalResourceOptions.getSystemConfigKeyConfigOptionForResource(
resourceName, suffix))
.stringType()
.noDefaultValue();
final String configKey = config.get(configKeyOption);
if (StringUtils.isNullOrWhitespaceOnly(configKey)) {
LOG.warn(
"Could not find valid {} for {}. Will ignore that resource.",
configKeyOption.key(),
resourceName);
} else {
// compute() both records this resource under the key and, when the key
// was already claimed, evicts the previous resource from the result so
// that a config key maps to at most one resource.
configKeysToResourceNameMap.compute(
configKey,
(ignored, previousResource) -> {
if (previousResource != null) {
LOG.warn(
"Duplicate config key {} occurred for external resources, the one named {} will overwrite the value.",
configKey,
resourceName);
externalResourceConfigs.remove(previousResource);
}
return resourceName;
});
externalResourceConfigs.put(resourceName, configKey);
}
}
return externalResourceConfigs;
}
|
@Test
public void testGetExternalResourceConfigurationKeysWithConfigKeyNotSpecifiedOrEmpty() {
    // An empty config key must cause the resource to be ignored entirely.
    final String emptyConfigKey = "";
    final Configuration configuration = new Configuration();
    configuration.set(ExternalResourceOptions.EXTERNAL_RESOURCE_LIST, RESOURCE_LIST);
    configuration.setString(
        ExternalResourceOptions.getSystemConfigKeyConfigOptionForResource(
            RESOURCE_NAME_1, SUFFIX),
        emptyConfigKey);
    final Map<String, String> resolvedKeys =
        ExternalResourceUtils.getExternalResourceConfigurationKeys(configuration, SUFFIX);
    assertThat(resolvedKeys.entrySet(), is(empty()));
}
|
/**
 * Reads the given build log and extracts the set of deployed module names.
 * The underlying file handle is closed even if parsing fails.
 */
public static Set<String> parseDeployOutput(File buildResult) throws IOException {
    try (Stream<String> lines = Files.lines(buildResult.toPath())) {
        return parseDeployOutput(lines);
    }
}
|
@Test
void testParseDeployOutputDetectsDeploymentWithAltRepository() {
    // A deploy using an alternate repository must still be attributed to its module.
    String deployLine =
        "[INFO] --- maven-deploy-plugin:2.8.2:deploy (default-deploy) @ flink-parent ---";
    String altRepoLine =
        "[INFO] Using alternate deployment repository.../tmp/flink-validation-deployment";
    assertThat(DeployParser.parseDeployOutput(Stream.of(deployLine, altRepoLine)))
        .containsExactly("flink-parent");
}
|
/**
 * Returns the name of the plugin this handler serves: the divide plugin.
 *
 * @return the divide plugin name
 */
@Override
public String pluginName() {
return PluginEnum.DIVIDE.getName();
}
|
@Test
public void pluginNamedTest() {
    // The handler must report the divide plugin's canonical name.
    String actual = divideUpstreamDataHandler.pluginName();
    String expected = PluginEnum.DIVIDE.getName();
    assertEquals(actual, expected);
}
|
/**
 * Splits a time slot into consecutive segments of the given unit length,
 * after first snapping the slot to segment boundaries.
 */
public static List<TimeSlot> split(TimeSlot timeSlot, SegmentInMinutes unit) {
    TimeSlot aligned = normalizeToSegmentBoundaries(timeSlot, unit);
    return new SlotToSegments().apply(aligned, unit);
}
|
@Test
void splittingIntoSegmentsWhenThereIsNoLeftover() {
    //given - a one-hour slot that divides evenly into 15-minute segments
    Instant start = Instant.parse("2023-09-09T00:00:00Z");
    Instant end = Instant.parse("2023-09-09T01:00:00Z");
    TimeSlot timeSlot = new TimeSlot(start, end);
    //when
    List<TimeSlot> segments = Segments.split(timeSlot, SegmentInMinutes.of(15, FIFTEEN_MINUTES_SEGMENT_DURATION));
    //then - four back-to-back segments covering the hour with no gaps
    assertEquals(4, segments.size());
    Instant cursor = start;
    for (TimeSlot segment : segments) {
        assertEquals(cursor, segment.from());
        cursor = cursor.plusSeconds(15 * 60);
        assertEquals(cursor, segment.to());
    }
    assertEquals(end, cursor);
}
|
/**
 * Resolves the function overload matching the given argument types.
 * Exact matches are preferred; implicit casts are only attempted when
 * the index supports them.
 *
 * @throws the exception from createNoMatchingFunctionException when no
 *         candidate matches (with or without implicit casting)
 */
T getFunction(final List<SqlArgument> arguments) {
    // First pass: strict matching, no implicit casting.
    final Optional<T> exact = findMatchingCandidate(arguments, false);
    if (exact.isPresent()) {
        return exact.get();
    }
    if (!supportsImplicitCasts) {
        throw createNoMatchingFunctionException(arguments);
    }
    // Second pass: allow implicit casts.
    return findMatchingCandidate(arguments, true)
        .orElseThrow(() -> createNoMatchingFunctionException(arguments));
}
|
@Test
public void shouldSupportMatchAndImplicitCastEnabled() {
    // Given: only a DOUBLE overload is registered.
    givenFunctions(function(EXPECTED, -1, DOUBLE));
    // When: looked up with an INTEGER argument (requires an implicit cast).
    final KsqlFunction resolved = udfIndex.getFunction(ImmutableList.of(SqlArgument.of(INTEGER)));
    // Then: the DOUBLE overload is still found.
    assertThat(resolved.name(), equalTo(EXPECTED));
}
|
/**
 * Builds the DDL column definition for the given field on this (PostgreSQL-style)
 * database.
 *
 * <p>Notable behaviors, all pinned by existing tests:
 * <ul>
 *   <li>Technical/primary key numeric fields become SERIAL/BIGSERIAL.</li>
 *   <li>Numeric fields with no length become DOUBLE PRECISION.</li>
 *   <li>A string with length 0 yields the literal "VARCHAR()" — a long-standing
 *       quirk that callers/tests currently rely on, so it is left unchanged.</li>
 *   <li>Unsupported types yield " UNKNOWN".</li>
 * </ul>
 *
 * @param v            field metadata (name, type, length, precision)
 * @param tk           technical key field name (serial candidate)
 * @param pk           primary key field name (serial candidate)
 * @param useAutoinc   unused by this dialect
 * @param addFieldName whether to prefix the definition with the field name
 * @param addCr        whether to append a carriage return
 * @return the DDL fragment for this field
 */
@Override
public String getFieldDefinition( ValueMetaInterface v, String tk, String pk, boolean useAutoinc,
boolean addFieldName, boolean addCr ) {
String retval = "";
String fieldname = v.getName();
int length = v.getLength();
int precision = v.getPrecision();
if ( addFieldName ) {
retval += fieldname + " ";
}
int type = v.getType();
switch ( type ) {
case ValueMetaInterface.TYPE_TIMESTAMP:
case ValueMetaInterface.TYPE_DATE:
retval += "TIMESTAMP";
break;
case ValueMetaInterface.TYPE_BOOLEAN:
if ( supportsBooleanDataType() ) {
retval += "BOOLEAN";
} else {
// Fallback encoding for databases/configs without a boolean type.
retval += "CHAR(1)";
}
break;
case ValueMetaInterface.TYPE_NUMBER:
case ValueMetaInterface.TYPE_INTEGER:
case ValueMetaInterface.TYPE_BIGNUMBER:
if ( fieldname.equalsIgnoreCase( tk ) || // Technical key
fieldname.equalsIgnoreCase( pk ) // Primary key
) {
// Key columns are auto-incrementing serials; width picks the size.
if ( length > 9 ) {
retval += "BIGSERIAL";
} else {
retval += "SERIAL";
}
} else {
if ( length > 0 ) {
if ( precision > 0 || length > 18 ) {
// Fractional precision or very wide values need NUMERIC.
retval += "NUMERIC(" + length + ", " + precision + ")";
} else {
// Whole numbers: choose the smallest integer type that fits.
if ( length > 9 ) {
retval += "BIGINT";
} else {
if ( length < 5 ) {
retval += "SMALLINT";
} else {
retval += "INTEGER";
}
}
}
} else {
// No length specified: use floating point.
retval += "DOUBLE PRECISION";
}
}
break;
case ValueMetaInterface.TYPE_STRING:
retval += "VARCHAR";
if ( length > 0 ) {
retval += "(" + length;
} else {
// NOTE: length <= 0 produces "VARCHAR()" (no default width) — kept
// as-is because the unit test pins this exact output.
retval += "("; // Maybe use some default DB String length?
}
retval += ")";
break;
default:
retval += " UNKNOWN";
break;
}
if ( addCr ) {
retval += Const.CR;
}
return retval;
}
|
/**
 * Exercises every branch of getFieldDefinition: dates/timestamps, serial keys,
 * numeric sizing, the "VARCHAR()" zero-length quirk, and the UNKNOWN fallback.
 */
@Test
public void testGetFieldDefinition() throws Exception {
assertEquals( "FOO TIMESTAMP",
nativeMeta.getFieldDefinition( new ValueMetaDate( "FOO" ), "", "", false, true, false ) );
assertEquals( "TIMESTAMP",
nativeMeta.getFieldDefinition( new ValueMetaTimestamp( "FOO" ), "", "", false, false, false ) );
// Simple hack to prevent duplication of code. Checking the case of supported boolean type
// both supported and unsupported. Should return BOOLEAN if supported, or CHAR(1) if not.
// NOTE(review): typeCk and i are never used below — the boolean assertion this
// comment describes appears to be missing; confirm and either add it or drop these.
String[] typeCk = new String[] { "CHAR(1)", "BOOLEAN", "CHAR(1)" };
int i = ( nativeMeta.supportsBooleanDataType() ? 1 : 0 );
// Key columns (matching tk or pk) become serials, sized by length.
assertEquals( "BIGSERIAL",
nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 10, 0 ), "FOO", "", false, false, false ) );
assertEquals( "SERIAL",
nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 8, 0 ), "", "FOO", false, false, false ) );
assertEquals( "NUMERIC(19, 0)",
nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 19, 0 ), "", "", false, false, false ) );
assertEquals( "NUMERIC(22, 7)",
nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO", 22, 7 ), "", "", false, false, false ) );
assertEquals( "DOUBLE PRECISION",
nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO" ), "", "", false, false, false ) );
assertEquals( "BIGINT",
nativeMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 10, 0 ), "", "", false, false, false ) );
assertEquals( "SMALLINT",
nativeMeta.getFieldDefinition( new ValueMetaInteger( "FOO", 3, 0 ), "", "", false, false, false ) );
assertEquals( "INTEGER",
nativeMeta.getFieldDefinition( new ValueMetaInteger( "FOO", 5, 0 ), "", "", false, false, false ) );
// Zero-length strings intentionally pin the "VARCHAR()" quirk.
assertEquals( "VARCHAR()",
nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", 0, 0 ), "", "", false, false, false ) ); // Pretty sure this is a bug ...
assertEquals( "VARCHAR(15)",
nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", 15, 0 ), "", "", false, false, false ) );
assertEquals( " UNKNOWN",
nativeMeta.getFieldDefinition( new ValueMetaBinary( "FOO", 0, 0 ), "", "", false, false, false ) );
// assertEquals( "VARBINARY(50)",
// nativeMeta.getFieldDefinition( new ValueMetaBinary( "FOO", 50, 0 ), "", "", false, false, false ) );
assertEquals( " UNKNOWN",
nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, false ) );
assertEquals( " UNKNOWN" + System.getProperty( "line.separator" ),
nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, true ) );
}
|
/**
 * Builds the upstream source once, then applies this projection node's
 * select expressions, keyed by the schema's key column names.
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
    final SchemaKStream<?> sourceStream = getSource().buildStream(buildContext);
    final List<ColumnName> keyColumnNames = getSchema().key().stream()
        .map(keyColumn -> keyColumn.name())
        .collect(Collectors.toList());
    return sourceStream.select(
        keyColumnNames,
        getSelectExpressions(),
        buildContext.buildNodeContext(getId().toString()),
        buildContext,
        getFormatInfo());
}
|
@Test
public void shouldBuildSourceOnceWhenBeingBuilt() {
    // When the projection is built ...
    projectNode.buildStream(planBuildContext);
    // ... the upstream source must be built exactly one time.
    verify(source, times(1)).buildStream(planBuildContext);
}
|
/**
 * Inspects the step-status overview of a workflow run and decides whether the
 * run as a whole has reached a terminal Task status.
 *
 * <p>Status precedence when everything is done: FAILED &gt; TIMED_OUT &gt;
 * CANCELED &gt; FAILED_WITH_TERMINAL_ERROR (the last one is a sentinel meaning
 * "workflow succeeded; cancel the remaining NOT_CREATED placeholder tasks").
 *
 * @param realTaskMap real (non-placeholder) tasks keyed by id
 * @param summary     workflow summary (used for logging and confirmDone)
 * @param overview    aggregated per-status step counts
 * @param isFinal     whether this is the final evaluation pass; only then are
 *                    retryable terminal statuses treated as done
 * @return the terminal task status if the workflow is done, else empty
 */
@SuppressWarnings({"checkstyle:OperatorWrap"})
public static Optional<Task.Status> checkProgress(
Map<String, Task> realTaskMap,
WorkflowSummary summary,
WorkflowRuntimeOverview overview,
boolean isFinal) {
boolean allDone = true;
boolean isFailed = false; // highest order
boolean isTimeout = false;
boolean isStopped = false; // lowest order
// Only on the final pass can retryable terminal steps count as done.
boolean allTerminal =
isFinal && realTaskMap.values().stream().allMatch(task -> task.getStatus().isTerminal());
for (Map.Entry<StepInstance.Status, WorkflowStepStatusSummary> entry :
overview.getStepOverview().entrySet()) {
if (entry.getKey() != StepInstance.Status.NOT_CREATED && entry.getValue().getCnt() > 0) {
// Any non-terminal step (or retryable step before all are terminal)
// means the workflow is still in progress — stop scanning.
if (!entry.getKey().isTerminal() || (!allTerminal && entry.getKey().isRetryable())) {
allDone = false;
break;
} else if (entry.getKey() == StepInstance.Status.FATALLY_FAILED
|| entry.getKey() == StepInstance.Status.INTERNALLY_FAILED
|| entry.getKey() == StepInstance.Status.USER_FAILED
|| entry.getKey() == StepInstance.Status.PLATFORM_FAILED
|| entry.getKey() == StepInstance.Status.TIMEOUT_FAILED) {
isFailed = true;
} else if (entry.getKey() == StepInstance.Status.TIMED_OUT) {
isTimeout = true;
} else if (entry.getKey() == StepInstance.Status.STOPPED) {
isStopped = true;
}
}
}
// Steps that were never created need an extra confirmation pass.
if (allDone && overview.existsNotCreatedStep()) {
allDone = confirmDone(realTaskMap, summary);
}
// It's unexpected. Can happen if conductor fails the run before running maestro task logic
if (allDone && !isFailed && !isTimeout && !isStopped && !overview.existsCreatedStep()) {
LOG.warn(
"There are no created steps in the workflow [{}] and mark it as failed.",
summary.getIdentity());
Monitors.error(TaskHelper.class.getName(), "checkProgress");
isFailed = true;
}
LOG.trace(
"Check task status: done [{}] and with flags: [isFailed: {}], [isTimeout: {}], [isStopped: {}] "
+ "with real task map: [{}] and workflow summary: [{}]",
allDone,
isFailed,
isTimeout,
isStopped,
realTaskMap,
summary);
if (allDone) {
if (isFailed) {
return Optional.of(Task.Status.FAILED);
} else if (isTimeout) {
return Optional.of(Task.Status.TIMED_OUT);
} else if (isStopped) {
return Optional.of(Task.Status.CANCELED);
} else {
// Use this special status to indicate workflow succeeded.
// So all dummy (NOT_CREATED) tasks will be cancelled.
return Optional.of(Task.Status.FAILED_WITH_TERMINAL_ERROR);
}
}
return Optional.empty();
}
|
@Test
public void testCheckProgressWithEmptyDag() {
    // An empty task map with an empty overview has no created steps, so the
    // helper must mark the (final) evaluation as FAILED.
    Optional<Task.Status> status =
        TaskHelper.checkProgress(
            Collections.emptyMap(), new WorkflowSummary(), new WorkflowRuntimeOverview(), true);
    Assert.assertEquals(Task.Status.FAILED, status.get());
}
|
/**
 * Reads the Pig schema string from the UDF context properties and parses it.
 * Fails fast when the schema was never stored or cannot be parsed.
 */
private Schema getSchema() {
    final String schemaString = getProperties().getProperty(SCHEMA);
    if (schemaString == null) {
        throw new ParquetEncodingException("Can not store relation in Parquet as the schema is unknown");
    }
    try {
        return Utils.getSchemaFromString(schemaString);
    } catch (ParserException e) {
        throw new ParquetEncodingException("can not get schema from context", e);
    }
}
|
/**
 * Stores two relations with different single-column schemas through
 * ParquetStorer, reloads both directories with a wildcard ParquetLoader, and
 * verifies the loader produces the union schema with each row carrying a
 * value in exactly one column.
 */
@Test
public void testMultipleSchema() throws ExecException, Exception {
String out = "target/out";
int rows = 1000;
Properties props = new Properties();
props.setProperty("parquet.compression", "uncompressed");
props.setProperty("parquet.page.size", "1000");
PigServer pigServer = new PigServer(ExecType.LOCAL, props);
Data data = Storage.resetData(pigServer);
// Relation A: rows "a0".."a999"; relation B: rows "b0".."b999".
Collection<Tuple> list1 = new ArrayList<Tuple>();
for (int i = 0; i < rows; i++) {
list1.add(tuple("a" + i));
}
Collection<Tuple> list2 = new ArrayList<Tuple>();
for (int i = 0; i < rows; i++) {
list2.add(tuple("b" + i));
}
data.set("a", "a:chararray", list1);
data.set("b", "b:chararray", list2);
pigServer.setBatchOn();
pigServer.registerQuery("A = LOAD 'a' USING mock.Storage();");
pigServer.registerQuery("B = LOAD 'b' USING mock.Storage();");
pigServer.deleteFile(out);
pigServer.registerQuery("Store A into '" + out + "/a' using " + ParquetStorer.class.getName() + "();");
pigServer.registerQuery("Store B into '" + out + "/b' using " + ParquetStorer.class.getName() + "();");
if (pigServer.executeBatch().get(0).getStatus() != JOB_STATUS.COMPLETED) {
throw new RuntimeException(
"Job failed", pigServer.executeBatch().get(0).getException());
}
// Reload both directories at once; the loader must merge the two schemas.
pigServer.registerQuery("B = LOAD '" + out + "/*' USING " + ParquetLoader.class.getName() + "();");
pigServer.registerQuery("Store B into 'out' using mock.Storage();");
if (pigServer.executeBatch().get(0).getStatus() != JOB_STATUS.COMPLETED) {
throw new RuntimeException(
"Job failed", pigServer.executeBatch().get(0).getException());
}
List<Tuple> result = data.get("out");
final Schema schema = data.getSchema("out");
assertEquals(2, schema.size());
// union could be in either order
int ai;
int bi;
if ("a".equals(schema.getField(0).alias)) {
ai = 0;
bi = 1;
assertEquals("a", schema.getField(0).alias);
assertEquals("b", schema.getField(1).alias);
} else {
ai = 1;
bi = 0;
assertEquals("b", schema.getField(0).alias);
assertEquals("a", schema.getField(1).alias);
}
assertEquals(rows * 2, result.size());
// Each tuple must populate exactly one of the two columns, and the values
// must come back in insertion order per column.
int a = 0;
int b = 0;
for (Tuple tuple : result) {
String fa = (String) tuple.get(ai);
String fb = (String) tuple.get(bi);
if (fa != null) {
assertEquals("a" + a, fa);
++a;
}
if (fb != null) {
assertEquals("b" + b, fb);
++b;
}
}
}
|
/**
 * Resolves the access privilege for a client address by delegating to the
 * string-based overload with the raw IP and the resolved canonical host name.
 * NOTE(review): getCanonicalHostName() may trigger a reverse DNS lookup.
 */
public AccessPrivilege getAccessPrivilege(InetAddress addr) {
    final String ip = addr.getHostAddress();
    final String hostname = addr.getCanonicalHostName();
    return getAccessPrivilege(ip, hostname);
}
|
@Test
public void testWildcardRW() {
    // "* rw" must grant read-write access to any client.
    NfsExports exports = new NfsExports(CacheSize, ExpirationPeriod, "* rw");
    AccessPrivilege granted = exports.getAccessPrivilege(address1, hostname1);
    Assert.assertEquals(AccessPrivilege.READ_WRITE, granted);
}
|
/**
 * Merges this resource with another of the same concrete type and name,
 * producing a new resource whose value is the sum of both.
 *
 * @param other resource to merge with; must be non-null, same class, same name
 * @return a new resource holding the combined value
 */
public T merge(T other) {
    checkNotNull(other, "Cannot merge with null resources");
    checkArgument(other.getClass() == getClass(), "Merge with different resource type");
    checkArgument(name.equals(other.getName()), "Merge with different resource name");
    final BigDecimal combined = value.add(other.getValue());
    return create(combined);
}
|
@Test
void testMerge() {
    // 0.1 + 0.2 must combine into a single 0.3 resource.
    final Resource left = new TestResource(0.1);
    final Resource right = new TestResource(0.2);
    final Resource merged = left.merge(right);
    assertTestResourceValueEquals(0.3, merged);
}
|
/**
 * Applies the given {@link OpenAPISpecFilter} to an OpenAPI definition and
 * returns a filtered clone. Paths, webhooks, operations, tags and component
 * schemas are filtered; tags that only appeared on removed operations are
 * dropped; optionally, unreferenced component definitions are removed.
 *
 * @return the filtered clone, or null if the filter removed the whole spec
 */
public OpenAPI filter(OpenAPI openAPI, OpenAPISpecFilter filter, Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) {
    OpenAPI filteredOpenAPI = filterOpenAPI(filter, openAPI, params, cookies, headers);
    if (filteredOpenAPI == null) {
        return filteredOpenAPI;
    }
    OpenAPI clone = new OpenAPI();
    clone.info(filteredOpenAPI.getInfo());
    clone.openapi(filteredOpenAPI.getOpenapi());
    clone.jsonSchemaDialect(filteredOpenAPI.getJsonSchemaDialect());
    clone.setSpecVersion(filteredOpenAPI.getSpecVersion());
    clone.setExtensions(filteredOpenAPI.getExtensions());
    clone.setExternalDocs(filteredOpenAPI.getExternalDocs());
    clone.setSecurity(filteredOpenAPI.getSecurity());
    clone.setServers(filteredOpenAPI.getServers());
    // NOTE(review): tags are copied from the UNFILTERED openAPI while the null
    // check uses filteredOpenAPI — confirm this asymmetry is intentional.
    clone.tags(filteredOpenAPI.getTags() == null ? null : new ArrayList<>(openAPI.getTags()));
    final Set<String> allowedTags = new HashSet<>();
    final Set<String> filteredTags = new HashSet<>();
    Paths clonedPaths = new Paths();
    if (filteredOpenAPI.getPaths() != null) {
        for (String resourcePath : filteredOpenAPI.getPaths().keySet()) {
            PathItem pathItem = filteredOpenAPI.getPaths().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter,filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clonedPaths.addPathItem(resourcePath, clonedPathItem);
                }
            }
        }
        clone.paths(clonedPaths);
    }
    // Drop tags that only appeared on operations the filter removed.
    filteredTags.removeAll(allowedTags);
    final List<Tag> tags = clone.getTags();
    if (tags != null && !filteredTags.isEmpty()) {
        tags.removeIf(tag -> filteredTags.contains(tag.getName()));
        if (clone.getTags().isEmpty()) {
            clone.setTags(null);
        }
    }
    if (filteredOpenAPI.getWebhooks() != null) {
        for (String resourcePath : filteredOpenAPI.getWebhooks().keySet()) {
            // BUG FIX: look up the webhook PathItem in getWebhooks(), not
            // getPaths(); the previous code read webhook keys from the paths
            // map, yielding null (or an unrelated item) for every webhook.
            PathItem pathItem = filteredOpenAPI.getWebhooks().get(resourcePath);
            PathItem filteredPathItem = filterPathItem(filter, pathItem, resourcePath, params, cookies, headers);
            PathItem clonedPathItem = cloneFilteredPathItem(filter,filteredPathItem, resourcePath, params, cookies, headers, allowedTags, filteredTags);
            if (clonedPathItem != null) {
                if (!clonedPathItem.readOperations().isEmpty()) {
                    clone.addWebhooks(resourcePath, clonedPathItem);
                }
            }
        }
    }
    if (filteredOpenAPI.getComponents() != null) {
        clone.components(new Components());
        clone.getComponents().setSchemas(filterComponentsSchema(filter, filteredOpenAPI.getComponents().getSchemas(), params, cookies, headers));
        clone.getComponents().setSecuritySchemes(filteredOpenAPI.getComponents().getSecuritySchemes());
        clone.getComponents().setCallbacks(filteredOpenAPI.getComponents().getCallbacks());
        clone.getComponents().setExamples(filteredOpenAPI.getComponents().getExamples());
        clone.getComponents().setExtensions(filteredOpenAPI.getComponents().getExtensions());
        clone.getComponents().setHeaders(filteredOpenAPI.getComponents().getHeaders());
        clone.getComponents().setLinks(filteredOpenAPI.getComponents().getLinks());
        clone.getComponents().setParameters(filteredOpenAPI.getComponents().getParameters());
        clone.getComponents().setRequestBodies(filteredOpenAPI.getComponents().getRequestBodies());
        clone.getComponents().setResponses(filteredOpenAPI.getComponents().getResponses());
        clone.getComponents().setPathItems(filteredOpenAPI.getComponents().getPathItems());
    }
    if (filter.isRemovingUnreferencedDefinitions()) {
        clone = removeBrokenReferenceDefinitions(clone);
    }
    return clone;
}
|
/**
 * Spawns ten threads that each run the filter on the same OpenAPI instance
 * concurrently, collecting results into a ConcurrentHashMap. A separate
 * FailureHandler thread watches the thread group and validates the results;
 * the test method itself returns without joining the workers.
 */
@Test(description = "it should clone everything concurrently")
public void cloneEverythingConcurrent() throws IOException {
final OpenAPI openAPI = getOpenAPI(RESOURCE_PATH);
// Group the workers so FailureHandler can watch them collectively.
ThreadGroup tg = new ThreadGroup("SpecFilterTest" + "|" + System.currentTimeMillis());
final Map<String, OpenAPI> filteredMap = new ConcurrentHashMap<>();
for (int i = 0; i < 10; i++) {
final int id = i;
new Thread(tg, "SpecFilterTest") {
public void run() {
try {
filteredMap.put("filtered " + id, new SpecFilter().filter(openAPI, new NoOpOperationsFilter(), null, null, null));
} catch (Exception e) {
// NOTE(review): failures are only printed here; FailureHandler is
// responsible for surfacing them to the test framework.
e.printStackTrace();
}
}
}.start();
}
new Thread(new FailureHandler(tg, filteredMap, openAPI)).start();
}
|
/**
 * Dispatches a math method call by name and arity: min/max/pow take two
 * arguments, random takes none. Anything else is unsupported.
 */
@Override
public SelType call(String methodName, SelType[] args) {
    final int argc = args.length;
    if (argc == 2 && "min".equals(methodName)) {
        return SelLong.of(Math.min(((SelLong) args[0]).longVal(), ((SelLong) args[1]).longVal()));
    }
    if (argc == 2 && "max".equals(methodName)) {
        return SelLong.of(Math.max(((SelLong) args[0]).longVal(), ((SelLong) args[1]).longVal()));
    }
    if (argc == 0 && "random".equals(methodName)) {
        return SelDouble.of(Math.random());
    }
    if (argc == 2 && "pow".equals(methodName)) {
        final double base = ((Number) args[0].getInternalVal()).doubleValue();
        final double exponent = ((Number) args[1].getInternalVal()).doubleValue();
        return SelDouble.of(Math.pow(base, exponent));
    }
    throw new UnsupportedOperationException(
        type()
            + " DO NOT support calling method: "
            + methodName
            + " with args: "
            + Arrays.toString(args));
}
|
@Test(expected = UnsupportedOperationException.class)
public void testInvalidCallArg() {
    // "random" takes zero arguments; passing one must be rejected.
    SelType[] badArgs = new SelType[] {SelType.NULL};
    SelJavaMath.INSTANCE.call("random", badArgs);
}
|
/**
 * Returns the stored exception, reconstructing it reflectively from the
 * recorded type/message (and optional cause type/message) when the original
 * instance is no longer available.
 *
 * <p>The reconstructed cause gets an empty stack trace, since the original
 * frames were not persisted.
 *
 * @return the original or reconstructed exception
 * @throws IllegalStateException if the exception class cannot be instantiated
 *         (e.g. no matching constructor)
 */
public Exception getException() {
// Fast path: the live exception instance is still held.
if (exception != null) return exception;
try {
final Class<? extends Exception> exceptionClass = ReflectionUtils.toClass(getExceptionType());
if (getExceptionCauseType() != null) {
// Rebuild the cause first, then wrap it in the outer exception;
// each uses the (message) constructor when a message was recorded.
final Class<? extends Exception> exceptionCauseClass = ReflectionUtils.toClass(getExceptionCauseType());
final Exception exceptionCause = getExceptionCauseMessage() != null ? ReflectionUtils.newInstanceCE(exceptionCauseClass, getExceptionCauseMessage()) : ReflectionUtils.newInstanceCE(exceptionCauseClass);
exceptionCause.setStackTrace(new StackTraceElement[]{});
return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage(), exceptionCause) : ReflectionUtils.newInstanceCE(exceptionClass, exceptionCause);
} else {
return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage()) : ReflectionUtils.newInstanceCE(exceptionClass);
}
} catch (ReflectiveOperationException e) {
throw new IllegalStateException("Could not reconstruct exception for class " + getExceptionType() + " and message " + getExceptionMessage(), e);
}
}
|
@Test
void getExceptionWithNestedException() {
    // A failure wrapping a nested CustomException must reconstruct both levels.
    final CustomException nested = new CustomException(new CustomException());
    final FailedState failedState = new FailedState("JobRunr message", nested);
    assertThat(failedState.getException())
        .isInstanceOf(CustomException.class)
        .hasCauseInstanceOf(CustomException.class);
}
|
/**
 * Sets the JMS destination type (e.g. javax.jms.Queue or javax.jms.Topic).
 * No validation happens here; invalid values are reported by the activation
 * spec's validation step.
 *
 * @param destinationType destination type class name
 */
public void setDestinationType(String destinationType) {
this.destinationType = destinationType;
}
|
@Test(timeout = 60000)
public void testInvalidDestinationTypeFailure() {
    // An unrecognized destination type must make validation flag exactly
    // the destinationType property.
    activationSpec.setDestinationType("foobar");
    assertActivationSpecInvalid(new PropertyDescriptor[] { destinationTypeProperty });
}
|
/**
 * Encodes a KubevirtApiConfig into JSON. Scheme, ip, port, state, datacenter
 * and cluster ids are always emitted; for HTTPS the certificate fields are
 * emitted unconditionally, for other schemes the token/cert fields are
 * emitted only when present. Optional FQDNs and controller ip are appended
 * when set.
 */
@Override
public ObjectNode encode(KubevirtApiConfig entity, CodecContext context) {
ObjectNode node = context.mapper().createObjectNode()
.put(SCHEME, entity.scheme().name())
.put(IP_ADDRESS, entity.ipAddress().toString())
.put(PORT, entity.port())
.put(STATE, entity.state().name())
.put(DATACENTER_ID, entity.datacenterId())
.put(CLUSTER_ID, entity.clusterId());
if (entity.scheme() == HTTPS) {
// HTTPS requires client certificates, so they are written unconditionally.
node.put(CA_CERT_DATA, entity.caCertData())
.put(CLIENT_CERT_DATA, entity.clientCertData())
.put(CLIENT_KEY_DATA, entity.clientKeyData());
if (entity.token() != null) {
node.put(TOKEN, entity.token());
}
} else {
// For non-HTTPS schemes every credential field is optional.
if (entity.token() != null) {
node.put(TOKEN, entity.token());
}
if (entity.caCertData() != null) {
node.put(CA_CERT_DATA, entity.caCertData());
}
if (entity.clientCertData() != null) {
node.put(CLIENT_CERT_DATA, entity.clientCertData());
}
if (entity.clientKeyData() != null) {
node.put(CLIENT_KEY_DATA, entity.clientKeyData());
}
}
if (entity.serviceFqdn() != null) {
node.put(SERVICE_FQDN, entity.serviceFqdn());
}
if (entity.apiServerFqdn() != null) {
node.put(API_SERVER_FQDN, entity.apiServerFqdn());
}
if (entity.controllerIp() != null) {
node.put(CONTROLLER_IP, entity.controllerIp().toString());
}
return node;
}
|
@Test
public void testKubevirtApiConfigEncode() {
    // A fully-populated HTTPS config should round-trip through the codec.
    KubevirtApiConfig apiConfig = DefaultKubevirtApiConfig.builder()
        .scheme(HTTPS)
        .ipAddress(IpAddress.valueOf("10.10.10.23"))
        .port(6443)
        .state(CONNECTED)
        .token("token")
        .caCertData("caCertData")
        .clientCertData("clientCertData")
        .clientKeyData("clientKeyData")
        .serviceFqdn("kubevirt.edgestack.svc.cluster.local")
        .apiServerFqdn("kubernetes.default.svc.cluster.local")
        .controllerIp(IpAddress.valueOf("127.0.0.1"))
        .datacenterId("BD")
        .clusterId("BD-MEH-CT01")
        .build();
    ObjectNode encoded = kubevirtApiConfigCodec.encode(apiConfig, context);
    assertThat(encoded, matchesKubevirtApiConfig(apiConfig));
}
|
/**
 * Returns the column names of the configured query/table by reading the
 * JDBC result-set metadata.
 */
@Override
public List<String> getColumnNames(Configuration conf) throws HiveJdbcDatabaseAccessException {
    return getColumnMetadata(conf, (metadata, columnIndex) -> metadata.getColumnName(columnIndex));
}
|
@Test
public void testGetColumnNames_starQuery() throws HiveJdbcDatabaseAccessException {
    // A SELECT * query must expose all seven columns, starting with strategy_id.
    Configuration conf = buildConfiguration();
    DatabaseAccessor accessor = DatabaseAccessorFactory.getAccessor(conf);
    List<String> names = accessor.getColumnNames(conf);
    assertThat(names, is(notNullValue()));
    assertThat(names.size(), is(equalTo(7)));
    assertThat(names.get(0), is(equalToIgnoringCase("strategy_id")));
}
|
/**
 * Returns the local host name, or the literal "localhost" when no local
 * address can be resolved.
 */
public static String getLocalHost() {
    InetAddress localAddress = getLocalAddress();
    if (localAddress == null) {
        return "localhost";
    }
    return localAddress.getHostName();
}
|
@Test
public void testGetLocalHost() {
    // getLocalHost never returns null — it falls back to "localhost".
    String host = NetUtil.getLocalHost();
    assertThat(host).isNotNull();
}
|
/**
 * REST endpoint: GET /package — returns all Helium package info as a JSON
 * response, or a 500 with the error message when listing fails.
 */
@GET
@Path("package")
public Response getAllPackageInfo() {
    try {
        return new JsonResponse<>(Response.Status.OK, "", helium.getAllPackageInfo()).build();
    } catch (RuntimeException error) {
        logger.error(error.getMessage(), error);
        return new JsonResponse<>(Response.Status.INTERNAL_SERVER_ERROR, error.getMessage()).build();
    }
}
|
/**
 * Verifies the visualization package ordering endpoint: initially empty,
 * then after POSTing an order of [name2, name1] the GET must echo that order.
 */
@Test
void testVisualizationPackageOrder() throws IOException {
// Initially no ordering has been configured.
CloseableHttpResponse get1 = httpGet("/helium/order/visualization");
assertThat(get1, isAllowed());
Map<String, Object> resp1 = gson.fromJson(EntityUtils.toString(get1.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() { }.getType());
List<Object> body1 = (List<Object>) resp1.get("body");
assertEquals(0, body1.size());
get1.close();
//We assume allPackages list has been refreshed before sorting
helium.getAllPackageInfo();
// Set the order to name2 first, name1 second.
String postRequestJson = "[name2, name1]";
CloseableHttpResponse post = httpPost("/helium/order/visualization", postRequestJson);
assertThat(post, isAllowed());
post.close();
// The follow-up GET must return the order exactly as posted.
CloseableHttpResponse get2 = httpGet("/helium/order/visualization");
assertThat(get2, isAllowed());
Map<String, Object> resp2 = gson.fromJson(EntityUtils.toString(get2.getEntity(), StandardCharsets.UTF_8),
new TypeToken<Map<String, Object>>() { }.getType());
List<Object> body2 = (List<Object>) resp2.get("body");
assertEquals(2, body2.size());
assertEquals("name2", body2.get(0));
assertEquals("name1", body2.get(1));
get2.close();
}
|
/**
 * Parses command-line arguments into a {@link CommandLineOptions}.
 *
 * <p>Params files ({@code @file}) are expanded first. Flags may carry their
 * value inline ({@code --flag=value}) or as the following argument. The first
 * argument not starting with {@code -} ends flag parsing: it and everything
 * after it are treated as file names.
 *
 * @param options the raw arguments, possibly containing {@code @}-params files
 * @return the parsed options
 * @throws IllegalArgumentException on an unrecognized flag
 */
static CommandLineOptions parse(Iterable<String> options) {
    CommandLineOptions.Builder optionsBuilder = CommandLineOptions.builder();
    List<String> expandedOptions = new ArrayList<>();
    // Inline any @params-file references before interpreting flags.
    expandParamsFiles(options, expandedOptions);
    Iterator<String> it = expandedOptions.iterator();
    while (it.hasNext()) {
        String option = it.next();
        if (!option.startsWith("-")) {
            // First non-flag argument: it and all remaining arguments are files.
            optionsBuilder.filesBuilder().add(option).addAll(it);
            break;
        }
        String flag;
        String value;
        int idx = option.indexOf('=');
        if (idx >= 0) {
            // "--flag=value" form: split into flag and inline value.
            flag = option.substring(0, idx);
            value = option.substring(idx + 1);
        } else {
            flag = option;
            // Value (if the flag requires one) is taken from the next argument.
            value = null;
        }
        // NOTE: update usage information in UsageException when new flags are added
        switch (flag) {
            case "-i":
            case "-r":
            case "-replace":
            case "--replace":
                optionsBuilder.inPlace(true);
                break;
            case "--lines":
            case "-lines":
            case "--line":
            case "-line":
                parseRangeSet(optionsBuilder.linesBuilder(), getValue(flag, it, value));
                break;
            case "--offset":
            case "-offset":
                optionsBuilder.addOffset(parseInteger(it, flag, value));
                break;
            case "--length":
            case "-length":
                optionsBuilder.addLength(parseInteger(it, flag, value));
                break;
            case "--aosp":
            case "-aosp":
            case "-a":
                optionsBuilder.aosp(true);
                break;
            case "--version":
            case "-version":
            case "-v":
                optionsBuilder.version(true);
                break;
            case "--help":
            case "-help":
            case "-h":
                optionsBuilder.help(true);
                break;
            case "--fix-imports-only":
                optionsBuilder.fixImportsOnly(true);
                break;
            case "--skip-sorting-imports":
                optionsBuilder.sortImports(false);
                break;
            case "--skip-removing-unused-imports":
                optionsBuilder.removeUnusedImports(false);
                break;
            case "--skip-reflowing-long-strings":
                optionsBuilder.reflowLongStrings(false);
                break;
            case "--skip-javadoc-formatting":
                optionsBuilder.formatJavadoc(false);
                break;
            case "-":
                // A bare "-" means: read the source from standard input.
                optionsBuilder.stdin(true);
                break;
            case "-n":
            case "--dry-run":
                optionsBuilder.dryRun(true);
                break;
            case "--set-exit-if-changed":
                optionsBuilder.setExitIfChanged(true);
                break;
            case "-assume-filename":
            case "--assume-filename":
                optionsBuilder.assumeFilename(getValue(flag, it, value));
                break;
            default:
                throw new IllegalArgumentException("unexpected flag: " + flag);
        }
    }
    return optionsBuilder.build();
}
|
@Test
public void paramsFile() throws IOException {
    // Verifies @params-file expansion: files may be nested (outer references
    // nested), blank lines are skipped, "@@" escapes a literal "@", and
    // expansion preserves argument order around the file references.
    Path outer = testFolder.newFile("outer").toPath();
    Path exit = testFolder.newFile("exit").toPath();
    Path nested = testFolder.newFile("nested").toPath();
    String[] args = {"--dry-run", "@" + exit, "L", "@" + outer, "Q"};
    Files.write(exit, "--set-exit-if-changed".getBytes(UTF_8));
    Files.write(outer, ("M\n@" + nested.toAbsolutePath() + "\nP").getBytes(UTF_8));
    // Non-ASCII entry plus an escaped "@@O" (expands to literal "@O").
    Files.write(nested, "ℕ\n\n \n@@O\n".getBytes(UTF_8));
    CommandLineOptions options = CommandLineOptionsParser.parse(Arrays.asList(args));
    assertThat(options.files()).containsExactly("L", "M", "ℕ", "@O", "P", "Q");
}
|
private List<Slobrok> getSlobroks(DeployState deployState, TreeConfigProducer<AnyConfigProducer> parent, Element slobroksE) {
    // Without an explicit <slobroks> element there is nothing to set up.
    if (slobroksE == null) {
        return new ArrayList<>();
    }
    return getExplicitSlobrokSetup(deployState, parent, slobroksE);
}
|
@Test
void testAdminServerOnly() {
    // An admin-server-only services definition should still yield exactly
    // one slobrok instance.
    Admin admin = buildAdmin(servicesAdminServerOnly());
    assertEquals(1, admin.getSlobroks().size());
}
|
/**
 * Resolves the configured remote shared/provided lib dirs into fully
 * qualified {@link Path}s, using the file system implied by each path's
 * scheme under the given YARN configuration.
 *
 * @param configuration Flink configuration holding the provided-lib-dirs option
 * @param yarnConfiguration YARN configuration used to resolve file systems
 * @return the qualified remote lib dir paths
 * @throws IOException if a path's file system cannot be accessed
 */
public static List<Path> getQualifiedRemoteProvidedLibDirs(
        org.apache.flink.configuration.Configuration configuration,
        YarnConfiguration yarnConfiguration)
        throws IOException {
    return getRemoteSharedLibPaths(
            configuration,
            pathStr -> {
                // makeQualified fills in missing scheme/authority from the
                // path's own file system.
                final Path path = new Path(pathStr);
                return path.getFileSystem(yarnConfiguration).makeQualified(path);
            });
}
|
@Test
void testSharedLibIsNotRemotePathShouldThrowException() {
    // A local (file://) provided-lib dir is rejected, because every worker
    // node must be able to read the shared libs.
    final String localLib = "file:///flink/sharedLib";
    final Configuration flinkConfig = new Configuration();
    flinkConfig.set(YarnConfigOptions.PROVIDED_LIB_DIRS, Collections.singletonList(localLib));
    final String msg =
            "The \""
                    + YarnConfigOptions.PROVIDED_LIB_DIRS.key()
                    + "\" should only "
                    + "contain dirs accessible from all worker nodes";
    assertThatThrownBy(
                    () ->
                            Utils.getQualifiedRemoteProvidedLibDirs(
                                    flinkConfig, new YarnConfiguration()))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining(msg);
}
|
/**
 * Computes the Levenshtein (edit) distance between {@code s} and {@code t}:
 * the minimum number of single-character insertions, deletions and
 * substitutions needed to turn {@code s} into {@code t}.
 *
 * <p>Uses the classic two-row dynamic-programming formulation, requiring only
 * O(|t|) extra space. Instead of copying the current row into the previous
 * row after each iteration, the two row arrays are swapped (O(1) per row).
 *
 * @param s first string, must not be null
 * @param t second string, must not be null
 * @return the edit distance between {@code s} and {@code t}
 * @throws NullPointerException if either argument is null
 */
public static int getLevenshteinDistance(final String s, final String t) {
    java.util.Objects.requireNonNull(s);
    java.util.Objects.requireNonNull(t);
    // base cases
    if (s.equals(t)) {
        return 0;
    }
    if (s.length() == 0) {
        return t.length();
    }
    if (t.length() == 0) {
        return s.length();
    }
    // prev holds row A[i][*], curr is filled with row A[i+1][*].
    int[] prev = new int[t.length() + 1];
    int[] curr = new int[t.length() + 1];
    // Row A[0][j]: distance from the empty prefix of s is j deletions from t.
    for (int j = 0; j < prev.length; j++) {
        prev[j] = j;
    }
    for (int i = 0; i < s.length(); i++) {
        // A[i+1][0]: delete (i+1) characters from s to match an empty t.
        curr[0] = i + 1;
        for (int j = 0; j < t.length(); j++) {
            int cost = (s.charAt(i) == t.charAt(j)) ? 0 : 1;
            // min of: insertion, deletion, substitution (or match, cost 0).
            curr[j + 1] = Math.min(Math.min(curr[j] + 1, prev[j + 1] + 1), prev[j] + cost);
        }
        // Swap rows instead of copying — the old "previous" row is reused
        // as scratch space for the next iteration.
        int[] tmp = prev;
        prev = curr;
        curr = tmp;
    }
    // After the final swap, the last computed row lives in prev.
    return prev[t.length()];
}
|
@Test
public void testLevenshteinDistance() {
    // Covers each kind of edit plus the degenerate empty-string cases.
    assertEquals(0, StringUtils.getLevenshteinDistance("", "")); // equal
    assertEquals(3, StringUtils.getLevenshteinDistance("", "abc")); // first empty
    assertEquals(3, StringUtils.getLevenshteinDistance("abc", "")); // second empty
    assertEquals(5, StringUtils.getLevenshteinDistance("abc", "12345")); // completely different
    assertEquals(1, StringUtils.getLevenshteinDistance("abc", "ac")); // deletion
    assertEquals(1, StringUtils.getLevenshteinDistance("abc", "ab1c")); // insertion
    assertEquals(1, StringUtils.getLevenshteinDistance("abc", "a1c")); // modification
}
|
public TopicRouteData queryTopicRouteData(String topic) {
    // Serve from the locally cached route table first; on a miss, refresh
    // from the name server and retry the lookup once.
    TopicRouteData routeData = this.getAnExistTopicRouteData(topic);
    if (routeData != null) {
        return routeData;
    }
    this.updateTopicRouteInfoFromNameServer(topic);
    return this.getAnExistTopicRouteData(topic);
}
|
@Test
public void testQueryTopicRouteData() {
    // Pre-populate the local route table so the query is served from cache
    // without touching the name server.
    consumerTable.put(group, createMQConsumerInner());
    topicRouteTable.put(topic, createTopicRouteData());
    TopicRouteData actual = mqClientInstance.queryTopicRouteData(topic);
    assertNotNull(actual);
    assertNotNull(actual.getQueueDatas());
    assertNotNull(actual.getBrokerDatas());
}
|
public static int compareVersions(String version1, String version2) {
    // Delegate the ordering to Maven's ComparableVersion, which handles
    // numeric, qualifier (alpha/beta/SNAPSHOT) and mixed version forms.
    return new ComparableVersion(version1).compareTo(new ComparableVersion(version2));
}
|
@Test
public void testCompareVersions() {
    // Exercises the Maven ComparableVersion ordering rules: numeric
    // normalization (1 == 1.0 == 1.0.0), alpha/beta qualifiers, and
    // SNAPSHOT pre-release ordering.
    // Equal versions are equal.
    assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0"));
    assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a"));
    assertEquals(0, VersionUtil.compareVersions(
            "2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"));
    assertEquals(0, VersionUtil.compareVersions("1", "1"));
    assertEquals(0, VersionUtil.compareVersions("1", "1.0"));
    assertEquals(0, VersionUtil.compareVersions("1", "1.0.0"));
    assertEquals(0, VersionUtil.compareVersions("1.0", "1"));
    assertEquals(0, VersionUtil.compareVersions("1.0", "1.0"));
    assertEquals(0, VersionUtil.compareVersions("1.0", "1.0.0"));
    assertEquals(0, VersionUtil.compareVersions("1.0.0", "1"));
    assertEquals(0, VersionUtil.compareVersions("1.0.0", "1.0"));
    assertEquals(0, VersionUtil.compareVersions("1.0.0", "1.0.0"));
    assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha-1", "1.0.0-a1"));
    assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha-2", "1.0.0-a2"));
    assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha1", "1.0.0-alpha-1"));
    assertEquals(0, VersionUtil.compareVersions("1a0", "1.0.0-alpha-0"));
    assertEquals(0, VersionUtil.compareVersions("1a0", "1-a0"));
    assertEquals(0, VersionUtil.compareVersions("1.a0", "1-a0"));
    assertEquals(0, VersionUtil.compareVersions("1.a0", "1.0.0-alpha-0"));
    // Assert that lower versions are lower, and higher versions are higher.
    assertExpectedValues("1", "2.0.0");
    assertExpectedValues("1.0.0", "2");
    assertExpectedValues("1.0.0", "2.0.0");
    assertExpectedValues("1.0", "2.0.0");
    assertExpectedValues("1.0.0", "2.0.0");
    assertExpectedValues("1.0.0", "1.0.0a");
    assertExpectedValues("1.0.0.0", "2.0.0");
    assertExpectedValues("1.0.0", "1.0.0-dev");
    assertExpectedValues("1.0.0", "1.0.1");
    assertExpectedValues("1.0.0", "1.0.2");
    assertExpectedValues("1.0.0", "1.1.0");
    assertExpectedValues("2.0.0", "10.0.0");
    assertExpectedValues("1.0.0", "1.0.0a");
    assertExpectedValues("1.0.2a", "1.0.10");
    assertExpectedValues("1.0.2a", "1.0.2b");
    assertExpectedValues("1.0.2a", "1.0.2ab");
    assertExpectedValues("1.0.0a1", "1.0.0a2");
    assertExpectedValues("1.0.0a2", "1.0.0a10");
    // The 'a' in "1.a" is not followed by digit, thus not treated as "alpha",
    // and treated larger than "1.0", per maven's ComparableVersion class
    // implementation.
    assertExpectedValues("1.0", "1.a");
    //The 'a' in "1.a0" is followed by digit, thus treated as "alpha-<digit>"
    assertExpectedValues("1.a0", "1.0");
    assertExpectedValues("1a0", "1.0");
    assertExpectedValues("1.0.1-alpha-1", "1.0.1-alpha-2");
    assertExpectedValues("1.0.1-beta-1", "1.0.1-beta-2");
    // Snapshot builds precede their eventual releases.
    assertExpectedValues("1.0-SNAPSHOT", "1.0");
    assertExpectedValues("1.0.0-SNAPSHOT", "1.0");
    assertExpectedValues("1.0.0-SNAPSHOT", "1.0.0");
    assertExpectedValues("1.0.0", "1.0.1-SNAPSHOT");
    assertExpectedValues("1.0.1-SNAPSHOT", "1.0.1");
    assertExpectedValues("1.0.1-SNAPSHOT", "1.0.2");
    assertExpectedValues("1.0.1-alpha-1", "1.0.1-SNAPSHOT");
    assertExpectedValues("1.0.1-beta-1", "1.0.1-SNAPSHOT");
    assertExpectedValues("1.0.1-beta-2", "1.0.1-SNAPSHOT");
}
|
@Override
public TTableDescriptor toThrift(List<ReferencedPartitionInfo> partitions) {
    // Serializes this Hive table's metadata (columns, partitions, serde
    // properties) into a thrift TTableDescriptor wrapping a THdfsTable.
    Preconditions.checkNotNull(partitions);
    THdfsTable tHdfsTable = new THdfsTable();
    tHdfsTable.setHdfs_base_dir(tableLocation);
    // columns and partition columns
    Set<String> partitionColumnNames = Sets.newHashSet();
    List<TColumn> tPartitionColumns = Lists.newArrayList();
    List<TColumn> tColumns = Lists.newArrayList();
    for (Column column : getPartitionColumns()) {
        tPartitionColumns.add(column.toThrift());
        partitionColumnNames.add(column.getName());
    }
    for (Column column : getBaseSchema()) {
        if (partitionColumnNames.contains(column.getName())) {
            // Partition columns are carried separately; do not duplicate
            // them in the data-column list.
            continue;
        }
        tColumns.add(column.toThrift());
    }
    tHdfsTable.setColumns(tColumns);
    if (!tPartitionColumns.isEmpty()) {
        tHdfsTable.setPartition_columns(tPartitionColumns);
    }
    // partitions
    List<String> partitionNames = Lists.newArrayList();
    for (ReferencedPartitionInfo partition : partitions) {
        partitionNames.add(PartitionUtil.toHivePartitionName(getPartitionColumnNames(), partition.getKey()));
    }
    List<PartitionInfo> hivePartitions;
    try {
        useMetadataCache = true;
        hivePartitions = GlobalStateMgr.getCurrentState().getMetadataMgr()
                .getPartitions(this.getCatalogName(), this, partitionNames);
    } catch (StarRocksConnectorException e) {
        // NOTE(review): a metadata fetch failure returns null instead of
        // propagating — confirm all callers tolerate a null descriptor.
        LOG.warn("table {} gets partition info failed.", name, e);
        return null;
    }
    // Assumes getPartitions returns results positionally aligned with
    // partitionNames (and hence with the input partitions list).
    for (int i = 0; i < hivePartitions.size(); i++) {
        ReferencedPartitionInfo info = partitions.get(i);
        PartitionKey key = info.getKey();
        long partitionId = info.getId();
        THdfsPartition tPartition = new THdfsPartition();
        tPartition.setFile_format(hivePartitions.get(i).getFileFormat().toThrift());
        List<LiteralExpr> keys = key.getKeys();
        tPartition.setPartition_key_exprs(keys.stream().map(Expr::treeToThrift).collect(Collectors.toList()));
        THdfsPartitionLocation tPartitionLocation = new THdfsPartitionLocation();
        tPartitionLocation.setPrefix_index(-1);
        tPartitionLocation.setSuffix(hivePartitions.get(i).getFullPath());
        tPartition.setLocation(tPartitionLocation);
        tHdfsTable.putToPartitions(partitionId, tPartition);
    }
    tHdfsTable.setSerde_lib(hiveProperties.get(HIVE_TABLE_SERDE_LIB));
    tHdfsTable.setInput_format(hiveProperties.get(HIVE_TABLE_INPUT_FORMAT));
    tHdfsTable.setHive_column_names(hiveProperties.get(HIVE_TABLE_COLUMN_NAMES));
    tHdfsTable.setHive_column_types(hiveProperties.get(HIVE_TABLE_COLUMN_TYPES));
    tHdfsTable.setSerde_properties(serdeProperties);
    tHdfsTable.setTime_zone(TimeUtils.getSessionTimeZone());
    TTableDescriptor tTableDescriptor = new TTableDescriptor(id, TTableType.HDFS_TABLE, fullSchema.size(),
            0, hiveTableName, hiveDbName);
    tTableDescriptor.setHdfsTable(tHdfsTable);
    return tTableDescriptor;
}
|
@Test
public void testCreateExternalTableWithStorageFormat(@Mocked MetadataMgr metadataMgr) throws Exception {
    // For each supported Hive storage format, creating an external table
    // must propagate the format's input-format class and serde into the
    // thrift descriptor.
    List<String> targetFormats = new ArrayList<>();
    targetFormats.add("AVRO");
    targetFormats.add("RCBINARY");
    targetFormats.add("RCTEXT");
    targetFormats.add("SEQUENCE");
    for (String targetFormat : targetFormats) {
        HiveTable oTable = createExternalTableByFormat(targetFormat);
        String inputFormatClass = HiveStorageFormat.get(targetFormat).getInputFormat();
        String serde = HiveStorageFormat.get(targetFormat).getSerde();
        // Stub the metadata manager so table resolution returns our fixture.
        new Expectations() {
            {
                GlobalStateMgr.getCurrentState().getMetadataMgr();
                result = metadataMgr;
                minTimes = 0;
                metadataMgr.getTable(anyString, anyString, anyString);
                result = oTable;
            }
        };
        String createTableSql =
                "create external table if not exists db.hive_tbl (col1 int, col2 int) engine=hive properties " +
                        "(\"resource\"=\"hive0\", \"database\"=\"db0\", \"table\"=\"table0\")";
        CreateTableStmt createTableStmt =
                (CreateTableStmt) UtFrameUtils.parseStmtWithNewParser(createTableSql, connectContext);
        com.starrocks.catalog.Table table = createTable(createTableStmt);
        Assert.assertTrue(table instanceof HiveTable);
        HiveTable hiveTable = (HiveTable) table;
        List<DescriptorTable.ReferencedPartitionInfo> partitions = new ArrayList<>();
        TTableDescriptor tTableDescriptor = hiveTable.toThrift(partitions);
        Assert.assertEquals(tTableDescriptor.getHdfsTable().getInput_format(), inputFormatClass);
        Assert.assertEquals(tTableDescriptor.getHdfsTable().getSerde_lib(), serde);
        Assert.assertEquals(tTableDescriptor.getHdfsTable().getHive_column_names(), "col2");
        Assert.assertEquals(tTableDescriptor.getHdfsTable().getHive_column_types(), "INT");
    }
}
|
public static SchemaAndValue parseString(String value) {
    // A null input maps to the canonical null schema/value pair.
    if (value == null) {
        return NULL_SCHEMA_AND_VALUE;
    }
    // An empty string cannot be parsed as anything else; short-circuit.
    if (value.isEmpty()) {
        return new SchemaAndValue(Schema.STRING_SCHEMA, value);
    }
    // Otherwise run the full value parser over the raw text.
    return new ValueParser(new Parser(value)).parse(false);
}
|
@Test
public void shouldParseStringListWithNullLastAsString() {
    // A list literal containing null cannot be typed as an array, so the
    // parser must fall back to treating the whole input as a plain string.
    String str = "[1, null]";
    SchemaAndValue result = Values.parseString(str);
    assertEquals(Type.STRING, result.schema().type());
    assertEquals(str, result.value());
}
|
public static DecisionTree fit(Formula formula, DataFrame data) {
    // Train with an empty property set, i.e. all hyper-parameters at defaults.
    Properties defaults = new Properties();
    return fit(formula, data, defaults);
}
|
@Test
public void testBreastCancer() {
    System.out.println("Breast Cancer");
    MathEx.setSeed(19650218); // to get repeatable results.
    // 10-fold cross-validation with a GINI-split tree; the expected average
    // accuracy is pinned to 4 decimals, which the fixed seed makes stable.
    ClassificationValidations<DecisionTree> result = CrossValidation.classification(10, BreastCancer.formula, BreastCancer.data,
            (f, x) -> DecisionTree.fit(f, x, SplitRule.GINI, 20, 100, 5));
    System.out.println(result);
    assertEquals(0.9275, result.avg.accuracy, 1E-4);
}
|
/**
 * Fires every ticket whose delay has elapsed and prunes consumed or dead
 * tickets from the queue.
 *
 * @return the number of ticket handlers actually invoked
 */
public int execute()
{
    int executed = 0;
    final long now = now();
    // Tickets must be ordered by expiry so the loop can stop early.
    sortIfNeeded();
    Set<Ticket> cancelled = new HashSet<>();
    for (Ticket ticket : this.tickets) {
        if (now - ticket.start < ticket.delay) {
            // tickets are ordered, not meeting the condition means the next ones do not as well
            break;
        }
        if (!ticket.alive) {
            // Dead ticket, let's continue
            cancelled.add(ticket);
            continue;
        }
        // Mark consumed before invoking the handler, then fire it.
        ticket.alive = false;
        cancelled.add(ticket);
        ticket.handler.time(ticket.args);
        ++executed;
    }
    // NOTE(review): also sweeps dead tickets from the tail until the first
    // live one — this assumes dead tickets accumulate at the end after
    // sorting; confirm against sortIfNeeded()'s ordering.
    for (int idx = tickets.size(); idx-- > 0; ) {
        Ticket ticket = tickets.get(idx);
        if (ticket.alive) {
            break;
        }
        cancelled.add(ticket);
    }
    this.tickets.removeAll(cancelled);
    cancelled.clear();
    return executed;
}
|
@Test
public void testInvokedAfterReset()
{
    long fullTimeout = 50;
    // Reuses the half-time scenario (which must not have fired), then
    // advances the clock past the full timeout so the ticket fires once.
    testNotInvokedAfterResetHalfTime();
    // Wait until the end
    time.set(time.get() + fullTimeout);
    int rc = tickets.execute();
    assertThat(rc, is(1));
    assertThat(invoked.get(), is(1));
}
|
/**
 * Returns the environment registered under {@code name}, creating and
 * registering it first if it does not exist yet.
 *
 * <p>The name is normalised via {@code getWellFormName} before the lookup,
 * so differently-formatted spellings of the same name share one {@link Env}.
 *
 * @param name environment name; must not be blank
 * @return the (possibly freshly created) environment
 * @throws IllegalArgumentException if {@code name} is blank
 */
public static Env addEnvironment(String name) {
    if (StringUtils.isBlank(name)) {
        // IllegalArgumentException is a RuntimeException, so callers (and
        // tests) that expect RuntimeException keep working.
        throw new IllegalArgumentException("Cannot add a blank environment: " + "[" + name + "]");
    }
    name = getWellFormName(name);
    if (STRING_ENV_MAP.containsKey(name)) {
        // has been existed
        logger.debug("{} already exists.", name);
    }
    // computeIfAbsent collapses the previous contains/put/get triple lookup
    // into one, and is atomic if the backing map is concurrent.
    return STRING_ENV_MAP.computeIfAbsent(name, Env::new);
}
|
// A blank environment name must be rejected with a RuntimeException
// (or any subclass of it).
@Test(expected = RuntimeException.class)
public void testAddEnvironmentBlankString() {
    Env.addEnvironment("");
}
|
/**
 * Sanitizes the given statement against the metastore.
 *
 * <p>Delegates to the three-argument overload with the final flag fixed to
 * {@code true} — presumably enabling the full set of sanitizer checks;
 * confirm against that overload's definition.
 *
 * @param node the statement to sanitize
 * @param metaStore the metastore used for resolution
 * @return the sanitized statement
 */
public static Statement sanitize(
    final Statement node,
    final MetaStore metaStore) {
    return sanitize(node, metaStore, true);
}
|
@Test
public void shouldAllowDuplicateLambdaArgumentInSeparateExpression() {
    // The same lambda variable name ("X") may be reused across sibling
    // lambda expressions within one function call without being rejected.
    // Given:
    final Statement stmt = givenQuery(
            "SELECT TRANSFORM_ARRAY(Col4, X => X + 5, (X,Y) => Y + 5) FROM TEST1;");
    // When:
    final Query result = (Query) AstSanitizer.sanitize(stmt, META_STORE);
    // Then: the AST retains both lambdas with their declared argument lists.
    assertThat(result.getSelect(), is(new Select(ImmutableList.of(
            new SingleColumn(
                    new FunctionCall(
                            FunctionName.of("TRANSFORM_ARRAY"),
                            ImmutableList.of(
                                    column(TEST1_NAME, "COL4"),
                                    new LambdaFunctionCall(
                                            ImmutableList.of("X"),
                                            new ArithmeticBinaryExpression(
                                                    Operator.ADD,
                                                    new LambdaVariable("X"),
                                                    new IntegerLiteral(5))
                                    ),
                                    new LambdaFunctionCall(
                                            ImmutableList.of("X", "Y"),
                                            new ArithmeticBinaryExpression(
                                                    Operator.ADD,
                                                    new LambdaVariable("Y"),
                                                    new IntegerLiteral(5))
                                    )
                            )
                    ),
                    Optional.of(ColumnName.of("KSQL_COL_0")))
    ))));
}
|
/**
 * Resolves the condition's runtime value from the exchange, dispatching on
 * the condition's parameter type (URI, header, query, …) and name.
 *
 * @param condition the rule condition holding param type and name
 * @param exchange the current server web exchange
 * @return the concrete value to match the condition against
 */
public String buildRealData(final ConditionData condition, final ServerWebExchange exchange) {
    return ParameterDataFactory.builderData(condition.getParamType(), condition.getParamName(), exchange);
}
|
@Test
public void testBuildRealDataUriBranch() {
    // With a URI-typed condition, the real data is the request path of the
    // exchange fixture ("/http").
    conditionData.setParamType(ParamTypeEnum.URI.getName());
    assertEquals("/http", abstractMatchStrategy.buildRealData(conditionData, exchange));
}
|
@Override
public T getContent() {
    // Plain accessor for the wrapped payload.
    return content;
}
|
@Test
void getContent() {
    // The content passed to the constructor must be returned unchanged
    // (same reference) by the accessor.
    String targetEngine = "targetEngine";
    Object content = "content";
    EfestoRedirectOutput retrieved = new EfestoRedirectOutput(modelLocalUriId, targetEngine, content) {};
    assertThat(retrieved.getContent()).isEqualTo(content);
}
|
@Override
public void write(InputT element, Context context) throws IOException, InterruptedException {
    // Back-pressure: while the buffer is at capacity, flush synchronously
    // until there is room for the new entry.
    while (bufferedRequestEntries.size() >= maxBufferedRequests) {
        flush();
    }
    // Convert the element and enqueue it (false: append at the tail,
    // not re-queued at the head — confirm against addEntryToBuffer).
    addEntryToBuffer(elementConverter.apply(element, context), false);
    // Kick off a flush only if the flush thresholds are now met.
    nonBlockingFlush();
}
|
@Test
public void testFlushThresholdMetBeforeBatchLimitWillCreateASmallerBatchOfSizeAboveThreshold()
        throws IOException, InterruptedException {
    AsyncSinkWriterImpl sink =
            new AsyncSinkWriterImplBuilder()
                    .context(sinkInitContext)
                    .maxBatchSizeInBytes(30)
                    .maxRecordSizeInBytes(30)
                    .build();
    /* Sink has flush threshold of 30 bytes, each integer is 4 bytes, therefore, flushing
     * should occur once 7 elements have been written - an 8th element cannot be added since
     * that would make the buffer 32 bytes, which is over the threshold.
     */
    for (int i = 0; i < 100; i++) {
        sink.write(String.valueOf(i));
        // After each write, the destination holds only complete batches of 7.
        assertThat(res.size()).isEqualTo((i / 7) * 7);
    }
}
|
public static byte[] computeHmac(final byte[] key, final String string)
    throws SaslException {
    // HMAC (SHA-1) over the UTF-8 encoding of the input, keyed with 'key'.
    // Mac.doFinal(byte[]) performs the update and finalisation in one call.
    final Mac mac = createSha1Hmac(key);
    return mac.doFinal(string.getBytes(StandardCharsets.UTF_8));
}
|
@Test
public void testComputeHmac2() throws Exception
{
    // Checks the HMAC against the SCRAM RFC test vectors:
    // HMAC(SaltedPassword, "Server Key") == ServerKey.
    // Setup test fixture.
    final byte[] key = StringUtils.decodeHex("1d96ee3a529b5a5f9e47c01f229a2cb8a6e15f7d"); // 'salted password' from the test vectors.
    final String value = "Server Key";
    // Execute system under test.
    final byte[] result = ScramUtils.computeHmac(key, value);
    // Verify results.
    assertArrayEquals(StringUtils.decodeHex("0fe09258b3ac852ba502cc62ba903eaacdbf7d31"), result); // test against 'server key' from the test vectors.
}
|
/**
 * Builds a {@link JibContainerBuilder} from the user's raw configuration:
 * base image, app root, entrypoint, platforms, ports, volumes, labels,
 * creation time, working directory and extra directory layers.
 *
 * @param rawConfiguration the user-supplied plugin configuration
 * @param inferredAuthProvider supplies registry credentials inferred from the build environment
 * @param projectProperties project-specific build properties and logging
 * @return the configured container builder
 */
@VisibleForTesting
static JibContainerBuilder processCommonConfiguration(
        RawConfiguration rawConfiguration,
        InferredAuthProvider inferredAuthProvider,
        ProjectProperties projectProperties)
        throws InvalidFilesModificationTimeException, InvalidAppRootException,
        IncompatibleBaseImageJavaVersionException, IOException, InvalidImageReferenceException,
        InvalidContainerizingModeException, MainClassInferenceException, InvalidPlatformException,
        InvalidContainerVolumeException, InvalidWorkingDirectoryException,
        InvalidCreationTimeException, ExtraDirectoryNotFoundException {
    // Create and configure JibContainerBuilder
    ModificationTimeProvider modificationTimeProvider =
            createModificationTimeProvider(rawConfiguration.getFilesModificationTime());
    JavaContainerBuilder javaContainerBuilder =
            getJavaContainerBuilderWithBaseImage(
                    rawConfiguration, projectProperties, inferredAuthProvider)
                    .setAppRoot(getAppRootChecked(rawConfiguration, projectProperties))
                    .setModificationTimeProvider(modificationTimeProvider);
    JibContainerBuilder jibContainerBuilder =
            projectProperties.createJibContainerBuilder(
                    javaContainerBuilder,
                    getContainerizingModeChecked(rawConfiguration, projectProperties));
    // Apply all container-plan settings derived from the raw configuration.
    jibContainerBuilder
            .setFormat(rawConfiguration.getImageFormat())
            .setPlatforms(getPlatformsSet(rawConfiguration))
            .setEntrypoint(computeEntrypoint(rawConfiguration, projectProperties, jibContainerBuilder))
            .setProgramArguments(rawConfiguration.getProgramArguments().orElse(null))
            .setEnvironment(rawConfiguration.getEnvironment())
            .setExposedPorts(Ports.parse(rawConfiguration.getPorts()))
            .setVolumes(getVolumesSet(rawConfiguration))
            .setLabels(rawConfiguration.getLabels())
            .setUser(rawConfiguration.getUser().orElse(null))
            .setCreationTime(getCreationTime(rawConfiguration.getCreationTime(), projectProperties));
    getWorkingDirectoryChecked(rawConfiguration)
            .ifPresent(jibContainerBuilder::setWorkingDirectory);
    // Adds all the extra files.
    for (ExtraDirectoriesConfiguration extraDirectory : rawConfiguration.getExtraDirectories()) {
        Path from = extraDirectory.getFrom();
        if (Files.exists(from)) {
            jibContainerBuilder.addFileEntriesLayer(
                    JavaContainerBuilderHelper.extraDirectoryLayerConfiguration(
                            from,
                            AbsoluteUnixPath.get(extraDirectory.getInto()),
                            extraDirectory.getIncludesList(),
                            extraDirectory.getExcludesList(),
                            rawConfiguration.getExtraDirectoryPermissions(),
                            modificationTimeProvider));
        } else if (!from.endsWith(DEFAULT_JIB_DIR)) {
            // A missing directory is only tolerated for the default jib dir;
            // any other missing extra directory is a configuration error.
            throw new ExtraDirectoryNotFoundException(from.toString(), from.toString());
        }
    }
    return jibContainerBuilder;
}
|
@Test
public void testEntrypoint()
        throws InvalidImageReferenceException, IOException, MainClassInferenceException,
        InvalidAppRootException, InvalidWorkingDirectoryException, InvalidPlatformException,
        InvalidContainerVolumeException, IncompatibleBaseImageJavaVersionException,
        NumberFormatException, InvalidContainerizingModeException,
        InvalidFilesModificationTimeException, InvalidCreationTimeException,
        ExtraDirectoryNotFoundException {
    // An explicitly configured entrypoint must be used verbatim, in order,
    // without emitting any warnings.
    when(rawConfiguration.getEntrypoint())
            .thenReturn(Optional.of(Arrays.asList("custom", "entrypoint")));
    ContainerBuildPlan buildPlan = processCommonConfiguration();
    assertThat(buildPlan.getEntrypoint()).containsExactly("custom", "entrypoint").inOrder();
    verifyNoInteractions(logger);
}
|
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
    // Map each recognised MySQL admin statement onto its dedicated executor.
    final SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
    if (sqlStatement instanceof ShowFunctionStatusStatement) {
        return Optional.of(new ShowFunctionStatusExecutor((ShowFunctionStatusStatement) sqlStatement));
    } else if (sqlStatement instanceof ShowProcedureStatusStatement) {
        return Optional.of(new ShowProcedureStatusExecutor((ShowProcedureStatusStatement) sqlStatement));
    } else if (sqlStatement instanceof ShowTablesStatement) {
        return Optional.of(new ShowTablesExecutor((ShowTablesStatement) sqlStatement, sqlStatementContext.getDatabaseType()));
    }
    // Anything else is not an admin statement handled by this creator.
    return Optional.empty();
}
|
@Test
void assertCreateWithSelectStatementFromInformationSchemaOfOtherTable() {
    // A SELECT from an information_schema table that is not specially
    // handled must not produce an admin executor.
    initProxyContext(Collections.emptyMap());
    SimpleTableSegment tableSegment = new SimpleTableSegment(new TableNameSegment(10, 13, new IdentifierValue("CHARACTER_SETS")));
    tableSegment.setOwner(new OwnerSegment(7, 8, new IdentifierValue("information_schema")));
    MySQLSelectStatement selectStatement = mock(MySQLSelectStatement.class);
    when(selectStatement.getFrom()).thenReturn(Optional.of(tableSegment));
    when(sqlStatementContext.getSqlStatement()).thenReturn(selectStatement);
    Optional<DatabaseAdminExecutor> actual = new MySQLAdminExecutorCreator().create(sqlStatementContext, "select CHARACTER_SET_NAME from CHARACTER_SETS", "", Collections.emptyList());
    assertFalse(actual.isPresent());
}
|
@Override
public void loadGlue(Glue glue, List<URI> gluePaths) {
    // Scan every classpath-scheme glue path for classes assignable to
    // InjectorSource and register each distinct match with the container.
    gluePaths.stream()
            .filter(gluePath -> CLASSPATH_SCHEME.equals(gluePath.getScheme()))
            .map(gluePath -> ClasspathSupport.packageName(gluePath))
            .map(packageName -> classFinder.scanForClassesInPackage(packageName))
            .flatMap(classes -> classes.stream())
            .filter(candidate -> InjectorSource.class.isAssignableFrom(candidate))
            .distinct()
            .forEach(candidate -> container.addClass(candidate));
}
|
@Test
void doesnt_save_anything_in_glue() {
    // loadGlue registers InjectorSource classes with the factory only; the
    // Glue argument is unused, so passing null must be safe.
    GuiceBackend backend = new GuiceBackend(factory, classLoader);
    backend.loadGlue(null, singletonList(URI.create("classpath:io/cucumber/guice/integration")));
    verify(factory).addClass(YourInjectorSource.class);
}
|
// Current value of the snapshot-retrieval counter (read via .get(), so the
// backing field is presumably an atomic counter). Package-private, which
// suggests it exists for test visibility — confirm with callers.
long snapshotsRetrieved() {
    return snapshotsRetrieved.get();
}
|
@Test
public void metrics_are_not_refreshed_if_ttl_not_expired() {
    assertEquals(0, nodeMetricsClient.snapshotsRetrieved());
    // First update fetches a snapshot and starts the TTL window.
    updateSnapshot(defaultMetricsConsumerId, TTL);
    assertEquals(1, nodeMetricsClient.snapshotsRetrieved());
    // Within the TTL, the cached snapshot is reused — no new retrieval.
    updateSnapshot(defaultMetricsConsumerId, TTL);
    assertEquals(1, nodeMetricsClient.snapshotsRetrieved());
    // A zero TTL forces an immediate refresh.
    updateSnapshot(defaultMetricsConsumerId, Duration.ZERO);
    assertEquals(2, nodeMetricsClient.snapshotsRetrieved());
}
|
/**
 * Restores a {@link PojoSerializer} from this snapshot's data: field
 * serializers plus the registered and non-registered subclass serializers.
 *
 * @return the restored serializer
 */
@Override
@SuppressWarnings("unchecked")
public TypeSerializer<T> restoreSerializer() {
    final int numFields = snapshotData.getFieldSerializerSnapshots().size();
    final ArrayList<Field> restoredFields = new ArrayList<>(numFields);
    final ArrayList<TypeSerializer<?>> restoredFieldSerializers = new ArrayList<>(numFields);
    // Rebuild the field list and each field's serializer from its snapshot.
    // Note: the Field may be null here (e.g. for removed fields), but the
    // serializer snapshot itself must always be present.
    snapshotData
            .getFieldSerializerSnapshots()
            .forEach(
                    (fieldName, field, fieldSerializerSnapshot) -> {
                        restoredFields.add(field);
                        checkState(
                                fieldSerializerSnapshot != null,
                                "field serializer snapshots should be present.");
                        restoredFieldSerializers.add(
                                fieldSerializerSnapshot.restoreSerializer());
                    });
    final LinkedHashMap<Class<?>, TypeSerializer<?>> registeredSubclassSerializers =
            restoreSerializers(
                    snapshotData.getRegisteredSubclassSerializerSnapshots().unwrapOptionals());
    // Split the registered-subclass map into the (class -> tag) registry and
    // the parallel serializer array expected by PojoSerializer.
    final Tuple2<LinkedHashMap<Class<?>, Integer>, TypeSerializer<Object>[]>
            decomposedSubclassSerializerRegistry =
                    decomposeSubclassSerializerRegistry(registeredSubclassSerializers);
    final LinkedHashMap<Class<?>, TypeSerializer<?>> nonRegisteredSubclassSerializers =
            restoreSerializers(
                    snapshotData
                            .getNonRegisteredSubclassSerializerSnapshots()
                            .unwrapOptionals());
    return new PojoSerializer<>(
            snapshotData.getPojoClass(),
            restoredFields.toArray(new Field[numFields]),
            restoredFieldSerializers.toArray(new TypeSerializer[numFields]),
            decomposedSubclassSerializerRegistry.f0,
            decomposedSubclassSerializerRegistry.f1,
            nonRegisteredSubclassSerializers,
            new SerializerConfigImpl());
}
|
@Test
void testRestoreSerializerWithRemovedFields() {
    // Removed fields restore as null Field entries, but their serializers
    // are still restored so old data can be read (and skipped).
    final PojoSerializerSnapshot<TestPojo> testSnapshot =
            buildTestSnapshot(
                    Arrays.asList(
                            mockRemovedField(ID_FIELD),
                            NAME_FIELD,
                            mockRemovedField(HEIGHT_FIELD)));
    final TypeSerializer<TestPojo> restoredSerializer = testSnapshot.restoreSerializer();
    assertThat(restoredSerializer).isInstanceOf(PojoSerializer.class);
    final PojoSerializer<TestPojo> restoredPojoSerializer =
            (PojoSerializer<TestPojo>) restoredSerializer;
    final Field[] restoredFields = restoredPojoSerializer.getFields();
    assertThat(restoredFields).containsExactly(null, NAME_FIELD.field, null);
    final TypeSerializer<?>[] restoredFieldSerializers =
            restoredPojoSerializer.getFieldSerializers();
    assertThat(restoredFieldSerializers)
            .containsExactly(
                    IntSerializer.INSTANCE,
                    StringSerializer.INSTANCE,
                    DoubleSerializer.INSTANCE);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.