focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
|---|---|
/**
 * Validates that the tenant with the given id exists, is enabled, and has not expired.
 * Throws a business exception (TENANT_NOT_EXISTS / TENANT_DISABLE / TENANT_EXPIRE) otherwise.
 */
@Override
public void validTenant(Long id) {
    TenantDO tenant = getTenant(id);
    // Unknown tenant: fail immediately.
    if (tenant == null) {
        throw exception(TENANT_NOT_EXISTS);
    }
    // Administratively disabled tenant.
    boolean disabled = tenant.getStatus().equals(CommonStatusEnum.DISABLE.getStatus());
    if (disabled) {
        throw exception(TENANT_DISABLE, tenant.getName());
    }
    // Tenant whose subscription period has ended.
    boolean expired = DateUtils.isExpired(tenant.getExpireTime());
    if (expired) {
        throw exception(TENANT_EXPIRE, tenant.getName());
    }
}
|
@Test
public void testValidTenant_disable() {
    // Arrange: persist a tenant whose status is DISABLE.
    TenantDO disabledTenant = randomPojo(TenantDO.class,
            o -> o.setId(1L).setStatus(CommonStatusEnum.DISABLE.getStatus()));
    tenantMapper.insert(disabledTenant);
    // Act & assert: validation must raise TENANT_DISABLE carrying the tenant name.
    assertServiceException(() -> tenantService.validTenant(1L), TENANT_DISABLE, disabledTenant.getName());
}
|
/**
 * Maps an {@code AdminUserDTO} to a {@code User} entity, copying all scalar fields
 * and converting the authority names into {@code Authority} instances.
 * Returns null when the input DTO is null.
 */
public User userDTOToUser(AdminUserDTO userDTO) {
    // Guard clause: a null DTO maps to a null entity.
    if (userDTO == null) {
        return null;
    }
    User user = new User();
    user.setId(userDTO.getId());
    user.setLogin(userDTO.getLogin());
    user.setFirstName(userDTO.getFirstName());
    user.setLastName(userDTO.getLastName());
    user.setEmail(userDTO.getEmail());
    user.setImageUrl(userDTO.getImageUrl());
    user.setActivated(userDTO.isActivated());
    user.setLangKey(userDTO.getLangKey());
    // Convert the string authority names to Authority entities.
    user.setAuthorities(this.authoritiesFromStrings(userDTO.getAuthorities()));
    return user;
}
|
@Test
void userDTOToUserMapWithNullAuthoritiesStringShouldReturnUserWithEmptyAuthorities() {
    // A null authority collection on the DTO must map to an empty (never null) set.
    userDto.setAuthorities(null);
    User mapped = userMapper.userDTOToUser(userDto);
    assertThat(mapped).isNotNull();
    assertThat(mapped.getAuthorities()).isNotNull();
    assertThat(mapped.getAuthorities()).isEmpty();
}
|
/**
 * Scans all open-file contexts and removes those whose streams can be cleaned up
 * (per {@code OpenFileCtx.streamCleanup} with the given timeout).
 * The check is performed twice: once without the lock for a cheap pre-filter, then
 * again inside {@code synchronized(this)} before removal to avoid racing with puts.
 * The actual {@code cleanup()} calls happen after the lock is released.
 *
 * @param streamTimeout timeout (ms) passed to streamCleanup to decide expiry
 */
@VisibleForTesting
void scan(long streamTimeout) {
ArrayList<OpenFileCtx> ctxToRemove = new ArrayList<OpenFileCtx>();
Iterator<Entry<FileHandle, OpenFileCtx>> it = openFileMap.entrySet()
.iterator();
if (LOG.isTraceEnabled()) {
LOG.trace("openFileMap size:" + size());
}
while (it.hasNext()) {
Entry<FileHandle, OpenFileCtx> pairs = it.next();
FileHandle handle = pairs.getKey();
OpenFileCtx ctx = pairs.getValue();
// First, lock-free check: skip contexts that are still active.
if (!ctx.streamCleanup(handle, streamTimeout)) {
continue;
}
// Check it again inside lock before removing
synchronized (this) {
OpenFileCtx ctx2 = openFileMap.get(handle);
// ctx2 may differ from ctx if the entry was replaced concurrently.
if (ctx2 != null) {
if (ctx2.streamCleanup(handle, streamTimeout)) {
openFileMap.remove(handle);
if (LOG.isDebugEnabled()) {
LOG.debug("After remove stream " + handle.dumpFileHandle()
+ ", the stream number:" + size());
}
ctxToRemove.add(ctx2);
}
}
}
}
// Invoke the cleanup outside the lock
for (OpenFileCtx ofc : ctxToRemove) {
ofc.cleanup();
}
}
|
/**
 * Verifies OpenFileCtxCache.scan removes (1) entries whose stream has timed out
 * and (2) entries that are no longer active, while keeping live entries.
 */
@Test
public void testScan() throws IOException, InterruptedException {
NfsConfiguration conf = new NfsConfiguration();
// Only two entries will be in the cache
conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
DFSClient dfsClient = Mockito.mock(DFSClient.class);
Nfs3FileAttributes attr = new Nfs3FileAttributes();
HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
Mockito.when(fos.getPos()).thenReturn((long) 0);
OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
// Test cleaning expired entry
boolean ret = cache.put(new FileHandle(1), context1);
assertTrue(ret);
ret = cache.put(new FileHandle(2), context2);
assertTrue(ret);
// Sleep past the minimum stream timeout so both entries count as expired.
Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + 1);
cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
assertTrue(cache.size() == 0);
// Test cleaning inactive entry
ret = cache.put(new FileHandle(3), context3);
assertTrue(ret);
ret = cache.put(new FileHandle(4), context4);
assertTrue(ret);
// Mark context3 inactive; scan must evict it but keep the active context4.
context3.setActiveStatusForTest(false);
cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
assertTrue(cache.size() == 1);
assertTrue(cache.get(new FileHandle(3)) == null);
assertTrue(cache.get(new FileHandle(4)) != null);
}
|
/**
 * Opens the given file and returns a streaming iterator over the messages it contains.
 * Any failure is wrapped in a ContextException annotated with the file path.
 */
public static <MSG extends Message> CloseableIterator<MSG> readStream(File file, Parser<MSG> parser) {
    try {
        // The returned CloseableIterator takes ownership of (and closes) this stream.
        FileInputStream fileStream = new FileInputStream(file);
        return readStream(new BufferedInputStream(fileStream), parser);
    } catch (Exception e) {
        throw ContextException.of("Unable to read messages", e).addContext("file", file);
    }
}
|
@Test
public void read_empty_stream() throws Exception {
    // Reading an empty file must yield a non-null iterator with no elements.
    File emptyFile = temp.newFile();
    CloseableIterator<Fake> iterator = Protobuf.readStream(emptyFile, Fake.parser());
    assertThat(iterator).isNotNull();
    assertThat(iterator.hasNext()).isFalse();
}
|
/**
 * Sets whether client certificate authentication is wanted.
 *
 * @param wantClientAuth the desired value; boxed Boolean, presumably so the
 *                       setting can also be left unset (null) — confirm with callers
 */
public void setWantClientAuth(Boolean wantClientAuth) {
this.wantClientAuth = wantClientAuth;
}
|
/**
 * Verifies that setting wantClientAuth on the configuration is propagated
 * to the configurable target by configure().
 */
@Test
public void testSetWantClientAuth() throws Exception {
configuration.setWantClientAuth(true);
configuration.configure(configurable);
assertTrue(configurable.isWantClientAuth());
}
|
/**
 * Invokes the given method on the target reflectively, temporarily making it
 * accessible if necessary and restoring its original accessibility afterwards.
 *
 * Fix: the original only restored accessibility after a successful invoke, so a
 * throwing target method left the Method permanently accessible. The restore now
 * happens in a finally block.
 *
 * @param method method to invoke
 * @param target instance to invoke on (null for static methods)
 * @param args   invocation arguments
 * @return the method's return value, cast to the caller's expected type
 */
@SuppressWarnings("unchecked")
@SneakyThrows(ReflectiveOperationException.class)
public static <T> T invokeMethod(final Method method, final Object target, final Object... args) {
    boolean accessible = method.isAccessible();
    if (!accessible) {
        method.setAccessible(true);
    }
    try {
        return (T) method.invoke(target, args);
    } finally {
        // Restore the original accessibility even when invoke() throws.
        if (!accessible) {
            method.setAccessible(false);
        }
    }
}
|
/**
 * Verifies that invokeMethod can call a declared instance method on a fixture
 * and return its value.
 */
@Test
void assertInvokeMethod() throws NoSuchMethodException {
assertThat(ReflectionUtils.invokeMethod(ReflectionFixture.class.getDeclaredMethod("getInstanceValue"), new ReflectionFixture()), is("instance_value"));
}
|
/**
 * Reads an 8-byte long at the current reader index and advances the index by 8.
 * Byte order is whatever {@code _getLong} implements for this buffer type.
 *
 * @return the long value read
 */
@Override
public long readLong() {
// Validates reference count / readable bytes before touching memory.
checkReadableBytes0(8);
long v = _getLong(readerIndex);
readerIndex += 8;
return v;
}
|
/**
 * Reading from a released buffer must throw: its reference count is already 0.
 * Idiom cleanup: the anonymous Executable class is replaced with a lambda,
 * which assertThrows accepts directly.
 */
@Test
public void testReadLongAfterRelease() {
    assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readLong());
}
|
/**
 * Registers ownership of a newly created event definition for the given user
 * by building its GRN and delegating to registerNewEntity.
 *
 * @param id   event definition id
 * @param user the user who becomes the owner
 */
public void registerNewEventDefinition(String id, User user) {
final GRN grn = grnRegistry.newGRN(GRNTypes.EVENT_DEFINITION, id);
registerNewEntity(grn, user);
}
|
/**
 * Verifies that registering a new event definition creates an OWN grant whose
 * target is the event definition GRN and whose grantee is the acting user.
 */
@Test
void registerNewEventDefinition() {
final User mockUser = mock(User.class);
when(mockUser.getName()).thenReturn("mockuser");
when(mockUser.getId()).thenReturn("mockuser");
entityOwnershipService.registerNewEventDefinition("1234", mockUser);
// Capture the grant passed to the grant service and inspect its fields.
ArgumentCaptor<GrantDTO> grant = ArgumentCaptor.forClass(GrantDTO.class);
ArgumentCaptor<User> user = ArgumentCaptor.forClass(User.class);
verify(dbGrantService).create(grant.capture(), user.capture());
assertThat(grant.getValue()).satisfies(g -> {
assertThat(g.capability()).isEqualTo(Capability.OWN);
assertThat(g.target().type()).isEqualTo(GRNTypes.EVENT_DEFINITION.type());
assertThat(g.target().entity()).isEqualTo("1234");
assertThat(g.grantee().type()).isEqualTo(GRNTypes.USER.type());
assertThat(g.grantee().entity()).isEqualTo("mockuser");
});
}
|
/**
 * Returns a new array containing the bytes of {@code bytes} in reverse order.
 * The input array is never modified; an empty input yields an empty result.
 *
 * @param bytes source bytes
 * @return a fresh reversed copy
 */
public static byte[] reverseBytes(byte[] bytes) {
    int n = bytes.length;
    byte[] reversed = new byte[n];
    // Walk the source from the end, filling the result from the front.
    for (int i = n - 1; i >= 0; i--) {
        reversed[n - 1 - i] = bytes[i];
    }
    return reversed;
}
|
@Test
public void testReverseBytes() {
    // Multi-element, single-element, and empty inputs are all reversed correctly.
    byte[] reversed = ByteUtils.reverseBytes(new byte[]{5, 4, 3, 2, 1});
    assertArrayEquals(new byte[]{1, 2, 3, 4, 5}, reversed);
    assertArrayEquals(new byte[]{0}, ByteUtils.reverseBytes(new byte[]{0}));
    assertArrayEquals(new byte[0], ByteUtils.reverseBytes(new byte[0]));
}
|
/**
 * Builds the distro module state, exposing each distro timing property
 * (falling back to its compile-time default when the property is unset).
 * The seven near-identical newState calls are factored into a helper.
 */
@Override
public ModuleState build() {
    ModuleState moduleState = new ModuleState(DistroConstants.DISTRO_MODULE);
    putLongState(moduleState, DistroConstants.DATA_SYNC_DELAY_MILLISECONDS_STATE,
            DistroConstants.DATA_SYNC_DELAY_MILLISECONDS, DistroConstants.DEFAULT_DATA_SYNC_DELAY_MILLISECONDS);
    putLongState(moduleState, DistroConstants.DATA_SYNC_TIMEOUT_MILLISECONDS_STATE,
            DistroConstants.DATA_SYNC_TIMEOUT_MILLISECONDS, DistroConstants.DEFAULT_DATA_SYNC_TIMEOUT_MILLISECONDS);
    putLongState(moduleState, DistroConstants.DATA_SYNC_RETRY_DELAY_MILLISECONDS_STATE,
            DistroConstants.DATA_SYNC_RETRY_DELAY_MILLISECONDS,
            DistroConstants.DEFAULT_DATA_SYNC_RETRY_DELAY_MILLISECONDS);
    putLongState(moduleState, DistroConstants.DATA_VERIFY_INTERVAL_MILLISECONDS_STATE,
            DistroConstants.DATA_VERIFY_INTERVAL_MILLISECONDS,
            DistroConstants.DEFAULT_DATA_VERIFY_INTERVAL_MILLISECONDS);
    putLongState(moduleState, DistroConstants.DATA_VERIFY_TIMEOUT_MILLISECONDS_STATE,
            DistroConstants.DATA_VERIFY_TIMEOUT_MILLISECONDS,
            DistroConstants.DEFAULT_DATA_VERIFY_TIMEOUT_MILLISECONDS);
    putLongState(moduleState, DistroConstants.DATA_LOAD_RETRY_DELAY_MILLISECONDS_STATE,
            DistroConstants.DATA_LOAD_RETRY_DELAY_MILLISECONDS,
            DistroConstants.DEFAULT_DATA_LOAD_RETRY_DELAY_MILLISECONDS);
    putLongState(moduleState, DistroConstants.DATA_LOAD_TIMEOUT_MILLISECONDS_STATE,
            DistroConstants.DATA_LOAD_TIMEOUT_MILLISECONDS,
            DistroConstants.DEFAULT_DATA_LOAD_TIMEOUT_MILLISECONDS);
    return moduleState;
}

/**
 * Reads {@code propertyKey} from the environment as a Long (using
 * {@code defaultValue} when absent) and records it under {@code stateKey}.
 */
private void putLongState(ModuleState moduleState, String stateKey, String propertyKey, Long defaultValue) {
    moduleState.newState(stateKey, EnvUtil.getProperty(propertyKey, Long.class, defaultValue));
}
|
/**
 * Verifies the builder exposes the distro module name and that every timing
 * state falls back to its documented default when no property is configured.
 */
@Test
void testBuild() {
ModuleState actual = new DistroModuleStateBuilder().build();
Map<String, Object> states = actual.getStates();
assertEquals(DistroConstants.DISTRO_MODULE, actual.getModuleName());
assertEquals(DistroConstants.DEFAULT_DATA_SYNC_DELAY_MILLISECONDS, states.get(DistroConstants.DATA_SYNC_DELAY_MILLISECONDS_STATE));
assertEquals(DistroConstants.DEFAULT_DATA_SYNC_TIMEOUT_MILLISECONDS,
states.get(DistroConstants.DATA_SYNC_TIMEOUT_MILLISECONDS_STATE));
assertEquals(DistroConstants.DEFAULT_DATA_SYNC_RETRY_DELAY_MILLISECONDS,
states.get(DistroConstants.DATA_SYNC_RETRY_DELAY_MILLISECONDS_STATE));
assertEquals(DistroConstants.DEFAULT_DATA_VERIFY_INTERVAL_MILLISECONDS,
states.get(DistroConstants.DATA_VERIFY_INTERVAL_MILLISECONDS_STATE));
assertEquals(DistroConstants.DEFAULT_DATA_VERIFY_TIMEOUT_MILLISECONDS,
states.get(DistroConstants.DATA_VERIFY_TIMEOUT_MILLISECONDS_STATE));
assertEquals(DistroConstants.DEFAULT_DATA_LOAD_RETRY_DELAY_MILLISECONDS,
states.get(DistroConstants.DATA_LOAD_RETRY_DELAY_MILLISECONDS_STATE));
assertEquals(DistroConstants.DEFAULT_DATA_LOAD_TIMEOUT_MILLISECONDS,
states.get(DistroConstants.DATA_LOAD_TIMEOUT_MILLISECONDS_STATE));
}
|
/**
 * Normalizes a trace/span id to its canonical hex form.
 * Null or empty ids return null when nullable, otherwise throw. Trace ids longer
 * than 16 chars are normalized to 32 chars; everything else to 16 chars, with
 * left-padding for short ids. A 32-char trace id whose high half is all padding
 * is collapsed back to its low 16 chars.
 *
 * @param field      field name used in error messages (e.g. "traceId")
 * @param id         the raw id, possibly null/empty/short
 * @param isNullable whether a missing/empty id is tolerated (returns null)
 */
@Nullable static String normalizeIdField(String field, @Nullable String id, boolean isNullable) {
if (id == null) {
if (isNullable) return null;
throw new NullPointerException(field + " == null");
}
int length = id.length();
if (length == 0) {
if (isNullable) return null;
throw new IllegalArgumentException(field + " is empty");
}
// Only traceId may be 128-bit (32 hex chars); all other fields are 64-bit.
int desiredLength = field.equals("traceId") && length > 16 ? 32 : 16;
// Validates the hex characters and reports how many leading zeros exist.
int existingPadding = validateHexAndReturnPadding(field, id, desiredLength);
if (desiredLength == 32 && existingPadding >= 16) { // overly padded traceId
return id.substring(16);
}
return length == desiredLength ? id : padLeft(id, desiredLength, existingPadding);
}
|
/**
 * Non-hex characters (and prefixes) in a trace id must be rejected with a
 * descriptive IllegalArgumentException.
 */
@Test void normalizeIdField_badCharacters() {
assertThatThrownBy(() -> normalizeIdField("traceId", "000-0000000004d20000000ss000162e", false))
.isInstanceOf(IllegalArgumentException.class)
.hasMessage("traceId should be lower-hex encoded with no prefix");
}
|
/**
 * Parses raw configurator config into override URLs.
 * Legacy format: a JSON array of URL strings is returned as-is. Otherwise the
 * input is parsed as a ConfiguratorConfig object and each config item is
 * expanded to URLs, scoped either per-application or per-service.
 *
 * NOTE(review): if the parsed config has no "configs" section, items is null and
 * forEach below would NPE — confirm parseObject guarantees a non-null list.
 *
 * @param rawConfig raw YAML/JSON configurator text
 * @return the expanded override URLs
 */
public static List<URL> parseConfigurators(String rawConfig) {
// compatible url JsonArray, such as [ "override://xxx", "override://xxx" ]
List<URL> compatibleUrls = parseJsonArray(rawConfig);
if (CollectionUtils.isNotEmpty(compatibleUrls)) {
return compatibleUrls;
}
List<URL> urls = new ArrayList<>();
ConfiguratorConfig configuratorConfig = parseObject(rawConfig);
String scope = configuratorConfig.getScope();
List<ConfigItem> items = configuratorConfig.getConfigs();
if (ConfiguratorConfig.SCOPE_APPLICATION.equals(scope)) {
items.forEach(item -> urls.addAll(appItemToUrls(item, configuratorConfig)));
} else {
// service scope by default.
items.forEach(item -> urls.addAll(serviceItemToUrls(item, configuratorConfig)));
}
return urls;
}
|
/**
 * Application-scoped config without a service entry must expand to a single
 * wildcard-service URL carrying the configured parameters.
 */
@Test
void parseConfiguratorsAppNoServiceTest() throws IOException {
try (InputStream yamlStream = this.getClass().getResourceAsStream("/AppNoService.yml")) {
List<URL> urls = ConfigParser.parseConfigurators(streamToString(yamlStream));
Assertions.assertNotNull(urls);
Assertions.assertEquals(1, urls.size());
URL url = urls.get(0);
Assertions.assertEquals("127.0.0.1", url.getAddress());
// No service specified => wildcard interface.
Assertions.assertEquals("*", url.getServiceInterface());
Assertions.assertEquals(6666, url.getParameter(TIMEOUT_KEY, 0));
Assertions.assertEquals("random", url.getParameter(LOADBALANCE_KEY));
Assertions.assertEquals("demo-consumer", url.getApplication());
}
}
|
/**
 * Inserts {@code data} into the trie, starting the recursive insert at the root.
 */
public void insert(String data) {
this.insert(this.root, data);
}
|
/**
 * Inserts eight single-character keys into a fresh trie.
 * NOTE(review): this test performs no assertions and never calls a search —
 * as written it only verifies that insert() does not throw. Consider adding
 * assertions on the expected prefix-search results.
 */
@Test
public void prefixSearch44() throws Exception {
TrieTree trieTree = new TrieTree();
trieTree.insert("a");
trieTree.insert("b");
trieTree.insert("c");
trieTree.insert("d");
trieTree.insert("e");
trieTree.insert("f");
trieTree.insert("g");
trieTree.insert("h");
}
|
/**
 * Establishes the gRPC channel to the Dubbo Cert Authority server.
 * If a CA certificate path is configured, TLS trust is built from that file;
 * otherwise an insecure trust manager is used (with a warning).
 *
 * Fix: the info log previously concatenated {@code remoteAddress} twice; it now
 * logs the actual {@code caCertPath}.
 *
 * @throws RuntimeException if the SSL context cannot be built from the cert file
 */
protected void connect0(CertConfig certConfig) {
    String caCertPath = certConfig.getCaCertPath();
    String remoteAddress = certConfig.getRemoteAddress();
    logger.info(
            "Try to connect to Dubbo Cert Authority server: " + remoteAddress + ", caCertPath: " + caCertPath);
    try {
        if (StringUtils.isNotEmpty(caCertPath)) {
            channel = NettyChannelBuilder.forTarget(remoteAddress)
                    .sslContext(GrpcSslContexts.forClient()
                            .trustManager(new File(caCertPath))
                            .build())
                    .build();
        } else {
            logger.warn(
                    CONFIG_SSL_CONNECT_INSECURE,
                    "",
                    "",
                    "No caCertPath is provided, will use insecure connection.");
            channel = NettyChannelBuilder.forTarget(remoteAddress)
                    .sslContext(GrpcSslContexts.forClient()
                            .trustManager(InsecureTrustManagerFactory.INSTANCE)
                            .build())
                    .build();
        }
    } catch (Exception e) {
        logger.error(LoggerCodeConstants.CONFIG_SSL_PATH_LOAD_FAILED, "", "", "Failed to load SSL cert file.", e);
        throw new RuntimeException(e);
    }
}
|
/**
 * Connecting with a CA certificate on the classpath must produce a channel
 * whose authority matches the configured remote address.
 */
@Test
void testConnect2() {
FrameworkModel frameworkModel = new FrameworkModel();
DubboCertManager certManager = new DubboCertManager(frameworkModel);
String file =
this.getClass().getClassLoader().getResource("certs/ca.crt").getFile();
CertConfig certConfig = new CertConfig("127.0.0.1:30062", null, file, null);
certManager.connect0(certConfig);
Assertions.assertNotNull(certManager.channel);
Assertions.assertEquals("127.0.0.1:30062", certManager.channel.authority());
frameworkModel.destroy();
}
|
/**
 * Builds a fully configured FEEL 1.1 ANTLR parser for the given source.
 * Wires the lexer/token stream, registers additional functions into the
 * built-in scope, installs error handling that reports through the events
 * manager instead of the console, pre-declares the input variables, and
 * optionally attaches a type registry.
 */
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
CharStream input = CharStreams.fromString(source);
FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
CommonTokenStream tokens = new CommonTokenStream( lexer );
FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
ParserHelper parserHelper = new ParserHelper(eventsManager);
// Make the extra functions resolvable from the built-in scope.
additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
parser.setHelper(parserHelper);
parser.setErrorHandler( new FEELErrorHandler() );
parser.removeErrorListeners(); // removes the error listener that prints to the console
parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
// pre-loads the parser with symbols
defineVariables( inputVariableTypes, inputVariables, parser );
if (typeRegistry != null) {
parserHelper.setTypeRegistry(typeRegistry);
}
return parser;
}
|
/**
 * A "some ... satisfies ..." expression must parse into a QuantifiedExpressionNode
 * with the SOME quantifier, a single iteration context, and a BOOLEAN result type.
 */
@Test
void quantifiedExpressionSome() {
String inputExpression = "some item in order.items satisfies item.price > 100";
BaseNode someBase = parse( inputExpression );
assertThat( someBase).isInstanceOf(QuantifiedExpressionNode.class);
assertThat( someBase.getText()).isEqualTo(inputExpression);
assertThat( someBase.getResultType()).isEqualTo(BuiltInType.BOOLEAN);
QuantifiedExpressionNode someExpr = (QuantifiedExpressionNode) someBase;
assertThat( someExpr.getQuantifier()).isEqualTo(QuantifiedExpressionNode.Quantifier.SOME);
assertThat( someExpr.getIterationContexts()).hasSize(1);
assertThat( someExpr.getIterationContexts().get( 0 ).getText()).isEqualTo( "item in order.items");
assertThat( someExpr.getExpression().getText()).isEqualTo( "item.price > 100");
}
|
/**
 * Returns all state objects that should be discarded with this subtask state:
 * every operator/keyed state handle plus, for channel states, their unique
 * underlying delegates (deduplicated by collectUniqueDelegates — presumably
 * because several channel handles can share one delegate; confirm).
 */
public List<StateObject> getDiscardables() {
return Stream.concat(
streamOperatorAndKeyedStates().flatMap(Collection::stream),
collectUniqueDelegates(streamChannelStates()))
.collect(Collectors.toList());
}
|
/**
 * getDiscardables must contain the plain operator/keyed handles directly and
 * the channel-state handles via their delegates (order-insensitive comparison).
 */
@Test
void testGetDiscardables() throws IOException {
Tuple2<List<StateObject>, OperatorSubtaskState> opStates =
generateSampleOperatorSubtaskState();
List<StateObject> states = opStates.f0;
OperatorSubtaskState operatorSubtaskState = opStates.f1;
// Channel states (indices 4 and 5) are expected through their delegates.
List<StateObject> discardables =
Arrays.asList(
states.get(0),
states.get(1),
states.get(2),
states.get(3),
((InputChannelStateHandle) states.get(4)).getDelegate(),
((ResultSubpartitionStateHandle) states.get(5)).getDelegate());
assertThat(new HashSet<>(operatorSubtaskState.getDiscardables()))
.isEqualTo(new HashSet<>(discardables));
}
|
/**
 * @return the wire-protocol type code identifying this message as a
 *         global-begin result
 */
@Override
public short getTypeCode() {
return MessageType.TYPE_GLOBAL_BEGIN_RESULT;
}
|
/**
 * A fresh GlobalBeginResponse must report the TYPE_GLOBAL_BEGIN_RESULT code.
 */
@Test
public void testGetTypeCode() {
GlobalBeginResponse globalBeginResponse = new GlobalBeginResponse();
Assertions.assertEquals(MessageType.TYPE_GLOBAL_BEGIN_RESULT, globalBeginResponse.getTypeCode());
}
|
/**
 * Unsupported mutation: RangeSet is immutable.
 *
 * @throws UnsupportedOperationException always
 */
public void set(int i) {
throw new UnsupportedOperationException("RangeSet is immutable");
}
|
/**
 * Calling set() on an immutable RangeSet must throw UnsupportedOperationException.
 */
@Test(expected = UnsupportedOperationException.class)
public void set() throws Exception {
RangeSet rs = new RangeSet(4);
rs.set(1);
}
|
/**
 * Selects the upload filter implementation for the given transfer action,
 * wiring in symlink resolution and cached find/attributes features.
 * Multi-root (or directory-root) transfers use default find/attributes feature
 * fallbacks; single-file transfers use the session's own features.
 *
 * Idiom cleanup: {@code filter(...).findAny().isPresent()} replaced with the
 * equivalent, short-circuiting {@code anyMatch(...)}.
 */
@Override
public AbstractUploadFilter filter(final Session<?> source, final Session<?> destination, final TransferAction action, final ProgressListener listener) {
    if(log.isDebugEnabled()) {
        log.debug(String.format("Filter transfer with action %s and options %s", action, options));
    }
    final Symlink symlink = source.getFeature(Symlink.class);
    final UploadSymlinkResolver resolver = new UploadSymlinkResolver(symlink, roots);
    final Find find;
    final AttributesFinder attributes;
    if(roots.size() > 1 || roots.stream().anyMatch(item -> item.remote.isDirectory())) {
        find = new CachingFindFeature(source, cache, source.getFeature(Find.class, new DefaultFindFeature(source)));
        attributes = new CachingAttributesFinderFeature(source, cache, source.getFeature(AttributesFinder.class, new DefaultAttributesFinderFeature(source)));
    }
    else {
        find = new CachingFindFeature(source, cache, source.getFeature(Find.class));
        attributes = new CachingAttributesFinderFeature(source, cache, source.getFeature(AttributesFinder.class));
    }
    if(log.isDebugEnabled()) {
        log.debug(String.format("Determined features %s and %s", find, attributes));
    }
    // Map each transfer action to its filter; overwrite is the default.
    if(action.equals(TransferAction.resume)) {
        return new ResumeFilter(resolver, source, options).withFinder(find).withAttributes(attributes);
    }
    if(action.equals(TransferAction.rename)) {
        return new RenameFilter(resolver, source, options).withFinder(find).withAttributes(attributes);
    }
    if(action.equals(TransferAction.renameexisting)) {
        return new RenameExistingFilter(resolver, source, options).withFinder(find).withAttributes(attributes);
    }
    if(action.equals(TransferAction.skip)) {
        return new SkipFilter(resolver, source, options).withFinder(find).withAttributes(attributes);
    }
    if(action.equals(TransferAction.comparison)) {
        return new CompareFilter(resolver, source, options, listener).withFinder(find).withAttributes(attributes);
    }
    return new OverwriteFilter(resolver, source, options).withFinder(find).withAttributes(attributes);
}
|
/**
 * Preparing an overwrite upload for a plain file must not schedule any
 * temporary rename (local or remote rename both stay null).
 */
@Test
public void testTemporaryDisabledMultipartUpload() throws Exception {
final Host h = new Host(new TestProtocol());
final NullSession session = new NullSession(h);
final AbstractUploadFilter f = new UploadTransfer(h, Collections.emptyList())
.filter(session, null, TransferAction.overwrite, new DisabledProgressListener());
final Path file = new Path("/t", EnumSet.of(Path.Type.file));
final TransferStatus status = f.prepare(file, new NullLocal("t"), new TransferStatus(), new DisabledProgressListener());
assertNull(status.getRename().local);
assertNull(status.getRename().remote);
}
|
/**
 * Borrows an int array of at least {@code positionCount} elements from the pool,
 * allocating a new one if no pooled array is large enough. Pooled arrays that are
 * too small are discarded (and their size removed from the estimate) until a
 * usable one is found. Enforces the max-outstanding-arrays limit.
 *
 * @param positionCount minimum required array length
 * @return a borrowed array, tracked in borrowedIntArrays until returned
 */
@Override
public int[] borrowIntArray(int positionCount)
{
checkState(getBorrowedArrayCount() < maxOutstandingArrays, "Requested too many arrays: %s", getBorrowedArrayCount());
int[] array;
// Drop pooled arrays smaller than the request; they are not reusable here.
while (!intArrays.isEmpty() && intArrays.peek().length < positionCount) {
array = intArrays.pop();
estimatedSizeInBytes -= sizeOf(array);
}
if (intArrays.isEmpty()) {
// No suitable pooled array: allocate fresh and account for it.
array = new int[positionCount];
estimatedSizeInBytes += sizeOf(array);
}
else {
array = intArrays.pop();
}
verify(borrowedIntArrays.add(array), "Attempted to borrow array which was already borrowed");
return array;
}
|
/**
 * Borrowing more arrays than the allocator's configured limit (2) must fail
 * with IllegalStateException on the third borrow.
 */
@Test
public void testOverAllocateLeases()
{
SimpleArrayAllocator allocator = new SimpleArrayAllocator(2);
allocator.borrowIntArray(10);
allocator.borrowIntArray(10);
assertThrows(IllegalStateException.class, () -> allocator.borrowIntArray(10));
}
|
@Udf(description = "Returns a new string with all matches of regexp in str replaced with newStr")
public String regexpReplace(
    @UdfParameter(
        description = "The source string. If null, then function returns null.") final String str,
    @UdfParameter(
        description = "The regexp to match."
            + " If null, then function returns null.") final String regexp,
    @UdfParameter(
        description = "The string to replace the matches with."
            + " If null, then function returns null.") final String newStr) {
    // Any null argument short-circuits to a null result.
    final boolean anyArgumentNull = str == null || regexp == null || newStr == null;
    if (anyArgumentNull) {
        return null;
    }
    final String replaced;
    try {
        replaced = str.replaceAll(regexp, newStr);
    } catch (PatternSyntaxException e) {
        // Surface malformed patterns as a KSQL function error.
        throw new KsqlFunctionException("Invalid regular expression pattern: " + regexp, e);
    }
    return replaced;
}
|
/**
 * A syntactically invalid regex ("((") must be reported as KsqlFunctionException.
 */
@Test(expected = KsqlFunctionException.class)
public void shouldThrowExceptionOnBadPattern() {
udf.regexpReplace("foobar", "(()", "bar");
}
|
/**
 * Analyzes a dependency against OSS Index. Component reports for ALL engine
 * dependencies are fetched once, lazily, under FETCH_MUTIX; subsequent calls
 * (and the enrich step) reuse the cached reports. Transport failures disable
 * the analyzer; whether they also fail the scan depends on the warn-only setting.
 *
 * NOTE(review): the generic TransportException fallback logs at debug level
 * before throwing, while sibling branches log at error/warn — confirm this
 * level difference is intentional.
 */
@Override
protected void analyzeDependency(final Dependency dependency, final Engine engine) throws AnalysisException {
// batch request component-reports for all dependencies
synchronized (FETCH_MUTIX) {
if (reports == null) {
try {
requestDelay();
reports = requestReports(engine.getDependencies());
} catch (TransportException ex) {
final String message = ex.getMessage();
final boolean warnOnly = getSettings().getBoolean(Settings.KEYS.ANALYZER_OSSINDEX_WARN_ONLY_ON_REMOTE_ERRORS, false);
// Any transport failure disables further OSS Index analysis.
this.setEnabled(false);
if (StringUtils.endsWith(message, "401")) {
LOG.error("Invalid credentials for the OSS Index, disabling the analyzer");
throw new AnalysisException("Invalid credentials provided for OSS Index", ex);
} else if (StringUtils.endsWith(message, "403")) {
LOG.error("OSS Index access forbidden, disabling the analyzer");
throw new AnalysisException("OSS Index access forbidden", ex);
} else if (StringUtils.endsWith(message, "429")) {
if (warnOnly) {
LOG.warn("OSS Index rate limit exceeded, disabling the analyzer", ex);
} else {
throw new AnalysisException("OSS Index rate limit exceeded, disabling the analyzer", ex);
}
} else if (warnOnly) {
LOG.warn("Error requesting component reports, disabling the analyzer", ex);
} else {
LOG.debug("Error requesting component reports, disabling the analyzer", ex);
throw new AnalysisException("Failed to request component-reports", ex);
}
} catch (SocketTimeoutException e) {
final boolean warnOnly = getSettings().getBoolean(Settings.KEYS.ANALYZER_OSSINDEX_WARN_ONLY_ON_REMOTE_ERRORS, false);
this.setEnabled(false);
if (warnOnly) {
LOG.warn("OSS Index socket timeout, disabling the analyzer", e);
} else {
LOG.debug("OSS Index socket timeout", e);
throw new AnalysisException("Failed to establish socket to OSS Index", e);
}
} catch (Exception e) {
LOG.debug("Error requesting component reports", e);
throw new AnalysisException("Failed to request component-reports", e);
}
}
// skip enrichment if we failed to fetch reports
if (reports != null) {
enrich(dependency);
}
}
}
|
/**
 * Regression test: enrich() must run inside the fetch mutex so a concurrent
 * reset of the reports cannot cause an NPE; the dependency's identifier URL
 * must be populated from the fetched OSS Index report.
 */
@Test
public void should_enrich_be_included_in_mutex_to_prevent_NPE()
throws Exception {
// Given
SproutOssIndexAnalyzer analyzer = new SproutOssIndexAnalyzer();
Identifier identifier = new PurlIdentifier("maven", "test", "test", "1.0",
Confidence.HIGHEST);
Dependency dependency = new Dependency();
dependency.addSoftwareIdentifier(identifier);
Settings settings = getSettings();
Engine engine = new Engine(settings);
engine.setDependencies(Collections.singletonList(dependency));
analyzer.initialize(settings);
String expectedOutput = "https://ossindex.sonatype.org/component/pkg:maven/test/test@1.0";
// When
analyzer.analyzeDependency(dependency, engine);
// Then
assertTrue(identifier.getUrl().startsWith(expectedOutput));
analyzer.awaitPendingClosure();
}
|
/**
 * Loads a notification template by its code, caching the non-null result in
 * Redis under the NOTIFY_TEMPLATE cache keyed by the code.
 *
 * @param code template code
 * @return the template, or null if no template has that code (null is not cached)
 */
@Override
@Cacheable(cacheNames = RedisKeyConstants.NOTIFY_TEMPLATE, key = "#code",
unless = "#result == null")
public NotifyTemplateDO getNotifyTemplateByCodeFromCache(String code) {
return notifyTemplateMapper.selectByCode(code);
}
|
/**
 * Looking up a template by its code must return a value equal to the stored row.
 */
@Test
public void testGetNotifyTemplateByCodeFromCache() {
// mock data
NotifyTemplateDO dbNotifyTemplate = randomPojo(NotifyTemplateDO.class);
notifyTemplateMapper.insert(dbNotifyTemplate);
// prepare parameters
String code = dbNotifyTemplate.getCode();
// invoke
NotifyTemplateDO notifyTemplate = notifyTemplateService.getNotifyTemplateByCodeFromCache(code);
// assert
assertPojoEquals(dbNotifyTemplate, notifyTemplate);
}
|
/**
 * Static factory returning a fresh {@code Inner<T>} instance.
 */
public static <T> Inner<T> create() {
return new Inner<>();
}
|
/**
 * Adding scalar, array, and iterable fields must extend the schema with
 * nullable versions of those fields and leave their values null on existing rows.
 */
@Test
@Category(NeedsRunner.class)
public void addSimpleFields() {
Schema schema = Schema.builder().addStringField("field1").build();
PCollection<Row> added =
pipeline
.apply(
Create.of(Row.withSchema(schema).addValue("value").build()).withRowSchema(schema))
.apply(
AddFields.<Row>create()
.field("field2", Schema.FieldType.INT32)
.field("field3", Schema.FieldType.array(Schema.FieldType.STRING))
.field("field4", Schema.FieldType.iterable(Schema.FieldType.STRING)));
// Added fields become nullable in the output schema.
Schema expectedSchema =
Schema.builder()
.addStringField("field1")
.addNullableField("field2", Schema.FieldType.INT32)
.addNullableField("field3", Schema.FieldType.array(Schema.FieldType.STRING))
.addNullableField("field4", Schema.FieldType.iterable(Schema.FieldType.STRING))
.build();
assertEquals(expectedSchema, added.getSchema());
Row expected = Row.withSchema(expectedSchema).addValues("value", null, null, null).build();
PAssert.that(added).containsInAnyOrder(expected);
pipeline.run();
}
|
/**
 * Copies every entry from {@code source} into {@code target} unless the key is
 * already present in {@code target} — i.e. source supplies defaults and target
 * values always win. The source configuration is never modified.
 *
 * @param source configuration providing default values (must not be null)
 * @param target configuration to receive missing defaults (must not be null)
 */
public static void injectDefaults(Configuration source, Configuration target) {
    Check.notNull(source, "source");
    Check.notNull(target, "target");
    for (Map.Entry<String, String> sourceEntry : source) {
        String key = sourceEntry.getKey();
        if (target.get(key) == null) {
            target.set(key, sourceEntry.getValue());
        }
    }
}
|
/**
 * injectDefaults must copy only keys missing from the target, never overwrite
 * existing target values, and never mutate the source configuration.
 */
@Test
public void injectDefaults() throws Exception {
Configuration srcConf = new Configuration(false);
Configuration targetConf = new Configuration(false);
srcConf.set("testParameter1", "valueFromSource");
srcConf.set("testParameter2", "valueFromSource");
targetConf.set("testParameter2", "originalValueFromTarget");
targetConf.set("testParameter3", "originalValueFromTarget");
ConfigurationUtils.injectDefaults(srcConf, targetConf);
// Missing key copied; existing target keys untouched.
assertEquals("valueFromSource", targetConf.get("testParameter1"));
assertEquals("originalValueFromTarget", targetConf.get("testParameter2"));
assertEquals("originalValueFromTarget", targetConf.get("testParameter3"));
// Source must be unchanged.
assertEquals("valueFromSource", srcConf.get("testParameter1"));
assertEquals("valueFromSource", srcConf.get("testParameter2"));
assertNull(srcConf.get("testParameter3"));
}
|
/**
 * Looks up the unique rule item node path registered for the given item type.
 *
 * @param itemType item type key
 * @return the node path, or null if the type was never registered — confirm callers handle this
 */
public UniqueRuleItemNodePath getUniqueItem(final String itemType) {
return uniqueItems.get(itemType);
}
|
/**
 * Unique items must expose their registered path (plain and type-suffixed)
 * and validate matching versioned / active-version metadata paths.
 */
@Test
void assertGetUniqueItem() {
UniqueRuleItemNodePath uniqueRulePath = ruleNodePath.getUniqueItem("tables");
assertThat(uniqueRulePath.getPath(), is("tables"));
assertTrue(uniqueRulePath.isValidatedPath("/metadata/db/rules/foo/tables/versions/1234"));
UniqueRuleItemNodePath uniqueRulePathWithType = ruleNodePath.getUniqueItem("type");
assertThat(uniqueRulePathWithType.getPath(), is("tables/type"));
assertTrue(uniqueRulePathWithType.isActiveVersionPath("/metadata/db/rules/foo/tables/type/active_version"));
}
|
/**
 * Executes the request against the current Eureka server, retrying on other
 * candidate servers up to numberOfRetries times. A sticky client (delegate) is
 * reused while it keeps succeeding; on failure it is cleared and the failing
 * endpoint is quarantined. A response is accepted only if the status evaluator
 * approves it for this request type; otherwise the next candidate is tried.
 *
 * @throws TransportException when no servers are known, all candidates are
 *         exhausted, or the retry limit is reached
 */
@Override
protected <R> EurekaHttpResponse<R> execute(RequestExecutor<R> requestExecutor) {
List<EurekaEndpoint> candidateHosts = null;
int endpointIdx = 0;
for (int retry = 0; retry < numberOfRetries; retry++) {
EurekaHttpClient currentHttpClient = delegate.get();
EurekaEndpoint currentEndpoint = null;
// No sticky client: pick the next candidate endpoint (list fetched lazily).
if (currentHttpClient == null) {
if (candidateHosts == null) {
candidateHosts = getHostCandidates();
if (candidateHosts.isEmpty()) {
throw new TransportException("There is no known eureka server; cluster server list is empty");
}
}
if (endpointIdx >= candidateHosts.size()) {
throw new TransportException("Cannot execute request on any known server");
}
currentEndpoint = candidateHosts.get(endpointIdx++);
currentHttpClient = clientFactory.newClient(currentEndpoint);
}
try {
EurekaHttpResponse<R> response = requestExecutor.execute(currentHttpClient);
if (serverStatusEvaluator.accept(response.getStatusCode(), requestExecutor.getRequestType())) {
// Accepted: make this client sticky for future requests.
delegate.set(currentHttpClient);
if (retry > 0) {
logger.info("Request execution succeeded on retry #{}", retry);
}
return response;
}
logger.warn("Request execution failure with status code {}; retrying on another server if available", response.getStatusCode());
} catch (Exception e) {
logger.warn("Request execution failed with message: {}", e.getMessage()); // just log message as the underlying client should log the stacktrace
}
// Connection error or 5xx from the server that must be retried on another server
delegate.compareAndSet(currentHttpClient, null);
if (currentEndpoint != null) {
quarantineSet.add(currentEndpoint);
}
}
throw new TransportException("Retry limit reached; giving up on completing the request");
}
|
/**
 * A connection error against the first server must be transparently retried
 * against the second server, returning its successful response.
 */
@Test
public void testRequestIsRetriedOnConnectionError() throws Exception {
when(clientFactory.newClient(Matchers.<EurekaEndpoint>anyVararg())).thenReturn(clusterDelegates.get(0), clusterDelegates.get(1));
when(requestExecutor.execute(clusterDelegates.get(0))).thenThrow(new TransportException("simulated network error"));
when(requestExecutor.execute(clusterDelegates.get(1))).thenReturn(EurekaHttpResponse.status(200));
EurekaHttpResponse<Void> httpResponse = retryableClient.execute(requestExecutor);
assertThat(httpResponse.getStatusCode(), is(equalTo(200)));
// Exactly one attempt per server, two clients created.
verify(clientFactory, times(2)).newClient(Matchers.<EurekaEndpoint>anyVararg());
verify(requestExecutor, times(1)).execute(clusterDelegates.get(0));
verify(requestExecutor, times(1)).execute(clusterDelegates.get(1));
}
|
/**
 * Static factory for a new {@link Builder}.
 */
public static Builder builder() {
return new Builder();
}
|
/**
 * Round-trip JSON serialization of UpdateNamespacePropertiesResponse:
 * unset list fields must serialize as empty arrays, and builders populated
 * via collections, varargs, or explicit empty lists must all be equivalent.
 */
@Test
public void testRoundTripSerDe() throws JsonProcessingException {
// Full request
String fullJson = "{\"removed\":[\"foo\"],\"updated\":[\"owner\"],\"missing\":[\"bar\"]}";
assertRoundTripSerializesEquallyFrom(
fullJson,
UpdateNamespacePropertiesResponse.builder()
.addUpdated(UPDATED)
.addRemoved(REMOVED)
.addMissing(MISSING)
.build());
// Only updated
String jsonOnlyUpdated = "{\"removed\":[],\"updated\":[\"owner\"],\"missing\":[]}";
assertRoundTripSerializesEquallyFrom(
jsonOnlyUpdated, UpdateNamespacePropertiesResponse.builder().addUpdated(UPDATED).build());
assertRoundTripSerializesEquallyFrom(
jsonOnlyUpdated, UpdateNamespacePropertiesResponse.builder().addUpdated("owner").build());
assertRoundTripSerializesEquallyFrom(
jsonOnlyUpdated,
UpdateNamespacePropertiesResponse.builder()
.addUpdated(UPDATED)
.addMissing(EMPTY_LIST)
.addRemoved(EMPTY_LIST)
.build());
// Only removed
String jsonOnlyRemoved = "{\"removed\":[\"foo\"],\"updated\":[],\"missing\":[]}";
assertRoundTripSerializesEquallyFrom(
jsonOnlyRemoved, UpdateNamespacePropertiesResponse.builder().addRemoved(REMOVED).build());
assertRoundTripSerializesEquallyFrom(
jsonOnlyRemoved, UpdateNamespacePropertiesResponse.builder().addRemoved("foo").build());
assertRoundTripSerializesEquallyFrom(
jsonOnlyRemoved,
UpdateNamespacePropertiesResponse.builder()
.addRemoved(REMOVED)
.addUpdated(EMPTY_LIST)
.addMissing(EMPTY_LIST)
.build());
// Only missing
String jsonOnlyMissing = "{\"removed\":[],\"updated\":[],\"missing\":[\"bar\"]}";
assertRoundTripSerializesEquallyFrom(
jsonOnlyMissing, UpdateNamespacePropertiesResponse.builder().addMissing(MISSING).build());
assertRoundTripSerializesEquallyFrom(
jsonOnlyMissing, UpdateNamespacePropertiesResponse.builder().addMissing("bar").build());
assertRoundTripSerializesEquallyFrom(
jsonOnlyMissing,
UpdateNamespacePropertiesResponse.builder()
.addMissing(MISSING)
.addUpdated(EMPTY_LIST)
.addRemoved(EMPTY_LIST)
.build());
// All fields are empty
String jsonWithAllFieldsAsEmptyList = "{\"removed\":[],\"updated\":[],\"missing\":[]}";
assertRoundTripSerializesEquallyFrom(
jsonWithAllFieldsAsEmptyList, UpdateNamespacePropertiesResponse.builder().build());
}
|
/**
 * Creates the named index on the managed Elasticsearch cluster.
 *
 * @param indexName name of the index to create
 * @return {@code false} if the index already exists, otherwise the cluster's
 *     acknowledgement flag for the create request
 * @throws ElasticsearchResourceManagerException if the existence check or the
 *     create call fails for any reason
 */
public synchronized boolean createIndex(String indexName)
    throws ElasticsearchResourceManagerException {
    LOG.info("Creating index using name '{}'.", indexName);
    try {
        // Check to see if the index exists
        if (indexExists(indexName)) {
            return false;
        }
        // NOTE(review): the name is registered for cleanup BEFORE creation is
        // confirmed — presumably so partially-created indexes are also torn
        // down; confirm this ordering is intentional.
        managedIndexNames.add(indexName);
        return elasticsearchClient
            .indices()
            .create(new CreateIndexRequest(indexName), RequestOptions.DEFAULT)
            .isAcknowledged();
    } catch (Exception e) {
        throw new ElasticsearchResourceManagerException("Error creating index.", e);
    }
}
|
// Happy path: index does not exist, the create call is acknowledged, so
// createIndex must return true and must have performed the existence check.
@Test
public void testCreateCollectionShouldReturnTrueIfElasticsearchDoesNotThrowAnyError()
    throws IOException {
    when(elasticsearchClient
        .indices()
        .exists(any(GetIndexRequest.class), eq(RequestOptions.DEFAULT)))
        .thenReturn(false);
    when(elasticsearchClient
        .indices()
        .create(any(CreateIndexRequest.class), eq(RequestOptions.DEFAULT))
        .isAcknowledged())
        .thenReturn(true);
    assertThat(testManager.createIndex(INDEX_NAME)).isEqualTo(true);
    verify(elasticsearchClient.indices())
        .exists(any(GetIndexRequest.class), eq(RequestOptions.DEFAULT));
}
|
/** Delegates the ERROR-level check to the wrapped commons-logging {@code Log}. */
@Override
public boolean isErrorEnabled() {
    return logger.isErrorEnabled();
}
|
// Verifies CommonsLogger forwards isErrorEnabled() to the wrapped Log instance.
@Test
public void testIsErrorEnabled() {
    final Log underlying = mock(Log.class);
    when(underlying.isErrorEnabled()).thenReturn(true);
    final InternalLogger wrapper = new CommonsLogger(underlying, "foo");
    assertTrue(wrapper.isErrorEnabled());
    // The call must have been delegated, not answered locally.
    verify(underlying).isErrorEnabled();
}
|
/**
 * Forwards a task state transition to the current scheduler state, but only when
 * that state holds an ExecutionGraph; in any other state the update is ignored.
 *
 * @return {@code true} if the transition was applied, {@code false} when the
 *     scheduler is in a state without an ExecutionGraph
 */
@Override
public boolean updateTaskExecutionState(TaskExecutionStateTransition taskExecutionState) {
    return state.tryCall(
            StateWithExecutionGraph.class,
            stateWithExecutionGraph ->
                stateWithExecutionGraph.updateTaskExecutionState(
                    taskExecutionState, labelFailure(taskExecutionState)),
            "updateTaskExecutionState")
        .orElse(false);
}
|
// A freshly built scheduler is not in a StateWithExecutionGraph, so a task state
// update for an unknown execution attempt must be rejected with false.
@Test
void testUpdateTaskExecutionStateReturnsFalseInIllegalState() throws Exception {
    final JobGraph jobGraph = createJobGraph();
    final AdaptiveScheduler scheduler =
        new AdaptiveSchedulerBuilder(
                jobGraph, mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
            .build();
    assertThat(
            scheduler.updateTaskExecutionState(
                new TaskExecutionStateTransition(
                    new TaskExecutionState(
                        createExecutionAttemptId(),
                        ExecutionState.FAILED))))
        .isFalse();
}
|
/**
 * Splits {@code str} around matches of {@code SPLIT_PATTERN} while keeping every
 * matched delimiter as its own list entry, in order of appearance.
 * A {@code null} input yields an empty list.
 */
protected final static List<String> splitStringPreserveDelimiter(String str, Pattern SPLIT_PATTERN) {
    final List<String> parts = new ArrayList<>();
    if (str == null) {
        return parts;
    }
    final Matcher m = SPLIT_PATTERN.matcher(str);
    int cursor = 0;
    while (m.find()) {
        // Text between the previous match (or the start) and this delimiter.
        if (m.start() > cursor) {
            parts.add(str.substring(cursor, m.start()));
        }
        parts.add(m.group());
        cursor = m.end();
    }
    // Trailing text after the last delimiter, if any.
    if (cursor < str.length()) {
        parts.add(str.substring(cursor));
    }
    return parts;
}
|
// Splitting "test , test2" by word boundary keeps each delimiter as its own
// element: [test, " ", ",", " ", test2].
@Test
public void testSplitString2() {
    List<String> list = DiffRowGenerator.splitStringPreserveDelimiter("test , test2", DiffRowGenerator.SPLIT_BY_WORD_PATTERN);
    // Removed leftover System.out.println debug output; assertions carry the check.
    assertEquals(5, list.size());
    assertEquals("[test, , ,, , test2]", list.toString());
}
|
/** Returns a new {@link JwtBuilder} for assembling a token with this parser's key. */
public JwtBuilder jwtBuilder() {
    return new JwtBuilder();
}
|
// A 32-byte secret selects HS256: the emitted token must start with the HS256 header.
@Test
void testParseWith32Key() {
    NacosJwtParser parser = new NacosJwtParser(encode("SecretKey01234567890123456789012"));
    String token = parser.jwtBuilder().setUserName("nacos").setExpiredTime(100L).compact();
    assertTrue(token.startsWith(NacosSignatureAlgorithm.HS256.getHeader()));
}
|
/**
 * Deletes each path via the Box REST API, choosing the folder or file endpoint
 * per entry. Folders are removed recursively. Fails fast: the first API error
 * aborts the loop, leaving later entries untouched.
 *
 * @throws BackgroundException mapped from the underlying Box API failure
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Path f : files.keySet()) {
        try {
            if(f.isDirectory()) {
                // recursive=true deletes the folder together with its contents
                new FoldersApi(new BoxApiClient(session.getClient())).deleteFoldersId(fileid.getFileId(f), null, true);
            }
            else {
                new FilesApi(new BoxApiClient(session.getClient())).deleteFilesId(fileid.getFileId(f), null);
            }
        }
        catch(ApiException e) {
            throw new BoxExceptionMappingService(fileid).map("Cannot delete {0}", e, f);
        }
    }
}
|
// End-to-end: create a folder with two files, delete both files in one call
// (folder must survive), then delete the folder itself.
@Test
public void testDeleteMultipleFiles() throws Exception {
    final BoxFileidProvider fileid = new BoxFileidProvider(session);
    final Path folder = new BoxDirectoryFeature(session, fileid).mkdir(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
    final Path file1 = new BoxTouchFeature(session, fileid).touch(new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final Path file2 = new BoxTouchFeature(session, fileid).touch(new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertTrue(new BoxFindFeature(session, fileid).find(file1));
    assertTrue(new BoxFindFeature(session, fileid).find(file2));
    new BoxDeleteFeature(session, fileid).delete(Arrays.asList(file1, file2), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse((new BoxFindFeature(session, fileid).find(file1, new DisabledListProgressListener())));
    assertFalse((new BoxFindFeature(session, fileid).find(file2, new DisabledListProgressListener())));
    // Deleting the files must not have removed the containing folder.
    assertTrue(new BoxFindFeature(session, fileid).find(folder, new DisabledListProgressListener()));
    new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse((new BoxFindFeature(session, fileid).find(folder, new DisabledListProgressListener())));
}
|
/**
 * Processes one input page, returning a yielding iterator over output pages.
 * An empty {@link Optional} element signals that the processor yielded (e.g. on
 * the driver yield signal) without producing a page.
 */
public Iterator<Optional<Page>> process(SqlFunctionProperties properties, DriverYieldSignal yieldSignal, LocalMemoryContext memoryContext, Page page)
{
    WorkProcessor<Page> processor = createWorkProcessor(properties, yieldSignal, memoryContext, page);
    return processor.yieldingIterator();
}
|
// Channel 1 is a LazyBlock whose loader throws; the projection only reads
// channel 0, so processing must finish without ever materializing channel 1.
@Test
public void testProjectLazyLoad()
{
    PageProcessor pageProcessor = new PageProcessor(Optional.of(new SelectAllFilter()), ImmutableList.of(new PageProjectionWithOutputs(new LazyPagePageProjection(), new int[] {
            0})), OptionalInt.of(MAX_BATCH_SIZE));
    // if channel 1 is loaded, test will fail
    Page inputPage = new Page(createLongSequenceBlock(0, 100), new LazyBlock(100, lazyBlock -> {
        throw new AssertionError("Lazy block should not be loaded");
    }));
    LocalMemoryContext memoryContext = newSimpleAggregatedMemoryContext().newLocalMemoryContext(PageProcessor.class.getSimpleName());
    Iterator<Optional<Page>> output = pageProcessor.process(SESSION.getSqlFunctionProperties(), new DriverYieldSignal(), memoryContext, inputPage);
    List<Optional<Page>> outputPages = ImmutableList.copyOf(output);
    assertEquals(outputPages.size(), 1);
    assertPageEquals(ImmutableList.of(BIGINT), outputPages.get(0).orElse(null), new Page(createLongSequenceBlock(0, 100)));
}
|
/**
 * Handles the ETH_NEW_PENDING_TRANSACTION_FILTER operation: registers a pending
 * transaction filter with the node and, on success, places the returned filter
 * id in the message body. On an RPC error the body is left to the error handler.
 */
@InvokeOnHeader(Web3jConstants.ETH_NEW_PENDING_TRANSACTION_FILTER)
void ethNewPendingTransactionFilter(Message message) throws IOException {
    Request<?, EthFilter> request = web3j.ethNewPendingTransactionFilter();
    setRequestId(message, request);
    EthFilter response = request.send();
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        message.setBody(response.getFilterId());
    }
}
|
// Stubs web3j to return filter id 1; the exchange body must carry that id.
@Test
public void ethNewPendingTransactionFilterTest() throws Exception {
    EthFilter response = Mockito.mock(EthFilter.class);
    Mockito.when(mockWeb3j.ethNewPendingTransactionFilter()).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.getFilterId()).thenReturn(BigInteger.ONE);
    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_NEW_PENDING_TRANSACTION_FILTER);
    template.send(exchange);
    BigInteger body = exchange.getIn().getBody(BigInteger.class);
    assertEquals(BigInteger.ONE, body);
}
|
/** Sends this request synchronously through the configured web3j service. */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
|
// eth_getFilterLogs must serialize the filter id as a hex quantity ("0x16").
@Test
public void testEthGetFilterLogs() throws Exception {
    web3j.ethGetFilterLogs(Numeric.toBigInt("0x16")).send();
    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getFilterLogs\","
            + "\"params\":[\"0x16\"],\"id\":1}");
}
|
/**
 * Resolves the object that query extractors should operate on, for either the
 * key or the value side of this entry.
 * <p>
 * Keys: serialized {@code Data} is returned directly for Portable/JSON/Compact
 * payloads (their dedicated Getters work on Data); otherwise the deserialized
 * key. Values: when no deserialized value is cached, the target is derived from
 * the value's Data; otherwise the choice depends on the cached object's type as
 * explained inline below.
 *
 * @param key {@code true} to resolve the key-side target, {@code false} for value
 */
@Override
protected Object getTargetObject(boolean key) {
    Object targetObject;
    if (key) {
        // keyData is never null
        if (keyData.isPortable() || keyData.isJson() || keyData.isCompact()) {
            targetObject = keyData;
        } else {
            targetObject = getKey();
        }
    } else {
        if (valueObject == null) {
            targetObject = getTargetObjectFromData();
        } else {
            if (valueObject instanceof PortableGenericRecord
                    || valueObject instanceof CompactGenericRecord) {
                // These two classes should be able to be handled by respective Getters
                // see PortableGetter and CompactGetter
                // We get into this branch when in memory format is Object and
                // - the cluster does not have PortableFactory configuration for Portable
                // - the cluster does not related classes for Compact
                targetObject = getValue();
            } else if (valueObject instanceof Portable
                    || serializationService.isCompactSerializable(valueObject)) {
                targetObject = getValueData();
            } else {
                // Note that targetObject can be PortableGenericRecord
                // and it will be handled with PortableGetter for query.
                // We get PortableGenericRecord here when in-memory format is OBJECT and
                // the cluster does not have PortableFactory configuration for the object's factory ID
                targetObject = getValue();
            }
        }
    }
    return targetObject;
}
|
// Portable key data must be returned as-is (same instance) when resolving the key target.
@Test
public void testGetTargetObject_givenKeyDataIsPortable_whenKeyFlagIsTrue_thenReturnKeyData() {
    Data keyData = mockPortableData();
    QueryableEntry entry = createEntry(keyData, new Object(), newExtractor());
    Object targetObject = entry.getTargetObject(true);
    assertSame(keyData, targetObject);
}
|
/**
 * Returns the maximum element of {@code input} by natural ordering.
 * Null elements are ignored; a null list or an all-null list yields null.
 */
@Udf
public <T extends Comparable<? super T>> T arrayMax(@UdfParameter(
    description = "Array of values from which to find the maximum") final List<T> input) {
    if (input == null) {
        return null;
    }
    T max = null;
    for (final T value : input) {
        if (value == null) {
            continue;
        }
        if (max == null || value.compareTo(max) > 0) {
            max = value;
        }
    }
    return max;
}
|
// A list containing only nulls has no comparable candidate, so the max is null.
@Test
public void shouldReturnNullForListOfNullInput() {
    final List<Integer> input = Arrays.asList(null, null, null);
    assertThat(udf.arrayMax(input), is(nullValue()));
}
|
/**
 * Checks whether the given permission is granted on this repository object,
 * consulting the ACL service at most once per permission (results are cached
 * in a lazily created map for the lifetime of this object).
 *
 * @param perm the repository file permission to check
 * @return {@code true} if access is granted
 * @throws KettleException propagated from the ACL service lookup
 */
@Override
public boolean hasAccess( RepositoryFilePermission perm ) throws KettleException {
  if ( hasAccess == null ) {
    hasAccess = new HashMap<RepositoryFilePermission, Boolean>();
  }
  if ( hasAccess.get( perm ) == null ) {
    // Boolean.valueOf reuses the cached TRUE/FALSE instances instead of the
    // deprecated allocating Boolean constructor.
    hasAccess.put( perm, Boolean.valueOf( aclService.hasAccess( getObjectId(), perm ) ) );
  }
  return hasAccess.get( perm ).booleanValue();
}
|
// READ is granted and WRITE is denied by the stubbed ACL service; hasAccess must
// reflect each answer.
@Test
public void testAccess() throws Exception {
    when( mockAclService.hasAccess( mockObjectId, RepositoryFilePermission.READ ) ).thenReturn( true );
    when( mockAclService.hasAccess( mockObjectId, RepositoryFilePermission.WRITE ) ).thenReturn( false );
    assertTrue( uiTransformation.hasAccess( RepositoryFilePermission.READ ) );
    assertFalse( uiTransformation.hasAccess( RepositoryFilePermission.WRITE ) );
}
|
/** SQL TRIM: strips leading and trailing whitespace from a UTF-8 varchar slice. */
@Description("removes whitespace from the beginning and end of a string")
@ScalarFunction
@LiteralParameters("x")
@SqlType("varchar(x)")
public static Slice trim(@SqlType("varchar(x)") Slice slice)
{
    return SliceUtf8.trim(slice);
}
|
// Covers empty/blank input, one- and two-sided trimming, and Unicode whitespace
// (U+2028 LINE SEPARATOR) around multi-byte CJK text.
@Test
public void testTrim()
{
    assertFunction("TRIM('')", createVarcharType(0), "");
    assertFunction("TRIM(' ')", createVarcharType(3), "");
    assertFunction("TRIM(' hello ')", createVarcharType(9), "hello");
    assertFunction("TRIM(' hello')", createVarcharType(7), "hello");
    assertFunction("TRIM('hello ')", createVarcharType(7), "hello");
    assertFunction("TRIM(' hello world ')", createVarcharType(13), "hello world");
    assertFunction("TRIM('\u4FE1\u5FF5 \u7231 \u5E0C\u671B \u2028 ')", createVarcharType(10), "\u4FE1\u5FF5 \u7231 \u5E0C\u671B");
    assertFunction("TRIM('\u4FE1\u5FF5 \u7231 \u5E0C\u671B ')", createVarcharType(9), "\u4FE1\u5FF5 \u7231 \u5E0C\u671B");
    assertFunction("TRIM(' \u4FE1\u5FF5 \u7231 \u5E0C\u671B ')", createVarcharType(9), "\u4FE1\u5FF5 \u7231 \u5E0C\u671B");
    assertFunction("TRIM(' \u4FE1\u5FF5 \u7231 \u5E0C\u671B')", createVarcharType(9), "\u4FE1\u5FF5 \u7231 \u5E0C\u671B");
    assertFunction("TRIM(' \u2028 \u4FE1\u5FF5 \u7231 \u5E0C\u671B')", createVarcharType(10), "\u4FE1\u5FF5 \u7231 \u5E0C\u671B");
}
|
/** Returns the shared singleton {@link GenericData} instance. */
public static GenericData get() {
    return INSTANCE;
}
|
// Records whose array field holds the same logical strings must compare equal
// regardless of the concrete CharSequence/collection implementations used
// (StringBuilder in an ArrayDeque, String in an ArrayList, Utf8 in a GenericArray).
@Test
void testEquals() {
    Schema s = recordSchema();
    GenericRecord r0 = new GenericData.Record(s);
    GenericRecord r1 = new GenericData.Record(s);
    GenericRecord r2 = new GenericData.Record(s);
    Collection<CharSequence> l0 = new ArrayDeque<>();
    List<CharSequence> l1 = new ArrayList<>();
    GenericArray<CharSequence> l2 = new GenericData.Array<>(1, s.getFields().get(0).schema());
    String foo = "foo";
    l0.add(new StringBuilder(foo));
    l1.add(foo);
    l2.add(new Utf8(foo));
    r0.put(0, l0);
    r1.put(0, l1);
    r2.put(0, l2);
    assertEquals(r0, r1);
    assertEquals(r0, r2);
    assertEquals(r1, r2);
}
|
/**
 * Returns the 1-based position of {@code str} within {@code args}, or 0 when it
 * is absent or either argument is null (mirrors SQL FIELD() semantics).
 */
@Udf
public int field(
    @UdfParameter final String str,
    @UdfParameter final String... args
) {
    if (str == null || args == null) {
        return 0;
    }
    int position = 1;
    for (final String candidate : args) {
        if (str.equals(candidate)) {
            return position;
        }
        position++;
    }
    return 0;
}
|
// A null varargs array is treated as "not found" (0), not as an error.
@Test
public void shouldNotFindStringInNullArray() {
    // When:
    String[] array = null;
    final int pos = field.field("1", array);
    // Then:
    assertThat(pos, equalTo(0));
}
|
/**
 * Merges two maps over the union of their keys: for every key present in either
 * input, applies {@code merge} to the (possibly null) values from each side and
 * keeps only non-null results. Empty/null inputs are treated as empty maps.
 */
public static <K, X, Y, V> Map<K, V> merge(Map<K, X> map1, Map<K, Y> map2, BiFunction<X, Y, V> merge) {
    if (MapUtil.isEmpty(map1) && MapUtil.isEmpty(map2)) {
        return MapUtil.newHashMap(0);
    } else if (MapUtil.isEmpty(map1)) {
        map1 = MapUtil.newHashMap(0);
    } else if (MapUtil.isEmpty(map2)) {
        map2 = MapUtil.newHashMap(0);
    }
    // Union of both key sets drives the merge.
    final Set<K> allKeys = new HashSet<>(map1.keySet());
    allKeys.addAll(map2.keySet());
    final Map<K, V> result = MapUtil.newHashMap(allKeys.size());
    for (final K k : allKeys) {
        final V merged = merge.apply(map1.get(k), map2.get(k));
        if (merged != null) {
            result.put(k, merged);
        }
    }
    return result;
}
|
// Exercises merge with: both inputs empty/null, one-sided input (merge sees a
// null second value), and both sides populated for the same key.
@Test
public void testMerge() {
    Map<Long, Student> map1 = null;
    Map<Long, Student> map2 = Collections.emptyMap();
    Map<Long, String> map = CollStreamUtil.merge(map1, map2, (s1, s2) -> s1.getName() + s2.getName());
    assertEquals(map, Collections.EMPTY_MAP);
    map1 = new HashMap<>();
    map1.put(1L, new Student(1, 1, 1, "张三"));
    map = CollStreamUtil.merge(map1, map2, this::merge);
    Map<Long, String> temp = new HashMap<>();
    temp.put(1L, "张三");
    assertEquals(map, temp);
    map2 = new HashMap<>();
    map2.put(1L, new Student(2, 1, 1, "李四"));
    map = CollStreamUtil.merge(map1, map2, this::merge);
    Map<Long, String> compare = new HashMap<>();
    compare.put(1L, "张三李四");
    assertEquals(map, compare);
}
|
/** Dispatches a SHOW statement to the global show-executor visitor and returns its result set. */
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
    return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
|
// SHOW CREATE TABLE on a mocked external Hive catalog table must render the
// Hive schema (columns, partition columns, location property) as StarRocks DDL.
@Test
public void testShowCreateExternalCatalogTable() throws DdlException, AnalysisException {
    new MockUp<MetadataMgr>() {
        @Mock
        public Database getDb(String catalogName, String dbName) {
            return new Database();
        }
        @Mock
        public Table getTable(String catalogName, String dbName, String tblName) {
            // Fabricate a Hive table: 4 columns, partitioned by (year, dt).
            List<Column> fullSchema = new ArrayList<>();
            Column columnId = new Column("id", Type.INT, true);
            columnId.setComment("id");
            Column columnName = new Column("name", Type.VARCHAR);
            Column columnYear = new Column("year", Type.INT);
            Column columnDt = new Column("dt", Type.INT);
            fullSchema.add(columnId);
            fullSchema.add(columnName);
            fullSchema.add(columnYear);
            fullSchema.add(columnDt);
            List<String> partitions = Lists.newArrayList();
            partitions.add("year");
            partitions.add("dt");
            HiveTable.Builder tableBuilder = HiveTable.builder()
                .setId(1)
                .setTableName("test_table")
                .setCatalogName("hive_catalog")
                .setResourceName(toResourceName("hive_catalog", "hive"))
                .setHiveDbName("hive_db")
                .setHiveTableName("test_table")
                .setPartitionColumnNames(partitions)
                .setFullSchema(fullSchema)
                .setTableLocation("hdfs://hadoop/hive/warehouse/test.db/test")
                .setCreateTime(10000);
            return tableBuilder.build();
        }
    };
    ShowCreateTableStmt stmt = new ShowCreateTableStmt(new TableName("hive_catalog", "hive_db", "test_table"),
        ShowCreateTableStmt.CreateTableType.TABLE);
    ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
    Assert.assertEquals("test_table", resultSet.getResultRows().get(0).get(0));
    Assert.assertEquals("CREATE TABLE `test_table` (\n" +
            " `id` int(11) DEFAULT NULL COMMENT \"id\",\n" +
            " `name` varchar DEFAULT NULL,\n" +
            " `year` int(11) DEFAULT NULL,\n" +
            " `dt` int(11) DEFAULT NULL\n" +
            ")\n" +
            "PARTITION BY (year, dt)\n" +
            "PROPERTIES (\"location\" = \"hdfs://hadoop/hive/warehouse/test.db/test\");",
        resultSet.getResultRows().get(0).get(1));
}
|
/**
 * Attempts to unblock the ring buffer after a producer died mid-write, by
 * converting the stalled region at the consumer index into a padding record so
 * consumers can advance past it.
 * <p>
 * A negative length at the consumer index means the producer claimed space but
 * never committed: the record is re-typed as padding and its length republished
 * positive. A zero length means the claim itself is incomplete: the gap up to
 * the first non-zero word (re-checked backwards for a racing writer) is padded.
 *
 * @return {@code true} if a stalled record was converted to padding
 */
public boolean unblock()
{
    final AtomicBuffer buffer = this.buffer;
    final long headPosition = buffer.getLongVolatile(headPositionIndex);
    final long tailPosition = buffer.getLongVolatile(tailPositionIndex);
    if (headPosition == tailPosition)
    {
        // Buffer is empty; nothing can be blocked.
        return false;
    }
    final int mask = capacity - 1;
    final int consumerIndex = (int)(headPosition & mask);
    final int producerIndex = (int)(tailPosition & mask);
    boolean unblocked = false;
    int length = buffer.getIntVolatile(consumerIndex);
    if (length < 0)
    {
        // Claimed but uncommitted record: republish as padding of the same size.
        buffer.putInt(typeOffset(consumerIndex), PADDING_MSG_TYPE_ID);
        buffer.putIntOrdered(lengthOffset(consumerIndex), -length);
        unblocked = true;
    }
    else if (0 == length)
    {
        // go from (consumerIndex to producerIndex) or (consumerIndex to capacity)
        final int limit = producerIndex > consumerIndex ? producerIndex : capacity;
        int i = consumerIndex + ALIGNMENT;
        do
        {
            // read the top int of every long (looking for length aligned to 8=ALIGNMENT)
            length = buffer.getIntVolatile(i);
            if (0 != length)
            {
                if (scanBackToConfirmStillZeroed(buffer, i, consumerIndex))
                {
                    buffer.putInt(typeOffset(consumerIndex), PADDING_MSG_TYPE_ID);
                    buffer.putIntOrdered(lengthOffset(consumerIndex), i - consumerIndex);
                    unblocked = true;
                }
                break;
            }
            i += ALIGNMENT;
        }
        while (i < limit);
    }
    return unblocked;
}
|
// A full buffer with a negative (claimed-but-uncommitted) length at the head must
// be unblocked by rewriting the record as padding with the positive length.
@Test
void shouldUnblockWhenFullWithHeader()
{
    final int messageLength = ALIGNMENT * 4;
    when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn((long)messageLength);
    when(buffer.getLongVolatile(TAIL_COUNTER_INDEX)).thenReturn((long)messageLength + CAPACITY);
    when(buffer.getIntVolatile(messageLength)).thenReturn(-messageLength);
    assertTrue(ringBuffer.unblock());
    // Type must be set before the length is published (ordered write).
    final InOrder inOrder = inOrder(buffer);
    inOrder.verify(buffer).putInt(typeOffset(messageLength), PADDING_MSG_TYPE_ID);
    inOrder.verify(buffer).putIntOrdered(lengthOffset(messageLength), messageLength);
}
|
/**
 * Parses a single CSV line into a field-name -> value map using the configured
 * field names. Returns the input unchanged when it is null/empty, and null
 * (discarding the message) when the column count does not match the configured
 * names or the CSV is malformed.
 */
@Override
public Object convert(String value) {
    if (value == null || value.isEmpty()) {
        return value;
    }
    final CSVParser parser = getCsvParser();
    final Map<String, String> fields = Maps.newHashMap();
    try {
        final String[] strings = parser.parseLine(value);
        if (strings.length != fieldNames.length) {
            LOG.error("Different number of columns in CSV data ({}) and configured field names ({}). Discarding input.",
                strings.length, fieldNames.length);
            return null;
        }
        // Positional mapping: i-th column -> i-th configured field name.
        for (int i = 0; i < strings.length; i++) {
            fields.put(fieldNames[i], strings[i]);
        }
    } catch (IOException e) {
        LOG.error("Invalid CSV input, discarding input", e);
        return null;
    }
    return fields;
}
|
// Edge cases for the 2-column converter: empty input passes through, while
// column-count mismatches and unbalanced quotes are discarded (null).
@Test
@SuppressWarnings("unchecked")
public void testEdgeCases() throws ConfigurationException {
    Map<String, Object> configMap = Maps.newHashMap();
    configMap.put("column_header", "f1,f2");
    CsvConverter csvConverter = new CsvConverter(configMap);
    String resultString = (String) csvConverter.convert("");
    assertEquals("", resultString);
    // too few fields
    Map<String, String> result = (Map<String, String>) csvConverter.convert("field1");
    assertNull("Too few fields in data doesn't work", result);
    // too many fields
    result = (Map<String, String>) csvConverter.convert("field1,field2,field3");
    assertNull("Too many fields in data doesn't work", result);
    // unclosed quote level
    result = (Map<String, String>) csvConverter.convert("field1,field2,\"field3");
    assertNull("Unbalanced quoting does not work", result);
}
|
/**
 * Batch-deletes undo-log rows created on or before {@code logCreated}, capped
 * at {@code limitRows} per call.
 *
 * @param logCreated cutoff creation date for rows to delete
 * @param limitRows  maximum number of rows to delete in this batch
 * @param conn       JDBC connection to execute against (not closed here)
 * @return number of rows actually deleted
 * @throws SQLException on any failure; non-SQL failures are wrapped as the cause
 */
@Override
public int deleteUndoLogByLogCreated(Date logCreated, int limitRows, Connection conn) throws SQLException {
    try (PreparedStatement deletePST = conn.prepareStatement(DELETE_UNDO_LOG_BY_CREATE_SQL)) {
        deletePST.setDate(1, new java.sql.Date(logCreated.getTime()));
        deletePST.setInt(2, limitRows);
        int deleteRows = deletePST.executeUpdate();
        if (logger.isDebugEnabled()) {
            logger.debug("batch delete undo log size {}", deleteRows);
        }
        return deleteRows;
    } catch (SQLException e) {
        throw e;
    } catch (Exception e) {
        // Wrap non-SQL failures, preserving the original as the cause, instead of
        // the previous reassign-and-cast pattern.
        throw new SQLException(e);
    }
}
|
// On an empty table the batch delete reports 0 rows and must not throw, whether
// invoked with a raw connection or the proxied one.
@Test
public void testDeleteUndoLogByLogCreated() throws SQLException {
    Assertions.assertEquals(0, undoLogManager.deleteUndoLogByLogCreated(new Date(), 3000, dataSource.getConnection()));
    Assertions.assertDoesNotThrow(() -> undoLogManager.deleteUndoLogByLogCreated(new Date(), 3000, connectionProxy));
}
|
/**
 * Collects every {@link AggregationProjection} among the projections, each
 * immediately followed by its derived aggregation projections.
 */
public List<AggregationProjection> getAggregationProjections() {
    final List<AggregationProjection> aggregations = new LinkedList<>();
    for (final Projection projection : projections) {
        if (!(projection instanceof AggregationProjection)) {
            continue;
        }
        final AggregationProjection aggregation = (AggregationProjection) projection;
        aggregations.add(aggregation);
        aggregations.addAll(aggregation.getDerivedAggregationProjections());
    }
    return aggregations;
}
|
// Only the aggregation projection (not the column projection) must be returned.
@Test
void assertGetAggregationProjections() {
    Projection projection = getAggregationProjection();
    List<AggregationProjection> items = new ProjectionsContext(0, 0, true, Arrays.asList(projection, getColumnProjection())).getAggregationProjections();
    assertTrue(items.contains(projection));
    assertThat(items.size(), is(1));
}
|
/**
 * Collector indexing elements into an {@link ImmutableListMultimap} keyed by
 * {@code keyFunction}, with the elements themselves as values.
 */
public static <K, E> Collector<E, ImmutableListMultimap.Builder<K, E>, ImmutableListMultimap<K, E>> index(Function<? super E, K> keyFunction) {
    return index(keyFunction, Function.identity());
}
|
// The value function must never yield null; the collector rejects it eagerly.
@Test
public void index_with_valueFunction_fails_if_value_function_returns_null() {
    assertThatThrownBy(() -> SINGLE_ELEMENT_LIST.stream().collect(index(MyObj::getId, s -> null)))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("Value function can't return null");
}
|
/**
 * Handles the NET_PEER_COUNT operation: queries the node's peer count and, on
 * success, places the quantity in the message body. On an RPC error the body is
 * left to the error handler.
 */
@InvokeOnHeader(Web3jConstants.NET_PEER_COUNT)
void netPeerCount(Message message) throws IOException {
    Request<?, NetPeerCount> request = web3j.netPeerCount();
    setRequestId(message, request);
    NetPeerCount response = request.send();
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        message.setBody(response.getQuantity());
    }
}
|
// Stubs web3j to report one peer; the exchange body must carry that quantity.
@Test
public void netPeerCountTest() throws Exception {
    BigInteger peerCount = BigInteger.ONE;
    NetPeerCount response = Mockito.mock(NetPeerCount.class);
    Mockito.when(mockWeb3j.netPeerCount()).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.getQuantity()).thenReturn(peerCount);
    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.NET_PEER_COUNT);
    template.send(exchange);
    BigInteger body = exchange.getIn().getBody(BigInteger.class);
    assertEquals(peerCount, body);
}
|
/**
 * Builds the {@link DirectGraph} from the collected traversal state.
 * Must only be called after the pipeline has been fully traversed.
 */
public DirectGraph getGraph() {
    checkState(finalized, "Can't get a graph before the Pipeline has been completely traversed");
    return DirectGraph.create(
        producers, viewWriters, perElementConsumers, rootTransforms, stepNames);
}
|
// Flattening the same PCollection with itself registers the Flatten producer
// twice as a consumer of that collection; the graph must retain both entries.
@Test
public void getValueToConsumersWithDuplicateInputSucceeds() {
    PCollection<String> created = p.apply(Create.of("1", "2", "3"));
    PCollection<String> flattened =
        PCollectionList.of(created).and(created).apply(Flatten.pCollections());
    p.traverseTopologically(visitor);
    DirectGraph graph = visitor.getGraph();
    AppliedPTransform<?, ?, ?> flattenedProducer = graph.getProducer(flattened);
    assertThat(
        graph.getPerElementConsumers(created),
        Matchers.containsInAnyOrder(new Object[] {flattenedProducer, flattenedProducer}));
    assertThat(graph.getPerElementConsumers(flattened), emptyIterable());
}
|
/** Returns a root {@link TypeBuilder} with a fresh completion target and name context. */
public static TypeBuilder<Schema> builder() {
    return new TypeBuilder<>(new SchemaCompletion(), new NameContext());
}
|
// A boolean built through booleanBuilder() with an extra prop must still equal
// the plain boolean primitive schema carrying that prop.
@Test
void testBoolean() {
    Schema.Type type = Schema.Type.BOOLEAN;
    Schema simple = SchemaBuilder.builder().booleanType();
    Schema expected = primitive(type, simple);
    Schema built1 = SchemaBuilder.builder().booleanBuilder().prop("p", "v").endBoolean();
    assertEquals(expected, built1);
}
|
/**
 * Builds the system topic list: every cluster name plus every broker name in
 * each cluster, and one live broker address for callers to query.
 * Read-locked against concurrent route table updates; on failure an empty
 * (or partial) list is returned and the error is logged.
 */
public TopicList getSystemTopicList() {
    TopicList topicList = new TopicList();
    try {
        this.lock.readLock().lockInterruptibly();
        // Unlock only if the lock was actually acquired; the previous layout
        // ran unlock() in a finally that also covered a failed acquisition,
        // which would throw IllegalMonitorStateException.
        try {
            for (Map.Entry<String, Set<String>> entry : clusterAddrTable.entrySet()) {
                topicList.getTopicList().add(entry.getKey());
                topicList.getTopicList().addAll(entry.getValue());
            }
            // Pick the first available broker address as the query endpoint.
            for (BrokerData bd : brokerAddrTable.values()) {
                HashMap<Long, String> brokerAddrs = bd.getBrokerAddrs();
                if (brokerAddrs != null && !brokerAddrs.isEmpty()) {
                    topicList.setBrokerAddr(brokerAddrs.values().iterator().next());
                    break;
                }
            }
        } finally {
            this.lock.readLock().unlock();
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag instead of swallowing it.
        Thread.currentThread().interrupt();
        log.error("getSystemTopicList Exception", e);
    } catch (Exception e) {
        log.error("getSystemTopicList Exception", e);
    }
    return topicList;
}
|
// The system topic list must always be encodable, even for an empty route table.
@Test
public void testGetSystemTopicList() {
    byte[] topicList = routeInfoManager.getSystemTopicList().encode();
    assertThat(topicList).isNotNull();
}
|
/**
 * Binds both operands of a binary operation and rebuilds the expression with the
 * bound segments, keeping index range, operator and original text unchanged.
 */
public static BinaryOperationExpression bind(final BinaryOperationExpression segment, final SegmentType parentSegmentType, final SQLStatementBinderContext binderContext,
                                             final Map<String, TableSegmentBinderContext> tableBinderContexts, final Map<String, TableSegmentBinderContext> outerTableBinderContexts) {
    final ExpressionSegment left = ExpressionSegmentBinder.bind(segment.getLeft(), parentSegmentType, binderContext, tableBinderContexts, outerTableBinderContexts);
    final ExpressionSegment right = ExpressionSegmentBinder.bind(segment.getRight(), parentSegmentType, binderContext, tableBinderContexts, outerTableBinderContexts);
    return new BinaryOperationExpression(segment.getStartIndex(), segment.getStopIndex(), left, right, segment.getOperator(), segment.getText());
}
|
// Binding must preserve operand texts, the operator, and the expression text.
@Test
void assertBinaryOperationExpression() {
    BinaryOperationExpression binaryOperationExpression = new BinaryOperationExpression(0, 0,
        new LiteralExpressionSegment(0, 0, "test"),
        new LiteralExpressionSegment(0, 0, "test"), "=", "test");
    SQLStatementBinderContext binderContext = mock(SQLStatementBinderContext.class);
    BinaryOperationExpression actual = BinaryOperationExpressionBinder.bind(binaryOperationExpression, SegmentType.PROJECTION,
        binderContext, Collections.emptyMap(), Collections.emptyMap());
    assertThat(actual.getLeft().getText(), is("test"));
    assertThat(actual.getRight().getText(), is("test"));
    assertThat(actual.getOperator(), is("="));
    assertThat(actual.getText(), is("test"));
}
|
/**
 * Number of tiles in this area (width * height).
 * NOTE(review): the int product can overflow for very large areas at high zoom
 * levels — callers appear to guard against that; confirm before relying on it.
 */
@Override
public int size() {
    return mWidth * mHeight;
}
|
// For each zoom level whose whole-world tile count fits in an int, a full-world
// area must report that count and expose a non-empty iterator.
@Test
public void testSize() {
    final MapTileArea area = new MapTileArea();
    for (int zoom = 0; zoom <= TileSystem.getMaximumZoomLevel(); zoom++) {
        final int mapTileUpperBound = getMapTileUpperBound(zoom);
        final long size = ((long) mapTileUpperBound) * mapTileUpperBound;
        // Stop before size() would overflow an int.
        if (size >= Integer.MAX_VALUE) {
            return;
        }
        setNewWorld(area, zoom);
        Assert.assertEquals(size, area.size());
        Assert.assertTrue(area.iterator().hasNext());
    }
}
|
/**
 * Two TopicMetadata instances are equal when id, name, partition count and the
 * per-partition rack assignments all match.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    TopicMetadata other = (TopicMetadata) o;
    return id.equals(other.id)
        && name.equals(other.name)
        && numPartitions == other.numPartitions
        && partitionRacks.equals(other.partitionRacks);
}
|
// Identical field values compare equal; differing partition counts/racks do not.
@Test
public void testEquals() {
    Uuid topicId = Uuid.randomUuid();
    Map<Integer, Set<String>> partitionRacks = mkMapOfPartitionRacks(15);
    TopicMetadata topicMetadata = new TopicMetadata(topicId, "foo", 15, partitionRacks);
    assertEquals(new TopicMetadata(topicId, "foo", 15, partitionRacks), topicMetadata);
    assertNotEquals(new TopicMetadata(topicId, "foo", 5, mkMapOfPartitionRacks(5)), topicMetadata);
}
|
/**
 * Creates the common entity indexes, then applies the PostgreSQL-specific
 * index add-on script on top.
 */
@Override
public void createDatabaseIndexes() throws Exception {
    super.createDatabaseIndexes();
    log.info("Installing SQL DataBase schema PostgreSQL specific indexes part: " + SCHEMA_ENTITIES_IDX_PSQL_ADDON_SQL);
    executeQueryFromFile(SCHEMA_ENTITIES_IDX_PSQL_ADDON_SQL);
}
|
// Both the common index script and the PostgreSQL add-on script must be executed,
// and nothing else.
@Test
public void givenPsqlDbSchemaService_whenCreateDatabaseIndexes_thenVerifyPsqlIndexSpecificCall() throws Exception {
    SqlEntityDatabaseSchemaService service = spy(new SqlEntityDatabaseSchemaService());
    willDoNothing().given(service).executeQueryFromFile(anyString());
    service.createDatabaseIndexes();
    verify(service, times(1)).executeQueryFromFile(SqlEntityDatabaseSchemaService.SCHEMA_ENTITIES_IDX_SQL);
    verify(service, times(1)).executeQueryFromFile(SqlEntityDatabaseSchemaService.SCHEMA_ENTITIES_IDX_PSQL_ADDON_SQL);
    verify(service, times(2)).executeQueryFromFile(anyString());
}
|
/** Binds {@code shenyu.sync.websocket.*} properties into a {@link WebsocketConfig} bean. */
@Bean
@ConfigurationProperties(prefix = "shenyu.sync.websocket")
public WebsocketConfig websocketConfig() {
    return new WebsocketConfig();
}
|
// The configured websocket sync URL list must be bound from properties.
@Test
public void testWebsocketConfig() {
    assertThat(websocketConfig.getUrls(), is(Lists.newArrayList("ws://localhost:9095/websocket")));
}
|
/**
 * Returns this payload's buffer, or {@link Optional#empty()} when the payload
 * does not carry one.
 */
public Optional<Buffer> getBuffer() {
    // Optional.ofNullable expresses the null check directly.
    return Optional.ofNullable(buffer);
}
|
// A buffer payload exposes its buffer via getBuffer(); construction rejects a
// null buffer and negative subpartition/buffer indexes.
@Test
void testGetBuffer() {
    Buffer buffer = BufferBuilderTestUtils.buildSomeBuffer(0);
    NettyPayload nettyPayload = NettyPayload.newBuffer(buffer, 0, 0);
    assertThat(nettyPayload.getBuffer()).isPresent();
    assertThat(nettyPayload.getBuffer()).hasValue(buffer);
    assertThatThrownBy(() -> NettyPayload.newBuffer(null, 0, 0))
        .isInstanceOf(IllegalStateException.class);
    assertThatThrownBy(() -> NettyPayload.newBuffer(buffer, -1, 0))
        .isInstanceOf(IllegalStateException.class);
    assertThatThrownBy(() -> NettyPayload.newBuffer(buffer, 0, -1))
        .isInstanceOf(IllegalStateException.class);
}
|
/**
 * Reports whether the given return type is (a subtype of) any of the supported
 * Rx reactive types.
 */
@SuppressWarnings("unchecked")
@Override
public boolean canHandleReturnType(Class returnType) {
    for (final Class supported : rxSupportedTypes) {
        if (supported.isAssignableFrom(returnType)) {
            return true;
        }
    }
    return false;
}
|
// Flowable and Single are RxJava 3 types the bulkhead aspect must handle.
@Test
public void testCheckTypes() {
    assertThat(rxJava3BulkheadAspectExt.canHandleReturnType(Flowable.class)).isTrue();
    assertThat(rxJava3BulkheadAspectExt.canHandleReturnType(Single.class)).isTrue();
}
|
/**
 * Back-fills {@code spec.creationTime} for comments created before the field
 * existed: falls back to the approved time, then to the metadata creation
 * timestamp. An already-set creation time is written back unchanged.
 */
void compatibleCreationTime(Comment comment) {
    var spec = comment.getSpec();
    var creationTime = spec.getCreationTime();
    if (creationTime == null) {
        creationTime = spec.getApprovedTime();
        if (creationTime == null) {
            creationTime = comment.getMetadata().getCreationTimestamp();
        }
    }
    spec.setCreationTime(creationTime);
}
|
// With creationTime unset but approvedTime present, reconciliation must copy
// the approved time into creationTime.
@Test
void compatibleCreationTime() {
    Comment comment = new Comment();
    comment.setMetadata(new Metadata());
    comment.getMetadata().setName("fake-comment");
    comment.setSpec(new Comment.CommentSpec());
    comment.getSpec().setApprovedTime(Instant.now());
    comment.getSpec().setCreationTime(null);
    commentReconciler.compatibleCreationTime(comment);
    assertThat(comment.getSpec().getCreationTime())
        .isEqualTo(comment.getSpec().getApprovedTime());
}
|
/** Creates a new ZMQ {@link Context} backed by the given number of I/O threads. */
public static Context context(int ioThreads)
{
    return new Context(ioThreads);
}
|
// Round-trips every binary-picture format code ("1248sScfm": 1/2/4/8-byte ints,
// short string, long string, byte chunk, frame, message) over a PUSH/PULL pair
// and checks each decoded value against what was sent.
@Test
public void testSocketSendRecvBinaryPicture()
{
    Context context = ZMQ.context(1);
    Socket push = context.socket(SocketType.PUSH);
    Socket pull = context.socket(SocketType.PULL);
    // Bounded receive timeout so a broken transfer fails fast instead of hanging.
    boolean rc = pull.setReceiveTimeOut(50);
    assertThat(rc, is(true));
    int port = push.bindToRandomPort("tcp://127.0.0.1");
    rc = pull.connect("tcp://127.0.0.1:" + port);
    assertThat(rc, is(true));
    String picture = "1248sScfm";
    ZMsg msg = new ZMsg();
    msg.add("Hello");
    msg.add("World");
    rc = push.sendBinaryPicture(
        picture,
        255,
        65535,
        429496729,
        Long.MAX_VALUE,
        "Hello World",
        "Hello cruel World!",
        "ABC".getBytes(ZMQ.CHARSET),
        new ZFrame("My frame"),
        msg);
    assertThat(rc, is(true));
    Object[] objects = pull.recvBinaryPicture(picture);
    assertThat(objects[0], is(equalTo(255)));
    assertThat(objects[1], is(equalTo(65535)));
    assertThat(objects[2], is(equalTo(429496729)));
    assertThat(objects[3], is(equalTo(Long.MAX_VALUE)));
    assertThat(objects[4], is(equalTo("Hello World")));
    assertThat(objects[5], is(equalTo("Hello cruel World!")));
    assertThat(objects[6], is(equalTo("ABC".getBytes(zmq.ZMQ.CHARSET))));
    assertThat(objects[7], is(equalTo(new ZFrame("My frame"))));
    ZMsg expectedMsg = new ZMsg();
    expectedMsg.add("Hello");
    expectedMsg.add("World");
    assertThat(objects[8], is(equalTo(expectedMsg)));
    push.close();
    pull.close();
    context.term();
}
|
/**
 * Blocks until the computation completes and returns its result.
 *
 * <p>Delegates to the timed {@code get(long, TimeUnit)} with an effectively
 * unbounded timeout; the (practically impossible) timeout is surfaced as an
 * {@code ExecutionException} so this signature never throws
 * {@code TimeoutException}.
 */
@Override
public V get()
    throws InterruptedException, ExecutionException {
    try {
        return get(Long.MAX_VALUE, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        throw new ExecutionException(e);
    }
}
|
/**
 * Registering a callback on the outer future AFTER the delegate completed and
 * {@code get()} returned must still run the callback exactly once with the
 * successful value (and never with a throwable).
 */
@Test
public void completeDelegate_successfully_callbackAfterGet_invokeGetOnOuter_callbacksRun() throws Exception {
    BiConsumer<String, Throwable> callback = getStringExecutionCallback();
    delegateFuture.run();
    outerFuture.get();
    // Callback attached only after completion — must fire immediately.
    outerFuture.whenCompleteAsync(callback, CALLER_RUNS);
    verify(callback, times(1)).accept(any(String.class), isNull());
    verify(callback, times(0)).accept(isNull(), any(Throwable.class));
    verifyNoMoreInteractions(callback);
}
|
/**
 * Binds the properties found under {@code prefix} onto the given Dubbo config
 * object, failing fast with a descriptive exception on any binding error.
 */
@Override
public <C extends AbstractConfig> void bind(String prefix, C dubboConfig) {
    DataBinder binder = new DataBinder(dubboConfig);
    // Configure leniency toward invalid / unknown fields.
    binder.setIgnoreInvalidFields(isIgnoreInvalidFields());
    binder.setIgnoreUnknownFields(isIgnoreUnknownFields());
    // Collect the properties under the given prefix and bind them.
    Map<String, Object> subProperties = PropertySourcesUtils.getSubProperties(getPropertySources(), prefix);
    binder.bind(new MutablePropertyValues(subProperties));
    BindingResult result = binder.getBindingResult();
    if (result.hasGlobalErrors()) {
        throw new RuntimeException(
                "Data bind global error, please check config. config: " + result.getGlobalError() + "");
    }
    if (result.hasFieldErrors()) {
        throw new RuntimeException(buildErrorMsg(
                result.getFieldErrors(),
                prefix,
                dubboConfig.getClass().getSimpleName()));
    }
}
|
/**
 * Binds application, registry and protocol configs from the test property
 * source and checks that prefixed values land on the right fields.
 */
@Test
void testBinder() {
    ApplicationConfig applicationConfig = new ApplicationConfig();
    dubboConfigBinder.bind("dubbo.application", applicationConfig);
    Assertions.assertEquals("hello", applicationConfig.getName());
    Assertions.assertEquals("world", applicationConfig.getOwner());
    RegistryConfig registryConfig = new RegistryConfig();
    dubboConfigBinder.bind("dubbo.registry", registryConfig);
    Assertions.assertEquals("10.20.153.17", registryConfig.getAddress());
    ProtocolConfig protocolConfig = new ProtocolConfig();
    dubboConfigBinder.bind("dubbo.protocol", protocolConfig);
    Assertions.assertEquals(Integer.valueOf(20881), protocolConfig.getPort());
}
|
/**
 * Parses a single step parameter, evaluating its expression against workflow
 * params, step params, and upstream step output data.
 *
 * <p>Convenience overload that starts the recursive evaluation with an empty
 * set of already-visited parameter names (used for cycle detection by the
 * underlying implementation).
 *
 * @param allStepOutputData output data of all upstream steps, keyed by step id
 * @param workflowParams workflow-level parameters
 * @param stepParams parameters of the current step
 * @param param the parameter to evaluate (mutated with the result)
 * @param stepId id of the step the parameter belongs to
 */
public void parseStepParameter(
    Map<String, Map<String, Object>> allStepOutputData,
    Map<String, Parameter> workflowParams,
    Map<String, Parameter> stepParams,
    Parameter param,
    String stepId) {
  parseStepParameter(
      allStepOutputData, workflowParams, stepParams, param, stepId, new HashSet<>());
}
|
/**
 * When a step parameter references a name that exists both as a workflow param
 * and a step param, resolution behavior depends on whether the step param is
 * already evaluated: an unevaluated step param defers to the workflow param
 * (5), while an evaluated step param wins (123).
 */
@Test
public void testParseStepParameterWithSameName() {
  LongParameter bar = LongParameter.builder().name("sample").expression("sample;").build();
  // Step param with the same name is NOT yet evaluated -> workflow value used.
  paramEvaluator.parseStepParameter(
      Collections.emptyMap(),
      Collections.singletonMap(
          "sample", LongParameter.builder().evaluatedResult(5L).evaluatedTime(123L).build()),
      Collections.singletonMap("sample", LongParameter.builder().expression("sample;").build()),
      bar,
      "step1");
  assertEquals(5L, bar.getEvaluatedResult().longValue());
  bar = LongParameter.builder().name("sample").expression("sample;").build();
  // Step param with the same name IS evaluated -> step value shadows workflow.
  paramEvaluator.parseStepParameter(
      Collections.emptyMap(),
      Collections.singletonMap(
          "sample", LongParameter.builder().evaluatedResult(5L).evaluatedTime(123L).build()),
      Collections.singletonMap(
          "sample", LongParameter.builder().evaluatedResult(123L).evaluatedTime(123L).build()),
      bar,
      "step1");
  assertEquals(123L, bar.getEvaluatedResult().longValue());
}
|
/**
 * Renders this CREATE EXTERNAL MAPPING statement back into SQL.
 *
 * <p>Emits the optional OR REPLACE / IF NOT EXISTS modifiers, the mapping
 * name, an optional EXTERNAL NAME, the column list (when present), exactly
 * one of DATA CONNECTION or TYPE, an optional OBJECT TYPE, and the OPTIONS
 * block.
 *
 * @param writer    target SQL writer
 * @param leftPrec  left precedence, forwarded to child nodes
 * @param rightPrec right precedence, forwarded to child nodes
 */
@Override
public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    writer.keyword("CREATE");
    if (getReplace()) {
        writer.keyword("OR REPLACE");
    }
    writer.keyword("EXTERNAL MAPPING");
    if (ifNotExists) {
        writer.keyword("IF NOT EXISTS");
    }
    name.unparse(writer, leftPrec, rightPrec);
    if (externalName != null) {
        writer.keyword("EXTERNAL NAME");
        externalName.unparse(writer, leftPrec, rightPrec);
    }
    // Column list: one indented line per column.
    if (!columns.isEmpty()) {
        SqlWriter.Frame frame = writer.startList("(", ")");
        for (SqlNode column : columns) {
            printIndent(writer);
            column.unparse(writer, 0, 0);
        }
        writer.newlineAndIndent();
        writer.endList(frame);
    }
    // Exactly one of dataConnection / connectorType is expected to be set.
    if (dataConnection != null) {
        writer.newlineAndIndent();
        writer.keyword("DATA CONNECTION");
        dataConnection.unparse(writer, leftPrec, rightPrec);
    } else {
        assert connectorType != null;
        writer.newlineAndIndent();
        writer.keyword("TYPE");
        connectorType.unparse(writer, leftPrec, rightPrec);
    }
    if (objectType != null) {
        writer.newlineAndIndent();
        writer.keyword("OBJECT TYPE");
        objectType.unparse(writer, leftPrec, rightPrec);
    }
    unparseOptions(writer, options);
}
|
/**
 * Unparsing a mapping whose external name contains a schema part and embedded
 * double quotes must produce correctly quoted (doubled-quote) identifiers.
 */
@Test
public void test_unparse_external_name_with_schema() {
    Mapping mapping = new Mapping(
            "name",
            // Embedded '"' characters must be escaped as '""' in the output.
            new String[]{"external\"schema", "external\"name"},
            null,
            "Type",
            null,
            singletonList(new MappingField("field", QueryDataType.VARCHAR, "__key.field")),
            ImmutableMap.of("key", "value")
    );
    String sql = SqlCreateMapping.unparse(mapping);
    assertThat(sql).isEqualTo("CREATE OR REPLACE EXTERNAL MAPPING \"hazelcast\".\"public\".\"name\" " +
            "EXTERNAL NAME \"external\"\"schema\".\"external\"\"name\" (" + LE +
            "  \"field\" VARCHAR EXTERNAL NAME \"__key.field\"" + LE +
            ")" + LE +
            "TYPE \"Type\"" + LE +
            "OPTIONS (" + LE +
            "  'key'='value'" + LE +
            ")"
    );
}
|
/**
 * Converts a variety of date/time representations into a Joda {@code DateTime}.
 *
 * <p>Supported inputs: Joda {@code DateTime} (returned as-is), {@code Date},
 * {@code ZonedDateTime}, {@code OffsetDateTime}, {@code LocalDateTime},
 * {@code LocalDate}, {@code Instant}, and {@code String} (parsed with the ES
 * date format). Local values are interpreted in the system default zone.
 *
 * @throws IllegalArgumentException for any unsupported type
 */
public static DateTime convertToDateTime(@Nonnull Object value) {
    if (value instanceof DateTime) {
        return (DateTime) value;
    } else if (value instanceof Date) {
        return new DateTime(value, DateTimeZone.UTC);
    } else if (value instanceof ZonedDateTime) {
        final ZonedDateTime zonedDateTime = (ZonedDateTime) value;
        final DateTimeZone dateTimeZone = DateTimeZone.forTimeZone(TimeZone.getTimeZone(zonedDateTime.getZone()));
        return new DateTime(Date.from(zonedDateTime.toInstant()), dateTimeZone);
    } else if (value instanceof OffsetDateTime) {
        return new DateTime(Date.from(((OffsetDateTime) value).toInstant()), DateTimeZone.UTC);
    } else if (value instanceof LocalDateTime) {
        return convertLocalDateTime((LocalDateTime) value);
    } else if (value instanceof LocalDate) {
        // Dates are interpreted as midnight at the start of the day.
        return convertLocalDateTime(((LocalDate) value).atStartOfDay());
    } else if (value instanceof Instant) {
        return new DateTime(Date.from((Instant) value), DateTimeZone.UTC);
    } else if (value instanceof String) {
        return ES_DATE_FORMAT_FORMATTER.parseDateTime((String) value);
    }
    throw new IllegalArgumentException("Value of invalid type <" + value.getClass().getSimpleName() + "> provided");
}

/**
 * Interprets the local date-time in the system default zone (the resulting
 * DateTime uses the JVM default zone, matching the original behavior).
 */
private static DateTime convertLocalDateTime(LocalDateTime localDateTime) {
    final ZoneOffset offset = ZoneId.systemDefault().getRules().getOffset(localDateTime);
    return new DateTime(Date.from(localDateTime.toInstant(offset)));
}
|
@Test
void convertFromZonedDateTimeUTC() {
    // A UTC ZonedDateTime must map to the equivalent Joda DateTime in UTC.
    final ZonedDateTime zoned = ZonedDateTime.of(2020, 10, 24, 10, 59, 0, 0, ZoneOffset.UTC);
    final DateTime expected = new DateTime(2020, 10, 24, 10, 59, DateTimeZone.UTC);
    assertThat(DateTimeConverter.convertToDateTime(zoned)).isEqualTo(expected);
}
|
/**
 * Creates a Neutron subnet from the JSON request body.
 *
 * <p>If this node is not the HA-active instance (and the active IP is not the
 * default placeholder), the raw request is forwarded to the active node
 * instead of being applied locally.
 *
 * @param input JSON representation of the subnet to create
 * @return 201 CREATED with a Location header pointing at the new subnet
 * @throws IOException if the request body cannot be read
 */
@POST
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response createSubnet(InputStream input) throws IOException {
    log.trace(String.format(MESSAGE, "CREATE"));
    String inputStr = IOUtils.toString(input, REST_UTF8);
    // Not the active HA node: proxy the request to the active instance.
    if (!haService.isActive()
            && !DEFAULT_ACTIVE_IP_ADDRESS.equals(haService.getActiveIp())) {
        return syncPost(haService, SUBNETS, inputStr);
    }
    final NeutronSubnet subnet = (NeutronSubnet)
            jsonToModelEntity(inputStr, NeutronSubnet.class);
    adminService.createSubnet(subnet);
    UriBuilder locationBuilder = uriInfo.getBaseUriBuilder()
            .path(SUBNETS)
            .path(subnet.getId());
    // TODO fix networking-onos to send Network UPDATE when subnet created
    return created(locationBuilder.build()).build();
}
|
/**
 * POSTing a valid subnet JSON to the subnets endpoint on the active HA node
 * must create the subnet via the admin service and return 201 CREATED.
 */
@Test
public void testCreateSubnetWithCreationOperation() {
    // Active HA node: request is handled locally, not proxied.
    expect(mockOpenstackHaService.isActive()).andReturn(true).anyTimes();
    replay(mockOpenstackHaService);
    mockOpenstackNetworkAdminService.createSubnet(anyObject());
    replay(mockOpenstackNetworkAdminService);
    final WebTarget wt = target();
    InputStream jsonStream = OpenstackSubnetWebResourceTest.class
            .getResourceAsStream("openstack-subnet.json");
    Response response = wt.path(PATH).request(MediaType.APPLICATION_JSON_TYPE)
            .post(Entity.json(jsonStream));
    final int status = response.getStatus();
    assertThat(status, is(201));
    verify(mockOpenstackNetworkAdminService);
}
|
/**
 * Advances to the next file to be read and opens it.
 *
 * <p>Two modes: if the filename comes from an incoming row field, a row is
 * fetched and the file is resolved from the configured field; otherwise the
 * next entry of the static file list is used. Missing files (when configured
 * to be ignored) and empty files (when configured to be ignored) are skipped
 * by recursing to the next candidate.
 *
 * @return true when a file was opened, false when there is nothing left to
 *         process or an error occurred (errors also stop the step)
 */
boolean openNextFile() {
  try {
    if ( meta.getFileInFields() ) {
      data.readrow = getRow(); // Grab another row ...
      if ( data.readrow == null ) { // finished processing!
        if ( isDetailed() ) {
          logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.FinishedProcessing" ) );
        }
        return false;
      }
      if ( first ) {
        first = false;
        // First row: derive output/conversion row metadata once.
        data.inputRowMeta = getInputRowMeta();
        data.outputRowMeta = data.inputRowMeta.clone();
        meta.getFields( data.outputRowMeta, getStepname(), null, null, this, repository, metaStore );
        // Create convert meta-data objects that will contain Date & Number formatters
        // All non binary content is handled as a String. It would be converted to the target type after the processing.
        data.convertRowMeta = data.outputRowMeta.cloneToType( ValueMetaInterface.TYPE_STRING );
        if ( meta.getFileInFields() ) {
          // Check is filename field is provided
          if ( Utils.isEmpty( meta.getDynamicFilenameField() ) ) {
            logError( BaseMessages.getString( PKG, "LoadFileInput.Log.NoField" ) );
            throw new KettleException( BaseMessages.getString( PKG, "LoadFileInput.Log.NoField" ) );
          }
          // cache the position of the field
          if ( data.indexOfFilenameField < 0 ) {
            data.indexOfFilenameField = data.inputRowMeta.indexOfValue( meta.getDynamicFilenameField() );
            if ( data.indexOfFilenameField < 0 ) {
              // The field is unreachable !
              logError( BaseMessages.getString( PKG, "LoadFileInput.Log.ErrorFindingField" )
                + "[" + meta.getDynamicFilenameField() + "]" );
              throw new KettleException( BaseMessages.getString(
                PKG, "LoadFileInput.Exception.CouldnotFindField", meta.getDynamicFilenameField() ) );
            }
          }
          // Get the number of previous fields
          data.totalpreviousfields = data.inputRowMeta.size();
        }
      } // end if first
      // get field value
      String Fieldvalue = data.inputRowMeta.getString( data.readrow, data.indexOfFilenameField );
      if ( isDetailed() ) {
        logDetailed( BaseMessages.getString(
          PKG, "LoadFileInput.Log.Stream", meta.getDynamicFilenameField(), Fieldvalue ) );
      }
      try {
        // Source is a file.
        data.file = KettleVFS.getFileObject( Fieldvalue );
      } catch ( Exception e ) {
        throw new KettleException( e );
      }
    } else {
      // Static file list mode.
      if ( data.filenr >= data.files.nrOfFiles() ) {
        // finished processing!
        if ( isDetailed() ) {
          logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.FinishedProcessing" ) );
        }
        return false;
      }
      // Is this the last file?
      data.last_file = ( data.filenr == data.files.nrOfFiles() - 1 );
      data.file = data.files.getFile( data.filenr );
    }
    // Check if file exists
    // NOTE(review): recursion is used to skip missing/empty files; a very long
    // run of skipped files could deepen the stack — confirm acceptable.
    if ( meta.isIgnoreMissingPath() && !data.file.exists() ) {
      logBasic( BaseMessages.getString( PKG, "LoadFileInput.Error.FileNotExists", "" + data.file.getName() ) );
      return openNextFile();
    }
    // Check if file is empty
    data.fileSize = data.file.getContent().getSize();
    // Move file pointer ahead!
    data.filenr++;
    if ( meta.isIgnoreEmptyFile() && data.fileSize == 0 ) {
      logError( BaseMessages.getString( PKG, "LoadFileInput.Error.FileSizeZero", "" + data.file.getName() ) );
      return openNextFile();
    } else {
      if ( isDetailed() ) {
        logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.OpeningFile", data.file.toString() ) );
      }
      data.filename = KettleVFS.getFilename( data.file );
      // Add additional fields?
      // Each optional output field is only computed when configured, to avoid
      // unnecessary VFS calls.
      if ( meta.getShortFileNameField() != null && meta.getShortFileNameField().length() > 0 ) {
        data.shortFilename = data.file.getName().getBaseName();
      }
      if ( meta.getPathField() != null && meta.getPathField().length() > 0 ) {
        data.path = KettleVFS.getFilename( data.file.getParent() );
      }
      if ( meta.isHiddenField() != null && meta.isHiddenField().length() > 0 ) {
        data.hidden = data.file.isHidden();
      }
      if ( meta.getExtensionField() != null && meta.getExtensionField().length() > 0 ) {
        data.extension = data.file.getName().getExtension();
      }
      if ( meta.getLastModificationDateField() != null && meta.getLastModificationDateField().length() > 0 ) {
        data.lastModificationDateTime = new Date( data.file.getContent().getLastModifiedTime() );
      }
      if ( meta.getUriField() != null && meta.getUriField().length() > 0 ) {
        data.uriName = Const.optionallyDecodeUriString( data.file.getName().getURI() );
      }
      if ( meta.getRootUriField() != null && meta.getRootUriField().length() > 0 ) {
        data.rootUriName = data.file.getName().getRootURI();
      }
      // get File content
      getFileContent();
      addFileToResultFilesName( data.file );
      if ( isDetailed() ) {
        logDetailed( BaseMessages.getString( PKG, "LoadFileInput.Log.FileOpened", data.file.toString() ) );
      }
    }
  } catch ( Exception e ) {
    logError( BaseMessages.getString( PKG, "LoadFileInput.Log.UnableToOpenFile", "" + data.filenr, data.file
      .toString(), e.toString() ) );
    stopAll();
    setErrors( 1 );
    return false;
  }
  return true;
}
|
/**
 * With "ignore empty file" enabled and the file pattern empty/non-empty/empty,
 * the first call skips the leading empty file and opens the non-empty one;
 * the second call skips the trailing empty file and reports end of input.
 * (Assumes input0.txt is empty and input1.txt is not — TODO confirm fixtures.)
 */
@Test
public void testOpenNextFile_010_ignoreEmpty() {
  stepMetaInterface.setIgnoreEmptyFile( true );
  stepInputFiles.addFile( getFile( "input0.txt" ) );
  stepInputFiles.addFile( getFile( "input1.txt" ) );
  stepInputFiles.addFile( getFile( "input0.txt" ) );
  assertTrue( stepLoadFileInput.openNextFile() );
  assertFalse( stepLoadFileInput.openNextFile() );
}
|
/**
 * Sends a REST request with a fresh, default {@code RequestContext}.
 *
 * <p>Convenience overload delegating to
 * {@code restRequest(RestRequest, RequestContext)}.
 */
@Override
public Future<RestResponse> restRequest(RestRequest request)
{
  return restRequest(request, new RequestContext());
}
|
/**
 * With an unlimited client retry ratio, requests that hit the failing host
 * must be retried and routed to the good host, so every scheduled request
 * completes without an error even under a sustained request rate.
 */
@Test
public void testRestRetryUnlimitedClientRetryRatio() throws Exception
{
  // One deliberately failing URI ("retry1") and one healthy URI ("good").
  SimpleLoadBalancer balancer = prepareLoadBalancer(Arrays.asList("http://test.linkedin.com/retry1", "http://test.linkedin.com/good"),
      HttpClientFactory.UNLIMITED_CLIENT_REQUEST_RETRY_RATIO);
  ClockedExecutor clock = new ClockedExecutor();
  DynamicClient dynamicClient = new DynamicClient(balancer, null);
  RetryClient client = new RetryClient(
      dynamicClient,
      balancer,
      D2ClientConfig.DEFAULT_RETRY_LIMIT,
      RetryClient.DEFAULT_UPDATE_INTERVAL_MS,
      RetryClient.DEFAULT_AGGREGATED_INTERVAL_NUM,
      clock,
      true,
      false);
  URI uri = URI.create("d2://retryService?arg1=empty&arg2=empty");
  RestRequest restRequest = new RestRequestBuilder(uri).build();
  // Fire a request every 100ms of simulated time and assert each one ends
  // with a result (t) and no error (e).
  clock.scheduleWithFixedDelay(() ->
  {
    DegraderTrackerClientTest.TestCallback<RestResponse> restCallback = new DegraderTrackerClientTest.TestCallback<>();
    client.restRequest(restRequest, restCallback);
    // This request will be retried and route to the good host
    assertNull(restCallback.e);
    assertNotNull(restCallback.t);
  }, 0, 100, TimeUnit.MILLISECONDS);
  clock.runFor(RetryClient.DEFAULT_UPDATE_INTERVAL_MS * 2);
}
|
/**
 * Resolves the list of Zuul filter class names from configuration: explicitly
 * listed classes ("zuul.filters.classes") plus a classpath scan of configured
 * packages ("zuul.filters.packages") for ZuulFilter implementations.
 */
@VisibleForTesting
String[] findClassNames(AbstractConfiguration config) {
    // Individually-configured filter class names.
    Stream<String> explicitClasses = Arrays.stream(config.getStringArray("zuul.filters.classes"))
            .map(String::trim)
            .filter(blank.negate());
    // Scan the configured packages for ZuulFilter implementations.
    ClassPath classPath;
    try {
        classPath = ClassPath.from(this.getClass().getClassLoader());
    } catch (IOException e) {
        throw new RuntimeException("Error attempting to read classpath to find filters!", e);
    }
    Stream<String> scannedClasses = Arrays.stream(config.getStringArray("zuul.filters.packages"))
            .map(String::trim)
            .filter(blank.negate())
            .flatMap(packageName -> classPath.getTopLevelClasses(packageName).stream())
            .map(ClassPath.ClassInfo::load)
            .filter(ZuulFilter.class::isAssignableFrom)
            .map(Class::getCanonicalName);
    String[] filterClassNames =
            Stream.concat(explicitClasses, scannedClasses).toArray(String[]::new);
    if (filterClassNames.length != 0) {
        LOG.info("Using filter classnames: ");
        for (String className : filterClassNames) {
            LOG.info("  {}", className);
        }
    }
    return filterClassNames;
}
|
/**
 * When only explicit filter class names are configured (no packages), the
 * resolver must return exactly those names without any classpath scanning.
 */
@Test
void testClassNamesOnly() {
    Class<?> expectedClass = TestZuulFilter.class;
    Mockito.when(configuration.getStringArray("zuul.filters.classes"))
            .thenReturn(new String[] {"com.netflix.zuul.init.TestZuulFilter"});
    Mockito.when(configuration.getStringArray("zuul.filters.packages")).thenReturn(new String[] {});
    String[] classNames = module.findClassNames(configuration);
    assertThat(classNames.length, equalTo(1));
    assertThat(classNames[0], equalTo(expectedClass.getCanonicalName()));
}
|
/**
 * Returns queue information, either from one specific sub-cluster (when the
 * request names one) or merged across all sub-clusters.
 *
 * <p>Failures are counted in router metrics and recorded in the audit log
 * before being rethrown as {@code YarnException}.
 *
 * @param request must be non-null and carry a queue name
 * @return the (possibly merged) queue info response
 * @throws YarnException on missing arguments or downstream failure
 * @throws IOException propagated from downstream calls
 */
@Override
public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request)
    throws YarnException, IOException {
  if (request == null || request.getQueueName() == null) {
    routerMetrics.incrGetQueueInfoFailedRetrieved();
    String msg = "Missing getQueueInfo request or queueName.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_QUEUEINFO, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    RouterServerUtil.logAndThrowException(msg, null);
  }
  String rSubCluster = request.getSubClusterId();
  long startTime = clock.getTime();
  ClientMethod remoteMethod = new ClientMethod("getQueueInfo",
      new Class[]{GetQueueInfoRequest.class}, new Object[]{request});
  Collection<GetQueueInfoResponse> queues = null;
  try {
    // Route to a single sub-cluster when one is requested; otherwise fan out
    // to all sub-clusters concurrently.
    if (StringUtils.isNotBlank(rSubCluster)) {
      queues = invoke(remoteMethod, GetQueueInfoResponse.class, rSubCluster);
    } else {
      queues = invokeConcurrent(remoteMethod, GetQueueInfoResponse.class);
    }
  } catch (Exception ex) {
    routerMetrics.incrGetQueueInfoFailedRetrieved();
    String msg = "Unable to get queue [" + request.getQueueName() + "] to exception.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_QUEUEINFO, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    RouterServerUtil.logAndThrowException(msg, ex);
  }
  long stopTime = clock.getTime();
  routerMetrics.succeededGetQueueInfoRetrieved(stopTime - startTime);
  RouterAuditLogger.logSuccess(user.getShortUserName(), GET_QUEUEINFO, TARGET_CLIENT_RM_SERVICE);
  // Merge the GetQueueInfoResponse
  return RouterYarnClientUtils.mergeQueues(queues);
}
|
/**
 * getQueueInfo must reject a null request and, for a valid "root" query,
 * return a merged queue info whose capacities/child-queue counts reflect the
 * aggregation across the federated sub-clusters.
 */
@Test
public void testGetQueueInfo() throws Exception {
  LOG.info("Test FederationClientInterceptor : Get Queue Info request.");
  // null request
  LambdaTestUtils.intercept(YarnException.class, "Missing getQueueInfo request or queueName.",
      () -> interceptor.getQueueInfo(null));
  // normal request
  GetQueueInfoResponse response = interceptor.getQueueInfo(
      GetQueueInfoRequest.newInstance("root", true, true, true));
  Assert.assertNotNull(response);
  QueueInfo queueInfo = response.getQueueInfo();
  Assert.assertNotNull(queueInfo);
  Assert.assertEquals("root", queueInfo.getQueueName());
  // Merged values across sub-clusters (e.g. capacity summed to 4.0).
  Assert.assertEquals(4.0, queueInfo.getCapacity(), 0);
  Assert.assertEquals(0.0, queueInfo.getCurrentCapacity(), 0);
  Assert.assertEquals(12, queueInfo.getChildQueues().size(), 0);
  Assert.assertEquals(1, queueInfo.getAccessibleNodeLabels().size());
}
|
/**
 * Creates a builder for a slot sharing group with the given name.
 *
 * @param name name of the slot sharing group
 * @return a new {@code Builder}
 */
public static Builder newBuilder(String name) {
    return new Builder(name);
}
|
/**
 * Building a slot sharing group with only a subset of the resource settings
 * (CPU and task off-heap memory, but not the rest) must be rejected.
 */
@Test
void testBuildSlotSharingGroupWithoutAllRequiredConfig() {
    assertThatThrownBy(
                    () ->
                            SlotSharingGroup.newBuilder("ssg")
                                    .setCpuCores(1)
                                    .setTaskOffHeapMemoryMB(10)
                                    .build())
            .isInstanceOf(IllegalArgumentException.class);
}
|
/**
 * Returns the timestamp associated with this record, or {@code Long.MIN_VALUE}
 * as a sentinel when no timestamp is set.
 *
 * <p>A missing timestamp is deliberately reported via the sentinel rather than
 * by throwing, so callers must not rely on an exception here. Note that a
 * record explicitly stamped with {@code Long.MIN_VALUE} is indistinguishable
 * from an unstamped one through this accessor alone.
 */
public long getTimestamp() {
    if (hasTimestamp) {
        return timestamp;
    }
    // No timestamp attached (e.g. processing-time mode): signal with sentinel.
    return Long.MIN_VALUE;
}
|
/**
 * Any long value — including the extremes — is a legal record timestamp and is
 * returned unchanged. Note that Long.MIN_VALUE coincides with the "no
 * timestamp" sentinel, so that case is indistinguishable through the getter.
 */
@Test
void testAllowedTimestampRange() {
    assertThat(new StreamRecord<>("test", 0).getTimestamp()).isZero();
    assertThat(new StreamRecord<>("test", -1).getTimestamp()).isEqualTo(-1L);
    assertThat(new StreamRecord<>("test", 1).getTimestamp()).isOne();
    assertThat(new StreamRecord<>("test", Long.MIN_VALUE).getTimestamp())
            .isEqualTo(Long.MIN_VALUE);
    assertThat(new StreamRecord<>("test", Long.MAX_VALUE).getTimestamp())
            .isEqualTo(Long.MAX_VALUE);
}
|
/**
 * Returns the given throwable as a {@code CeTaskInterruptedException} when it
 * is one (or, via the fallback, when one appears in its cause chain);
 * otherwise an empty Optional.
 */
public static Optional<CeTaskInterruptedException> isTaskInterruptedException(Throwable e) {
  // Direct match first; otherwise inspect the cause chain.
  return e instanceof CeTaskInterruptedException interruptedException
    ? Optional.of(interruptedException)
    : isCauseInterruptedException(e);
}
|
/**
 * Both direct CeTaskInterruptedException instances (anonymous and named
 * subclasses) must be returned wrapped in the Optional, while unrelated
 * exceptions yield an empty Optional.
 */
@Test
public void isCauseInterruptedException_returns_CeTaskInterruptedException_or_subclass() {
  String message = randomAlphabetic(50);
  CeActivityDto.Status status = randomStatus();
  // Anonymous subclass ...
  CeTaskInterruptedException e1 = new CeTaskInterruptedException(message, status) {
  };
  // ... and a named subclass.
  CeTaskInterruptedException e2 = new CeTaskInterruptedExceptionSubclass(message, status);
  assertThat(isTaskInterruptedException(e1)).contains(e1);
  assertThat(isTaskInterruptedException(e2)).contains(e2);
  assertThat(isTaskInterruptedException(new RuntimeException())).isEmpty();
  assertThat(isTaskInterruptedException(new Exception())).isEmpty();
}
|
/**
 * Returns the ratio of the wrapped criterion's value over the
 * enter-and-hold criterion's value for the given position.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    return criterion.calculate(series, position).dividedBy(enterAndHoldCriterion.calculate(series, position));
}
|
/**
 * The versus-buy-and-hold ratio for the number-of-bars criterion: the trading
 * record spans 6 bars and buy-and-hold spans the same 6 bars, so the ratio
 * is 1.
 */
@Test
public void calculateWithNumberOfBars() {
    MockBarSeries series = new MockBarSeries(numFunction, 100, 95, 100, 80, 85, 130);
    TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(1, series),
            Trade.buyAt(2, series), Trade.sellAt(5, series));
    AnalysisCriterion buyAndHold = getCriterion(new NumberOfBarsCriterion());
    assertNumEquals(6d / 6d, buyAndHold.calculate(series, tradingRecord));
}
|
/**
 * Updates a DIY template after validating that it exists and that its name
 * remains unique.
 */
@Override
public void updateDiyTemplate(DiyTemplateUpdateReqVO updateReqVO) {
    // Validate the template exists
    validateDiyTemplateExists(updateReqVO.getId());
    // Validate the name is unique
    validateNameUnique(updateReqVO.getId(), updateReqVO.getName());
    // Perform the update
    DiyTemplateDO updateObj = DiyTemplateConvert.INSTANCE.convert(updateReqVO);
    diyTemplateMapper.updateById(updateObj);
}
|
/**
 * Updating an existing DIY template must persist all fields from the request.
 */
@Test
public void testUpdateDiyTemplate_success() {
    // mock data
    DiyTemplateDO dbDiyTemplate = randomPojo(DiyTemplateDO.class);
    diyTemplateMapper.insert(dbDiyTemplate);// @Sql: insert an existing row first
    // prepare the request
    DiyTemplateUpdateReqVO reqVO = randomPojo(DiyTemplateUpdateReqVO.class, o -> {
        o.setId(dbDiyTemplate.getId()); // set the id of the row to update
    });
    // invoke
    diyTemplateService.updateDiyTemplate(reqVO);
    // verify the update was applied correctly
    DiyTemplateDO diyTemplate = diyTemplateMapper.selectById(reqVO.getId()); // fetch the latest state
    assertPojoEquals(reqVO, diyTemplate);
}
|
/**
 * Converts the data table to the given type, treating the table as not
 * transposed (delegates to the transposed-aware overload with {@code false}).
 */
@Override
public <T> T convert(DataTable dataTable, Type type) {
    return convert(dataTable, type, false);
}
|
/**
 * Converting a header-only (zero-row) table to a list of a complex type with
 * only default transformers registered must fail, and the exception message
 * must enumerate every candidate converter with the reason it was not
 * applicable.
 */
@Test
void convert_to_empty_list_of_object__using_default_converter__throws_exception() {
    DataTable table = parse("",
        " | firstName | lastName | birthDate |");
    registry.setDefaultDataTableEntryTransformer(JACKSON_TABLE_ENTRY_BY_TYPE_CONVERTER);
    registry.setDefaultDataTableCellTransformer(TABLE_CELL_BY_TYPE_CONVERTER_SHOULD_NOT_BE_USED);
    CucumberDataTableException exception = assertThrows(
        CucumberDataTableException.class,
        () -> converter.convert(table, LIST_OF_AUTHOR));
    // The message is asserted verbatim: it should explain all three failed
    // conversion strategies and how to fix each.
    assertThat(exception.getMessage(), is("" +
            "Can't convert DataTable to List<io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Author>.\n"
            +
            "Please review these problems:\n" +
            "\n" +
            " - There was a default table cell transformer that could be used but the table was too wide to use it.\n"
            +
            "   Please reduce the table width to use this converter.\n" +
            "\n" +
            " - There was no table entry or table row transformer registered for io.cucumber.datatable.DataTableTypeRegistryTableConverterTest$Author.\n"
            +
            "   Please consider registering a table entry or row transformer.\n" +
            "\n" +
            " - There was a default table entry transformer that could be used but the table was too short use it.\n"
            +
            "   Please increase the table height to use this converter.\n" +
            "\n" +
            "Note: Usually solving one is enough"));
}
|
/**
 * Initializes the sink node: eagerly prepares the key and value serializers so
 * that serde misconfiguration fails fast, wrapping any failure with the sink
 * node name and task id for easier diagnosis.
 */
@Override
public void init(final InternalProcessorContext<Void, Void> context) {
    super.init(context);
    this.context = context;
    try {
        keySerializer = prepareKeySerializer(keySerializer, context, this.name());
    } catch (final ConfigException | StreamsException e) {
        throw new StreamsException(String.format("Failed to initialize key serdes for sink node %s", name()), e, context.taskId());
    }
    try {
        valSerializer = prepareValueSerializer(valSerializer, context, this.name());
    } catch (final ConfigException | StreamsException e) {
        throw new StreamsException(String.format("Failed to initialize value serdes for sink node %s", name()), e, context.taskId());
    }
}
|
/**
 * When preparing the key serializer throws, init must rethrow a
 * StreamsException whose message names the failing sink node.
 */
@Test
public void shouldThrowStreamsExceptionWithExplicitErrorMessage() {
    utilsMock.when(() -> WrappingNullableUtils.prepareKeySerializer(any(), any(), any())).thenThrow(new StreamsException(""));
    final Throwable exception = assertThrows(StreamsException.class, () -> sink.init(context));
    assertThat(exception.getMessage(), equalTo("Failed to initialize key serdes for sink node anyNodeName"));
}
|
/**
 * Validates a resource request against the maximum allocation and node-label
 * rules: the requested resource must fit the maximum, label expressions are
 * only allowed on the ANY resource name, must name a single label, and must
 * be accessible from the queue.
 *
 * NOTE(review): {@code queueInfo} is dereferenced for the error messages at
 * the first two label checks before the {@code queueInfo != null} guard used
 * by the third check — confirm a null queueInfo cannot reach those branches.
 *
 * @throws InvalidResourceRequestException if any rule is violated
 */
private static void validateResourceRequest(ResourceRequest resReq,
    Resource maximumAllocation, QueueInfo queueInfo, RMContext rmContext)
    throws InvalidResourceRequestException {
  final Resource requestedResource = resReq.getCapability();
  checkResourceRequestAgainstAvailableResource(requestedResource,
      maximumAllocation);

  String labelExp = resReq.getNodeLabelExpression();

  // we don't allow specify label expression other than resourceName=ANY now
  if (!ResourceRequest.ANY.equals(resReq.getResourceName())
      && labelExp != null && !labelExp.trim().isEmpty()) {
    throw new InvalidLabelResourceRequestException(
        "Invalid resource request, queue=" + queueInfo.getQueueName()
            + " specified node label expression in a "
            + "resource request has resource name = "
            + resReq.getResourceName());
  }

  // we don't allow specify label expression with more than one node labels now
  if (labelExp != null && labelExp.contains("&&")) {
    throw new InvalidLabelResourceRequestException(
        "Invalid resource request, queue=" + queueInfo.getQueueName()
            + " specified more than one node label "
            + "in a node label expression, node label expression = "
            + labelExp);
  }

  if (labelExp != null && !labelExp.trim().isEmpty() && queueInfo != null) {
    // The queue must be allowed to access every label in the expression.
    if (!checkQueueLabelExpression(queueInfo.getAccessibleNodeLabels(),
        labelExp, rmContext)) {
      throw new InvalidLabelResourceRequestException(
          "Invalid resource request" + ", queue=" + queueInfo.getQueueName()
              + " doesn't have permission to access all labels "
              + "in resource request. labelExpression of resource request="
              + labelExp + ". Queue labels="
              + (queueInfo.getAccessibleNodeLabels() == null ? ""
                  : StringUtils.join(
                      queueInfo.getAccessibleNodeLabels().iterator(), ',')))
    } else {
      checkQueueLabelInLabelManager(labelExp, rmContext);
    }
  }
}
|
/**
 * Boundary-value checks for resource request validation: zero and maximum
 * memory/vcores must be accepted; negative values and values above the
 * maximum allocation must be rejected with the matching invalid-resource
 * type.
 */
@Test(timeout = 30000)
public void testValidateResourceRequest() throws IOException {
  ResourceScheduler mockScheduler = mock(ResourceScheduler.class);

  QueueInfo queueInfo = mock(QueueInfo.class);
  when(queueInfo.getQueueName()).thenReturn("queue");

  Resource maxResource =
      Resources.createResource(
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
          YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);

  when(rmContext.getScheduler()).thenReturn(mockScheduler);
  when(mockScheduler.getQueueInfo(Mockito.anyString(), Mockito.anyBoolean(),
      Mockito.anyBoolean())).thenReturn(queueInfo);

  // zero memory
  try {
    Resource resource =
        Resources.createResource(0,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
    ResourceRequest resReq =
        BuilderUtils.newResourceRequest(mock(Priority.class),
            ResourceRequest.ANY, resource, 1);
    normalizeAndvalidateRequest(resReq, null,
        mockScheduler, rmContext, maxResource);
  } catch (InvalidResourceRequestException e) {
    fail("Zero memory should be accepted");
  }

  // zero vcores
  try {
    Resource resource =
        Resources.createResource(
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
    ResourceRequest resReq =
        BuilderUtils.newResourceRequest(mock(Priority.class),
            ResourceRequest.ANY, resource, 1);
    normalizeAndvalidateRequest(resReq, null,
        mockScheduler, rmContext, maxResource);
  } catch (InvalidResourceRequestException e) {
    fail("Zero vcores should be accepted");
  }

  // max memory
  try {
    Resource resource =
        Resources.createResource(
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
    ResourceRequest resReq =
        BuilderUtils.newResourceRequest(mock(Priority.class),
            ResourceRequest.ANY, resource, 1);
    normalizeAndvalidateRequest(resReq, null,
        mockScheduler, rmContext, maxResource);
  } catch (InvalidResourceRequestException e) {
    fail("Max memory should be accepted");
  }

  // max vcores
  try {
    Resource resource =
        Resources.createResource(
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
    ResourceRequest resReq =
        BuilderUtils.newResourceRequest(mock(Priority.class),
            ResourceRequest.ANY, resource, 1);
    normalizeAndvalidateRequest(resReq, null,
        mockScheduler, rmContext, maxResource);
  } catch (InvalidResourceRequestException e) {
    // Fixed inverted message: this branch means max vcores were REJECTED,
    // but they should be accepted (mirrors the "max memory" case above).
    fail("Max vcores should be accepted");
  }

  // negative memory
  try {
    Resource resource =
        Resources.createResource(-1,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
    ResourceRequest resReq =
        BuilderUtils.newResourceRequest(mock(Priority.class),
            ResourceRequest.ANY, resource, 1);
    normalizeAndvalidateRequest(resReq, null,
        mockScheduler, rmContext, maxResource);
    fail("Negative memory should not be accepted");
  } catch (InvalidResourceRequestException e) {
    assertEquals(LESS_THAN_ZERO, e.getInvalidResourceType());
  }

  // negative vcores
  try {
    Resource resource =
        Resources.createResource(
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -1);
    ResourceRequest resReq =
        BuilderUtils.newResourceRequest(mock(Priority.class),
            ResourceRequest.ANY, resource, 1);
    normalizeAndvalidateRequest(resReq, null,
        mockScheduler, rmContext, maxResource);
    fail("Negative vcores should not be accepted");
  } catch (InvalidResourceRequestException e) {
    assertEquals(LESS_THAN_ZERO, e.getInvalidResourceType());
  }

  // more than max memory
  try {
    Resource resource =
        Resources.createResource(
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB + 1,
            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
    ResourceRequest resReq =
        BuilderUtils.newResourceRequest(mock(Priority.class),
            ResourceRequest.ANY, resource, 1);
    normalizeAndvalidateRequest(resReq, null,
        mockScheduler, rmContext, maxResource);
    fail("More than max memory should not be accepted");
  } catch (InvalidResourceRequestException e) {
    assertEquals(GREATER_THEN_MAX_ALLOCATION, e.getInvalidResourceType());
  }

  // more than max vcores
  try {
    Resource resource = Resources.createResource(
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES + 1);
    ResourceRequest resReq =
        BuilderUtils.newResourceRequest(mock(Priority.class),
            ResourceRequest.ANY, resource, 1);
    normalizeAndvalidateRequest(resReq, null,
        mockScheduler, rmContext, maxResource);
    fail("More than max vcores should not be accepted");
  } catch (InvalidResourceRequestException e) {
    assertEquals(GREATER_THEN_MAX_ALLOCATION, e.getInvalidResourceType());
  }
}
|
/**
 * Returns whether this projection is nested.
 *
 * <p>NOTE(review): judging from the accompanying test, this appears to be
 * {@code true} when any projected field path has more than one element —
 * confirm against the concrete implementations.
 */
public abstract boolean isNested();
|
/**
 * A projection is nested only when at least one index path contains more than
 * one element; flat paths — whether given as a single array or as
 * single-element nested arrays — are not nested.
 */
@Test
void testIsNested() {
    assertThat(Projection.of(new int[] {2, 1}).isNested()).isFalse();
    assertThat(Projection.of(new int[][] {new int[] {1}, new int[] {3}}).isNested()).isFalse();
    assertThat(
                    Projection.of(new int[][] {new int[] {1}, new int[] {1, 2}, new int[] {3}})
                            .isNested())
            .isTrue();
}
|
/**
 * Builds the merged workflow parameter map for a run by layering, in order:
 * system defaults and workflow-definition params (fresh runs only), params
 * carried over from the previous instance (restarts only, minus reserved
 * system params), run-request params, and finally user-provided restart
 * params. Placeholder params are cleaned up before conversion.
 *
 * @param instance workflow instance carrying the runtime workflow definition
 * @param request the run request (fresh run vs restart, run params, initiator)
 * @return merged, cleaned-up parameters for the new run
 */
public Map<String, Parameter> generateMergedWorkflowParams(
    WorkflowInstance instance, RunRequest request) {
  Workflow workflow = instance.getRuntimeWorkflow();
  Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>();
  Map<String, ParamDefinition> defaultWorkflowParams =
      defaultParamManager.getDefaultWorkflowParams();
  // merge workflow params for start
  if (request.isFreshRun()) {
    // merge default workflow params
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        defaultWorkflowParams,
        ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM_DEFAULT, request));
    // merge defined workflow params
    if (workflow.getParams() != null) {
      ParamsMergeHelper.mergeParams(
          allParamDefs,
          workflow.getParams(),
          ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.DEFINITION, request));
    }
  }
  // merge workflow params from previous instance for restart
  if (!request.isFreshRun() && instance.getParams() != null) {
    Map<String, ParamDefinition> previousParamDefs =
        instance.getParams().entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toDefinition()));
    // remove reserved params, which should be injected again by the system.
    for (String paramName : Constants.RESERVED_PARAM_NAMES) {
      previousParamDefs.remove(paramName);
    }
    // NOTE(review): unlike the other merges, this overload receives a bare
    // boolean (false) instead of the request — confirm this is intentional.
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        previousParamDefs,
        ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM, false));
  }
  // merge run params
  if (request.getRunParams() != null) {
    ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        request.getRunParams(),
        ParamsMergeHelper.MergeContext.workflowCreate(source, request));
  }
  // merge user provided restart run params
  getUserRestartParam(request)
      .ifPresent(
          userRestartParams -> {
            ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
            ParamsMergeHelper.mergeParams(
                allParamDefs,
                userRestartParams,
                ParamsMergeHelper.MergeContext.workflowCreate(source, request));
          });
  // cleanup any placeholder params and convert to params
  return ParamsMergeHelper.convertToParameters(ParamsMergeHelper.cleanupParams(allParamDefs));
}
|
/**
 * When a workflow defines time triggers, the cron timezone params
 * (WORKFLOW_CRON_TIMEZONE / DSL_DEFAULT_TZ) must be taken from the first
 * trigger's timezone, and the owner param from the run properties.
 */
@Test
public void testCalculateTimezonesWithTriggers() throws IOException {
  WorkflowDefinition definition =
      loadObject(
          "fixtures/parameters/sample-wf-with-time-triggers.json", WorkflowDefinition.class);
  workflow = definition.getWorkflow();
  workflowInstance.setRuntimeWorkflow(workflow);
  paramsManager = new ParamsManager(defaultsManager);
  Step step = Mockito.mock(Step.class);
  when(step.getType()).thenReturn(StepType.TITUS);
  RunProperties runProperties = new RunProperties();
  runProperties.setOwner(User.builder().name("demo").build());
  workflowInstance.setRunProperties(runProperties);
  // fresh manual run so definition/default params are merged in
  RunRequest request =
      RunRequest.builder()
          .initiator(new ManualInitiator())
          .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
          .build();
  Map<String, Parameter> workflowParams =
      paramsManager.generateMergedWorkflowParams(workflowInstance, request);
  paramExtensionRepo.reset(
      Collections.emptyMap(),
      Collections.emptyMap(),
      InstanceWrapper.from(workflowInstance, request));
  paramEvaluator.evaluateWorkflowParameters(workflowParams, workflow.getId());
  paramExtensionRepo.clear();
  // if triggers, use first trigger timezone
  Assert.assertEquals("US/Pacific", workflowParams.get("WORKFLOW_CRON_TIMEZONE").asString());
  Assert.assertEquals("US/Pacific", workflowParams.get("DSL_DEFAULT_TZ").asString());
  Assert.assertEquals("demo", workflowParams.get("owner").asString());
}
|
/**
 * Resolves the requested log file inside the configured log directory, or
 * returns null when no log directory is configured.
 */
@Override
protected File getFile(HandlerRequest<EmptyRequestBody> handlerRequest) {
    if (logDir == null) {
        return null;
    }
    // Wrapping in another File and taking getName() strips any path
    // information from the request parameter — we're solely interested in
    // the bare filename, which keeps lookups confined to logDir.
    String filename =
            new File(handlerRequest.getPathParameter(LogFileNamePathParameter.class)).getName();
    return new File(logDir, filename);
}
|
/** Requesting a valid log file name must return the file with its exact contents. */
@Test
void testGetJobManagerCustomLogsValidFilename() throws Exception {
    File actualFile = testInstance.getFile(createHandlerRequest(VALID_LOG_FILENAME));
    assertThat(actualFile).isNotNull();
    String actualContent = String.join("", Files.readAllLines(actualFile.toPath()));
    assertThat(actualContent).isEqualTo(VALID_LOG_CONTENT);
}
|
/**
 * Returns the depth of the given throwable's class relative to the exception
 * type this rule was configured with, delegating to the recursive overload
 * starting at depth 0 (0 means an exact match; each superclass hop adds 1,
 * as demonstrated by the accompanying ancestry test).
 */
public int getDepth(Throwable ex) {
  return getDepth(ex.getClass(), 0);
}
|
/**
 * A rule configured for java.lang.Exception matches subclasses transitively:
 * MyRuntimeException -> RuntimeException -> Exception is two hops.
 */
@Test
public void ancestry() {
  RollbackRule rr = new RollbackRule(java.lang.Exception.class.getName());
  // Exception -> Runtime -> MyRuntimeException
  assertThat(rr.getDepth(new MyRuntimeException(""))).isEqualTo(2);
}
|
/**
 * Sets the display name of this background job server.
 *
 * @param name the server name; must be non-empty and at most 128 characters
 * @return this configuration for chaining
 * @throws IllegalArgumentException if the name is null, empty, or too long
 */
public BackgroundJobServerConfiguration andName(String name) {
    if (isNullOrEmpty(name)) throw new IllegalArgumentException("The name can not be null or empty");
    // '>' rather than '>=': the error message promises names up to and
    // including 128 characters, so a name of exactly 128 chars is accepted.
    if (name.length() > 128) throw new IllegalArgumentException("The length of the name can not exceed 128 characters");
    this.name = name;
    return this;
}
|
/** andName(null) must be rejected with a descriptive IllegalArgumentException. */
@Test
void ifNameIsNullThenExceptionIsThrown() {
    assertThatThrownBy(() -> backgroundJobServerConfiguration.andName(null))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("The name can not be null or empty");
}
|
/**
 * Returns a selector that matches a member when at least one of the given
 * selectors matches it (logical OR, delegated to OrMemberSelector).
 *
 * @param selectors the selectors to combine
 * @return the combined OR selector
 */
public static MemberSelector or(MemberSelector... selectors) {
    return new OrMemberSelector(selectors);
}
|
/**
 * OR of (local, lite): when the mocked member is neither local nor a lite
 * member, the combined selector rejects it and both sub-selectors are
 * consulted (verified via the mock).
 */
@Test
public void testOrMemberSelector2() {
    MemberSelector selector = MemberSelectors.or(LOCAL_MEMBER_SELECTOR, LITE_MEMBER_SELECTOR);
    assertFalse(selector.select(member));
    verify(member).localMember();
    verify(member).isLiteMember();
}
|
/**
 * Maps uncaught exceptions thrown by REST resources to HTTP responses:
 * 404 for missing resources, 400 for malformed or unserializable requests,
 * 501 for unknown type ids / missing classes, 409 for conflicts, and 500
 * for everything else.
 */
@Override
public Response toResponse(Throwable e) {
    // Full stack trace only at debug level; just the message at info.
    if (log.isDebugEnabled()) {
        log.debug("Uncaught exception in REST call: ", e);
    } else if (log.isInfoEnabled()) {
        log.info("Uncaught exception in REST call: {}", e.getMessage());
    }
    // Order matters: InvalidTypeIdException is tested before the more
    // general JsonMappingException (its superclass in Jackson) so unknown
    // type ids map to 501 rather than 400.
    if (e instanceof NotFoundException) {
        return buildResponse(Response.Status.NOT_FOUND, e);
    } else if (e instanceof InvalidRequestException) {
        return buildResponse(Response.Status.BAD_REQUEST, e);
    } else if (e instanceof InvalidTypeIdException) {
        return buildResponse(Response.Status.NOT_IMPLEMENTED, e);
    } else if (e instanceof JsonMappingException) {
        return buildResponse(Response.Status.BAD_REQUEST, e);
    } else if (e instanceof ClassNotFoundException) {
        return buildResponse(Response.Status.NOT_IMPLEMENTED, e);
    } else if (e instanceof SerializationException) {
        return buildResponse(Response.Status.BAD_REQUEST, e);
    } else if (e instanceof RequestConflictException) {
        return buildResponse(Response.Status.CONFLICT, e);
    } else {
        return buildResponse(Response.Status.INTERNAL_SERVER_ERROR, e);
    }
}
|
/** A JsonMappingException must map to HTTP 400 (Bad Request). */
@Test
public void testToResponseJsonMappingException() {
    RestExceptionMapper mapper = new RestExceptionMapper();
    JsonParser parser = null;
    Response resp = mapper.toResponse(JsonMappingException.from(parser, "dummy msg"));
    // JUnit's assertEquals takes (expected, actual); the original had the
    // arguments swapped, which produces a misleading message on failure.
    assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), resp.getStatus());
}
|
/**
 * Sleeps for the given number of milliseconds.
 *
 * @param millis time to sleep; values {@code <= 0} return immediately
 * @return true if the sleep completed (or was skipped because millis was
 *         non-positive), false if the thread was interrupted while sleeping
 */
public static boolean sleep(long millis) {
    if (millis > 0) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            // Thread.sleep clears the interrupt flag when it throws; restore
            // it so callers up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
            return false;
        }
    }
    return true;
}
|
/** sleep(0) performs no actual sleep and reports success (returns true). */
@Test
public void testSleep() {
    Assert.assertTrue(ThreadUtil.sleep(0L));
}
|
/**
 * Invokes the given callable and returns its result, rethrowing any checked
 * exception wrapped in an unchecked RuntimeException with the original
 * exception preserved as the cause.
 *
 * @param callable the computation to run
 * @param <T> the result type
 * @return the callable's result
 */
public static <T> T wrap(Callable<T> callable) {
    try {
        return callable.call();
    } catch (Exception cause) {
        throw new RuntimeException(cause);
    }
}
|
/** wrap must return the callable's value unchanged when no exception is thrown. */
@Test
public void testWrap2() {
    boolean flag = Assert.wrap(() -> true);
    org.junit.Assert.assertEquals(true, flag);
}
|
/**
 * Parses "host[:port]" into an InetSocketAddress, using defaultPort when the
 * address carries no explicit port. Delegates to the three-argument overload
 * with a false flag — presumably the "resolve lazily" switch; confirm its
 * meaning in that overload.
 *
 * @param address the "host" or "host:port" string to parse
 * @param defaultPort the port to use when none is present in the address
 * @return the parsed socket address
 */
public static InetSocketAddress parseAddress(String address, int defaultPort) {
    return parseAddress(address, defaultPort, false);
}
|
/** "host:port" for IPv4 must resolve and keep the explicit port over the default. */
@Test
void shouldParseAddressForIPv4WithPort() {
    InetSocketAddress socketAddress = AddressUtils.parseAddress("127.0.0.1:8080", 80);
    assertThat(socketAddress.isUnresolved()).isFalse();
    assertThat(socketAddress.getAddress().getHostAddress()).isEqualTo("127.0.0.1");
    // explicit port in the string wins over the provided default (80)
    assertThat(socketAddress.getPort()).isEqualTo(8080);
    assertThat(socketAddress.getHostString()).isEqualTo("127.0.0.1");
}
|
/**
 * Combines the superclass hash with the hashes of this partition's value
 * and bounds.
 */
@Override
public int hashCode() {
    // Same arithmetic as before but without boxing: Long.hashCode(v) is
    // specified to equal Long.valueOf(v).hashCode(), and likewise for
    // Integer.hashCode(i), so the produced values are unchanged.
    return super.hashCode()
        + Long.hashCode(value)
        + Integer.hashCode(lowerBound)
        + Integer.hashCode(upperBound);
}
|
/** Equal RangeEdgePartition instances must produce equal hash codes. */
@Test
void requireThatHashCodeIsImplemented() {
    assertEquals(new RangeEdgePartition("foo=-10", 10, 2, 3).hashCode(),
                 new RangeEdgePartition("foo=-10", 10, 2, 3).hashCode());
}
|
/**
 * Returns the global event-bus on/off switch (the EVENT_BUS_ENABLE flag).
 *
 * @return true when the event bus is globally enabled
 */
public static boolean isEnable() {
    return EVENT_BUS_ENABLE;
}
|
/**
 * NOTE(review): this exercises the isEnable(Class) overload, not the no-arg
 * isEnable(); an event class with no registered subscribers reports disabled.
 */
@Test
public void isEnable1() throws Exception {
    Assert.assertEquals(EventBus.isEnable(NullTestEvent.class), false);
}
|
/**
 * Returns the port of the web listener address (exposed for tests).
 * NOTE(review): delegates to getListenerAddress(), so presumably only
 * meaningful after the server is started — confirm against the lifecycle.
 */
@Private
@VisibleForTesting
public int getPort() {
  return this.getListenerAddress().getPort();
}
|
/**
 * Verifies that a custom UI registered via TIMELINE_SERVICE_UI_NAMES is
 * served from disk by the ApplicationHistoryServer web app: the bytes
 * fetched over HTTP must match the on-disk file ("./pom.xml").
 */
@Test
@Timeout(240000)
void testHostedUIs() throws Exception {
  ApplicationHistoryServer historyServer = new ApplicationHistoryServer();
  Configuration config = new YarnConfiguration();
  config.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
      MemoryTimelineStore.class, TimelineStore.class);
  config.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
      MemoryTimelineStateStore.class, TimelineStateStore.class);
  config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
      "localhost:0");
  final String UI1 = "UI1";
  String connFileStr = "";
  File diskFile = new File("./pom.xml");
  // Read the expected contents with try-with-resources so the stream is
  // closed even if reading fails (the original leaked this FileInputStream).
  String diskFileStr;
  try (FileInputStream diskIn = new FileInputStream(diskFile)) {
    diskFileStr = readInputStream(diskIn);
  }
  try {
    config.set(YarnConfiguration.TIMELINE_SERVICE_UI_NAMES, UI1);
    config.set(YarnConfiguration.TIMELINE_SERVICE_UI_WEB_PATH_PREFIX + UI1,
        "/" + UI1);
    config.set(YarnConfiguration.TIMELINE_SERVICE_UI_ON_DISK_PATH_PREFIX
        + UI1, "./");
    historyServer.init(config);
    historyServer.start();
    URL url = new URL("http://localhost:" + historyServer.getPort() + "/"
        + UI1 + "/pom.xml");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.connect();
    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
    connFileStr = readInputStream(conn.getInputStream());
  } finally {
    // Always stop the server so subsequent tests get a clean port/state.
    historyServer.stop();
  }
  assertEquals(diskFileStr, connFileStr,
      "Web file contents should be the same as on disk contents");
}
|
/**
 * Creates a new app together with its default app namespace, default
 * cluster, and the namespace instances, all within one transaction.
 *
 * NOTE(review): returns the incoming {@code app} rather than the saved
 * {@code createdApp}; persistence-generated fields may be missing on the
 * returned object — confirm whether callers rely on them.
 */
@Transactional
public App createNewApp(App app) {
  String createBy = app.getDataChangeCreatedBy();
  App createdApp = appService.save(app);
  String appId = createdApp.getAppId();
  appNamespaceService.createDefaultAppNamespace(appId, createBy);
  clusterService.createDefaultCluster(appId, createBy);
  namespaceService.instanceOfAppNamespaces(appId, ConfigConsts.CLUSTER_NAME_DEFAULT, createBy);
  return app;
}
|
/**
 * Saving an app directly and then calling createNewApp with the same appId
 * must trigger a duplicate failure surfaced as a ServiceException.
 */
@Test(expected = ServiceException.class)
public void testCreateDuplicateApp() {
  String appId = "someAppId";
  App app = new App();
  app.setAppId(appId);
  app.setName("someAppName");
  String owner = "someOwnerName";
  app.setOwnerName(owner);
  app.setOwnerEmail("someOwnerName@ctrip.com");
  app.setDataChangeCreatedBy(owner);
  app.setDataChangeLastModifiedBy(owner);
  app.setDataChangeCreatedTime(new Date());
  // first insert succeeds...
  appRepository.save(app);
  // ...the second create with the same appId must throw
  adminService.createNewApp(app);
}
|
/**
 * Computes the sort order of two parallel int lists by delegating to the
 * array-based overload over the lists' backing buffers.
 *
 * @param arr1 primary sort key values
 * @param arr2 secondary values, parallel to arr1
 * @return the computed index order
 * @throws IllegalArgumentException if the lists differ in size
 */
public static int[] calcSortOrder(IntArrayList arr1, IntArrayList arr2) {
    final int count = arr1.elementsCount;
    if (count != arr2.elementsCount) {
        throw new IllegalArgumentException("Arrays must have equal size");
    }
    return calcSortOrder(arr1.buffer, arr2.buffer, count);
}
|
/**
 * calcSortOrder orders indices by arr1 ascending, breaking ties by arr2
 * ascending (see the {3,6,3,4}/{0,-1,2,-6} case); the explicit-length
 * array overload only considers the first n elements.
 */
@Test
public void testCalcSortOrder() {
    // empty input -> empty order
    assertEquals(from(), from(ArrayUtil.calcSortOrder(from(), from())));
    assertEquals(from(0), from(ArrayUtil.calcSortOrder(from(3), from(4))));
    assertEquals(from(0, 2, 3, 1), from(ArrayUtil.calcSortOrder(from(3, 6, 3, 4), from(0, -1, 2, -6))));
    assertEquals(from(2, 3, 1, 0), from(ArrayUtil.calcSortOrder(from(3, 3, 0, 0), from(0, -1, 1, 2))));
    // same data via the (int[], int[], n) overload for n = 0..4
    assertEquals(from(), from(ArrayUtil.calcSortOrder(new int[]{3, 3, 0, 0}, new int[]{0, -1, 1, 2}, 0)));
    assertEquals(from(0), from(ArrayUtil.calcSortOrder(new int[]{3, 3, 0, 0}, new int[]{0, -1, 1, 2}, 1)));
    assertEquals(from(1, 0), from(ArrayUtil.calcSortOrder(new int[]{3, 3, 0, 0}, new int[]{0, -1, 1, 2}, 2)));
    assertEquals(from(2, 1, 0), from(ArrayUtil.calcSortOrder(new int[]{3, 3, 0, 0}, new int[]{0, -1, 1, 2}, 3)));
    assertEquals(from(2, 3, 1, 0), from(ArrayUtil.calcSortOrder(new int[]{3, 3, 0, 0}, new int[]{0, -1, 1, 2}, 4)));
}
|
/**
 * Returns a new FlagSet with the same enum class, prefix, and current flags.
 * Per the accompanying test, the copy is not immutable even when this set
 * has been frozen via makeImmutable().
 *
 * @return an independent copy of this flag set
 */
public FlagSet<E> copy() {
  return new FlagSet<>(enumClass, prefix, flags);
}
|
/**
 * copy() of an immutable flag set must return a distinct, mutable set that
 * is still equal in content to the original.
 */
@Test
public void testCopy() throws Throwable {
    FlagSet<SimpleEnum> s1 =
        createFlagSet(SimpleEnum.class, KEYDOT, SimpleEnum.a, SimpleEnum.b);
    s1.makeImmutable();
    FlagSet<SimpleEnum> s2 = s1.copy();
    // a distinct object...
    Assertions.assertThat(s2)
        .describedAs("copy of %s", s1)
        .isNotSameAs(s1);
    // ...that is mutable even though the source was frozen...
    Assertions.assertThat(!s2.isImmutable())
        .describedAs("set %s is immutable", s2)
        .isTrue();
    // ...and equal in content.
    Assertions.assertThat(s1)
        .describedAs("s1 == s2")
        .isEqualTo(s2);
}
|
/**
 * Converts a scalar operator into a predicate by dispatching through the
 * visitor. A null operator converts to null.
 *
 * @param operator the operator to convert, may be null
 * @return the converted predicate, or null for null input
 */
public Predicate convert(ScalarOperator operator) {
    return operator == null ? null : operator.accept(this, null);
}
|
/**
 * An EQ binary predicate on a varchar constant must convert to a
 * LeafPredicate whose first literal is the BinaryString form of the value.
 */
@Test
public void testBinaryString() {
    ConstantOperator value = ConstantOperator.createVarchar("ttt");
    ScalarOperator op = new BinaryPredicateOperator(BinaryType.EQ, F1, value);
    Predicate result = CONVERTER.convert(op);
    Assert.assertTrue(result instanceof LeafPredicate);
    LeafPredicate leafPredicate = (LeafPredicate) result;
    // varchar literals are carried as BinaryString, not java.lang.String
    Assert.assertEquals(BinaryString.fromString("ttt"), leafPredicate.literals().get(0));
}
|
/**
 * Registers the param-mapping plugin, wiring one operator per supported
 * media type plus a fallback default operator.
 *
 * @param configurer server codec configurer supplying the JSON readers
 * @return the configured ParamMappingPlugin bean
 */
@Bean
public ShenyuPlugin paramMappingPlugin(final ServerCodecConfigurer configurer) {
    final Map<String, Operator> operators = new HashMap<>(4);
    operators.put(Constants.DEFAULT, new DefaultOperator());
    operators.put(MediaType.APPLICATION_JSON.toString(), new JsonOperator(configurer.getReaders()));
    operators.put(MediaType.APPLICATION_FORM_URLENCODED.toString(), new FormDataOperator());
    return new ParamMappingPlugin(operators);
}
|
/**
 * The auto-configuration must expose a bean named "paramMappingPlugin" that
 * is a ParamMappingPlugin reporting the expected plugin name.
 */
@Test
public void testParamMappingPlugin() {
    new ApplicationContextRunner()
        .withConfiguration(AutoConfigurations.of(ParamMappingPluginConfiguration.class, DefaultServerCodecConfigurer.class))
        .withBean(ParamMappingPluginConfigurationTest.class)
        .withPropertyValues("debug=true")
        .run(context -> {
            ShenyuPlugin plugin = context.getBean("paramMappingPlugin", ShenyuPlugin.class);
            assertNotNull(plugin);
            assertThat(plugin instanceof ParamMappingPlugin).isEqualTo(true);
            assertThat(plugin.named()).isEqualTo(PluginEnum.PARAM_MAPPING.getName());
        });
}
|
/**
 * Fails if the subject string does not contain the given sequence.
 * A null subject fails with a dedicated message; the sequence itself must
 * not be null.
 */
public void contains(@Nullable CharSequence string) {
  checkNotNull(string);
  if (actual == null) {
    // Containment cannot be checked against a null subject.
    failWithActual("expected a string that contains", string);
    return;
  }
  if (!actual.contains(string)) {
    failWithActual("expected to contain", string);
  }
}
|
/**
 * ignoringCase().doesNotContain must fail when the subject contains the
 * sequence under case-insensitive comparison, and the failure must record
 * that case was ignored.
 */
@Test
public void stringDoesNotContainIgnoringCaseFail() {
  expectFailureWhenTestingThat("äbc").ignoringCase().doesNotContain("Äb");
  assertFailureValue("expected not to contain", "Äb");
  assertThat(expectFailure.getFailure()).factKeys().contains("(case is ignored)");
}
|
Subsets and Splits
No community queries yet. The top public SQL queries from the community will appear here once they become available.