language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
apache__camel
|
components/camel-kubernetes/src/main/java/org/apache/camel/component/kubernetes/namespaces/KubernetesNamespacesEndpoint.java
|
{
"start": 1705,
"end": 2323
}
|
class ____ extends AbstractKubernetesEndpoint {
public KubernetesNamespacesEndpoint(String uri, KubernetesNamespacesComponent component, KubernetesConfiguration config) {
super(uri, component, config);
}
@Override
public Producer createProducer() throws Exception {
return new KubernetesNamespacesProducer(this);
}
@Override
public Consumer createConsumer(Processor processor) throws Exception {
Consumer consumer = new KubernetesNamespacesConsumer(this, processor);
configureConsumer(consumer);
return consumer;
}
}
|
KubernetesNamespacesEndpoint
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/customizers/ContextCustomizerDeclarativeRegistrationTests.java
|
{
"start": 1592,
"end": 2445
}
|
class ____ {
// GlobalFruitContextCustomizerFactory is registered via spring.factories
@Autowired(required = false)
@Qualifier("global$fruit")
String fruit;
@Autowired
Integer enigma;
@Autowired(required = false)
@Qualifier("foo")
String foo;
@Autowired(required = false)
@Qualifier("bar")
String bar;
@Test
void injectedBean() {
// registered globally via spring.factories
assertThat(fruit).isEqualTo("apple, banana, cherry");
// From local @ContextCustomizerFactories
assertThat(enigma).isEqualTo(42);
// @ContextCustomizerFactories is not currently supported as a repeatable annotation,
// and a directly present @ContextCustomizerFactories annotation overrides
// @ContextCustomizerFactories meta-annotations.
assertThat(foo).isNull();
assertThat(bar).isNull();
}
}
|
ContextCustomizerDeclarativeRegistrationTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/aggregate/SQLServerAggregateSupport.java
|
{
"start": 1014,
"end": 11171
}
|
class ____ extends AggregateSupportImpl {
private static final AggregateSupport JSON_INSTANCE = new SQLServerAggregateSupport( true );
private static final AggregateSupport LEGACY_INSTANCE = new SQLServerAggregateSupport( false );
private static final String JSON_QUERY_START = "json_query(";
private static final String JSON_QUERY_JSON_END = "')";
private static final int JSON_VALUE_MAX_LENGTH = 4000;
private static final String XML_QUERY_START = "cast('<e>'+cast(";
private static final String XML_QUERY_SEPARATOR = ".query('";
private static final String XML_QUERY_END = "/*') as nvarchar(max))+'</e>' as xml)";
private final boolean supportsJson;
private SQLServerAggregateSupport(boolean supportsJson) {
this.supportsJson = supportsJson;
}
public static AggregateSupport valueOf(Dialect dialect) {
return dialect.getVersion().isSameOrAfter( 13 )
? SQLServerAggregateSupport.JSON_INSTANCE
: SQLServerAggregateSupport.LEGACY_INSTANCE;
}
@Override
public String aggregateComponentCustomReadExpression(
String template,
String placeholder,
String aggregateParentReadExpression,
String columnExpression,
int aggregateColumnTypeCode,
SqlTypedMapping column,
TypeConfiguration typeConfiguration) {
switch ( aggregateColumnTypeCode ) {
case JSON:
case JSON_ARRAY:
if ( !supportsJson ) {
break;
}
final String parentJsonPartExpression;
if ( aggregateParentReadExpression.startsWith( JSON_QUERY_START )
&& aggregateParentReadExpression.endsWith( JSON_QUERY_JSON_END ) ) {
parentJsonPartExpression = aggregateParentReadExpression.substring( JSON_QUERY_START.length(), aggregateParentReadExpression.length() - JSON_QUERY_JSON_END.length() ) + ".";
}
else {
parentJsonPartExpression = aggregateParentReadExpression + ",'$.";
}
switch ( column.getJdbcMapping().getJdbcType().getDefaultSqlTypeCode() ) {
case JSON:
case JSON_ARRAY:
return template.replace(
placeholder,
"json_query(" + parentJsonPartExpression + columnExpression + "')"
);
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
case BLOB:
// We encode binary data as hex, so we have to decode here
if ( determineLength( column ) * 2 > JSON_VALUE_MAX_LENGTH ) {
// Since data is HEX encoded, multiply the max length by 2 since we need 2 hex chars per byte
return template.replace(
placeholder,
"(select convert(" + column.getColumnDefinition() + ",v,2) from openjson(" + aggregateParentReadExpression + ") with (v varchar(max) '$." + columnExpression + "'))"
);
}
else {
return template.replace(
placeholder,
"convert(" + column.getColumnDefinition() + ",json_value(" + parentJsonPartExpression + columnExpression + "'),2)"
);
}
case CHAR:
case NCHAR:
case VARCHAR:
case NVARCHAR:
case LONG32VARCHAR:
case LONG32NVARCHAR:
case CLOB:
case NCLOB:
if ( determineLength( column ) > JSON_VALUE_MAX_LENGTH ) {
return template.replace(
placeholder,
"(select * from openjson(" + aggregateParentReadExpression + ") with (v " + column.getColumnDefinition() + " '$." + columnExpression + "'))"
);
}
// Fall-through intended
case BIT:
case TINYINT:
case SMALLINT:
case INTEGER:
case BIGINT:
case REAL:
case FLOAT:
case DOUBLE:
case NUMERIC:
case DECIMAL:
case TIME:
case TIME_UTC:
case TIME_WITH_TIMEZONE:
case DATE:
case TIMESTAMP:
case TIMESTAMP_UTC:
case TIMESTAMP_WITH_TIMEZONE:
return template.replace(
placeholder,
"cast(json_value(" + parentJsonPartExpression + columnExpression + "') as " + column.getColumnDefinition() + ")"
);
default:
return template.replace(
placeholder,
"(select * from openjson(" + aggregateParentReadExpression + ") with (v " + column.getColumnDefinition() + " '$." + columnExpression + "'))"
);
}
case SQLXML:
case XML_ARRAY:
final String xmlColumn;
final String parentXmlPartExpression;
final int queryIndex;
if ( aggregateParentReadExpression.startsWith( XML_QUERY_START )
&& aggregateParentReadExpression.endsWith( XML_QUERY_END )
&& (queryIndex = aggregateParentReadExpression.indexOf( XML_QUERY_SEPARATOR )) != -1 ) {
xmlColumn = aggregateParentReadExpression.substring( XML_QUERY_START.length(), queryIndex );
parentXmlPartExpression = aggregateParentReadExpression.substring( queryIndex + XML_QUERY_SEPARATOR.length(), aggregateParentReadExpression.length() - XML_QUERY_END.length() );
}
else {
xmlColumn = aggregateParentReadExpression;
parentXmlPartExpression = "/" + XmlHelper.ROOT_TAG;
}
switch ( column.getJdbcMapping().getJdbcType().getDefaultSqlTypeCode() ) {
case SQLXML:
return template.replace(
placeholder,
XML_QUERY_START + xmlColumn + XML_QUERY_SEPARATOR + parentXmlPartExpression + "/" + columnExpression + XML_QUERY_END
);
case XML_ARRAY:
return template.replace(
placeholder,
"cast('<Collection>'+cast(" + xmlColumn + XML_QUERY_SEPARATOR + parentXmlPartExpression + "/" + columnExpression + "/*') as nvarchar(max))+'</Collection>' as xml)"
);
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
case BLOB:
// We encode binary data as hex, so we have to decode here
return template.replace(
placeholder,
"convert(" + column.getColumnDefinition() + "," + xmlColumn + ".value('(" + parentXmlPartExpression + "/" + columnExpression + "/text())[1]','nvarchar(max)'),2)"
);
default:
return template.replace(
placeholder,
xmlColumn + ".value('(" + parentXmlPartExpression + "/" + columnExpression + "/text())[1]','" + column.getColumnDefinition() + "')"
);
}
}
throw new IllegalArgumentException( "Unsupported aggregate SQL type: " + aggregateColumnTypeCode );
}
private static Long determineLength(SqlTypedMapping column) {
final Long length = column.getLength();
if ( length != null ) {
return length;
}
else {
final String columnDefinition = column.getColumnDefinition();
assert columnDefinition != null;
final int parenthesisIndex = columnDefinition.indexOf( '(' );
if ( parenthesisIndex != -1 ) {
int end;
for ( end = parenthesisIndex + 1; end < columnDefinition.length(); end++ ) {
if ( !Character.isDigit( columnDefinition.charAt( end ) ) ) {
break;
}
}
return Long.parseLong( columnDefinition.substring( parenthesisIndex + 1, end ) );
}
// Default to the max varchar length
return 8000L;
}
}
@Override
public String aggregateComponentAssignmentExpression(
String aggregateParentAssignmentExpression,
String columnExpression,
int aggregateColumnTypeCode,
Column column) {
switch ( aggregateColumnTypeCode ) {
case JSON:
case JSON_ARRAY:
if ( !supportsJson ) {
break;
}
case SQLXML:
case XML_ARRAY:
// For JSON/XML we always have to replace the whole object
return aggregateParentAssignmentExpression;
}
throw new IllegalArgumentException( "Unsupported aggregate SQL type: " + aggregateColumnTypeCode );
}
private static String jsonCustomWriteExpression(String customWriteExpression, JdbcMapping jdbcMapping) {
switch ( jdbcMapping.getJdbcType().getDefaultSqlTypeCode() ) {
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
case BLOB:
return "convert(nvarchar(max)," + customWriteExpression + ",2)";
case TIME:
return "left(" + customWriteExpression + ",8)";
case DATE:
return "format(" + customWriteExpression + ",'yyyy-MM-dd')";
case TIMESTAMP:
return "format(" + customWriteExpression + ",'yyyy-MM-ddTHH:mm:ss.fffffff')";
case TIMESTAMP_UTC:
case TIMESTAMP_WITH_TIMEZONE:
return "format(" + customWriteExpression + ",'yyyy-MM-ddTHH:mm:ss.fffffffzzz')";
case UUID:
return "cast(" + customWriteExpression + " as nvarchar(36))";
case JSON:
case JSON_ARRAY:
return "json_query(" + customWriteExpression + ")";
default:
return customWriteExpression;
}
}
private static String xmlCustomWriteExpression(String customWriteExpression, JdbcMapping jdbcMapping) {
switch ( jdbcMapping.getJdbcType().getDefaultSqlTypeCode() ) {
case BOOLEAN:
case BIT:
return "case " + customWriteExpression + " when 1 then 'true' when 0 then 'false' end";
case BINARY:
case VARBINARY:
case LONG32VARBINARY:
case BLOB:
return "convert(nvarchar(max)," + customWriteExpression + ",2)";
case TIME:
return "left(" + customWriteExpression + ",8)";
case DATE:
return "format(" + customWriteExpression + ",'yyyy-MM-dd')";
case TIMESTAMP:
return "format(" + customWriteExpression + ",'yyyy-MM-ddTHH:mm:ss.fffffff')";
case TIMESTAMP_UTC:
case TIMESTAMP_WITH_TIMEZONE:
return "format(" + customWriteExpression + ",'yyyy-MM-ddTHH:mm:ss.fffffffzzz')";
case UUID:
return "cast(" + customWriteExpression + " as nvarchar(36))";
default:
return customWriteExpression;
}
}
@Override
public boolean requiresAggregateCustomWriteExpressionRenderer(int aggregateSqlTypeCode) {
return aggregateSqlTypeCode == JSON || aggregateSqlTypeCode == SQLXML;
}
@Override
public WriteExpressionRenderer aggregateCustomWriteExpressionRenderer(
SelectableMapping aggregateColumn,
SelectableMapping[] columnsToUpdate,
TypeConfiguration typeConfiguration) {
final int aggregateSqlTypeCode = aggregateColumn.getJdbcMapping().getJdbcType().getDefaultSqlTypeCode();
switch ( aggregateSqlTypeCode ) {
case JSON:
if ( !supportsJson ) {
break;
}
return new RootJsonWriteExpression( aggregateColumn, columnsToUpdate, this, typeConfiguration );
case SQLXML:
return new RootXmlWriteExpression( aggregateColumn, columnsToUpdate );
}
throw new IllegalArgumentException( "Unsupported aggregate SQL type: " + aggregateSqlTypeCode );
}
|
SQLServerAggregateSupport
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/DefaultListableBeanFactoryTests.java
|
{
"start": 138166,
"end": 138330
}
|
class ____ implements DisposableBean {
static boolean closed;
@Override
public void destroy() {
closed = true;
}
}
public static
|
BeanWithDisposableBean
|
java
|
spring-projects__spring-security
|
config/src/main/java/org/springframework/security/config/method/MethodSecurityBeanDefinitionParser.java
|
{
"start": 3722,
"end": 14492
}
|
class ____ implements BeanDefinitionParser {
private final Log logger = LogFactory.getLog(getClass());
private static final String ATT_USE_JSR250 = "jsr250-enabled";
private static final String ATT_USE_SECURED = "secured-enabled";
private static final String ATT_USE_PREPOST = "pre-post-enabled";
private static final String ATT_AUTHORIZATION_MGR = "authorization-manager-ref";
private static final String ATT_OBSERVATION_REGISTRY_REF = "observation-registry-ref";
private static final String ATT_ACCESS = "access";
private static final String ATT_EXPRESSION = "expression";
private static final String ATT_MODE = "mode";
private static final String ATT_SECURITY_CONTEXT_HOLDER_STRATEGY_REF = "security-context-holder-strategy-ref";
@Override
public BeanDefinition parse(Element element, ParserContext pc) {
CompositeComponentDefinition compositeDef = new CompositeComponentDefinition(element.getTagName(),
pc.extractSource(element));
pc.pushContainingComponent(compositeDef);
BeanMetadataElement securityContextHolderStrategy = getSecurityContextHolderStrategy(element);
BeanMetadataElement observationRegistry = getObservationRegistry(element);
boolean prePostAnnotationsEnabled = !element.hasAttribute(ATT_USE_PREPOST)
|| "true".equals(element.getAttribute(ATT_USE_PREPOST));
boolean useAspectJ = "aspectj".equals(element.getAttribute(ATT_MODE));
if (prePostAnnotationsEnabled) {
BeanDefinitionBuilder preFilterInterceptor = BeanDefinitionBuilder
.rootBeanDefinition(PreFilterAuthorizationMethodInterceptor.class)
.setRole(BeanDefinition.ROLE_INFRASTRUCTURE)
.addPropertyValue("securityContextHolderStrategy", securityContextHolderStrategy);
BeanDefinitionBuilder preAuthorizeInterceptor = BeanDefinitionBuilder
.rootBeanDefinition(PreAuthorizeAuthorizationMethodInterceptor.class)
.setRole(BeanDefinition.ROLE_INFRASTRUCTURE)
.addPropertyValue("securityContextHolderStrategy", securityContextHolderStrategy)
.addPropertyValue("observationRegistry", observationRegistry);
BeanDefinitionBuilder postAuthorizeInterceptor = BeanDefinitionBuilder
.rootBeanDefinition(PostAuthorizeAuthorizationMethodInterceptor.class)
.setRole(BeanDefinition.ROLE_INFRASTRUCTURE)
.addPropertyValue("securityContextHolderStrategy", securityContextHolderStrategy)
.addPropertyValue("observationRegistry", observationRegistry);
BeanDefinitionBuilder postFilterInterceptor = BeanDefinitionBuilder
.rootBeanDefinition(PostFilterAuthorizationMethodInterceptor.class)
.setRole(BeanDefinition.ROLE_INFRASTRUCTURE)
.addPropertyValue("securityContextHolderStrategy", securityContextHolderStrategy);
Element expressionHandlerElt = DomUtils.getChildElementByTagName(element, Elements.EXPRESSION_HANDLER);
if (expressionHandlerElt != null) {
String expressionHandlerRef = expressionHandlerElt.getAttribute("ref");
preFilterInterceptor.addPropertyReference("expressionHandler", expressionHandlerRef);
preAuthorizeInterceptor.addPropertyReference("expressionHandler", expressionHandlerRef);
postAuthorizeInterceptor.addPropertyReference("expressionHandler", expressionHandlerRef);
postFilterInterceptor.addPropertyReference("expressionHandler", expressionHandlerRef);
}
else {
BeanDefinition expressionHandler = BeanDefinitionBuilder
.rootBeanDefinition(MethodSecurityExpressionHandlerBean.class)
.getBeanDefinition();
preFilterInterceptor.addPropertyValue("expressionHandler", expressionHandler);
preAuthorizeInterceptor.addPropertyValue("expressionHandler", expressionHandler);
postAuthorizeInterceptor.addPropertyValue("expressionHandler", expressionHandler);
postFilterInterceptor.addPropertyValue("expressionHandler", expressionHandler);
}
pc.getRegistry()
.registerBeanDefinition("preFilterAuthorizationMethodInterceptor",
preFilterInterceptor.getBeanDefinition());
pc.getRegistry()
.registerBeanDefinition("preAuthorizeAuthorizationMethodInterceptor",
preAuthorizeInterceptor.getBeanDefinition());
pc.getRegistry()
.registerBeanDefinition("postAuthorizeAuthorizationMethodInterceptor",
postAuthorizeInterceptor.getBeanDefinition());
pc.getRegistry()
.registerBeanDefinition("postFilterAuthorizationMethodInterceptor",
postFilterInterceptor.getBeanDefinition());
}
boolean securedEnabled = "true".equals(element.getAttribute(ATT_USE_SECURED));
if (securedEnabled) {
BeanDefinitionBuilder securedInterceptor = BeanDefinitionBuilder
.rootBeanDefinition(SecuredAuthorizationMethodInterceptor.class)
.setRole(BeanDefinition.ROLE_INFRASTRUCTURE)
.addPropertyValue("securityContextHolderStrategy", securityContextHolderStrategy)
.addPropertyValue("observationRegistry", observationRegistry);
pc.getRegistry()
.registerBeanDefinition("securedAuthorizationMethodInterceptor",
securedInterceptor.getBeanDefinition());
}
boolean jsr250Enabled = "true".equals(element.getAttribute(ATT_USE_JSR250));
if (jsr250Enabled) {
BeanDefinitionBuilder jsr250Interceptor = BeanDefinitionBuilder
.rootBeanDefinition(Jsr250AuthorizationMethodInterceptor.class)
.setRole(BeanDefinition.ROLE_INFRASTRUCTURE)
.addPropertyValue("securityContextHolderStrategy", securityContextHolderStrategy)
.addPropertyValue("observationRegistry", observationRegistry);
pc.getRegistry()
.registerBeanDefinition("jsr250AuthorizationMethodInterceptor", jsr250Interceptor.getBeanDefinition());
}
Map<Pointcut, BeanMetadataElement> managers = new ManagedMap<>();
List<Element> methods = DomUtils.getChildElementsByTagName(element, Elements.PROTECT_POINTCUT);
if (useAspectJ) {
if (!methods.isEmpty()) {
pc.getReaderContext()
.error("Cannot use <protect-pointcut> and mode='aspectj' together", pc.extractSource(element));
}
registerInterceptors(pc.getRegistry());
}
else {
if (!methods.isEmpty()) {
for (Element protectElt : methods) {
managers.put(pointcut(protectElt), authorizationManager(element, protectElt));
}
BeanDefinitionBuilder protectPointcutInterceptor = BeanDefinitionBuilder
.rootBeanDefinition(AuthorizationManagerBeforeMethodInterceptor.class)
.setRole(BeanDefinition.ROLE_INFRASTRUCTURE)
.addPropertyValue("securityContextHolderStrategy", securityContextHolderStrategy)
.addConstructorArgValue(pointcut(managers.keySet()))
.addConstructorArgValue(authorizationManager(managers));
pc.getRegistry()
.registerBeanDefinition("protectPointcutInterceptor",
protectPointcutInterceptor.getBeanDefinition());
}
AopNamespaceUtils.registerAutoProxyCreatorIfNecessary(pc, element);
}
pc.popAndRegisterContainingComponent();
return null;
}
private BeanMetadataElement getObservationRegistry(Element methodSecurityElmt) {
String holderStrategyRef = methodSecurityElmt.getAttribute(ATT_OBSERVATION_REGISTRY_REF);
if (StringUtils.hasText(holderStrategyRef)) {
return new RuntimeBeanReference(holderStrategyRef);
}
return BeanDefinitionBuilder.rootBeanDefinition(ObservationRegistryFactory.class).getBeanDefinition();
}
private BeanMetadataElement getSecurityContextHolderStrategy(Element methodSecurityElmt) {
String holderStrategyRef = methodSecurityElmt.getAttribute(ATT_SECURITY_CONTEXT_HOLDER_STRATEGY_REF);
if (StringUtils.hasText(holderStrategyRef)) {
return new RuntimeBeanReference(holderStrategyRef);
}
return BeanDefinitionBuilder.rootBeanDefinition(SecurityContextHolderStrategyFactory.class).getBeanDefinition();
}
private Pointcut pointcut(Element protectElt) {
String expression = protectElt.getAttribute(ATT_EXPRESSION);
expression = replaceBooleanOperators(expression);
return new AspectJMethodMatcher(expression);
}
private Pointcut pointcut(Collection<Pointcut> pointcuts) {
Pointcut result = null;
for (Pointcut pointcut : pointcuts) {
if (result == null) {
result = pointcut;
}
else {
result = Pointcuts.union(result, pointcut);
}
}
return result;
}
private String replaceBooleanOperators(String expression) {
expression = StringUtils.replace(expression, " and ", " && ");
expression = StringUtils.replace(expression, " or ", " || ");
expression = StringUtils.replace(expression, " not ", " ! ");
return expression;
}
private BeanMetadataElement authorizationManager(Element element, Element protectElt) {
String authorizationManager = element.getAttribute(ATT_AUTHORIZATION_MGR);
if (StringUtils.hasText(authorizationManager)) {
return new RuntimeBeanReference(authorizationManager);
}
String access = protectElt.getAttribute(ATT_ACCESS);
return BeanDefinitionBuilder.rootBeanDefinition(MethodExpressionAuthorizationManager.class)
.addConstructorArgValue(access)
.getBeanDefinition();
}
private BeanMetadataElement authorizationManager(Map<Pointcut, BeanMetadataElement> managers) {
return BeanDefinitionBuilder.rootBeanDefinition(PointcutDelegatingAuthorizationManager.class)
.addConstructorArgValue(managers)
.getBeanDefinition();
}
private void registerInterceptors(BeanDefinitionRegistry registry) {
registerBeanDefinition("preFilterAuthorizationMethodInterceptor",
"org.springframework.security.authorization.method.aspectj.PreFilterAspect", "preFilterAspect$0",
registry);
registerBeanDefinition("postFilterAuthorizationMethodInterceptor",
"org.springframework.security.authorization.method.aspectj.PostFilterAspect", "postFilterAspect$0",
registry);
registerBeanDefinition("preAuthorizeAuthorizationMethodInterceptor",
"org.springframework.security.authorization.method.aspectj.PreAuthorizeAspect", "preAuthorizeAspect$0",
registry);
registerBeanDefinition("postAuthorizeAuthorizationMethodInterceptor",
"org.springframework.security.authorization.method.aspectj.PostAuthorizeAspect",
"postAuthorizeAspect$0", registry);
registerBeanDefinition("securedAuthorizationMethodInterceptor",
"org.springframework.security.authorization.method.aspectj.SecuredAspect", "securedAspect$0", registry);
}
private void registerBeanDefinition(String beanName, String aspectClassName, String aspectBeanName,
BeanDefinitionRegistry registry) {
if (!registry.containsBeanDefinition(beanName)) {
return;
}
BeanDefinition interceptor = registry.getBeanDefinition(beanName);
BeanDefinitionBuilder aspect = BeanDefinitionBuilder.rootBeanDefinition(aspectClassName);
aspect.setFactoryMethod("aspectOf");
aspect.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
aspect.addPropertyValue("securityInterceptor", interceptor);
registry.registerBeanDefinition(aspectBeanName, aspect.getBeanDefinition());
}
public static final
|
MethodSecurityBeanDefinitionParser
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/AnyGetterTest.java
|
{
"start": 3544,
"end": 3873
}
|
class ____ extends StdScalarSerializer<String>
{
public MyUCSerializer() { super(String.class); }
@Override
public void serialize(String value, JsonGenerator gen,
SerializationContext provider) {
gen.writeString(value.toUpperCase());
}
}
static
|
MyUCSerializer
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ImmutableCheckerTest.java
|
{
"start": 49940,
"end": 50168
}
|
class ____<T> {}
@Test
public void incompleteClassPath() {
compilationHelper
.addSourceLines(
"Test.java",
"import " + ClassPathTest.class.getCanonicalName() + ";",
"
|
ClassPathTest
|
java
|
spring-projects__spring-framework
|
spring-websocket/src/test/java/org/springframework/web/socket/messaging/StompSubProtocolHandlerTests.java
|
{
"start": 3275,
"end": 23359
}
|
class ____ {
private static final byte[] EMPTY_PAYLOAD = new byte[0];
private StompSubProtocolHandler protocolHandler = new StompSubProtocolHandler();
private TestWebSocketSession session = new TestWebSocketSession();
private MessageChannel channel = mock();
@SuppressWarnings("rawtypes")
private ArgumentCaptor<Message> messageCaptor = ArgumentCaptor.forClass(Message.class);
@BeforeEach
void setup() {
this.session.setId("s1");
this.session.setPrincipal(new TestPrincipal("joe"));
given(this.channel.send(any())).willReturn(true);
}
@Test
void handleMessageToClientWithConnectedFrame() {
StompHeaderAccessor headers = StompHeaderAccessor.create(StompCommand.CONNECTED);
Message<byte[]> message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, message);
assertThat(this.session.getSentMessages()).hasSize(1);
WebSocketMessage<?> textMessage = this.session.getSentMessages().get(0);
assertThat(textMessage.getPayload()).isEqualTo("""
CONNECTED
user-name:joe
\u0000""");
}
@Test
void handleMessageToClientWithDestinationUserNameProvider() {
this.session.setPrincipal(new UniqueUser("joe"));
StompHeaderAccessor headers = StompHeaderAccessor.create(StompCommand.CONNECTED);
Message<byte[]> message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, message);
assertThat(this.session.getSentMessages()).hasSize(1);
WebSocketMessage<?> textMessage = this.session.getSentMessages().get(0);
assertThat(textMessage.getPayload()).isEqualTo("""
CONNECTED
user-name:joe
\u0000""");
}
@Test
void handleMessageToClientWithSimpConnectAck() {
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.CONNECT);
accessor.setHeartbeat(10000, 10000);
accessor.setAcceptVersion("1.0,1.1,1.2");
Message<?> connectMessage = MessageBuilder.createMessage(EMPTY_PAYLOAD, accessor.getMessageHeaders());
SimpMessageHeaderAccessor ackAccessor = SimpMessageHeaderAccessor.create(SimpMessageType.CONNECT_ACK);
ackAccessor.setHeader(SimpMessageHeaderAccessor.CONNECT_MESSAGE_HEADER, connectMessage);
ackAccessor.setHeader(SimpMessageHeaderAccessor.HEART_BEAT_HEADER, new long[] {15000, 15000});
Message<byte[]> ackMessage = MessageBuilder.createMessage(EMPTY_PAYLOAD, ackAccessor.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, ackMessage);
assertThat(this.session.getSentMessages()).hasSize(1);
TextMessage actual = (TextMessage) this.session.getSentMessages().get(0);
assertThat(actual.getPayload()).isEqualTo("""
CONNECTED
version:1.2
heart-beat:15000,15000
user-name:joe
\u0000""");
}
@Test
void handleMessageToClientWithSimpConnectAckDefaultHeartBeat() {
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.CONNECT);
accessor.setHeartbeat(10000, 10000);
accessor.setAcceptVersion("1.0");
Message<?> connectMessage = MessageBuilder.createMessage(EMPTY_PAYLOAD, accessor.getMessageHeaders());
SimpMessageHeaderAccessor ackAccessor = SimpMessageHeaderAccessor.create(SimpMessageType.CONNECT_ACK);
ackAccessor.setHeader(SimpMessageHeaderAccessor.CONNECT_MESSAGE_HEADER, connectMessage);
Message<byte[]> ackMessage = MessageBuilder.createMessage(EMPTY_PAYLOAD, ackAccessor.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, ackMessage);
assertThat(this.session.getSentMessages()).hasSize(1);
TextMessage actual = (TextMessage) this.session.getSentMessages().get(0);
assertThat(actual.getPayload()).isEqualTo("""
CONNECTED
version:1.0
heart-beat:0,0
user-name:joe
\u0000""");
}
@Test
void handleMessageToClientWithSimpDisconnectAck() {
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.DISCONNECT);
Message<?> connectMessage = MessageBuilder.createMessage(EMPTY_PAYLOAD, accessor.getMessageHeaders());
SimpMessageHeaderAccessor ackAccessor = SimpMessageHeaderAccessor.create(SimpMessageType.DISCONNECT_ACK);
ackAccessor.setHeader(SimpMessageHeaderAccessor.DISCONNECT_MESSAGE_HEADER, connectMessage);
Message<byte[]> ackMessage = MessageBuilder.createMessage(EMPTY_PAYLOAD, ackAccessor.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, ackMessage);
assertThat(this.session.getSentMessages()).hasSize(1);
TextMessage actual = (TextMessage) this.session.getSentMessages().get(0);
assertThat(actual.getPayload()).isEqualTo("""
ERROR
message:Session closed.
content-length:0
\u0000""");
}
@Test
void handleMessageToClientWithSimpDisconnectAckAndReceipt() {
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.DISCONNECT);
accessor.setReceipt("message-123");
Message<?> connectMessage = MessageBuilder.createMessage(EMPTY_PAYLOAD, accessor.getMessageHeaders());
SimpMessageHeaderAccessor ackAccessor = SimpMessageHeaderAccessor.create(SimpMessageType.DISCONNECT_ACK);
ackAccessor.setHeader(SimpMessageHeaderAccessor.DISCONNECT_MESSAGE_HEADER, connectMessage);
Message<byte[]> ackMessage = MessageBuilder.createMessage(EMPTY_PAYLOAD, ackAccessor.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, ackMessage);
assertThat(this.session.getSentMessages()).hasSize(1);
TextMessage actual = (TextMessage) this.session.getSentMessages().get(0);
assertThat(actual.getPayload()).isEqualTo("""
RECEIPT
receipt-id:message-123
\u0000""");
}
@Test
void handleMessageToClientWithSimpHeartbeat() {
SimpMessageHeaderAccessor accessor = SimpMessageHeaderAccessor.create(SimpMessageType.HEARTBEAT);
accessor.setSessionId("s1");
accessor.setUser(new TestPrincipal("joe"));
Message<byte[]> ackMessage = MessageBuilder.createMessage(EMPTY_PAYLOAD, accessor.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, ackMessage);
assertThat(this.session.getSentMessages()).hasSize(1);
TextMessage actual = (TextMessage) this.session.getSentMessages().get(0);
assertThat(actual.getPayload()).isEqualTo("\n");
}
@Test
void handleMessageToClientWithHeartbeatSuppressingSockJsHeartbeat() throws IOException {
SockJsSession sockJsSession = mock();
given(sockJsSession.getId()).willReturn("s1");
StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.CONNECTED);
accessor.setHeartbeat(0, 10);
Message<byte[]> message = MessageBuilder.createMessage(EMPTY_PAYLOAD, accessor.getMessageHeaders());
this.protocolHandler.handleMessageToClient(sockJsSession, message);
verify(sockJsSession).getId();
verify(sockJsSession).getPrincipal();
verify(sockJsSession).disableHeartbeat();
verify(sockJsSession).sendMessage(any(WebSocketMessage.class));
verifyNoMoreInteractions(sockJsSession);
sockJsSession = mock();
given(sockJsSession.getId()).willReturn("s1");
accessor = StompHeaderAccessor.create(StompCommand.CONNECTED);
accessor.setHeartbeat(0, 0);
message = MessageBuilder.createMessage(EMPTY_PAYLOAD, accessor.getMessageHeaders());
this.protocolHandler.handleMessageToClient(sockJsSession, message);
verify(sockJsSession).getId();
verify(sockJsSession).getPrincipal();
verify(sockJsSession).sendMessage(any(WebSocketMessage.class));
verifyNoMoreInteractions(sockJsSession);
}
@Test
void handleMessageToClientWithUserDestination() {
StompHeaderAccessor headers = StompHeaderAccessor.create(StompCommand.MESSAGE);
headers.setMessageId("mess0");
headers.setSubscriptionId("sub0");
headers.setDestination("/queue/foo-user123");
headers.setNativeHeader(StompHeaderAccessor.ORIGINAL_DESTINATION, "/user/queue/foo");
Message<byte[]> message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, message);
assertThat(this.session.getSentMessages()).hasSize(1);
WebSocketMessage<?> textMessage = this.session.getSentMessages().get(0);
assertThat(((String) textMessage.getPayload())).contains("destination:/user/queue/foo\n");
assertThat(((String) textMessage.getPayload())).doesNotContain(SimpMessageHeaderAccessor.ORIGINAL_DESTINATION);
}
// SPR-12475
@Test
void handleMessageToClientWithBinaryWebSocketMessage() {
StompHeaderAccessor headers = StompHeaderAccessor.create(StompCommand.MESSAGE);
headers.setMessageId("mess0");
headers.setSubscriptionId("sub0");
headers.setContentType(MimeTypeUtils.APPLICATION_OCTET_STREAM);
headers.setDestination("/queue/foo");
// Non-empty payload
byte[] payload = new byte[1];
Message<byte[]> message = MessageBuilder.createMessage(payload, headers.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, message);
assertThat(this.session.getSentMessages()).hasSize(1);
WebSocketMessage<?> webSocketMessage = this.session.getSentMessages().get(0);
assertThat(webSocketMessage).isInstanceOf(BinaryMessage.class);
// Empty payload
payload = EMPTY_PAYLOAD;
message = MessageBuilder.createMessage(payload, headers.getMessageHeaders());
this.protocolHandler.handleMessageToClient(this.session, message);
assertThat(this.session.getSentMessages()).hasSize(2);
webSocketMessage = this.session.getSentMessages().get(1);
assertThat(webSocketMessage).isInstanceOf(TextMessage.class);
}
	@Test
	void handleMessageFromClient() {
		// Inbound STOMP frame with login/passcode/version/heart-beat headers.
		TextMessage textMessage = StompTextMessageBuilder.create(StompCommand.STOMP).headers(
				"login:guest", "passcode:guest", "accept-version:1.1,1.0", "heart-beat:10000,10000").build();

		this.protocolHandler.afterSessionStarted(this.session, this.channel);
		this.protocolHandler.handleMessageFromClient(this.session, textMessage, this.channel);

		// The frame must be decoded and forwarded to the inbound channel.
		verify(this.channel).send(this.messageCaptor.capture());
		Message<?> actual = this.messageCaptor.getValue();
		assertThat(actual).isNotNull();

		// Session id, session attributes, user, and heart-beat settings are exposed through the
		// SimpMessageHeaderAccessor view of the forwarded message's headers.
		assertThat(SimpMessageHeaderAccessor.getSessionId(actual.getHeaders())).isEqualTo("s1");
		assertThat(SimpMessageHeaderAccessor.getSessionAttributes(actual.getHeaders())).isNotNull();
		assertThat(SimpMessageHeaderAccessor.getUser(actual.getHeaders())).isNotNull();
		assertThat(SimpMessageHeaderAccessor.getUser(actual.getHeaders()).getName()).isEqualTo("joe");
		assertThat(SimpMessageHeaderAccessor.getHeartbeat(actual.getHeaders())).isNotNull();
		assertThat(SimpMessageHeaderAccessor.getHeartbeat(actual.getHeaders())).isEqualTo(new long[] {10000, 10000});

		// STOMP-specific headers from the frame are preserved as well.
		StompHeaderAccessor stompAccessor = StompHeaderAccessor.wrap(actual);
		assertThat(stompAccessor.getCommand()).isEqualTo(StompCommand.STOMP);
		assertThat(stompAccessor.getLogin()).isEqualTo("guest");
		assertThat(stompAccessor.getPasscode()).isEqualTo("guest");
		assertThat(stompAccessor.getHeartbeat()).isEqualTo(new long[] {10000, 10000});
		assertThat(stompAccessor.getAcceptVersion()).isEqualTo(new HashSet<>(Arrays.asList("1.1","1.0")));

		// Nothing is sent back to the client at this point.
		assertThat(this.session.getSentMessages()).isEmpty();
	}
	@Test
	void handleMessageFromClientWithImmutableMessageInterceptor() {
		// Records whether the message header accessor is still mutable at preSend time.
		AtomicReference<Boolean> mutable = new AtomicReference<>();
		ExecutorSubscribableChannel channel = new ExecutorSubscribableChannel();
		channel.addInterceptor(new ChannelInterceptor() {
			@Override
			public Message<?> preSend(Message<?> message, MessageChannel channel) {
				mutable.set(MessageHeaderAccessor.getAccessor(message, MessageHeaderAccessor.class).isMutable());
				return message;
			}
		});
		// Registered after the recording interceptor above.
		channel.addInterceptor(new ImmutableMessageChannelInterceptor());

		StompSubProtocolHandler handler = new StompSubProtocolHandler();
		handler.afterSessionStarted(this.session, channel);

		TextMessage message = StompTextMessageBuilder.create(StompCommand.CONNECT).build();
		handler.handleMessageFromClient(this.session, message, channel);

		// With an ImmutableMessageChannelInterceptor present, earlier interceptors
		// observe a mutable header accessor.
		assertThat(mutable.get()).isNotNull();
		assertThat(mutable.get()).isTrue();
	}
@Test
void handleMessageFromClientWithoutImmutableMessageInterceptor() {
AtomicReference<Boolean> mutable = new AtomicReference<>();
ExecutorSubscribableChannel channel = new ExecutorSubscribableChannel();
channel.addInterceptor(new ChannelInterceptor() {
@Override
public Message<?> preSend(Message<?> message, MessageChannel channel) {
mutable.set(MessageHeaderAccessor.getAccessor(message, MessageHeaderAccessor.class).isMutable());
return message;
}
});
StompSubProtocolHandler handler = new StompSubProtocolHandler();
handler.afterSessionStarted(this.session, channel);
TextMessage message = StompTextMessageBuilder.create(StompCommand.CONNECT).build();
handler.handleMessageFromClient(this.session, message, channel);
assertThat(mutable.get()).isNotNull();
assertThat(mutable.get()).isFalse();
}
	@Test // SPR-14690
	void handleMessageFromClientWithTokenAuthentication() {
		// Channel with an interceptor that authenticates the user from the inbound frame.
		ExecutorSubscribableChannel channel = new ExecutorSubscribableChannel();
		channel.addInterceptor(new AuthenticationInterceptor("__pete__@gmail.com"));
		channel.addInterceptor(new ImmutableMessageChannelInterceptor());

		TestMessageHandler messageHandler = new TestMessageHandler();
		channel.subscribe(messageHandler);

		StompSubProtocolHandler handler = new StompSubProtocolHandler();
		handler.afterSessionStarted(this.session, channel);

		TextMessage wsMessage = StompTextMessageBuilder.create(StompCommand.CONNECT).build();
		handler.handleMessageFromClient(this.session, wsMessage, channel);

		// The user set by the interceptor must appear in the forwarded message's headers.
		assertThat(messageHandler.getMessages()).hasSize(1);
		Message<?> message = messageHandler.getMessages().get(0);
		Principal user = SimpMessageHeaderAccessor.getUser(message.getHeaders());
		assertThat(user).isNotNull();
		assertThat(user.getName()).isEqualTo("__pete__@gmail.com");

		// The CONNECTED frame sent back to the client must carry the authenticated user name.
		StompHeaderAccessor accessor = StompHeaderAccessor.create(StompCommand.CONNECTED);
		message = MessageBuilder.createMessage(EMPTY_PAYLOAD, accessor.getMessageHeaders());
		handler.handleMessageToClient(this.session, message);

		assertThat(this.session.getSentMessages()).hasSize(1);
		WebSocketMessage<?> textMessage = this.session.getSentMessages().get(0);
		assertThat(textMessage.getPayload()).isEqualTo("""
				CONNECTED
				user-name:__pete__@gmail.com
				\u0000""");
	}
@Test
void handleMessageFromClientWithInvalidStompCommand() {
TextMessage textMessage = new TextMessage("FOO\n\n\0");
this.protocolHandler.afterSessionStarted(this.session, this.channel);
this.protocolHandler.handleMessageFromClient(this.session, textMessage, this.channel);
verifyNoInteractions(this.channel);
assertThat(this.session.getSentMessages()).hasSize(1);
TextMessage actual = (TextMessage) this.session.getSentMessages().get(0);
assertThat(actual.getPayload()).startsWith("ERROR");
}
	@Test
	void eventPublication() {
		// Captures all application events published by the protocol handler.
		TestPublisher publisher = new TestPublisher();

		this.protocolHandler.setApplicationEventPublisher(publisher);
		this.protocolHandler.afterSessionStarted(this.session, this.channel);

		// CONNECT from the client.
		StompHeaderAccessor headers = StompHeaderAccessor.create(StompCommand.CONNECT);
		Message<byte[]> message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
		TextMessage textMessage = new TextMessage(new StompEncoder().encode(message));
		this.protocolHandler.handleMessageFromClient(this.session, textMessage, this.channel);

		// CONNECTED back to the client.
		headers = StompHeaderAccessor.create(StompCommand.CONNECTED);
		message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
		this.protocolHandler.handleMessageToClient(this.session, message);

		// SUBSCRIBE from the client.
		headers = StompHeaderAccessor.create(StompCommand.SUBSCRIBE);
		message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
		textMessage = new TextMessage(new StompEncoder().encode(message));
		this.protocolHandler.handleMessageFromClient(this.session, textMessage, this.channel);

		// UNSUBSCRIBE from the client.
		headers = StompHeaderAccessor.create(StompCommand.UNSUBSCRIBE);
		message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
		textMessage = new TextMessage(new StompEncoder().encode(message));
		this.protocolHandler.handleMessageFromClient(this.session, textMessage, this.channel);

		// Session ends.
		this.protocolHandler.afterSessionEnded(this.session, CloseStatus.BAD_DATA, this.channel);

		// Exactly one application event per lifecycle step, in order.
		assertThat(publisher.events.size()).as("Unexpected events " + publisher.events).isEqualTo(5);
		assertThat(publisher.events.get(0).getClass()).isEqualTo(SessionConnectEvent.class);
		assertThat(publisher.events.get(1).getClass()).isEqualTo(SessionConnectedEvent.class);
		assertThat(publisher.events.get(2).getClass()).isEqualTo(SessionSubscribeEvent.class);
		assertThat(publisher.events.get(3).getClass()).isEqualTo(SessionUnsubscribeEvent.class);
		assertThat(publisher.events.get(4).getClass()).isEqualTo(SessionDisconnectEvent.class);
	}
	@Test
	void eventPublicationWithExceptions() {
		ApplicationEventPublisher publisher = mock();

		this.protocolHandler.setApplicationEventPublisher(publisher);
		this.protocolHandler.afterSessionStarted(this.session, this.channel);

		// CONNECT from the client is forwarded to the inbound channel.
		StompHeaderAccessor headers = StompHeaderAccessor.create(StompCommand.CONNECT);
		Message<byte[]> message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
		TextMessage textMessage = new TextMessage(new StompEncoder().encode(message));
		this.protocolHandler.handleMessageFromClient(this.session, textMessage, this.channel);

		verify(this.channel).send(this.messageCaptor.capture());
		Message<?> actual = this.messageCaptor.getValue();
		assertThat(actual).isNotNull();
		assertThat(StompHeaderAccessor.wrap(actual).getCommand()).isEqualTo(StompCommand.CONNECT);
		// Clear recorded interactions so the DISCONNECT verification below is isolated.
		reset(this.channel);

		// CONNECTED is sent to the client with the user name header added.
		headers = StompHeaderAccessor.create(StompCommand.CONNECTED);
		message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
		this.protocolHandler.handleMessageToClient(this.session, message);

		assertThat(this.session.getSentMessages()).hasSize(1);
		textMessage = (TextMessage) this.session.getSentMessages().get(0);
		assertThat(textMessage.getPayload()).isEqualTo("""
				CONNECTED
				user-name:joe
				\u0000""");

		// Ending the session produces a DISCONNECT message on the channel with
		// session id and user propagated.
		this.protocolHandler.afterSessionEnded(this.session, CloseStatus.BAD_DATA, this.channel);

		verify(this.channel).send(this.messageCaptor.capture());
		actual = this.messageCaptor.getValue();
		assertThat(actual).isNotNull();
		StompHeaderAccessor accessor = StompHeaderAccessor.wrap(actual);
		assertThat(accessor.getCommand()).isEqualTo(StompCommand.DISCONNECT);
		assertThat(accessor.getSessionId()).isEqualTo("s1");
		assertThat(accessor.getUser().getName()).isEqualTo("joe");
	}
	@Test
	void webSocketScope() {
		Runnable runnable = mock();
		// Pre-populate session-scoped SimpAttributes with a value and a destruction callback.
		SimpAttributes simpAttributes = new SimpAttributes(this.session.getId(), this.session.getAttributes());
		simpAttributes.setAttribute("name", "value");
		simpAttributes.registerDestructionCallback("name", runnable);

		// Channel that asserts the SimpAttributes are bound to the current thread during send.
		MessageChannel testChannel = new MessageChannel() {
			@Override
			public boolean send(Message<?> message) {
				SimpAttributes simpAttributes = SimpAttributesContextHolder.currentAttributes();
				assertThat(simpAttributes.getAttribute("name")).isEqualTo("value");
				return true;
			}
			@Override
			public boolean send(Message<?> message, long timeout) {
				return false;
			}
		};

		this.protocolHandler.afterSessionStarted(this.session, this.channel);

		StompHeaderAccessor headers = StompHeaderAccessor.create(StompCommand.CONNECT);
		Message<byte[]> message = MessageBuilder.createMessage(EMPTY_PAYLOAD, headers.getMessageHeaders());
		TextMessage textMessage = new TextMessage(new StompEncoder().encode(message));

		// Handling the frame triggers the send(..) assertion above; nothing goes to the client.
		this.protocolHandler.handleMessageFromClient(this.session, textMessage, testChannel);
		assertThat(session.getSentMessages()).isEqualTo(Collections.<WebSocketMessage<?>>emptyList());

		// Ending the session must run the registered destruction callback exactly once.
		this.protocolHandler.afterSessionEnded(this.session, CloseStatus.BAD_DATA, testChannel);
		assertThat(this.session.getSentMessages()).isEqualTo(Collections.<WebSocketMessage<?>>emptyList());
		verify(runnable, times(1)).run();
	}
private static
|
StompSubProtocolHandlerTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java
|
{
"start": 4026,
"end": 41858
}
|
// Test plugin that registers a fake system index. The class name must be
// FakeSystemIndex: it is referenced as FakeSystemIndex.SYSTEM_INDEX_NAME by
// testAutoFollowDoNotFollowSystemIndices below (the placeholder "____" did not compile).
class FakeSystemIndex extends Plugin implements SystemIndexPlugin {

    public static final String SYSTEM_INDEX_NAME = ".fake-system-index";

    @Override
    public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
        // Unmanaged descriptor matching any index whose name starts with the system index name.
        return Collections.singletonList(SystemIndexDescriptorUtils.createUnmanaged(SYSTEM_INDEX_NAME + "*", "test index"));
    }

    @Override
    public String getFeatureName() {
        return "fake system index";
    }

    @Override
    public String getFeatureDescription() {
        return "fake system index";
    }
}
    /**
     * Indices created before an auto-follow pattern exists ("logs-201812") are not followed;
     * indices created afterwards that match a pattern ("logs-201901", "transactions-201901")
     * are followed; non-matching indices ("metrics-201901") are ignored.
     */
    public void testAutoFollow() throws Exception {
        Settings leaderIndexSettings = indexSettings(1, 0).build();
        createLeaderIndex("logs-201812", leaderIndexSettings);

        // Enabling auto following: either one combined pattern or two separate patterns.
        if (randomBoolean()) {
            putAutoFollowPatterns("my-pattern", new String[] { "logs-*", "transactions-*" });
        } else {
            putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" });
            putAutoFollowPatterns("my-pattern2", new String[] { "transactions-*" });
        }

        createLeaderIndex("metrics-201901", leaderIndexSettings);

        createLeaderIndex("logs-201901", leaderIndexSettings);
        ESIntegTestCase.awaitIndexExists("copy-logs-201901", followerClient());
        createLeaderIndex("transactions-201901", leaderIndexSettings);
        assertLongBusy(() -> {
            AutoFollowStats autoFollowStats = getAutoFollowStats();
            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(2L));
            assertTrue(ESIntegTestCase.indexExists("copy-transactions-201901", followerClient()));
        });

        // Not matched by any pattern / created before the pattern existed.
        assertFalse(ESIntegTestCase.indexExists("copy-metrics-201901", followerClient()));
        assertFalse(ESIntegTestCase.indexExists("copy-logs-201812", followerClient()));
    }
    /**
     * Even when a pattern (".*") matches system indices, the auto-follower must skip them.
     */
    public void testAutoFollowDoNotFollowSystemIndices() throws Exception {
        putAutoFollowPatterns("my-pattern", new String[] { ".*", "logs-*" });

        // Trigger system index creation
        leaderClient().prepareIndex(FakeSystemIndex.SYSTEM_INDEX_NAME).setSource(Map.of("a", "b")).get();

        Settings leaderIndexSettings = indexSettings(1, 0).build();
        createLeaderIndex("logs-201901", leaderIndexSettings);
        assertLongBusy(() -> {
            AutoFollowStats autoFollowStats = getAutoFollowStats();
            // Only the regular index is followed; the system index is ignored.
            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(1L));
            assertTrue(ESIntegTestCase.indexExists("copy-logs-201901", followerClient()));
            assertFalse(ESIntegTestCase.indexExists("copy-.fake-system-index", followerClient()));
        });
    }
    /**
     * When a followed leader index is deleted, its UUID must eventually be removed from the
     * followed-leader-index UUID list tracked per pattern in {@code AutoFollowMetadata}.
     */
    public void testCleanFollowedLeaderIndexUUIDs() throws Exception {
        Settings leaderIndexSettings = indexSettings(1, 0).build();
        putAutoFollowPatterns("my-pattern", new String[] { "logs-*" });
        createLeaderIndex("logs-201901", leaderIndexSettings);
        assertLongBusy(() -> {
            AutoFollowStats autoFollowStats = getAutoFollowStats();
            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(1L));
            assertTrue(ESIntegTestCase.indexExists("copy-logs-201901", followerClient()));

            // The tracked UUID must match the leader index UUID recorded in the
            // follower index's CCR custom metadata.
            Metadata metadata = getFollowerCluster().clusterService().state().metadata();
            String leaderIndexUUID = metadata.getProject()
                .index("copy-logs-201901")
                .getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY)
                .get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY);
            AutoFollowMetadata autoFollowMetadata = metadata.getProject().custom(AutoFollowMetadata.TYPE);
            assertThat(autoFollowMetadata, notNullValue());
            List<String> followedLeaderIndixUUIDs = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern");
            assertThat(followedLeaderIndixUUIDs.size(), equalTo(1));
            assertThat(followedLeaderIndixUUIDs.get(0), equalTo(leaderIndexUUID));
        });

        // Delete the leader index; the tracked UUID list for the pattern must become empty.
        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest("logs-201901");
        assertAcked(leaderClient().admin().indices().delete(deleteIndexRequest).actionGet());

        assertLongBusy(() -> {
            AutoFollowMetadata autoFollowMetadata = getFollowerCluster().clusterService()
                .state()
                .metadata()
                .getProject()
                .custom(AutoFollowMetadata.TYPE);
            assertThat(autoFollowMetadata, notNullValue());
            List<String> followedLeaderIndixUUIDs = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern");
            assertThat(followedLeaderIndixUUIDs.size(), equalTo(0));
        });
    }
    /**
     * Follows several leader indices, verifies deleting the pattern stops the auto-follower,
     * then re-creates the pattern and verifies only indices created after re-creation are
     * followed (each leader index followed exactly once, no auto-follow errors).
     */
    public void testAutoFollowManyIndices() throws Exception {
        Settings leaderIndexSettings = indexSettings(1, 0).build();
        putAutoFollowPatterns("my-pattern", new String[] { "logs-*" });
        long numIndices = randomIntBetween(4, 8);
        for (int i = 0; i < numIndices; i++) {
            createLeaderIndex("logs-" + i, leaderIndexSettings);
        }
        long expectedVal1 = numIndices;

        // Single-element arrays let the lambdas below publish state for the catch-block logging.
        Metadata[] metadata = new Metadata[1];
        AutoFollowStats[] autoFollowStats = new AutoFollowStats[1];
        try {
            assertLongBusy(() -> {
                metadata[0] = getFollowerCluster().clusterService().state().metadata();
                autoFollowStats[0] = getAutoFollowStats();

                assertThat(metadata[0].getProject().indices().size(), equalTo((int) expectedVal1));
                AutoFollowMetadata autoFollowMetadata = metadata[0].getProject().custom(AutoFollowMetadata.TYPE);
                assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), hasSize((int) expectedVal1));
                assertThat(autoFollowStats[0].getNumberOfSuccessfulFollowIndices(), equalTo(expectedVal1));
            });
        } catch (AssertionError ae) {
            // Log last observed state for diagnostics before rethrowing.
            logger.warn("indices={}", Arrays.toString(metadata[0].getProject().indices().keySet().toArray(new String[0])));
            logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0]));
            throw ae;
        }

        // Delete auto follow pattern and make sure that in the background the auto follower has stopped
        // then the leader index created after that should never be auto followed:
        deleteAutoFollowPattern("my-pattern");
        try {
            assertLongBusy(() -> {
                metadata[0] = getFollowerCluster().clusterService().state().metadata();
                autoFollowStats[0] = getAutoFollowStats();

                assertThat(metadata[0].getProject().indices().size(), equalTo((int) expectedVal1));
                AutoFollowMetadata autoFollowMetadata = metadata[0].getProject().custom(AutoFollowMetadata.TYPE);
                assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), nullValue());
                assertThat(autoFollowStats[0].getAutoFollowedClusters().size(), equalTo(0));
            });
        } catch (AssertionError ae) {
            logger.warn("indices={}", Arrays.toString(metadata[0].getProject().indices().keySet().toArray(new String[0])));
            logger.warn("auto follow stats={}", Strings.toString(autoFollowStats[0]));
            throw ae;
        }
        createLeaderIndex("logs-does-not-count", leaderIndexSettings);

        putAutoFollowPatterns("my-pattern", new String[] { "logs-*" });
        long i = numIndices;
        numIndices = numIndices + randomIntBetween(4, 8);
        for (; i < numIndices; i++) {
            createLeaderIndex("logs-" + i, leaderIndexSettings);
        }
        long expectedVal2 = numIndices;

        assertLongBusy(() -> {
            metadata[0] = getFollowerCluster().clusterService().state().metadata();
            autoFollowStats[0] = getAutoFollowStats();

            assertThat(metadata[0].getProject().indices().size(), equalTo((int) expectedVal2));
            AutoFollowMetadata autoFollowMetadata = metadata[0].getProject().custom(AutoFollowMetadata.TYPE);
            // expectedVal2 + 1, because logs-does-not-count is also marked as auto followed.
            // (This is because indices created before a pattern exists are not auto followed and are just marked as such.)
            assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), hasSize((int) expectedVal2 + 1));
            long count = Arrays.stream(metadata[0].getProject().getConcreteAllIndices()).filter(s -> s.startsWith("copy-")).count();
            assertThat(count, equalTo(expectedVal2));

            // Ensure that there are no auto follow errors:
            // (added specifically to see that there are no leader indices auto followed multiple times)
            assertThat(autoFollowStats[0].getRecentAutoFollowErrors().size(), equalTo(0));
        });
    }
    /**
     * Randomly sets follow parameters on the auto-follow pattern request and verifies that every
     * parameter that was set is delegated unchanged to the follower created from the pattern
     * (checked via the follow-info API).
     */
    public void testAutoFollowParameterAreDelegated() throws Exception {
        Settings leaderIndexSettings = indexSettings(1, 0).build();

        // Enabling auto following:
        PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
        request.setRemoteCluster("leader_cluster");
        request.setLeaderIndexPatterns(Collections.singletonList("logs-*"));
        // Need to set this, because following an index in the same cluster
        request.setFollowIndexNamePattern("copy-{{leader_index}}");
        // Each parameter is set only sometimes, so the delegation check below covers both
        // the "explicitly set" and the "left at default" cases.
        if (randomBoolean()) {
            request.getParameters().setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            request.getParameters().setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            request.getParameters().setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            request.getParameters().setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            request.getParameters().setMaxReadRequestSize(ByteSizeValue.ofBytes(randomNonNegativeLong()));
        }
        if (randomBoolean()) {
            request.getParameters().setMaxRetryDelay(TimeValue.timeValueMillis(500));
        }
        if (randomBoolean()) {
            request.getParameters().setReadPollTimeout(TimeValue.timeValueMillis(500));
        }
        if (randomBoolean()) {
            request.getParameters().setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            request.getParameters().setMaxWriteBufferSize(ByteSizeValue.ofBytes(randomNonNegativeLong()));
        }
        if (randomBoolean()) {
            request.getParameters().setMaxWriteRequestSize(ByteSizeValue.ofBytes(randomNonNegativeLong()));
        }
        request.setName("my-pattern");
        assertTrue(followerClient().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged());

        createLeaderIndex("logs-201901", leaderIndexSettings);
        assertLongBusy(() -> {
            // Fetch the follower's info; the index may not exist yet, hence the retry on
            // IndexNotFoundException via AssertionError inside assertLongBusy.
            FollowInfoAction.Request followInfoRequest = new FollowInfoAction.Request(TEST_REQUEST_TIMEOUT);
            followInfoRequest.setFollowerIndices("copy-logs-201901");
            FollowInfoAction.Response followInfoResponse;
            try {
                followInfoResponse = followerClient().execute(FollowInfoAction.INSTANCE, followInfoRequest).actionGet();
            } catch (IndexNotFoundException e) {
                throw new AssertionError(e);
            }

            assertThat(followInfoResponse.getFollowInfos().size(), equalTo(1));
            FollowerInfo followerInfo = followInfoResponse.getFollowInfos().get(0);
            assertThat(followerInfo.getFollowerIndex(), equalTo("copy-logs-201901"));
            assertThat(followerInfo.getRemoteCluster(), equalTo("leader_cluster"));
            assertThat(followerInfo.getLeaderIndex(), equalTo("logs-201901"));

            // Every parameter that was set on the pattern must be delegated to the follower.
            FollowParameters followParameters = followerInfo.getParameters();
            assertThat(followParameters, notNullValue());
            if (request.getParameters().getMaxWriteBufferCount() != null) {
                assertThat(followParameters.getMaxWriteBufferCount(), equalTo(request.getParameters().getMaxWriteBufferCount()));
            }
            if (request.getParameters().getMaxWriteBufferSize() != null) {
                assertThat(followParameters.getMaxWriteBufferSize(), equalTo(request.getParameters().getMaxWriteBufferSize()));
            }
            if (request.getParameters().getMaxOutstandingReadRequests() != null) {
                assertThat(
                    followParameters.getMaxOutstandingReadRequests(),
                    equalTo(request.getParameters().getMaxOutstandingReadRequests())
                );
            }
            if (request.getParameters().getMaxOutstandingWriteRequests() != null) {
                assertThat(
                    followParameters.getMaxOutstandingWriteRequests(),
                    equalTo(request.getParameters().getMaxOutstandingWriteRequests())
                );
            }
            if (request.getParameters().getMaxReadRequestOperationCount() != null) {
                assertThat(
                    followParameters.getMaxReadRequestOperationCount(),
                    equalTo(request.getParameters().getMaxReadRequestOperationCount())
                );
            }
            if (request.getParameters().getMaxReadRequestSize() != null) {
                assertThat(followParameters.getMaxReadRequestSize(), equalTo(request.getParameters().getMaxReadRequestSize()));
            }
            if (request.getParameters().getMaxRetryDelay() != null) {
                assertThat(followParameters.getMaxRetryDelay(), equalTo(request.getParameters().getMaxRetryDelay()));
            }
            if (request.getParameters().getReadPollTimeout() != null) {
                assertThat(followParameters.getReadPollTimeout(), equalTo(request.getParameters().getReadPollTimeout()));
            }
            if (request.getParameters().getMaxWriteRequestOperationCount() != null) {
                assertThat(
                    followParameters.getMaxWriteRequestOperationCount(),
                    equalTo(request.getParameters().getMaxWriteRequestOperationCount())
                );
            }
            if (request.getParameters().getMaxWriteRequestSize() != null) {
                assertThat(followParameters.getMaxWriteRequestSize(), equalTo(request.getParameters().getMaxWriteRequestSize()));
            }
        });
    }
    /**
     * A leader index that matches more than one auto-follow pattern is not followed at all,
     * and a conflict error is recorded against each of the matching patterns.
     */
    public void testConflictingPatterns() throws Exception {
        Settings leaderIndexSettings = indexSettings(1, 0).build();

        // Enabling auto following:
        putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" });
        putAutoFollowPatterns("my-pattern2", new String[] { "logs-2018*" });

        // Matches only my-pattern1, so it is followed normally.
        createLeaderIndex("logs-201701", leaderIndexSettings);
        assertLongBusy(() -> {
            AutoFollowStats autoFollowStats = getAutoFollowStats();
            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(1L));
            assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), equalTo(0L));
            assertThat(autoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), equalTo(0L));
        });
        assertTrue(ESIntegTestCase.indexExists("copy-logs-201701", followerClient()));

        // Matches both patterns: must fail, recording one conflict error per pattern.
        createLeaderIndex("logs-201801", leaderIndexSettings);
        assertLongBusy(() -> {
            AutoFollowStats autoFollowStats = getAutoFollowStats();
            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(1L));
            assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), greaterThanOrEqualTo(1L));
            assertThat(autoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), equalTo(0L));
            assertThat(autoFollowStats.getRecentAutoFollowErrors().size(), equalTo(2));
            ElasticsearchException autoFollowError1 = autoFollowStats.getRecentAutoFollowErrors().get("my-pattern1:logs-201801").v2();
            assertThat(autoFollowError1, notNullValue());
            assertThat(
                autoFollowError1.getRootCause().getMessage(),
                equalTo("index to follow [logs-201801] for pattern [my-pattern1] " + "matches with other patterns [my-pattern2]")
            );

            ElasticsearchException autoFollowError2 = autoFollowStats.getRecentAutoFollowErrors().get("my-pattern2:logs-201801").v2();
            assertThat(autoFollowError2, notNullValue());
            assertThat(
                autoFollowError2.getRootCause().getMessage(),
                equalTo("index to follow [logs-201801] for pattern [my-pattern2] " + "matches with other patterns [my-pattern1]")
            );
        });
        assertFalse(ESIntegTestCase.indexExists("copy-logs-201801", followerClient()));
    }
    /**
     * While a pattern is paused, newly created (and closed/deleted) leader indices are not
     * followed; after resuming, the indices created during the pause are picked up.
     */
    public void testPauseAndResumeAutoFollowPattern() throws Exception {
        final Settings leaderIndexSettings = indexSettings(1, 0).build();

        // index created in the remote cluster before the auto follow pattern exists won't be auto followed
        createLeaderIndex("test-existing-index-is-ignored", leaderIndexSettings);

        // create the auto follow pattern
        putAutoFollowPatterns("test-pattern", new String[] { "test-*", "tests-*" });
        assertLongBusy(() -> {
            final AutoFollowStats autoFollowStats = getAutoFollowStats();
            assertThat(autoFollowStats.getAutoFollowedClusters().size(), equalTo(1));
            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(0L));
        });

        // index created in the remote cluster are auto followed
        createLeaderIndex("test-new-index-is-auto-followed", leaderIndexSettings);
        assertLongBusy(() -> {
            final AutoFollowStats autoFollowStats = getAutoFollowStats();
            assertThat(autoFollowStats.getAutoFollowedClusters().size(), equalTo(1));
            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(1L));
            assertTrue(ESIntegTestCase.indexExists("copy-test-new-index-is-auto-followed", followerClient()));
        });
        ensureFollowerGreen("copy-test-new-index-is-auto-followed");

        // pause the auto follow pattern
        pauseAutoFollowPattern("test-pattern");
        assertBusy(() -> assertThat(getAutoFollowStats().getAutoFollowedClusters().size(), equalTo(0)));

        // indices created in the remote cluster are not auto followed because the pattern is paused
        final int nbIndicesCreatedWhilePaused = randomIntBetween(1, 5);
        for (int i = 0; i < nbIndicesCreatedWhilePaused; i++) {
            createLeaderIndex("test-index-created-while-pattern-is-paused-" + i, leaderIndexSettings);
        }

        // sometimes create another index in the remote cluster and close (or delete) it right away
        // it should not be auto followed when the pattern is resumed
        if (randomBoolean()) {
            final String indexName = "test-index-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT);
            createLeaderIndex(indexName, leaderIndexSettings);
            if (randomBoolean()) {
                assertAcked(leaderClient().admin().indices().prepareClose(indexName));
            } else {
                assertAcked(leaderClient().admin().indices().prepareDelete(indexName));
            }
        }

        // Sometimes also create an index that does not match the pattern at all.
        if (randomBoolean()) {
            createLeaderIndex("logs-20200101", leaderIndexSettings);
        }

        // pattern is paused, none of the newly created indices has been followed yet
        assertThat(followerClient().admin().indices().prepareStats("copy-*").get().getIndices().size(), equalTo(1));
        ensureLeaderGreen("test-index-created-while-pattern-is-paused-*");

        // resume the auto follow pattern, indices created while the pattern was paused are picked up for auto-following
        resumeAutoFollowPattern("test-pattern");
        assertLongBusy(() -> {
            final Client client = followerClient();
            assertThat(getAutoFollowStats().getAutoFollowedClusters().size(), equalTo(1));
            assertThat(
                client.admin()
                    .cluster()
                    .prepareState(TEST_REQUEST_TIMEOUT)
                    .clear()
                    .setIndices("copy-*")
                    .setMetadata(true)
                    .get()
                    .getState()
                    .getMetadata()
                    .getProject()
                    .indices()
                    .size(),
                equalTo(1 + nbIndicesCreatedWhilePaused)
            );
            for (int i = 0; i < nbIndicesCreatedWhilePaused; i++) {
                assertTrue(ESIntegTestCase.indexExists("copy-test-index-created-while-pattern-is-paused-" + i, client));
            }
        });
    }
public void testPauseAndResumeWithMultipleAutoFollowPatterns() throws Exception {
final Settings leaderIndexSettings = indexSettings(1, 0).build();
final String[] prefixes = { "logs-", "users-", "docs-", "monitoring-", "data-", "system-", "events-", "files-" };
// create an auto follow pattern for each prefix
final List<String> autoFollowPatterns = Arrays.stream(prefixes).map(prefix -> {
final String pattern = prefix + "pattern";
putAutoFollowPatterns(pattern, new String[] { prefix + "*" });
return pattern;
}).toList();
// pick up some random pattern to pause
final List<String> pausedAutoFollowerPatterns = randomSubsetOf(randomIntBetween(1, 3), autoFollowPatterns);
// all patterns should be active
assertBusy(() -> autoFollowPatterns.forEach(pattern -> assertTrue(getAutoFollowPattern(pattern).isActive())));
assertBusy(() -> assertThat(getAutoFollowStats().getAutoFollowedClusters().size(), equalTo(1)));
final AtomicBoolean running = new AtomicBoolean(true);
final AtomicInteger leaderIndices = new AtomicInteger(0);
final CountDownLatch latchThree = new CountDownLatch(3);
final CountDownLatch latchSix = new CountDownLatch(6);
final CountDownLatch latchNine = new CountDownLatch(9);
// start creating new indices on the remote cluster
final Thread createNewLeaderIndicesThread = new Thread(() -> {
while (running.get() && leaderIndices.get() < 20) {
final String prefix = randomFrom(prefixes);
final String leaderIndex = prefix + leaderIndices.incrementAndGet();
try {
createLeaderIndex(leaderIndex, leaderIndexSettings);
ensureLeaderGreen(leaderIndex);
if (pausedAutoFollowerPatterns.stream().noneMatch(pattern -> pattern.startsWith(prefix))) {
ensureFollowerGreen("copy-" + leaderIndex);
} else {
Thread.sleep(200L);
}
latchThree.countDown();
latchSix.countDown();
latchNine.countDown();
} catch (Exception e) {
throw new AssertionError(e);
}
}
});
createNewLeaderIndicesThread.start();
// wait for 3 leader indices to be created on the remote cluster
latchThree.await(60L, TimeUnit.SECONDS);
assertThat(leaderIndices.get(), greaterThanOrEqualTo(3));
assertLongBusy(() -> assertThat(getAutoFollowStats().getNumberOfSuccessfulFollowIndices(), greaterThanOrEqualTo(3L)));
// now pause some random patterns
pausedAutoFollowerPatterns.forEach(this::pauseAutoFollowPattern);
assertLongBusy(
() -> autoFollowPatterns.forEach(
pattern -> assertThat(
getAutoFollowPattern(pattern).isActive(),
equalTo(pausedAutoFollowerPatterns.contains(pattern) == false)
)
)
);
// wait for more leader indices to be created on the remote cluster
latchSix.await(60L, TimeUnit.SECONDS);
assertThat(leaderIndices.get(), greaterThanOrEqualTo(6));
// resume auto follow patterns
pausedAutoFollowerPatterns.forEach(this::resumeAutoFollowPattern);
assertLongBusy(() -> autoFollowPatterns.forEach(pattern -> assertTrue(getAutoFollowPattern(pattern).isActive())));
// wait for more leader indices to be created on the remote cluster
latchNine.await(60L, TimeUnit.SECONDS);
assertThat(leaderIndices.get(), greaterThanOrEqualTo(9));
assertLongBusy(() -> assertThat(getAutoFollowStats().getNumberOfSuccessfulFollowIndices(), greaterThanOrEqualTo(9L)));
running.set(false);
createNewLeaderIndicesThread.join();
// check that all leader indices have been correctly auto followed
List<String> matchingPrefixes = Arrays.stream(prefixes).map(prefix -> prefix + "*").collect(Collectors.toList());
for (IndexMetadata leaderIndexMetadata : leaderClient().admin()
.cluster()
.prepareState(TEST_REQUEST_TIMEOUT)
.get()
.getState()
.metadata()
.getProject()) {
final String leaderIndex = leaderIndexMetadata.getIndex().getName();
if (Regex.simpleMatch(matchingPrefixes, leaderIndex)) {
String followingIndex = "copy-" + leaderIndex;
assertBusy(
() -> assertThat(
"Following index [" + followingIndex + "] must exists",
ESIntegTestCase.indexExists(followingIndex, followerClient()),
is(true)
)
);
}
}
autoFollowPatterns.forEach(this::deleteAutoFollowPattern);
ensureFollowerGreen("copy-*");
assertThat(followerClient().admin().indices().prepareStats("copy-*").get().getIndices().size(), equalTo(leaderIndices.get()));
}
    /**
     * Verifies that leader-index exclusion patterns are honoured by the auto-follower:
     * with pattern {@code logs-*} and exclusion {@code logs-2018*}, the index
     * {@code logs-201701} must be followed while {@code logs-201801} must not be.
     */
    public void testAutoFollowExclusion() throws Exception {
        Settings leaderIndexSettings = indexSettings(1, 0).build();

        putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" }, Collections.singletonList("logs-2018*"));
        // This index matches the leader pattern but also the exclusion pattern,
        // so the auto-follower must skip it entirely.
        createLeaderIndex("logs-201801", leaderIndexSettings);
        AutoFollowStats autoFollowStats = getAutoFollowStats();
        // No follow attempt at all should have been recorded for the excluded index:
        // neither a success nor a failure, and no failed remote state requests.
        assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(0L));
        assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), equalTo(0L));
        assertThat(autoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), equalTo(0L));
        assertFalse(ESIntegTestCase.indexExists("copy-logs-201801", followerClient()));

        // This index matches logs-* but not the exclusion, so it must be auto-followed.
        createLeaderIndex("logs-201701", leaderIndexSettings);
        assertLongBusy(() -> {
            AutoFollowStats autoFollowStatsResponse = getAutoFollowStats();
            assertThat(autoFollowStatsResponse.getNumberOfSuccessfulFollowIndices(), equalTo(1L));
            assertThat(autoFollowStatsResponse.getNumberOfFailedFollowIndices(), greaterThanOrEqualTo(0L));
            assertThat(autoFollowStatsResponse.getNumberOfFailedRemoteClusterStateRequests(), equalTo(0L));
        });

        // Only the non-excluded index got a follower copy.
        assertTrue(ESIntegTestCase.indexExists("copy-logs-201701", followerClient()));
        assertFalse(ESIntegTestCase.indexExists("copy-logs-201801", followerClient()));
    }
    /**
     * Exercises auto-following of a data stream when a follower backing index has been
     * closed and the corresponding leader index is deleted and recreated (changing its
     * UUID). The auto-follower is expected to re-open and re-follow the backing index,
     * keeping the follower data stream consistent with the leader.
     */
    public void testAutoFollowDatastreamWithClosingFollowerIndex() throws Exception {
        final String datastream = "logs-1";
        // Install a composable index template on the leader so that logs-* indices
        // become single-shard, zero-replica data stream backing indices.
        TransportPutComposableIndexTemplateAction.Request request = new TransportPutComposableIndexTemplateAction.Request("template-id");
        request.indexTemplate(
            ComposableIndexTemplate.builder()
                .indexPatterns(List.of("logs-*"))
                .template(
                    new Template(
                        Settings.builder()
                            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                            .build(),
                        null,
                        null
                    )
                )
                .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
                .build()
        );
        assertAcked(leaderClient().execute(TransportPutComposableIndexTemplateAction.TYPE, request).get());
        CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(
            TEST_REQUEST_TIMEOUT,
            TEST_REQUEST_TIMEOUT,
            datastream
        );
        assertAcked(leaderClient().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get());
        // Index one document so the data stream has a non-empty backing index.
        leaderClient().prepareIndex(datastream)
            .setCreate(true)
            .setSource("foo", "bar", DataStream.TIMESTAMP_FIELD_NAME, randomNonNegativeLong())
            .get();
        // Register an auto-follow pattern that mirrors logs-* under the same name
        // (no "copy-" prefix), as required to follow a data stream by name.
        PutAutoFollowPatternAction.Request followRequest = new PutAutoFollowPatternAction.Request(
            TEST_REQUEST_TIMEOUT,
            TEST_REQUEST_TIMEOUT
        );
        followRequest.setName("pattern-1");
        followRequest.setRemoteCluster("leader_cluster");
        followRequest.setLeaderIndexPatterns(List.of("logs-*"));
        followRequest.setFollowIndexNamePattern("{{leader_index}}");
        assertTrue(followerClient().execute(PutAutoFollowPatternAction.INSTANCE, followRequest).get().isAcknowledged());

        logger.info("--> roll over once and wait for the auto-follow to pick up the new index");
        leaderClient().admin().indices().prepareRolloverIndex("logs-1").get();
        assertLongBusy(() -> {
            AutoFollowStats autoFollowStats = getAutoFollowStats();
            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(1L));
        });
        ensureFollowerGreen("*");

        // Roll over again and grab the now-retired backing index that we will
        // close on the follower and recreate on the leader.
        final RolloverResponse rolloverResponse = leaderClient().admin().indices().prepareRolloverIndex(datastream).get();
        final String indexInDatastream = rolloverResponse.getOldIndex();

        logger.info("--> closing [{}] on follower so it will be re-opened by crr", indexInDatastream);
        assertAcked(followerClient().admin().indices().prepareClose(indexInDatastream).setMasterNodeTimeout(TimeValue.MAX_VALUE).get());

        logger.info("--> deleting and recreating index [{}] on leader to change index uuid on leader", indexInDatastream);
        assertAcked(leaderClient().admin().indices().prepareDelete(indexInDatastream).get());
        assertAcked(
            leaderClient().admin()
                .indices()
                .prepareCreate(indexInDatastream)
                .setMapping(MetadataIndexTemplateService.DEFAULT_TIMESTAMP_MAPPING_WITHOUT_ROUTING.toString())
        );
        leaderClient().prepareIndex(indexInDatastream)
            .setCreate(true)
            .setSource("foo", "bar", DataStream.TIMESTAMP_FIELD_NAME, randomNonNegativeLong())
            .get();
        // Re-attach the recreated index to the leader data stream as a backing index.
        leaderClient().execute(
            ModifyDataStreamsAction.INSTANCE,
            new ModifyDataStreamsAction.Request(
                TEST_REQUEST_TIMEOUT,
                TEST_REQUEST_TIMEOUT,
                List.of(DataStreamAction.addBackingIndex(datastream, indexInDatastream))
            )
        ).get();
        // The recreated index (new UUID) counts as an additional successful follow.
        assertLongBusy(() -> {
            AutoFollowStats autoFollowStats = getAutoFollowStats();
            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(3L));
        });

        // The follower must have re-opened the previously closed index and put it
        // back into its copy of the data stream.
        final Metadata metadata = followerClient().admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).get().getState().metadata();
        final DataStream dataStream = metadata.getProject().dataStreams().get(datastream);
        assertTrue(dataStream.getIndices().stream().anyMatch(i -> i.getName().equals(indexInDatastream)));
        assertEquals(IndexMetadata.State.OPEN, metadata.getProject().index(indexInDatastream).getState());
        ensureFollowerGreen("*");

        final IndicesStatsResponse stats = followerClient().admin().indices().prepareStats(datastream).get();
        assertThat(stats.getIndices(), aMapWithSize(2));

        // Delete the backing index on both clusters; only the write index remains.
        assertAcked(leaderClient().admin().indices().prepareDelete(indexInDatastream).get());
        assertAcked(followerClient().admin().indices().prepareDelete(indexInDatastream).setMasterNodeTimeout(TimeValue.MAX_VALUE).get());
        ensureFollowerGreen("*");

        final IndicesStatsResponse statsAfterDelete = followerClient().admin().indices().prepareStats(datastream).get();
        assertThat(statsAfterDelete.getIndices(), aMapWithSize(1));
        assertThat(statsAfterDelete.getIndices(), hasKey(rolloverResponse.getNewIndex()));
    }
private void putAutoFollowPatterns(String name, String[] patterns) {
putAutoFollowPatterns(name, patterns, Collections.emptyList());
}
private void putAutoFollowPatterns(String name, String[] patterns, List<String> exclusionPatterns) {
PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
request.setName(name);
request.setRemoteCluster("leader_cluster");
request.setLeaderIndexPatterns(Arrays.asList(patterns));
request.setLeaderIndexExclusionPatterns(exclusionPatterns);
// Need to set this, because following an index in the same cluster
request.setFollowIndexNamePattern("copy-{{leader_index}}");
if (randomBoolean()) {
request.masterNodeTimeout(TimeValue.timeValueSeconds(randomFrom(10, 20, 30)));
}
assertTrue(followerClient().execute(PutAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged());
}
private void deleteAutoFollowPattern(final String name) {
DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
name
);
if (randomBoolean()) {
request.masterNodeTimeout(TimeValue.timeValueSeconds(randomFrom(10, 20, 30)));
}
assertTrue(followerClient().execute(DeleteAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged());
}
private AutoFollowStats getAutoFollowStats() {
CcrStatsAction.Request request = new CcrStatsAction.Request(TEST_REQUEST_TIMEOUT);
if (randomBoolean()) {
request.masterNodeTimeout(TimeValue.timeValueSeconds(randomFrom(10, 20, 30)));
}
return followerClient().execute(CcrStatsAction.INSTANCE, request).actionGet().getAutoFollowStats();
}
private void createLeaderIndex(String index, Settings settings) {
CreateIndexRequest request = new CreateIndexRequest(index);
request.settings(settings);
leaderClient().admin().indices().create(request).actionGet();
}
private void pauseAutoFollowPattern(final String name) {
ActivateAutoFollowPatternAction.Request request = new ActivateAutoFollowPatternAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
name,
false
);
if (randomBoolean()) {
request.masterNodeTimeout(TimeValue.timeValueSeconds(randomFrom(10, 20, 30)));
}
assertAcked(followerClient().execute(ActivateAutoFollowPatternAction.INSTANCE, request).actionGet());
}
private void resumeAutoFollowPattern(final String name) {
ActivateAutoFollowPatternAction.Request request = new ActivateAutoFollowPatternAction.Request(
TEST_REQUEST_TIMEOUT,
TEST_REQUEST_TIMEOUT,
name,
true
);
if (randomBoolean()) {
request.masterNodeTimeout(TimeValue.timeValueSeconds(randomFrom(10, 20, 30)));
}
assertAcked(followerClient().execute(ActivateAutoFollowPatternAction.INSTANCE, request).actionGet());
}
private AutoFollowMetadata.AutoFollowPattern getAutoFollowPattern(final String name) {
GetAutoFollowPatternAction.Request request = new GetAutoFollowPatternAction.Request(TEST_REQUEST_TIMEOUT);
request.setName(name);
if (randomBoolean()) {
request.masterNodeTimeout(TimeValue.timeValueSeconds(randomFrom(10, 20, 30)));
}
GetAutoFollowPatternAction.Response response = followerClient().execute(GetAutoFollowPatternAction.INSTANCE, request).actionGet();
assertTrue(response.getAutoFollowPatterns().containsKey(name));
return response.getAutoFollowPatterns().get(name);
}
private void assertLongBusy(CheckedRunnable<Exception> codeBlock) throws Exception {
try {
assertBusy(codeBlock, 120L, TimeUnit.SECONDS);
} catch (AssertionError ae) {
AutoFollowStats autoFollowStats = null;
try {
autoFollowStats = getAutoFollowStats();
} catch (Exception e) {
ae.addSuppressed(e);
}
final AutoFollowStats finalAutoFollowStats = autoFollowStats;
logger.warn(
() -> format(
"AssertionError when waiting for auto-follower, auto-follow stats are: %s",
finalAutoFollowStats != null ? Strings.toString(finalAutoFollowStats) : "null"
),
ae
);
throw ae;
}
}
}
|
FakeSystemIndex
|
java
|
spring-cloud__spring-cloud-gateway
|
spring-cloud-gateway-server-webflux/src/main/java/org/springframework/cloud/gateway/filter/ratelimit/RateLimiter.java
|
{
"start": 914,
"end": 1030
}
|
interface ____<C> extends StatefulConfigurable<C> {
Mono<Response> isAllowed(String routeId, String id);
|
RateLimiter
|
java
|
mockito__mockito
|
mockito-core/src/main/java/org/mockito/internal/invocation/mockref/MockReference.java
|
{
"start": 343,
"end": 408
}
|
interface ____<T> extends Serializable {
T get();
}
|
MockReference
|
java
|
spring-projects__spring-security
|
core/src/main/java/org/springframework/security/core/authority/GrantedAuthoritiesContainer.java
|
{
"start": 1081,
"end": 1211
}
|
interface ____ extends Serializable {
Collection<? extends GrantedAuthority> getGrantedAuthorities();
}
|
GrantedAuthoritiesContainer
|
java
|
apache__hadoop
|
hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
|
{
"start": 1752,
"end": 14597
}
|
class ____
extends FileSystemContractBaseTest {
public static final String TEST_FS_OSS_NAME = "test.fs.oss.name";
public static final String FS_OSS_IMPL_DISABLE_CACHE
= "fs.oss.impl.disable.cache";
private static Path testRootPath =
new Path(AliyunOSSTestUtils.generateUniqueTestPath());
@BeforeEach
public void setUp() throws Exception {
Configuration conf = new Configuration();
fs = AliyunOSSTestUtils.createTestFileSystem(conf);
assumeTrue(fs != null);
}
@Override
public Path getTestBaseDir() {
return testRootPath;
}
@Test
public void testMkdirsWithUmask() throws Exception {
// not supported
}
@Test
public void testRootDirAlwaysExists() throws Exception {
//this will throw an exception if the path is not found
fs.getFileStatus(super.path("/"));
//this catches overrides of the base exists() method that don't
//use getFileStatus() as an existence probe
assertTrue(
fs.exists(super.path("/")), "FileSystem.exists() fails for root");
}
@Test
public void testRenameRootDirForbidden() throws Exception {
assumeTrue(renameSupported());
rename(super.path("/"),
super.path("/test/newRootDir"),
false, true, false);
}
@Test
public void testListStatus() throws IOException {
Path file = this.path("/test/hadoop/file");
this.createFile(file);
assertTrue(this.fs.exists(file), "File exists");
FileStatus fs = this.fs.getFileStatus(file);
assertEquals(fs.getOwner(),
UserGroupInformation.getCurrentUser().getShortUserName());
assertEquals(fs.getGroup(),
UserGroupInformation.getCurrentUser().getShortUserName());
}
@Test
public void testGetFileStatusInVersioningBucket() throws Exception {
Path file = this.path("/test/hadoop/file");
for (int i = 1; i <= 30; ++i) {
this.createFile(new Path(file, "sub" + i));
}
assertTrue(this.fs.exists(file), "File exists");
FileStatus fs = this.fs.getFileStatus(file);
assertEquals(fs.getOwner(),
UserGroupInformation.getCurrentUser().getShortUserName());
assertEquals(fs.getGroup(),
UserGroupInformation.getCurrentUser().getShortUserName());
AliyunOSSFileSystemStore store = ((AliyunOSSFileSystem)this.fs).getStore();
for (int i = 0; i < 29; ++i) {
store.deleteObjects(Arrays.asList("test/hadoop/file/sub" + i));
}
// HADOOP-16840, will throw FileNotFoundException without this fix
this.fs.getFileStatus(file);
}
@Test
public void testDeleteSubdir() throws IOException {
Path parentDir = this.path("/test/hadoop");
Path file = this.path("/test/hadoop/file");
Path subdir = this.path("/test/hadoop/subdir");
this.createFile(file);
assertTrue(this.fs.mkdirs(subdir), "Created subdir");
assertTrue(this.fs.exists(file), "File exists");
assertTrue(this.fs.exists(parentDir), "Parent dir exists");
assertTrue(this.fs.exists(subdir), "Subdir exists");
assertTrue(this.fs.delete(subdir, true), "Deleted subdir");
assertTrue(this.fs.exists(parentDir), "Parent should exist");
assertTrue(this.fs.delete(file, false), "Deleted file");
assertTrue(this.fs.exists(parentDir), "Parent should exist");
}
@Override
protected boolean renameSupported() {
return true;
}
@Test
public void testRenameNonExistentPath() throws Exception {
assumeTrue(renameSupported());
Path src = this.path("/test/hadoop/path");
Path dst = this.path("/test/new/newpath");
try {
super.rename(src, dst, false, false, false);
fail("Should throw FileNotFoundException!");
} catch (FileNotFoundException e) {
// expected
}
}
@Test
public void testRenameFileMoveToNonExistentDirectory() throws Exception {
assumeTrue(renameSupported());
Path src = this.path("/test/hadoop/file");
this.createFile(src);
Path dst = this.path("/test/new/newfile");
try {
super.rename(src, dst, false, true, false);
fail("Should throw FileNotFoundException!");
} catch (FileNotFoundException e) {
// expected
}
}
@Test
public void testRenameDirectoryConcurrent() throws Exception {
assumeTrue(renameSupported());
Path src = this.path("/test/hadoop/file/");
Path child1 = this.path("/test/hadoop/file/1");
Path child2 = this.path("/test/hadoop/file/2");
Path child3 = this.path("/test/hadoop/file/3");
Path child4 = this.path("/test/hadoop/file/4");
this.createFile(child1);
this.createFile(child2);
this.createFile(child3);
this.createFile(child4);
Path dst = this.path("/test/new");
super.rename(src, dst, true, false, true);
assertEquals(4, this.fs.listStatus(dst).length);
}
@Test
public void testRenameDirectoryCopyTaskAllSucceed() throws Exception {
assumeTrue(renameSupported());
Path srcOne = this.path("/test/hadoop/file/1");
this.createFile(srcOne);
Path dstOne = this.path("/test/new/file/1");
Path dstTwo = this.path("/test/new/file/2");
AliyunOSSCopyFileContext copyFileContext = new AliyunOSSCopyFileContext();
AliyunOSSFileSystemStore store = ((AliyunOSSFileSystem)this.fs).getStore();
store.storeEmptyFile("test/new/file/");
AliyunOSSCopyFileTask oneCopyFileTask = new AliyunOSSCopyFileTask(
store, srcOne.toUri().getPath().substring(1), data.length,
dstOne.toUri().getPath().substring(1), copyFileContext);
oneCopyFileTask.run();
assumeFalse(copyFileContext.isCopyFailure());
AliyunOSSCopyFileTask twoCopyFileTask = new AliyunOSSCopyFileTask(
store, srcOne.toUri().getPath().substring(1), data.length,
dstTwo.toUri().getPath().substring(1), copyFileContext);
twoCopyFileTask.run();
assumeFalse(copyFileContext.isCopyFailure());
copyFileContext.lock();
try {
copyFileContext.awaitAllFinish(2);
} catch (InterruptedException e) {
throw new Exception(e);
} finally {
copyFileContext.unlock();
}
assumeFalse(copyFileContext.isCopyFailure());
}
@Test
public void testRenameDirectoryCopyTaskAllFailed() throws Exception {
assumeTrue(renameSupported());
Path srcOne = this.path("/test/hadoop/file/1");
this.createFile(srcOne);
Path dstOne = new Path("1");
Path dstTwo = new Path("2");
AliyunOSSCopyFileContext copyFileContext = new AliyunOSSCopyFileContext();
AliyunOSSFileSystemStore store = ((AliyunOSSFileSystem)this.fs).getStore();
//store.storeEmptyFile("test/new/file/");
AliyunOSSCopyFileTask oneCopyFileTask = new AliyunOSSCopyFileTask(
store, srcOne.toUri().getPath().substring(1), data.length,
dstOne.toUri().getPath().substring(1), copyFileContext);
oneCopyFileTask.run();
assumeTrue(copyFileContext.isCopyFailure());
AliyunOSSCopyFileTask twoCopyFileTask = new AliyunOSSCopyFileTask(
store, srcOne.toUri().getPath().substring(1), data.length,
dstTwo.toUri().getPath().substring(1), copyFileContext);
twoCopyFileTask.run();
assumeTrue(copyFileContext.isCopyFailure());
copyFileContext.lock();
try {
copyFileContext.awaitAllFinish(2);
} catch (InterruptedException e) {
throw new Exception(e);
} finally {
copyFileContext.unlock();
}
assumeTrue(copyFileContext.isCopyFailure());
}
@Test
public void testRenameDirectoryCopyTaskPartialFailed() throws Exception {
assumeTrue(renameSupported());
Path srcOne = this.path("/test/hadoop/file/1");
this.createFile(srcOne);
Path dstOne = new Path("1");
Path dstTwo = new Path("/test/new/file/2");
Path dstThree = new Path("3");
AliyunOSSCopyFileContext copyFileContext = new AliyunOSSCopyFileContext();
AliyunOSSFileSystemStore store = ((AliyunOSSFileSystem)this.fs).getStore();
//store.storeEmptyFile("test/new/file/");
AliyunOSSCopyFileTask oneCopyFileTask = new AliyunOSSCopyFileTask(
store, srcOne.toUri().getPath().substring(1), data.length,
dstOne.toUri().getPath().substring(1), copyFileContext);
oneCopyFileTask.run();
assumeTrue(copyFileContext.isCopyFailure());
AliyunOSSCopyFileTask twoCopyFileTask = new AliyunOSSCopyFileTask(
store, srcOne.toUri().getPath().substring(1), data.length,
dstTwo.toUri().getPath().substring(1), copyFileContext);
twoCopyFileTask.run();
assumeTrue(copyFileContext.isCopyFailure());
AliyunOSSCopyFileTask threeCopyFileTask = new AliyunOSSCopyFileTask(
store, srcOne.toUri().getPath().substring(1), data.length,
dstThree.toUri().getPath().substring(1), copyFileContext);
threeCopyFileTask.run();
assumeTrue(copyFileContext.isCopyFailure());
copyFileContext.lock();
try {
copyFileContext.awaitAllFinish(3);
} catch (InterruptedException e) {
throw new Exception(e);
} finally {
copyFileContext.unlock();
}
assumeTrue(copyFileContext.isCopyFailure());
}
@Test
public void testRenameDirectoryMoveToNonExistentDirectory() throws Exception {
assumeTrue(renameSupported());
Path src = this.path("/test/hadoop/dir");
this.fs.mkdirs(src);
Path dst = this.path("/test/new/newdir");
try {
super.rename(src, dst, false, true, false);
fail("Should throw FileNotFoundException!");
} catch (FileNotFoundException e) {
// expected
}
}
@Test
public void testRenameFileMoveToExistingDirectory() throws Exception {
super.testRenameFileMoveToExistingDirectory();
}
@Test
public void testRenameFileAsExistingFile() throws Exception {
assumeTrue(renameSupported());
Path src = this.path("/test/hadoop/file");
this.createFile(src);
Path dst = this.path("/test/new/newfile");
this.createFile(dst);
try {
super.rename(src, dst, false, true, true);
fail("Should throw FileAlreadyExistsException");
} catch (FileAlreadyExistsException e) {
// expected
}
}
@Test
public void testRenameDirectoryAsExistingFile() throws Exception {
assumeTrue(renameSupported());
Path src = this.path("/test/hadoop/dir");
this.fs.mkdirs(src);
Path dst = this.path("/test/new/newfile");
this.createFile(dst);
try {
super.rename(src, dst, false, true, true);
fail("Should throw FileAlreadyExistsException");
} catch (FileAlreadyExistsException e) {
// expected
}
}
@Test
public void testGetFileStatusFileAndDirectory() throws Exception {
Path filePath = this.path("/test/oss/file1");
this.createFile(filePath);
assertTrue(this.fs.getFileStatus(filePath).isFile(), "Should be file");
assertFalse(
this.fs.getFileStatus(filePath).isDirectory(), "Should not be directory");
Path dirPath = this.path("/test/oss/dir");
this.fs.mkdirs(dirPath);
assertTrue(
this.fs.getFileStatus(dirPath).isDirectory(), "Should be directory");
assertFalse(this.fs.getFileStatus(dirPath).isFile(), "Should not be file");
Path parentPath = this.path("/test/oss");
for (FileStatus fileStatus: fs.listStatus(parentPath)) {
assertTrue(
fileStatus.getModificationTime() > 0L, "file and directory should be new");
}
}
@Test
public void testMkdirsForExistingFile() throws Exception {
Path testFile = this.path("/test/hadoop/file");
assertFalse(this.fs.exists(testFile));
this.createFile(testFile);
assertTrue(this.fs.exists(testFile));
try {
this.fs.mkdirs(testFile);
fail("Should throw FileAlreadyExistsException!");
} catch (FileAlreadyExistsException e) {
// expected
}
}
@Test
public void testRenameChangingDirShouldFail() throws Exception {
testRenameDir(true, false, false);
testRenameDir(true, true, true);
}
@Test
public void testRenameDir() throws Exception {
testRenameDir(false, true, false);
testRenameDir(false, true, true);
}
private void testRenameDir(boolean changing, boolean result, boolean empty)
throws Exception {
fs.getConf().setLong(Constants.FS_OSS_BLOCK_SIZE_KEY, 1024);
String key = "a/b/test.file";
for (int i = 0; i < 100; i++) {
if (empty) {
fs.createNewFile(this.path(key + "." + i));
} else {
createFile(this.path(key + "." + i));
}
}
Path srcPath = this.path("a");
Path dstPath = this.path("b");
TestRenameTask task = new TestRenameTask(fs, srcPath, dstPath);
Thread thread = new Thread(task);
thread.start();
while (!task.isRunning()) {
Thread.sleep(1);
}
if (changing) {
fs.delete(this.path("a/b"), true);
}
thread.join();
if (changing) {
assertTrue(task.isSucceed() || fs.exists(this.path("a")));
} else {
assertEquals(result, task.isSucceed());
}
}
|
TestAliyunOSSFileSystemContract
|
java
|
google__error-prone
|
core/src/main/java/com/google/errorprone/bugpatterns/LongDoubleConversion.java
|
{
"start": 1800,
"end": 2791
}
|
class ____ extends BugChecker implements MethodInvocationTreeMatcher {
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
for (ExpressionTree argument : tree.getArguments()) {
checkArgument(argument, state);
}
return NO_MATCH;
}
private void checkArgument(ExpressionTree argument, VisitorState state) {
if (!getType(argument).getKind().equals(TypeKind.LONG)) {
return;
}
Object constant = constValue(argument);
if (constant instanceof Long l && constant.equals((long) l.doubleValue())) {
return;
}
TargetType targetType = targetType(state.withPath(new TreePath(state.getPath(), argument)));
if (targetType != null && targetType.type().getKind().equals(TypeKind.DOUBLE)) {
String replacement = SuggestedFixes.castTree(argument, "double", state);
state.reportMatch(describeMatch(argument, SuggestedFix.replace(argument, replacement)));
}
}
}
|
LongDoubleConversion
|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/context/ContextLoaderTests.java
|
{
"start": 16975,
"end": 17486
}
|
class ____ implements ApplicationContextInitializer<ConfigurableApplicationContext> {
@Override
public void initialize(ConfigurableApplicationContext applicationContext) {
ConfigurableEnvironment environment = applicationContext.getEnvironment();
environment.getPropertySources().addFirst(new PropertySource<>("testPropertySource") {
@Override
public Object getProperty(String key) {
return "name".equals(key) ? "testName" : null;
}
});
}
}
private static
|
TestContextInitializer
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStatsMXBean.java
|
{
"start": 1038,
"end": 1238
}
|
interface ____ {
/**
* The statistics of storage types.
*
* @return get storage statistics per storage type
*/
Map<StorageType, StorageTypeStats> getStorageTypeStats();
}
|
BlockStatsMXBean
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/collect/MutableClassToInstanceMapTest.java
|
{
"start": 1473,
"end": 5132
}
|
class ____ extends TestCase {
@AndroidIncompatible // test-suite builders
public static Test suite() {
TestSuite suite = new TestSuite();
suite.addTestSuite(MutableClassToInstanceMapTest.class);
suite.addTest(
MapTestSuiteBuilder.using(
new TestClassToInstanceMapGenerator() {
// Other tests will verify what real, warning-free usage looks like
// but here we have to do some serious fudging
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public Map<Class, Impl> create(Object... elements) {
MutableClassToInstanceMap<Impl> map = MutableClassToInstanceMap.create();
for (Object object : elements) {
Entry<Class, Impl> entry = (Entry<Class, Impl>) object;
map.putInstance(entry.getKey(), entry.getValue());
}
return (Map) map;
}
})
.named("MutableClassToInstanceMap")
.withFeatures(
MapFeature.GENERAL_PURPOSE,
MapFeature.RESTRICTS_KEYS,
MapFeature.ALLOWS_NULL_VALUES,
CollectionSize.ANY,
CollectionFeature.SERIALIZABLE,
CollectionFeature.SUPPORTS_ITERATOR_REMOVE,
MapFeature.ALLOWS_ANY_NULL_QUERIES)
.createTestSuite());
return suite;
}
private ClassToInstanceMap<Number> map;
@Override
protected void setUp() throws Exception {
map = MutableClassToInstanceMap.create();
}
public void testConstraint() {
/*
* We'll give ourselves a pass on testing all the possible ways of breaking the constraint,
* because we know that newClassMap() is implemented using ConstrainedMap which is itself
* well-tested. A purist would object to this, but what can I say, we're dirty cheaters.
*/
map.put(Integer.class, new Integer(5));
assertThrows(ClassCastException.class, () -> map.put(Double.class, new Long(42)));
// Won't compile: map.put(String.class, "x");
}
public void testPutAndGetInstance() {
assertThat(map.putInstance(Integer.class, new Integer(5))).isNull();
Integer oldValue = map.putInstance(Integer.class, new Integer(7));
assertEquals(5, (int) oldValue);
Integer newValue = map.getInstance(Integer.class);
assertEquals(7, (int) newValue);
// Won't compile: map.putInstance(Double.class, new Long(42));
}
public void testNull() {
assertThrows(NullPointerException.class, () -> map.put(null, new Integer(1)));
map.putInstance(Integer.class, null);
assertThat(map.get(Integer.class)).isNull();
assertThat(map.getInstance(Integer.class)).isNull();
map.put(Long.class, null);
assertThat(map.get(Long.class)).isNull();
assertThat(map.getInstance(Long.class)).isNull();
}
public void testPrimitiveAndWrapper() {
assertThat(map.getInstance(int.class)).isNull();
assertThat(map.getInstance(Integer.class)).isNull();
assertThat(map.putInstance(int.class, 0)).isNull();
assertThat(map.putInstance(Integer.class, 1)).isNull();
assertEquals(2, map.size());
assertEquals(0, (int) map.getInstance(int.class));
assertEquals(1, (int) map.getInstance(Integer.class));
assertEquals(0, (int) map.putInstance(int.class, null));
assertEquals(1, (int) map.putInstance(Integer.class, null));
assertThat(map.getInstance(int.class)).isNull();
assertThat(map.getInstance(Integer.class)).isNull();
assertEquals(2, map.size());
}
}
|
MutableClassToInstanceMapTest
|
java
|
quarkusio__quarkus
|
extensions/hibernate-orm/deployment/src/test/java/io/quarkus/hibernate/orm/singlepersistenceunit/SinglePersistenceUnitPackageConfigurationTest.java
|
{
"start": 865,
"end": 2934
}
|
class ____ {
private static final Formatter LOG_FORMATTER = new PatternFormatter("%s");
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addPackage(EntityIncludedThroughPackageConfig.class.getPackage().getName())
.addPackage(ExcludedEntity.class.getPackage().getName()))
.withConfigurationResource("application.properties")
.overrideConfigKey("quarkus.hibernate-orm.packages",
EntityIncludedThroughPackageConfig.class.getPackage().getName())
// Expect a warning on startup
.setLogRecordPredicate(
record -> record.getMessage().contains("Could not find a suitable persistence unit for model classes"))
.assertLogRecords(records -> assertThat(records)
.as("Warnings on startup")
.hasSize(1)
.element(0).satisfies(record -> {
assertThat(record.getLevel()).isEqualTo(Level.WARNING);
assertThat(LOG_FORMATTER.formatMessage(record))
.contains(ExcludedEntity.class.getName());
}));
@Inject
EntityManager entityManager;
@Test
@Transactional
public void testIncluded() {
EntityIncludedThroughPackageConfig entity = new EntityIncludedThroughPackageConfig("default");
entityManager.persist(entity);
EntityIncludedThroughPackageConfig retrievedEntity = entityManager.find(EntityIncludedThroughPackageConfig.class,
entity.id);
assertEquals(entity.name, retrievedEntity.name);
}
@Test
@Transactional
public void testExcluded() {
ExcludedEntity entity = new ExcludedEntity("gsmet");
assertThatThrownBy(() -> entityManager.persist(entity)).isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Unknown entity type");
}
}
|
SinglePersistenceUnitPackageConfigurationTest
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest-client/deployment/src/test/java/io/quarkus/rest/client/reactive/redirect/EnablePostAdvancedRedirectHandler.java
|
{
"start": 257,
"end": 891
}
|
class ____ implements ContextResolver<AdvancedRedirectHandler> {
@Override
public AdvancedRedirectHandler getContext(Class<?> aClass) {
return context -> {
Response response = context.jaxRsResponse();
if (Response.Status.Family.familyOf(response.getStatus()) == Response.Status.Family.REDIRECTION) {
var result = new RequestOptions();
result.setAbsoluteURI(response.getLocation().toString());
result.addHeader("x-foo", "bar");
return result;
}
return null;
};
}
}
|
EnablePostAdvancedRedirectHandler
|
java
|
quarkusio__quarkus
|
extensions/websockets-next/deployment/src/test/java/io/quarkus/websockets/next/test/errors/MultiTextEncodeErrorTest.java
|
{
"start": 2586,
"end": 2882
}
|
class ____ {
private String name;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@Override
public String toString() {
return name;
}
}
}
|
Pojo
|
java
|
spring-projects__spring-boot
|
integration-test/spring-boot-actuator-integration-tests/src/test/java/org/springframework/boot/actuate/endpoint/web/annotation/AbstractWebEndpointIntegrationTests.java
|
{
"start": 23596,
"end": 23879
}
|
class ____ {
@Bean
NullWriteResponseEndpoint nullWriteResponseEndpoint(EndpointDelegate delegate) {
return new NullWriteResponseEndpoint(delegate);
}
}
@Configuration(proxyBeanMethods = false)
@Import(BaseConfiguration.class)
static
|
NullWriteResponseEndpointConfiguration
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/scheduling/AdaptiveBatchSchedulerITCase.java
|
{
"start": 3137,
"end": 14502
}
|
class ____ {
private static final int DEFAULT_MAX_PARALLELISM = 4;
private static final int SOURCE_PARALLELISM_1 = 2;
private static final int SOURCE_PARALLELISM_2 = 8;
private static final int NUMBERS_TO_PRODUCE = 10000;
private static ConcurrentLinkedQueue<Map<Long, Long>> numberCountResults;
private Map<Long, Long> expectedResult;
@BeforeEach
void setUp() {
expectedResult =
LongStream.range(0, NUMBERS_TO_PRODUCE)
.boxed()
.collect(Collectors.toMap(Function.identity(), i -> 2L));
numberCountResults = new ConcurrentLinkedQueue<>();
}
@Test
void testScheduling() throws Exception {
testSchedulingBase(false);
}
// With a broadcast edge each of the two map subtasks receives every record,
// so every number ends up counted exactly twice.
@Test
void testSubmitJobGraphWithBroadcastEdge() throws Exception {
    final Configuration configuration = createConfiguration();
    // make sure the map operator has two sub-tasks
    configuration.set(BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_MIN_PARALLELISM, 2);
    configuration.set(BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_MAX_PARALLELISM, 2);
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.createLocalEnvironment(configuration);
    env.setRuntimeMode(RuntimeExecutionMode.BATCH);
    env.fromSequence(0, NUMBERS_TO_PRODUCE - 1)
            .setParallelism(1)
            .broadcast()
            .map(new NumberCounter());
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    JobGraphRunningUtil.execute(jobGraph, configuration, 1, 2);
    // Merge the partial per-subtask count maps into a single result map.
    Map<Long, Long> numberCountResultMap =
            numberCountResults.stream()
                    .flatMap(map -> map.entrySet().stream())
                    .collect(
                            Collectors.toMap(
                                    Map.Entry::getKey, Map.Entry::getValue, Long::sum));
    Map<Long, Long> expectedResult =
            LongStream.range(0, NUMBERS_TO_PRODUCE)
                    .boxed()
                    .collect(Collectors.toMap(Function.identity(), i -> 2L));
    assertThat(numberCountResultMap).isEqualTo(expectedResult);
}
// Same scenario as testScheduling, but source parallelism is inferred
// dynamically by the scheduler instead of being set explicitly.
@Test
void testSchedulingWithDynamicSourceParallelismInference() throws Exception {
    testSchedulingBase(true);
}
// The forward edge ties source and map into one forward group with
// parallelism 8, which exceeds the configured global max parallelism of
// DEFAULT_MAX_PARALLELISM (4); the job must still execute successfully.
@Test
void testParallelismOfForwardGroupLargerThanGlobalMaxParallelism() throws Exception {
    final Configuration configuration = createConfiguration();
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.createLocalEnvironment(configuration);
    env.setRuntimeMode(RuntimeExecutionMode.BATCH);
    env.setParallelism(8);
    final DataStream<Long> source =
            env.fromSequence(0, NUMBERS_TO_PRODUCE - 1)
                    .setParallelism(8)
                    .name("source")
                    .slotSharingGroup("group1");
    source.forward().map(new NumberCounter()).name("map").slotSharingGroup("group2");
    env.execute();
}
// source2 feeds two consumers (the union via map1 and map2 directly), while
// source1 is connected through a forward edge; verifies scheduling succeeds
// when one producer's consumers have differing parallelism requirements.
@Test
void testDifferentConsumerParallelism() throws Exception {
    final Configuration configuration = createConfiguration();
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.createLocalEnvironment(configuration);
    env.setRuntimeMode(RuntimeExecutionMode.BATCH);
    env.setParallelism(8);
    final DataStream<Long> source2 =
            env.fromSequence(0, NUMBERS_TO_PRODUCE - 1)
                    .setParallelism(8)
                    .name("source2")
                    .slotSharingGroup("group2");
    final DataStream<Long> source1 =
            env.fromSequence(0, NUMBERS_TO_PRODUCE - 1)
                    .setParallelism(8)
                    .name("source1")
                    .slotSharingGroup("group1");
    source1.forward()
            .union(source2)
            .map(new NumberCounter())
            .name("map1")
            .slotSharingGroup("group3");
    source2.map(new NumberCounter()).name("map2").slotSharingGroup("group4");
    env.execute();
}
// Exercises runtime stream-graph optimization: a custom strategy rewrites the
// source1 out-edge to RESCALE and the map1 out-edge to BROADCAST, and the job
// must still produce the expected counts.
@Test
void testAdaptiveOptimizeStreamGraph() throws Exception {
    final Configuration configuration = createConfiguration();
    configuration.set(
            StreamGraphOptimizationStrategy.STREAM_GRAPH_OPTIMIZATION_STRATEGY,
            List.of(TestingStreamGraphOptimizerStrategy.class.getName()));
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.getExecutionEnvironment(configuration);
    env.setRuntimeMode(RuntimeExecutionMode.BATCH);
    // Chaining disabled so each operator keeps its own stream node/edges.
    env.disableOperatorChaining();
    env.setParallelism(8);
    SingleOutputStreamOperator<Long> source1 =
            env.fromSequence(0, NUMBERS_TO_PRODUCE - 1)
                    .setParallelism(SOURCE_PARALLELISM_1)
                    .name("source1");
    SingleOutputStreamOperator<Long> source2 =
            env.fromSequence(0, NUMBERS_TO_PRODUCE - 1)
                    .setParallelism(SOURCE_PARALLELISM_2)
                    .name("source2");
    source1.keyBy(i -> i % SOURCE_PARALLELISM_1)
            .map(i -> i)
            .name("map1")
            .rebalance()
            .union(source2)
            .rebalance()
            .map(new NumberCounter())
            .name("map2")
            .setParallelism(1);
    StreamGraph streamGraph = env.getStreamGraph();
    // Look up the stream nodes by operator name to target their out-edges.
    StreamNode sourceNode1 =
            streamGraph.getStreamNodes().stream()
                    .filter(node -> node.getOperatorName().contains("source1"))
                    .findFirst()
                    .get();
    StreamNode mapNode1 =
            streamGraph.getStreamNodes().stream()
                    .filter(node -> node.getOperatorName().contains("map1"))
                    .findFirst()
                    .get();
    TestingStreamGraphOptimizerStrategy.convertToRescaleEdgeIds.add(
            sourceNode1.getOutEdges().get(0).getEdgeId());
    TestingStreamGraphOptimizerStrategy.convertToBroadcastEdgeIds.add(
            mapNode1.getOutEdges().get(0).getEdgeId());
    env.execute(streamGraph);
    Map<Long, Long> numberCountResultMap =
            numberCountResults.stream()
                    .flatMap(map -> map.entrySet().stream())
                    .collect(
                            Collectors.toMap(
                                    Map.Entry::getKey, Map.Entry::getValue, Long::sum));
    // One part comes from source1, while the other parts come from the broadcast results of
    // source2.
    Map<Long, Long> expectedResult =
            LongStream.range(0, NUMBERS_TO_PRODUCE)
                    .boxed()
                    .collect(Collectors.toMap(Function.identity(), i -> 2L));
    assertThat(numberCountResultMap).isEqualTo(expectedResult);
}
/**
 * Runs the union/rescale/count job and verifies every number 0..NUMBERS_TO_PRODUCE-1
 * was counted exactly twice (once per source).
 *
 * @param useSourceParallelismInference whether sources use dynamic parallelism inference
 */
private void testSchedulingBase(Boolean useSourceParallelismInference) throws Exception {
    executeJob(useSourceParallelismInference);

    // Merge the partial count maps recorded by the NumberCounter subtasks.
    Map<Long, Long> numberCountResultMap =
            numberCountResults.stream()
                    .flatMap(map -> map.entrySet().stream())
                    .collect(
                            Collectors.toMap(
                                    Map.Entry::getKey, Map.Entry::getValue, Long::sum));

    // Debug aid: print any number whose observed count differs from the expected
    // one. Both maps are keyed by Long, so the loop index must be a long — an int
    // index autoboxes to Integer and Map.get(Integer) never matches a Long key,
    // which made the original loop dead code. Values are compared with equals()
    // because != on boxed Longs is reference inequality. expectedResult contains
    // every key in [0, NUMBERS_TO_PRODUCE), so its get() is never null here.
    for (long i = 0; i < NUMBERS_TO_PRODUCE; i++) {
        if (!expectedResult.get(i).equals(numberCountResultMap.get(i))) {
            System.out.println(i + ": " + numberCountResultMap.get(i));
        }
    }
    assertThat(numberCountResultMap).isEqualTo(expectedResult);
}
/**
 * Builds and runs the test topology: two sources producing 0..NUMBERS_TO_PRODUCE-1
 * are unioned, rescaled, and counted by NumberCounter. Each operator gets its own
 * slot sharing group with explicit resources.
 *
 * @param useSourceParallelismInference when true, sources infer their parallelism
 *     via TestingParallelismInferenceNumberSequenceSource; otherwise parallelism
 *     is set explicitly to SOURCE_PARALLELISM_1/2
 */
private void executeJob(Boolean useSourceParallelismInference) throws Exception {
    final Configuration configuration = createConfiguration();
    final StreamExecutionEnvironment env =
            StreamExecutionEnvironment.createLocalEnvironment(configuration);
    env.setRuntimeMode(RuntimeExecutionMode.BATCH);
    // One slot sharing group per operator (source1, source2, map).
    List<SlotSharingGroup> slotSharingGroups = new ArrayList<>();
    for (int i = 0; i < 3; ++i) {
        SlotSharingGroup group =
                SlotSharingGroup.newBuilder("group" + i)
                        .setCpuCores(1.0)
                        .setTaskHeapMemory(MemorySize.parse("100m"))
                        .build();
        slotSharingGroups.add(group);
    }
    DataStream<Long> source1;
    DataStream<Long> source2;
    if (useSourceParallelismInference) {
        source1 =
                env.fromSource(
                                new TestingParallelismInferenceNumberSequenceSource(
                                        0, NUMBERS_TO_PRODUCE - 1, SOURCE_PARALLELISM_1),
                                WatermarkStrategy.noWatermarks(),
                                "source1")
                        .slotSharingGroup(slotSharingGroups.get(0));
        source2 =
                env.fromSource(
                                new TestingParallelismInferenceNumberSequenceSource(
                                        0, NUMBERS_TO_PRODUCE - 1, SOURCE_PARALLELISM_2),
                                WatermarkStrategy.noWatermarks(),
                                "source2")
                        .slotSharingGroup(slotSharingGroups.get(1));
    } else {
        source1 =
                env.fromSequence(0, NUMBERS_TO_PRODUCE - 1)
                        .setParallelism(SOURCE_PARALLELISM_1)
                        .name("source1")
                        .slotSharingGroup(slotSharingGroups.get(0));
        source2 =
                env.fromSequence(0, NUMBERS_TO_PRODUCE - 1)
                        .setParallelism(SOURCE_PARALLELISM_2)
                        .name("source2")
                        .slotSharingGroup(slotSharingGroups.get(1));
    }
    source1.union(source2)
            .rescale()
            .map(new NumberCounter())
            .name("map")
            .slotSharingGroup(slotSharingGroups.get(2));
    env.execute();
}
/**
 * Base configuration for the adaptive batch scheduler tests: random REST port,
 * a short slot-request timeout, a small max parallelism so parallelism decisions
 * are observable, and tiny data-volume/segment sizes to force multiple subtasks.
 */
private static Configuration createConfiguration() {
    final Configuration configuration = new Configuration();
    configuration.set(RestOptions.BIND_PORT, "0");
    configuration.set(JobManagerOptions.SLOT_REQUEST_TIMEOUT, Duration.ofMillis(5000L));
    configuration.set(
            BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_MAX_PARALLELISM,
            DEFAULT_MAX_PARALLELISM);
    configuration.set(
            BatchExecutionOptions.ADAPTIVE_AUTO_PARALLELISM_AVG_DATA_VOLUME_PER_TASK,
            MemorySize.parse("150kb"));
    configuration.set(TaskManagerOptions.MEMORY_SEGMENT_SIZE, MemorySize.parse("4kb"));
    // Single slot per TaskManager.
    configuration.set(TaskManagerOptions.NUM_TASK_SLOTS, 1);
    return configuration;
}
private static
|
AdaptiveBatchSchedulerITCase
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/keyselector/RowDataKeySelector.java
|
{
"start": 1193,
"end": 1384
}
|
/**
 * A {@link KeySelector} that extracts a {@code RowData} key from a {@code RowData}
 * record and exposes the key's type information.
 */
interface ____
        extends KeySelector<RowData, RowData>, ResultTypeQueryable<RowData> {

    /** Type information of the extracted key rows. */
    InternalTypeInfo<RowData> getProducedType();

    /**
     * Returns a copy of this selector.
     * NOTE(review): the existence of copy() suggests instances may not be
     * shareable across operators — confirm with implementations.
     */
    RowDataKeySelector copy();
}
|
RowDataKeySelector
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/callbacks/Source.java
|
{
"start": 231,
"end": 1148
}
|
class ____ {

    /** Single value participating in equality, hashing and rendering; may be null. */
    private String foo;

    public String getFoo() {
        return foo;
    }

    public void setFoo(String foo) {
        this.foo = foo;
    }

    @Override
    public int hashCode() {
        // Single-field hash: identical to the classic 31 * 1 + fieldHash form,
        // with 0 standing in for a null field.
        return 31 + (foo == null ? 0 : foo.hashCode());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // Exact-class comparison (not instanceof), matching the hashCode contract.
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        Source other = (Source) obj;
        return foo == null ? other.foo == null : foo.equals(other.foo);
    }

    @Override
    public String toString() {
        return "Source [foo=" + foo + "]";
    }
}
|
Source
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/operators/join/deltajoin/StreamingDeltaJoinOperatorTest.java
|
{
"start": 72317,
"end": 72694
}
|
class ____
        extends TableFunctionResultFuture<RowData> {

    private static final long serialVersionUID = -312754413938303160L;

    /**
     * Forwards the fetched rows straight to the underlying result future
     * without any filtering or conversion.
     */
    @Override
    public void complete(Collection<RowData> result) {
        // Raw cast bridges Collection<RowData> to the future's element type.
        //noinspection unchecked
        getResultFuture().complete((Collection) result);
    }
}
private static
|
TestingFetcherResultFuture
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/batchfetch/NestedLazyManyToOneTest.java
|
{
"start": 1683,
"end": 6224
}
|
class ____ {
private static final String QUESTION_MARK = "\\?";
/**
 * Seeds one Entity1 ("0") with 8 Entity2 children ("0_0".."0_7"); only the
 * first ("0_0") and last ("0_7") Entity2 get 5 Entity3 grandchildren each,
 * so later tests can observe batch loading of both empty and non-empty
 * nested collections.
 */
@BeforeAll
public void prepareData(SessionFactoryScope scope) {
    final Entity1 entity1 = new Entity1();
    entity1.setId( "0" );
    final Set<Entity2> entities2 = new HashSet<>();
    for ( int i = 0; i < 8; i++ ) {
        final Entity2 entity2 = new Entity2();
        entity2.setId( entity1.getId() + "_" + i );
        entity2.setParent( entity1 );
        entities2.add( entity2 );
        // add nested children only to first and last entity
        if ( i == 0 || i == 7 ) {
            final Set<Entity3> entities3 = new HashSet<>();
            for ( int j = 0; j < 5; j++ ) {
                final Entity3 entity3 = new Entity3();
                entity3.setId( entity2.getId() + "_" + j );
                entity3.setParent( entity2 );
                entities3.add( entity3 );
            }
            entity2.setChildren( entities3 );
        }
    }
    entity1.setChildren( entities2 );
    scope.inTransaction( session -> {
        // Cascades persist the whole graph from the root entity.
        session.persist( entity1 );
    } );
}
// Loading the root and its direct children should issue exactly two
// statements: one for Entity1 and one batched fetch for the Entity2 set.
@Test
public void testGetFirstLevelChildren(SessionFactoryScope scope) {
    final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
    statementInspector.clear();
    scope.inTransaction( session -> {
        Entity1 fromDb = session.find( Entity1.class, "0" );
        Set<Entity2> children = fromDb.getChildren();
        assertEquals( 8, children.size() );
        statementInspector.assertExecutedCount( 2 ); // 1 for Entity1, 1 for Entity2
        statementInspector.assertNumberOfOccurrenceInQueryNoSpace( 1, QUESTION_MARK, 1 );
    } );
}
// Touching only the first 5 (< batch size) Entity2 children: the nested
// Entity3 collections should be fetched with a single batched statement,
// whose parameter count depends on whether the dialect supports SQL arrays.
@Test
public void testGetNestedChildrenLessThanBatchSize(SessionFactoryScope scope) {
    final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
    statementInspector.clear();
    scope.inTransaction( session -> {
        Entity1 entity1 = session.find( Entity1.class, "0" );
        int i = 0;
        for ( Entity2 child2 : entity1.getChildren() ) {
            // get only first 5 (< batch size) elements
            // this doesn't trigger an additional query only because entity1.children
            // are ordered with @OrderBy, and we always get the first 5 first
            if ( i++ >= 5 ) {
                break;
            }
            else {
                Set<Entity3> children3 = child2.getChildren();
                if ( child2.getId().equals( "0_0" ) ) {
                    assertEquals( 5, children3.size(), "Size of `Child2(0_0).children3` did not match expectation" );
                }
                else {
                    assertEquals( 0, children3.size(), "Size of `Child2(" + child2.getId() + ").children3` did not match expectation" );
                }
            }
        }
        assertEquals( 8, entity1.getChildren().size() );
        // 1 for Entity1, 1 for Entity2, 1 for Entity3
        statementInspector.assertExecutedCount( 3 );
        statementInspector.assertNumberOfOccurrenceInQueryNoSpace( 1, QUESTION_MARK, 1 );
        // Array-capable dialects bind one array parameter; others bind one
        // placeholder per batched key.
        if ( MultiKeyLoadHelper.supportsSqlArrayType( scope.getSessionFactory().getJdbcServices().getDialect() ) ) {
            assertThat( StringHelper.count( statementInspector.getSqlQueries().get( 2 ), '?' ) ).isEqualTo( 1 );
        }
        else {
            assertThat( StringHelper.count( statementInspector.getSqlQueries().get( 2 ), '?' ) ).isEqualTo( 5 );
        }
    } );
}
// Touching all 8 Entity2 children (> batch size of 5): the nested Entity3
// collections require two batched statements instead of one.
@Test
public void testGetNestedChildrenMoreThanBatchSize(SessionFactoryScope scope) {
    final SQLStatementInspector statementInspector = scope.getCollectingStatementInspector();
    statementInspector.clear();
    scope.inTransaction( session -> {
        Entity1 entity1 = session.find( Entity1.class, "0" );
        for ( Entity2 child2 : entity1.getChildren() ) {
            Set<Entity3> children3 = child2.getChildren();
            // Only the first and last Entity2 were given grandchildren.
            if ( child2.getId().equals( "0_0" ) || child2.getId().equals( "0_7" ) ) {
                assertEquals( 5, children3.size() );
            }
            else {
                assertEquals( 0, children3.size() );
            }
        }
        assertThat( entity1.getChildren() ).hasSize( 8 );
        // 1 for Entity1, 1 for Entity2, 2 for Entity3
        assertThat( statementInspector.getSqlQueries() ).hasSize( 4 );
        assertThat( StringHelper.count( statementInspector.getSqlQueries().get( 1 ), '?' ) ).isEqualTo( 1 );
        // Array-capable dialects bind one array parameter per batch; others
        // bind one placeholder per batched key.
        if ( MultiKeyLoadHelper.supportsSqlArrayType( scope.getSessionFactory().getJdbcServices().getDialect() ) ) {
            assertThat( StringHelper.count( statementInspector.getSqlQueries().get( 2 ), '?' ) ).isEqualTo( 1 );
            assertThat( StringHelper.count( statementInspector.getSqlQueries().get( 3 ), '?' ) ).isEqualTo( 1 );
        }
        else {
            assertThat( StringHelper.count( statementInspector.getSqlQueries().get( 2 ), '?' ) ).isEqualTo( 5 );
            assertThat( StringHelper.count( statementInspector.getSqlQueries().get( 3 ), '?' ) ).isEqualTo( 5 );
        }
    } );
}
@MappedSuperclass
public static
|
NestedLazyManyToOneTest
|
java
|
apache__thrift
|
lib/java/src/test/java/org/apache/thrift/transport/TestTSaslTransports.java
|
{
"start": 13281,
"end": 14212
}
|
class ____ implements SaslServer {

    /** Trace string sent by the client; stays null until the first response arrives. */
    private String user;

    @Override
    public String getMechanismName() {
        return "ANONYMOUS";
    }

    @Override
    public byte[] evaluateResponse(byte[] response) throws SaslException {
        // ANONYMOUS: the entire initial response is the (unverified) user/trace string.
        this.user = new String(response, StandardCharsets.UTF_8);
        // No challenge is ever sent back to the client.
        return null;
    }

    @Override
    public boolean isComplete() {
        // Negotiation finishes as soon as one response has been evaluated.
        return this.user != null;
    }

    @Override
    public String getAuthorizationID() {
        return this.user;
    }

    @Override
    public byte[] wrap(byte[] outgoing, int offset, int len) {
        // This mechanism negotiates no security layer.
        throw new UnsupportedOperationException();
    }

    @Override
    public byte[] unwrap(byte[] incoming, int offset, int len) {
        throw new UnsupportedOperationException();
    }

    @Override
    public Object getNegotiatedProperty(String propName) {
        return null;
    }

    @Override
    public void dispose() {}
}
public static
|
AnonymousServer
|
java
|
spring-projects__spring-framework
|
spring-jdbc/src/test/java/org/springframework/jdbc/support/DefaultLobHandlerTests.java
|
{
"start": 1263,
"end": 3926
}
|
/**
 * Mock-based tests for DefaultLobHandler: each test invokes one LOB accessor
 * or mutator and verifies it delegates to the corresponding JDBC call on the
 * mocked ResultSet / PreparedStatement. A negative length on the stream
 * setters selects the length-less JDBC overload.
 */
class ____ {

    private ResultSet rs = mock();

    private PreparedStatement ps = mock();

    private LobHandler lobHandler = new DefaultLobHandler();

    private LobCreator lobCreator = lobHandler.getLobCreator();

    // --- BLOB/CLOB readers delegate to the plain ResultSet getters ---

    @Test
    void testGetBlobAsBytes() throws SQLException {
        lobHandler.getBlobAsBytes(rs, 1);
        verify(rs).getBytes(1);
    }

    @Test
    void testGetBlobAsBinaryStream() throws SQLException {
        lobHandler.getBlobAsBinaryStream(rs, 1);
        verify(rs).getBinaryStream(1);
    }

    @Test
    void testGetClobAsString() throws SQLException {
        lobHandler.getClobAsString(rs, 1);
        verify(rs).getString(1);
    }

    @Test
    void testGetClobAsAsciiStream() throws SQLException {
        lobHandler.getClobAsAsciiStream(rs, 1);
        verify(rs).getAsciiStream(1);
    }

    @Test
    void testGetClobAsCharacterStream() throws SQLException {
        lobHandler.getClobAsCharacterStream(rs, 1);
        verify(rs).getCharacterStream(1);
    }

    // --- BLOB/CLOB writers delegate to the PreparedStatement setters ---

    @Test
    void testSetBlobAsBytes() throws SQLException {
        byte[] content = "testContent".getBytes();
        lobCreator.setBlobAsBytes(ps, 1, content);
        verify(ps).setBytes(1, content);
    }

    @Test
    void testSetBlobAsBinaryStream() throws SQLException {
        InputStream bis = new ByteArrayInputStream("testContent".getBytes());
        lobCreator.setBlobAsBinaryStream(ps, 1, bis, 11);
        verify(ps).setBinaryStream(1, bis, 11);
    }

    @Test
    void testSetBlobAsBinaryStreamWithoutLength() throws SQLException {
        InputStream bis = new ByteArrayInputStream("testContent".getBytes());
        // Negative length -> length-less setBinaryStream overload.
        lobCreator.setBlobAsBinaryStream(ps, 1, bis, -1);
        verify(ps).setBinaryStream(1, bis);
    }

    @Test
    void testSetClobAsString() throws SQLException {
        String content = "testContent";
        lobCreator.setClobAsString(ps, 1, content);
        verify(ps).setString(1, content);
    }

    @Test
    void testSetClobAsAsciiStream() throws SQLException {
        InputStream bis = new ByteArrayInputStream("testContent".getBytes());
        lobCreator.setClobAsAsciiStream(ps, 1, bis, 11);
        verify(ps).setAsciiStream(1, bis, 11);
    }

    @Test
    void testSetClobAsAsciiStreamWithoutLength() throws SQLException {
        InputStream bis = new ByteArrayInputStream("testContent".getBytes());
        // Negative length -> length-less setAsciiStream overload.
        lobCreator.setClobAsAsciiStream(ps, 1, bis, -1);
        verify(ps).setAsciiStream(1, bis);
    }

    @Test
    void testSetClobAsCharacterStream() throws SQLException {
        Reader str = new StringReader("testContent");
        lobCreator.setClobAsCharacterStream(ps, 1, str, 11);
        verify(ps).setCharacterStream(1, str, 11);
    }

    @Test
    void testSetClobAsCharacterStreamWithoutLength() throws SQLException {
        Reader str = new StringReader("testContent");
        // Negative length -> length-less setCharacterStream overload.
        lobCreator.setClobAsCharacterStream(ps, 1, str, -1);
        verify(ps).setCharacterStream(1, str);
    }
}
|
DefaultLobHandlerTests
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/language/simple/MyCloneBean.java
|
{
"start": 853,
"end": 1365
}
|
class ____ implements Cloneable {

    // Single int payload used to exercise copy semantics.
    private int myField;

    /** Creates a bean with {@code myField == 0}. */
    public MyCloneBean() {
    }

    /** Creates a bean holding the given value. */
    public MyCloneBean(int myField) {
        this.myField = myField;
    }

    /** Copy constructor backing {@link #clone()}. */
    public MyCloneBean(MyCloneBean other) {
        this.myField = other.myField;
    }

    public int getMyField() {
        return myField;
    }

    /** Covariant clone implemented via the copy constructor (no super.clone()). */
    @Override
    public MyCloneBean clone() {
        return new MyCloneBean(this);
    }

    /** Field-by-field copy; equivalent to {@link #clone()} for this flat bean. */
    public MyCloneBean deepCopy() {
        return new MyCloneBean(this.myField);
    }
}
|
MyCloneBean
|
java
|
apache__camel
|
dsl/camel-jbang/camel-jbang-plugin-generate/src/main/java/org/apache/camel/dsl/jbang/core/commands/generate/CodeSchemaGenerator.java
|
{
"start": 12884,
"end": 13630
}
|
class ____ generate schema for
* @return The generated schema or null if failed
*/
private JsonNode generateSchema(SchemaGenerator generator, Class<?> targetClass) {
try {
if (verbose) {
printer().println("Generating schema for class: " + targetClass.getName());
}
return generator.generateSchema(targetClass);
} catch (Exception e) {
printer().printErr("Error generating schema for class '" + targetClass.getName() + "':");
printer().printErr(" " + e.getMessage());
printer().printErr(System.lineSeparator());
printer().printErr("This might be caused by:");
printer().printErr(" - Complex
|
to
|
java
|
google__guava
|
android/guava/src/com/google/common/collect/ComparisonChain.java
|
{
"start": 3773,
"end": 5802
}
|
class ____. */
public static ComparisonChain start() {
return ACTIVE;
}
private static final ComparisonChain ACTIVE =
new ComparisonChain() {
@SuppressWarnings("unchecked") // unsafe; see discussion on supertype
@Override
public ComparisonChain compare(Comparable<?> left, Comparable<?> right) {
return classify(((Comparable<Object>) left).compareTo(right));
}
@Override
public <T extends @Nullable Object> ComparisonChain compare(
@ParametricNullness T left, @ParametricNullness T right, Comparator<T> comparator) {
return classify(comparator.compare(left, right));
}
@Override
public ComparisonChain compare(int left, int right) {
return classify(Integer.compare(left, right));
}
@Override
public ComparisonChain compare(long left, long right) {
return classify(Long.compare(left, right));
}
@Override
public ComparisonChain compare(float left, float right) {
return classify(Float.compare(left, right));
}
@Override
public ComparisonChain compare(double left, double right) {
return classify(Double.compare(left, right));
}
@Override
public ComparisonChain compareTrueFirst(boolean left, boolean right) {
return classify(Boolean.compare(right, left)); // reversed
}
@Override
public ComparisonChain compareFalseFirst(boolean left, boolean right) {
return classify(Boolean.compare(left, right));
}
ComparisonChain classify(int result) {
return (result < 0) ? LESS : (result > 0) ? GREATER : ACTIVE;
}
@Override
public int result() {
return 0;
}
};
private static final ComparisonChain LESS = new InactiveComparisonChain(-1);
private static final ComparisonChain GREATER = new InactiveComparisonChain(1);
private static final
|
documentation
|
java
|
quarkusio__quarkus
|
core/deployment/src/main/java/io/quarkus/deployment/dev/testing/ModuleTestRunner.java
|
{
"start": 470,
"end": 4546
}
|
/**
 * Runs the tests of a single dev-mode module. At most one JunitTestRunner is
 * active at a time ({@link #prepare} throws if a run is already in progress);
 * test usage data is accumulated across runs in {@code testClassUsages}.
 */
class ____ {

    final TestState testState = new TestState();

    private final TestSupport testSupport;

    private final CuratedApplication testApplication;

    private final DevModeContext.ModuleInfo moduleInfo;

    private final TestClassUsages testClassUsages = new TestClassUsages();

    // Currently active runner; null when no run is in progress. Guarded by `this`.
    private JunitTestRunner runner;

    public ModuleTestRunner(TestSupport testSupport, CuratedApplication testApplication,
            DevModeContext.ModuleInfo moduleInfo) {
        this.testSupport = testSupport;
        this.testApplication = testApplication;
        this.moduleInfo = moduleInfo;
    }

    /** Aborts any in-progress run and wakes threads waiting on this monitor. */
    public synchronized void abort() {
        notifyAll();
        if (runner != null) {
            runner.abort();
        }
    }

    /**
     * Discovers the tests to run and returns a Runnable that executes them.
     * Both discovery and execution run with the augmentation class loader as
     * the thread context class loader; the caller's TCCL is restored afterwards.
     *
     * @param classScanResult changed classes driving the run (null for a full run)
     * @param reRunFailures   when true, restrict the run to previously failing test ids
     */
    Runnable prepare(ClassScanResult classScanResult, boolean reRunFailures, long runId, TestRunListener listener) {
        var old = Thread.currentThread().getContextClassLoader();
        Thread.currentThread().setContextClassLoader(testApplication.getOrCreateAugmentClassLoader());
        try {
            synchronized (this) {
                if (runner != null) {
                    throw new IllegalStateException("Tests already in progress");
                }
                JunitTestRunner.Builder builder = new JunitTestRunner.Builder()
                        .setClassScanResult(classScanResult)
                        .setRunId(runId)
                        .setTestState(testState)
                        .setTestClassUsages(testClassUsages)
                        .setTestApplication(testApplication)
                        .setIncludeTags(testSupport.includeTags)
                        .setExcludeTags(testSupport.excludeTags)
                        .setInclude(testSupport.include)
                        .setExclude(testSupport.exclude)
                        .setSpecificSelection(testSupport.specificSelection)
                        .setIncludeEngines(testSupport.includeEngines)
                        .setExcludeEngines(testSupport.excludeEngines)
                        .setTestType(testSupport.testType)
                        .setModuleInfo(moduleInfo)
                        .addListener(listener)
                        .setFailingTestsOnly(classScanResult != null && testSupport.brokenOnlyMode); //broken only mode is only when changes are made, not for forced runs
                if (reRunFailures) {
                    // Collect the unique ids of all currently failing tests and
                    // filter discovery down to exactly those.
                    Set<UniqueId> ids = new HashSet<>();
                    for (Map.Entry<String, TestClassResult> e : testSupport.testRunResults.getCurrentFailing().entrySet()) {
                        for (TestResult test : e.getValue().getFailing()) {
                            ids.add(test.uniqueId);
                        }
                    }
                    builder.addAdditionalFilter(new PostDiscoveryFilter() {
                        @Override
                        public FilterResult apply(TestDescriptor testDescriptor) {
                            return FilterResult.includedIf(ids.contains(testDescriptor.getUniqueId()));
                        }
                    });
                }
                runner = builder
                        .build();
            }
            var prepared = runner.prepare();
            return new Runnable() {
                @Override
                public void run() {
                    var old = Thread.currentThread().getContextClassLoader();
                    Thread.currentThread().setContextClassLoader(testApplication.getOrCreateAugmentClassLoader());
                    try {
                        prepared.run();
                    } finally {
                        // Clear the active runner under the monitor so a new
                        // prepare() may start, then restore the caller's TCCL.
                        synchronized (ModuleTestRunner.this) {
                            runner = null;
                        }
                        Thread.currentThread().setContextClassLoader(old);
                    }
                }
            };
        } finally {
            Thread.currentThread().setContextClassLoader(old);
        }
    }

    public TestState getTestState() {
        return testState;
    }
}
|
ModuleTestRunner
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/logging/structured/StructuredLoggingJsonProperties.java
|
{
"start": 6794,
"end": 7174
}
|
enum ____ {
    // Placement options relative to the other members.
    // NOTE(review): semantics inferred from the constant names — confirm usage.
    LAST, FIRST
}
}
/**
* Properties that influence context values (usually elements propagated from the
* logging MDC).
*
* @param include if context elements should be included
* @param prefix the prefix to use for context elements
* @since 3.5.0
*/
record Context(@DefaultValue("true") boolean include, @Nullable String prefix) {
}
static
|
Root
|
java
|
apache__camel
|
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AzureFilesComponentBuilderFactory.java
|
{
"start": 6802,
"end": 7972
}
|
// Generated builder implementation: creates the component and maps builder
// option names onto the component's setters.
class ____
        extends AbstractComponentBuilder<FilesComponent>
        implements AzureFilesComponentBuilder {

    /** Creates the concrete component instance configured by this builder. */
    @Override
    protected FilesComponent buildConcreteComponent() {
        return new FilesComponent();
    }

    /**
     * Applies a single option onto the component by name.
     *
     * @return true when the option name was recognized and applied
     */
    @Override
    protected boolean setPropertyOnComponent(
            Component component,
            String name,
            Object value) {
        // Generated dispatch table: one case per supported component option.
        switch (name) {
        case "bridgeErrorHandler": ((FilesComponent) component).setBridgeErrorHandler((boolean) value); return true;
        case "lazyStartProducer": ((FilesComponent) component).setLazyStartProducer((boolean) value); return true;
        case "autowiredEnabled": ((FilesComponent) component).setAutowiredEnabled((boolean) value); return true;
        case "healthCheckConsumerEnabled": ((FilesComponent) component).setHealthCheckConsumerEnabled((boolean) value); return true;
        case "healthCheckProducerEnabled": ((FilesComponent) component).setHealthCheckProducerEnabled((boolean) value); return true;
        default: return false;
        }
    }
}
}
}
|
AzureFilesComponentBuilderImpl
|
java
|
apache__maven
|
its/core-it-suite/src/test/resources/mng-3503/mng-3503-xpp3Shading-pu11/maven-it-plugin-plexus-utils-11/src/main/java/org/apache/maven/its/plugins/SerializeMojo.java
|
{
"start": 1584,
"end": 2333
}
|
class ____ extends AbstractMojo {

    /**
     * Target file the serialized XML document is written to.
     */
    @Parameter(defaultValue = "${project.build.directory}/serialized.xml")
    private File file;

    /** Serializes a minimal {@code <root/>} Xpp3Dom document to {@link #file}. */
    public void execute() throws MojoExecutionException, MojoFailureException {
        Writer writer = null;
        XmlSerializer s = new MXSerializer();
        try {
            // Ensure the output directory exists before opening the stream.
            file.getParentFile().mkdirs();
            writer = new OutputStreamWriter(new FileOutputStream(file), "UTF-8");
            s.setOutput(writer);
            Xpp3Dom dom = new Xpp3Dom("root");
            dom.writeToSerializer("", s);
        } catch (IOException e) {
            throw new MojoExecutionException(e.getMessage(), e);
        } finally {
            // Quietly closes the writer even when serialization failed.
            IOUtil.close(writer);
        }
    }
}
|
SerializeMojo
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/main/java/org/hibernate/envers/internal/synchronization/EntityChangeNotifier.java
|
{
"start": 683,
"end": 2150
}
|
class ____ {

    private final RevisionInfoGenerator revisionInfoGenerator;

    private final SharedSessionContractImplementor sessionImplementor;

    public EntityChangeNotifier(RevisionInfoGenerator revisionInfoGenerator, SharedSessionContractImplementor sessionImplementor) {
        this.revisionInfoGenerator = revisionInfoGenerator;
        this.sessionImplementor = sessionImplementor;
    }

    /**
     * Notifies {@link RevisionInfoGenerator} about changes made in the current revision. Provides information
     * about modified entity class, entity name and its id, as well as {@link org.hibernate.envers.RevisionType}
     * and revision log entity.
     *
     * @param session Active session.
     * @param currentRevisionData Revision log entity.
     * @param vwu Performed work unit.
     */
    public void entityChanged(SharedSessionContractImplementor session, Object currentRevisionData, AuditWorkUnit vwu) {
        Object entityId = vwu.getEntityId();
        if ( entityId instanceof PersistentCollectionChangeWorkUnit.PersistentCollectionChangeWorkUnitId ) {
            // Notify about a change in collection owner entity.
            entityId = ( (PersistentCollectionChangeWorkUnit.PersistentCollectionChangeWorkUnitId) entityId ).getOwnerId();
        }
        // NOTE(review): raw Class usage — consider Class<?> if the generator's
        // signature allows it; left as-is since the callee is not visible here.
        final Class entityClass = EntityTools.getEntityClass( sessionImplementor, vwu.getEntityName() );
        revisionInfoGenerator.entityChanged(
                entityClass,
                vwu.getEntityName(),
                entityId,
                vwu.getRevisionType(),
                currentRevisionData
        );
    }
}
|
EntityChangeNotifier
|
java
|
quarkusio__quarkus
|
extensions/funqy/funqy-http/deployment/src/test/java/io/quarkus/funqy/test/GreetingTemplate.java
|
{
"start": 39,
"end": 453
}
|
class ____ {

    /** Greeting word, e.g. "Hello"; null until set. */
    private String greeting;

    /** Trailing punctuation appended after the greeting; null until set. */
    private String punctuation;

    public String getGreeting() {
        return this.greeting;
    }

    public void setGreeting(String value) {
        this.greeting = value;
    }

    public String getPunctuation() {
        return this.punctuation;
    }

    public void setPunctuation(String value) {
        this.punctuation = value;
    }
}
|
GreetingTemplate
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/AutoCloseTests.java
|
{
"start": 18663,
"end": 19018
}
|
class ____ implements TestInterface {

    // Initialized in @BeforeAll; closed by the @AutoClose extension after the
    // test container finishes.
    @AutoClose
    static AutoCloseSpy nestedStaticClosable;

    // Per-instance resource; also closed automatically by the extension.
    @AutoClose
    final AutoCloseable nestedClosable = new AutoCloseSpy("nestedClosable");

    @BeforeAll
    static void setup() {
        nestedStaticClosable = new AutoCloseSpy("nestedStaticClosable");
    }
}
}
@TestInstance(PER_CLASS)
static
|
NestedTestCase
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java
|
{
"start": 138346,
"end": 139820
}
|
class ____ {
public int foo(Suit suit) {
int x = 0;
x *=
switch (suit) {
case HEART, DIAMOND, SPADE ->
/* red suit */
// Heart comment
/* red suit */
// sparkles
// Diamond comment
/* black suit */
2;
// Before break comment
// After break comment
case CLUB ->
/* black suit */
// Club comment
throw new NullPointerException();
// Club after throw comment
};
return x;
}
}
""")
.setArgs(
"-XepOpt:StatementSwitchToExpressionSwitch:EnableAssignmentSwitchConversion",
"-XepOpt:StatementSwitchToExpressionSwitch:EnableDirectConversion=false")
.setFixChooser(StatementSwitchToExpressionSwitchTest::assertOneFixAndChoose)
.doTest(TEXT_MATCH);
}
@Test
public void switchByEnum_compoundAssignmentExampleInDocumentation_error() {
// This code appears as an example in the documentation (added surrounding class)
refactoringHelper
.addInputLines(
"Test.java",
"""
|
Test
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/AutoValueBoxedValuesTest.java
|
{
"start": 22860,
"end": 23524
}
|
class ____ {",
" abstract Builder setLongId(long value);",
" abstract Builder setSuperClassLongId(Long value);",
" abstract BaseClass build();",
" }"),
lines(" }", "}")))
.doTest();
}
@Test
public void nullableGettersWithNonNullableSetters_noChange() {
if (!withBuilder) {
return;
}
compilationHelper
.addSourceLines(
"in/Test.java",
"""
import com.google.auto.value.AutoValue;
import javax.annotation.Nullable;
@AutoValue
abstract
|
Builder
|
java
|
apache__camel
|
dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/DataSetEndpointBuilderFactory.java
|
{
"start": 1457,
"end": 1592
}
|
interface ____ {
/**
* Builder for endpoint consumers for the Dataset component.
*/
public
|
DataSetEndpointBuilderFactory
|
java
|
dropwizard__dropwizard
|
dropwizard-servlets/src/main/java/io/dropwizard/servlets/tasks/PostBodyTask.java
|
{
"start": 164,
"end": 273
}
|
interface ____ provides the post body of the request.
*
* @see Task
* @see TaskServlet
*/
public abstract
|
and
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/engine/extension/TimeoutExtensionTests.java
|
{
"start": 27521,
"end": 27791
}
|
class ____ {

    // Times out almost immediately, then throws an Error to verify how the
    // timeout extension reports unrecoverable failures.
    @Test
    @Timeout(value = 1, unit = NANOSECONDS)
    void test() {
        new EventuallyInterruptibleInvocation().proceed();
        throw new OutOfMemoryError();
    }
}
@SuppressWarnings("JUnitMalformedDeclaration")
@Timeout(10)
static
|
UnrecoverableExceptionTestCase
|
java
|
apache__camel
|
components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/spi/ParentContextRegistryTest.java
|
{
"start": 1199,
"end": 2501
}
|
/**
 * Verifies that the Camel registry resolves beans declared in a *parent*
 * Spring application context, through all four registry lookup styles.
 */
class ____ extends SpringTestSupport {

    // The bean "testParentBean" in the parent context is a List with this content.
    private static final List<String> EXPECTED_BEAN = Collections.singletonList("TestValue");

    /** Builds a child context whose parent declares the bean under test. */
    @Override
    protected AbstractXmlApplicationContext createApplicationContext() {
        ClassPathXmlApplicationContext parentContext = new ClassPathXmlApplicationContext(
                "parentContextRegistryTestParent.xml", ParentContextRegistryTest.class);
        return new ClassPathXmlApplicationContext(
                new String[] { "parentContextRegistryTestChild.xml" },
                ParentContextRegistryTest.class, parentContext);
    }

    @Test
    public void testLookupByName() {
        assertEquals(EXPECTED_BEAN, context.getRegistry().lookupByName("testParentBean"));
    }

    @Test
    public void testLookupByNameAndType() {
        assertEquals(EXPECTED_BEAN, context.getRegistry().lookupByNameAndType("testParentBean", List.class));
    }

    @Test
    public void testFindByType() {
        assertEquals(Collections.singleton(EXPECTED_BEAN), context.getRegistry().findByType(List.class));
    }

    @Test
    public void testFindByTypeWithName() {
        assertEquals(Collections.singletonMap("testParentBean", EXPECTED_BEAN),
                context.getRegistry().findByTypeWithName(List.class));
    }
}
|
ParentContextRegistryTest
|
java
|
netty__netty
|
codec-http2/src/main/java/io/netty/handler/codec/http2/Http2Exception.java
|
{
"start": 12263,
"end": 12838
}
|
class ____ extends StreamException {
private static final long serialVersionUID = -8807603212183882637L;
private final boolean decode;
HeaderListSizeException(int streamId, Http2Error error, String message, boolean decode) {
super(streamId, error, message);
this.decode = decode;
}
public boolean duringDecode() {
return decode;
}
}
/**
* Provides the ability to handle multiple stream exceptions with one throw statement.
*/
public static final
|
HeaderListSizeException
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/CanonicalDurationTest.java
|
{
"start": 5283,
"end": 5992
}
|
class ____ {
static final int S = 60;
static final int M = 60;
static final int H = 24;
{
Duration.ofSeconds(S);
Duration.ofMinutes(H);
Duration.ofHours(24);
}
}
""")
.expectUnchanged()
.doTest();
}
@Test
public void consistentWithinExpression() {
helper
.addInputLines(
"A.java",
"""
package a;
import static java.time.Duration.ofSeconds;
import static java.util.Arrays.asList;
import java.time.Duration;
import java.util.List;
public
|
A
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/customproviders/AsyncResponseWithExceptionAndFiltersTargetTest.java
|
{
"start": 6036,
"end": 6382
}
|
class ____ extends RuntimeException {
private final boolean handle;
public DummyException2(boolean handle) {
super("dummy2");
this.handle = handle;
setStackTrace(new StackTraceElement[0]);
}
public boolean isHandle() {
return handle;
}
}
}
|
DummyException2
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/search/arguments/SortByArgs.java
|
{
"start": 508,
"end": 1322
}
|
class ____<K> {
private K attribute;
private boolean isDescending;
private boolean withCount;
/**
* Used to build a new instance of the {@link SortByArgs}.
*
* @return a {@link SortByArgs.Builder} that provides the option to build up a new instance of the {@link SearchArgs}
* @param <K> the key type
*/
public static <K> SortByArgs.Builder<K> builder() {
return new SortByArgs.Builder<>();
}
/**
* Builder for {@link SortByArgs}.
* <p>
* As a final step the {@link SortByArgs.Builder#build()} method needs to be executed to create the final {@link SortByArgs}
* instance.
*
* @param <K> the key type
* @see <a href="https://redis.io/docs/latest/commands/ft.create/">FT.CREATE</a>
*/
public static
|
SortByArgs
|
java
|
redisson__redisson
|
redisson/src/main/java/org/redisson/api/RBatchRx.java
|
{
"start": 16874,
"end": 18123
}
|
interface ____ Redis Function feature using provided codec
*
* @param codec - codec for params and result
* @return function interface
*/
RFunctionRx getFunction(Codec codec);
/**
* Returns keys operations.
* Each of Redis/Redisson object associated with own key
*
* @return Keys object
*/
RKeysRx getKeys();
/**
* Returns API for RediSearch module
*
* @return RSearchRx object
*/
RSearchRx getSearch();
/**
* Returns API for RediSearch module using defined codec for attribute values.
*
* @param codec codec for entry
* @return RSearchRx object
*/
RSearchRx getSearch(Codec codec);
/**
* Executes all operations accumulated during Reactive methods invocations Reactivehronously.
*
* In cluster configurations operations grouped by slot ids
* so may be executed on different servers. Thus command execution order could be changed
*
* @return List with result object for each command
*/
Maybe<BatchResult<?>> execute();
/**
* Discard batched commands and release allocated buffers used for parameters encoding.
*
* @return void
*/
Completable discard();
}
|
for
|
java
|
spring-projects__spring-framework
|
spring-context-indexer/src/test/java/org/springframework/context/index/test/TestCompiler.java
|
{
"start": 1172,
"end": 3150
}
|
class ____ {
public static final File ORIGINAL_SOURCE_FOLDER = new File("src/test/java");
private final JavaCompiler compiler;
private final StandardJavaFileManager fileManager;
private final File outputLocation;
public TestCompiler(Path tempDir) throws IOException {
this(ToolProvider.getSystemJavaCompiler(), tempDir);
}
public TestCompiler(JavaCompiler compiler, Path tempDir) throws IOException {
this.compiler = compiler;
this.fileManager = compiler.getStandardFileManager(null, null, null);
this.outputLocation = tempDir.toFile();
Iterable<? extends File> temp = Collections.singletonList(this.outputLocation);
this.fileManager.setLocation(StandardLocation.CLASS_OUTPUT, temp);
this.fileManager.setLocation(StandardLocation.SOURCE_OUTPUT, temp);
}
public TestCompilationTask getTask(Class<?>... types) {
return getTask(Arrays.stream(types).map(Class::getName).toArray(String[]::new));
}
public TestCompilationTask getTask(String... types) {
Iterable<? extends JavaFileObject> javaFileObjects = getJavaFileObjects(types);
return getTask(javaFileObjects);
}
private TestCompilationTask getTask(Iterable<? extends JavaFileObject> javaFileObjects) {
return new TestCompilationTask(
this.compiler.getTask(null, this.fileManager, null, null, null, javaFileObjects));
}
public File getOutputLocation() {
return this.outputLocation;
}
private Iterable<? extends JavaFileObject> getJavaFileObjects(String... types) {
File[] files = new File[types.length];
for (int i = 0; i < types.length; i++) {
files[i] = getFile(types[i]);
}
return this.fileManager.getJavaFileObjects(files);
}
private File getFile(String type) {
return new File(getSourceFolder(), sourcePathFor(type));
}
private static String sourcePathFor(String type) {
return type.replace(".", "/") + ".java";
}
private File getSourceFolder() {
return ORIGINAL_SOURCE_FOLDER;
}
/**
* A compilation task.
*/
public static
|
TestCompiler
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/testdata/MultipleTopLevelClassesWithErrors.java
|
{
"start": 789,
"end": 891
}
|
class ____ {
public int poo() {
int i = 10;
i = i;
return i;
}
}
}
final
|
Poo2
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java
|
{
"start": 2615,
"end": 36964
}
|
class ____ implements LifecycleAction {
private static final Logger logger = LogManager.getLogger(SearchableSnapshotAction.class);
public static final String NAME = "searchable_snapshot";
public static final ParseField SNAPSHOT_REPOSITORY = new ParseField("snapshot_repository");
public static final ParseField FORCE_MERGE_INDEX = new ParseField("force_merge_index");
public static final ParseField TOTAL_SHARDS_PER_NODE = new ParseField("total_shards_per_node");
public static final ParseField REPLICATE_FOR = new ParseField("replicate_for");
public static final ParseField FORCE_MERGE_ON_CLONE = new ParseField("force_merge_on_clone");
private static final TransportVersion FORCE_MERGE_ON_CLONE_TRANSPORT_VERSION = TransportVersion.fromName(
"ilm_searchable_snapshot_opt_out_clone"
);
public static final String CONDITIONAL_SKIP_ACTION_STEP = BranchingStep.NAME + "-check-prerequisites";
public static final String CONDITIONAL_SKIP_GENERATE_AND_CLEAN = BranchingStep.NAME + "-check-existing-snapshot";
public static final String CONDITIONAL_SKIP_CLONE_STEP = BranchingStep.NAME + "-skip-clone-check";
public static final String WAIT_FOR_CLONED_INDEX_GREEN = WaitForIndexColorStep.NAME + "-cloned-index";
public static final String CONDITIONAL_DATASTREAM_CHECK_KEY = BranchingStep.NAME + "-on-datastream-check";
public static final String CONDITIONAL_DELETE_FORCE_MERGED_INDEX_KEY = BranchingStep.NAME + "-delete-force-merged-index";
public static final String DELETE_FORCE_MERGED_INDEX_KEY = DeleteStep.NAME + "-force-merged-index";
public static final String FULL_RESTORED_INDEX_PREFIX = "restored-";
public static final String PARTIAL_RESTORED_INDEX_PREFIX = "partial-";
public static final String FORCE_MERGE_CLONE_INDEX_PREFIX = "fm-clone-";
/** An index name supplier that always returns the force merge index name (possibly null). */
public static final BiFunction<String, LifecycleExecutionState, String> FORCE_MERGE_CLONE_INDEX_NAME_SUPPLIER = (
indexName,
state) -> state.forceMergeCloneIndexName();
/** An index name supplier that returns the force merge index name if it exists, or the original index name if not. */
public static final BiFunction<String, LifecycleExecutionState, String> FORCE_MERGE_CLONE_INDEX_NAME_FALLBACK_SUPPLIER = (
indexName,
state) -> state.forceMergeCloneIndexName() != null ? state.forceMergeCloneIndexName() : indexName;
/** The cloned index should have 0 replicas, so we also need to remove the auto_expand_replicas setting if present. */
private static final Settings CLONE_SETTINGS = Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, (String) null)
.build();
private static final Function<IndexMetadata, Settings> CLONE_SETTINGS_SUPPLIER = indexMetadata -> CLONE_SETTINGS;
private static final ConstructingObjectParser<SearchableSnapshotAction, Void> PARSER = new ConstructingObjectParser<>(
NAME,
a -> new SearchableSnapshotAction((String) a[0], a[1] == null || (boolean) a[1], (Integer) a[2], (TimeValue) a[3], (Boolean) a[4])
);
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), SNAPSHOT_REPOSITORY);
PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FORCE_MERGE_INDEX);
PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), TOTAL_SHARDS_PER_NODE);
PARSER.declareField(
ConstructingObjectParser.optionalConstructorArg(),
p -> TimeValue.parseTimeValue(p.textOrNull(), REPLICATE_FOR.getPreferredName()),
REPLICATE_FOR,
ObjectParser.ValueType.STRING
);
PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FORCE_MERGE_ON_CLONE);
}
public static SearchableSnapshotAction parse(XContentParser parser) {
return PARSER.apply(parser, null);
}
private final String snapshotRepository;
private final boolean forceMergeIndex;
@Nullable
private final Integer totalShardsPerNode;
@Nullable
private final TimeValue replicateFor;
/** Opt-out field for forcing the force-merge step to run on the source index instead of a cloned version with 0 replicas. */
@Nullable
private final Boolean forceMergeOnClone;
public SearchableSnapshotAction(
String snapshotRepository,
boolean forceMergeIndex,
@Nullable Integer totalShardsPerNode,
@Nullable TimeValue replicateFor,
@Nullable Boolean forceMergeOnClone
) {
if (Strings.hasText(snapshotRepository) == false) {
throw new IllegalArgumentException("the snapshot repository must be specified");
}
this.snapshotRepository = snapshotRepository;
this.forceMergeIndex = forceMergeIndex;
if (totalShardsPerNode != null && totalShardsPerNode < 1) {
throw new IllegalArgumentException("[" + TOTAL_SHARDS_PER_NODE.getPreferredName() + "] must be >= 1");
}
this.totalShardsPerNode = totalShardsPerNode;
if (replicateFor != null && replicateFor.millis() <= 0) {
throw new IllegalArgumentException(
"[" + REPLICATE_FOR.getPreferredName() + "] must be positive [" + replicateFor.getStringRep() + "]"
);
}
this.replicateFor = replicateFor;
if (forceMergeIndex == false && forceMergeOnClone != null) {
throw new IllegalArgumentException(
Strings.format(
"[%s] is not allowed when [%s] is [false]",
FORCE_MERGE_ON_CLONE.getPreferredName(),
FORCE_MERGE_INDEX.getPreferredName()
)
);
}
this.forceMergeOnClone = forceMergeOnClone;
}
public SearchableSnapshotAction(String snapshotRepository, boolean forceMergeIndex) {
this(snapshotRepository, forceMergeIndex, null, null, null);
}
public SearchableSnapshotAction(String snapshotRepository) {
this(snapshotRepository, true, null, null, null);
}
public SearchableSnapshotAction(StreamInput in) throws IOException {
this.snapshotRepository = in.readString();
this.forceMergeIndex = in.readBoolean();
this.totalShardsPerNode = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalInt() : null;
this.replicateFor = in.getTransportVersion().supports(TransportVersions.V_8_18_0) ? in.readOptionalTimeValue() : null;
this.forceMergeOnClone = in.getTransportVersion().supports(FORCE_MERGE_ON_CLONE_TRANSPORT_VERSION)
? in.readOptionalBoolean()
: null;
}
public boolean isForceMergeIndex() {
return forceMergeIndex;
}
public String getSnapshotRepository() {
return snapshotRepository;
}
@Nullable
public Integer getTotalShardsPerNode() {
return totalShardsPerNode;
}
@Nullable
public TimeValue getReplicateFor() {
return replicateFor;
}
@Nullable
public Boolean isForceMergeOnClone() {
return forceMergeOnClone;
}
@Override
public List<Step> toSteps(Client client, String phase, StepKey nextStepKey) {
assert false;
throw new UnsupportedOperationException();
}
@Override
public List<Step> toSteps(Client client, String phase, StepKey nextStepKey, XPackLicenseState licenseState) {
StepKey preActionBranchingKey = new StepKey(phase, NAME, CONDITIONAL_SKIP_ACTION_STEP);
StepKey checkNoWriteIndex = new StepKey(phase, NAME, CheckNotDataStreamWriteIndexStep.NAME);
StepKey waitForNoFollowerStepKey = new StepKey(phase, NAME, WaitForNoFollowersStep.NAME);
StepKey waitTimeSeriesEndTimePassesKey = new StepKey(phase, NAME, WaitUntilTimeSeriesEndTimePassesStep.NAME);
StepKey skipGeneratingSnapshotKey = new StepKey(phase, NAME, CONDITIONAL_SKIP_GENERATE_AND_CLEAN);
StepKey conditionalSkipCloneKey = new StepKey(phase, NAME, CONDITIONAL_SKIP_CLONE_STEP);
StepKey readOnlyKey = new StepKey(phase, NAME, ReadOnlyStep.NAME);
StepKey cleanupClonedIndexKey = new StepKey(phase, NAME, CleanupGeneratedIndexStep.NAME);
StepKey generateCloneIndexNameKey = new StepKey(phase, NAME, GenerateUniqueIndexNameStep.NAME);
StepKey cloneIndexKey = new StepKey(phase, NAME, ResizeIndexStep.CLONE);
StepKey waitForClonedIndexGreenKey = new StepKey(phase, NAME, WAIT_FOR_CLONED_INDEX_GREEN);
StepKey forceMergeStepKey = new StepKey(phase, NAME, ForceMergeStep.NAME);
StepKey waitForSegmentCountKey = new StepKey(phase, NAME, SegmentCountStep.NAME);
StepKey generateSnapshotNameKey = new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME);
StepKey cleanSnapshotKey = new StepKey(phase, NAME, CleanupSnapshotStep.NAME);
StepKey createSnapshotKey = new StepKey(phase, NAME, CreateSnapshotStep.NAME);
StepKey waitForDataTierKey = new StepKey(phase, NAME, WaitForDataTierStep.NAME);
StepKey mountSnapshotKey = new StepKey(phase, NAME, MountSnapshotStep.NAME);
StepKey waitForGreenRestoredIndexKey = new StepKey(phase, NAME, WaitForIndexColorStep.NAME);
StepKey copyMetadataKey = new StepKey(phase, NAME, CopyExecutionStateStep.NAME);
StepKey dataStreamCheckBranchingKey = new StepKey(phase, NAME, CONDITIONAL_DATASTREAM_CHECK_KEY);
StepKey copyLifecyclePolicySettingKey = new StepKey(phase, NAME, CopySettingsStep.NAME);
StepKey swapAliasesKey = new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME);
StepKey replaceDataStreamIndexKey = new StepKey(phase, NAME, ReplaceDataStreamBackingIndexStep.NAME);
StepKey deleteSourceIndexKey = new StepKey(phase, NAME, DeleteStep.NAME);
StepKey conditionalDeleteForceMergedIndexKey = new StepKey(phase, NAME, CONDITIONAL_DELETE_FORCE_MERGED_INDEX_KEY);
StepKey deleteForceMergedIndexKey = new StepKey(phase, NAME, DELETE_FORCE_MERGED_INDEX_KEY);
StepKey replicateForKey = new StepKey(phase, NAME, WaitUntilReplicateForTimePassesStep.NAME);
StepKey dropReplicasKey = new StepKey(phase, NAME, UpdateSettingsStep.NAME);
// Before going through all these steps, first check if we need to do them at all. For example, the index could already be
// a searchable snapshot of the same type and repository, in which case we don't need to do anything. If that is detected,
// this branching step jumps right to the end, skipping the searchable snapshot action entirely. We also check the license
// here before generating snapshots that can't be used if the user doesn't have the right license level.
BranchingStep conditionalSkipActionStep = new BranchingStep(
preActionBranchingKey,
checkNoWriteIndex,
nextStepKey,
(index, project) -> {
if (SEARCHABLE_SNAPSHOT_FEATURE.checkWithoutTracking(licenseState) == false) {
logger.error("[{}] action is not available in the current license", SearchableSnapshotAction.NAME);
throw LicenseUtils.newComplianceException("searchable-snapshots");
}
IndexMetadata indexMetadata = project.index(index);
assert indexMetadata != null : "index " + index.getName() + " must exist in the cluster state";
String policyName = indexMetadata.getLifecyclePolicyName();
SearchableSnapshotMetadata searchableSnapshotMetadata = extractSearchableSnapshotFromSettings(indexMetadata);
if (searchableSnapshotMetadata != null) {
// TODO: allow this behavior instead of returning false, in this case the index is already a searchable a snapshot
// so the most graceful way of recovery might be to use this repo
// The index is already a searchable snapshot, let's see if the repository matches
if (this.snapshotRepository.equals(searchableSnapshotMetadata.repositoryName) == false) {
// Okay, different repo, we need to go ahead with the searchable snapshot
logger.debug(
"[{}] action is configured for index [{}] in policy [{}] which is already mounted as a searchable "
+ "snapshot, but with a different repository (existing: [{}] vs new: [{}]), a new snapshot and "
+ "index will be created",
SearchableSnapshotAction.NAME,
index.getName(),
policyName,
searchableSnapshotMetadata.repositoryName,
this.snapshotRepository
);
return false;
}
// Check to the storage type to see if we need to convert between full <-> partial
MountSearchableSnapshotRequest.Storage existingType = searchableSnapshotMetadata.partial
? MountSearchableSnapshotRequest.Storage.SHARED_CACHE
: MountSearchableSnapshotRequest.Storage.FULL_COPY;
MountSearchableSnapshotRequest.Storage type = getConcreteStorageType(preActionBranchingKey);
if (existingType == type) {
logger.debug(
"[{}] action is configured for index [{}] in policy [{}] which is already mounted "
+ "as a searchable snapshot with the same repository [{}] and storage type [{}], skipping this action",
SearchableSnapshotAction.NAME,
index.getName(),
policyName,
searchableSnapshotMetadata.repositoryName,
type
);
return true;
}
logger.debug(
"[{}] action is configured for index [{}] in policy [{}] which is already mounted "
+ "as a searchable snapshot in repository [{}], however, the storage type ([{}] vs [{}]) "
+ "differs, so a new index will be created",
SearchableSnapshotAction.NAME,
index.getName(),
policyName,
this.snapshotRepository,
existingType,
type
);
// Perform the searchable snapshot
return false;
}
// Perform the searchable snapshot, as the index is not currently a searchable snapshot
return false;
}
);
CheckNotDataStreamWriteIndexStep checkNoWriteIndexStep = new CheckNotDataStreamWriteIndexStep(
checkNoWriteIndex,
waitForNoFollowerStepKey
);
WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep(
waitForNoFollowerStepKey,
waitTimeSeriesEndTimePassesKey,
client
);
WaitUntilTimeSeriesEndTimePassesStep waitUntilTimeSeriesEndTimeStep = new WaitUntilTimeSeriesEndTimePassesStep(
waitTimeSeriesEndTimePassesKey,
skipGeneratingSnapshotKey,
Instant::now
);
// We force-merge on the clone by default, but allow the user to opt-out of this behavior if there is any reason why they don't want
// to clone the index (e.g. if something is preventing the cloned index shards from being assigned).
StepKey keyForForceMerge = shouldForceMergeOnClone() ? conditionalSkipCloneKey : forceMergeStepKey;
// When generating a snapshot, we either jump to the force merge section, or we skip the
// forcemerge and go straight to steps for creating the snapshot
StepKey keyForSnapshotGeneration = forceMergeIndex ? keyForForceMerge : generateSnapshotNameKey;
// Branch, deciding whether there is an existing searchable snapshot that can be used for mounting the index
// (in which case, skip generating a new name and the snapshot cleanup), or if we need to generate a new snapshot
BranchingStep skipGeneratingSnapshotStep = new BranchingStep(
skipGeneratingSnapshotKey,
keyForSnapshotGeneration,
waitForDataTierKey,
(index, project) -> {
IndexMetadata indexMetadata = project.index(index);
String policyName = indexMetadata.getLifecyclePolicyName();
LifecycleExecutionState lifecycleExecutionState = indexMetadata.getLifecycleExecutionState();
SearchableSnapshotMetadata searchableSnapshotMetadata = extractSearchableSnapshotFromSettings(indexMetadata);
if (lifecycleExecutionState.snapshotName() == null && searchableSnapshotMetadata == null) {
// No name exists, so it must be generated
logger.trace(
"no snapshot name for index [{}] in policy [{}] exists, so one will be generated",
index.getName(),
policyName
);
return false;
}
String snapshotIndexName;
String snapshotName;
String repoName;
if (lifecycleExecutionState.snapshotName() != null) {
snapshotIndexName = lifecycleExecutionState.snapshotIndexName();
snapshotName = lifecycleExecutionState.snapshotName();
repoName = lifecycleExecutionState.snapshotRepository();
} else {
snapshotIndexName = searchableSnapshotMetadata.sourceIndex;
snapshotName = searchableSnapshotMetadata.snapshotName;
repoName = searchableSnapshotMetadata.repositoryName;
}
if (this.snapshotRepository.equals(repoName) == false) {
// A different repository is being used
// TODO: allow this behavior instead of throwing an exception
throw new IllegalArgumentException("searchable snapshot indices may be converted only within the same repository");
}
// We can skip the generate, initial cleanup, and snapshot taking for this index, as we already have a generated snapshot.
// This will jump ahead directly to the "mount snapshot" step
logger.debug(
"Policy [{}] will use an existing snapshot [{}] in repository [{}] (index name: [{}]) "
+ "to mount [{}] as a searchable snapshot. This snapshot was found in the {}.",
policyName,
snapshotName,
snapshotRepository,
snapshotIndexName,
index.getName(),
lifecycleExecutionState.snapshotName() != null ? "lifecycle execution state" : "metadata of " + index.getName()
);
return true;
}
);
// If a new snapshot is needed, these steps are executed
// If the index has replicas, we need to clone the index first with 0 replicas and perform the force-merge on that index.
// That avoids us having to force-merge the replica shards too, which is a waste, as the snapshot will only be taken from the
// primary shards. If the index already has 0 replicas, we can skip the clone steps.
BranchingStep conditionalSkipCloneStep = new BranchingStep(
conditionalSkipCloneKey,
readOnlyKey,
forceMergeStepKey,
(index, project) -> {
IndexMetadata indexMetadata = project.index(index);
assert indexMetadata != null : "index " + index.getName() + " must exist in the cluster state";
return indexMetadata.getNumberOfReplicas() == 0;
}
);
ReadOnlyStep readOnlyStep = new ReadOnlyStep(readOnlyKey, cleanupClonedIndexKey, client, false);
// If a previous step created a clone index but the action did not complete, we need to clean up the old clone index.
CleanupGeneratedIndexStep cleanupClonedIndexStep = new CleanupGeneratedIndexStep(
cleanupClonedIndexKey,
generateCloneIndexNameKey,
client,
FORCE_MERGE_CLONE_INDEX_NAME_SUPPLIER
);
GenerateUniqueIndexNameStep generateCloneIndexNameStep = new GenerateUniqueIndexNameStep(
generateCloneIndexNameKey,
cloneIndexKey,
FORCE_MERGE_CLONE_INDEX_PREFIX,
(generatedIndexName, lifecycleStateBuilder) -> lifecycleStateBuilder.setForceMergeCloneIndexName(generatedIndexName)
);
// Clone the index with 0 replicas.
ResizeIndexStep cloneIndexStep = new ResizeIndexStep(
cloneIndexKey,
waitForClonedIndexGreenKey,
client,
ResizeType.CLONE,
FORCE_MERGE_CLONE_INDEX_NAME_SUPPLIER,
CLONE_SETTINGS_SUPPLIER,
null
);
// Wait for the cloned index to be green before proceeding with the force-merge. We wrap this with a
// ClusterStateWaitUntilThresholdStep to avoid waiting forever if the index cannot be started for some reason.
// On timeout, ILM will move back to the cleanup step, remove the cloned index, and retry the clone.
ClusterStateWaitUntilThresholdStep waitForClonedIndexGreenStep = new ClusterStateWaitUntilThresholdStep(
new WaitForIndexColorStep(
waitForClonedIndexGreenKey,
forceMergeStepKey,
ClusterHealthStatus.GREEN,
FORCE_MERGE_CLONE_INDEX_NAME_SUPPLIER
),
cleanupClonedIndexKey
);
ForceMergeStep forceMergeStep = new ForceMergeStep(
forceMergeStepKey,
waitForSegmentCountKey,
client,
1,
FORCE_MERGE_CLONE_INDEX_NAME_FALLBACK_SUPPLIER
);
SegmentCountStep segmentCountStep = new SegmentCountStep(
waitForSegmentCountKey,
generateSnapshotNameKey,
client,
1,
FORCE_MERGE_CLONE_INDEX_NAME_FALLBACK_SUPPLIER
);
GenerateSnapshotNameStep generateSnapshotNameStep = new GenerateSnapshotNameStep(
generateSnapshotNameKey,
cleanSnapshotKey,
snapshotRepository,
FORCE_MERGE_CLONE_INDEX_NAME_FALLBACK_SUPPLIER
);
CleanupSnapshotStep cleanupSnapshotStep = new CleanupSnapshotStep(cleanSnapshotKey, createSnapshotKey, client);
CreateSnapshotStep createSnapshotStep = new CreateSnapshotStep(createSnapshotKey, waitForDataTierKey, cleanSnapshotKey, client);
MountSearchableSnapshotRequest.Storage storageType = getConcreteStorageType(mountSnapshotKey);
// If the skipGeneratingSnapshotStep determined a snapshot already existed that
// can be used, it jumps directly here, skipping the snapshot generation steps above.
WaitForDataTierStep waitForDataTierStep = new WaitForDataTierStep(
waitForDataTierKey,
mountSnapshotKey,
MountSnapshotStep.overrideTierPreference(phase).orElse(storageType.defaultDataTiersPreference())
);
MountSnapshotStep mountSnapshotStep = new MountSnapshotStep(
mountSnapshotKey,
waitForGreenRestoredIndexKey,
client,
getRestoredIndexPrefix(mountSnapshotKey),
storageType,
totalShardsPerNode,
replicateFor != null ? 1 : 0 // if the 'replicate_for' option is set, then have a replica, otherwise don't
);
WaitForIndexColorStep waitForGreenIndexHealthStep = new WaitForIndexColorStep(
waitForGreenRestoredIndexKey,
copyMetadataKey,
ClusterHealthStatus.GREEN,
getRestoredIndexPrefix(waitForGreenRestoredIndexKey)
);
StepKey keyForReplicateForOrContinue = replicateFor != null ? replicateForKey : nextStepKey;
CopyExecutionStateStep copyMetadataStep = new CopyExecutionStateStep(
copyMetadataKey,
copyLifecyclePolicySettingKey,
(index, executionState) -> getRestoredIndexPrefix(copyMetadataKey) + index,
keyForReplicateForOrContinue
);
CopySettingsStep copySettingsStep = new CopySettingsStep(
copyLifecyclePolicySettingKey,
forceMergeIndex ? conditionalDeleteForceMergedIndexKey : dataStreamCheckBranchingKey,
(index, lifecycleState) -> getRestoredIndexPrefix(copyLifecyclePolicySettingKey) + index,
LifecycleSettings.LIFECYCLE_NAME
);
// If we cloned the index, we need to delete it before we swap the mounted snapshot in place of the original index.
// If we did not clone the index, there's nothing else for us to do.
BranchingStep conditionalDeleteForceMergedIndexStep = new BranchingStep(
conditionalDeleteForceMergedIndexKey,
dataStreamCheckBranchingKey,
deleteForceMergedIndexKey,
(index, project) -> {
IndexMetadata indexMetadata = project.index(index);
assert indexMetadata != null : "index " + index.getName() + " must exist in the cluster state";
String cloneIndexName = indexMetadata.getLifecycleExecutionState().forceMergeCloneIndexName();
return cloneIndexName != null && project.index(cloneIndexName) != null;
}
);
DeleteStep deleteForceMergedIndexStep = new DeleteStep(
deleteForceMergedIndexKey,
dataStreamCheckBranchingKey,
client,
FORCE_MERGE_CLONE_INDEX_NAME_SUPPLIER,
true
);
BranchingStep isDataStreamBranchingStep = new BranchingStep(
dataStreamCheckBranchingKey,
swapAliasesKey,
replaceDataStreamIndexKey,
(index, project) -> {
IndexAbstraction indexAbstraction = project.getIndicesLookup().get(index.getName());
assert indexAbstraction != null : "invalid cluster metadata. index [" + index.getName() + "] was not found";
return indexAbstraction.getParentDataStream() != null;
}
);
ReplaceDataStreamBackingIndexStep replaceDataStreamBackingIndex = new ReplaceDataStreamBackingIndexStep(
replaceDataStreamIndexKey,
deleteSourceIndexKey,
(index, executionState) -> getRestoredIndexPrefix(replaceDataStreamIndexKey) + index
);
DeleteStep deleteSourceIndexStep = new DeleteStep(deleteSourceIndexKey, null, client);
// sending this step to null as the restored index (which will after this step essentially be the source index) was sent to the next
// key after we restored the lifecycle execution state
SwapAliasesAndDeleteSourceIndexStep swapAliasesAndDeleteSourceIndexStep = new SwapAliasesAndDeleteSourceIndexStep(
swapAliasesKey,
null,
client,
getRestoredIndexPrefix(swapAliasesKey)
);
// note that the replicateForStep and dropReplicasStep will only be used if replicateFor != null, see the construction of
// the list of steps below
Step replicateForStep = new WaitUntilReplicateForTimePassesStep(replicateForKey, dropReplicasKey, replicateFor);
UpdateSettingsStep dropReplicasStep = new UpdateSettingsStep(
dropReplicasKey,
nextStepKey,
client,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
);
List<Step> steps = new ArrayList<>();
steps.add(conditionalSkipActionStep);
steps.add(checkNoWriteIndexStep);
steps.add(waitForNoFollowersStep);
steps.add(waitUntilTimeSeriesEndTimeStep);
steps.add(skipGeneratingSnapshotStep);
if (forceMergeIndex) {
if (shouldForceMergeOnClone()) {
steps.add(conditionalSkipCloneStep);
steps.add(readOnlyStep);
steps.add(cleanupClonedIndexStep);
steps.add(generateCloneIndexNameStep);
steps.add(cloneIndexStep);
steps.add(waitForClonedIndexGreenStep);
}
steps.add(forceMergeStep);
steps.add(segmentCountStep);
}
steps.add(generateSnapshotNameStep);
steps.add(cleanupSnapshotStep);
steps.add(createSnapshotStep);
steps.add(waitForDataTierStep);
steps.add(mountSnapshotStep);
steps.add(waitForGreenIndexHealthStep);
steps.add(copyMetadataStep);
steps.add(copySettingsStep);
if (replicateFor != null) {
steps.add(replicateForStep);
steps.add(dropReplicasStep);
}
if (forceMergeIndex) {
steps.add(conditionalDeleteForceMergedIndexStep);
steps.add(deleteForceMergedIndexStep);
}
steps.add(isDataStreamBranchingStep);
steps.add(replaceDataStreamBackingIndex);
steps.add(deleteSourceIndexStep);
steps.add(swapAliasesAndDeleteSourceIndexStep);
return steps;
}
/**
* Resolves the prefix to be used for the mounted index depending on the provided key
*/
static String getRestoredIndexPrefix(StepKey currentKey) {
if (currentKey.phase().equals(TimeseriesLifecycleType.FROZEN_PHASE)) {
return PARTIAL_RESTORED_INDEX_PREFIX;
} else {
return FULL_RESTORED_INDEX_PREFIX;
}
}
// Resolves the storage type depending on which phase the index is in
static MountSearchableSnapshotRequest.Storage getConcreteStorageType(StepKey currentKey) {
if (currentKey.phase().equals(TimeseriesLifecycleType.FROZEN_PHASE)) {
return MountSearchableSnapshotRequest.Storage.SHARED_CACHE;
} else {
return MountSearchableSnapshotRequest.Storage.FULL_COPY;
}
}
/**
* Returns whether we should first clone the index and perform the force-merge on that cloned index (true) or force-merge on the
* original index (false). Defaults to true when {@link #forceMergeOnClone} is null/unspecified. Note that this value is ignored when
* {@link #forceMergeIndex} is false.
*/
private boolean shouldForceMergeOnClone() {
return forceMergeOnClone == null || forceMergeOnClone;
}
@Override
public boolean isSafeAction() {
return true;
}
@Override
public String getWriteableName() {
return NAME;
}
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Wire format: fields must be written in this fixed order (the read side presumably
        // mirrors it — confirm against the StreamInput constructor). Newer optional fields
        // are gated on transport version so older nodes in a mixed cluster can still read us.
        out.writeString(snapshotRepository);
        out.writeBoolean(forceMergeIndex);
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
            out.writeOptionalInt(totalShardsPerNode);
        }
        if (out.getTransportVersion().supports(TransportVersions.V_8_18_0)) {
            out.writeOptionalTimeValue(replicateFor);
        }
        if (out.getTransportVersion().supports(FORCE_MERGE_ON_CLONE_TRANSPORT_VERSION)) {
            out.writeOptionalBoolean(forceMergeOnClone);
        }
    }
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        // Optional fields are only rendered when explicitly configured, keeping the
        // serialized policy minimal.
        builder.startObject();
        builder.field(SNAPSHOT_REPOSITORY.getPreferredName(), snapshotRepository);
        builder.field(FORCE_MERGE_INDEX.getPreferredName(), forceMergeIndex);
        if (totalShardsPerNode != null) {
            builder.field(TOTAL_SHARDS_PER_NODE.getPreferredName(), totalShardsPerNode);
        }
        if (replicateFor != null) {
            builder.field(REPLICATE_FOR.getPreferredName(), replicateFor);
        }
        if (forceMergeOnClone != null) {
            builder.field(FORCE_MERGE_ON_CLONE.getPreferredName(), forceMergeOnClone);
        }
        builder.endObject();
        return builder;
    }
    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        SearchableSnapshotAction that = (SearchableSnapshotAction) o;
        return Objects.equals(snapshotRepository, that.snapshotRepository)
            && Objects.equals(forceMergeIndex, that.forceMergeIndex)
            && Objects.equals(totalShardsPerNode, that.totalShardsPerNode)
            && Objects.equals(replicateFor, that.replicateFor)
            && Objects.equals(forceMergeOnClone, that.forceMergeOnClone);
    }
    @Override
    public int hashCode() {
        // Keep the hashed fields in sync with the fields compared in equals().
        return Objects.hash(snapshotRepository, forceMergeIndex, totalShardsPerNode, replicateFor, forceMergeOnClone);
    }
    /**
     * Reads the searchable-snapshot metadata recorded in an index's settings, or returns
     * {@code null} when the index is not a mounted searchable snapshot (no snapshot index
     * name present in its settings).
     */
    @Nullable
    static SearchableSnapshotMetadata extractSearchableSnapshotFromSettings(IndexMetadata indexMetadata) {
        String indexName = indexMetadata.getSettings().get(LifecycleSettings.SNAPSHOT_INDEX_NAME);
        if (indexName == null) {
            return null;
        }
        String snapshotName = indexMetadata.getSettings().get(SEARCHABLE_SNAPSHOTS_SNAPSHOT_NAME_SETTING_KEY);
        String repo = indexMetadata.getSettings().get(SEARCHABLE_SNAPSHOTS_REPOSITORY_NAME_SETTING_KEY);
        final boolean partial = indexMetadata.getSettings().getAsBoolean(SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY, false);
        return new SearchableSnapshotMetadata(indexName, repo, snapshotName, partial);
    }
    // Snapshot coordinates of a mounted searchable-snapshot index, as recorded in index settings.
    record SearchableSnapshotMetadata(String sourceIndex, String repositoryName, String snapshotName, boolean partial) {}
}
|
SearchableSnapshotAction
|
java
|
netty__netty
|
common/src/main/java/io/netty/util/NetUtil.java
|
{
"start": 1673,
"end": 1975
}
|
class ____ some of its methods from a modified fork of the
* <a href="https://svn.apache.org/repos/asf/harmony/enhanced/java/branches/java6/classlib/modules/luni/
* src/main/java/org/apache/harmony/luni/util/Inet6Util.java">Inet6Util class</a> which was part of Apache Harmony.
*/
public final
|
borrowed
|
java
|
elastic__elasticsearch
|
build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/GitInfoPlugin.java
|
{
"start": 993,
"end": 1962
}
|
class ____ implements Plugin<Project> {

    // ProviderFactory is injected once and never reassigned.
    private final ProviderFactory factory;
    private Provider<String> revision;

    @Inject
    public GitInfoPlugin(ProviderFactory factory) {
        this.factory = factory;
    }

    /**
     * Wires the {@code gitInfo} property from the resolved git root directory and derives
     * the {@code revision} provider from it.
     */
    @Override
    public void apply(Project project) {
        File rootDir = getGitRootDir(project);
        getGitInfo().convention(factory.of(GitInfoValueSource.class, spec -> { spec.getParameters().getPath().set(rootDir); }));
        // BUGFIX: the previous ternary was inverted — it returned null when git yielded no
        // revision and discarded the actual revision otherwise. Fall back to "main" only
        // when no revision is available.
        revision = getGitInfo().map(info -> info.getRevision() == null ? "main" : info.getRevision());
    }

    // Prefer the project root when it is itself a git checkout; otherwise locate the
    // enclosing Elasticsearch workspace.
    private static File getGitRootDir(Project project) {
        File rootDir = project.getRootDir();
        if (new File(rootDir, ".git").exists()) {
            return rootDir;
        }
        return Util.locateElasticsearchWorkspace(project.getGradle());
    }

    /** Lazily-populated git information for the build. */
    public abstract Property<GitInfo> getGitInfo();

    /** Revision derived from {@link #getGitInfo()}, defaulting to "main" when unknown. */
    public Provider<String> getRevision() {
        return revision;
    }
}
|
GitInfoPlugin
|
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/error/uri/ShouldHaveFragment_create_Test.java
|
{
"start": 947,
"end": 2169
}
|
class ____ {

  @Test
  void should_create_error_message_for_has_fragment() {
    // GIVEN
    URI uri = URI.create("http://assertj.org/news#print");
    TestDescription description = new TestDescription("TEST");
    // WHEN
    String message = shouldHaveFragment(uri, "foo").create(description);
    // THEN
    String expected = format("[TEST] %n"
                             + "Expecting fragment of%n"
                             + " <http://assertj.org/news#print>%n"
                             + "to be:%n"
                             + " <\"foo\">%n"
                             + "but was:%n"
                             + " <\"print\">");
    then(message).isEqualTo(expected);
  }

  @Test
  void should_create_error_message_for_has_no_fragment() {
    // GIVEN
    URI uri = URI.create("http://assertj.org/news#print");
    TestDescription description = new TestDescription("TEST");
    // WHEN
    String message = shouldHaveFragment(uri, null).create(description);
    // THEN
    String expected = format("[TEST] %n"
                             + "Expecting URI:%n"
                             + " <http://assertj.org/news#print>%n"
                             + "not to have a fragment but had:%n"
                             + " <\"print\">");
    then(message).isEqualTo(expected);
  }
}
|
ShouldHaveFragment_create_Test
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/executiongraph/failover/partitionrelease/PipelinedRegionExecutionViewTest.java
|
{
"start": 1426,
"end": 3618
}
|
class ____ {

    private static final ExecutionVertexID TEST_EXECUTION_VERTEX_ID =
            new ExecutionVertexID(new JobVertexID(), 0);

    private static final TestingSchedulingPipelinedRegion TEST_PIPELINED_REGION =
            new TestingSchedulingPipelinedRegion(
                    Collections.singleton(
                            TestingSchedulingExecutionVertex.withExecutionVertexID(
                                    TEST_EXECUTION_VERTEX_ID.getJobVertexId(),
                                    TEST_EXECUTION_VERTEX_ID.getSubtaskIndex())));

    /** Creates a fresh execution view over the single-vertex test region. */
    private static PipelinedRegionExecutionView newView() {
        return new PipelinedRegionExecutionView(TEST_PIPELINED_REGION);
    }

    @Test
    public void regionIsUnfinishedIfNotAllVerticesAreFinished() {
        assertThat(newView().isFinished()).isFalse();
    }

    @Test
    public void regionIsFinishedIfAllVerticesAreFinished() {
        final PipelinedRegionExecutionView view = newView();
        view.vertexFinished(TEST_EXECUTION_VERTEX_ID);
        assertThat(view.isFinished()).isTrue();
    }

    @Test
    public void vertexCanBeUnfinished() {
        final PipelinedRegionExecutionView view = newView();
        view.vertexFinished(TEST_EXECUTION_VERTEX_ID);
        view.vertexUnfinished(TEST_EXECUTION_VERTEX_ID);
        assertThat(view.isFinished()).isFalse();
    }

    @Test
    public void finishingUnknownVertexThrowsException() {
        final PipelinedRegionExecutionView view = newView();
        final ExecutionVertexID unknownVertexId = new ExecutionVertexID(new JobVertexID(), 0);
        assertThatThrownBy(() -> view.vertexFinished(unknownVertexId))
                .isInstanceOf(IllegalArgumentException.class);
    }
}
|
PipelinedRegionExecutionViewTest
|
java
|
alibaba__nacos
|
console/src/test/java/com/alibaba/nacos/console/controller/v3/naming/ConsoleServiceControllerTest.java
|
{
"start": 2608,
"end": 13142
}
|
class ____ {
    // Unit tests for the console service controller: service CRUD, selector parsing,
    // subscriber/list queries and cluster metadata updates. Collaborators are mocked.
    @Mock
    private ServiceProxy serviceProxy;
    @Mock
    private SelectorManager selectorManager;
    @InjectMocks
    private ConsoleServiceController consoleServiceController;
    @BeforeEach
    void setUp() {
        consoleServiceController = new ConsoleServiceController(serviceProxy, selectorManager);
    }
    @Test
    void testCreateService() throws Exception {
        ServiceForm serviceForm = new ServiceForm();
        serviceForm.setServiceName("testService");
        serviceForm.setNamespaceId("testNamespace");
        serviceForm.setGroupName("testGroup");
        serviceForm.setProtectThreshold(0.8f);
        serviceForm.setEphemeral(true);
        serviceForm.setMetadata("{\"key\":\"value\"}");
        serviceForm.setSelector("{\"type\":\"label\",\"expression\":\"role=admin\"}");
        when(selectorManager.parseSelector(any(String.class), any(String.class))).thenReturn(new LabelSelector());
        Result<String> actual = consoleServiceController.createService(serviceForm);
        verify(serviceProxy).createService(eq(serviceForm), any(ServiceMetadata.class));
        assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
        assertEquals("ok", actual.getData());
    }
    // Creating a service without any selector must also succeed.
    @Test
    void testCreateServiceWithoutSelector() throws Exception {
        ServiceForm serviceForm = new ServiceForm();
        serviceForm.setServiceName("testService");
        serviceForm.setNamespaceId("testNamespace");
        serviceForm.setGroupName("testGroup");
        serviceForm.setProtectThreshold(0.8f);
        serviceForm.setEphemeral(true);
        serviceForm.setMetadata("{\"key\":\"value\"}");
        Result<String> actual = consoleServiceController.createService(serviceForm);
        verify(serviceProxy).createService(eq(serviceForm), any(ServiceMetadata.class));
        assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
        assertEquals("ok", actual.getData());
    }
    // A selector payload missing the "type" field must be rejected.
    @Test
    void testCreateServiceWithoutSelectorType() throws Exception {
        ServiceForm serviceForm = new ServiceForm();
        serviceForm.setServiceName("testService");
        serviceForm.setNamespaceId("testNamespace");
        serviceForm.setGroupName("testGroup");
        serviceForm.setProtectThreshold(0.8f);
        serviceForm.setEphemeral(true);
        serviceForm.setMetadata("{\"key\":\"value\"}");
        serviceForm.setSelector("{\"expression\":\"role=admin\"}");
        assertThrows(NacosApiException.class, () -> consoleServiceController.createService(serviceForm));
    }
    // A selector type with no registered parser must be rejected.
    @Test
    void testCreateServiceNotFoundSelector() throws Exception {
        ServiceForm serviceForm = new ServiceForm();
        serviceForm.setServiceName("testService");
        serviceForm.setNamespaceId("testNamespace");
        serviceForm.setGroupName("testGroup");
        serviceForm.setProtectThreshold(0.8f);
        serviceForm.setEphemeral(true);
        serviceForm.setMetadata("{\"key\":\"value\"}");
        serviceForm.setSelector("{\"type\":\"non-exist\"}");
        assertThrows(NacosApiException.class, () -> consoleServiceController.createService(serviceForm));
    }
    @Test
    void testDeleteService() throws Exception {
        ServiceForm serviceForm = new ServiceForm();
        serviceForm.setNamespaceId("testNamespace");
        serviceForm.setServiceName("testService");
        serviceForm.setGroupName("testGroup");
        Result<String> actual = consoleServiceController.deleteService(serviceForm);
        verify(serviceProxy).deleteService(eq("testNamespace"), eq("testService"), eq("testGroup"));
        assertEquals("ok", actual.getData());
        assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
    }
    @Test
    void testUpdateService() throws Exception {
        ServiceForm serviceForm = new ServiceForm();
        serviceForm.setServiceName("testService");
        serviceForm.setNamespaceId("testNamespace");
        serviceForm.setGroupName("testGroup");
        serviceForm.setProtectThreshold(0.8f);
        serviceForm.setEphemeral(true);
        serviceForm.setMetadata("{\"key\":\"value\"}");
        serviceForm.setSelector("{\"type\":\"label\",\"expression\":\"role=admin\"}");
        when(selectorManager.parseSelector(any(String.class), any(String.class))).thenReturn(new LabelSelector());
        Result<String> actual = consoleServiceController.updateService(serviceForm);
        verify(serviceProxy).updateService(eq(serviceForm), any(ServiceMetadata.class));
        assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
        assertEquals("ok", actual.getData());
    }
    @Test
    void testGetServiceDetail() throws Exception {
        ServiceDetailInfo serviceDetail = new ServiceDetailInfo();
        serviceDetail.setNamespaceId("testNamespace");
        serviceDetail.setServiceName("testService");
        serviceDetail.setGroupName("testGroup");
        serviceDetail.setClusterMap(Collections.emptyMap());
        when(serviceProxy.getServiceDetail(any(String.class), any(String.class), any(String.class))).thenReturn(
                serviceDetail);
        ServiceForm serviceForm = new ServiceForm();
        serviceForm.setServiceName("testService");
        serviceForm.setNamespaceId("testNamespace");
        serviceForm.setGroupName("testGroup");
        Result<ServiceDetailInfo> actual = consoleServiceController.getServiceDetail(serviceForm);
        verify(serviceProxy).getServiceDetail(any(String.class), any(String.class), any(String.class));
        assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
        assertEquals(serviceDetail, actual.getData());
    }
    @Test
    void testGetSelectorTypeList() throws Exception {
        when(serviceProxy.getSelectorTypeList()).thenReturn(Collections.singletonList("label"));
        Result<List<String>> actual = consoleServiceController.getSelectorTypeList();
        verify(serviceProxy).getSelectorTypeList();
        assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
        assertEquals(1, actual.getData().size());
        assertEquals("label", actual.getData().get(0));
    }
    @Test
    void testGetSubscribers() throws Exception {
        // Stub a single-page subscriber result and verify it is passed through unchanged.
        Page<SubscriberInfo> subscribers = new Page<>();
        subscribers.setTotalCount(1);
        subscribers.setPagesAvailable(1);
        subscribers.setPageItems(Collections.singletonList(new SubscriberInfo()));
        subscribers.setPageNumber(1);
        subscribers.getPageItems().get(0).setNamespaceId("testNamespace");
        subscribers.getPageItems().get(0).setServiceName("testService");
        subscribers.getPageItems().get(0).setGroupName("testGroup");
        when(serviceProxy.getSubscribers(anyInt(), anyInt(), anyString(), anyString(), anyString(),
                anyBoolean())).thenReturn(subscribers);
        PageForm pageForm = new PageForm();
        pageForm.setPageNo(1);
        pageForm.setPageSize(10);
        ServiceForm serviceForm = new ServiceForm();
        serviceForm.setNamespaceId("testNamespace");
        serviceForm.setServiceName("testService");
        serviceForm.setGroupName("testGroup");
        AggregationForm aggregationForm = new AggregationForm();
        Result<Page<SubscriberInfo>> actual = consoleServiceController.subscribers(serviceForm, pageForm,
                aggregationForm);
        verify(serviceProxy).getSubscribers(anyInt(), anyInt(), anyString(), anyString(), anyString(), anyBoolean());
        assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
        assertEquals(1, actual.getData().getTotalCount());
        assertEquals(1, actual.getData().getPageItems().size());
        assertEquals("testGroup", actual.getData().getPageItems().get(0).getGroupName());
        assertEquals("testService", actual.getData().getPageItems().get(0).getServiceName());
        assertEquals("testNamespace", actual.getData().getPageItems().get(0).getNamespaceId());
    }
    @Test
    void testGetServiceList() throws Exception {
        Page<ServiceView> expected = new Page<>();
        expected.setTotalCount(1);
        expected.getPageItems().add(new ServiceView());
        when(serviceProxy.getServiceList(anyBoolean(), anyString(), anyInt(), anyInt(), anyString(), anyString(),
                anyBoolean())).thenReturn(expected);
        PageForm pageForm = new PageForm();
        pageForm.setPageNo(1);
        pageForm.setPageSize(10);
        ServiceListForm serviceForm = new ServiceListForm();
        serviceForm.setNamespaceId("testNamespace");
        serviceForm.setServiceNameParam("testService");
        serviceForm.setGroupNameParam("testGroup");
        Result<Object> actual = consoleServiceController.getServiceList(serviceForm, pageForm);
        verify(serviceProxy).getServiceList(anyBoolean(), anyString(), anyInt(), anyInt(), anyString(), anyString(),
                anyBoolean());
        assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
        assertInstanceOf(Page.class, actual.getData());
        assertEquals(1, ((Page<?>) actual.getData()).getPageItems().size());
    }
    @Test
    void testUpdateCluster() throws Exception {
        UpdateClusterForm updateClusterForm = getUpdateClusterForm();
        Result<String> actual = consoleServiceController.updateCluster(updateClusterForm);
        verify(serviceProxy).updateClusterMetadata(anyString(), anyString(), anyString(), anyString(),
                any(ClusterMetadata.class));
        assertEquals("ok", actual.getData());
        assertEquals(ErrorCode.SUCCESS.getCode(), actual.getCode());
    }
    // Builds a fully-populated cluster-update form for testUpdateCluster.
    private static UpdateClusterForm getUpdateClusterForm() {
        UpdateClusterForm updateClusterForm = new UpdateClusterForm();
        updateClusterForm.setNamespaceId("testNamespace");
        updateClusterForm.setGroupName("testGroup");
        updateClusterForm.setClusterName("testCluster");
        updateClusterForm.setServiceName("testService");
        updateClusterForm.setCheckPort(8080);
        updateClusterForm.setUseInstancePort4Check(true);
        updateClusterForm.setHealthChecker("{\"type\":\"TCP\"}");
        updateClusterForm.setMetadata("{\"key\":\"value\"}");
        return updateClusterForm;
    }
}
|
ConsoleServiceControllerTest
|
java
|
playframework__playframework
|
web/play-openid/src/main/java/play/libs/openid/OpenIdComponents.java
|
{
"start": 430,
"end": 819
}
|
interface ____ extends WSClientComponents, PekkoComponents {

  /** Discovery service used to resolve OpenID providers, backed by the WS client. */
  default Discovery openIdDiscovery() {
    return new WsDiscovery(wsClient().asScala(), executionContext());
  }

  /** Default OpenID client built on top of {@link #openIdDiscovery()}. */
  default OpenIdClient openIdClient() {
    WsOpenIdClient delegate = new WsOpenIdClient(wsClient().asScala(), openIdDiscovery(), executionContext());
    return new DefaultOpenIdClient(delegate, executionContext());
  }
}
|
OpenIdComponents
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/runtime/StringScriptFieldRegexpQueryTests.java
|
{
"start": 915,
"end": 4904
}
|
class ____ extends AbstractStringScriptFieldQueryTestCase<StringScriptFieldRegexpQuery> {
    @Override
    protected StringScriptFieldRegexpQuery createTestInstance() {
        // Randomly toggle ASCII case-insensitivity so both match-flag states are covered.
        int matchFlags = randomBoolean() ? 0 : RegExp.ASCII_CASE_INSENSITIVE;
        return new StringScriptFieldRegexpQuery(
            randomScript(),
            leafFactory,
            randomAlphaOfLength(5),
            randomAlphaOfLength(6),
            randomInt(RegExp.ALL),
            matchFlags,
            Operations.DEFAULT_DETERMINIZE_WORK_LIMIT
        );
    }
    @Override
    protected StringScriptFieldRegexpQuery copy(StringScriptFieldRegexpQuery orig) {
        // Exact copy: every field is taken from the original instance.
        return new StringScriptFieldRegexpQuery(
            orig.script(),
            leafFactory,
            orig.fieldName(),
            orig.pattern(),
            orig.syntaxFlags(),
            orig.matchFlags(),
            Operations.DEFAULT_DETERMINIZE_WORK_LIMIT
        );
    }
    @Override
    protected StringScriptFieldRegexpQuery mutate(StringScriptFieldRegexpQuery orig) {
        // Mutate exactly one field so equals/hashCode tests see a single difference.
        Script script = orig.script();
        String fieldName = orig.fieldName();
        String pattern = orig.pattern();
        int syntaxFlags = orig.syntaxFlags();
        int matchFlags = orig.matchFlags();
        switch (randomInt(4)) {
            case 0 -> script = randomValueOtherThan(script, this::randomScript);
            case 1 -> fieldName += "modified";
            case 2 -> pattern += "modified";
            case 3 -> syntaxFlags = randomValueOtherThan(syntaxFlags, () -> randomInt(RegExp.ALL));
            // Flip the case-insensitivity flag rather than randomizing, guaranteeing a change.
            case 4 -> matchFlags = (matchFlags & RegExp.ASCII_CASE_INSENSITIVE) != 0 ? 0 : RegExp.ASCII_CASE_INSENSITIVE;
            default -> fail();
        }
        return new StringScriptFieldRegexpQuery(
            script,
            leafFactory,
            fieldName,
            pattern,
            syntaxFlags,
            matchFlags,
            Operations.DEFAULT_DETERMINIZE_WORK_LIMIT
        );
    }
    @Override
    public void testMatches() {
        // Case-sensitive query: "a.+b" must match at least one value in the list.
        StringScriptFieldRegexpQuery query = new StringScriptFieldRegexpQuery(
            randomScript(),
            leafFactory,
            "test",
            "a.+b",
            0,
            0,
            Operations.DEFAULT_DETERMINIZE_WORK_LIMIT
        );
        BytesRefBuilder scratch = new BytesRefBuilder();
        assert
|
NetUtil
|
StringScriptFieldRegexpQueryTests
|
java
|
square__moshi
|
moshi/src/test/java/com/squareup/moshi/DeferredAdapterTest.java
|
{
"start": 1069,
"end": 3689
}
|
class ____ {
  /**
   * When a type's JsonAdapter is circularly-dependent, Moshi creates a 'deferred adapter' to make
   * the cycle work. It's important that any adapters that depend on this deferred adapter don't
   * leak out until it's ready.
   *
   * <p>This test sets up a circular dependency [BlueNode -> GreenNode -> BlueNode] and then tries
   * to use a GreenNode JSON adapter before the BlueNode JSON adapter is built. It creates a similar
   * cycle [BlueNode -> RedNode -> BlueNode] so the order adapters are retrieved is insignificant.
   *
   * <p>This used to trigger a crash because we'd incorrectly put the GreenNode JSON adapter in the
   * cache even though it depended upon an incomplete BlueNode JSON adapter.
   */
  @Test
  public void concurrentSafe() {
    final List<Throwable> failures = new ArrayList<>();
    JsonAdapter.Factory factory =
        new JsonAdapter.Factory() {
          int redAndGreenCount = 0;
          @Override
          public @Nullable JsonAdapter<?> create(
              Type type, Set<? extends Annotation> annotations, final Moshi moshi) {
            // On the second Red/Green lookup, exercise the half-built adapters from
            // another thread while the BlueNode adapter is still being constructed.
            if ((type == RedNode.class || type == GreenNode.class) && redAndGreenCount++ == 1) {
              doInAnotherThread(
                  new Runnable() {
                    @Override
                    public void run() {
                      GreenNode greenBlue = new GreenNode(new BlueNode(null, null));
                      assertThat(moshi.adapter(GreenNode.class).toJson(greenBlue))
                          .isEqualTo("{\"blue\":{}}");
                      RedNode redBlue = new RedNode(new BlueNode(null, null));
                      assertThat(moshi.adapter(RedNode.class).toJson(redBlue))
                          .isEqualTo("{\"blue\":{}}");
                    }
                  });
            }
            return null;
          }
        };
    Moshi moshi = new Moshi.Builder().add(factory).build();
    JsonAdapter<BlueNode> jsonAdapter = moshi.adapter(BlueNode.class);
    assertThat(jsonAdapter.toJson(new BlueNode(new GreenNode(new BlueNode(null, null)), null)))
        .isEqualTo("{\"green\":{\"blue\":{}}}");
    // NOTE(review): nothing ever adds to `failures` — doInAnotherThread rethrows instead,
    // so this assertion is vacuous; consider removing the list or collecting into it.
    assertThat(failures).isEmpty();
  }
private void doInAnotherThread(Runnable runnable) {
ExecutorService executor = Executors.newSingleThreadExecutor();
Future<?> future = executor.submit(runnable);
executor.shutdown();
try {
future.get();
} catch (InterruptedException e) {
throw new RuntimeException(e);
} catch (ExecutionException e) {
throw new RuntimeException(e.getCause());
}
}
static
|
DeferredAdapterTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/ActionErrorIntegrationTests.java
|
{
"start": 1418,
"end": 4970
}
|
class ____ extends AbstractWatcherIntegrationTestCase {
    /**
     * This test makes sure that when an action encounters an error it should
     * not be subject to throttling. Also, the ack status of the action in the
     * watch should remain awaits_successful_execution as long as the execution
     * fails.
     */
    public void testErrorInAction() throws Exception {
        createIndex("foo");
        // Block writes so that the watch's index action is guaranteed to fail.
        updateIndexSettings(Settings.builder().put("index.blocks.write", true), "foo");
        PutWatchResponse putWatchResponse = new PutWatchRequestBuilder(client(), "_id").setSource(
            watchBuilder().trigger(schedule(interval("10m")))
                // adding an action that throws an error and is associated with a 60 minute throttle period
                // with such a period, on successful execution we other executions of the watch will be
                // throttled within the hour... but on failed execution there should be no throttling
                .addAction("_action", TimeValue.timeValueMinutes(60), IndexAction.builder("foo"))
        ).get();
        assertThat(putWatchResponse.isCreated(), is(true));
        timeWarp().trigger("_id");
        flush();
        // there should be a single history record with a failure status for the action:
        assertBusy(() -> {
            try {
                long count = watchRecordCount(
                    QueryBuilders.boolQuery()
                        .must(termsQuery("result.actions.id", "_action"))
                        .must(termsQuery("result.actions.status", "failure"))
                );
                assertThat(count, is(1L));
            } catch (ElasticsearchException e) {
                /*
                 * Since the history is written asynchronously, it is possible that we try to query it after the history index is
                 * created, but before the shards are allocated, which throws an exception.
                 */
                throw new AssertionError(e);
            }
        });
        // now we'll trigger the watch again and make sure that it's not throttled and instead
        // writes another record to the history
        // within the 60 minute throttling period
        timeWarp().clock().fastForward(TimeValue.timeValueMinutes(randomIntBetween(1, 50)));
        timeWarp().trigger("_id");
        flush();
        // there should be a single history record with a failure status for the action:
        assertBusy(() -> {
            try {
                long count = watchRecordCount(
                    QueryBuilders.boolQuery()
                        .must(termsQuery("result.actions.id", "_action"))
                        .must(termsQuery("result.actions.status", "failure"))
                );
                assertThat(count, is(2L));
            } catch (ElasticsearchException e) {
                /*
                 * Since the history is written asynchronously, it is possible that we try to query it after the history index is
                 * created, but before the shards are allocated, which throws an exception.
                 */
                throw new AssertionError(e);
            }
        });
        // now lets confirm that the ack status of the action is awaits_successful_execution
        GetWatchResponse getWatchResponse = new GetWatchRequestBuilder(client(), "_id").get();
        XContentSource watch = getWatchResponse.getSource();
        watch.getValue("status.actions._action.ack.awaits_successful_execution");
    }
}
|
ActionErrorIntegrationTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/engine/internal/Nullability.java
|
{
"start": 1934,
"end": 2653
}
|
class ____
* @param isUpdate whether it is intended to be updated or saved
*
* @throws PropertyValueException Break the nullability of one property
* @throws HibernateException error while getting Component values
*
* @deprecated Use {@link #checkNullability(Object[], EntityPersister)}
*/
@Deprecated(forRemoval = true, since = "7")
public void checkNullability(
final Object[] values,
final EntityPersister persister,
final boolean isUpdate) {
checkType = isUpdate ? NullabilityCheckType.UPDATE : NullabilityCheckType.CREATE;
checkNullability( values, persister );
}
/**
* Check nullability of the entity properties
*
* @param values entity properties
* @param persister
|
persister
|
java
|
quarkusio__quarkus
|
test-framework/junit5/src/main/java/io/quarkus/test/junit/IntegrationTestUtil.java
|
{
"start": 21446,
"end": 21916
}
|
enum ____ {
        // Build tool that launched the test run — presumably used to locate build output;
        // TODO(review): confirm against determineBuildOutputDirectory's callers.
        MAVEN,
        GRADLE,
        UNKNOWN
    }
static Path determineBuildOutputDirectory(ExtensionContext context) {
String buildOutputDirStr = System.getProperty("build.output.directory");
Path result = null;
if (buildOutputDirStr != null) {
result = Paths.get(buildOutputDirStr);
} else {
// we need to guess where the artifact properties file is based on the location of the test
|
TestLauncher
|
java
|
hibernate__hibernate-orm
|
hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/HANALegacyDialect.java
|
{
"start": 9068,
"end": 45270
}
|
class ____ extends Dialect {
static final DatabaseVersion DEFAULT_VERSION = DatabaseVersion.make( 1, 0, 120 );
private final LockingSupport lockingSupport;
	public HANALegacyDialect(DialectResolutionInfo info) {
		this( HANAServerConfiguration.fromDialectResolutionInfo( info ), true );
		registerKeywords( info );
	}
	public HANALegacyDialect() {
		this( DEFAULT_VERSION );
	}
	public HANALegacyDialect(DatabaseVersion version) {
		this( new HANAServerConfiguration( version ), true );
	}
	public HANALegacyDialect(DatabaseVersion version, boolean defaultTableTypeColumn) {
		this( new HANAServerConfiguration( version ), defaultTableTypeColumn );
	}
	// Canonical constructor: every other constructor funnels here.
	public HANALegacyDialect(HANAServerConfiguration configuration, boolean defaultTableTypeColumn) {
		super( configuration.getFullVersion() );
		this.defaultTableTypeColumn = defaultTableTypeColumn;
		this.maxLobPrefetchSize = configuration.getMaxLobPrefetchSize();
		this.useUnicodeStringTypes = useUnicodeStringTypesDefault();
		this.lockingSupport = HANALockingSupport.forDialectVersion( configuration.getFullVersion() );
	}
	// NOTE(review): not referenced from the visible portion of this class (the canonical
	// constructor uses HANALockingSupport.forDialectVersion instead) — verify whether this
	// method is dead code before removing it.
	private LockingSupport buildLockingSupport() {
		// HANA supports IGNORE LOCKED since HANA 2.0 SPS3 (2.0.030)
		final boolean supportsSkipLocked = getVersion().isSameOrAfter(2, 0, 30);
		return new HANALockingSupport( supportsSkipLocked );
	}
	@Override
	public DatabaseVersion determineDatabaseVersion(DialectResolutionInfo info) {
		return HANALegacyServerConfiguration.determineDatabaseVersion( info );
	}
// Use column or row tables by default
public static final String USE_DEFAULT_TABLE_TYPE_COLUMN = "hibernate.dialect.hana.use_default_table_type_column";
// Use TINYINT instead of the native BOOLEAN type
private static final String USE_LEGACY_BOOLEAN_TYPE_PARAMETER_NAME = "hibernate.dialect.hana.use_legacy_boolean_type";
// Use unicode (NVARCHAR, NCLOB, etc.) instead of non-unicode (VARCHAR, CLOB) string types
private static final String USE_UNICODE_STRING_TYPES_PARAMETER_NAME = "hibernate.dialect.hana.use_unicode_string_types";
// Read and write double-typed fields as BigDecimal instead of Double to get around precision issues of the HANA
// JDBC driver (https://service.sap.com/sap/support/notes/2590160)
private static final String TREAT_DOUBLE_TYPED_FIELDS_AS_DECIMAL_PARAMETER_NAME = "hibernate.dialect.hana.treat_double_typed_fields_as_decimal";
private static final Boolean USE_LEGACY_BOOLEAN_TYPE_DEFAULT_VALUE = Boolean.FALSE;
private static final Boolean TREAT_DOUBLE_TYPED_FIELDS_AS_DECIMAL_DEFAULT_VALUE = Boolean.FALSE;
private static final String SQL_IGNORE_LOCKED = " ignore locked";
private final int maxLobPrefetchSize;
private boolean defaultTableTypeColumn;
private boolean useLegacyBooleanType = USE_LEGACY_BOOLEAN_TYPE_DEFAULT_VALUE;
private boolean useUnicodeStringTypes;
private boolean treatDoubleTypedFieldsAsDecimal;
	/*
	 * Tables named "TYPE" need to be quoted: TYPE is a reserved word on HANA, so the
	 * generated create/drop/comment DDL must reference it as "TYPE".
	 */
	private final StandardTableExporter hanaTableExporter = new StandardTableExporter( this ) {
		@Override
		public String[] getSqlCreateStrings(Table table, Metadata metadata, SqlStringGenerationContext context) {
			String[] sqlCreateStrings = super.getSqlCreateStrings( table, metadata, context );
			return quoteTypeIfNecessary( table, sqlCreateStrings, getCreateTableString() );
		}
		@Override
		public String[] getSqlDropStrings(Table table, Metadata metadata, SqlStringGenerationContext context) {
			String[] sqlDropStrings = super.getSqlDropStrings( table, metadata, context );
			return quoteTypeIfNecessary( table, sqlDropStrings, "drop table" );
		}
		// Rewrites generated DDL so an unquoted table named "type" becomes "TYPE".
		// Tables that are already quoted or named differently are passed through untouched.
		private String[] quoteTypeIfNecessary(Table table, String[] strings, String prefix) {
			if ( table.getNameIdentifier() == null || table.getNameIdentifier().isQuoted()
					|| !"type".equalsIgnoreCase( table.getNameIdentifier().getText() ) ) {
				return strings;
			}
			Pattern createTableTypePattern = Pattern.compile( "(" + prefix + "\\s+)(" + table.getNameIdentifier().getText() + ")(.+)" );
			Pattern commentOnTableTypePattern = Pattern.compile( "(comment\\s+on\\s+table\\s+)(" + table.getNameIdentifier().getText() + ")(.+)" );
			for ( int i = 0; i < strings.length; i++ ) {
				Matcher createTableTypeMatcher = createTableTypePattern.matcher( strings[i] );
				Matcher commentOnTableTypeMatcher = commentOnTableTypePattern.matcher( strings[i] );
				if ( createTableTypeMatcher.matches() ) {
					strings[i] = createTableTypeMatcher.group( 1 ) + "\"TYPE\"" + createTableTypeMatcher.group( 3 );
				}
				if ( commentOnTableTypeMatcher.matches() ) {
					strings[i] = commentOnTableTypeMatcher.group( 1 ) + "\"TYPE\"" + commentOnTableTypeMatcher.group( 3 );
				}
			}
			return strings;
		}
	};
	@Override
	public void contribute(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
		// This is the best hook for consuming dialect configuration that we have for now,
		// since this method is called very early in the bootstrap process
		final ConfigurationService configurationService = serviceRegistry.requireService( ConfigurationService.class );
		this.defaultTableTypeColumn = configurationService.getSetting(
				USE_DEFAULT_TABLE_TYPE_COLUMN,
				StandardConverters.BOOLEAN,
				this.defaultTableTypeColumn
		);
		// Unicode string types are only configurable when ASCII string types exist at all.
		if ( supportsAsciiStringTypes() ) {
			this.useUnicodeStringTypes = configurationService.getSetting(
					USE_UNICODE_STRING_TYPES_PARAMETER_NAME,
					StandardConverters.BOOLEAN,
					useUnicodeStringTypesDefault()
			);
		}
		this.useLegacyBooleanType = configurationService.getSetting(
				USE_LEGACY_BOOLEAN_TYPE_PARAMETER_NAME,
				StandardConverters.BOOLEAN,
				USE_LEGACY_BOOLEAN_TYPE_DEFAULT_VALUE
		);
		this.treatDoubleTypedFieldsAsDecimal = configurationService.getSetting(
				TREAT_DOUBLE_TYPED_FIELDS_AS_DECIMAL_PARAMETER_NAME,
				StandardConverters.BOOLEAN,
				TREAT_DOUBLE_TYPED_FIELDS_AS_DECIMAL_DEFAULT_VALUE
		);
		super.contribute( typeContributions, serviceRegistry );
	}
	protected boolean isDefaultTableTypeColumn() {
		return defaultTableTypeColumn;
	}
	// Version 4+ of this dialect family is treated as HANA Cloud.
	protected boolean isCloud() {
		return getVersion().isSameOrAfter( 4 );
	}
	@Override
	protected String columnType(int sqlTypeCode) {
		return switch ( sqlTypeCode ) {
			case BOOLEAN -> useLegacyBooleanType ? "tinyint" : super.columnType( sqlTypeCode );
			//there is no 'numeric' type in HANA
			case NUMERIC -> columnType( DECIMAL );
			//'double precision' syntax not supported
			case DOUBLE -> "double";
			//no explicit precision
			case TIME, TIME_WITH_TIMEZONE -> "time";
			case TIMESTAMP, TIMESTAMP_WITH_TIMEZONE -> "timestamp";
			//there is no 'char' or 'nchar' type in HANA
			case CHAR, VARCHAR -> isUseUnicodeStringTypes() ? columnType( NVARCHAR ) : super.columnType( VARCHAR );
			case NCHAR -> columnType( NVARCHAR );
			case LONG32VARCHAR -> isUseUnicodeStringTypes() ? columnType( LONG32NVARCHAR ) : super.columnType( LONG32VARCHAR );
			case CLOB -> isUseUnicodeStringTypes() ? columnType( NCLOB ) : super.columnType( CLOB );
			// map tinyint to smallint since tinyint is unsigned on HANA
			case TINYINT -> "smallint";
			default -> super.columnType( sqlTypeCode );
		};
	}
	@Override
	protected void registerColumnTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
		super.registerColumnTypes( typeContributions, serviceRegistry );
		final DdlTypeRegistry ddlTypeRegistry = typeContributions.getTypeConfiguration().getDdlTypeRegistry();
		// varbinary max length 5000; anything longer falls back to blob
		ddlTypeRegistry.addDescriptor(
				CapacityDependentDdlType.builder( BINARY, CapacityDependentDdlType.LobKind.BIGGEST_LOB, "blob", this )
						.withTypeCapacity( getMaxVarbinaryLength(), "varbinary($l)" )
						.build()
		);
		ddlTypeRegistry.addDescriptor( new DdlTypeImpl( GEOMETRY, "st_geometry", this ) );
		ddlTypeRegistry.addDescriptor( new DdlTypeImpl( POINT, "st_point", this ) );
	}
	@Override
	public boolean getDefaultNonContextualLobCreation() {
		// createBlob() and createClob() are not supported by the HANA JDBC driver
		return true;
	}
	@Override
	public boolean getDefaultUseGetGeneratedKeys() {
		// getGeneratedKeys() is not supported by the HANA JDBC driver
		return false;
	}
/**
 * Renders a cast pattern, decoding integer- and character-encoded boolean
 * representations to HANA's native boolean with an explicit case expression;
 * everything else is delegated to the standard handling.
 */
@Override
public String castPattern(CastType from, CastType to) {
	if ( to == CastType.BOOLEAN ) {
		final String decoded = switch ( from ) {
			case INTEGER_BOOLEAN, INTEGER, LONG ->
					"case ?1 when 1 then true when 0 then false else null end";
			case YN_BOOLEAN ->
					"case ?1 when 'Y' then true when 'N' then false else null end";
			case TF_BOOLEAN ->
					"case ?1 when 'T' then true when 'F' then false else null end";
			default -> null;
		};
		if ( decoded != null ) {
			return decoded;
		}
	}
	return super.castPattern( from, to );
}
@Override
public int getDefaultTimestampPrecision() {
return 7;
}
@Override
public int getDefaultDecimalPrecision() {
//the maximum on HANA
return 34;
}
@Override
public int getMaxVarcharLength() {
return 5000;
}
@Override
public int getMaxNVarcharLength() {
return 5000;
}
@Override
public int getMaxVarbinaryLength() {
return 5000;
}
@Override
public void initializeFunctionRegistry(FunctionContributions functionContributions) {
super.initializeFunctionRegistry(functionContributions);
final TypeConfiguration typeConfiguration = functionContributions.getTypeConfiguration();
functionContributions.getFunctionRegistry().registerBinaryTernaryPattern(
"locate",
typeConfiguration.getBasicTypeRegistry().resolve( StandardBasicTypes.INTEGER ),
"locate(?2,?1)",
"locate(?2,?1,?3)",
FunctionParameterType.STRING, FunctionParameterType.STRING, FunctionParameterType.INTEGER,
typeConfiguration
).setArgumentListSignature("(pattern, string[, start])");
CommonFunctionFactory functionFactory = new CommonFunctionFactory(functionContributions);
functionFactory.ceiling_ceil();
functionFactory.concat_pipeOperator();
functionFactory.trim2();
functionFactory.cot();
functionFactory.cosh();
functionFactory.sinh();
functionFactory.tanh();
functionFactory.trunc_roundMode();
functionFactory.log10_log();
functionFactory.log();
functionFactory.bitand();
functionFactory.bitor();
functionFactory.bitxor();
functionFactory.bitnot();
functionFactory.hourMinuteSecond();
functionFactory.yearMonthDay();
functionFactory.dayofweekmonthyear();
functionFactory.weekQuarter();
functionFactory.daynameMonthname();
functionFactory.lastDay();
functionFactory.characterLength_length( SqlAstNodeRenderingMode.DEFAULT );
functionFactory.ascii();
functionFactory.chr_char();
functionFactory.addYearsMonthsDaysHoursMinutesSeconds();
functionFactory.daysBetween();
functionFactory.secondsBetween();
functionFactory.format_toVarchar();
functionFactory.currentUtcdatetimetimestamp();
functionFactory.everyAny_minMaxCase();
functionFactory.octetLength_pattern( "length(to_binary(?1))" );
functionFactory.bitLength_pattern( "length(to_binary(?1))*8" );
functionFactory.repeat_rpad();
functionFactory.median();
functionFactory.windowFunctions();
functionFactory.listagg_stringAgg( "varchar" );
functionFactory.inverseDistributionOrderedSetAggregates();
functionFactory.hypotheticalOrderedSetAggregates_windowEmulation();
functionFactory.radians_acos();
functionFactory.degrees_acos();
functionContributions.getFunctionRegistry().register( "timestampadd",
new IntegralTimestampaddFunction( this, typeConfiguration ) );
// full-text search functions
functionContributions.getFunctionRegistry().registerNamed(
"score",
typeConfiguration.getBasicTypeRegistry().resolve( StandardBasicTypes.DOUBLE )
);
functionContributions.getFunctionRegistry().registerNamed( "snippets" );
functionContributions.getFunctionRegistry().registerNamed( "highlighted" );
functionContributions.getFunctionRegistry().registerBinaryTernaryPattern(
"contains",
typeConfiguration.getBasicTypeRegistry().resolve( StandardBasicTypes.BOOLEAN ),
"contains(?1,?2)",
"contains(?1,?2,?3)",
ANY, ANY, ANY,
typeConfiguration
);
if ( getVersion().isSameOrAfter( 2, 0 ) ) {
// Introduced in 2.0 SPS 00
functionFactory.jsonValue_no_passing();
functionFactory.jsonQuery_no_passing();
functionFactory.jsonExists_hana();
functionFactory.unnest_hana();
functionFactory.jsonTable_hana();
functionFactory.generateSeries_hana( getMaximumSeriesSize() );
if ( getVersion().isSameOrAfter(2, 0, 20 ) ) {
if ( getVersion().isSameOrAfter( 2, 0, 40 ) ) {
// Introduced in 2.0 SPS 04
functionFactory.jsonObject_hana();
functionFactory.jsonArray_hana();
functionFactory.jsonArrayAgg_hana();
functionFactory.jsonObjectAgg_hana();
}
functionFactory.xmltable_hana();
}
// functionFactory.xmlextract();
}
functionFactory.regexpLike_like_regexp();
}
/**
* HANA doesn't support the {@code generate_series} function or {@code lateral} recursive CTEs,
* so it has to be emulated with the {@code xmltable} and {@code lpad} functions.
*/
protected int getMaximumSeriesSize() {
return 10000;
}
@Override
public SqlAstTranslatorFactory getSqlAstTranslatorFactory() {
return new StandardSqlAstTranslatorFactory() {
@Override
protected <T extends JdbcOperation> SqlAstTranslator<T> buildTranslator(
SessionFactoryImplementor sessionFactory, org.hibernate.sql.ast.tree.Statement statement) {
return new HANASqlAstTranslator<>( sessionFactory, statement );
}
};
}
@Override
public AggregateSupport getAggregateSupport() {
return HANAAggregateSupport.valueOf( this );
}
/**
 * HANA has no extract() function, but we can emulate
 * it using the appropriate named functions instead of
 * extract().
 *
 * The supported fields are
 * {@link TemporalUnit#YEAR},
 * {@link TemporalUnit#MONTH}
 * {@link TemporalUnit#DAY},
 * {@link TemporalUnit#HOUR},
 * {@link TemporalUnit#MINUTE},
 * {@link TemporalUnit#SECOND}
 * {@link TemporalUnit#WEEK},
 * {@link TemporalUnit#QUARTER},
 * {@link TemporalUnit#EPOCH},
 * {@link TemporalUnit#DAY_OF_WEEK},
 * {@link TemporalUnit#DAY_OF_MONTH},
 * {@link TemporalUnit#DAY_OF_YEAR}.
 */
@Override
public String extractPattern(TemporalUnit unit) {
	return switch (unit) {
		// shifts weekday()'s result so that 1 = Sunday ... 7 = Saturday
		// NOTE(review): assumes weekday() is 0-based from Monday — confirm against HANA docs
		case DAY_OF_WEEK -> "(mod(weekday(?2)+1,7)+1)";
		case DAY, DAY_OF_MONTH -> "dayofmonth(?2)";
		case DAY_OF_YEAR -> "dayofyear(?2)";
		// integer division: months 1-3 -> 1, 4-6 -> 2, etc.
		case QUARTER -> "((month(?2)+2)/3)";
		// seconds since the Unix epoch
		case EPOCH -> "seconds_between('1970-01-01', ?2)";
		//I think week() returns the ISO week number
		// remaining units map directly to a HANA function of the same name
		default -> "?1(?2)";
	};
}
/**
 * Translates HANA vendor error codes into more specific
 * {@code JDBCException} subtypes; returning {@code null} from the delegate
 * falls back to the default conversion.
 */
@Override
public SQLExceptionConversionDelegate buildSQLExceptionConversionDelegate() {
	return (sqlException, message, sql) -> {
		final int errorCode = JdbcExceptionHelper.extractErrorCode( sqlException );
		if ( errorCode == 131 ) {
			// 131 - Transaction rolled back by lock wait timeout
			return new LockTimeoutException( message, sqlException, sql );
		}
		if ( errorCode == 146 ) {
			// 146 - Resource busy and acquire with NOWAIT specified
			return new LockTimeoutException( message, sqlException, sql );
		}
		if ( errorCode == 132 ) {
			// 132 - Transaction rolled back due to unavailable resource
			return new LockAcquisitionException( message, sqlException, sql );
		}
		if ( errorCode == 133 ) {
			// 133 - Transaction rolled back by detected deadlock
			return new LockAcquisitionException( message, sqlException, sql );
		}
		// 257 - grouped here as a grammar error
		//       NOTE(review): presumably a syntax error code — confirm against HANA docs
		// 259 - Invalid table name
		// 260 - Invalid column name
		// 261 - Invalid index name
		// 262 - Invalid query name
		// 263 - Invalid alias name
		if ( errorCode == 257 || ( errorCode >= 259 && errorCode <= 263 ) ) {
			return new SQLGrammarException( message, sqlException, sql );
		}
		// 287 - Cannot insert NULL or update to NULL
		// 301 - Unique constraint violated
		// 461 - foreign key constraint violation
		// 462 - failed on update or delete by foreign key constraint violation
		if ( errorCode == 287 || errorCode == 301 || errorCode == 461 || errorCode == 462 ) {
			final String constraintName = getViolatedConstraintNameExtractor()
					.extractConstraintName( sqlException );
			return new ConstraintViolationException(
					message,
					sqlException,
					sql,
					// only 301 is a unique-key violation; the others are NOT NULL / FK
					errorCode == 301
							? ConstraintViolationException.ConstraintKind.UNIQUE
							: ConstraintViolationException.ConstraintKind.OTHER,
					constraintName
			);
		}
		return null;
	};
}
@Override
public LockingSupport getLockingSupport() {
return lockingSupport;
}
@Override
public String getCreateTableString() {
return isDefaultTableTypeColumn() ? "create column table" : "create row table";
}
@Override
public String getAddColumnString() {
return "add (";
}
@Override
public String getAddColumnSuffixString() {
return ")";
}
@Override
public String getCascadeConstraintsString() {
return " cascade";
}
@Override
public String getCurrentTimestampSelectString() {
return "select current_timestamp from sys.dummy";
}
@Override
public String getForUpdateString(final String aliases) {
return getForUpdateString() + " of " + aliases;
}
@Override
public String getForUpdateString(final String aliases, final LockOptions lockOptions) {
	// not sure why this is sometimes empty
	if ( aliases == null || aliases.isEmpty() ) {
		return getForUpdateString( lockOptions );
	}
	return getForUpdateString( aliases, lockOptions.getLockMode(), lockOptions.getTimeout() );
}

// Renders the locking clause for the given aliases according to the lock
// mode and its timeout; non-pessimistic modes produce no clause at all.
private String getForUpdateString(String aliases, LockMode lockMode, Timeout timeout) {
	return switch ( lockMode ) {
		case PESSIMISTIC_READ -> getReadLockString( aliases, timeout );
		case PESSIMISTIC_WRITE -> getWriteLockString( aliases, timeout );
		case UPGRADE_NOWAIT, PESSIMISTIC_FORCE_INCREMENT -> getForUpdateNowaitString( aliases );
		case UPGRADE_SKIPLOCKED -> getForUpdateSkipLockedString( aliases );
		default -> "";
	};
}
@Override
public String getForUpdateNowaitString() {
return getForUpdateString() + " nowait";
}
@Override
public String getQuerySequencesString() {
return "select * from sys.sequences";
}
@Override
public SequenceInformationExtractor getSequenceInformationExtractor() {
return SequenceInformationExtractorHANADatabaseImpl.INSTANCE;
}
@Override
public boolean isCurrentTimestampSelectStringCallable() {
return false;
}
@Override
protected void registerDefaultKeywords() {
super.registerDefaultKeywords();
// https://help.sap.com/docs/SAP_HANA_PLATFORM/4fe29514fd584807ac9f2a04f6754767/28bcd6af3eb6437892719f7c27a8a285.html?locale=en-US
registerKeyword( "all" );
registerKeyword( "alter" );
registerKeyword( "as" );
registerKeyword( "before" );
registerKeyword( "begin" );
registerKeyword( "both" );
registerKeyword( "case" );
registerKeyword( "char" );
registerKeyword( "condition" );
registerKeyword( "connect" );
registerKeyword( "cross" );
registerKeyword( "cube" );
registerKeyword( "current_connection" );
registerKeyword( "current_date" );
registerKeyword( "current_schema" );
registerKeyword( "current_time" );
registerKeyword( "current_timestamp" );
registerKeyword( "current_transaction_isolation_level" );
registerKeyword( "current_user" );
registerKeyword( "current_utcdate" );
registerKeyword( "current_utctime" );
registerKeyword( "current_utctimestamp" );
registerKeyword( "currval" );
registerKeyword( "cursor" );
registerKeyword( "declare" );
registerKeyword( "deferred" );
registerKeyword( "distinct" );
registerKeyword( "else" );
registerKeyword( "elseif" );
registerKeyword( "end" );
registerKeyword( "except" );
registerKeyword( "exception" );
registerKeyword( "exec" );
registerKeyword( "false" );
registerKeyword( "for" );
registerKeyword( "from" );
registerKeyword( "full" );
registerKeyword( "group" );
registerKeyword( "having" );
registerKeyword( "if" );
registerKeyword( "in" );
registerKeyword( "inner" );
registerKeyword( "inout" );
registerKeyword( "intersect" );
registerKeyword( "into" );
registerKeyword( "is" );
registerKeyword( "join" );
registerKeyword( "lateral" );
registerKeyword( "leading" );
registerKeyword( "left" );
registerKeyword( "limit" );
registerKeyword( "loop" );
registerKeyword( "minus" );
registerKeyword( "natural" );
registerKeyword( "nchar" );
registerKeyword( "nextval" );
registerKeyword( "null" );
registerKeyword( "on" );
registerKeyword( "order" );
registerKeyword( "out" );
registerKeyword( "prior" );
registerKeyword( "return" );
registerKeyword( "returns" );
registerKeyword( "reverse" );
registerKeyword( "right" );
registerKeyword( "rollup" );
registerKeyword( "rowid" );
registerKeyword( "select" );
registerKeyword( "session_user" );
registerKeyword( "set" );
registerKeyword( "sql" );
registerKeyword( "start" );
registerKeyword( "sysuuid" );
registerKeyword( "tablesample" );
registerKeyword( "top" );
registerKeyword( "trailing" );
registerKeyword( "true" );
registerKeyword( "union" );
registerKeyword( "unknown" );
registerKeyword( "using" );
registerKeyword( "utctimestamp" );
registerKeyword( "values" );
registerKeyword( "when" );
registerKeyword( "where" );
registerKeyword( "while" );
registerKeyword( "with" );
if ( isCloud() ) {
// https://help.sap.com/docs/hana-cloud-database/sap-hana-cloud-sap-hana-database-sql-reference-guide/reserved-words
registerKeyword( "array" );
registerKeyword( "at" );
registerKeyword( "authorization" );
registerKeyword( "between" );
registerKeyword( "by" );
registerKeyword( "collate" );
registerKeyword( "empty" );
registerKeyword( "filter" );
registerKeyword( "grouping" );
registerKeyword( "no" );
registerKeyword( "not" );
registerKeyword( "of" );
registerKeyword( "over" );
registerKeyword( "recursive" );
registerKeyword( "row" );
registerKeyword( "table" );
registerKeyword( "to" );
registerKeyword( "unnest" );
registerKeyword( "window" );
registerKeyword( "within" );
}
}
@Override
public ScrollMode defaultScrollMode() {
return ScrollMode.FORWARD_ONLY;
}
/**
* HANA currently does not support check constraints.
*/
@Override
public boolean supportsColumnCheck() {
return false;
}
@Override
public boolean supportsCurrentTimestampSelection() {
return true;
}
@Override
public boolean doesRoundTemporalOnOverflow() {
// HANA does truncation
return false;
}
@Override
public boolean supportsExistsInSelect() {
return false;
}
@Override
public boolean supportsExpectedLobUsagePattern() {
// http://scn.sap.com/thread/3221812
return false;
}
@Override
public boolean supportsUnboundedLobLocatorMaterialization() {
return false;
}
@Override
public SequenceSupport getSequenceSupport() {
return HANASequenceSupport.INSTANCE;
}
@Override
public boolean dropConstraints() {
return false;
}
@Override
public int getMaxAliasLength() {
return 128;
}
@Override
public int getMaxIdentifierLength() {
return 127;
}
@Override
public LimitHandler getLimitHandler() {
return LimitOffsetLimitHandler.INSTANCE;
}
@Override
public String getSelectGUIDString() {
return "select sysuuid from sys.dummy";
}
@Override
public NameQualifierSupport getNameQualifierSupport() {
return NameQualifierSupport.SCHEMA;
}
/**
 * Builds an identifier helper with HANA's case conventions (quoted
 * identifiers keep their case, unquoted identifiers fold to upper case)
 * and decorates the standard helper so that identifiers containing
 * characters outside of {@code \w} (e.g. ':') are forced into quoted form.
 */
@Override
public IdentifierHelper buildIdentifierHelper(IdentifierHelperBuilder builder, DatabaseMetaData metadata)
		throws SQLException {
	/*
	 * HANA-specific extensions
	 */
	builder.setQuotedCaseStrategy( IdentifierCaseStrategy.MIXED );
	builder.setUnquotedCaseStrategy( IdentifierCaseStrategy.UPPER );
	final IdentifierHelper identifierHelper = super.buildIdentifierHelper( builder, metadata );
	return new IdentifierHelper() {
		// the standard helper everything is delegated to
		private final IdentifierHelper helper = identifierHelper;
		@Override
		public String toMetaDataSchemaName(Identifier schemaIdentifier) {
			return this.helper.toMetaDataSchemaName( schemaIdentifier );
		}
		@Override
		public String toMetaDataObjectName(Identifier identifier) {
			return this.helper.toMetaDataObjectName( identifier );
		}
		@Override
		public String toMetaDataCatalogName(Identifier catalogIdentifier) {
			return this.helper.toMetaDataCatalogName( catalogIdentifier );
		}
		@Override
		public Identifier toIdentifier(String text) {
			// route through normalizeQuoting so special characters get quoted
			return normalizeQuoting( Identifier.toIdentifier( text ) );
		}
		@Override
		public Identifier toIdentifier(String text, boolean quoted) {
			return normalizeQuoting( Identifier.toIdentifier( text, quoted ) );
		}
		@Override
		public Identifier normalizeQuoting(Identifier identifier) {
			Identifier normalizedIdentifier = this.helper.normalizeQuoting( identifier );
			if ( normalizedIdentifier == null ) {
				return null;
			}
			// need to quote names containing special characters like ':'
			if ( !normalizedIdentifier.isQuoted() && !normalizedIdentifier.getText().matches( "\\w+" ) ) {
				normalizedIdentifier = normalizedIdentifier.quoted();
			}
			return normalizedIdentifier;
		}
		@Override
		public boolean isReservedWord(String word) {
			return this.helper.isReservedWord( word );
		}
		@Override
		public Identifier applyGlobalQuoting(String text) {
			return this.helper.applyGlobalQuoting( text );
		}
	};
}
@Override
public String getCurrentSchemaCommand() {
return "select current_schema from sys.dummy";
}
@Override
public String getForUpdateNowaitString(String aliases) {
return getForUpdateString( aliases ) + " nowait";
}
@Override
public String getReadLockString(Timeout timeout) {
return getWriteLockString( timeout );
}
@Override
public String getForUpdateString(Timeout timeout) {
return withTimeout( getForUpdateString(), timeout.milliseconds() );
}
@Override
public String getReadLockString(String aliases, Timeout timeout) {
return getWriteLockString( aliases, timeout );
}
@Override
public String getWriteLockString(String aliases, Timeout timeout) {
return withTimeout( getForUpdateString( aliases ), timeout.milliseconds() );
}
@Override
public String getReadLockString(int timeout) {
return getWriteLockString( timeout );
}
@Override
public String getReadLockString(String aliases, int timeout) {
return getWriteLockString( aliases, timeout );
}
@Override
public String getWriteLockString(String aliases, int timeout) {
return withTimeout( getForUpdateString( aliases ), timeout );
}
// Appends the timeout qualifier to a locking clause. The special millisecond
// markers request nowait / skip-locked behavior; options the dialect does not
// support degrade gracefully to the plain lock string.
private String withTimeout(String lockString, int timeout) {
	return switch (timeout) {
		case Timeouts.NO_WAIT_MILLI -> supportsNoWait() ? lockString + " nowait" : lockString;
		case Timeouts.SKIP_LOCKED_MILLI -> supportsSkipLocked() ? lockString + SQL_IGNORE_LOCKED : lockString;
		case Timeouts.WAIT_FOREVER_MILLI -> lockString;
		default -> supportsWait() ? lockString + " wait " + getTimeoutInSeconds( timeout ) : lockString;
	};
}
@Override
public String getQueryHintString(String query, List<String> hints) {
return query + " with hint (" + String.join( ",", hints ) + ")";
}
@Override
public String getTableComment(String comment) {
return " comment '" + comment + "'";
}
@Override
public String getColumnComment(String comment) {
return " comment '" + comment + "'";
}
@Override
public boolean supportsCommentOn() {
return true;
}
@Override
public boolean supportsPartitionBy() {
return true;
}
@Override
public void contributeTypes(TypeContributions typeContributions, ServiceRegistry serviceRegistry) {
super.contributeTypes( typeContributions, serviceRegistry );
final TypeConfiguration typeConfiguration = typeContributions.getTypeConfiguration();
final JdbcTypeRegistry jdbcTypeRegistry = typeConfiguration.getJdbcTypeRegistry();
if ( treatDoubleTypedFieldsAsDecimal ) {
typeConfiguration.getBasicTypeRegistry()
.register(
new BasicTypeImpl<>( DoubleJavaType.INSTANCE, NumericJdbcType.INSTANCE ),
Double.class.getName()
);
final Map<Integer, Set<String>> jdbcToHibernateTypeContributionMap = typeConfiguration.getJdbcToHibernateTypeContributionMap();
jdbcToHibernateTypeContributionMap.computeIfAbsent( Types.FLOAT, code -> new HashSet<>() ).clear();
jdbcToHibernateTypeContributionMap.computeIfAbsent( Types.REAL, code -> new HashSet<>() ).clear();
jdbcToHibernateTypeContributionMap.computeIfAbsent( Types.DOUBLE, code -> new HashSet<>() ).clear();
jdbcToHibernateTypeContributionMap.get( Types.FLOAT ).add( StandardBasicTypes.BIG_DECIMAL.getName() );
jdbcToHibernateTypeContributionMap.get( Types.REAL ).add( StandardBasicTypes.BIG_DECIMAL.getName() );
jdbcToHibernateTypeContributionMap.get( Types.DOUBLE ).add( StandardBasicTypes.BIG_DECIMAL.getName() );
jdbcTypeRegistry.addDescriptor( Types.FLOAT, NumericJdbcType.INSTANCE );
jdbcTypeRegistry.addDescriptor( Types.REAL, NumericJdbcType.INSTANCE );
jdbcTypeRegistry.addDescriptor( Types.DOUBLE, NumericJdbcType.INSTANCE );
}
jdbcTypeRegistry.addDescriptor( Types.CLOB, new HANAClobJdbcType( maxLobPrefetchSize, useUnicodeStringTypes ) );
jdbcTypeRegistry.addDescriptor( Types.NCLOB, new HANANClobJdbcType( maxLobPrefetchSize ) );
jdbcTypeRegistry.addDescriptor( Types.BLOB, new HANABlobType( maxLobPrefetchSize ) );
// tinyint is unsigned on HANA
jdbcTypeRegistry.addDescriptor( Types.TINYINT, TinyIntAsSmallIntJdbcType.INSTANCE );
if ( isUseUnicodeStringTypes() ) {
jdbcTypeRegistry.addDescriptor( Types.VARCHAR, NVarcharJdbcType.INSTANCE );
jdbcTypeRegistry.addDescriptor( Types.CHAR, NCharJdbcType.INSTANCE );
}
if ( treatDoubleTypedFieldsAsDecimal ) {
jdbcTypeRegistry.addDescriptor( Types.DOUBLE, DecimalJdbcType.INSTANCE );
}
}
@Override
public void appendBooleanValueString(SqlAppender appender, boolean bool) {
if ( this.useLegacyBooleanType ) {
appender.appendSql( bool ? '1' : '0' );
}
else {
appender.appendSql( bool );
}
}
@Override
public IdentityColumnSupport getIdentityColumnSupport() {
return HANAIdentityColumnSupport.INSTANCE;
}
@Override
public Exporter<Table> getTableExporter() {
return this.hanaTableExporter;
}
/*
* HANA doesn't really support REF_CURSOR returns from a procedure, but REF_CURSOR support can be emulated by using
* procedures or functions with an OUT parameter of type TABLE. The results will be returned as result sets on the
* callable statement.
*/
@Override
public CallableStatementSupport getCallableStatementSupport() {
return StandardCallableStatementSupport.REF_CURSOR_INSTANCE;
}
@Override
public int registerResultSetOutParameter(CallableStatement statement, int position) throws SQLException {
// Result set (TABLE) OUT parameters don't need to be registered
return position;
}
@Override
public int registerResultSetOutParameter(CallableStatement statement, String name) throws SQLException {
// Result set (TABLE) OUT parameters don't need to be registered
return 0;
}
@Override
public boolean supportsOffsetInSubquery() {
return true;
}
@Override
public boolean supportsWindowFunctions() {
return true;
}
@Override
public boolean supportsLateral() {
return getVersion().isSameOrAfter( 2, 0, 40 );
}
@Override
public boolean supportsJdbcConnectionLobCreation(DatabaseMetaData databaseMetaData) {
return false;
}
@Override
public boolean supportsNoColumnsInsert() {
return false;
}
@Override
public boolean supportsOrderByInSubquery() {
// Seems to work, though I don't know as of which version
return true;
}
@Override
public NullOrdering getNullOrdering() {
return NullOrdering.SMALLEST;
}
@Override
public void appendDatetimeFormat(SqlAppender appender, String format) {
//I don't think HANA needs FM
appender.appendSql( OracleDialect.datetimeFormat( format, false, false ).result() );
}
@Override
public boolean supportsFractionalTimestampArithmetic() {
return false;
}
@Override
public long getFractionalSecondPrecisionInNanos() {
return 100;
}
/**
 * Emulates {@code timestampadd} with HANA's {@code add_*} functions.
 * Pattern placeholders: {@code ?1} = unit name, {@code ?2} = magnitude,
 * {@code ?3} = the datetime expression.
 */
@Override
public String timestampaddPattern(TemporalUnit unit, TemporalType temporalType, IntervalType intervalType) {
	switch (unit) {
		case NANOSECOND:
			// add_nano100 works in 100-nanosecond ticks, hence ?2/100
			if ( temporalType == TemporalType.TIME ) {
				// add_* needs a timestamp, so a TIME value is promoted to
				// 1970-01-01, shifted, then cast back to time
				return "cast(add_nano100(cast('1970-01-01 '||(?3) as timestamp),?2/100) as time)";
			}
			else {
				return "add_nano100(?3,?2/100)";
			}
		case NATIVE:
			// native precision is already 100ns (see getFractionalSecondPrecisionInNanos)
			if ( temporalType == TemporalType.TIME ) {
				return "cast(add_nano100(cast('1970-01-01 '||(?3) as timestamp),?2) as time)";
			}
			else {
				return "add_nano100(?3,?2)";
			}
		case QUARTER:
			return "add_months(?3,3*?2)";
		case WEEK:
			return "add_days(?3,7*?2)";
		case MINUTE:
			if ( temporalType == TemporalType.TIME ) {
				return "cast(add_seconds(cast('1970-01-01 '||(?3) as timestamp),60*?2) as time)";
			}
			else {
				return "add_seconds(?3,60*?2)";
			}
		case HOUR:
			if ( temporalType == TemporalType.TIME ) {
				return "cast(add_seconds(cast('1970-01-01 '||(?3) as timestamp),3600*?2) as time)";
			}
			else {
				return "add_seconds(?3,3600*?2)";
			}
		case SECOND:
			if ( temporalType == TemporalType.TIME ) {
				return "cast(add_seconds(cast('1970-01-01 '||(?3) as timestamp),?2) as time)";
			}
			// Fall through on purpose
		default:
			// e.g. add_years(?3,?2), add_months(?3,?2), add_days(?3,?2)
			return "add_?1s(?3,?2)";
	}
}
/**
 * Emulates {@code timestampdiff} with HANA's {@code *_between} functions.
 * Pattern placeholders: {@code ?1} = unit name, {@code ?2} = start,
 * {@code ?3} = end.
 */
@Override
public String timestampdiffPattern(TemporalUnit unit, TemporalType fromTemporalType, TemporalType toTemporalType) {
	switch (unit) {
		case NANOSECOND:
			if ( fromTemporalType == TemporalType.TIME && toTemporalType == TemporalType.TIME ) {
				// pure time arithmetic is done in whole seconds, scaled to ns
				return "seconds_between(?2,?3)*1000000000";
			}
			else {
				// nano100_between counts 100ns ticks
				return "nano100_between(?2,?3)*100";
			}
		case NATIVE:
			if ( fromTemporalType == TemporalType.TIME && toTemporalType == TemporalType.TIME ) {
				// native precision is 100ns => 10^7 ticks per second
				return "seconds_between(?2,?3)*10000000";
			}
			else {
				return "nano100_between(?2,?3)";
			}
		case QUARTER:
			return "months_between(?2,?3)/3";
		case WEEK:
			return "days_between(?2,?3)/7";
		case MINUTE:
			return "seconds_between(?2,?3)/60";
		case HOUR:
			return "seconds_between(?2,?3)/3600";
		default:
			// e.g. years_between(?2,?3), months_between(?2,?3), days_between(?2,?3)
			return "?1s_between(?2,?3)";
	}
}
@Override
public void appendDateTimeLiteral(
SqlAppender appender,
TemporalAccessor temporalAccessor,
TemporalType precision,
TimeZone jdbcTimeZone) {
switch ( precision ) {
case DATE:
appender.appendSql( JDBC_ESCAPE_START_DATE );
appendAsDate( appender, temporalAccessor );
appender.appendSql( JDBC_ESCAPE_END );
break;
case TIME:
appender.appendSql( JDBC_ESCAPE_START_TIME );
appendAsTime( appender, temporalAccessor, supportsTemporalLiteralOffset(), jdbcTimeZone );
appender.appendSql( JDBC_ESCAPE_END );
break;
case TIMESTAMP:
appender.appendSql( JDBC_ESCAPE_START_TIMESTAMP );
appendAsTimestampWithMicros( appender, temporalAccessor, supportsTemporalLiteralOffset(), jdbcTimeZone );
appender.appendSql( JDBC_ESCAPE_END );
break;
default:
throw new IllegalArgumentException();
}
}
@Override
public void appendDateTimeLiteral(SqlAppender appender, Date date, TemporalType precision, TimeZone jdbcTimeZone) {
switch ( precision ) {
case DATE:
appender.appendSql( JDBC_ESCAPE_START_DATE );
appendAsDate( appender, date );
appender.appendSql( JDBC_ESCAPE_END );
break;
case TIME:
appender.appendSql( JDBC_ESCAPE_START_TIME );
appendAsTime( appender, date );
appender.appendSql( JDBC_ESCAPE_END );
break;
case TIMESTAMP:
appender.appendSql( JDBC_ESCAPE_START_TIMESTAMP );
appendAsTimestampWithMicros( appender, date, jdbcTimeZone );
appender.appendSql( JDBC_ESCAPE_END );
break;
default:
throw new IllegalArgumentException();
}
}
@Override
public String generatedAs(String generatedAs) {
	// DDL fragment for a generated column: "generated always as (<expr>)"
	return " generated always as (" + generatedAs + ")";
}

public boolean isUseUnicodeStringTypes() {
	// cloud deployments using the column store are always Unicode
	return this.useUnicodeStringTypes || isDefaultTableTypeColumn() && isCloud();
}

protected boolean supportsAsciiStringTypes() {
	// ASCII string types only exist outside of cloud column-store deployments
	return !isDefaultTableTypeColumn() || !isCloud();
}

protected Boolean useUnicodeStringTypesDefault() {
	// default to Unicode on cloud column-store deployments, ASCII otherwise
	return isDefaultTableTypeColumn() ? isCloud() : Boolean.FALSE;
}
private static
|
HANALegacyDialect
|
java
|
alibaba__nacos
|
naming/src/main/java/com/alibaba/nacos/naming/pojo/Subscriber.java
|
{
"start": 818,
"end": 4079
}
|
class ____ implements Serializable {
private static final long serialVersionUID = -6256968317172033867L;
private String addrStr;
private String agent;
private String app;
private String ip;
private int port;
private String namespaceId;
private String serviceName;
private String cluster;
public Subscriber() {
}
public Subscriber(String addrStr, String agent, String app, String ip, String namespaceId, String serviceName,
int port) {
this(addrStr, agent, app, ip, namespaceId, serviceName, port, StringUtils.EMPTY);
}
public Subscriber(String addrStr, String agent, String app, String ip, String namespaceId, String serviceName,
int port, String clusters) {
this.addrStr = addrStr;
this.agent = agent;
this.app = app;
this.ip = ip;
this.port = port;
this.namespaceId = namespaceId;
this.serviceName = serviceName;
this.cluster = clusters;
}
public String getAddrStr() {
return addrStr;
}
public void setAddrStr(String addrStr) {
this.addrStr = addrStr;
}
public String getAgent() {
return agent;
}
public void setAgent(String agent) {
this.agent = agent;
}
public String getApp() {
return app;
}
public void setApp(String app) {
this.app = app;
}
public String getIp() {
return ip;
}
public void setIp(String ip) {
this.ip = ip;
}
public String getNamespaceId() {
return namespaceId;
}
public void setNamespaceId(String namespaceId) {
this.namespaceId = namespaceId;
}
public String getServiceName() {
return serviceName;
}
public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
public String getCluster() {
return cluster;
}
public void setCluster(String cluster) {
this.cluster = cluster;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Subscriber that = (Subscriber) o;
return Objects.equals(addrStr, that.addrStr) && Objects.equals(agent, that.agent) && Objects
.equals(app, that.app) && Objects.equals(ip, that.ip) && Objects.equals(namespaceId, that.namespaceId)
&& Objects.equals(serviceName, that.serviceName);
}
@Override
public int hashCode() {
return Objects.hash(addrStr, agent, app, ip, namespaceId, serviceName);
}
@Override
public String toString() {
return "Subscriber{" + "addrStr='" + addrStr + '\'' + ", agent='" + agent + '\'' + ", app='" + app + '\''
+ ", ip='" + ip + '\'' + ", namespaceId='" + namespaceId + '\'' + ", serviceName='" + serviceName + '\''
+ '}';
}
}
|
Subscriber
|
java
|
spring-projects__spring-boot
|
documentation/spring-boot-actuator-docs/src/test/java/org/springframework/boot/actuate/docs/web/mappings/MappingsEndpointReactiveDocumentationTests.java
|
{
"start": 5938,
"end": 6644
}
|
class ____ the method."),
fieldWithPath("*.[].details.handlerMethod.name").type(JsonFieldType.STRING)
.description("Name of the method."),
fieldWithPath("*.[].details.handlerMethod.descriptor").type(JsonFieldType.STRING)
.description("Descriptor of the method as specified in the Java Language Specification."));
List<FieldDescriptor> handlerFunction = List.of(
fieldWithPath("*.[].details.handlerFunction").optional()
.type(JsonFieldType.OBJECT)
.description("Details of the function, if any, that will handle requests to this mapping."),
fieldWithPath("*.[].details.handlerFunction.className").type(JsonFieldType.STRING)
.description("Fully qualified name of the
|
of
|
java
|
apache__flink
|
flink-metrics/flink-metrics-datadog/src/main/java/org/apache/flink/metrics/datadog/Clock.java
|
{
"start": 931,
"end": 985
}
|
interface ____ {
long getUnixEpochTimestamp();
}
|
Clock
|
java
|
apache__avro
|
lang/java/mapred/src/test/java/org/apache/avro/mapreduce/TestAvroKeyValueRecordWriter.java
|
{
"start": 4755,
"end": 9383
}
|
class ____ {
String attribute;
}
// Round-trips a reflect-mapped bean (R1) through AvroKeyValueRecordWriter:
// writes one <Text, AvroValue<R1>> pair to an in-memory container file and
// reads it back with a reflect datum reader.
@Test
void usingReflection() throws Exception {
	Job job = Job.getInstance();
	// derive the writer schema for R1 via Avro reflection
	Schema schema = ReflectData.get().getSchema(R1.class);
	AvroJob.setOutputValueSchema(job, schema);
	TaskAttemptContext context = mock(TaskAttemptContext.class);
	R1 record = new R1();
	record.attribute = "test";
	AvroValue<R1> avroValue = new AvroValue<>(record);
	ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
	AvroDatumConverterFactory factory = new AvroDatumConverterFactory(job.getConfiguration());
	AvroDatumConverter<Text, ?> keyConverter = factory.create(Text.class);
	@SuppressWarnings("unchecked")
	AvroDatumConverter<AvroValue<R1>, R1> valueConverter = factory.create((Class<AvroValue<R1>>) avroValue.getClass());
	AvroKeyValueRecordWriter<Text, AvroValue<R1>> writer = new AvroKeyValueRecordWriter<>(keyConverter, valueConverter,
			new ReflectData(), CodecFactory.nullCodec(), outputStream);
	writer.write(new Text("reflectionData"), avroValue);
	writer.close(context);
	// read the container file back using the generic key/value wrapper schema
	ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
	Schema readerSchema = AvroKeyValue.getSchema(Schema.create(Schema.Type.STRING), schema);
	DatumReader<GenericRecord> datumReader = new ReflectDatumReader<>(readerSchema);
	DataFileStream<GenericRecord> avroFileReader = new DataFileStream<>(inputStream, datumReader);
	// Verify that the first record was written.
	assertTrue(avroFileReader.hasNext());
	// Verify that the record holds the same data that we've written
	AvroKeyValue<CharSequence, R1> firstRecord = new AvroKeyValue<>(avroFileReader.next());
	assertNotNull(firstRecord.get());
	assertEquals("reflectionData", firstRecord.getKey().toString());
	assertEquals(record.attribute, firstRecord.getValue().attribute);
	avroFileReader.close();
	// the writer must not have touched the (mocked) task context
	verify(context, never()).getConfiguration();
}
@Test
void syncableWriteRecords() throws IOException {
Job job = Job.getInstance();
AvroJob.setOutputValueSchema(job, TextStats.SCHEMA$);
TaskAttemptContext context = mock(TaskAttemptContext.class);
AvroDatumConverterFactory factory = new AvroDatumConverterFactory(job.getConfiguration());
AvroDatumConverter<Text, ?> keyConverter = factory.create(Text.class);
AvroValue<TextStats> avroValue = new AvroValue<>(null);
@SuppressWarnings("unchecked")
AvroDatumConverter<AvroValue<TextStats>, ?> valueConverter = factory
.create((Class<AvroValue<TextStats>>) avroValue.getClass());
CodecFactory compressionCodec = CodecFactory.nullCodec();
FileOutputStream outputStream = new FileOutputStream(new File("target/temp.avro"));
// Write a marker followed by each record: <'apple', TextStats('apple')> and
// <'banana', TextStats('banana')>.
AvroKeyValueRecordWriter<Text, AvroValue<TextStats>> writer = new AvroKeyValueRecordWriter<>(keyConverter,
valueConverter, new ReflectData(), compressionCodec, outputStream);
TextStats appleStats = new TextStats();
appleStats.setName("apple");
long pointOne = writer.sync();
writer.write(new Text("apple"), new AvroValue<>(appleStats));
TextStats bananaStats = new TextStats();
bananaStats.setName("banana");
long pointTwo = writer.sync();
writer.write(new Text("banana"), new AvroValue<>(bananaStats));
writer.close(context);
Configuration conf = new Configuration();
conf.set("fs.default.name", "file:///");
Path avroFile = new Path("target/temp.avro");
DataFileReader<GenericData.Record> avroFileReader = new DataFileReader<>(new FsInput(avroFile, conf),
new SpecificDatumReader<>());
avroFileReader.seek(pointTwo);
// Verify that the second record was written;
assertTrue(avroFileReader.hasNext());
AvroKeyValue<CharSequence, TextStats> secondRecord = new AvroKeyValue<>(avroFileReader.next());
assertNotNull(secondRecord.get());
assertEquals("banana", secondRecord.getKey().toString());
assertEquals("banana", secondRecord.getValue().getName().toString());
avroFileReader.seek(pointOne);
// Verify that the first record was written.
assertTrue(avroFileReader.hasNext());
AvroKeyValue<CharSequence, TextStats> firstRecord = new AvroKeyValue<>(avroFileReader.next());
assertNotNull(firstRecord.get());
assertEquals("apple", firstRecord.getKey().toString());
assertEquals("apple", firstRecord.getValue().getName().toString());
// That's all, folks.
avroFileReader.close();
verify(context, never()).getConfiguration();
}
}
|
R1
|
java
|
elastic__elasticsearch
|
x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/SecurityHttpClientConfigCallbackTests.java
|
{
"start": 634,
"end": 2293
}
|
class ____ extends ESTestCase {
private final CredentialsProvider credentialsProvider = mock(CredentialsProvider.class);
private final SSLIOSessionStrategy sslStrategy = mock(SSLIOSessionStrategy.class);
/**
* HttpAsyncClientBuilder's methods are {@code final} and therefore not verifiable.
*/
private final HttpAsyncClientBuilder builder = mock(HttpAsyncClientBuilder.class);
public void testSSLIOSessionStrategyNullThrowsException() {
final CredentialsProvider optionalCredentialsProvider = randomFrom(credentialsProvider, null);
expectThrows(NullPointerException.class, () -> new SecurityHttpClientConfigCallback(null, optionalCredentialsProvider));
}
public void testCustomizeHttpClient() {
final SecurityHttpClientConfigCallback callback = new SecurityHttpClientConfigCallback(sslStrategy, credentialsProvider);
assertSame(credentialsProvider, callback.getCredentialsProvider());
assertSame(sslStrategy, callback.getSSLStrategy());
assertSame(builder, callback.customizeHttpClient(builder));
}
public void testCustomizeHttpClientWithOptionalParameters() {
final CredentialsProvider optionalCredentialsProvider = randomFrom(credentialsProvider, null);
final SecurityHttpClientConfigCallback callback = new SecurityHttpClientConfigCallback(sslStrategy, optionalCredentialsProvider);
assertSame(builder, callback.customizeHttpClient(builder));
assertSame(optionalCredentialsProvider, callback.getCredentialsProvider());
assertSame(sslStrategy, callback.getSSLStrategy());
}
}
|
SecurityHttpClientConfigCallbackTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/FoldNull.java
|
{
"start": 427,
"end": 571
}
|
class ____ extends OptimizerRules.FoldNull {
@Override
public Expression rule(Expression e) {
return super.rule(e);
}
}
|
FoldNull
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/UrlDecode.java
|
{
"start": 1506,
"end": 3525
}
|
class ____ extends UnaryScalarFunction {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Expression.class,
"UrlDecode",
UrlDecode::new
);
private UrlDecode(StreamInput in) throws IOException {
super(in);
}
@FunctionInfo(
returnType = "keyword",
description = "URL-decodes the input, or returns `null` and adds a warning header to the response if the input cannot be decoded.",
examples = { @Example(file = "string", tag = "url_decode") },
appliesTo = { @FunctionAppliesTo(lifeCycle = FunctionAppliesToLifecycle.GA, version = "9.2.0") }
)
public UrlDecode(
Source source,
@Param(name = "string", type = { "keyword", "text" }, description = "The URL-encoded string to decode.") Expression str
) {
super(source, str);
}
@Override
public Expression replaceChildren(List<Expression> newChildren) {
return new UrlDecode(source(), newChildren.get(0));
}
@Override
protected NodeInfo<? extends Expression> info() {
return NodeInfo.create(this, UrlDecode::new, field());
}
@Override
public String getWriteableName() {
return ENTRY.name;
}
@Override
protected TypeResolution resolveType() {
if (childrenResolved() == false) {
return new TypeResolution("Unresolved children");
}
return isString(field, sourceText(), TypeResolutions.ParamOrdinal.DEFAULT);
}
@Override
public EvalOperator.ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) {
return new UrlDecodeEvaluator.Factory(source(), toEvaluator.apply(field()));
}
@ConvertEvaluator(warnExceptions = { IllegalArgumentException.class })
static BytesRef process(final BytesRef val) {
String input = val.utf8ToString();
String decoded = URLDecoder.decode(input, StandardCharsets.UTF_8);
return new BytesRef(decoded);
}
}
|
UrlDecode
|
java
|
hibernate__hibernate-orm
|
hibernate-community-dialects/src/main/java/org/hibernate/community/dialect/sequence/PostgreSQLLegacySequenceSupport.java
|
{
"start": 353,
"end": 1220
}
|
class ____ implements SequenceSupport {
public static final SequenceSupport INSTANCE = new PostgreSQLLegacySequenceSupport();
public static final SequenceSupport LEGACY_INSTANCE = new PostgreSQLLegacySequenceSupport() {
@Override
public String getDropSequenceString(String sequenceName) {
return "drop sequence " + sequenceName;
}
};
@Override
public String getSelectSequenceNextValString(String sequenceName) {
return "nextval('" + sequenceName + "')";
}
@Override
public String getSelectSequencePreviousValString(String sequenceName) throws MappingException {
return "currval('" + sequenceName + "')";
}
@Override
public boolean sometimesNeedsStartingValue() {
return true;
}
@Override
public String getDropSequenceString(String sequenceName) {
return "drop sequence if exists " + sequenceName;
}
}
|
PostgreSQLLegacySequenceSupport
|
java
|
spring-projects__spring-framework
|
spring-aop/src/main/java/org/springframework/aop/aspectj/TypePatternClassFilter.java
|
{
"start": 2317,
"end": 3031
}
|
interface ____ any class
* that implements it.
* <p>These conventions are established by AspectJ, not Spring AOP.
* @param typePattern the type pattern that AspectJ weaver should parse
*/
public void setTypePattern(String typePattern) {
Assert.notNull(typePattern, "Type pattern must not be null");
this.typePattern = typePattern;
this.aspectJTypePatternMatcher =
PointcutParser.getPointcutParserSupportingAllPrimitivesAndUsingContextClassloaderForResolution().
parseTypePattern(replaceBooleanOperators(typePattern));
}
/**
* Return the AspectJ type pattern to match.
*/
public String getTypePattern() {
return this.typePattern;
}
/**
* Should the pointcut apply to the given
|
and
|
java
|
apache__dubbo
|
dubbo-spring-boot-project/dubbo-spring-boot-autoconfigure/src/main/java/org/apache/dubbo/spring/boot/autoconfigure/observability/brave/BraveAutoConfiguration.java
|
{
"start": 8041,
"end": 9089
}
|
class ____ {
@Bean
@ConditionalOnMissingBean
brave.propagation.Propagation.Factory propagationFactory(DubboConfigurationProperties tracing) {
String type = tracing.getTracing().getPropagation().getType();
switch (type) {
case org.apache.dubbo.config.nested.PropagationConfig.B3:
return brave.propagation.B3Propagation.newFactoryBuilder()
.injectFormat(brave.propagation.B3Propagation.Format.SINGLE_NO_PARENT)
.build();
case org.apache.dubbo.config.nested.PropagationConfig.W3C:
return new io.micrometer.tracing.brave.bridge.W3CPropagation();
default:
throw new IllegalArgumentException("UnSupport propagation type");
}
}
}
@ConditionalOnProperty(value = ObservabilityUtils.DUBBO_TRACING_BAGGAGE_ENABLED, matchIfMissing = true)
@Configuration(proxyBeanMethods = false)
static
|
BraveNoBaggageConfiguration
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/repository/projections/ProjectionJoinIntegrationTests.java
|
{
"start": 2749,
"end": 3403
}
|
class ____ {
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
@Access(value = AccessType.PROPERTY) int id;
@OneToOne(cascade = CascadeType.ALL) Address address;
public User() {}
public int getId() {
return this.id;
}
public Address getAddress() {
return this.address;
}
public void setId(int id) {
this.id = id;
}
public void setAddress(Address address) {
this.address = address;
}
public String toString() {
return "ProjectionJoinIntegrationTests.User(id=" + this.getId() + ", address=" + this.getAddress() + ")";
}
}
@Table(name = "ProjectionJoinIntegrationTests_Address")
@Entity
static
|
User
|
java
|
apache__camel
|
components/camel-fhir/camel-fhir-component/src/generated/java/org/apache/camel/component/fhir/internal/FhirMetaApiMethod.java
|
{
"start": 654,
"end": 2712
}
|
enum ____ implements ApiMethod {
ADD(
org.hl7.fhir.instance.model.api.IBaseMetaType.class,
"add",
arg("meta", org.hl7.fhir.instance.model.api.IBaseMetaType.class),
arg("id", org.hl7.fhir.instance.model.api.IIdType.class),
arg("extraParameters", java.util.Map.class)),
DELETE(
org.hl7.fhir.instance.model.api.IBaseMetaType.class,
"delete",
arg("meta", org.hl7.fhir.instance.model.api.IBaseMetaType.class),
arg("id", org.hl7.fhir.instance.model.api.IIdType.class),
arg("extraParameters", java.util.Map.class)),
GET_FROM_RESOURCE(
org.hl7.fhir.instance.model.api.IBaseMetaType.class,
"getFromResource",
arg("metaType", Class.class),
arg("id", org.hl7.fhir.instance.model.api.IIdType.class),
arg("extraParameters", java.util.Map.class)),
GET_FROM_SERVER(
org.hl7.fhir.instance.model.api.IBaseMetaType.class,
"getFromServer",
arg("metaType", Class.class),
arg("extraParameters", java.util.Map.class)),
GET_FROM_TYPE(
org.hl7.fhir.instance.model.api.IBaseMetaType.class,
"getFromType",
arg("metaType", Class.class),
arg("resourceType", String.class),
arg("extraParameters", java.util.Map.class));
private final ApiMethod apiMethod;
FhirMetaApiMethod(Class<?> resultType, String name, ApiMethodArg... args) {
this.apiMethod = new ApiMethodImpl(FhirMeta.class, resultType, name, args);
}
@Override
public String getName() { return apiMethod.getName(); }
@Override
public Class<?> getResultType() { return apiMethod.getResultType(); }
@Override
public List<String> getArgNames() { return apiMethod.getArgNames(); }
@Override
public List<String> getSetterArgNames() { return apiMethod.getSetterArgNames(); }
@Override
public List<Class<?>> getArgTypes() { return apiMethod.getArgTypes(); }
@Override
public Method getMethod() { return apiMethod.getMethod(); }
}
|
FhirMetaApiMethod
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/BanJNDITest.java
|
{
"start": 6127,
"end": 6801
}
|
class ____ {
private static DirContext FakeDirContext = ((DirContext) new Object());
// Check we didn't ban all of Context by accident.
private void callsList() throws NamingException {
FakeDirContext.list(((Name) new Object()));
}
}
""")
.doTest();
}
@Test
public void negativeCaseUnchanged() {
refactoringHelper
.addInputLines(
"BanJNDINegativeCases.java",
"""
package com.google.errorprone.bugpatterns.testdata;
import javax.naming.Name;
import javax.naming.NamingException;
import javax.naming.directory.DirContext;
/**
* {@link BanJNDITest}
*
* @author tshadwell@google.com (Thomas Shadwell)
*/
|
BanJNDIPositiveCases
|
java
|
spring-projects__spring-framework
|
spring-test/src/main/java/org/springframework/test/web/reactive/server/WebTestClient.java
|
{
"start": 36863,
"end": 37596
}
|
interface ____<E extends @Nullable Object> extends BodySpec<List<E>, ListBodySpec<E>> {
/**
* Assert the extracted list of values is of the given size.
* @param size the expected size
*/
ListBodySpec<E> hasSize(int size);
/**
* Assert the extracted list of values contains the given elements.
* @param elements the elements to check
*/
@SuppressWarnings("unchecked")
ListBodySpec<E> contains(E... elements);
/**
* Assert the extracted list of values doesn't contain the given elements.
* @param elements the elements to check
*/
@SuppressWarnings("unchecked")
ListBodySpec<E> doesNotContain(E... elements);
}
/**
* Spec for expectations on the response body content.
*/
|
ListBodySpec
|
java
|
elastic__elasticsearch
|
x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/googlevertexai/request/GoogleVertexAiRerankRequest.java
|
{
"start": 948,
"end": 3059
}
|
class ____ implements GoogleVertexAiRequest {
private final GoogleVertexAiRerankModel model;
private final String query;
private final List<String> input;
private final Boolean returnDocuments;
private final Integer topN;
public GoogleVertexAiRerankRequest(
String query,
List<String> input,
@Nullable Boolean returnDocuments,
@Nullable Integer topN,
GoogleVertexAiRerankModel model
) {
this.model = Objects.requireNonNull(model);
this.query = Objects.requireNonNull(query);
this.input = Objects.requireNonNull(input);
this.returnDocuments = returnDocuments;
this.topN = topN;
}
@Override
public HttpRequest createHttpRequest() {
HttpPost httpPost = new HttpPost(model.nonStreamingUri());
ByteArrayEntity byteEntity = new ByteArrayEntity(
Strings.toString(
new GoogleVertexAiRerankRequestEntity(
query,
input,
returnDocuments,
topN != null ? topN : model.getTaskSettings().topN(),
model.getServiceSettings().modelId()
)
).getBytes(StandardCharsets.UTF_8)
);
httpPost.setEntity(byteEntity);
httpPost.setHeader(HttpHeaders.CONTENT_TYPE, XContentType.JSON.mediaType());
decorateWithAuth(httpPost);
return new HttpRequest(httpPost, getInferenceEntityId());
}
public void decorateWithAuth(HttpPost httpPost) {
GoogleVertexAiRequest.decorateWithBearerToken(httpPost, model.getSecretSettings());
}
public GoogleVertexAiRerankModel model() {
return model;
}
@Override
public String getInferenceEntityId() {
return model.getInferenceEntityId();
}
@Override
public URI getURI() {
return model.nonStreamingUri();
}
@Override
public Request truncate() {
return this;
}
@Override
public boolean[] getTruncationInfo() {
return null;
}
}
|
GoogleVertexAiRerankRequest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/boot/models/annotations/internal/OverriddenSQLRestrictionsAnnotation.java
|
{
"start": 757,
"end": 1977
}
|
class ____
implements DialectOverride.SQLRestrictions, RepeatableContainer<DialectOverride.SQLRestriction> {
private DialectOverride.SQLRestriction[] value;
/**
* Used in creating dynamic annotation instances (e.g. from XML)
*/
public OverriddenSQLRestrictionsAnnotation(ModelsContext modelContext) {
}
/**
* Used in creating annotation instances from JDK variant
*/
public OverriddenSQLRestrictionsAnnotation(SQLRestrictions annotation, ModelsContext modelContext) {
this.value = extractJdkValue(
annotation,
DialectOverrideAnnotations.DIALECT_OVERRIDE_SQL_RESTRICTIONS,
"value",
modelContext
);
}
/**
* Used in creating annotation instances from Jandex variant
*/
public OverriddenSQLRestrictionsAnnotation(
Map<String, Object> attributeValues,
ModelsContext modelContext) {
this.value = (DialectOverride.SQLRestriction[]) attributeValues.get( "value" );
}
@Override
public Class<? extends Annotation> annotationType() {
return SQLRestrictions.class;
}
@Override
public DialectOverride.SQLRestriction[] value() {
return value;
}
public void value(DialectOverride.SQLRestriction[] value) {
this.value = value;
}
}
|
OverriddenSQLRestrictionsAnnotation
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java
|
{
"start": 3098,
"end": 9971
}
|
class ____ {
private static final File testRootDir = new File("target",
TestNMWebServer.class.getSimpleName());
private static File testLogDir = new File("target",
TestNMWebServer.class.getSimpleName() + "LogDir");
@BeforeEach
public void setup() {
testRootDir.mkdirs();
testLogDir.mkdir();
}
@AfterEach
public void tearDown() {
FileUtil.fullyDelete(testRootDir);
FileUtil.fullyDelete(testLogDir);
}
private NodeHealthCheckerService createNodeHealthCheckerService() {
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
return new NodeHealthCheckerService(dirsHandler);
}
private int startNMWebAppServer(String webAddr) {
Configuration conf = new Configuration();
NodeManager.NMContext nmContext = new NodeManager.NMContext(null, null, null, null,
null, false, conf);
NodeId nodeId = NodeId.newInstance("testhost.foo.com", 8042);
nmContext.setNodeId(nodeId);
ResourceView resourceView = new ResourceView() {
@Override
public long getVmemAllocatedForContainers() {
return 0;
}
@Override
public long getPmemAllocatedForContainers() {
return 0;
}
@Override
public long getVCoresAllocatedForContainers() {
return 0;
}
@Override
public boolean isVmemCheckEnabled() {
return true;
}
@Override
public boolean isPmemCheckEnabled() {
return true;
}
};
conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
NodeHealthCheckerService healthChecker = createNodeHealthCheckerService();
healthChecker.init(conf);
LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
conf.set(YarnConfiguration.NM_WEBAPP_ADDRESS, webAddr);
WebServer server = new WebServer(nmContext, resourceView,
new ApplicationACLsManager(conf), dirsHandler);
try {
server.init(conf);
server.start();
return server.getPort();
} finally {
server.stop();
healthChecker.stop();
}
}
@Test
public void testNMWebAppWithOutPort() {
int port = startNMWebAppServer("0.0.0.0");
validatePortVal(port);
}
private void validatePortVal(int portVal) {
assertTrue(portVal > 0, "Port is not updated");
assertTrue(portVal != YarnConfiguration.DEFAULT_NM_PORT,
"Port is default "+ YarnConfiguration.DEFAULT_NM_PORT);
}
@Test
public void testNMWebAppWithEphemeralPort() {
int port = startNMWebAppServer("0.0.0.0:0");
validatePortVal(port);
}
@Test
public void testNMWebApp() throws IOException, YarnException {
Configuration conf = new Configuration();
Context nmContext = new NodeManager.NMContext(null, null, null, null,
null, false, conf);
ResourceView resourceView = new ResourceView() {
@Override
public long getVmemAllocatedForContainers() {
return 0;
}
@Override
public long getPmemAllocatedForContainers() {
return 0;
}
@Override
public long getVCoresAllocatedForContainers() {
return 0;
}
@Override
public boolean isVmemCheckEnabled() {
return true;
}
@Override
public boolean isPmemCheckEnabled() {
return true;
}
};
conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath());
conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath());
NodeHealthCheckerService healthChecker = createNodeHealthCheckerService();
healthChecker.init(conf);
LocalDirsHandlerService dirsHandler = healthChecker.getDiskHandler();
WebServer server = new WebServer(nmContext, resourceView,
new ApplicationACLsManager(conf), dirsHandler);
server.init(conf);
server.start();
// Add an application and the corresponding containers
RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(conf);
Dispatcher dispatcher = new AsyncDispatcher();
String user = "nobody";
long clusterTimeStamp = 1234;
ApplicationId appId =
BuilderUtils.newApplicationId(recordFactory, clusterTimeStamp, 1);
Application app = mock(Application.class);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
nmContext.getApplications().put(appId, app);
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
appId, 1);
ContainerId container1 =
BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0);
ContainerId container2 =
BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 1);
NodeManagerMetrics metrics = mock(NodeManagerMetrics.class);
NMStateStoreService stateStore = new NMNullStateStoreService();
for (ContainerId containerId : new ContainerId[] { container1,
container2}) {
// TODO: Use builder utils
ContainerLaunchContext launchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
long currentTime = System.currentTimeMillis();
Token containerToken =
BuilderUtils.newContainerToken(containerId, 0, "127.0.0.1", 1234,
user, Resources.createResource(1024), currentTime + 10000L,
123, "password".getBytes(), currentTime);
Context context = mock(Context.class);
Container container =
new ContainerImpl(conf, dispatcher, launchContext,
null, metrics, BuilderUtils.newContainerTokenIdentifier(
containerToken), context) {
@Override
public ContainerState getContainerState() {
return ContainerState.RUNNING;
}
};
nmContext.getContainers().put(containerId, container);
//TODO: Gross hack. Fix in code.
ApplicationId applicationId =
containerId.getApplicationAttemptId().getApplicationId();
nmContext.getApplications().get(applicationId).getContainers()
.put(containerId, container);
writeContainerLogs(containerId, dirsHandler);
}
// TODO: Pull logs and test contents.
// Thread.sleep(1000000);
}
private void writeContainerLogs(ContainerId containerId, LocalDirsHandlerService dirsHandler)
throws IOException, YarnException {
// ContainerLogDir should be created
File containerLogDir =
ContainerLogsUtils.getContainerLogDirs(containerId,
dirsHandler).get(0);
containerLogDir.mkdirs();
for (String fileType : new String[] { "stdout", "stderr", "syslog" }) {
Writer writer = new FileWriter(new File(containerLogDir, fileType));
writer.write(containerId.toString() + "\n Hello "
+ fileType + "!");
writer.close();
}
}
}
|
TestNMWebServer
|
java
|
spring-projects__spring-framework
|
spring-tx/src/main/java/org/springframework/transaction/interceptor/TransactionAspectSupport.java
|
{
"start": 4075,
"end": 10588
}
|
class ____ AspectJ aspects (which are not allowed to implement Serializable)!
/**
* Key to use to store the default transaction manager.
*/
private static final Object DEFAULT_TRANSACTION_MANAGER_KEY = new Object();
private static final String COROUTINES_FLOW_CLASS_NAME = "kotlinx.coroutines.flow.Flow";
/**
* Reactive Streams API present on the classpath?
*/
private static final boolean REACTIVE_STREAMS_PRESENT = ClassUtils.isPresent(
"org.reactivestreams.Publisher", TransactionAspectSupport.class.getClassLoader());
/**
* Vavr library present on the classpath?
*/
private static final boolean VAVR_PRESENT = ClassUtils.isPresent(
"io.vavr.control.Try", TransactionAspectSupport.class.getClassLoader());
/**
* Holder to support the {@code currentTransactionStatus()} method,
* and to support communication between different cooperating advices
* (for example, before and after advice) if the aspect involves more than a
* single method (as will be the case for around advice).
*/
private static final ThreadLocal<TransactionInfo> transactionInfoHolder =
new NamedThreadLocal<>("Current aspect-driven transaction");
/**
* Subclasses can use this to return the current TransactionInfo.
* Only subclasses that cannot handle all operations in one method,
* such as an AspectJ aspect involving distinct before and after advice,
* need to use this mechanism to get at the current TransactionInfo.
* An around advice such as an AOP Alliance MethodInterceptor can hold a
* reference to the TransactionInfo throughout the aspect method.
* <p>A TransactionInfo will be returned even if no transaction was created.
* The {@code TransactionInfo.hasTransaction()} method can be used to query this.
* <p>To find out about specific transaction characteristics, consider using
* TransactionSynchronizationManager's {@code isSynchronizationActive()}
* and/or {@code isActualTransactionActive()} methods.
* @return the TransactionInfo bound to this thread, or {@code null} if none
* @see TransactionInfo#hasTransaction()
* @see org.springframework.transaction.support.TransactionSynchronizationManager#isSynchronizationActive()
* @see org.springframework.transaction.support.TransactionSynchronizationManager#isActualTransactionActive()
*/
protected static @Nullable TransactionInfo currentTransactionInfo() throws NoTransactionException {
return transactionInfoHolder.get();
}
/**
* Return the transaction status of the current method invocation.
* Mainly intended for code that wants to set the current transaction
* rollback-only but not throw an application exception.
* <p>This exposes the locally declared transaction boundary with its declared name
* and characteristics, as managed by the aspect. Ar runtime, the local boundary may
* participate in an outer transaction: If you need transaction metadata from such
* an outer transaction (the actual resource transaction) instead, consider using
* {@link org.springframework.transaction.support.TransactionSynchronizationManager}.
* @throws NoTransactionException if the transaction info cannot be found,
* because the method was invoked outside an AOP invocation context
* @see org.springframework.transaction.support.TransactionSynchronizationManager#getCurrentTransactionName()
* @see org.springframework.transaction.support.TransactionSynchronizationManager#isCurrentTransactionReadOnly()
*/
public static TransactionStatus currentTransactionStatus() throws NoTransactionException {
TransactionInfo info = currentTransactionInfo();
if (info == null || info.transactionStatus == null) {
throw new NoTransactionException("No transaction aspect-managed TransactionStatus in scope");
}
return info.transactionStatus;
}
protected final Log logger = LogFactory.getLog(getClass());
private final @Nullable ReactiveAdapterRegistry reactiveAdapterRegistry;
private @Nullable String transactionManagerBeanName;
private @Nullable TransactionManager transactionManager;
private @Nullable TransactionAttributeSource transactionAttributeSource;
private @Nullable BeanFactory beanFactory;
private final ConcurrentMap<Object, TransactionManager> transactionManagerCache =
new ConcurrentReferenceHashMap<>(4);
private final ConcurrentMap<Method, ReactiveTransactionSupport> transactionSupportCache =
new ConcurrentReferenceHashMap<>(1024);
protected TransactionAspectSupport() {
if (REACTIVE_STREAMS_PRESENT) {
this.reactiveAdapterRegistry = ReactiveAdapterRegistry.getSharedInstance();
}
else {
this.reactiveAdapterRegistry = null;
}
}
/**
* Specify the name of the default transaction manager bean.
* <p>This can either point to a traditional {@link PlatformTransactionManager} or a
* {@link ReactiveTransactionManager} for reactive transaction management.
*/
public void setTransactionManagerBeanName(@Nullable String transactionManagerBeanName) {
this.transactionManagerBeanName = transactionManagerBeanName;
}
/**
* Return the name of the default transaction manager bean.
*/
protected final @Nullable String getTransactionManagerBeanName() {
return this.transactionManagerBeanName;
}
/**
* Specify the <em>default</em> transaction manager to use to drive transactions.
* <p>This can either be a traditional {@link PlatformTransactionManager} or a
* {@link ReactiveTransactionManager} for reactive transaction management.
* <p>The default transaction manager will be used if a <em>qualifier</em>
* has not been declared for a given transaction or if an explicit name for the
* default transaction manager bean has not been specified.
* @see #setTransactionManagerBeanName
*/
public void setTransactionManager(@Nullable TransactionManager transactionManager) {
this.transactionManager = transactionManager;
}
/**
* Return the default transaction manager, or {@code null} if unknown.
* <p>This can either be a traditional {@link PlatformTransactionManager} or a
* {@link ReactiveTransactionManager} for reactive transaction management.
*/
public @Nullable TransactionManager getTransactionManager() {
return this.transactionManager;
}
/**
* Set properties with method names as keys and transaction attribute
* descriptors (parsed via TransactionAttributeEditor) as values:
* for example, key = "myMethod", value = "PROPAGATION_REQUIRED,readOnly".
* <p>Note: Method names are always applied to the target class,
* no matter if defined in an
|
for
|
java
|
apache__camel
|
components/camel-salesforce/camel-salesforce-component/src/test/java/org/apache/camel/component/salesforce/dto/generated/QueryRecordsAccount.java
|
{
"start": 952,
"end": 1024
}
|
class ____ extends AbstractQueryRecordsBase<Account> {
}
|
QueryRecordsAccount
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/test/java/org/apache/flink/table/planner/plan/stream/sql/ChangelogNormalizeOptimizationTest.java
|
{
"start": 1614,
"end": 6791
}
|
class ____ extends TableTestBase {
private final JavaStreamTableTestUtil util = javaStreamTestUtil();
static List<TestSpec> getTests() {
return Arrays.asList(
TestSpec.select(SourceTable.UPSERT_SOURCE_PARTIAL_DELETES, SinkTable.UPSERT_SINK),
TestSpec.select(SourceTable.UPSERT_SOURCE_FULL_DELETES, SinkTable.UPSERT_SINK),
TestSpec.select(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES,
SinkTable.UPSERT_SINK_FULL_DELETES),
TestSpec.select(
SourceTable.UPSERT_SOURCE_FULL_DELETES, SinkTable.UPSERT_SINK_FULL_DELETES),
TestSpec.select(SourceTable.UPSERT_SOURCE_PARTIAL_DELETES, SinkTable.RETRACT_SINK),
TestSpec.select(SourceTable.UPSERT_SOURCE_FULL_DELETES, SinkTable.RETRACT_SINK),
TestSpec.selectWithFilter(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES, SinkTable.UPSERT_SINK),
TestSpec.selectWithFilter(
SourceTable.UPSERT_SOURCE_FULL_DELETES, SinkTable.UPSERT_SINK),
TestSpec.selectWithFilter(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES, SinkTable.RETRACT_SINK),
TestSpec.selectWithFilter(
SourceTable.UPSERT_SOURCE_FULL_DELETES, SinkTable.RETRACT_SINK),
TestSpec.join(
SourceTable.UPSERT_SOURCE_FULL_DELETES,
SourceTable.UPSERT_SOURCE_FULL_DELETES,
SinkTable.UPSERT_SINK),
TestSpec.join(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES,
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES,
SinkTable.UPSERT_SINK),
TestSpec.join(
SourceTable.UPSERT_SOURCE_FULL_DELETES,
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES,
SinkTable.UPSERT_SINK),
TestSpec.join(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES,
SourceTable.UPSERT_SOURCE_FULL_DELETES,
SinkTable.UPSERT_SINK),
TestSpec.join(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES,
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES,
SinkTable.UPSERT_SINK_FULL_DELETES),
TestSpec.join(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES,
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES,
SinkTable.RETRACT_SINK),
TestSpec.select(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES_METADATA,
SinkTable.UPSERT_SINK_METADATA),
TestSpec.selectWithoutMetadata(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES_METADATA, SinkTable.UPSERT_SINK),
TestSpec.select(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES_METADATA_NO_PUSHDOWN,
SinkTable.UPSERT_SINK_METADATA),
TestSpec.selectWithoutMetadata(
SourceTable.UPSERT_SOURCE_PARTIAL_DELETES_METADATA_NO_PUSHDOWN,
SinkTable.UPSERT_SINK),
TestSpec.select(SourceTable.RETRACT_SOURCE_PARTIAL_DELETES, SinkTable.UPSERT_SINK)
.withSessionOption("table.exec.source.cdc-events-duplicate", "true"));
}
@AfterEach
void tearDown() {
Arrays.stream(util.tableEnv().listTables())
.forEach(t -> util.tableEnv().executeSql("DROP TABLE " + t));
}
@ParameterizedTest()
@MethodSource("getTests")
void testChangelogNormalizePlan(TestSpec spec) {
spec.sessionOptions.forEach((key, value) -> util.tableEnv().getConfig().set(key, value));
for (TableProperties tableProperties : spec.tablesToCreate) {
final String additionalColumns =
String.join(",\n", tableProperties.getAdditionalColumns());
util.tableEnv()
.executeSql(
String.format(
"CREATE TABLE %s ( id INT,\n"
+ " col1 INT,\n"
+ " col2 STRING,\n"
+ "%s"
+ " PRIMARY KEY(id) NOT ENFORCED) WITH (%s)",
tableProperties.getTableName(),
StringUtils.isNullOrWhitespaceOnly(additionalColumns)
? ""
: additionalColumns + ",\n",
String.join(",\n", tableProperties.getOptions())));
}
util.verifyRelPlanInsert(
spec.query,
JavaScalaConversionUtil.toScala(
Collections.singletonList(ExplainDetail.CHANGELOG_MODE)));
}
|
ChangelogNormalizeOptimizationTest
|
java
|
apache__kafka
|
connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConfigTest.java
|
{
"start": 1501,
"end": 4644
}
|
class ____ {
@Test
public void testTaskConfigTopicPartitions() {
List<TopicPartition> topicPartitions = List.of(new TopicPartition("topic-1", 2),
new TopicPartition("topic-3", 4), new TopicPartition("topic-5", 6));
MirrorSourceConfig config = new MirrorSourceConfig(makeProps());
Map<String, String> props = config.taskConfigForTopicPartitions(topicPartitions, 1);
MirrorSourceTaskConfig taskConfig = new MirrorSourceTaskConfig(props);
assertEquals(taskConfig.taskTopicPartitions(), new HashSet<>(topicPartitions),
"Setting topic property configuration failed");
}
@Test
public void testTopicMatching() {
MirrorSourceConfig config = new MirrorSourceConfig(makeProps("topics", "topic1"));
assertTrue(config.topicFilter().shouldReplicateTopic("topic1"),
"topic1 replication property configuration failed");
assertFalse(config.topicFilter().shouldReplicateTopic("topic2"),
"topic2 replication property configuration failed");
}
@Test
public void testConfigPropertyMatching() {
MirrorSourceConfig config = new MirrorSourceConfig(
makeProps("config.properties.exclude", "prop2"));
assertTrue(config.configPropertyFilter().shouldReplicateConfigProperty("prop1"),
"config.properties.exclude incorrectly excluded prop1");
assertFalse(config.configPropertyFilter().shouldReplicateConfigProperty("prop2"),
"config.properties.exclude incorrectly included prop2");
}
@Test
public void testNoTopics() {
MirrorSourceConfig config = new MirrorSourceConfig(makeProps("topics", ""));
assertFalse(config.topicFilter().shouldReplicateTopic("topic1"), "topic1 shouldn't exist");
assertFalse(config.topicFilter().shouldReplicateTopic("topic2"), "topic2 shouldn't exist");
assertFalse(config.topicFilter().shouldReplicateTopic(""), "Empty topic shouldn't exist");
}
@Test
public void testAllTopics() {
MirrorSourceConfig config = new MirrorSourceConfig(makeProps("topics", ".*"));
assertTrue(config.topicFilter().shouldReplicateTopic("topic1"),
"topic1 created from wildcard should exist");
assertTrue(config.topicFilter().shouldReplicateTopic("topic2"),
"topic2 created from wildcard should exist");
}
@Test
public void testListOfTopics() {
MirrorSourceConfig config = new MirrorSourceConfig(makeProps("topics", "topic1, topic2"));
assertTrue(config.topicFilter().shouldReplicateTopic("topic1"), "topic1 created from list should exist");
assertTrue(config.topicFilter().shouldReplicateTopic("topic2"), "topic2 created from list should exist");
assertFalse(config.topicFilter().shouldReplicateTopic("topic3"), "topic3 created from list should exist");
}
@Test
public void testNonMutationOfConfigDef() {
// Sanity check to make sure that these properties are actually defined for the task config,
// and that the task config
|
MirrorSourceConfigTest
|
java
|
apache__dubbo
|
dubbo-config/dubbo-config-spring/src/main/java/org/apache/dubbo/config/spring/beans/factory/annotation/ReferenceAnnotationBeanPostProcessor.java
|
{
"start": 10121,
"end": 11199
}
|
class ____ {
*
* @Bean
* @DubboReference(group="demo", version="1.2.3")
* public ReferenceBean<DemoService> demoService() {
* return new ReferenceBean();
* }
*
* }
* </pre>
*
* @param beanName
* @param beanDefinition
*/
protected void processReferenceAnnotatedBeanDefinition(String beanName, AnnotatedBeanDefinition beanDefinition) {
MethodMetadata factoryMethodMetadata = SpringCompatUtils.getFactoryMethodMetadata(beanDefinition);
// Extract beanClass from generic return type of java-config bean method: ReferenceBean<DemoService>
// see
// org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.getTypeForFactoryBeanFromMethod
Class beanClass = getBeanFactory().getType(beanName);
if (beanClass == Object.class) {
beanClass = SpringCompatUtils.getGenericTypeOfReturnType(factoryMethodMetadata);
}
if (beanClass == Object.class) {
// bean
|
ConsumerConfig
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/test/java/io/vertx/tests/http/compression/Http1xCompressionThresholdTest.java
|
{
"start": 1126,
"end": 4289
}
|
class ____ extends HttpCompressionTestBase {
public Http1xCompressionThresholdTest() {
super(HttpConfig.Http1x.DEFAULT);
}
@Override
protected String encoding() {
return "gzip";
}
@Override
protected MessageToByteEncoder<ByteBuf> encoder() {
return new JdkZlibEncoder(ZlibWrapper.GZIP, 6);
}
@Override
protected Optional<HttpCompressionOptions> serverCompressionConfig() {
GzipOptions compressor = StandardCompressionOptions.gzip(6, StandardCompressionOptions.gzip().windowBits(), StandardCompressionOptions.gzip().memLevel());
return Optional.of(new HttpCompressionOptions().addCompressor(compressor));
}
@Test
public void testServerCompressionBelowThreshold() throws Exception {
// set compression threshold to be greater than the content string size so it WILL NOT be compressed
HttpServerConfig httpServerOptions = config.forServer();
httpServerOptions.setCompression(new HttpCompressionOptions()
.addCompressor(CompressionConfig.gzip(6).compressor)
.setContentSizeThreshold(COMPRESS_TEST_STRING.length() * 2)
);
doTest(httpServerOptions, onSuccess(resp -> {
// check content encoding header is not set
assertNull(resp.getHeader(HttpHeaders.CONTENT_ENCODING));
resp.body().onComplete(onSuccess(responseBuffer -> {
// check that the response body bytes is itself
String responseBody = responseBuffer.toString(CharsetUtil.UTF_8);
assertEquals(COMPRESS_TEST_STRING, responseBody);
testComplete();
}));
}));
}
@Test
public void testServerCompressionAboveThreshold() throws Exception {
// set compression threshold to be less than the content string size so it WILL be compressed
HttpServerConfig config = this.config.forServer();
config.setCompression(new HttpCompressionOptions()
.addCompressor(CompressionConfig.gzip(6).compressor)
.setContentSizeThreshold(COMPRESS_TEST_STRING.length() / 2)
);
doTest(config, onSuccess(resp -> {
// check content encoding header is set
assertEquals(encoding(), resp.getHeader(HttpHeaders.CONTENT_ENCODING));
resp.body().onComplete(onSuccess(responseBuffer -> {
// check that response body bytes is compressed
assertEquals(StringUtil.toHexString(compressedTestString.getBytes()), StringUtil.toHexString(responseBuffer.getBytes()));
testComplete();
}));
}));
}
private void doTest(HttpServerConfig config, Handler<AsyncResult<HttpClientResponse>> handler) throws Exception {
HttpServer server = config.create(vertx);
try {
server.requestHandler(req -> {
assertNotNull(req.headers().get(HttpHeaders.ACCEPT_ENCODING));
req.response()
.end(Buffer.buffer(COMPRESS_TEST_STRING).toString(CharsetUtil.UTF_8));
});
server.listen().await();
client.request(new RequestOptions())
.onComplete(onSuccess(req -> {
req.putHeader(HttpHeaders.ACCEPT_ENCODING, encoding());
req.send().onComplete(handler);
}));
await();
} finally {
server.close().await();
}
}
}
|
Http1xCompressionThresholdTest
|
java
|
spring-projects__spring-boot
|
module/spring-boot-jackson2/src/main/java/org/springframework/boot/jackson2/autoconfigure/Jackson2ObjectMapperBuilderCustomizer.java
|
{
"start": 753,
"end": 1212
}
|
interface ____ can be implemented by beans wishing to further customize the
* {@link ObjectMapper} through
* {@link org.springframework.http.converter.json.Jackson2ObjectMapperBuilder} to
* fine-tune its auto-configuration.
*
* @author Grzegorz Poznachowski
* @since 4.0.0
* @deprecated since 4.0.0 for removal in 4.2.0 in favor of Jackson 3.
*/
@FunctionalInterface
@Deprecated(since = "4.0.0", forRemoval = true)
@SuppressWarnings("removal")
public
|
that
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/config/MonitorResource.java
|
{
"start": 1419,
"end": 1657
}
|
class ____ {
private final URI uri;
@PluginBuilderFactory
public static Builder newBuilder() {
return new Builder();
}
/**
* Builds MonitorResource instances.
*/
public static final
|
MonitorResource
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/collect/ImmutableClassToInstanceMapTest.java
|
{
"start": 7285,
"end": 7728
}
|
class ____ implements One, Two, Three, Four, Five, Serializable {
final int value;
Impl(int value) {
this.value = value;
}
@Override
public boolean equals(@Nullable Object obj) {
return obj instanceof Impl && value == ((Impl) obj).value;
}
@Override
public int hashCode() {
return value;
}
@Override
public String toString() {
return Integer.toString(value);
}
}
}
|
Impl
|
java
|
apache__flink
|
flink-test-utils-parent/flink-connector-test-utils/src/main/java/org/apache/flink/connector/testutils/source/reader/SourceReaderTestBase.java
|
{
"start": 7283,
"end": 8957
}
|
class ____ implements ReaderOutput<Integer> {
private Set<Integer> consumedValues = new HashSet<>();
private int max = Integer.MIN_VALUE;
private int min = Integer.MAX_VALUE;
private int count = 0;
@Override
public void collect(Integer element) {
max = Math.max(element, max);
min = Math.min(element, min);
count++;
consumedValues.add(element);
}
@Override
public void collect(Integer element, long timestamp) {
collect(element);
}
public void validate() {
assertThat(consumedValues)
.as("Should be %d distinct elements in total", totalNumRecords)
.hasSize(totalNumRecords);
assertThat(count)
.as("Should be %d elements in total", totalNumRecords)
.isEqualTo(totalNumRecords);
assertThat(min).as("The min value should be 0", totalNumRecords).isZero();
assertThat(max)
.as("The max value should be %d", totalNumRecords - 1)
.isEqualTo(totalNumRecords - 1);
}
public int count() {
return count;
}
@Override
public void emitWatermark(Watermark watermark) {}
@Override
public void markIdle() {}
@Override
public void markActive() {}
@Override
public SourceOutput<Integer> createOutputForSplit(String splitId) {
return this;
}
@Override
public void releaseOutputForSplit(String splitId) {}
}
}
|
ValidatingSourceOutput
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/bug/Bug_for_liuying.java
|
{
"start": 200,
"end": 652
}
|
class ____ extends TestCase {
public void test_for_bug() throws Exception {
String aa = "[{\"dictFont\":\"\",\"dictId\":\"wap\",\"dictName\":\"无线&手淘\"},{\"dictFont\":\"\",\"dictId\":\"etao\",\"dictName\":\"搜索\"}]";
JSONObject jsonResult = new JSONObject();
JSONArray jsonArray = JSONArray.parseArray(aa);
jsonResult.put("aaa", jsonArray);
System.out.println(jsonResult);
}
}
|
Bug_for_liuying
|
java
|
quarkusio__quarkus
|
extensions/opentelemetry/runtime/src/main/java/io/quarkus/opentelemetry/runtime/config/runtime/TracesRuntimeConfig.java
|
{
"start": 256,
"end": 2543
}
|
interface ____ {
/**
* Suppress non-application uris from trace collection.
* This will suppress tracing of `/q` endpoints.
* <p>
* Providing a custom {@code io.opentelemetry.sdk.trace.samplers.Sampler} CDI Bean
* will ignore this setting.
* <p>
* This is a Quarkus specific property. Suppressing non-application uris is enabled by default.
* <p>
* Fallbacks to the legacy property <code>quarkus.opentelemetry.tracer.suppress-non-application-uris</code>
* or defaults to `true`.
*/
@WithName("suppress-non-application-uris")
@WithDefault("true")
Boolean suppressNonApplicationUris();
/**
* Comma-separated, suppress application uris from trace collection.
* <p>
* This will suppress all uris set by this property.
* <p>
* If you are using <code>quarkus.http.root-path</code>, you need to consider it when setting your uris, in
* other words, you need to configure it using the root-path if necessary.
*/
@WithName("suppress-application-uris")
Optional<List<String>> suppressApplicationUris();
/**
* Include static resources from trace collection.
* <p>
* This is a Quarkus specific property. Include static resources is disabled by default. Providing a
* custom {@code io.opentelemetry.sdk.trace.samplers.Sampler} CDI Bean will ignore this setting.
* <p>
* Fallbacks to the legacy property <code>quarkus.opentelemetry.tracer.include-static-resources</code>
* or defaults to `false`.
*/
@WithName("include-static-resources")
@WithDefault("false")
Boolean includeStaticResources();
/**
* Sampler argument. Depends on the `quarkus.otel.traces.sampler` property.
* Fallbacks to the legacy property <code>quarkus.opentelemetry.tracer.sampler.ratio</code>.
* <p>
* When setting the stock sampler to `traceidratio` or `parentbased_traceidratio` you need to set a `double` compatible
* value between `0.0d` and `1.0d`, like `0.01d` or `0.5d`. It is kept as a `String` to allow the flexible customisation of
* alternative samplers.
* <p>
* Defaults to `1.0d`.
*/
@WithName("sampler.arg")
@WithDefault("1.0d")
Optional<String> samplerArg();
}
|
TracesRuntimeConfig
|
java
|
apache__maven
|
impl/maven-core/src/main/java/org/apache/maven/internal/aether/ReverseTreeRepositoryListener.java
|
{
"start": 2065,
"end": 10327
}
|
class ____ extends AbstractRepositoryListener {
@Override
public void artifactResolved(RepositoryEvent event) {
requireNonNull(event, "event cannot be null");
if (!isLocalRepositoryArtifactOrMissing(event.getSession(), event.getArtifact())) {
return;
}
RequestTrace trace = event.getTrace();
CollectStepData collectStepTrace = null;
ArtifactRequest artifactRequest = null;
ArtifactDescriptorRequest artifactDescriptorRequest = null;
Plugin plugin = null;
while (trace != null) {
Object data = trace.getData();
if (data instanceof CollectStepData collectStepData) {
collectStepTrace = collectStepData;
} else if (data instanceof ArtifactDescriptorRequest artifactDescriptorRequestData) {
artifactDescriptorRequest = artifactDescriptorRequestData;
} else if (data instanceof ArtifactRequest artifactRequestData) {
artifactRequest = artifactRequestData;
} else if (data instanceof Plugin pluginData) {
plugin = pluginData;
} else if (data instanceof org.apache.maven.model.Plugin pluginData) {
plugin = pluginData.getDelegate();
}
trace = trace.getParent();
}
Path trackingDir;
boolean missing = event.getFile() == null;
if (missing) {
// missing artifact - let's track the path anyway
File dir = event.getSession().getLocalRepository().getBasedir();
dir = new File(
dir, event.getSession().getLocalRepositoryManager().getPathForLocalArtifact(event.getArtifact()));
trackingDir = dir.getParentFile().toPath().resolve(".tracking");
} else {
trackingDir = event.getFile().getParentFile().toPath().resolve(".tracking");
}
String baseName;
String ext = missing ? ".miss" : ".dep";
Path trackingFile = null;
StringBuilder indent = new StringBuilder();
ArrayList<String> trackingData = new ArrayList<>();
if (collectStepTrace == null && plugin != null) {
ext = ".plugin";
baseName = plugin.getGroupId() + "_" + plugin.getArtifactId() + "_" + plugin.getVersion();
trackingFile = trackingDir.resolve(baseName + ext);
if (Files.exists(trackingFile)) {
return;
}
if (event.getArtifact() != null) {
trackingData.add(indent.toString() + event.getArtifact());
indent.append(" ");
}
trackingData.add(indent + plugin.getGroupId() + ":" + plugin.getArtifactId() + ":" + plugin.getVersion());
indent.append(" ");
InputLocation location = plugin.getLocation("");
if (location != null && location.getSource() != null) {
trackingData.add(indent + location.getSource().getModelId() + " (implicit)");
indent.append(" ");
}
} else if (collectStepTrace != null) {
if (collectStepTrace.getPath().get(0).getArtifact() == null) {
return;
}
baseName = ArtifactIdUtils.toId(collectStepTrace.getPath().get(0).getArtifact())
.replace(":", "_");
trackingFile = trackingDir.resolve(baseName + ext);
if (Files.exists(trackingFile)) {
return;
}
Artifact resolvedArtifact = event.getArtifact();
Artifact nodeArtifact = collectStepTrace.getNode().getArtifact();
if (isInScope(resolvedArtifact, nodeArtifact) || "pom".equals(resolvedArtifact.getExtension())) {
Dependency node = collectStepTrace.getNode();
trackingData.add(resolvedArtifact.toString());
indent.append(" ");
trackingData.add(indent.toString() + node + " (" + collectStepTrace.getContext() + ")");
ListIterator<DependencyNode> iter = collectStepTrace
.getPath()
.listIterator(collectStepTrace.getPath().size());
while (iter.hasPrevious()) {
DependencyNode curr = iter.previous();
indent.append(" ");
trackingData.add(indent.toString() + curr + " (" + collectStepTrace.getContext() + ")");
}
}
}
if (trackingFile == null) {
return; // parent or imported bom ?
}
try {
Files.createDirectories(trackingDir);
trackingData.add("");
if (!missing) {
if (event.getRepository() != null) {
trackingData.add("Repository: " + event.getRepository());
}
} else {
List<RemoteRepository> repositories = new ArrayList<>();
if (artifactRequest != null && artifactRequest.getRepositories() != null) {
repositories.addAll(artifactRequest.getRepositories());
} else if (artifactDescriptorRequest != null && artifactDescriptorRequest.getRepositories() != null) {
repositories.addAll(artifactDescriptorRequest.getRepositories());
}
if (!repositories.isEmpty()) {
trackingData.add("Configured repositories:");
for (RemoteRepository r : repositories) {
trackingData.add(" - " + r.getId() + " : " + r.getUrl());
}
} else {
trackingData.add("No repositories configured");
}
}
Files.write(trackingFile, trackingData, StandardCharsets.UTF_8);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
/**
* Returns {@code true} if passed in artifact is originating from local repository. In other words, we want
* to process and store tracking information ONLY into local repository, not to any other place. This method
* filters out currently built artifacts, as events are fired for them as well, but their resolved artifact
* file would point to checked out source-tree, not the local repository.
* <p>
* Visible for testing.
*/
static boolean isLocalRepositoryArtifactOrMissing(RepositorySystemSession session, Artifact artifact) {
return artifact.getFile() == null
|| artifact.getFile()
.getPath()
.startsWith(session.getLocalRepository().getBasedir().getPath());
}
/**
* Unravels trace tree (going upwards from current node), looking for {@link CollectStepData} trace data.
* This method may return {@code null} if no collect step data found in passed trace data or it's parents.
* <p>
* Visible for testing.
*/
static CollectStepData lookupCollectStepData(RequestTrace trace) {
CollectStepData collectStepTrace = null;
while (trace != null) {
if (trace.getData() instanceof CollectStepData collectStepData) {
collectStepTrace = collectStepData;
break;
}
trace = trace.getParent();
}
return collectStepTrace;
}
/**
* The event "artifact resolved" if fired WHENEVER an artifact is resolved, BUT it happens also when an artifact
* descriptor (model, the POM) is being built, and parent (and parent of parent...) is being asked for. Hence, this
* method "filters" out in WHICH artifact are we interested in, but it intentionally neglects extension as
* ArtifactDescriptorReader modifies extension to "pom" during collect. So all we have to rely on is GAV only.
*/
static boolean isInScope(Artifact artifact, Artifact nodeArtifact) {
return Objects.equals(artifact.getGroupId(), nodeArtifact.getGroupId())
&& Objects.equals(artifact.getArtifactId(), nodeArtifact.getArtifactId())
&& Objects.equals(artifact.getVersion(), nodeArtifact.getVersion());
}
}
|
ReverseTreeRepositoryListener
|
java
|
apache__maven
|
its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4393ParseExternalParenPomLenientTest.java
|
{
"start": 1040,
"end": 1900
}
|
class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that parent POMs get parsed in lenient mode when resolved from the repository.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-4393");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteDirectory("target");
verifier.deleteArtifacts("org.apache.maven.its.mng4393");
verifier.filterFile("settings-template.xml", "settings.xml");
verifier.addCliArgument("-s");
verifier.addCliArgument("settings.xml");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
}
}
|
MavenITmng4393ParseExternalParenPomLenientTest
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/ManyToManyGroupByOrderByTest.java
|
{
"start": 1094,
"end": 5912
}
|
class ____ {
@Test
public void testSelectEntity(SessionFactoryScope scope) {
// explicit join group by
scope.inTransaction( session -> {
final Person result = session.createQuery(
"select owner from Cat cat join cat.owners owner group by owner",
Person.class
).getSingleResult();
assertThat( result.getId() ).isEqualTo( 1L );
} );
// explicit join group by + order by
scope.inTransaction( session -> {
final Person result = session.createQuery(
"select owner from Cat cat join cat.owners owner group by owner order by owner",
Person.class
).getSingleResult();
assertThat( result.getId() ).isEqualTo( 1L );
} );
// implicit join group by
scope.inTransaction( session -> {
final Person result = session.createQuery(
"select element(cat.owners) from Cat cat group by element(cat.owners)",
Person.class
).getSingleResult();
assertThat( result.getId() ).isEqualTo( 1L );
} );
// implicit join group by + order by
scope.inTransaction( session -> {
final Person result = session.createQuery(
"select element(cat.owners) from Cat cat group by element(cat.owners) order by element(cat.owners)",
Person.class
).getSingleResult();
assertThat( result.getId() ).isEqualTo( 1L );
} );
}
@Test
public void testSelectAssociationId(SessionFactoryScope scope) {
// explicit join group by
scope.inTransaction( session -> {
final Tuple result = session.createQuery(
"select owner.id, owner.name from Cat cat join cat.owners owner group by owner",
Tuple.class
).getSingleResult();
assertThat( result.get( 0, Long.class ) ).isEqualTo( 1L );
assertThat( result.get( 1, String.class ) ).isEqualTo( "Marco" );
} );
// explicit join group by + order by
scope.inTransaction( session -> {
final Tuple result = session.createQuery(
"select owner.id, owner.name from Cat cat join cat.owners owner group by owner order by owner",
Tuple.class
).getSingleResult();
assertThat( result.get( 0, Long.class ) ).isEqualTo( 1L );
assertThat( result.get( 1, String.class ) ).isEqualTo( "Marco" );
} );
// implicit join group by
scope.inTransaction( session -> {
final Tuple result = session.createQuery(
"select element(cat.owners).id from Cat cat group by element(cat.owners)",
Tuple.class
).getSingleResult();
assertThat( result.get( 0, Long.class ) ).isEqualTo( 1L );
} );
// implicit join group by + order by
scope.inTransaction( session -> {
final Tuple result = session.createQuery(
"select element(cat.owners).id from Cat cat group by element(cat.owners) order by element(cat.owners)",
Tuple.class
).getSingleResult();
assertThat( result.get( 0, Long.class ) ).isEqualTo( 1L );
} );
}
@Test
public void testDistinctAndAggregates(SessionFactoryScope scope) {
// explicit join distinct
scope.inTransaction( session -> {
final Tuple result = session.createQuery(
"select distinct owner.id from Cat cat join cat.owners owner group by owner.id order by owner.id",
Tuple.class
).getSingleResult();
assertThat( result.get( 0, Long.class ) ).isEqualTo( 1L );
} );
// explicit join distinct + aggregate
scope.inTransaction( session -> {
final Tuple result = session.createQuery(
"select distinct min(owner.id), cat.id from Cat cat join cat.owners owner group by cat.id order by min(owner.id), cat.id",
Tuple.class
).getSingleResult();
assertThat( result.get( 0, Long.class ) ).isEqualTo( 1L );
} );
// implicit join distinct
scope.inTransaction( session -> {
final Tuple result = session.createQuery(
"select distinct element(cat.owners).id from Cat cat group by element(cat.owners).id order by element(cat.owners).id",
Tuple.class
).getSingleResult();
assertThat( result.get( 0, Long.class ) ).isEqualTo( 1L );
} );
// implicit join distinct + aggregate
scope.inTransaction( session -> {
final Tuple result = session.createQuery(
"select distinct min(element(cat.owners).id), cat.id from Cat cat group by cat.id order by min(element(cat.owners).id), cat.id",
Tuple.class
).getSingleResult();
assertThat( result.get( 0, Long.class ) ).isEqualTo( 1L );
} );
}
@BeforeAll
public void setUp(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final Cat cat = new Cat();
final Person owner = new Person( 1L, "Marco" );
cat.addToOwners( owner );
session.persist( owner );
session.persist( cat );
} );
}
@AfterAll
public void tearDown(SessionFactoryScope scope) {
scope.inTransaction( session -> {
session.createMutationQuery( "delete from Cat" ).executeUpdate();
session.createMutationQuery( "delete from Person" ).executeUpdate();
} );
}
@Entity( name = "Person" )
static
|
ManyToManyGroupByOrderByTest
|
java
|
spring-projects__spring-framework
|
spring-jms/src/main/java/org/springframework/jms/core/JmsMessageOperations.java
|
{
"start": 5164,
"end": 7363
}
|
class ____ convert the payload to
* @return the converted payload of the reply message, possibly {@code null} if
* the message could not be received, for example due to a timeout
*/
<T> @Nullable T receiveAndConvert(String destinationName, Class<T> targetClass) throws MessagingException;
/**
* Receive a message from the default destination.
* @param messageSelector the JMS message selector expression (or {@code null} if none).
* See the JMS specification for a detailed definition of selector expressions.
* @return the received message, possibly {@code null} if the message could not
* be received, for example due to a timeout
* @since 7.0
*/
@Nullable Message<?> receiveSelected(@Nullable String messageSelector) throws MessagingException;
/**
* Receive a message from the given destination.
* @param destination the target destination
* @param messageSelector the JMS message selector expression (or {@code null} if none).
* See the JMS specification for a detailed definition of selector expressions.
* @return the received message, possibly {@code null} if the message could not
* be received, for example due to a timeout
* @since 7.0
*/
@Nullable Message<?> receiveSelected(Destination destination, @Nullable String messageSelector)
throws MessagingException;
/**
* Receive a message from the given destination.
* @param destinationName the name of the target destination
* @param messageSelector the JMS message selector expression (or {@code null} if none).
* See the JMS specification for a detailed definition of selector expressions.
* @return the received message, possibly {@code null} if the message could not
* be received, for example due to a timeout
* @since 7.0
*/
@Nullable Message<?> receiveSelected(String destinationName, @Nullable String messageSelector)
throws MessagingException;
/**
* Receive a message from the default destination and convert its payload to the
* specified target class.
* @param messageSelector the JMS message selector expression (or {@code null} if none).
* See the JMS specification for a detailed definition of selector expressions.
* @param targetClass the target
|
to
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/inheritance/joined/JoinedInheritanceSameTableDifferentSchemaTest.java
|
{
"start": 803,
"end": 1763
}
|
class ____ {
@Test
public void testMapping() {
StandardServiceRegistry ssr = ServiceRegistryUtil.serviceRegistry();
try {
final Metadata metadata = new MetadataSources( ServiceRegistryUtil.serviceRegistry() )
.addAnnotatedClass( EntityA.class )
.addAnnotatedClass( EntityB.class )
.buildMetadata();
org.hibernate.mapping.Table entity1Table = metadata.getEntityBinding( EntityA.class.getName() ).getTable();
org.hibernate.mapping.Table entity2Table = metadata.getEntityBinding( EntityB.class.getName() ).getTable();
assertThat( entity1Table.getName() ).isEqualTo( entity2Table.getName() );
assertThat( entity1Table.getSchema() ).isNotEqualTo( entity2Table.getSchema() );
}
finally {
StandardServiceRegistryBuilder.destroy( ssr );
}
}
@Entity(name = "EntityA")
@Inheritance(strategy = InheritanceType.JOINED)
@Table(schema = "schema_1", name = "my_table")
public static
|
JoinedInheritanceSameTableDifferentSchemaTest
|
java
|
FasterXML__jackson-databind
|
src/main/java/tools/jackson/databind/AnnotationIntrospector.java
|
{
"start": 46820,
"end": 49530
}
|
class ____ introspect
*/
public JsonPOJOBuilder.Value findPOJOBuilderConfig(MapperConfig<?> config, AnnotatedClass ac) {
return null;
}
/**
* Method called to check whether potential Creator (constructor or static factory
* method) has explicit annotation to indicate it as actual Creator; and if so,
* which {@link com.fasterxml.jackson.annotation.JsonCreator.Mode} to use.
*<p>
* NOTE: caller needs to consider possibility of both `null` (no annotation found)
* and {@link com.fasterxml.jackson.annotation.JsonCreator.Mode#DISABLED} (annotation found,
* but disabled); latter is necessary as marker in case multiple introspectors are chained,
* as well as possibly as when using mix-in annotations.
*
* @param config Configuration settings in effect (for serialization or deserialization)
* @param a Annotated accessor (usually constructor or static method) to check
*/
public JsonCreator.Mode findCreatorAnnotation(MapperConfig<?> config, Annotated a) {
return null;
}
/**
* Method called to check if introspector can find a Creator it considers
* its "Preferred Creator": Creator to use as the primary one, when no Creator has
* explicit annotation ({@link #findCreatorAnnotation} returns {@code null}).
* Examples of preferred creators include the canonical constructor defined by
* Java Records; "Data" classes by frameworks
* like Lombok and JVM languages like Kotlin and Scala (case classes) also have
* similar concepts.
* If introspector can determine that one of given {@link PotentialCreator}s should
* be considered preferred one, it should return it; if not, it should return {@code null}.
* Note that core databind functionality may call this method even in the presence of
* explicitly annotated creators; and may or may not use Creator returned depending
* on other criteria.
*<p>
* NOTE: when returning chosen Creator, it may be necessary to mark its "mode"
* with {@link PotentialCreator#overrideMode} (especially for "delegating" creators).
*<p>
* NOTE: method is NOT called for Java Record types; selection of the canonical constructor
* as the Primary creator is handled directly by {@link POJOPropertiesCollector}
*<p>
* NOTE: was called {@code findDefaultCreator()} in Jackson 2.x but was renamed
* due to possible confusion with 0-argument "default" constructor.
*
* @param config Configuration settings in effect (for deserialization)
* @param valueClass Class being instantiated; defines Creators passed
* @param declaredConstructors Constructors value
|
to
|
java
|
spring-projects__spring-boot
|
module/spring-boot-cache/src/main/java/org/springframework/boot/cache/metrics/RedisCacheMeterBinderProvider.java
|
{
"start": 955,
"end": 1179
}
|
class ____ implements CacheMeterBinderProvider<RedisCache> {
@Override
public MeterBinder getMeterBinder(RedisCache cache, Iterable<Tag> tags) {
return new RedisCacheMetrics(cache, tags);
}
}
|
RedisCacheMeterBinderProvider
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.