language stringclasses 1 value | repo stringclasses 60 values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | apache__camel | dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/generated/java/org/apache/camel/dsl/yaml/deserializers/ModelDeserializers.java | {
"start": 1117523,
"end": 1121207
} | class ____ extends YamlDeserializerBase<TemplatedRouteParameterDefinition> {
public TemplatedRouteParameterDefinitionDeserializer() {
super(TemplatedRouteParameterDefinition.class);
}
@Override
protected TemplatedRouteParameterDefinition newInstance() {
return new TemplatedRouteParameterDefinition();
}
@Override
protected boolean setProperty(TemplatedRouteParameterDefinition target, String propertyKey,
String propertyName, Node node) {
propertyKey = org.apache.camel.util.StringHelper.dashToCamelCase(propertyKey);
switch(propertyKey) {
case "name": {
String val = asText(node);
target.setName(val);
break;
}
case "value": {
String val = asText(node);
target.setValue(val);
break;
}
default: {
return false;
}
}
return true;
}
}
@YamlType(
nodes = "threadPoolProfile",
types = org.apache.camel.model.ThreadPoolProfileDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
displayName = "Thread Pool Profile",
description = "To configure thread pools",
deprecated = false,
properties = {
@YamlProperty(name = "allowCoreThreadTimeOut", type = "boolean", defaultValue = "false", description = "Whether idle core threads is allowed to timeout and therefore can shrink the pool size below the core pool size Is by default true", displayName = "Allow Core Thread Time Out"),
@YamlProperty(name = "defaultProfile", type = "boolean", defaultValue = "false", description = "Whether this profile is the default thread pool profile", displayName = "Default Profile"),
@YamlProperty(name = "description", type = "string", description = "Sets the description of this node", displayName = "Description"),
@YamlProperty(name = "id", type = "string", description = "Sets the id of this node", displayName = "Id"),
@YamlProperty(name = "keepAliveTime", type = "number", description = "Sets the keep alive time for idle threads in the pool", displayName = "Keep Alive Time"),
@YamlProperty(name = "maxPoolSize", type = "number", description = "Sets the maximum pool size", displayName = "Max Pool Size"),
@YamlProperty(name = "maxQueueSize", type = "number", description = "Sets the maximum number of tasks in the work queue. Use -1 or Integer.MAX_VALUE for an unbounded queue", displayName = "Max Queue Size"),
@YamlProperty(name = "note", type = "string", description = "Sets the note of this node", displayName = "Note"),
@YamlProperty(name = "poolSize", type = "number", description = "Sets the core pool size", displayName = "Pool Size"),
@YamlProperty(name = "rejectedPolicy", type = "enum:Abort,CallerRuns", description = "Sets the handler for tasks which cannot be executed by the thread pool.", displayName = "Rejected Policy"),
@YamlProperty(name = "timeUnit", type = "enum:NANOSECONDS,MICROSECONDS,MILLISECONDS,SECONDS,MINUTES,HOURS,DAYS", description = "Sets the time unit to use for keep alive time By default SECONDS is used.", displayName = "Time Unit")
}
)
public static | TemplatedRouteParameterDefinitionDeserializer |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/KnativeEndpointBuilderFactory.java | {
"start": 18328,
"end": 24953
} | interface ____
extends
EndpointProducerBuilder {
default AdvancedKnativeEndpointProducerBuilder advanced() {
return (AdvancedKnativeEndpointProducerBuilder) this;
}
/**
* CloudEvent headers to override.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.String></code> type.
*
* Group: common
*
* @param ceOverride the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder ceOverride(Map<java.lang.String, java.lang.String> ceOverride) {
doSetProperty("ceOverride", ceOverride);
return this;
}
/**
* CloudEvent headers to override.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.String></code>
* type.
*
* Group: common
*
* @param ceOverride the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder ceOverride(String ceOverride) {
doSetProperty("ceOverride", ceOverride);
return this;
}
/**
* Set the version of the cloudevents spec.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: 1.0
* Group: common
*
* @param cloudEventsSpecVersion the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder cloudEventsSpecVersion(String cloudEventsSpecVersion) {
doSetProperty("cloudEventsSpecVersion", cloudEventsSpecVersion);
return this;
}
/**
* Set the event-type information of the produced events.
*
* The option is a: <code>java.lang.String</code> type.
*
* Default: org.apache.camel.event
* Group: common
*
* @param cloudEventsType the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder cloudEventsType(String cloudEventsType) {
doSetProperty("cloudEventsType", cloudEventsType);
return this;
}
/**
* The environment.
*
* The option is a:
* <code>org.apache.camel.component.knative.spi.KnativeEnvironment</code> type.
*
* Group: common
*
* @param environment the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder environment(org.apache.camel.component.knative.spi.KnativeEnvironment environment) {
doSetProperty("environment", environment);
return this;
}
/**
* The environment.
*
* The option will be converted to a
* <code>org.apache.camel.component.knative.spi.KnativeEnvironment</code> type.
*
* Group: common
*
* @param environment the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder environment(String environment) {
doSetProperty("environment", environment);
return this;
}
/**
* Set the filters.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.String></code> type.
*
* Group: common
*
* @param filters the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder filters(Map<java.lang.String, java.lang.String> filters) {
doSetProperty("filters", filters);
return this;
}
/**
* Set the filters.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.String></code>
* type.
*
* Group: common
*
* @param filters the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder filters(String filters) {
doSetProperty("filters", filters);
return this;
}
/**
* The SinkBinding configuration.
*
* The option is a:
* <code>org.apache.camel.component.knative.spi.KnativeSinkBinding</code> type.
*
* Group: common
*
* @param sinkBinding the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder sinkBinding(org.apache.camel.component.knative.spi.KnativeSinkBinding sinkBinding) {
doSetProperty("sinkBinding", sinkBinding);
return this;
}
/**
* The SinkBinding configuration.
*
* The option will be converted to a
* <code>org.apache.camel.component.knative.spi.KnativeSinkBinding</code> type.
*
* Group: common
*
* @param sinkBinding the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder sinkBinding(String sinkBinding) {
doSetProperty("sinkBinding", sinkBinding);
return this;
}
/**
* Set the transport options.
*
* The option is a: <code>java.util.Map<java.lang.String,
* java.lang.Object></code> type.
*
* Group: common
*
* @param transportOptions the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder transportOptions(Map<java.lang.String, java.lang.Object> transportOptions) {
doSetProperty("transportOptions", transportOptions);
return this;
}
/**
* Set the transport options.
*
* The option will be converted to a
* <code>java.util.Map<java.lang.String, java.lang.Object></code>
* type.
*
* Group: common
*
* @param transportOptions the value to set
* @return the dsl builder
*/
default KnativeEndpointProducerBuilder transportOptions(String transportOptions) {
doSetProperty("transportOptions", transportOptions);
return this;
}
}
/**
* Advanced builder for endpoint producers for the Knative component.
*/
public | KnativeEndpointProducerBuilder |
java | google__guava | android/guava-tests/benchmark/com/google/common/hash/HashCodeBenchmark.java | {
"start": 1891,
"end": 1978
} | enum ____ {
ONE_PERCENT_IN,
LAST_BYTE,
NOT_AT_ALL;
}
private | WhereToDiffer |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/TestDoubleJsonCreator.java | {
"start": 609,
"end": 1678
} | class ____ {
private final Base value;
@JsonCreator(mode = JsonCreator.Mode.DELEGATING)
private UnionExample(Base value) {
this.value = value;
}
@JsonValue
private Base getValue() {
return value;
}
public static UnionExample double_(AliasDouble value) {
return new UnionExample(new DoubleWrapper(value));
}
public <T> T accept(Visitor<T> visitor) {
return value.accept(visitor);
}
@Override
public boolean equals(Object other) {
return this == other || (other instanceof UnionExample && equalTo((UnionExample) other));
}
private boolean equalTo(UnionExample other) {
return this.value.equals(other.value);
}
@Override
public int hashCode() {
return Objects.hashCode(this.value);
}
@Override
public String toString() {
return "UnionExample{value: " + value + '}';
}
public | UnionExample |
java | eclipse-vertx__vert.x | vertx-core/src/main/java/io/vertx/core/eventbus/impl/clustered/ClusteredMessage.java | {
"start": 1103,
"end": 8079
} | class ____<U, V> extends MessageImpl<U, V> {
private static final Logger log = LoggerFactory.getLogger(ClusteredMessage.class);
private static final byte WIRE_PROTOCOL_VERSION = 2;
private String sender;
private String repliedTo;
private Buffer wireBuffer;
private int bodyPos;
private int headersPos;
private boolean fromWire;
private boolean toWire;
private String failure;
public ClusteredMessage(EventBusImpl bus) {
super(bus);
}
public ClusteredMessage(String sender, String address, MultiMap headers, U sentBody,
MessageCodec<U, V> messageCodec, boolean send, EventBusImpl bus) {
super(address, headers, sentBody, messageCodec, send, bus);
this.sender = sender;
}
protected ClusteredMessage(ClusteredMessage<U, V> other) {
super(other);
this.sender = other.sender;
if (other.sentBody == null) {
this.wireBuffer = other.wireBuffer;
this.bodyPos = other.bodyPos;
this.headersPos = other.headersPos;
}
this.fromWire = other.fromWire;
}
@Override
protected MessageImpl createReply(Object message, DeliveryOptions options) {
ClusteredMessage reply = (ClusteredMessage) super.createReply(message, options);
reply.repliedTo = sender;
return reply;
}
public ClusteredMessage<U, V> copyBeforeReceive() {
return new ClusteredMessage<>(this);
}
@Override
public MultiMap headers() {
// Lazily decode headers
if (headers == null) {
// The message has been read from the wire
if (headersPos != 0) {
decodeHeaders();
}
if (headers == null) {
headers = MultiMap.caseInsensitiveMultiMap();
}
}
return headers;
}
@Override
public V body() {
// Lazily decode the body
if (receivedBody == null && bodyPos != 0) {
// The message has been read from the wire
decodeBody();
}
return receivedBody;
}
@Override
public String replyAddress() {
return replyAddress;
}
public Buffer encodeToWire() {
toWire = true;
int length = 1024; // TODO make this configurable
Buffer buffer = Buffer.buffer(length);
buffer.appendInt(0);
buffer.appendByte(WIRE_PROTOCOL_VERSION);
byte systemCodecID = messageCodec.systemCodecID();
buffer.appendByte(systemCodecID);
if (systemCodecID == -1) {
// User codec
writeString(buffer, messageCodec.name());
}
buffer.appendByte(send ? (byte) 0 : (byte) 1);
writeString(buffer, address);
if (replyAddress != null) {
writeString(buffer, replyAddress);
} else {
buffer.appendInt(0);
}
writeString(buffer, sender);
encodeHeaders(buffer);
writeBody(buffer);
buffer.setInt(0, buffer.length() - 4);
return buffer;
}
public void readFromWire(Buffer buffer, CodecManager codecManager) {
int pos = 0;
// Overall Length already read when passed in here
byte protocolVersion = buffer.getByte(pos);
if (protocolVersion > WIRE_PROTOCOL_VERSION) {
setFailure("Invalid wire protocol version " + protocolVersion + " should be <= " + WIRE_PROTOCOL_VERSION);
}
pos++;
byte systemCodecCode = buffer.getByte(pos);
pos++;
if (systemCodecCode == -1) {
// User codec
int length = buffer.getInt(pos);
pos += 4;
byte[] bytes = buffer.getBytes(pos, pos + length);
String codecName = new String(bytes, CharsetUtil.UTF_8);
messageCodec = codecManager.getCodec(codecName);
if (messageCodec == null) {
setFailure("No message codec registered with name " + codecName);
}
pos += length;
} else {
messageCodec = codecManager.systemCodecs()[systemCodecCode];
}
byte bsend = buffer.getByte(pos);
send = bsend == 0;
pos++;
int length = buffer.getInt(pos);
pos += 4;
byte[] bytes = buffer.getBytes(pos, pos + length);
address = new String(bytes, CharsetUtil.UTF_8);
pos += length;
length = buffer.getInt(pos);
pos += 4;
if (length != 0) {
bytes = buffer.getBytes(pos, pos + length);
replyAddress = new String(bytes, CharsetUtil.UTF_8);
pos += length;
}
length = buffer.getInt(pos);
pos += 4;
bytes = buffer.getBytes(pos, pos + length);
sender = new String(bytes, CharsetUtil.UTF_8);
pos += length;
headersPos = pos;
int headersLength = buffer.getInt(pos);
pos += headersLength;
bodyPos = pos;
wireBuffer = buffer;
fromWire = true;
}
private void setFailure(String s) {
if (failure == null) {
failure = s;
}
}
private void decodeBody() {
receivedBody = messageCodec.decodeFromWire(bodyPos, wireBuffer);
bodyPos = 0;
}
private void encodeHeaders(Buffer buffer) {
if (headers != null && !headers.isEmpty()) {
int headersLengthPos = buffer.length();
buffer.appendInt(0);
buffer.appendInt(headers.entries().size());
List<Map.Entry<String, String>> entries = headers.entries();
for (Map.Entry<String, String> entry: entries) {
writeString(buffer, entry.getKey());
writeString(buffer, entry.getValue());
}
int headersEndPos = buffer.length();
buffer.setInt(headersLengthPos, headersEndPos - headersLengthPos);
} else {
buffer.appendInt(4);
}
}
private void decodeHeaders() {
int length = wireBuffer.getInt(headersPos);
if (length != 4) {
headersPos += 4;
int numHeaders = wireBuffer.getInt(headersPos);
headersPos += 4;
headers = MultiMap.caseInsensitiveMultiMap();
for (int i = 0; i < numHeaders; i++) {
int keyLength = wireBuffer.getInt(headersPos);
headersPos += 4;
byte[] bytes = wireBuffer.getBytes(headersPos, headersPos + keyLength);
String key = new String(bytes, CharsetUtil.UTF_8);
headersPos += keyLength;
int valLength = wireBuffer.getInt(headersPos);
headersPos += 4;
bytes = wireBuffer.getBytes(headersPos, headersPos + valLength);
String val = new String(bytes, CharsetUtil.UTF_8);
headersPos += valLength;
headers.add(key, val);
}
}
headersPos = 0;
}
private void writeBody(Buffer buff) {
messageCodec.encodeToWire(buff, sentBody);
}
private void writeString(Buffer buff, String str) {
byte[] strBytes = str.getBytes(CharsetUtil.UTF_8);
buff.appendInt(strBytes.length);
buff.appendBytes(strBytes);
}
String getSender() {
return sender;
}
String getRepliedTo() {
return repliedTo;
}
public boolean isFromWire() {
return fromWire;
}
public boolean isToWire() {
return toWire;
}
protected boolean isLocal() {
return !isFromWire();
}
boolean hasFailure() {
return failure != null;
}
void internalError() {
if (replyAddress != null) {
reply(new ReplyException(ReplyFailure.ERROR, failure));
} else {
log.error(failure);
}
}
}
| ClusteredMessage |
java | hibernate__hibernate-orm | tooling/metamodel-generator/src/test/java/org/hibernate/processor/test/classnamecollision/ClassNameCollisionTest.java | {
"start": 634,
"end": 1407
} | class ____ {
@Test
@WithClasses({
Something.class,
org.hibernate.processor.test.classnamecollision.somewhere.Something.class
})
void testAmbiguousSimpleName() {
System.out.println( getMetaModelSourceAsString( Something.class ) );
assertMetamodelClassGeneratedFor( Something.class );
System.out.println( getMetaModelSourceAsString( org.hibernate.processor.test.classnamecollision.somewhere.Something.class ) );
assertMetamodelClassGeneratedFor( org.hibernate.processor.test.classnamecollision.somewhere.Something.class );
assertEquals(
getMetamodelClassFor( org.hibernate.processor.test.classnamecollision.somewhere.Something.class ).getName(),
getMetamodelClassFor( Something.class ).getSuperclass()
.getName() );
}
}
| ClassNameCollisionTest |
java | google__guice | core/test/com/google/inject/errors/NullInjectedIntoNonNullableTest.java | {
"start": 1328,
"end": 1519
} | class ____ extends AbstractModule {
@Override
protected void configure() {
bind(String.class).annotatedWith(Bar.class).toProvider(() -> null);
}
}
static | FromProviderModule |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/utils/DefaultSerializeClassChecker.java | {
"start": 3994,
"end": 9380
} | class ____ blocked
*/
public Class<?> loadClass(ClassLoader classLoader, String className) throws ClassNotFoundException {
Class<?> aClass = loadClass0(classLoader, className);
if (!aClass.isPrimitive() && !Serializable.class.isAssignableFrom(aClass)) {
String msg = "[Serialization Security] Serialized class " + className
+ " has not implement Serializable interface. "
+ "Current mode is strict check, will disallow to deserialize it by default. ";
if (serializeSecurityManager.getWarnedClasses().add(className)) {
logger.error(PROTOCOL_UNTRUSTED_SERIALIZE_CLASS, "", "", msg);
}
if (checkSerializable) {
throw new IllegalArgumentException(msg);
}
}
return aClass;
}
private Class<?> loadClass0(ClassLoader classLoader, String className) throws ClassNotFoundException {
if (checkStatus == SerializeCheckStatus.DISABLE) {
return classForName(classLoader, className);
}
long hash = MAGIC_HASH_CODE;
for (int i = 0, typeNameLength = className.length(); i < typeNameLength; ++i) {
char ch = className.charAt(i);
if (ch == '$') {
ch = '.';
}
hash ^= ch;
hash *= MAGIC_PRIME;
if (Arrays.binarySearch(allowPrefixes, hash) >= 0) {
return classForName(classLoader, className);
}
}
if (checkStatus == SerializeCheckStatus.STRICT) {
String msg = "[Serialization Security] Serialized class " + className + " is not in allow list. "
+ "Current mode is `STRICT`, will disallow to deserialize it by default. "
+ "Please add it into security/serialize.allowlist or follow FAQ to configure it.";
if (serializeSecurityManager.getWarnedClasses().add(className)) {
logger.error(PROTOCOL_UNTRUSTED_SERIALIZE_CLASS, "", "", msg);
}
throw new IllegalArgumentException(msg);
}
hash = MAGIC_HASH_CODE;
for (int i = 0, typeNameLength = className.length(); i < typeNameLength; ++i) {
char ch = className.charAt(i);
if (ch == '$') {
ch = '.';
}
hash ^= ch;
hash *= MAGIC_PRIME;
if (Arrays.binarySearch(disAllowPrefixes, hash) >= 0) {
String msg = "[Serialization Security] Serialized class " + className + " is in disallow list. "
+ "Current mode is `WARN`, will disallow to deserialize it by default. "
+ "Please add it into security/serialize.allowlist or follow FAQ to configure it.";
if (serializeSecurityManager.getWarnedClasses().add(className)) {
logger.warn(PROTOCOL_UNTRUSTED_SERIALIZE_CLASS, "", "", msg);
}
throw new IllegalArgumentException(msg);
}
}
hash = MAGIC_HASH_CODE;
for (int i = 0, typeNameLength = className.length(); i < typeNameLength; ++i) {
char ch = Character.toLowerCase(className.charAt(i));
if (ch == '$') {
ch = '.';
}
hash ^= ch;
hash *= MAGIC_PRIME;
if (Arrays.binarySearch(disAllowPrefixes, hash) >= 0) {
String msg = "[Serialization Security] Serialized class " + className + " is in disallow list. "
+ "Current mode is `WARN`, will disallow to deserialize it by default. "
+ "Please add it into security/serialize.allowlist or follow FAQ to configure it.";
if (serializeSecurityManager.getWarnedClasses().add(className)) {
logger.warn(PROTOCOL_UNTRUSTED_SERIALIZE_CLASS, "", "", msg);
}
throw new IllegalArgumentException(msg);
}
}
Class<?> clazz = classForName(classLoader, className);
if (serializeSecurityManager.getWarnedClasses().add(className)) {
logger.warn(
PROTOCOL_UNTRUSTED_SERIALIZE_CLASS,
"",
"",
"[Serialization Security] Serialized class " + className + " is not in allow list. "
+ "Current mode is `WARN`, will allow to deserialize it by default. "
+ "Dubbo will set to `STRICT` mode by default in the future. "
+ "Please add it into security/serialize.allowlist or follow FAQ to configure it.");
}
return clazz;
}
private Class<?> classForName(ClassLoader classLoader, String className) throws ClassNotFoundException {
if (classHolder != null) {
Class<?> aClass = classHolder.loadClass(className, classLoader);
if (aClass != null) {
return aClass;
}
}
return ClassUtils.forName(className, classLoader);
}
public static DefaultSerializeClassChecker getInstance() {
return FrameworkModel.defaultModel().getBeanFactory().getBean(DefaultSerializeClassChecker.class);
}
public boolean isCheckSerializable() {
return checkSerializable;
}
}
| is |
java | elastic__elasticsearch | x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java | {
"start": 1908,
"end": 6617
} | class ____ extends ESTestCase {
private NativeRoleMappingStore store;
private TransportPutRoleMappingAction action;
private AtomicReference<PutRoleMappingRequest> requestRef;
private ProjectStateRoleMapper projectStateRoleMapper;
@SuppressWarnings("unchecked")
@Before
public void setupMocks() {
store = mock(NativeRoleMappingStore.class);
TransportService transportService = new TransportService(
Settings.EMPTY,
mock(Transport.class),
mock(ThreadPool.class),
TransportService.NOOP_TRANSPORT_INTERCEPTOR,
x -> null,
null,
Collections.emptySet()
);
projectStateRoleMapper = mock();
when(projectStateRoleMapper.hasMapping(any())).thenReturn(false);
action = new TransportPutRoleMappingAction(mock(ActionFilters.class), transportService, store, projectStateRoleMapper);
requestRef = new AtomicReference<>(null);
doAnswer(invocation -> {
Object[] args = invocation.getArguments();
assert args.length == 2;
requestRef.set((PutRoleMappingRequest) args[0]);
ActionListener<Boolean> listener = (ActionListener<Boolean>) args[1];
listener.onResponse(true);
return null;
}).when(store).putRoleMapping(any(PutRoleMappingRequest.class), any(ActionListener.class));
}
public void testPutValidMapping() throws Exception {
final FieldExpression expression = new FieldExpression("username", Collections.singletonList(new FieldExpression.FieldValue("*")));
final PutRoleMappingResponse response = put("anarchy", expression, "superuser", Collections.singletonMap("dumb", true));
assertThat(response.isCreated(), equalTo(true));
final ExpressionRoleMapping mapping = requestRef.get().getMapping();
assertThat(mapping.getExpression(), is(expression));
assertThat(mapping.isEnabled(), equalTo(true));
assertThat(mapping.getName(), equalTo("anarchy"));
assertThat(mapping.getRoles(), iterableWithSize(1));
assertThat(mapping.getRoles(), contains("superuser"));
assertThat(mapping.getMetadata(), aMapWithSize(1));
assertThat(mapping.getMetadata().get("dumb"), equalTo(true));
}
public void testValidMappingClashingClusterStateMapping() throws Exception {
final FieldExpression expression = new FieldExpression("username", Collections.singletonList(new FieldExpression.FieldValue("*")));
final PutRoleMappingResponse response = put("anarchy", expression, "superuser", Collections.singletonMap("dumb", true));
when(projectStateRoleMapper.hasMapping(any())).thenReturn(true);
assertThat(response.isCreated(), equalTo(true));
final ExpressionRoleMapping mapping = requestRef.get().getMapping();
assertThat(mapping.getExpression(), is(expression));
assertThat(mapping.isEnabled(), equalTo(true));
assertThat(mapping.getName(), equalTo("anarchy"));
assertThat(mapping.getRoles(), iterableWithSize(1));
assertThat(mapping.getRoles(), contains("superuser"));
assertThat(mapping.getMetadata(), aMapWithSize(1));
assertThat(mapping.getMetadata().get("dumb"), equalTo(true));
}
public void testInvalidSuffix() {
final FieldExpression expression = new FieldExpression("username", Collections.singletonList(new FieldExpression.FieldValue("*")));
String name = ExpressionRoleMapping.addReadOnlySuffix("anarchy");
final var ex = expectThrows(IllegalArgumentException.class, () -> {
put(name, expression, "superuser", Collections.singletonMap("dumb", true));
});
assertThat(
ex.getMessage(),
containsString(
"Invalid mapping name ["
+ name
+ "]. ["
+ ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX
+ "] is not an allowed suffix"
)
);
}
private PutRoleMappingResponse put(String name, FieldExpression expression, String role, Map<String, Object> metadata)
throws Exception {
final PutRoleMappingRequest request = new PutRoleMappingRequest();
request.setName(name);
request.setRoles(Arrays.asList(role));
request.setRules(expression);
request.setMetadata(metadata);
request.setEnabled(true);
final PlainActionFuture<PutRoleMappingResponse> future = new PlainActionFuture<>();
action.doExecute(mock(Task.class), request, future);
return future.get();
}
}
| TransportPutRoleMappingActionTests |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/http/OAuth2ResourceServerBeanDefinitionParserTests.java | {
"start": 45731,
"end": 46658
} | class ____ implements FactoryBean<JwtDecoder> {
private RestOperations rest;
private RSAPublicKey key;
private OAuth2TokenValidator<Jwt> jwtValidator;
@Override
public JwtDecoder getObject() {
NimbusJwtDecoder decoder;
if (this.key != null) {
decoder = NimbusJwtDecoder.withPublicKey(this.key).build();
}
else {
decoder = NimbusJwtDecoder.withJwkSetUri("https://idp.example.org").restOperations(this.rest).build();
}
if (this.jwtValidator != null) {
decoder.setJwtValidator(this.jwtValidator);
}
return decoder;
}
@Override
public Class<?> getObjectType() {
return JwtDecoder.class;
}
public void setJwtValidator(OAuth2TokenValidator<Jwt> jwtValidator) {
this.jwtValidator = jwtValidator;
}
public void setKey(RSAPublicKey key) {
this.key = key;
}
public void setRest(RestOperations rest) {
this.rest = rest;
}
}
static | JwtDecoderFactoryBean |
java | netty__netty | transport-native-io_uring/src/test/java/io/netty/channel/uring/IoUringSocketStartTlsTest.java | {
"start": 1003,
"end": 1358
} | class ____ extends SocketStartTlsTest {
@BeforeAll
public static void loadJNI() {
assumeTrue(IoUring.isAvailable());
}
@Override
protected List<TestsuitePermutation.BootstrapComboFactory<ServerBootstrap, Bootstrap>> newFactories() {
return IoUringSocketTestPermutation.INSTANCE.socket();
}
}
| IoUringSocketStartTlsTest |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/PineconeVectorDbEndpointBuilderFactory.java | {
"start": 20235,
"end": 20602
} | class ____ extends AbstractEndpointBuilder implements PineconeVectorDbEndpointBuilder, AdvancedPineconeVectorDbEndpointBuilder {
public PineconeVectorDbEndpointBuilderImpl(String path) {
super(componentName, path);
}
}
return new PineconeVectorDbEndpointBuilderImpl(path);
}
} | PineconeVectorDbEndpointBuilderImpl |
java | micronaut-projects__micronaut-core | core/src/main/java/io/micronaut/core/order/OrderUtil.java | {
"start": 1549,
"end": 2068
} | class ____ avoid lambda overhead during the startup
public static final Comparator<Object> COMPARATOR = new Comparator<Object>() {
@Override
public int compare(Object o1, Object o2) {
int order1 = getOrder(o1);
int order2 = getOrder(o2);
return Integer.compare(order1, order2);
}
};
/**
* Orders objects using {@link #getOrderWithDefaultPrecedence(Object, int)} using zero as the
* default precedence.
*/
// Keep as an anonymous | to |
java | qos-ch__slf4j | slf4j-api/src/main/java/org/slf4j/spi/SLF4JServiceProvider.java | {
"start": 753,
"end": 1497
} | class ____ bind to.
*
* @return instance of {@link IMarkerFactory}
*/
public IMarkerFactory getMarkerFactory();
/**
* Return the instance of {@link MDCAdapter} that
* {@link MDC} should bind to.
*
* @return instance of {@link MDCAdapter}
*/
public MDCAdapter getMDCAdapter();
/**
* Return the maximum API version for SLF4J that the logging
* implementation supports.
*
* <p>For example: {@code "2.0.1"}.
*
* @return the string API version.
*/
public String getRequestedApiVersion();
/**
* Initialize the logging back-end.
*
* <p><b>WARNING:</b> This method is intended to be called once by
* {@link LoggerFactory} | should |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DoNotCallSuggesterTest.java | {
"start": 870,
"end": 1187
} | class ____ {
private final CompilationTestHelper testHelper =
CompilationTestHelper.newInstance(DoNotCallSuggester.class, getClass());
@Test
public void finalClass_publicFinalMethod() {
testHelper
.addSourceLines(
"Test.java",
"""
final | DoNotCallSuggesterTest |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYMaxFromWKBGeoEvaluator.java | {
"start": 1238,
"end": 4715
} | class ____ extends AbstractConvertFunction.AbstractEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(StYMaxFromWKBGeoEvaluator.class);
private final EvalOperator.ExpressionEvaluator wkb;
public StYMaxFromWKBGeoEvaluator(Source source, EvalOperator.ExpressionEvaluator wkb,
DriverContext driverContext) {
super(driverContext, source);
this.wkb = wkb;
}
@Override
public EvalOperator.ExpressionEvaluator next() {
return wkb;
}
@Override
public Block evalVector(Vector v) {
BytesRefVector vector = (BytesRefVector) v;
int positionCount = v.getPositionCount();
BytesRef scratchPad = new BytesRef();
if (vector.isConstant()) {
try {
return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount);
} catch (IllegalArgumentException e) {
registerException(e);
return driverContext.blockFactory().newConstantNullBlock(positionCount);
}
}
try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
for (int p = 0; p < positionCount; p++) {
try {
builder.appendDouble(evalValue(vector, p, scratchPad));
} catch (IllegalArgumentException e) {
registerException(e);
builder.appendNull();
}
}
return builder.build();
}
}
private double evalValue(BytesRefVector container, int index, BytesRef scratchPad) {
BytesRef value = container.getBytesRef(index, scratchPad);
return StYMax.fromWellKnownBinaryGeo(value);
}
@Override
public Block evalBlock(Block b) {
BytesRefBlock block = (BytesRefBlock) b;
int positionCount = block.getPositionCount();
try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) {
BytesRef scratchPad = new BytesRef();
for (int p = 0; p < positionCount; p++) {
int valueCount = block.getValueCount(p);
int start = block.getFirstValueIndex(p);
int end = start + valueCount;
boolean positionOpened = false;
boolean valuesAppended = false;
for (int i = start; i < end; i++) {
try {
double value = evalValue(block, i, scratchPad);
if (positionOpened == false && valueCount > 1) {
builder.beginPositionEntry();
positionOpened = true;
}
builder.appendDouble(value);
valuesAppended = true;
} catch (IllegalArgumentException e) {
registerException(e);
}
}
if (valuesAppended == false) {
builder.appendNull();
} else if (positionOpened) {
builder.endPositionEntry();
}
}
return builder.build();
}
}
private double evalValue(BytesRefBlock container, int index, BytesRef scratchPad) {
BytesRef value = container.getBytesRef(index, scratchPad);
return StYMax.fromWellKnownBinaryGeo(value);
}
@Override
public String toString() {
return "StYMaxFromWKBGeoEvaluator[" + "wkb=" + wkb + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(wkb);
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += wkb.baseRamBytesUsed();
return baseRamBytesUsed;
}
public static | StYMaxFromWKBGeoEvaluator |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/netty/NettyProtocol.java | {
"start": 1234,
"end": 6614
} | class ____ {
private final NettyMessage.NettyMessageEncoder messageEncoder =
new NettyMessage.NettyMessageEncoder();
private final ResultPartitionProvider partitionProvider;
private final TaskEventPublisher taskEventPublisher;
NettyProtocol(
ResultPartitionProvider partitionProvider, TaskEventPublisher taskEventPublisher) {
this.partitionProvider = partitionProvider;
this.taskEventPublisher = taskEventPublisher;
}
/**
* Returns the server channel handlers.
*
* <pre>
* +-------------------------------------------------------------------+
* | SERVER CHANNEL PIPELINE |
* | |
* | +----------+----------+ (3) write +----------------------+ |
* | | Queue of queues +----------->| Message encoder | |
* | +----------+----------+ +-----------+----------+ |
* | /|\ \|/ |
* | | (2) enqueue | |
* | +----------+----------+ | |
* | | Request handler | | |
* | +----------+----------+ | |
* | /|\ | |
* | | | |
* | +-----------+-----------+ | |
* | | Message+Frame decoder | | |
* | +-----------+-----------+ | |
* | /|\ | |
* +---------------+-----------------------------------+---------------+
* | | (1) client request \|/
* +---------------+-----------------------------------+---------------+
* | | | |
* | [ Socket.read() ] [ Socket.write() ] |
* | |
* | Netty Internal I/O Threads (Transport Implementation) |
* +-------------------------------------------------------------------+
* </pre>
*
* @return channel handlers
*/
public ChannelHandler[] getServerChannelHandlers() {
PartitionRequestQueue queueOfPartitionQueues = new PartitionRequestQueue();
PartitionRequestServerHandler serverHandler =
new PartitionRequestServerHandler(
partitionProvider, taskEventPublisher, queueOfPartitionQueues);
return new ChannelHandler[] {
messageEncoder,
new NettyMessage.NettyMessageDecoder(),
serverHandler,
queueOfPartitionQueues
};
}
/**
* Returns the client channel handlers.
*
* <pre>
* +-----------+----------+ +----------------------+
* | Remote input channel | | request client |
* +-----------+----------+ +-----------+----------+
* | | (1) write
* +---------------+-----------------------------------+---------------+
* | | CLIENT CHANNEL PIPELINE | |
* | | \|/ |
* | +----------+----------+ +----------------------+ |
* | | Request handler + | Message encoder | |
* | +----------+----------+ +-----------+----------+ |
* | /|\ \|/ |
* | | | |
* | +----------+------------+ | |
* | | Message+Frame decoder | | |
* | +----------+------------+ | |
* | /|\ | |
* +---------------+-----------------------------------+---------------+
* | | (3) server response \|/ (2) client request
* +---------------+-----------------------------------+---------------+
* | | | |
* | [ Socket.read() ] [ Socket.write() ] |
* | |
* | Netty Internal I/O Threads (Transport Implementation) |
* +-------------------------------------------------------------------+
* </pre>
*
* @return channel handlers
*/
public ChannelHandler[] getClientChannelHandlers() {
NetworkClientHandler networkClientHandler = new CreditBasedPartitionRequestClientHandler();
return new ChannelHandler[] {
messageEncoder,
new NettyMessageClientDecoderDelegate(networkClientHandler),
networkClientHandler
};
}
}
| NettyProtocol |
java | apache__flink | flink-runtime/src/test/java/org/apache/flink/runtime/registration/ManualResponseTestRegistrationGateway.java | {
"start": 1180,
"end": 2595
} | class ____ extends TestingGatewayBase
implements TestRegistrationGateway {
private final BlockingQueue<RegistrationCall> invocations;
private final RegistrationResponse[] responses;
private int pos;
public ManualResponseTestRegistrationGateway(RegistrationResponse... responses) {
Preconditions.checkArgument(responses != null && responses.length > 0);
this.invocations = new LinkedBlockingQueue<>();
this.responses = responses;
}
// ------------------------------------------------------------------------
@Override
public CompletableFuture<RegistrationResponse> registrationCall(UUID leaderId, long timeout) {
invocations.add(new RegistrationCall(leaderId, timeout));
RegistrationResponse response = responses[pos];
if (pos < responses.length - 1) {
pos++;
}
// return a completed future (for a proper value), or one that never completes and will time
// out (for null)
return response != null
? CompletableFuture.completedFuture(response)
: futureWithTimeout(timeout);
}
public BlockingQueue<RegistrationCall> getInvocations() {
return invocations;
}
// ------------------------------------------------------------------------
/** Invocation parameters. */
public static | ManualResponseTestRegistrationGateway |
java | grpc__grpc-java | xds/src/main/java/io/grpc/xds/ClusterManagerLoadBalancerProvider.java | {
"start": 1251,
"end": 1453
} | class ____ not be directly
* referenced in code. The policy should be accessed through
* {@link LoadBalancerRegistry#getProvider} with the name "cluster_manager_experimental".
*/
@Internal
public | should |
java | apache__logging-log4j2 | log4j-core/src/main/java/org/apache/logging/log4j/core/appender/AbstractManager.java | {
"start": 1599,
"end": 2080
} | class ____ {@link AutoCloseable} mostly to allow unit tests to be written safely and succinctly. While
* managers do need to allocate resources (usually on construction) and then free these resources, a manager is longer
* lived than other auto-closeable objects like streams. None the less, making a manager AutoCloseable forces readers to
* be aware of the pattern: allocate resources on construction and call {@link #close()} at some point.
* </p>
*/
public abstract | implements |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/query/MaxInExpressionParameterPaddingTest.java | {
"start": 6930,
"end": 7231
} | class ____ {
@Id
private Integer id;
private String name;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
public static | Person |
java | elastic__elasticsearch | x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelActionTests.java | {
"start": 572,
"end": 1833
} | class ____ extends ESTestCase {
public void testResolveTaskType() {
assertEquals(TaskType.SPARSE_EMBEDDING, ServiceUtils.resolveTaskType(TaskType.SPARSE_EMBEDDING, null));
assertEquals(TaskType.SPARSE_EMBEDDING, ServiceUtils.resolveTaskType(TaskType.ANY, TaskType.SPARSE_EMBEDDING.toString()));
var e = expectThrows(ElasticsearchStatusException.class, () -> ServiceUtils.resolveTaskType(TaskType.ANY, null));
assertThat(e.getMessage(), containsString("model is missing required setting [task_type]"));
e = expectThrows(ElasticsearchStatusException.class, () -> ServiceUtils.resolveTaskType(TaskType.ANY, TaskType.ANY.toString()));
assertThat(e.getMessage(), containsString("task_type [any] is not valid type for inference"));
e = expectThrows(
ElasticsearchStatusException.class,
() -> ServiceUtils.resolveTaskType(TaskType.SPARSE_EMBEDDING, TaskType.TEXT_EMBEDDING.toString())
);
assertThat(
e.getMessage(),
containsString(
"Cannot resolve conflicting task_type parameter in the request URL [sparse_embedding] and the request body [text_embedding]"
)
);
}
}
| TransportPutInferenceModelActionTests |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/source/spi/PluralAttributeElementSource.java | {
"start": 204,
"end": 290
} | interface ____ {
PluralAttributeElementNature getNature();
}
| PluralAttributeElementSource |
java | google__dagger | javatests/dagger/hilt/android/DefaultViewModelFactoryTest.java | {
"start": 3468,
"end": 3561
} | class ____ extends Hilt_DefaultViewModelFactoryTest_TestActivity {}
public static | TestActivity |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/StaticGuardedByInstanceTest.java | {
"start": 2899,
"end": 3322
} | class ____ {
final Object lock = new Object();
boolean init = false;
void m() {
synchronized (lock) {
init = true;
}
}
}
""")
.doTest();
}
@Test
public void negative_method() {
compilationHelper
.addSourceLines(
"Test.java",
"""
| Test |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/context/support/AbstractRefreshableWebApplicationContext.java | {
"start": 1833,
"end": 3643
} | class ____ as easy to subclass as AbstractRefreshableApplicationContext:
* All you need to implement is the {@link #loadBeanDefinitions} method;
* see the superclass javadoc for details. Note that implementations are supposed
* to load bean definitions from the files specified by the locations returned
* by the {@link #getConfigLocations} method.
*
* <p>Interprets resource paths as servlet context resources, i.e. as paths beneath
* the web application root. Absolute paths, for example, for files outside the web app root,
* can be accessed via "file:" URLs, as implemented by
* {@link org.springframework.core.io.DefaultResourceLoader}.
*
* <p><b>This is the web context to be subclassed for a different bean definition format.</b>
* Such a context implementation can be specified as "contextClass" context-param
* for {@link org.springframework.web.context.ContextLoader} or as "contextClass"
* init-param for {@link org.springframework.web.servlet.FrameworkServlet},
* replacing the default {@link XmlWebApplicationContext}. It will then automatically
* receive the "contextConfigLocation" context-param or init-param, respectively.
*
* <p>Note that WebApplicationContext implementations are generally supposed
* to configure themselves based on the configuration received through the
* {@link ConfigurableWebApplicationContext} interface. In contrast, a standalone
* application context might allow for configuration in custom startup code
* (for example, {@link org.springframework.context.support.GenericApplicationContext}).
*
* @author Juergen Hoeller
* @since 1.1.3
* @see #loadBeanDefinitions
* @see org.springframework.web.context.ConfigurableWebApplicationContext#setConfigLocations
* @see XmlWebApplicationContext
*/
@SuppressWarnings("deprecation")
public abstract | is |
java | apache__flink | flink-core/src/test/java/org/apache/flink/core/memory/ByteArrayOutputStreamWithPosTest.java | {
"start": 1213,
"end": 4094
} | class ____ {
private static final int BUFFER_SIZE = 32;
private ByteArrayOutputStreamWithPos stream;
@BeforeEach
void setup() {
stream = new ByteArrayOutputStreamWithPos(BUFFER_SIZE);
}
/** Test setting position which is exactly the same with the buffer size. */
@Test
void testSetPositionWhenBufferIsFull() throws Exception {
stream.write(new byte[BUFFER_SIZE]);
// check whether the buffer is filled fully
assertThat(stream.getBuf()).hasSize(BUFFER_SIZE);
// check current position is the end of the buffer
assertThat(stream.getPosition()).isEqualTo(BUFFER_SIZE);
stream.setPosition(BUFFER_SIZE);
// confirm current position is at where we expect.
assertThat(stream.getPosition()).isEqualTo(BUFFER_SIZE);
}
/** Test setting negative position. */
@Test
void testSetNegativePosition() {
assertThatThrownBy(
() -> {
stream.write(new byte[BUFFER_SIZE]);
stream.setPosition(-1);
})
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Position out of bounds");
}
/** Test setting position larger than buffer size. */
@Test
void testSetPositionLargerThanBufferSize() throws Exception {
// fully fill the buffer
stream.write(new byte[BUFFER_SIZE]);
assertThat(stream.getBuf()).hasSize(BUFFER_SIZE);
// expand the buffer by setting position beyond the buffer length
stream.setPosition(BUFFER_SIZE + 1);
assertThat(stream.getBuf()).hasSize(BUFFER_SIZE * 2);
assertThat(stream.getPosition()).isEqualTo(BUFFER_SIZE + 1);
}
/** Test that toString returns a substring of the buffer with range(0, position). */
@Test
void testToString() throws IOException {
byte[] data = "1234567890".getBytes(ConfigConstants.DEFAULT_CHARSET);
try (ByteArrayOutputStreamWithPos stream = new ByteArrayOutputStreamWithPos(data.length)) {
stream.write(data);
assertThat(stream.toString().getBytes(ConfigConstants.DEFAULT_CHARSET))
.containsExactly(data);
for (int i = 0; i < data.length; i++) {
stream.setPosition(i);
assertThat(stream.toString().getBytes(ConfigConstants.DEFAULT_CHARSET))
.containsExactly(Arrays.copyOf(data, i));
}
// validate that the stored bytes are still tracked properly even when expanding array
stream.setPosition(data.length + 1);
assertThat(stream.toString().getBytes(ConfigConstants.DEFAULT_CHARSET))
.containsExactly(Arrays.copyOf(data, data.length + 1));
}
}
}
| ByteArrayOutputStreamWithPosTest |
java | reactor__reactor-core | reactor-core/src/test/java/reactor/core/publisher/FluxDelaySequenceTest.java | {
"start": 1320,
"end": 10139
} | class ____ {
@Test
public void delayFirstInterval() {
Supplier<Flux<Tuple2<Long, Long>>> test = () -> Flux.interval(Duration.ofMillis(50))
.delaySequence(Duration.ofMillis(500))
.elapsed()
.take(33, false);
StepVerifier.withVirtualTime(test)
.thenAwait(Duration.ofMillis(500 + 50))
.recordWith(ArrayList::new)
.assertNext(t2 -> assertThat(t2.getT1()).isEqualTo(550))
.thenAwait(Duration.ofMillis(33 * 50))
.thenConsumeWhile(t2 -> t2.getT1() == 50)
.consumeRecordedWith(record -> {
assertThat(record.stream().mapToLong(Tuple2::getT2))
.startsWith(0L, 1L, 2L)
.endsWith(30L, 31L, 32L)
.isSorted()
.hasSize(33);
})
.verifyComplete();
}
@Test
public void delayFirstAsymmetricDelays() {
Supplier<Flux<Tuple2<Long, Long>>> test = () -> {
Flux<Long> asymmetricDelays = Flux.concat(
Mono.delay(Duration.ofMillis(400)).then(Mono.just(0L)),
Mono.delay(Duration.ofMillis(800)).then(Mono.just(1L)),
Mono.delay(Duration.ofMillis(200)).then(Mono.just(2L)),
Mono.delay(Duration.ofMillis(300)).then(Mono.just(3L))
);
return asymmetricDelays
.delaySequence(Duration.ofMillis(500))
.take(33, false)
.elapsed();
};
StepVerifier.withVirtualTime(test)
//first is delayed (from subscription) by additional 500ms
.thenAwait(Duration.ofMillis(500 + 400))
.assertNext(t2 -> {
assertThat(t2.getT1()).isEqualTo(400L + 500L);
assertThat(t2.getT2()).isEqualTo(0L);
})
//rest follow same delays as in source
.thenAwait(Duration.ofMillis(800))
.assertNext(t2 -> {
assertThat(t2.getT1()).isEqualTo(800L);
assertThat(t2.getT2()).isEqualTo(1L);
})
.thenAwait(Duration.ofMillis(200))
.assertNext(t2 -> {
assertThat(t2.getT1()).isEqualTo(200L);
assertThat(t2.getT2()).isEqualTo(2L);
})
.thenAwait(Duration.ofMillis(300))
.assertNext(t2 -> {
assertThat(t2.getT1()).isEqualTo(300L);
assertThat(t2.getT2()).isEqualTo(3L);
})
.verifyComplete();
}
@Disabled("delayElements test for local comparison run")
@Test
public void delayElements() {
Flux<Tuple2<Long, Long>> test = Flux.interval(Duration.ofMillis(50))
.onBackpressureDrop()
.delayElements(Duration.ofMillis(500))
.take(33, false)
.elapsed()
.log();
StepVerifier.create(test)
.thenConsumeWhile(t2 -> t2.getT1() >= 500)
.verifyComplete();
}
@Test
public void every50msThenErrorDelaysError() {
Supplier<Flux<Long>> test = () -> {
Flux<Long> source = Flux.concat(Mono.delay(Duration.ofMillis(50))
.then(Mono.just(0L)),
Mono.delay(Duration.ofMillis(50))
.then(Mono.just(1L)),
Mono.delay(Duration.ofMillis(50))
.then(Mono.just(2L)),
Mono.error(new IllegalStateException("boom")));
return source.delaySequence(Duration.ofMillis(1000));
};
StepVerifier.withVirtualTime(test)
.expectSubscription()
.expectNoEvent(Duration.ofMillis(1050))
.expectNext(0L)
.expectNoEvent(Duration.ofMillis(50))
.expectNext(1L)
.expectNoEvent(Duration.ofMillis(50))
.expectNext(2L)
.verifyErrorMessage("boom");
}
@Test
public void emptyErrorErrorsImmediately() {
Flux<Long> source = Flux.error(new IllegalStateException("boom"));
Flux<Long> test = source
.delaySequence(Duration.ofMillis(1000));
Duration took = StepVerifier.create(test)
.expectSubscription()
.expectErrorMessage("boom")
.verify();
assertThat(took.toMillis())
.as("errors immediately")
.isLessThan(50);
}
@Test
public void emptyCompletesImmediately() {
Flux<Long> source = Flux.empty();
Flux<Long> test = source
.delaySequence(Duration.ofMillis(1000));
Duration took = StepVerifier.create(test)
.expectComplete()
.verify();
assertThat(took.toMillis())
.as("completes immediately")
.isLessThan(50);
}
@Test
public void allDelayInNanos() {
Duration longDelay = Duration.ofMillis(59_999);
long expected = longDelay.toNanos();
DelaySubscriber<String> subscriber = new DelaySubscriber<>(null, longDelay, null);
assertThat(subscriber.delay).isEqualTo(expected);
assertThat(subscriber.timeUnit).isSameAs(TimeUnit.NANOSECONDS);
}
@Test
public void onNextAfterCompleteDrops() {
TestPublisher<String> testPublisher = TestPublisher.createNoncompliant(
TestPublisher.Violation.CLEANUP_ON_TERMINATE);
StepVerifier.create(testPublisher
.flux()
.delaySequence(Duration.ofMillis(500)))
.then(testPublisher::complete)
.then(() -> testPublisher.next("foo"))
.expectComplete()
.verifyThenAssertThat()
.hasDropped("foo")
.hasNotDroppedErrors();
}
@Test
public void onNextAfterErrorDrops() {
TestPublisher<String> testPublisher = TestPublisher.createNoncompliant(
TestPublisher.Violation.CLEANUP_ON_TERMINATE);
StepVerifier.create(testPublisher
.flux()
.delaySequence(Duration.ofMillis(500)))
.then(() -> testPublisher.error(new IllegalStateException("boom")))
.then(() -> testPublisher.next("foo"))
.expectErrorMessage("boom")
.verifyThenAssertThat()
.hasDropped("foo")
.hasNotDroppedErrors();
}
@Test
public void onCompleteAfterComplete() {
TestPublisher<String> testPublisher = TestPublisher.createNoncompliant(
TestPublisher.Violation.CLEANUP_ON_TERMINATE);
StepVerifier.create(testPublisher
.flux()
.delaySequence(Duration.ofMillis(500)))
.then(testPublisher::complete)
.then(testPublisher::complete)
.expectComplete()
.verifyThenAssertThat()
.hasNotDroppedElements()
.hasNotDroppedErrors();
}
@Test
public void onErrorAfterCompleteDrops() {
TestPublisher<String> testPublisher = TestPublisher.createNoncompliant(
TestPublisher.Violation.CLEANUP_ON_TERMINATE);
StepVerifier.create(testPublisher
.flux()
.delaySequence(Duration.ofMillis(500)))
.then(testPublisher::complete)
.then(() -> testPublisher.error(new IllegalStateException("boom")))
.expectComplete()
.verifyThenAssertThat()
.hasNotDroppedElements()
.hasDroppedErrorWithMessage("boom");
}
@Test
public void scanOperator() {
FluxDelaySequence<String> test = new FluxDelaySequence<>(Flux.empty(), Duration.ofSeconds(1), Schedulers.immediate());
assertThat(test.scan(Scannable.Attr.RUN_ON)).isSameAs(Schedulers.immediate());
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.ASYNC);
}
@Test
public void scanSubscriber() {
Scheduler.Worker worker = Schedulers.immediate().createWorker();
CoreSubscriber<String> actual = new LambdaSubscriber<>(null, null, null, null);
Subscription s = Operators.emptySubscription();
FluxDelaySequence.DelaySubscriber test = new DelaySubscriber<>(actual, Duration.ofSeconds(1), worker);
test.onSubscribe(s);
assertThat(test.scan(Scannable.Attr.PARENT)).isSameAs(s);
SerializedSubscriber serializedSubscriber = (SerializedSubscriber) test.scan(Scannable.Attr.ACTUAL);
assertThat(serializedSubscriber)
.isNotNull()
.satisfies(ser -> assertThat(ser.actual).isSameAs(actual));
assertThat(test.scan(Scannable.Attr.RUN_ON)).isEqualTo(worker);
assertThat(test.scan(Scannable.Attr.RUN_STYLE)).isSameAs(Scannable.Attr.RunStyle.ASYNC);
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isFalse();
test.cancel();
assertThat(test.scan(Scannable.Attr.TERMINATED)).isFalse();
assertThat(test.scan(Scannable.Attr.CANCELLED)).isTrue();
test.done = true;
assertThat(test.scan(Scannable.Attr.TERMINATED)).isTrue();
}
}
| FluxDelaySequenceTest |
java | apache__flink | flink-libraries/flink-cep/src/test/java/org/apache/flink/cep/utils/OutputAsserter.java | {
"start": 1280,
"end": 2578
} | class ____ {
private final Queue<?> output;
private OutputAsserter(Queue<?> output) {
this.output = output;
}
public static OutputAsserter assertOutput(Queue<?> output) {
return new OutputAsserter(output);
}
private AssertionError fail(Object record) {
return new AssertionError("Received unexpected element: " + record);
}
public <T> OutputAsserter nextElementEquals(T expected) {
final Object record = output.poll();
final Object actual;
if (record instanceof StreamRecord) {
// This is in case we assert side output
actual = ((StreamRecord) record).getValue();
} else {
// This is in case we assert the main output
actual = record;
}
assertThat(actual, is(expected));
return this;
}
public void hasNoMoreElements() {
assertTrue(output.isEmpty());
}
public OutputAsserter watermarkEquals(long timestamp) {
Object record = output.poll();
if (record instanceof Watermark) {
Watermark watermark = (Watermark) record;
assertThat(watermark.getTimestamp(), is(timestamp));
} else {
throw fail(record);
}
return this;
}
}
| OutputAsserter |
java | apache__camel | components/camel-jms/src/test/java/org/apache/camel/component/jms/integration/spring/tx/error/JMSTransactionRollbackIT.java | {
"start": 1257,
"end": 1931
} | class ____ extends AbstractSpringJMSITSupport {
@Override
protected ClassPathXmlApplicationContext createApplicationContext() {
return new ClassPathXmlApplicationContext(
"/org/apache/camel/component/jms/integration/spring/tx/error/JMSTransactionRollbackIT.xml");
}
@Test
public void testTransactionRollback() throws Exception {
getMockEndpoint("mock:before").expectedMessageCount(6);
getMockEndpoint("mock:result").expectedMessageCount(0);
template.sendBody("activemq:queue:okay.JMSTransactionRollbackTest", "Hello World");
MockEndpoint.assertIsSatisfied(context);
}
}
| JMSTransactionRollbackIT |
java | apache__hadoop | hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestClassWithNoPackage.java | {
"start": 1697,
"end": 2046
} | class ____ no package name.
String defaultPackage = this.getClass().getPackage().getName();
Class c = StreamUtil.goodClassOrNull(conf, NAME, defaultPackage);
assertNotNull(c, "Class " + NAME + " not found!");
}
public static void main(String[]args) throws Exception
{
new TestClassWithNoPackage().testGoodClassOrNull();
}
}
| with |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_1353/Issue1353Test.java | {
"start": 755,
"end": 1896
} | class ____ {
@ProcessorTest
@ExpectedCompilationOutcome (
value = CompilationResult.SUCCEEDED,
diagnostics = {
@Diagnostic (type = Issue1353Mapper.class,
kind = javax.tools.Diagnostic.Kind.WARNING,
line = 22,
message = "The property named \" source.string1\" has whitespaces,"
+ " using trimmed property \"source.string1\" instead."
),
@Diagnostic (type = Issue1353Mapper.class,
kind = javax.tools.Diagnostic.Kind.WARNING,
line = 22,
message = "The property named \"string2 \" has whitespaces,"
+ " using trimmed property \"string2\" instead."
)
}
)
public void shouldTrimArguments() {
Source source = new Source();
source.setString1( "TestString" );
Target target = Issue1353Mapper.INSTANCE.sourceToTarget( source );
assertThat( target.getString2() ).isNotNull();
assertThat( target.getString2() ).isEqualTo( source.getString1() );
}
}
| Issue1353Test |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/oauth2/server/resource/OAuth2ResourceServerConfigurerTests.java | {
"start": 99918,
"end": 100450
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((requests) -> requests
.anyRequest().authenticated())
.oauth2ResourceServer((server) -> server
.authenticationManagerResolver(mock(AuthenticationManagerResolver.class))
.opaqueToken(Customizer.withDefaults()));
return http.build();
// @formatter:on
}
}
@Configuration
@EnableWebSecurity
@EnableWebMvc
static | AuthenticationManagerResolverPlusOtherConfig |
java | square__retrofit | retrofit/src/main/java14/retrofit2/DefaultMethodSupport.java | {
"start": 897,
"end": 1258
} | class ____ {
@Nullable
static Object invoke(
Method method, Class<?> declaringClass, Object proxy, @Nullable Object[] args)
throws Throwable {
return MethodHandles.lookup()
.unreflectSpecial(method, declaringClass)
.bindTo(proxy)
.invokeWithArguments(args);
}
private DefaultMethodSupport() {}
}
| DefaultMethodSupport |
java | mapstruct__mapstruct | processor/src/main/java/org/mapstruct/ap/internal/util/Nouns.java | {
"start": 387,
"end": 3938
} | class ____ {
private Nouns() {
}
private static final List<ReplaceRule> SINGULAR_RULES = Arrays.asList(
new ReplaceRule( "(equipment|information|rice|money|species|series|fish|sheep)$", "$1" ),
new ReplaceRule( "(f)eet$", "$1oot" ),
new ReplaceRule( "(t)eeth$", "$1ooth" ),
new ReplaceRule( "(g)eese$", "$1oose" ),
new ReplaceRule( "(s)tadiums$", "$1tadium" ),
new ReplaceRule( "(m)oves$", "$1ove" ),
new ReplaceRule( "(s)exes$", "$1ex" ),
new ReplaceRule( "(c)hildren$", "$1hild" ),
new ReplaceRule( "(m)en$", "$1an" ),
new ReplaceRule( "(p)eople$", "$1erson" ),
new ReplaceRule( "(quiz)zes$", "$1" ),
new ReplaceRule( "(matr)ices$", "$1ix" ),
new ReplaceRule( "(vert|ind)ices$", "$1ex" ),
new ReplaceRule( "^(ox)en", "$1" ),
new ReplaceRule( "(alias|status)$", "$1" ), // already singular, but ends in 's'
new ReplaceRule( "(alias|status)es$", "$1" ),
new ReplaceRule( "(octop|vir)us$", "$1us" ), // already singular, but ends in 's'
new ReplaceRule( "(octop|vir)i$", "$1us" ),
new ReplaceRule( "(cris|ax|test)es$", "$1is" ),
new ReplaceRule( "(cris|ax|test)is$", "$1is" ), // already singular, but ends in 's'
new ReplaceRule( "(shoe)s$", "$1" ),
new ReplaceRule( "(o)es$", "$1" ),
new ReplaceRule( "(bus)es$", "$1" ),
new ReplaceRule( "([m|l])ice$", "$1ouse" ),
new ReplaceRule( "(x|ch|ss|sh)es$", "$1" ),
new ReplaceRule( "(m)ovies$", "$1ovie" ),
new ReplaceRule( "(s)eries$", "$1eries" ),
new ReplaceRule( "([^aeiouy]|qu)ies$", "$1y" ),
new ReplaceRule( "([lr])ves$", "$1f" ),
new ReplaceRule( "(tive)s$", "$1" ),
new ReplaceRule( "(hive)s$", "$1" ),
new ReplaceRule( "([^f])ves$", "$1fe" ),
new ReplaceRule( "(^analy)sis$", "$1sis" ), // already singular, but ends in 's'
new ReplaceRule( "(^analy)ses$", "$1sis" ),
new ReplaceRule( "((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)ses$", "$1$2sis" ),
new ReplaceRule( "([ti])a$", "$1um" ),
new ReplaceRule( "(n)ews$", "$1ews" ),
new ReplaceRule( "(s|si|u)s$", "$1s" ), // '-us' and '-ss' are already singular
new ReplaceRule( "s$", "" )
);
/**
* Replacement rules based on the routine applied by the <a href="http://www.eclipse.org/webtools/dali/">Dali</a>
* project. Applied as a fallback if the other rules didn't yield a match.
*/
private static final List<ReplaceRule> SINGULAR_DALI_RULES = Arrays.asList(
new ReplaceRule( "(us|ss)$", "$1" ),
new ReplaceRule( "(ch|s)es$", "$1" ),
new ReplaceRule( "([^aeiouy])ies$", "$1y" )
);
/**
* Converts given pluralized noun into the singular form. If no singular form could be determined, the given word
* itself is returned.
*
* @param plural plural word
* @return singular form, if available
*/
public static String singularize(String plural) {
for ( ReplaceRule replaceRule : SINGULAR_RULES ) {
String match = replaceRule.apply( plural );
if ( match != null ) {
return match;
}
}
for ( ReplaceRule replaceRule : SINGULAR_DALI_RULES ) {
String match = replaceRule.apply( plural );
if ( match != null ) {
return match;
}
}
return plural;
}
private static final | Nouns |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/TruncateOp.java | {
"start": 1593,
"end": 4311
} | class ____ extends Operation {
private static final Logger LOG = LoggerFactory.getLogger(TruncateOp.class);
TruncateOp(ConfigExtractor cfg, Random rnd) {
super(TruncateOp.class.getSimpleName(), cfg, rnd);
}
/**
* Gets the file to truncate from
*
* @return Path
*/
protected Path getTruncateFile() {
Path fn = getFinder().getFile();
return fn;
}
@Override // Operation
List<OperationOutput> run(FileSystem fs) {
List<OperationOutput> out = super.run(fs);
try {
Path fn = getTruncateFile();
boolean waitOnTruncate = getConfig().shouldWaitOnTruncate();
long currentSize = fs.getFileStatus(fn).getLen();
// determine file status for file length requirement
// to know if should fill in partial bytes
Range<Long> truncateSizeRange = getConfig().getTruncateSize();
if (getConfig().shouldTruncateUseBlockSize()) {
truncateSizeRange = getConfig().getBlockSize();
}
long truncateSize = Math.max(0L,
currentSize - Range.betweenPositive(getRandom(), truncateSizeRange));
long timeTaken = 0;
LOG.info("Attempting to truncate file at " + fn + " to size "
+ Helper.toByteInfo(truncateSize));
{
// truncate
long startTime = Timer.now();
boolean completed = fs.truncate(fn, truncateSize);
if(!completed && waitOnTruncate)
waitForRecovery(fs, fn, truncateSize);
timeTaken += Timer.elapsed(startTime);
}
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.BYTES_WRITTEN, 0));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.OK_TIME_TAKEN, timeTaken));
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.SUCCESSES, 1L));
LOG.info("Truncate file " + fn + " to " + Helper.toByteInfo(truncateSize)
+ " in " + timeTaken + " milliseconds");
} catch (FileNotFoundException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.NOT_FOUND, 1L));
LOG.warn("Error with truncating", e);
} catch (IOException | UnsupportedOperationException e) {
out.add(new OperationOutput(OutputType.LONG, getType(),
ReportWriter.FAILURES, 1L));
LOG.warn("Error with truncating", e);
}
return out;
}
private void waitForRecovery(FileSystem fs, Path fn, long newLength)
throws IOException {
LOG.info("Waiting on truncate file recovery for " + fn);
for(;;) {
FileStatus stat = fs.getFileStatus(fn);
if(stat.getLen() == newLength) break;
try {Thread.sleep(1000);} catch(InterruptedException ignored) {}
}
}
}
| TruncateOp |
java | apache__camel | components/camel-elasticsearch/src/test/java/org/apache/camel/component/es/integration/ElasticsearchTestSupport.java | {
"start": 2054,
"end": 5973
} | class ____ extends CamelTestSupport {
@RegisterExtension
protected static ElasticSearchService service = ElasticSearchServiceFactory.createSingletonService();
@RegisterExtension
@Order(10)
TestNameExtension testNameExtension = new TestNameExtension();
protected static String clusterName = "docker-cluster";
protected static RestClient restClient;
protected static ElasticsearchClient client;
private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchTestSupport.class);
@Override
protected void setupResources() throws Exception {
super.setupResources();
String scheme = service.getSslContext().isPresent() ? "https" : "http";
HttpHost host
= new HttpHost(service.getElasticSearchHost(), service.getPort(), scheme);
final RestClientBuilder builder = RestClient.builder(host);
final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(AuthScope.ANY,
new UsernamePasswordCredentials(service.getUsername(), service.getPassword()));
builder.setHttpClientConfigCallback(
httpClientBuilder -> {
httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
service.getSslContext().ifPresent(sslContext -> {
httpClientBuilder.setSSLContext(sslContext);
});
return httpClientBuilder;
});
restClient = builder.build();
client = new ElasticsearchClient(new RestClientTransport(restClient, new JacksonJsonpMapper()));
}
@Override
protected void cleanupResources() throws Exception {
super.cleanupResources();
if (restClient != null) {
restClient.close();
}
}
@Override
protected CamelContext createCamelContext() throws Exception {
final ElasticsearchComponent elasticsearchComponent = new ElasticsearchComponent();
elasticsearchComponent.setEnableSSL(service.getSslContext().isPresent());
elasticsearchComponent.setHostAddresses(service.getHttpHostAddress());
elasticsearchComponent.setUser(service.getUsername());
elasticsearchComponent.setPassword(service.getPassword());
service.getCertificatePath().ifPresent(certificatePath -> {
elasticsearchComponent.setCertificatePath("file:%s".formatted(certificatePath));
});
CamelContext context = super.createCamelContext();
context.addComponent("elasticsearch", elasticsearchComponent);
return context;
}
/**
* As we don't delete the {@code target/data} folder for <b>each</b> test below (otherwise they would run much
* slower), we need to make sure there's no side effect of the same used data through creating unique indexes.
*/
Map<String, String> createIndexedData(String... additionalPrefixes) {
String prefix = createPrefix();
// take over any potential prefixes we may have been asked for
if (additionalPrefixes.length > 0) {
StringBuilder sb = new StringBuilder(prefix);
for (String additionalPrefix : additionalPrefixes) {
sb.append(additionalPrefix).append("-");
}
prefix = sb.toString();
}
String key = prefix + "key";
String value = prefix + "value";
LOG.info("Creating indexed data using the key/value pair {} => {}", key, value);
Map<String, String> map = new HashMap<>();
map.put(key, value);
return map;
}
String createPrefix() {
// make use of the test method name to avoid collision
return testNameExtension.getCurrentTestName().toLowerCase() + "-";
}
RestClient getClient() {
return restClient;
}
}
| ElasticsearchTestSupport |
java | spring-projects__spring-framework | spring-webmvc/src/test/java/org/springframework/web/servlet/mvc/method/annotation/ResponseEntityExceptionHandlerTests.java | {
"start": 17119,
"end": 17620
} | class ____ extends ResponseEntityExceptionHandler {
@Override
protected ResponseEntity<Object> handleServletRequestBindingException(
ServletRequestBindingException ex, HttpHeaders headers, HttpStatusCode status, WebRequest request) {
headers = new HttpHeaders();
headers.set("someHeader", "someHeaderValue");
return handleExceptionInternal(ex, "error content", headers, status, request);
}
}
@SuppressWarnings("unused")
void handle(String arg) {
}
}
| ApplicationExceptionHandler |
java | elastic__elasticsearch | x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStore.java | {
"start": 4017,
"end": 16378
} | class ____ extends CachingServiceAccountTokenStore {
private static final Logger logger = LogManager.getLogger(IndexServiceAccountTokenStore.class);
static final String SERVICE_ACCOUNT_TOKEN_DOC_TYPE = "service_account_token";
private final Clock clock;
private final Client client;
private final SecurityIndexManager securityIndex;
private final ClusterService clusterService;
private final Hasher hasher;
@SuppressWarnings("this-escape")
public IndexServiceAccountTokenStore(
Settings settings,
ThreadPool threadPool,
Clock clock,
Client client,
SecurityIndexManager securityIndex,
ClusterService clusterService,
CacheInvalidatorRegistry cacheInvalidatorRegistry
) {
super(settings, threadPool);
this.clock = clock;
this.client = client;
this.securityIndex = securityIndex;
this.clusterService = clusterService;
cacheInvalidatorRegistry.registerCacheInvalidator("index_service_account_token", this);
this.hasher = Hasher.resolve(XPackSettings.SERVICE_TOKEN_HASHING_ALGORITHM.get(settings));
}
@Override
void doAuthenticate(ServiceAccountToken token, ActionListener<StoreAuthenticationResult> listener) {
final GetRequest getRequest = client.prepareGet(SECURITY_MAIN_ALIAS, docIdForToken(token.getQualifiedName()))
.setFetchSource(true)
.request();
securityIndex.forCurrentProject()
.checkIndexVersionThenExecute(
listener::onFailure,
() -> executeAsyncWithOrigin(
client,
SECURITY_ORIGIN,
TransportGetAction.TYPE,
getRequest,
ActionListener.<GetResponse>wrap(response -> {
if (response.isExists()) {
final String tokenHash = (String) response.getSource().get("password");
assert tokenHash != null : "service account token hash cannot be null";
listener.onResponse(
StoreAuthenticationResult.fromBooleanResult(
getTokenSource(),
Hasher.verifyHash(token.getSecret(), tokenHash.toCharArray())
)
);
} else {
logger.trace("service account token [{}] not found in index", token.getQualifiedName());
listener.onResponse(StoreAuthenticationResult.failed(getTokenSource()));
}
}, listener::onFailure)
)
);
}
@Override
public TokenSource getTokenSource() {
return TokenSource.INDEX;
}
void createToken(
Authentication authentication,
CreateServiceAccountTokenRequest request,
ActionListener<CreateServiceAccountTokenResponse> listener
) {
final ServiceAccountId accountId = new ServiceAccountId(request.getNamespace(), request.getServiceName());
if (false == ServiceAccountService.isServiceAccountPrincipal(accountId.asPrincipal())) {
listener.onFailure(new IllegalArgumentException("service account [" + accountId + "] does not exist"));
return;
}
final ServiceAccountToken token = ServiceAccountToken.newToken(accountId, request.getTokenName());
try (XContentBuilder builder = newDocument(authentication, token)) {
final IndexRequest indexRequest = client.prepareIndex(SECURITY_MAIN_ALIAS)
.setId(docIdForToken(token.getQualifiedName()))
.setSource(builder)
.setOpType(OpType.CREATE)
.setRefreshPolicy(request.getRefreshPolicy())
.request();
final BulkRequest bulkRequest = toSingleItemBulkRequest(indexRequest);
securityIndex.forCurrentProject().prepareIndexIfNeededThenExecute(listener::onFailure, () -> {
executeAsyncWithOrigin(
client,
SECURITY_ORIGIN,
TransportBulkAction.TYPE,
bulkRequest,
TransportBulkAction.<IndexResponse>unwrappingSingleItemBulkResponse(ActionListener.wrap(response -> {
assert DocWriteResponse.Result.CREATED == response.getResult()
: "an successful response of an OpType.CREATE request must have result of CREATED";
listener.onResponse(CreateServiceAccountTokenResponse.created(token.getTokenName(), token.asBearerString()));
}, listener::onFailure))
);
});
} catch (IOException e) {
listener.onFailure(e);
}
}
void findTokensFor(ServiceAccountId accountId, ActionListener<Collection<TokenInfo>> listener) {
final IndexState projectSecurityIndex = this.securityIndex.forCurrentProject();
if (false == projectSecurityIndex.indexExists()) {
listener.onResponse(List.of());
} else if (false == projectSecurityIndex.isAvailable(SEARCH_SHARDS)) {
listener.onFailure(projectSecurityIndex.getUnavailableReason(SEARCH_SHARDS));
} else {
projectSecurityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> {
final Supplier<ThreadContext.StoredContext> contextSupplier = client.threadPool()
.getThreadContext()
.newRestorableContext(false);
try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(SECURITY_ORIGIN)) {
// TODO: wildcard support?
final BoolQueryBuilder query = QueryBuilders.boolQuery()
.filter(QueryBuilders.termQuery("doc_type", SERVICE_ACCOUNT_TOKEN_DOC_TYPE))
.must(QueryBuilders.termQuery("username", accountId.asPrincipal()));
final SearchRequest request = client.prepareSearch(SECURITY_MAIN_ALIAS)
.setScroll(DEFAULT_KEEPALIVE_SETTING.get(getSettings()))
.setQuery(query)
.setSize(1000)
.setFetchSource(false)
.request();
request.indicesOptions().ignoreUnavailable();
logger.trace("Searching tokens for service account [{}]", accountId);
ScrollHelper.fetchAllByEntity(
client,
request,
new ContextPreservingActionListener<>(contextSupplier, listener),
hit -> extractTokenInfo(hit.getId(), accountId)
);
}
});
}
}
void deleteToken(DeleteServiceAccountTokenRequest request, ActionListener<Boolean> listener) {
final IndexState projectSecurityIndex = this.securityIndex.forCurrentProject();
if (false == projectSecurityIndex.indexExists()) {
listener.onResponse(false);
} else if (false == projectSecurityIndex.isAvailable(PRIMARY_SHARDS)) {
listener.onFailure(projectSecurityIndex.getUnavailableReason(PRIMARY_SHARDS));
} else {
final ServiceAccountId accountId = new ServiceAccountId(request.getNamespace(), request.getServiceName());
if (false == ServiceAccountService.isServiceAccountPrincipal(accountId.asPrincipal())) {
listener.onResponse(false);
return;
}
final ServiceAccountTokenId accountTokenId = new ServiceAccountTokenId(accountId, request.getTokenName());
final String qualifiedTokenName = accountTokenId.getQualifiedName();
projectSecurityIndex.checkIndexVersionThenExecute(listener::onFailure, () -> {
final DeleteRequest deleteRequest = client.prepareDelete(SECURITY_MAIN_ALIAS, docIdForToken(qualifiedTokenName)).request();
deleteRequest.setRefreshPolicy(request.getRefreshPolicy());
executeAsyncWithOrigin(
client,
SECURITY_ORIGIN,
TransportDeleteAction.TYPE,
deleteRequest,
ActionListener.wrap(deleteResponse -> {
final ClearSecurityCacheRequest clearSecurityCacheRequest = new ClearSecurityCacheRequest().cacheName(
"index_service_account_token"
).keys(qualifiedTokenName);
executeAsyncWithOrigin(
client,
SECURITY_ORIGIN,
ClearSecurityCacheAction.INSTANCE,
clearSecurityCacheRequest,
ActionListener.wrap(clearSecurityCacheResponse -> {
listener.onResponse(deleteResponse.getResult() == DocWriteResponse.Result.DELETED);
}, e -> {
final String message = org.elasticsearch.core.Strings.format(
"clearing the cache for service token [%s] failed. please clear the cache manually",
qualifiedTokenName
);
logger.error(message, e);
listener.onFailure(new ElasticsearchException(message, e));
})
);
}, listener::onFailure)
);
});
}
}
private static String docIdForToken(String qualifiedTokenName) {
return SERVICE_ACCOUNT_TOKEN_DOC_TYPE + "-" + qualifiedTokenName;
}
private XContentBuilder newDocument(Authentication authentication, ServiceAccountToken serviceAccountToken) throws IOException {
final Version version = clusterService.state().nodes().getMinNodeVersion();
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject()
.field("doc_type", SERVICE_ACCOUNT_TOKEN_DOC_TYPE)
.field("version", version.id)
.field("username", serviceAccountToken.getAccountId().asPrincipal())
.field("name", serviceAccountToken.getTokenName())
.field("creation_time", clock.instant().toEpochMilli())
.field("enabled", true);
{
final Subject effectiveSubject = authentication.getEffectiveSubject();
builder.startObject("creator")
.field("principal", effectiveSubject.getUser().principal())
.field("full_name", effectiveSubject.getUser().fullName())
.field("email", effectiveSubject.getUser().email())
.field("metadata", effectiveSubject.getUser().metadata())
.field("realm", effectiveSubject.getRealm().getName())
.field("realm_type", effectiveSubject.getRealm().getType());
if (effectiveSubject.getRealm().getDomain() != null) {
builder.field("realm_domain", effectiveSubject.getRealm().getDomain());
}
builder.endObject();
}
byte[] utf8Bytes = null;
final char[] tokenHash = hasher.hash(serviceAccountToken.getSecret());
try {
utf8Bytes = CharArrays.toUtf8Bytes(tokenHash);
builder.field("password").utf8Value(utf8Bytes, 0, utf8Bytes.length);
} finally {
if (utf8Bytes != null) {
Arrays.fill(utf8Bytes, (byte) 0);
}
Arrays.fill(tokenHash, (char) 0);
}
builder.endObject();
return builder;
}
private static TokenInfo extractTokenInfo(String docId, ServiceAccountId accountId) {
// Prefix is SERVICE_ACCOUNT_TOKEN_DOC_TYPE + "-" + accountId.asPrincipal() + "/"
final int prefixLength = SERVICE_ACCOUNT_TOKEN_DOC_TYPE.length() + accountId.asPrincipal().length() + 2;
return TokenInfo.indexToken(Strings.substring(docId, prefixLength, docId.length()));
}
}
| IndexServiceAccountTokenStore |
java | apache__hadoop | hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/contracts/annotations/ConfigurationValidationAnnotations.java | {
"start": 1408,
"end": 1741
} | interface ____ {
String ConfigurationKey();
int MaxValue() default Integer.MAX_VALUE;
int MinValue() default Integer.MIN_VALUE;
int DefaultValue();
boolean ThrowIfInvalid() default false;
}
@Target({ ElementType.FIELD })
@Retention(RetentionPolicy.RUNTIME)
public @ | IntegerConfigurationValidatorAnnotation |
java | quarkusio__quarkus | extensions/resteasy-classic/resteasy/deployment/src/test/java/io/quarkus/resteasy/test/ContextTest.java | {
"start": 544,
"end": 916
} | class ____ {
@RegisterExtension
static QuarkusUnitTest runner = new QuarkusUnitTest()
.withApplicationRoot((jar) -> jar
.addClasses(MyResource.class));
@Test
public void testContextInjection() {
Assertions.assertEquals("ok", RestAssured.get("/ctxt").asString());
}
@Path("/ctxt")
public static | ContextTest |
java | quarkusio__quarkus | extensions/grpc/deployment/src/test/java/io/quarkus/grpc/client/interceptors/GlobalClientInterceptorTest.java | {
"start": 924,
"end": 2057
} | class ____ {
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.addClasses(MutinyHelloService.class, MyFirstClientInterceptor.class, Calls.class,
GreeterGrpc.class, Greeter.class, GreeterBean.class, HelloRequest.class, HelloReply.class,
MutinyGreeterGrpc.class,
HelloRequestOrBuilder.class, HelloReplyOrBuilder.class))
.withConfigurationResource("hello-config.properties");
@GrpcClient("hello-service")
GreeterGrpc.GreeterBlockingStub client;
@Test
public void testInterceptorRegistration() {
Calls.LIST.clear();
HelloReply reply = client
.sayHello(HelloRequest.newBuilder().setName("neo").build());
assertThat(reply.getMessage()).isEqualTo("Hello neo");
List<String> calls = Calls.LIST;
assertEquals(1, calls.size());
assertEquals(MyFirstClientInterceptor.class.getName(), calls.get(0));
}
}
| GlobalClientInterceptorTest |
java | quarkusio__quarkus | extensions/reactive-mssql-client/deployment/src/test/java/io/quarkus/reactive/mssql/client/MultipleDataSourcesTest.java | {
"start": 1263,
"end": 1781
} | class ____ {
@Inject
Pool msSQLClient;
public CompletionStage<Void> verify() {
CompletableFuture<Void> cf = new CompletableFuture<>();
msSQLClient.query("SELECT 1").execute(ar -> {
if (ar.failed()) {
cf.completeExceptionally(ar.cause());
} else {
cf.complete(null);
}
});
return cf;
}
}
@ApplicationScoped
static | BeanUsingDefaultDataSource |
java | qos-ch__slf4j | slf4j-migrator/src/main/java/org/slf4j/migrator/line/SingleConversionRule.java | {
"start": 1489,
"end": 2480
} | class ____ implements ConversionRule {
final private Pattern pattern;
final private String replacementText;
final private String additionalLine;
public SingleConversionRule(Pattern pattern, String replacementText) {
this(pattern, replacementText, null);
}
public SingleConversionRule(Pattern pattern, String replacementText, String additionalLine) {
this.pattern = pattern;
this.replacementText = replacementText;
this.additionalLine = additionalLine;
}
/*
* (non-Javadoc)
*
* @see org.slf4j.converter.ConversionRule#getPattern()
*/
public Pattern getPattern() {
return pattern;
}
/*
* (non-Javadoc)
*
* @see org.slf4j.converter.ConversionRule#replace(java.util.regex.Matcher)
*/
public String replace(Matcher matcher) {
return replacementText;
}
public String getAdditionalLine() {
return additionalLine;
}
}
| SingleConversionRule |
java | google__dagger | javatests/dagger/internal/codegen/ScopingValidationTest.java | {
"start": 21590,
"end": 22001
} | interface ____ {",
" SimpleType type();",
"}");
Source unscopedComponent =
CompilerTests.javaSource(
"test.UnscopedComponent",
"package test;",
"",
"import dagger.Component;",
"import javax.inject.Singleton;",
"",
"@Component(dependencies = ScopedComponent.class)",
" | ScopedComponent |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/pool/ha/node/ZookeeperNodeInfo.java | {
"start": 759,
"end": 2165
} | class ____ {
private String prefix = "";
private String host;
private Integer port;
/**
* Database can be the following ones:
* 1. Database in MySQL and PostgreSQL JDBC URL
* 2. ServiceName or SID in Oracle JDBC URL
* etc.
*/
private String database;
private String username;
private String password;
public void setPrefix(String prefix) {
if (prefix != null && !prefix.trim().isEmpty()) {
this.prefix = prefix;
if (!prefix.endsWith(".")) {
this.prefix = prefix + ".";
}
}
}
public String getPrefix() {
return prefix;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public Integer getPort() {
return port;
}
public void setPort(Integer port) {
this.port = port;
}
public String getUsername() {
return username;
}
public void setUsername(String username) {
this.username = username;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public String getDatabase() {
return database;
}
public void setDatabase(String database) {
this.database = database;
}
}
| ZookeeperNodeInfo |
java | grpc__grpc-java | netty/src/test/java/io/grpc/netty/ProtocolNegotiatorsTest.java | {
"start": 59099,
"end": 61958
} | class ____ extends GrpcHttp2ConnectionHandler {
static FakeGrpcHttp2ConnectionHandler noopHandler() {
return newHandler(true);
}
static FakeGrpcHttp2ConnectionHandler newHandler() {
return newHandler(false);
}
private static FakeGrpcHttp2ConnectionHandler newHandler(boolean noop) {
DefaultHttp2Connection conn = new DefaultHttp2Connection(/*server=*/ false);
DefaultHttp2ConnectionEncoder encoder =
new DefaultHttp2ConnectionEncoder(conn, new DefaultHttp2FrameWriter());
DefaultHttp2ConnectionDecoder decoder =
new DefaultHttp2ConnectionDecoder(conn, encoder, new DefaultHttp2FrameReader());
Http2Settings settings = new Http2Settings();
return new FakeGrpcHttp2ConnectionHandler(
/*channelUnused=*/ null, decoder, encoder, settings, noop, noopLogger);
}
private final boolean noop;
private Attributes attrs;
private Security securityInfo;
private final CountDownLatch negotiated = new CountDownLatch(1);
private ChannelHandlerContext ctx;
FakeGrpcHttp2ConnectionHandler(ChannelPromise channelUnused,
Http2ConnectionDecoder decoder,
Http2ConnectionEncoder encoder,
Http2Settings initialSettings,
boolean noop,
ChannelLogger negotiationLogger) {
super(channelUnused, decoder, encoder, initialSettings, negotiationLogger);
this.noop = noop;
}
@Override
public void handleProtocolNegotiationCompleted(Attributes attrs, Security securityInfo) {
checkNotNull(ctx, "handleProtocolNegotiationCompleted cannot be called before handlerAdded");
super.handleProtocolNegotiationCompleted(attrs, securityInfo);
this.attrs = attrs;
this.securityInfo = securityInfo;
// Add a temp handler that verifies first message is a NOOP_MESSAGE
ctx.pipeline().addBefore(ctx.name(), null, new ChannelOutboundHandlerAdapter() {
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
throws Exception {
checkState(
msg == NettyClientHandler.NOOP_MESSAGE, "First message should be NOOP_MESSAGE");
promise.trySuccess();
ctx.pipeline().remove(this);
}
});
NettyClientHandler.writeBufferingAndRemove(ctx.channel());
negotiated.countDown();
}
@Override
public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
if (noop) {
ctx.pipeline().remove(ctx.name());
} else {
super.handlerAdded(ctx);
}
this.ctx = ctx;
}
@Override
public String getAuthority() {
return "foo.test.google.fr";
}
}
private static ByteBuf bb(String s, Channel c) {
return ByteBufUtil.writeUtf8(c.alloc(), s);
}
private static final | FakeGrpcHttp2ConnectionHandler |
java | FasterXML__jackson-databind | src/test/java/tools/jackson/databind/jsontype/SealedTypesWithPolymorphicDeductionTest.java | {
"start": 862,
"end": 1006
} | interface ____ permits Cat, Fleabag {}
@JsonTypeInfo(use = DEDUCTION)
// A supertype containing common properties
public static sealed | Feline |
java | spring-projects__spring-data-jpa | spring-data-jpa/src/main/java/org/springframework/data/jpa/repository/query/JpaEntityGraph.java | {
"start": 1140,
"end": 2957
} | class ____ {
private final String name;
private final EntityGraphType type;
private final List<String> attributePaths;
/**
* Creates an {@link JpaEntityGraph}.
*
* @param entityGraph must not be {@literal null}.
* @param nameFallback must not be {@literal null} or empty.
*/
public JpaEntityGraph(EntityGraph entityGraph, String nameFallback) {
this(StringUtils.hasText(entityGraph.value()) ? entityGraph.value() : nameFallback, entityGraph.type(),
entityGraph.attributePaths());
}
/**
* Creates an {@link JpaEntityGraph} with the given name, {@link EntityGraphType} and attribute paths.
*
* @param name must not be {@literal null} or empty.
* @param type must not be {@literal null}.
* @param attributePaths may be {@literal null}.
* @since 1.9
*/
public JpaEntityGraph(String name, EntityGraphType type, String @Nullable[] attributePaths) {
Assert.hasText(name, "The name of an EntityGraph must not be null or empty");
Assert.notNull(type, "FetchGraphType must not be null");
this.name = name;
this.type = type;
this.attributePaths = attributePaths != null ? List.of(attributePaths) : List.of();
}
/**
* Returns the name of the {@link EntityGraph} configuration to use.
*
* @return
*/
public String getName() {
return name;
}
/**
* Returns the {@link EntityGraphType} of the {@link EntityGraph} to use.
*
* @return
*/
public EntityGraphType getType() {
return type;
}
/**
* Returns the attribute node names to be used for this {@link JpaEntityGraph}.
*
* @return
* @since 1.9
*/
public List<String> getAttributePaths() {
return attributePaths;
}
@Override
public String toString() {
return "JpaEntityGraph [name=" + name + ", type=" + type + ", attributePaths=" + attributePaths.toString() + "]";
}
}
| JpaEntityGraph |
java | netty__netty | microbench/src/main/java/io/netty/handler/codec/http/HttpStatusValueOfBenchmark.java | {
"start": 1766,
"end": 8478
} | class ____ extends AbstractMicrobenchmark {
private static final SplittableRandom random = new SplittableRandom();
private static final DecimalFormat df = new DecimalFormat("##.##%");
private static final int[] data_1300 = new int[1300];
private static final int[] data_2600 = new int[2600];
private static final int[] data_5300 = new int[5300];
private static final int[] data_11000 = new int[11000];
private static final int[] data_23000 = new int[23000];
private static final boolean ENABLE_POLLUTE = false;
@Setup(Level.Invocation)
public void setup(Blackhole bh, BenchmarkParams benchmarkParams) {
switch (benchmarkParams.getOpsPerInvocation()) {
case 1300 :
polluteBranchIfEnabled(bh, data_1300);
fillBenchMarkData(data_1300);
break;
case 2600 :
polluteBranchIfEnabled(bh, data_2600);
fillBenchMarkData(data_2600);
break;
case 5300 :
polluteBranchIfEnabled(bh, data_5300);
fillBenchMarkData(data_5300);
break;
case 11000 :
polluteBranchIfEnabled(bh, data_11000);
fillBenchMarkData(data_11000);
break;
case 23000 :
polluteBranchIfEnabled(bh, data_23000);
fillBenchMarkData(data_23000);
break;
}
}
@Benchmark
@OperationsPerInvocation(1300)
public void valueOf_1300(Blackhole bh) {
for (int code : data_1300) {
bh.consume(HttpStatusClass.valueOf(code));
}
}
@Benchmark
@OperationsPerInvocation(2600)
public void valueOf_2600(Blackhole bh) {
for (int code : data_2600) {
bh.consume(HttpStatusClass.valueOf(code));
}
}
@Benchmark
@OperationsPerInvocation(5300)
public void valueOf_5300(Blackhole bh) {
for (int code : data_5300) {
bh.consume(HttpStatusClass.valueOf(code));
}
}
@Benchmark
@OperationsPerInvocation(11000)
public void valueOf_11000(Blackhole bh) {
for (int code : data_11000) {
bh.consume(HttpStatusClass.valueOf(code));
}
}
@Benchmark
@OperationsPerInvocation(23000)
public void valueOf_23000(Blackhole bh) {
for (int code : data_23000) {
bh.consume(HttpStatusClass.valueOf(code));
}
}
public HttpStatusValueOfBenchmark() {
// disable assertion
super(true);
}
private static void polluteBranchIfEnabled(Blackhole bh, int[] polluteData) {
if (ENABLE_POLLUTE) {
fillPolluteData(polluteData);
for (int code : polluteData) {
bh.consume(HttpStatusClass.valueOf(code));
}
}
}
private static void fillBenchMarkData(int[] benchMarkData) {
double c1x = 0, c2x = 0, c3x = 0, c4x = 0, c5x = 0, c6x = 0;
for (int i = 0; i < benchMarkData.length;) {
// [0, 100)
int code = random.nextInt(0, 100);
// 38%
if (code < 38) {
benchMarkData[i++] = random.nextInt(100, 200);
++c1x;
continue;
}
// 30%
if (code < 68) {
benchMarkData[i++] = random.nextInt(200, 300);
++c2x;
continue;
}
// 15%
if (code < 83) {
benchMarkData[i++] = random.nextInt(300, 400);
++c3x;
continue;
}
// 10%
if (code < 93) {
benchMarkData[i++] = random.nextInt(400, 500);
++c4x;
continue;
}
// 5%
if (code < 98) {
benchMarkData[i++] = random.nextInt(500, 600);
++c5x;
continue;
}
// 2%
benchMarkData[i++] = random.nextInt(-50, 50);
++c6x;
}
// printCodePercentage("fillBenchMarkData", benchMarkData.length, c1x, c2x, c3x, c4x, c5x, c6x);
}
private static void fillPolluteData(int[] polluteData) {
double c1x = 0, c2x = 0, c3x = 0, c4x = 0, c5x = 0, c6x = 0;
for (int i = 0; i < polluteData.length;) {
// [0, 96)
int code = random.nextInt(0, 96);
// (100/6) %
if (code < 16) {
polluteData[i++] = random.nextInt(100, 200);
++c1x;
continue;
}
// (100/6) %
if (code < 32) {
polluteData[i++] = random.nextInt(200, 300);
++c2x;
continue;
}
// (100/6) %
if (code < 48) {
polluteData[i++] = random.nextInt(300, 400);
++c3x;
continue;
}
// (100/6) %
if (code < 64) {
polluteData[i++] = random.nextInt(400, 500);
++c4x;
continue;
}
// (100/6) %
if (code < 80) {
polluteData[i++] = random.nextInt(500, 600);
++c5x;
continue;
}
// (100/6) %
polluteData[i++] = random.nextInt(-50, 50);
++c6x;
}
// printCodePercentage("fillPolluteData", polluteData.length, c1x, c2x, c3x, c4x, c5x, c6x);
}
@Override
protected ChainedOptionsBuilder newOptionsBuilder() throws Exception {
Class<LinuxPerfNormProfiler> profilerClass = LinuxPerfNormProfiler.class;
try {
ProfilerFactory.getProfilerOrException(new ProfilerConfig(profilerClass.getCanonicalName()));
} catch (ProfilerException t) {
// Fall back to default.
return super.newOptionsBuilder();
}
return super.newOptionsBuilder().addProfiler(profilerClass);
}
private static void printCodePercentage(String desc, int length, double c1x, double c2x, double c3x, double c4x,
double c5x, double c6x) {
System.out.println("\n" + desc + "===>"
+ "INFORMATIONAL:" + df.format(c1x / length)
+ ", SUCCESS:" + df.format(c2x / length)
+ ", REDIRECTION:" + df.format(c3x / length)
+ ", CLIENT_ERROR:" + df.format(c4x / length)
+ ", SERVER_ERROR:" + df.format(c5x / length)
+ ", UNKNOWN:" + df.format(c6x / length)
);
}
}
| HttpStatusValueOfBenchmark |
java | alibaba__nacos | plugin-default-impl/nacos-default-auth-plugin/src/main/java/com/alibaba/nacos/plugin/auth/impl/jwt/NacosJwtPayload.java | {
"start": 805,
"end": 1282
} | class ____ {
private String sub;
private long exp = System.currentTimeMillis() / 1000L;
public String getSub() {
return sub;
}
public void setSub(String sub) {
this.sub = sub;
}
public long getExp() {
return exp;
}
public void setExp(long exp) {
this.exp = exp;
}
@Override
public String toString() {
return JacksonUtils.toJson(this);
}
}
| NacosJwtPayload |
java | spring-projects__spring-boot | module/spring-boot-kotlinx-serialization-json/src/main/java/org/springframework/boot/kotlinx/serialization/json/autoconfigure/KotlinxSerializationJsonProperties.java | {
"start": 3049,
"end": 6700
} | enum ____ are decoded in a case-insensitive manner.
*/
private boolean decodeEnumsCaseInsensitive;
/**
* Whether Json instance makes use of JsonNames annotation.
*/
private boolean useAlternativeNames = true;
/**
* Whether to allow parser to accept trailing commas in JSON objects and arrays.
*/
private boolean allowTrailingComma;
/**
* Whether to allow parser to accept C/Java-style comments in JSON input.
*/
private boolean allowComments;
public @Nullable JsonNamingStrategy getNamingStrategy() {
return this.namingStrategy;
}
public void setNamingStrategy(@Nullable JsonNamingStrategy namingStrategy) {
this.namingStrategy = namingStrategy;
}
public boolean isPrettyPrint() {
return this.prettyPrint;
}
public void setPrettyPrint(boolean prettyPrint) {
this.prettyPrint = prettyPrint;
}
public boolean isLenient() {
return this.lenient;
}
public void setLenient(boolean lenient) {
this.lenient = lenient;
}
public boolean isIgnoreUnknownKeys() {
return this.ignoreUnknownKeys;
}
public void setIgnoreUnknownKeys(boolean ignoreUnknownKeys) {
this.ignoreUnknownKeys = ignoreUnknownKeys;
}
public boolean isEncodeDefaults() {
return this.encodeDefaults;
}
public void setEncodeDefaults(boolean encodeDefaults) {
this.encodeDefaults = encodeDefaults;
}
public boolean isExplicitNulls() {
return this.explicitNulls;
}
public void setExplicitNulls(boolean explicitNulls) {
this.explicitNulls = explicitNulls;
}
public boolean isCoerceInputValues() {
return this.coerceInputValues;
}
public void setCoerceInputValues(boolean coerceInputValues) {
this.coerceInputValues = coerceInputValues;
}
public boolean isAllowStructuredMapKeys() {
return this.allowStructuredMapKeys;
}
public void setAllowStructuredMapKeys(boolean allowStructuredMapKeys) {
this.allowStructuredMapKeys = allowStructuredMapKeys;
}
public boolean isAllowSpecialFloatingPointValues() {
return this.allowSpecialFloatingPointValues;
}
public void setAllowSpecialFloatingPointValues(boolean allowSpecialFloatingPointValues) {
this.allowSpecialFloatingPointValues = allowSpecialFloatingPointValues;
}
public String getClassDiscriminator() {
return this.classDiscriminator;
}
public void setClassDiscriminator(String classDiscriminator) {
this.classDiscriminator = classDiscriminator;
}
public ClassDiscriminatorMode getClassDiscriminatorMode() {
return this.classDiscriminatorMode;
}
public void setClassDiscriminatorMode(ClassDiscriminatorMode classDiscriminatorMode) {
this.classDiscriminatorMode = classDiscriminatorMode;
}
public boolean isDecodeEnumsCaseInsensitive() {
return this.decodeEnumsCaseInsensitive;
}
public void setDecodeEnumsCaseInsensitive(boolean decodeEnumsCaseInsensitive) {
this.decodeEnumsCaseInsensitive = decodeEnumsCaseInsensitive;
}
public boolean isUseAlternativeNames() {
return this.useAlternativeNames;
}
public void setUseAlternativeNames(boolean useAlternativeNames) {
this.useAlternativeNames = useAlternativeNames;
}
public boolean isAllowTrailingComma() {
return this.allowTrailingComma;
}
public void setAllowTrailingComma(boolean allowTrailingComma) {
this.allowTrailingComma = allowTrailingComma;
}
public boolean isAllowComments() {
return this.allowComments;
}
public void setAllowComments(boolean allowComments) {
this.allowComments = allowComments;
}
/**
* Enum representing strategies for JSON property naming. The values correspond to
* {@link kotlinx.serialization.json.JsonNamingStrategy} implementations that cannot
* be directly referenced.
*/
public | values |
java | elastic__elasticsearch | modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java | {
"start": 1108,
"end": 4273
} | class ____ {
private static final Logger logger = LogManager.getLogger(OtelHelper.class);
static Attributes fromMap(Map<String, Object> attributes) {
if (attributes == null || attributes.isEmpty()) {
return Attributes.empty();
}
assert MetricNameValidator.validateAttributeNames(attributes) : "invalid metric attributes";
var builder = Attributes.builder();
attributes.forEach((k, v) -> {
if (v instanceof String value) {
builder.put(k, value);
} else if (v instanceof Long value) {
builder.put(k, value);
} else if (v instanceof Integer value) {
builder.put(k, value);
} else if (v instanceof Byte value) {
builder.put(k, value);
} else if (v instanceof Short value) {
builder.put(k, value);
} else if (v instanceof Double value) {
builder.put(k, value);
} else if (v instanceof Float value) {
builder.put(k, value);
} else if (v instanceof Boolean value) {
builder.put(k, value);
} else {
throw new IllegalArgumentException("attributes do not support value type of [" + v.getClass().getCanonicalName() + "]");
}
});
return builder.build();
}
static Consumer<ObservableDoubleMeasurement> doubleMeasurementCallback(Supplier<Collection<DoubleWithAttributes>> observer) {
return measurement -> {
Collection<DoubleWithAttributes> observations;
try {
observations = observer.get();
} catch (RuntimeException err) {
assert false : "observer must not throw [" + err.getMessage() + "]";
logger.error("doubleMeasurementCallback observer unexpected error", err);
return;
}
if (observations == null) {
return;
}
for (DoubleWithAttributes observation : observations) {
if (observation != null) {
measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes()));
}
}
};
}
static Consumer<ObservableLongMeasurement> longMeasurementCallback(Supplier<Collection<LongWithAttributes>> observer) {
return measurement -> {
Collection<LongWithAttributes> observations;
try {
observations = observer.get();
} catch (RuntimeException err) {
assert false : "observer must not throw [" + err.getMessage() + "]";
logger.error("longMeasurementCallback observer unexpected error", err);
return;
}
if (observations == null) {
return;
}
for (LongWithAttributes observation : observations) {
if (observation != null) {
measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes()));
}
}
};
}
}
| OtelHelper |
java | apache__camel | components/camel-bonita/src/main/java/org/apache/camel/component/bonita/api/model/ProcessDefinitionResponse.java | {
"start": 919,
"end": 3926
} | class ____ {
@JsonProperty("id")
private String id;
@JsonProperty("displayDescription")
private String displayDescription;
@JsonProperty("deploymentDate")
private String deploymentDate;
@JsonProperty("description")
private String description;
@JsonProperty("activationState")
private String activationState;
@JsonProperty("name")
private String name;
@JsonProperty("deployedBy")
private String deployedBy;
@JsonProperty("displayName")
private String displayName;
@JsonProperty("actorinitiatorid")
private String actorInitiatorId;
@JsonProperty("last_update_date")
private String lastUpdateDate;
@JsonProperty("configurationState")
private String configurationState;
@JsonProperty("version")
private String version;
public ProcessDefinitionResponse() {
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getDisplayDescription() {
return displayDescription;
}
public void setDisplayDescription(String displayDescription) {
this.displayDescription = displayDescription;
}
public String getDeploymentDate() {
return deploymentDate;
}
public void setDeploymentDate(String deploymentDate) {
this.deploymentDate = deploymentDate;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public String getActivationState() {
return activationState;
}
public void setActivationState(String activationState) {
this.activationState = activationState;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDeployedBy() {
return deployedBy;
}
public void setDeployedBy(String deployedBy) {
this.deployedBy = deployedBy;
}
public String getDisplayName() {
return displayName;
}
public void setDisplayName(String displayName) {
this.displayName = displayName;
}
public String getActorInitiatorId() {
return actorInitiatorId;
}
public void setActorInitiatorId(String actorInitiatorId) {
this.actorInitiatorId = actorInitiatorId;
}
public String getLastUpdateDate() {
return lastUpdateDate;
}
public void setLastUpdateDate(String lastUpdateDate) {
this.lastUpdateDate = lastUpdateDate;
}
public String getConfigurationState() {
return configurationState;
}
public void setConfigurationState(String configurationState) {
this.configurationState = configurationState;
}
public String getVersion() {
return version;
}
public void setVersion(String version) {
this.version = version;
}
}
| ProcessDefinitionResponse |
java | apache__flink | flink-table/flink-table-runtime/src/main/java/org/apache/flink/table/runtime/operators/window/tvf/operator/WindowTableFunctionOperatorBase.java | {
"start": 2064,
"end": 4976
} | class ____ extends TableStreamOperator<RowData>
implements OneInputStreamOperator<RowData, RowData> {
private static final String NULL_ROW_TIME_ELEMENTS_DROPPED_METRIC_NAME =
"numNullRowTimeRecordsDropped";
/**
* The shift timezone of the window, if the proctime or rowtime type is TIMESTAMP_LTZ, the shift
* timezone is the timezone user configured in TableConfig, other cases the timezone is UTC
* which means never shift when assigning windows.
*/
protected final ZoneId shiftTimeZone;
protected final int rowtimeIndex;
protected final GroupWindowAssigner<TimeWindow> windowAssigner;
/** This is used for emitting elements with a given timestamp. */
private transient TimestampedCollector<RowData> collector;
private transient JoinedRowData outRow;
private transient GenericRowData windowProperties;
// ------------------------------------------------------------------------
// Metrics
// ------------------------------------------------------------------------
protected transient Counter numNullRowTimeRecordsDropped;
public WindowTableFunctionOperatorBase(
GroupWindowAssigner<TimeWindow> windowAssigner,
int rowtimeIndex,
ZoneId shiftTimeZone) {
this.shiftTimeZone = shiftTimeZone;
this.rowtimeIndex = rowtimeIndex;
this.windowAssigner = windowAssigner;
checkArgument(!windowAssigner.isEventTime() || rowtimeIndex >= 0);
}
@Override
public void open() throws Exception {
super.open();
this.collector = new TimestampedCollector<>(output);
collector.eraseTimestamp();
outRow = new JoinedRowData();
windowProperties = new GenericRowData(3);
// metrics
this.numNullRowTimeRecordsDropped =
metrics.counter(NULL_ROW_TIME_ELEMENTS_DROPPED_METRIC_NAME);
}
@Override
public void close() throws Exception {
super.close();
if (collector != null) {
collector.close();
}
}
protected void collect(RowData inputRow, Collection<TimeWindow> allWindows) {
for (TimeWindow window : allWindows) {
windowProperties.setField(0, TimestampData.fromEpochMillis(window.getStart()));
windowProperties.setField(1, TimestampData.fromEpochMillis(window.getEnd()));
windowProperties.setField(
2,
TimestampData.fromEpochMillis(
toEpochMills(window.maxTimestamp(), shiftTimeZone)));
outRow.replace(inputRow, windowProperties);
outRow.setRowKind(inputRow.getRowKind());
collector.collect(outRow);
}
}
@VisibleForTesting
public Counter getNumNullRowTimeRecordsDropped() {
return numNullRowTimeRecordsDropped;
}
}
| WindowTableFunctionOperatorBase |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/LambdaBootstrap.java | {
"start": 19037,
"end": 19709
} | interface ____ method with no captures.
// delegateMethodType drops the 'this' parameter because it will be re-inserted
// when the method handle for the dynamically invoked delegate method is created.
// Example: Object::toString
if (captures.length == 0) {
Class<?> clazz = delegateMethodType.parameterType(0);
delegateClassType = Type.getType(clazz);
delegateMethodType = delegateMethodType.dropParameterTypes(0, 1);
functionalInterfaceWithCaptures = interfaceMethodType.toMethodDescriptorString();
// Handles the case for a virtual or | reference |
java | spring-projects__spring-security | config/src/test/java/org/springframework/security/config/annotation/web/configurers/ServletApiConfigurerTests.java | {
"start": 13621,
"end": 13925
} | class ____ {
@Bean
SecurityFilterChain filterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.servletApi((servletApi) -> servletApi
.rolePrefix("PERMISSION_")
);
return http.build();
// @formatter:on
}
}
@RestController
static | RolePrefixInLambdaConfig |
java | google__guava | android/guava-testlib/test/com/google/common/testing/GcFinalizationTest.java | {
"start": 1323,
"end": 3894
} | class ____ extends TestCase {
// ----------------------------------------------------------------
// Ordinary tests of successful method execution
// ----------------------------------------------------------------
public void testAwait_countDownLatch() {
CountDownLatch latch = new CountDownLatch(1);
Object unused =
new Object() {
@SuppressWarnings({"removal", "Finalize"}) // b/260137033
@Override
protected void finalize() {
latch.countDown();
}
};
unused = null; // Hint to the JIT that unused is unreachable
GcFinalization.await(latch);
assertEquals(0, latch.getCount());
}
public void testAwaitDone_future() {
SettableFuture<@Nullable Void> future = SettableFuture.create();
Object unused =
new Object() {
@SuppressWarnings({"removal", "Finalize"}) // b/260137033
@Override
protected void finalize() {
future.set(null);
}
};
unused = null; // Hint to the JIT that unused is unreachable
GcFinalization.awaitDone(future);
assertTrue(future.isDone());
assertFalse(future.isCancelled());
}
public void testAwaitDone_future_cancel() {
SettableFuture<@Nullable Void> future = SettableFuture.create();
Object unused =
new Object() {
@SuppressWarnings({"removal", "Finalize"}) // b/260137033
@Override
protected void finalize() {
future.cancel(false);
}
};
unused = null; // Hint to the JIT that unused is unreachable
GcFinalization.awaitDone(future);
assertTrue(future.isDone());
assertTrue(future.isCancelled());
}
public void testAwaitClear() {
WeakReference<Object> ref = new WeakReference<>(new Object());
GcFinalization.awaitClear(ref);
assertThat(ref.get()).isNull();
}
public void testAwaitDone_finalizationPredicate() {
WeakHashMap<Object, Object> map = new WeakHashMap<>();
map.put(new Object(), Boolean.TRUE);
GcFinalization.awaitDone(
new FinalizationPredicate() {
@Override
public boolean isDone() {
return map.isEmpty();
}
});
assertTrue(map.isEmpty());
}
// ----------------------------------------------------------------
// Test that interrupts result in RuntimeException, not InterruptedException.
// Trickier than it looks, because runFinalization swallows interrupts.
// ----------------------------------------------------------------
| GcFinalizationTest |
java | apache__camel | components/camel-aws/camel-aws2-timestream/src/test/java/org/apache/camel/component/aws2/timestream/query/Timestream2QueryProducerTest.java | {
"start": 1592,
"end": 13253
} | class ____ extends CamelTestSupport {
@BindToRegistry("awsTimestreamQueryClient")
AmazonTimestreamQueryClientMock clientMock = new AmazonTimestreamQueryClientMock();
@EndpointInject("mock:result")
private MockEndpoint mock;
@Test
public void timestreamDescribeQueryEndpointsTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:describeQueryEndpoints", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.describeEndpoints);
}
});
MockEndpoint.assertIsSatisfied(context);
DescribeEndpointsResponse resultGet = (DescribeEndpointsResponse) exchange.getIn().getBody();
assertEquals(1, resultGet.endpoints().size());
assertEquals("query.timestream.region.amazonaws.com", resultGet.endpoints().get(0).address());
}
@Test
public void timestreamDescribeQueryEndpointsPojoTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:describeQueryEndpointsPojo", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.describeEndpoints);
exchange.getIn().setBody(DescribeEndpointsRequest.builder().build());
}
});
MockEndpoint.assertIsSatisfied(context);
DescribeEndpointsResponse resultGet = (DescribeEndpointsResponse) exchange.getIn().getBody();
assertEquals(1, resultGet.endpoints().size());
assertEquals("query.timestream.region.amazonaws.com", resultGet.endpoints().get(0).address());
}
@Test
public void timestreamCreateScheduledQueryTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:createScheduledQuery", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.createScheduledQuery);
DimensionMapping.Builder builder = DimensionMapping.builder();
builder.dimensionValueType("dimensionValue");
List<DimensionMapping> dimensionMappingList = new ArrayList<>();
dimensionMappingList.add(builder.build());
MultiMeasureMappings.Builder multiMeasureMapping = MultiMeasureMappings.builder();
multiMeasureMapping.targetMultiMeasureName("MM1");
List<MultiMeasureMappings> multiMeasureMappings = new ArrayList<>();
multiMeasureMappings.add(multiMeasureMapping.build());
exchange.getIn().setHeader(Timestream2Constants.DIMENSION_MAPPING_LIST, dimensionMappingList);
exchange.getIn().setHeader(Timestream2Constants.MULTI_MEASURE_MAPPINGS, multiMeasureMappings);
exchange.getIn().setHeader(Timestream2Constants.DATABASE_NAME, "TESTDB");
exchange.getIn().setHeader(Timestream2Constants.TABLE_NAME, "TESTTABLE");
exchange.getIn().setHeader(Timestream2Constants.TIME_COLUMN, "time");
}
});
MockEndpoint.assertIsSatisfied(context);
CreateScheduledQueryResponse resultGet = (CreateScheduledQueryResponse) exchange.getIn().getBody();
assertEquals("aws-timestream:test:scheduled-query:arn", resultGet.arn());
}
@Test
public void timestreamDeleteScheduledQueryTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:deleteScheduledQuery", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.deleteScheduledQuery);
}
});
MockEndpoint.assertIsSatisfied(context);
DeleteScheduledQueryResponse resultGet = (DeleteScheduledQueryResponse) exchange.getIn().getBody();
assertNotNull(resultGet);
}
@Test
public void timestreamExecuteScheduledQueryTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:executeScheduledQuery", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.executeScheduledQuery);
}
});
MockEndpoint.assertIsSatisfied(context);
ExecuteScheduledQueryResponse resultGet = (ExecuteScheduledQueryResponse) exchange.getIn().getBody();
assertNotNull(resultGet);
}
@Test
public void timestreamUpdateScheduledQueryTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:updateScheduledQuery", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.updateScheduledQuery);
}
});
MockEndpoint.assertIsSatisfied(context);
UpdateScheduledQueryResponse resultGet = (UpdateScheduledQueryResponse) exchange.getIn().getBody();
assertNotNull(resultGet);
}
@Test
public void timestreamDescribeScheduledQueryTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:describeScheduledQuery", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.describeScheduledQuery);
}
});
MockEndpoint.assertIsSatisfied(context);
DescribeScheduledQueryResponse resultGet = (DescribeScheduledQueryResponse) exchange.getIn().getBody();
assertEquals("aws-timestream:test:scheduled-query:arn", resultGet.scheduledQuery().arn());
}
@Test
public void timestreamListScheduledQueriesTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:listScheduledQueries", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.listScheduledQueries);
}
});
MockEndpoint.assertIsSatisfied(context);
ListScheduledQueriesResponse resultGet = (ListScheduledQueriesResponse) exchange.getIn().getBody();
assertEquals(1, resultGet.scheduledQueries().size());
assertEquals("aws-timestream:test:scheduled-query:arn", resultGet.scheduledQueries().get(0).arn());
}
@Test
public void timestreamPrepareQueryTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:prepareQuery", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.prepareQuery);
}
});
MockEndpoint.assertIsSatisfied(context);
PrepareQueryResponse resultGet = (PrepareQueryResponse) exchange.getIn().getBody();
assertEquals("select * from test_db", resultGet.queryString());
}
@Test
public void timestreamQueryTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:query", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.query);
}
});
MockEndpoint.assertIsSatisfied(context);
QueryResponse resultGet = (QueryResponse) exchange.getIn().getBody();
assertEquals("query-1", resultGet.queryId());
}
@Test
public void timestreamCancelQueryTest() throws Exception {
mock.expectedMessageCount(1);
Exchange exchange = template.request("direct:cancelQuery", new Processor() {
@Override
public void process(Exchange exchange) {
exchange.getIn().setHeader(Timestream2Constants.OPERATION, Timestream2Operations.cancelQuery);
}
});
MockEndpoint.assertIsSatisfied(context);
CancelQueryResponse resultGet = (CancelQueryResponse) exchange.getIn().getBody();
assertEquals("Query Cancelled", resultGet.cancellationMessage());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:describeQueryEndpoints")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=describeEndpoints")
.to("mock:result");
from("direct:describeQueryEndpointsPojo")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=describeEndpoints&pojoRequest=true")
.to("mock:result");
from("direct:createScheduledQuery")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=createScheduledQuery")
.to("mock:result");
from("direct:deleteScheduledQuery")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=deleteScheduledQuery")
.to("mock:result");
from("direct:executeScheduledQuery")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=executeScheduledQuery")
.to("mock:result");
from("direct:updateScheduledQuery")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=updateScheduledQuery")
.to("mock:result");
from("direct:describeScheduledQuery")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=describeScheduledQuery")
.to("mock:result");
from("direct:listScheduledQueries")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=listScheduledQueries")
.to("mock:result");
from("direct:prepareQuery")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=prepareQuery")
.to("mock:result");
from("direct:query")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=query")
.to("mock:result");
from("direct:cancelQuery")
.to("aws2-timestream://query:test?awsTimestreamQueryClient=#awsTimestreamQueryClient&operation=cancelQuery")
.to("mock:result");
}
};
}
}
| Timestream2QueryProducerTest |
java | apache__flink | flink-runtime/src/main/java/org/apache/flink/runtime/io/network/partition/hybrid/tiered/storage/HashSubpartitionBufferAccumulatorContext.java | {
"start": 1305,
"end": 2078
} | interface ____ {
/**
* Request {@link BufferBuilder} from the {@link BufferPool}.
*
* @return the requested buffer
*/
BufferBuilder requestBufferBlocking();
/**
* Flush the accumulated {@link Buffer}s of the subpartition.
*
* @param subpartitionId the subpartition id
* @param accumulatedBuffer the accumulated buffer
* @param numRemainingConsecutiveBuffers number of buffers that would be passed in the following
* invocations and should be written to the same segment as this one
*/
void flushAccumulatedBuffers(
TieredStorageSubpartitionId subpartitionId,
Buffer accumulatedBuffer,
int numRemainingConsecutiveBuffers);
}
| HashSubpartitionBufferAccumulatorContext |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/sink/compactor/FileCompactor.java | {
"start": 1068,
"end": 1285
} | interface ____ but use either {@link
* OutputStreamBasedFileCompactor} or {@link RecordWiseFileCompactor}. Other implementations will
* cause UnsupportedOperationException at runtime.
*/
@PublicEvolving
public | directly |
java | elastic__elasticsearch | x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerServiceTests.java | {
"start": 7033,
"end": 7384
} | class ____ implements CleanerService.ExecutionScheduler {
final long offset;
TestExecutionScheduler(long offset) {
this.offset = offset;
}
@Override
public TimeValue nextExecutionDelay(ZonedDateTime now) {
return TimeValue.timeValueMillis(offset);
}
}
}
| TestExecutionScheduler |
java | junit-team__junit5 | jupiter-tests/src/test/java/org/junit/jupiter/engine/discovery/DiscoveryTests.java | {
"start": 20659,
"end": 20819
} | class ____ {
@SuppressWarnings("unused")
@Test
void test() {
fail("should not be called");
}
@Nested
private static | InvalidTestClassTestCase |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMigrateToDataStreamService.java | {
"start": 2464,
"end": 14302
} | class ____ {
private static final Logger logger = LogManager.getLogger(MetadataMigrateToDataStreamService.class);
private static final CompressedXContent TIMESTAMP_MAPPING;
static {
try {
TIMESTAMP_MAPPING = new CompressedXContent(
((builder, params) -> builder.startObject(DataStreamTimestampFieldMapper.NAME).field("enabled", true).endObject())
);
} catch (IOException e) {
throw new AssertionError(e);
}
}
private final ClusterService clusterService;
private final IndicesService indexServices;
private final ThreadContext threadContext;
private final MetadataCreateIndexService metadataCreateIndexService;
private final boolean isDslOnlyMode;
public MetadataMigrateToDataStreamService(
ThreadPool threadPool,
ClusterService clusterService,
IndicesService indexServices,
MetadataCreateIndexService metadataCreateIndexService
) {
this.clusterService = clusterService;
this.indexServices = indexServices;
this.threadContext = threadPool.getThreadContext();
this.metadataCreateIndexService = metadataCreateIndexService;
this.isDslOnlyMode = isDataStreamsLifecycleOnlyMode(clusterService.getSettings());
}
public void migrateToDataStream(
ProjectId projectId,
MigrateToDataStreamClusterStateUpdateRequest request,
ActionListener<AcknowledgedResponse> finalListener
) {
metadataCreateIndexService.getSystemIndices().validateDataStreamAccess(request.aliasName, threadContext);
AtomicReference<String> writeIndexRef = new AtomicReference<>();
ActionListener<AcknowledgedResponse> listener = finalListener.delegateFailureAndWrap((delegate, response) -> {
if (response.isAcknowledged()) {
String writeIndexName = writeIndexRef.get();
assert writeIndexName != null;
ActiveShardsObserver.waitForActiveShards(
clusterService,
projectId,
new String[] { writeIndexName },
ActiveShardCount.DEFAULT,
request.masterNodeTimeout(),
delegate.map(shardsAcknowledged -> AcknowledgedResponse.TRUE)
);
} else {
delegate.onResponse(AcknowledgedResponse.FALSE);
}
});
var delegate = new AllocationActionListener<>(listener, threadContext);
submitUnbatchedTask(
"migrate-to-data-stream [" + request.aliasName + "]",
new AckedClusterStateUpdateTask(
Priority.HIGH,
request.masterNodeTimeout(),
request.ackTimeout(),
delegate.clusterStateUpdate()
) {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
ClusterState clusterState = migrateToDataStream(currentState.projectState(projectId), isDslOnlyMode, indexMetadata -> {
try {
return indexServices.createIndexMapperServiceForValidation(indexMetadata);
} catch (IOException e) {
throw new IllegalStateException(e);
}
}, request, metadataCreateIndexService, clusterService.getSettings(), delegate.reroute());
writeIndexRef.set(
clusterState.metadata().getProject(projectId).dataStreams().get(request.aliasName).getWriteIndex().getName()
);
return clusterState;
}
}
);
}
@SuppressForbidden(reason = "legacy usage of unbatched task") // TODO add support for batching here
private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String source, ClusterStateUpdateTask task) {
clusterService.submitUnbatchedStateUpdateTask(source, task);
}
static ClusterState migrateToDataStream(
ProjectState projectState,
boolean isDslOnlyMode,
Function<IndexMetadata, MapperService> mapperSupplier,
MigrateToDataStreamClusterStateUpdateRequest request,
MetadataCreateIndexService metadataCreateIndexService,
Settings settings,
ActionListener<Void> listener
) throws Exception {
final var project = projectState.metadata();
validateRequest(project, request);
IndexAbstraction.Alias alias = (IndexAbstraction.Alias) project.getIndicesLookup().get(request.aliasName);
validateBackingIndices(project, request.aliasName);
ProjectMetadata.Builder mb = ProjectMetadata.builder(project);
for (Index index : alias.getIndices()) {
IndexMetadata im = project.index(index);
prepareBackingIndex(mb, im, request.aliasName, mapperSupplier, true, false, false, Settings.EMPTY);
}
ClusterState updatedState = ClusterState.builder(projectState.cluster()).putProjectMetadata(mb).build();
Index writeIndex = alias.getWriteIndex();
List<IndexMetadata> backingIndices = alias.getIndices()
.stream()
.filter(x -> writeIndex == null || x.equals(writeIndex) == false)
.map(x -> updatedState.metadata().getProject(project.id()).index(x))
.toList();
logger.info("submitting request to migrate alias [{}] to a data stream", request.aliasName);
CreateDataStreamClusterStateUpdateRequest req = new CreateDataStreamClusterStateUpdateRequest(project.id(), request.aliasName);
return createDataStream(
metadataCreateIndexService,
settings,
updatedState,
isDslOnlyMode,
req,
backingIndices,
updatedState.metadata().getProject(project.id()).index(writeIndex),
listener,
// No need to initialize the failure store when migrating to a data stream.
false
);
}
// package-visible for testing
static void validateRequest(ProjectMetadata project, MigrateToDataStreamClusterStateUpdateRequest request) {
IndexAbstraction ia = project.getIndicesLookup().get(request.aliasName);
if (ia == null || ia.getType() != IndexAbstraction.Type.ALIAS) {
throw new IllegalArgumentException("alias [" + request.aliasName + "] does not exist");
}
if (ia.getWriteIndex() == null) {
throw new IllegalArgumentException("alias [" + request.aliasName + "] must specify a write index");
}
// check for "clean" alias without routing or filter query
AliasMetadata aliasMetadata = AliasMetadata.getFirstAliasMetadata(project, ia);
assert aliasMetadata != null : "alias metadata may not be null";
if (aliasMetadata.filteringRequired() || aliasMetadata.getIndexRouting() != null || aliasMetadata.getSearchRouting() != null) {
throw new IllegalArgumentException("alias [" + request.aliasName + "] may not have custom filtering or routing");
}
}
/**
* Hides the index, optionally removes the alias, adds data stream timestamp field mapper, and configures any additional settings
* needed for the index to be included within a data stream.
* @param b Metadata.Builder to consume updates to the provided index
* @param im IndexMetadata to be migrated to a data stream
* @param dataStreamName The name of the data stream to migrate the index into
* @param mapperSupplier A function that returns a MapperService for the given index
* @param removeAlias <code>true</code> if the migration should remove any aliases present on the index, <code>false</code> if an
* exception should be thrown in that case instead
* @param failureStore <code>true</code> if the index is being migrated into the data stream's failure store, <code>false</code> if it
* is being migrated into the data stream's backing indices
* @param makeSystem <code>true</code> if the index is being migrated into the system data stream, <code>false</code> if it
* is being migrated into non-system data stream
* @param nodeSettings The settings for the current node
*/
static void prepareBackingIndex(
ProjectMetadata.Builder b,
IndexMetadata im,
String dataStreamName,
Function<IndexMetadata, MapperService> mapperSupplier,
boolean removeAlias,
boolean failureStore,
boolean makeSystem,
Settings nodeSettings
) throws IOException {
MappingMetadata mm = im.mapping();
if (mm == null || mm.equals(MappingMetadata.EMPTY_MAPPINGS)) {
throw new IllegalArgumentException("backing index [" + im.getIndex().getName() + "] must have mappings for a timestamp field");
}
MapperService mapperService = mapperSupplier.apply(im);
mapperService.merge(im, MapperService.MergeReason.MAPPING_RECOVERY);
mapperService.merge(MapperService.SINGLE_MAPPING_NAME, TIMESTAMP_MAPPING, MapperService.MergeReason.MAPPING_UPDATE);
DocumentMapper mapper = mapperService.documentMapper();
var imb = IndexMetadata.builder(im);
if (removeAlias) {
imb.removeAlias(dataStreamName);
}
Settings.Builder settingsUpdate = Settings.builder().put(im.getSettings()).put(IndexMetadata.SETTING_INDEX_HIDDEN, true);
if (failureStore) {
DataStreamFailureStoreDefinition.applyFailureStoreSettings(nodeSettings, settingsUpdate);
}
Settings maybeUpdatedSettings = settingsUpdate.build();
if (IndexSettings.same(im.getSettings(), maybeUpdatedSettings) == false) {
imb.settings(maybeUpdatedSettings).settingsVersion(im.getSettingsVersion() + 1);
}
imb.mappingVersion(im.getMappingVersion() + 1)
.mappingsUpdatedVersion(IndexVersion.current())
.putMapping(new MappingMetadata(mapper));
imb.system(makeSystem);
b.put(imb);
}
// package-visible for testing
static void validateBackingIndices(ProjectMetadata project, String dataStreamName) {
IndexAbstraction ia = project.getIndicesLookup().get(dataStreamName);
if (ia == null || ia.getType() != IndexAbstraction.Type.ALIAS) {
throw new IllegalArgumentException("alias [" + dataStreamName + "] does not exist");
}
IndexAbstraction.Alias alias = (IndexAbstraction.Alias) ia;
// ensure that no other aliases reference indices
List<String> indicesWithOtherAliases = new ArrayList<>();
for (Index index : alias.getIndices()) {
IndexMetadata im = project.index(index);
if (im.getAliases().size() > 1 || im.getAliases().containsKey(alias.getName()) == false) {
indicesWithOtherAliases.add(index.getName());
}
}
if (indicesWithOtherAliases.size() > 0) {
throw new IllegalArgumentException(
"other aliases referencing indices ["
+ Strings.collectionToCommaDelimitedString(indicesWithOtherAliases)
+ "] must be removed before migrating to a data stream"
);
}
}
public record MigrateToDataStreamClusterStateUpdateRequest(String aliasName, TimeValue masterNodeTimeout, TimeValue ackTimeout) {
public MigrateToDataStreamClusterStateUpdateRequest {
Objects.requireNonNull(aliasName);
Objects.requireNonNull(masterNodeTimeout);
Objects.requireNonNull(ackTimeout);
}
}
}
| MetadataMigrateToDataStreamService |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/streaming/runtime/SortingBoundedInputITCase.java | {
"start": 30565,
"end": 32432
} | class ____ extends AbstractStreamOperatorV2<Long>
implements MultipleInputStreamOperator<Long>, BoundedMultiInput {
private final Set<Integer> seenKeys = new HashSet<>();
private long seenRecords = 0;
private Integer currentKey = null;
private boolean input1Finished = false;
private boolean input2Finished = false;
private boolean input3Finished = false;
public AssertingThreeInputOperator(
StreamOperatorParameters<Long> parameters, int numberOfInputs) {
super(parameters, 3);
assert numberOfInputs == 3;
}
private void processElement(Tuple2<Integer, byte[]> element) {
this.seenRecords++;
Integer incomingKey = element.f0;
if (!Objects.equals(incomingKey, currentKey)) {
if (!seenKeys.add(incomingKey)) {
Assert.fail("Received an out of order key: " + incomingKey);
}
this.currentKey = incomingKey;
}
}
@Override
public void endInput(int inputId) {
if (inputId == 1) {
input1Finished = true;
}
if (inputId == 2) {
input2Finished = true;
}
if (inputId == 3) {
input3Finished = true;
}
if (input1Finished && input2Finished && input3Finished) {
output.collect(new StreamRecord<>(seenRecords));
}
}
@Override
public List<Input> getInputs() {
return Arrays.asList(
new SingleInput(this::processElement),
new SingleInput(this::processElement),
new SingleInput(this::processElement));
}
}
private static | AssertingThreeInputOperator |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/cookie/HttpCookieFactory.java | {
"start": 865,
"end": 1065
} | class ____ implements CookieFactory {
@Override
public Cookie create(String name, String value) {
return new CookieHttpCookieAdapter(new HttpCookie(name, value));
}
}
| HttpCookieFactory |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java | {
"start": 1598,
"end": 3045
} | class ____ {
private static final Logger log = LogManager.getLogger(EsqlParser.class);
/**
* Maximum number of characters in an ESQL query. Antlr may parse the entire
* query into tokens to make the choices, buffering the world. There's a lot we
* can do in the grammar to prevent that, but let's be paranoid and assume we'll
* fail at preventing antlr from slurping in the world. Instead, let's make sure
* that the world just isn't that big.
*/
public static final int MAX_LENGTH = 1_000_000;
private static void replaceSymbolWithLiteral(Map<String, String> symbolReplacements, String[] literalNames, String[] symbolicNames) {
for (int i = 0, replacements = symbolReplacements.size(); i < symbolicNames.length && replacements > 0; i++) {
String symName = symbolicNames[i];
if (symName != null) {
String replacement = symbolReplacements.get(symName);
if (replacement != null && literalNames[i] == null) {
// literals are single quoted
literalNames[i] = "'" + replacement + "'";
replacements--;
}
}
}
}
/**
* Add the literal name to a number of tokens that due to ANTLR internals/ATN
* have their symbolic name returns instead during error reporting.
* When reporting token errors, ANTLR uses the Vocabulary | EsqlParser |
java | hibernate__hibernate-orm | hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/modifiedflags/naming/TestEntity.java | {
"start": 700,
"end": 3256
} | class ____ {
@Id
@GeneratedValue
private Integer id;
private String data1;
@Column(name = "mydata")
private String data2;
@Audited(modifiedColumnName = "data_3", withModifiedFlag = true)
private String data3;
@Column(name = "thedata")
@Audited(modifiedColumnName = "the_data_mod", withModifiedFlag = true)
private String data4;
@Embedded
private TestEmbeddable embeddable;
@ManyToOne
@JoinColumns({
@JoinColumn(name = "other_entity_id1", nullable = false),
@JoinColumn(name = "other_entity_id2", nullable = false)
})
private OtherEntity otherEntity;
@OneToOne
@JoinColumn(name = "single_id")
private SingleIdEntity singleIdEntity;
@OneToOne
private SingleIdEntity singleIdEntity2;
@Column(name = "client_option")
@Enumerated(EnumType.STRING)
private ClientOption clientOption;
@Column(name = "client_option2")
@Enumerated(EnumType.STRING)
@Audited(withModifiedFlag = true, modifiedColumnName = "cop_mod")
private ClientOption clientOption2;
public Integer getId() {
return id;
}
public void setId(Integer id) {
this.id = id;
}
public String getData1() {
return data1;
}
public void setData1(String data1) {
this.data1 = data1;
}
public String getData2() {
return data2;
}
public void setData2(String data2) {
this.data2 = data2;
}
public String getData3() {
return data3;
}
public void setData3(String data3) {
this.data3 = data3;
}
public String getData4() {
return data4;
}
public void setData4(String data4) {
this.data4 = data4;
}
public TestEmbeddable getEmbeddable() {
return embeddable;
}
public void setEmbeddable(TestEmbeddable embeddable) {
this.embeddable = embeddable;
}
public OtherEntity getOtherEntity() {
return otherEntity;
}
public void setOtherEntity(OtherEntity otherEntity) {
this.otherEntity = otherEntity;
}
public SingleIdEntity getSingleIdEntity() {
return singleIdEntity;
}
public void setSingleIdEntity(SingleIdEntity singleIdEntity) {
this.singleIdEntity = singleIdEntity;
}
public SingleIdEntity getSingleIdEntity2() {
return singleIdEntity2;
}
public void setSingleIdEntity2(SingleIdEntity singleIdEntity2) {
this.singleIdEntity2 = singleIdEntity2;
}
public ClientOption getClientOption() {
return clientOption;
}
public void setClientOption(ClientOption clientOption) {
this.clientOption = clientOption;
}
public ClientOption getClientOption2() {
return clientOption2;
}
public void setClientOption2(ClientOption clientOption2) {
this.clientOption2 = clientOption2;
}
}
| TestEntity |
java | apache__camel | tests/camel-itest/src/test/java/org/apache/camel/itest/validator/ValidatorSchemaImportTest.java | {
"start": 1207,
"end": 5908
} | class ____ extends CamelTestSupport {
protected MockEndpoint validEndpoint;
protected MockEndpoint finallyEndpoint;
protected MockEndpoint invalidEndpoint;
/**
* Test for the valid schema location
*
* @throws Exception
*/
@Test
void testRelativeParentSchemaImport() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.doTry()
.to("validator:org/apache/camel/component/validator/relativeparent/child/child.xsd")
.to("mock:valid")
.doCatch(ValidationException.class)
.to("mock:invalid")
.doFinally()
.to("mock:finally")
.end();
}
});
validEndpoint.expectedMessageCount(1);
finallyEndpoint.expectedMessageCount(1);
template.sendBody("direct:start",
"<childuser xmlns='http://foo.com/bar'><user><id>1</id><username>Test User</username></user></childuser>");
assertIsSatisfied(validEndpoint, invalidEndpoint, finallyEndpoint);
}
/**
* Test for the invalid schema import location.
*
* @throws Exception
*/
@Test
void testDotSlashSchemaImport() throws Exception {
this.context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start").doTry()
.to("validator:org/apache/camel/component/validator/dotslash/child.xsd").to("mock:valid")
.doCatch(ValidationException.class).to("mock:invalid").doFinally().to("mock:finally")
.end();
}
});
validEndpoint.expectedMessageCount(1);
finallyEndpoint.expectedMessageCount(1);
template
.sendBody("direct:start",
"<childuser xmlns='http://foo.com/bar'><user><id>1</id><username>Test User</username></user></childuser>");
assertIsSatisfied(validEndpoint, invalidEndpoint, finallyEndpoint);
}
/**
* Test for the invalid schema import location.
*
* @throws Exception
*/
@Test
void testRelativeDoubleSlashSchemaImport() throws Exception {
this.context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start").doTry()
.to("validator:org/apache/camel/component/validator/doubleslash/child.xsd")
.to("mock:valid").doCatch(ValidationException.class).to("mock:invalid").doFinally()
.to("mock:finally").end();
}
});
validEndpoint.expectedMessageCount(1);
finallyEndpoint.expectedMessageCount(1);
template
.sendBody("direct:start",
"<childuser xmlns='http://foo.com/bar'><user><id>1</id><username>Test User</username></user></childuser>");
assertIsSatisfied(validEndpoint, invalidEndpoint, finallyEndpoint);
}
/**
* Test for the valid schema location relative to a path other than the validating schema
*
* @throws Exception
*/
@Test
void testChildParentUncleSchemaImport() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
from("direct:start")
.doTry()
.to("validator:org/apache/camel/component/validator/childparentuncle/child/child.xsd")
.to("mock:valid")
.doCatch(ValidationException.class)
.to("mock:invalid")
.doFinally()
.to("mock:finally")
.end();
}
});
validEndpoint.expectedMessageCount(1);
finallyEndpoint.expectedMessageCount(1);
template.sendBody("direct:start",
"<childuser xmlns='http://foo.com/bar'><user><id>1</id><username>Test User</username></user></childuser>");
assertIsSatisfied(validEndpoint, invalidEndpoint, finallyEndpoint);
}
@Override
@BeforeEach
public void doPostSetup() {
validEndpoint = resolveMandatoryEndpoint("mock:valid", MockEndpoint.class);
invalidEndpoint = resolveMandatoryEndpoint("mock:invalid", MockEndpoint.class);
finallyEndpoint = resolveMandatoryEndpoint("mock:finally", MockEndpoint.class);
}
}
| ValidatorSchemaImportTest |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/manytomany/ManyToManyWhereTest.java | {
"start": 914,
"end": 3140
} | class ____ {
@Test
public void testManyToManyWithWhereConditional(SessionFactoryScope scope) {
scope.inTransaction(
session -> {
// create advertisements
Advertisement advertisement1 = new Advertisement();
Advertisement advertisement2 = new Advertisement();
session.persist( advertisement1 );
session.persist( advertisement2 );
// create attachment relationships to advertisements
Attachment a1 = new Attachment();
a1.setFileName( "memo.txt" );
a1.setAdvertisements( new LinkedHashSet<>( Arrays.asList( advertisement1, advertisement2 ) ) );
Attachment a2 = new Attachment();
a2.setFileName( "mickeymouse.jpg" );
a2.setDeleted( "true" );
a2.setAdvertisements( new LinkedHashSet<>( Arrays.asList( advertisement1, advertisement2 ) ) );
advertisement1.setAttachments( new HashSet<>( Arrays.asList( a1, a2 ) ) );
advertisement2.setAttachments( new HashSet<>( Arrays.asList( a1, a2 ) ) );
session.persist( a1 );
session.persist( a2 );
}
);
scope.inTransaction(
session -> {
// create page advertisement relationships with advertisements
List<Advertisement> advertisements = (List<Advertisement>) session.createQuery( "FROM Advertisement" )
.list();
assertEquals( 2, advertisements.size() );
for ( Advertisement advertisement : advertisements ) {
PageAdvertisement pageAd = new PageAdvertisement();
pageAd.setAdvertisement( advertisement );
session.persist( pageAd );
}
}
);
scope.inTransaction(
session -> {
// query relationships and verify @Where condition fragment applies correctly.
List<PageAdvertisement> ads = (List<PageAdvertisement>) session.createQuery(
"FROM PageAdvertisement" ).list();
assertEquals( 2, ads.size() );
for ( PageAdvertisement ad : ads ) {
// there is only 1 not deleted attachment
assertEquals( 1, ad.getAdvertisement().getAttachments().size() );
for ( Attachment attachment : ad.getAdvertisement().getAttachments() ) {
// each attachment linked with two advertisements
assertEquals( 2, attachment.getAdvertisements().size() );
}
}
}
);
}
}
| ManyToManyWhereTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/threadsafety/ThreadSafeCheckerTest.java | {
"start": 32663,
"end": 33144
} | class ____ {
final X badX = new X(ImmutableList.of(ImmutableList.<String>of()));
}
""")
.doTest();
}
@Ignore("b/26797524 - add tests for generic arguments")
@Test
public void mutableTypeParam() {
compilationHelper
.addSourceLines(
"X.java",
"""
import com.google.common.collect.ImmutableList;
import com.google.errorprone.annotations.ThreadSafe;
public | Test |
java | apache__flink | flink-table/flink-table-common/src/main/java/org/apache/flink/table/functions/TableFunction.java | {
"start": 2296,
"end": 2758
} | class ____ determining an output data type. Input
* arguments are derived from one or more {@code eval()} methods. If the reflective information is
* not sufficient, it can be supported and enriched with {@link DataTypeHint} and {@link
* FunctionHint} annotations.
*
* <p>The following examples show how to specify a table function:
*
* <pre>{@code
* // Function that accepts an arbitrary number of INT arguments and emits them as implicit ROW < INT >
* | for |
java | apache__dubbo | dubbo-common/src/main/java/org/apache/dubbo/common/lang/ShutdownHookCallback.java | {
"start": 1538,
"end": 1730
} | interface ____ extends Prioritized {
/**
* Callback execution
*
* @throws Throwable if met with some errors
*/
void callback() throws Throwable;
}
| ShutdownHookCallback |
java | spring-projects__spring-boot | module/spring-boot-http-client/src/test/java/org/springframework/boot/http/client/autoconfigure/HttpClientAutoConfigurationTests.java | {
"start": 2579,
"end": 2779
} | class ____ {
@Bean
HttpClientSettings httpClientSettings() {
return HttpClientSettings.defaults().withTimeouts(Duration.ofSeconds(1), Duration.ofSeconds(2));
}
}
}
| TestHttpClientConfiguration |
java | netty__netty | handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslEngine.java | {
"start": 121458,
"end": 121833
} | class ____ extends SSLHandshakeException implements NativeSslException {
private final int errorCode;
OpenSslHandshakeException(String reason, int errorCode) {
super(reason);
this.errorCode = errorCode;
}
@Override
public int errorCode() {
return errorCode;
}
}
}
| OpenSslHandshakeException |
java | apache__camel | components/camel-rocketmq/src/main/java/org/apache/camel/component/rocketmq/RocketMQConsumer.java | {
"start": 1465,
"end": 4316
} | class ____ extends DefaultConsumer implements Suspendable {
private final RocketMQEndpoint endpoint;
private DefaultMQPushConsumer mqPushConsumer;
public RocketMQConsumer(RocketMQEndpoint endpoint, Processor processor) {
super(endpoint, processor);
this.endpoint = endpoint;
}
private void startConsumer() throws MQClientException {
mqPushConsumer = new DefaultMQPushConsumer(
endpoint.getConsumerGroup(),
RocketMQAclUtils.getAclRPCHook(getEndpoint().getAccessKey(), getEndpoint().getSecretKey()));
mqPushConsumer.setNamesrvAddr(endpoint.getNamesrvAddr());
mqPushConsumer.setNamespaceV2(endpoint.getNamespace());
MessageSelector messageSelector;
switch (endpoint.getMessageSelectorType().toLowerCase()) {
case "tag":
messageSelector = MessageSelector.byTag(endpoint.getSubscribeTags());
break;
case "sql":
messageSelector = MessageSelector.bySql(endpoint.getSubscribeSql());
break;
default:
throw new IllegalArgumentException("Unknown selector type: " + endpoint.getMessageSelectorType());
}
mqPushConsumer.setEnableTrace(endpoint.isEnableTrace());
mqPushConsumer.setAccessChannel(AccessChannel.valueOf(endpoint.getAccessChannel()));
mqPushConsumer.subscribe(endpoint.getTopicName(), messageSelector);
mqPushConsumer.registerMessageListener((MessageListenerConcurrently) (msgs, context) -> {
MessageExt messageExt = msgs.get(0);
Exchange exchange = endpoint.createRocketExchange(messageExt.getBody());
RocketMQMessageConverter.populateHeadersByMessageExt(exchange.getIn(), messageExt);
try {
getProcessor().process(exchange);
} catch (Exception e) {
getExceptionHandler().handleException(e);
return ConsumeConcurrentlyStatus.RECONSUME_LATER;
}
return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
});
mqPushConsumer.start();
}
private void stopConsumer() {
if (mqPushConsumer != null) {
mqPushConsumer.shutdown();
mqPushConsumer = null;
}
}
@Override
public RocketMQEndpoint getEndpoint() {
return (RocketMQEndpoint) super.getEndpoint();
}
@Override
protected void doSuspend() {
stopConsumer();
}
@Override
protected void doResume() throws Exception {
startConsumer();
}
@Override
protected void doStart() throws Exception {
super.doStart();
startConsumer();
}
@Override
protected void doStop() throws Exception {
super.doStop();
stopConsumer();
}
}
| RocketMQConsumer |
java | assertj__assertj-core | assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/recursive/comparison/legacy/RecursiveComparisonAssert_isEqualTo_with_arrays_Test.java | {
"start": 1244,
"end": 5473
} | class ____ extends WithLegacyIntrospectionStrategyBaseTest {
@ParameterizedTest(name = "author 1 {0} / author 2 {1}")
@MethodSource
void should_pass_when_comparing_same_array_fields(Author[] authors1, Author[] authors2) {
// GIVEN
WithArray<Author> actual = new WithArray<>(authors1);
WithArray<Author> expected = new WithArray<>(authors2);
// THEN
then(actual).usingRecursiveComparison(recursiveComparisonConfiguration)
.isEqualTo(expected);
}
static Stream<Arguments> should_pass_when_comparing_same_array_fields() {
Author pratchett = new Author("Terry Pratchett");
Author georgeMartin = new Author("George Martin");
Author none = null;
Author[] empty = array();
return Stream.of(Arguments.of(array(pratchett), array(pratchett)),
Arguments.of(array(pratchett, georgeMartin), array(pratchett, georgeMartin)),
Arguments.of(array(pratchett, none), array(pratchett, none)),
Arguments.of(empty, empty));
}
@ParameterizedTest(name = "authors 1 {0} / authors 2 {1} / difference {2}")
@MethodSource
void should_fail_when_comparing_different_array_fields(Author[] authors1, Author[] authors2, ComparisonDifference difference) {
// GIVEN
WithArray<Author> actual = new WithArray<>(authors1);
WithArray<Author> expected = new WithArray<>(authors2);
// WHEN/THEN
compareRecursivelyFailsWithDifferences(actual, expected, difference);
}
static Stream<Arguments> should_fail_when_comparing_different_array_fields() {
Author pratchett = new Author("Terry Pratchett");
Author georgeMartin = new Author("George Martin");
Author none = null;
return Stream.of(Arguments.of(array(pratchett), array(georgeMartin),
javaTypeDiff("group.[0].name", "Terry Pratchett", "George Martin")),
Arguments.of(array(pratchett, georgeMartin), array(pratchett),
diff("group", array(pratchett, georgeMartin), array(pratchett),
"actual and expected values are arrays of different size, actual size=2 when expected size=1")),
Arguments.of(array(pratchett), array(none), diff("group.[0]", pratchett, null, null)),
Arguments.of(array(none), array(pratchett), diff("group.[0]", null, pratchett, null)));
}
@ParameterizedTest(name = "authors {0} / object {1} / path {2} / value 1 {3}/ value 2 {4}")
@MethodSource
void should_fail_when_comparing_array_to_non_array(Object actualFieldValue, Author[] expectedFieldValue,
String path, Object value1, Object value2, String desc) {
// GIVEN
var actual = new WithGroupField(actualFieldValue);
WithArray<Author> expected = new WithArray<>(expectedFieldValue);
// WHEN/THEN
ComparisonDifference difference = desc == null ? diff(path, value1, value2) : diff(path, value1, value2, desc);
compareRecursivelyFailsWithDifferences(actual, expected, difference);
}
static Stream<Arguments> should_fail_when_comparing_array_to_non_array() {
Author pratchett = new Author("Terry Pratchett");
Author georgeMartin = new Author("George Martin");
// we need to use the actual array and the expected list otherwise compareRecursivelyFailsWithDifferences
// fails as actualArray and expectedList description includes their instance reference (@123ff3f) to differentiate their
// otherwise similar description
Author[] expectedFieldValue = array(pratchett, georgeMartin);
List<Author> actualFieldValue = list(pratchett, georgeMartin);
return Stream.of(Arguments.of(pratchett, array(pratchett), "group", pratchett, array(pratchett),
"expected field is an array but actual field is not (org.assertj.tests.core.api.recursive.data.Author)"),
Arguments.of(actualFieldValue, expectedFieldValue, "group", actualFieldValue, expectedFieldValue,
"expected field is an array but actual field is not (java.util.ArrayList)"));
}
public static | RecursiveComparisonAssert_isEqualTo_with_arrays_Test |
java | spring-projects__spring-boot | module/spring-boot-security/src/test/java/org/springframework/boot/security/autoconfigure/jpa/JpaUserDetailsTests.java | {
"start": 1864,
"end": 2142
} | class ____ {
@Test
void contextLoads() {
}
@Import({ EmbeddedDataSourceConfiguration.class, DataSourceAutoConfiguration.class,
HibernateJpaAutoConfiguration.class, PropertyPlaceholderAutoConfiguration.class,
SecurityAutoConfiguration.class })
static | JpaUserDetailsTests |
java | apache__commons-lang | src/main/java/org/apache/commons/lang3/reflect/MemberUtils.java | {
"start": 5398,
"end": 6092
} | class ____ the destination class. This represents the number of steps in the object hierarchy graph.
*
* @param srcClass The source class.
* @param destClass The destination class.
* @return The cost of transforming an object.
*/
private static float getObjectTransformationCost(Class<?> srcClass, final Class<?> destClass) {
if (destClass.isPrimitive()) {
return getPrimitivePromotionCost(srcClass, destClass);
}
float cost = 0.0f;
while (srcClass != null && !destClass.equals(srcClass)) {
if (destClass.isInterface() && ClassUtils.isAssignable(srcClass, destClass)) {
// slight penalty for | into |
java | netty__netty | transport-native-kqueue/src/test/java/io/netty/channel/kqueue/KQueueSocketShutdownOutputByPeerTest.java | {
"start": 878,
"end": 1144
} | class ____ extends SocketShutdownOutputByPeerTest {
@Override
protected List<TestsuitePermutation.BootstrapFactory<ServerBootstrap>> newFactories() {
return KQueueSocketTestPermutation.INSTANCE.serverSocket();
}
}
| KQueueSocketShutdownOutputByPeerTest |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/DuplicateBranchesTest.java | {
"start": 4561,
"end": 5005
} | class ____ {
String g(boolean a, String b, String c) {
// foo
// bar
return b;
}
}
""")
.doTest(TestMode.TEXT_MATCH);
}
@Test
public void commentRefactoringIfElse() {
BugCheckerRefactoringTestHelper.newInstance(DuplicateBranches.class, getClass())
.addInputLines(
"Test.java",
"""
| Test |
java | apache__hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java | {
"start": 938,
"end": 1144
} | class ____ around a server's proxy,
* containing a list of its supported methods.
*
* A list of methods with a value of null indicates that the client and server
* have the same protocol.
*/
public | wraps |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser.java | {
"start": 25775,
"end": 33505
} | class ____ extends ParserRuleContext {
public EvalCommandContext evalCommand() {
return getRuleContext(EvalCommandContext.class,0);
}
public WhereCommandContext whereCommand() {
return getRuleContext(WhereCommandContext.class,0);
}
public KeepCommandContext keepCommand() {
return getRuleContext(KeepCommandContext.class,0);
}
public LimitCommandContext limitCommand() {
return getRuleContext(LimitCommandContext.class,0);
}
public StatsCommandContext statsCommand() {
return getRuleContext(StatsCommandContext.class,0);
}
public SortCommandContext sortCommand() {
return getRuleContext(SortCommandContext.class,0);
}
public DropCommandContext dropCommand() {
return getRuleContext(DropCommandContext.class,0);
}
public RenameCommandContext renameCommand() {
return getRuleContext(RenameCommandContext.class,0);
}
public DissectCommandContext dissectCommand() {
return getRuleContext(DissectCommandContext.class,0);
}
public GrokCommandContext grokCommand() {
return getRuleContext(GrokCommandContext.class,0);
}
public EnrichCommandContext enrichCommand() {
return getRuleContext(EnrichCommandContext.class,0);
}
public MvExpandCommandContext mvExpandCommand() {
return getRuleContext(MvExpandCommandContext.class,0);
}
public JoinCommandContext joinCommand() {
return getRuleContext(JoinCommandContext.class,0);
}
public ChangePointCommandContext changePointCommand() {
return getRuleContext(ChangePointCommandContext.class,0);
}
public CompletionCommandContext completionCommand() {
return getRuleContext(CompletionCommandContext.class,0);
}
public SampleCommandContext sampleCommand() {
return getRuleContext(SampleCommandContext.class,0);
}
public ForkCommandContext forkCommand() {
return getRuleContext(ForkCommandContext.class,0);
}
public RerankCommandContext rerankCommand() {
return getRuleContext(RerankCommandContext.class,0);
}
public InlineStatsCommandContext inlineStatsCommand() {
return getRuleContext(InlineStatsCommandContext.class,0);
}
public FuseCommandContext fuseCommand() {
return getRuleContext(FuseCommandContext.class,0);
}
public LookupCommandContext lookupCommand() {
return getRuleContext(LookupCommandContext.class,0);
}
public InsistCommandContext insistCommand() {
return getRuleContext(InsistCommandContext.class,0);
}
public PromqlCommandContext promqlCommand() {
return getRuleContext(PromqlCommandContext.class,0);
}
@SuppressWarnings("this-escape")
public ProcessingCommandContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_processingCommand; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).enterProcessingCommand(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof EsqlBaseParserListener ) ((EsqlBaseParserListener)listener).exitProcessingCommand(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof EsqlBaseParserVisitor ) return ((EsqlBaseParserVisitor<? extends T>)visitor).visitProcessingCommand(this);
else return visitor.visitChildren(this);
}
}
public final ProcessingCommandContext processingCommand() throws RecognitionException {
ProcessingCommandContext _localctx = new ProcessingCommandContext(_ctx, getState());
enterRule(_localctx, 8, RULE_processingCommand);
try {
setState(253);
_errHandler.sync(this);
switch ( getInterpreter().adaptivePredict(_input,3,_ctx) ) {
case 1:
enterOuterAlt(_localctx, 1);
{
setState(227);
evalCommand();
}
break;
case 2:
enterOuterAlt(_localctx, 2);
{
setState(228);
whereCommand();
}
break;
case 3:
enterOuterAlt(_localctx, 3);
{
setState(229);
keepCommand();
}
break;
case 4:
enterOuterAlt(_localctx, 4);
{
setState(230);
limitCommand();
}
break;
case 5:
enterOuterAlt(_localctx, 5);
{
setState(231);
statsCommand();
}
break;
case 6:
enterOuterAlt(_localctx, 6);
{
setState(232);
sortCommand();
}
break;
case 7:
enterOuterAlt(_localctx, 7);
{
setState(233);
dropCommand();
}
break;
case 8:
enterOuterAlt(_localctx, 8);
{
setState(234);
renameCommand();
}
break;
case 9:
enterOuterAlt(_localctx, 9);
{
setState(235);
dissectCommand();
}
break;
case 10:
enterOuterAlt(_localctx, 10);
{
setState(236);
grokCommand();
}
break;
case 11:
enterOuterAlt(_localctx, 11);
{
setState(237);
enrichCommand();
}
break;
case 12:
enterOuterAlt(_localctx, 12);
{
setState(238);
mvExpandCommand();
}
break;
case 13:
enterOuterAlt(_localctx, 13);
{
setState(239);
joinCommand();
}
break;
case 14:
enterOuterAlt(_localctx, 14);
{
setState(240);
changePointCommand();
}
break;
case 15:
enterOuterAlt(_localctx, 15);
{
setState(241);
completionCommand();
}
break;
case 16:
enterOuterAlt(_localctx, 16);
{
setState(242);
sampleCommand();
}
break;
case 17:
enterOuterAlt(_localctx, 17);
{
setState(243);
forkCommand();
}
break;
case 18:
enterOuterAlt(_localctx, 18);
{
setState(244);
rerankCommand();
}
break;
case 19:
enterOuterAlt(_localctx, 19);
{
setState(245);
inlineStatsCommand();
}
break;
case 20:
enterOuterAlt(_localctx, 20);
{
setState(246);
fuseCommand();
}
break;
case 21:
enterOuterAlt(_localctx, 21);
{
setState(247);
if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()");
setState(248);
lookupCommand();
}
break;
case 22:
enterOuterAlt(_localctx, 22);
{
setState(249);
if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()");
setState(250);
insistCommand();
}
break;
case 23:
enterOuterAlt(_localctx, 23);
{
setState(251);
if (!(this.isDevVersion())) throw new FailedPredicateException(this, "this.isDevVersion()");
setState(252);
promqlCommand();
}
break;
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
@SuppressWarnings("CheckReturnValue")
public static | ProcessingCommandContext |
java | apache__kafka | metadata/src/test/java/org/apache/kafka/image/ScramImageTest.java | {
"start": 1935,
"end": 7957
} | class ____ {
public static final ScramImage IMAGE1;
public static final List<ApiMessageAndVersion> DELTA1_RECORDS;
static final ScramDelta DELTA1;
static final ScramImage IMAGE2;
static byte[] randomBuffer(Random random, int length) {
byte[] buf = new byte[length];
random.nextBytes(buf);
return buf;
}
static ScramCredentialData randomScramCredentialData(Random random) {
return new ScramCredentialData(
randomBuffer(random, 1024),
randomBuffer(random, 1024),
randomBuffer(random, 1024),
1024 + random.nextInt(1024));
}
static {
MockRandom random = new MockRandom();
Map<ScramMechanism, Map<String, ScramCredentialData>> image1mechanisms = new HashMap<>();
Map<String, ScramCredentialData> image1sha256 = new HashMap<>();
image1sha256.put("alpha", randomScramCredentialData(random));
image1sha256.put("beta", randomScramCredentialData(random));
image1mechanisms.put(SCRAM_SHA_256, image1sha256);
Map<String, ScramCredentialData> image1sha512 = new HashMap<>();
image1sha512.put("alpha", randomScramCredentialData(random));
image1sha512.put("gamma", randomScramCredentialData(random));
image1mechanisms.put(SCRAM_SHA_512, image1sha512);
IMAGE1 = new ScramImage(image1mechanisms);
DELTA1_RECORDS = new ArrayList<>();
// remove all sha512 credentials
DELTA1_RECORDS.add(new ApiMessageAndVersion(new RemoveUserScramCredentialRecord().
setName("alpha").
setMechanism(SCRAM_SHA_512.type()), (short) 0));
DELTA1_RECORDS.add(new ApiMessageAndVersion(new RemoveUserScramCredentialRecord().
setName("gamma").
setMechanism(SCRAM_SHA_512.type()), (short) 0));
ScramCredentialData secondAlpha256Credential = randomScramCredentialData(random);
// add sha256 credential
DELTA1_RECORDS.add(new ApiMessageAndVersion(new UserScramCredentialRecord().
setName("alpha").
setMechanism(SCRAM_SHA_256.type()).
setSalt(secondAlpha256Credential.salt()).
setStoredKey(secondAlpha256Credential.storedKey()).
setServerKey(secondAlpha256Credential.serverKey()).
setIterations(secondAlpha256Credential.iterations()), (short) 0));
// add sha512 credential re-using name
ScramCredentialData secondAlpha512Credential = randomScramCredentialData(random);
DELTA1_RECORDS.add(new ApiMessageAndVersion(new UserScramCredentialRecord().
setName("alpha").
setMechanism(SCRAM_SHA_512.type()).
setSalt(secondAlpha512Credential.salt()).
setStoredKey(secondAlpha512Credential.storedKey()).
setServerKey(secondAlpha512Credential.serverKey()).
setIterations(secondAlpha512Credential.iterations()), (short) 0));
DELTA1 = new ScramDelta(IMAGE1);
RecordTestUtils.replayAll(DELTA1, DELTA1_RECORDS);
Map<ScramMechanism, Map<String, ScramCredentialData>> image2mechanisms = new HashMap<>();
Map<String, ScramCredentialData> image2sha256 = new HashMap<>();
image2sha256.put("alpha", secondAlpha256Credential);
image2sha256.put("beta", image1sha256.get("beta"));
image2mechanisms.put(SCRAM_SHA_256, image2sha256);
Map<String, ScramCredentialData> image2sha512 = new HashMap<>();
image2sha512.put("alpha", secondAlpha512Credential);
image2mechanisms.put(SCRAM_SHA_512, image2sha512);
IMAGE2 = new ScramImage(image2mechanisms);
}
@Test
public void testEmptyImageRoundTrip() {
testToImage(ScramImage.EMPTY);
}
@Test
public void testImage1RoundTrip() {
testToImage(IMAGE1);
}
@Test
public void testApplyDelta1() {
assertEquals(IMAGE2, DELTA1.apply());
// check image1 + delta1 = image2, since records for image1 + delta1 might differ from records from image2
List<ApiMessageAndVersion> records = getImageRecords(IMAGE1);
records.addAll(DELTA1_RECORDS);
testToImage(IMAGE2, records);
}
@Test
public void testImage2RoundTrip() {
testToImage(IMAGE2);
}
private static void testToImage(ScramImage image) {
testToImage(image, Optional.empty());
}
private static void testToImage(ScramImage image, Optional<List<ApiMessageAndVersion>> fromRecords) {
testToImage(image, fromRecords.orElseGet(() -> getImageRecords(image)));
}
private static void testToImage(ScramImage image, List<ApiMessageAndVersion> fromRecords) {
// test from empty image stopping each of the various intermediate images along the way
new RecordTestUtils.TestThroughAllIntermediateImagesLeadingToFinalImageHelper<>(
() -> ScramImage.EMPTY,
ScramDelta::new
).test(image, fromRecords);
}
private static List<ApiMessageAndVersion> getImageRecords(ScramImage image) {
RecordListWriter writer = new RecordListWriter();
image.write(writer, new ImageWriterOptions.Builder(MetadataVersion.latestProduction()).build());
return writer.records();
}
@Test
public void testEmptyWithInvalidIBP() {
ImageWriterOptions imageWriterOptions = new ImageWriterOptions.Builder(MetadataVersion.IBP_3_4_IV0).build();
RecordListWriter writer = new RecordListWriter();
ScramImage.EMPTY.write(writer, imageWriterOptions);
}
@Test
public void testImage1withInvalidIBP() {
ImageWriterOptions imageWriterOptions = new ImageWriterOptions.Builder(MetadataVersion.IBP_3_4_IV0).build();
RecordListWriter writer = new RecordListWriter();
assertThrows(Exception.class, () -> IMAGE1.write(writer, imageWriterOptions),
"expected exception writing IMAGE with SCRAM records for MetadataVersion.IBP_3_4_IV0");
}
}
| ScramImageTest |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_1300/Issue1367.java | {
"start": 2316,
"end": 2722
} | class ____<ID extends Serializable, PO extends GenericEntity<ID>> {
@PostMapping(path = "/typeVariableBean",consumes = MediaType.APPLICATION_JSON_UTF8_VALUE, produces = MediaType.APPLICATION_JSON_UTF8_VALUE)
public PO save(@RequestBody PO dto) {
//do something
return dto;
}
}
@RestController
@RequestMapping()
public static | AbstractController |
java | apache__camel | components/camel-github/src/test/java/org/apache/camel/component/github/producer/PullRequestFilesProducerTest.java | {
"start": 2919,
"end": 3253
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) {
Message in = exchange.getIn();
Map<String, Object> headers = in.getHeaders();
headers.put(GitHubConstants.GITHUB_PULLREQUEST, latestPullRequestNumber);
}
}
}
| MockPullFilesProducerProcessor |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/jpa/procedure/AnnotationTest.java | {
"start": 262,
"end": 323
} | class ____ extends AbstractStoredProcedureTest {
}
| AnnotationTest |
java | micronaut-projects__micronaut-core | http/src/main/java/io/micronaut/http/HttpRequestWrapper.java | {
"start": 1088,
"end": 3488
} | class ____<B> extends HttpMessageWrapper<B> implements HttpRequest<B> {
/**
* @param delegate The Http Request
*/
public HttpRequestWrapper(HttpRequest<B> delegate) {
super(delegate);
}
@Override
public HttpRequest<B> getDelegate() {
return (HttpRequest<B>) super.getDelegate();
}
@Override
public HttpVersion getHttpVersion() {
return getDelegate().getHttpVersion();
}
@Override
public Collection<MediaType> accept() {
return getDelegate().accept();
}
@NonNull
@Override
public Optional<Principal> getUserPrincipal() {
return getDelegate().getUserPrincipal();
}
@NonNull
@Override
public <T extends Principal> Optional<T> getUserPrincipal(Class<T> principalType) {
return getDelegate().getUserPrincipal(principalType);
}
@Override
public HttpRequest<B> setAttribute(CharSequence name, Object value) {
return getDelegate().setAttribute(name, value);
}
@Override
public Optional<Locale> getLocale() {
return getDelegate().getLocale();
}
@Override
public Optional<Certificate> getCertificate() {
return getDelegate().getCertificate();
}
@Override
public Optional<SSLSession> getSslSession() {
return getDelegate().getSslSession();
}
@Override
public Cookies getCookies() {
return getDelegate().getCookies();
}
@Override
public HttpParameters getParameters() {
return getDelegate().getParameters();
}
@Override
public HttpMethod getMethod() {
return getDelegate().getMethod();
}
@Override
public String getMethodName() {
return getDelegate().getMethodName();
}
@Override
public URI getUri() {
return getDelegate().getUri();
}
@Override
public String getPath() {
return getDelegate().getPath();
}
@Override
public InetSocketAddress getRemoteAddress() {
return getDelegate().getRemoteAddress();
}
@Override
public InetSocketAddress getServerAddress() {
return getDelegate().getServerAddress();
}
@Override
public String getServerName() {
return getDelegate().getServerName();
}
@Override
public boolean isSecure() {
return getDelegate().isSecure();
}
}
| HttpRequestWrapper |
java | apache__spark | examples/src/main/java/org/apache/spark/examples/mllib/JavaBinaryClassificationMetricsExample.java | {
"start": 1329,
"end": 4032
} | class ____ {
public static void main(String[] args) {
SparkConf conf = new SparkConf().setAppName("Java Binary Classification Metrics Example");
SparkContext sc = new SparkContext(conf);
// $example on$
String path = "data/mllib/sample_binary_classification_data.txt";
JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc, path).toJavaRDD();
// Split initial RDD into two... [60% training data, 40% testing data].
JavaRDD<LabeledPoint>[] splits =
data.randomSplit(new double[]{0.6, 0.4}, 11L);
JavaRDD<LabeledPoint> training = splits[0].cache();
JavaRDD<LabeledPoint> test = splits[1];
// Run training algorithm to build the model.
LogisticRegressionModel model = new LogisticRegressionWithLBFGS()
.setNumClasses(2)
.run(training.rdd());
// Clear the prediction threshold so the model will return probabilities
model.clearThreshold();
// Compute raw scores on the test set.
JavaPairRDD<Object, Object> predictionAndLabels = test.mapToPair(p ->
new Tuple2<>(model.predict(p.features()), p.label()));
// Get evaluation metrics.
BinaryClassificationMetrics metrics =
new BinaryClassificationMetrics(predictionAndLabels.rdd());
// Precision by threshold
JavaRDD<Tuple2<Object, Object>> precision = metrics.precisionByThreshold().toJavaRDD();
System.out.println("Precision by threshold: " + precision.collect());
// Recall by threshold
JavaRDD<?> recall = metrics.recallByThreshold().toJavaRDD();
System.out.println("Recall by threshold: " + recall.collect());
// F Score by threshold
JavaRDD<?> f1Score = metrics.fMeasureByThreshold().toJavaRDD();
System.out.println("F1 Score by threshold: " + f1Score.collect());
JavaRDD<?> f2Score = metrics.fMeasureByThreshold(2.0).toJavaRDD();
System.out.println("F2 Score by threshold: " + f2Score.collect());
// Precision-recall curve
JavaRDD<?> prc = metrics.pr().toJavaRDD();
System.out.println("Precision-recall curve: " + prc.collect());
// Thresholds
JavaRDD<Double> thresholds = precision.map(t -> Double.parseDouble(t._1().toString()));
// ROC Curve
JavaRDD<?> roc = metrics.roc().toJavaRDD();
System.out.println("ROC curve: " + roc.collect());
// AUPRC
System.out.println("Area under precision-recall curve = " + metrics.areaUnderPR());
// AUROC
System.out.println("Area under ROC = " + metrics.areaUnderROC());
// Save and load model
model.save(sc, "target/tmp/LogisticRegressionModel");
LogisticRegressionModel.load(sc, "target/tmp/LogisticRegressionModel");
// $example off$
sc.stop();
}
}
| JavaBinaryClassificationMetricsExample |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java | {
"start": 1169,
"end": 2330
} | class ____ extends GeoGridAggregator<InternalGeoTileGrid> {
public GeoTileGridAggregator(
String name,
AggregatorFactories factories,
Function<LongConsumer, ValuesSource.Numeric> valuesSource,
int requiredSize,
int shardSize,
AggregationContext context,
Aggregator parent,
CardinalityUpperBound cardinality,
Map<String, Object> metadata
) throws IOException {
super(name, factories, valuesSource, requiredSize, shardSize, context, parent, cardinality, metadata);
}
@Override
protected InternalGeoTileGrid buildAggregation(
String name,
int requiredSize,
List<InternalGeoGridBucket> buckets,
Map<String, Object> metadata
) {
return new InternalGeoTileGrid(name, requiredSize, buckets, metadata);
}
@Override
public InternalGeoTileGrid buildEmptyAggregation() {
return new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata());
}
protected InternalGeoGridBucket newEmptyBucket() {
return new InternalGeoTileGridBucket(0, 0, null);
}
}
| GeoTileGridAggregator |
java | junit-team__junit5 | junit-platform-engine/src/main/java/org/junit/platform/engine/discovery/DiscoverySelectors.java | {
"start": 1958,
"end": 8370
} | class ____ {
private DiscoverySelectors() {
/* no-op */
}
/**
* Create a {@code UriSelector} for the supplied URI.
*
* @param uri the URI to select; never {@code null} or blank
* @see UriSelector
* @see #selectUri(URI)
* @see #selectFile(String)
* @see #selectFile(File)
* @see #selectDirectory(String)
* @see #selectDirectory(File)
*/
public static UriSelector selectUri(String uri) {
Preconditions.notBlank(uri, "URI must not be null or blank");
try {
return new UriSelector(new URI(uri));
}
catch (URISyntaxException ex) {
throw new PreconditionViolationException("Failed to create a java.net.URI from: " + uri, ex);
}
}
/**
* Create a {@code UriSelector} for the supplied {@link URI}.
*
* @param uri the URI to select; never {@code null}
* @see UriSelector
* @see #selectUri(String)
* @see #selectFile(String)
* @see #selectFile(File)
* @see #selectDirectory(String)
* @see #selectDirectory(File)
*/
public static UriSelector selectUri(URI uri) {
Preconditions.notNull(uri, "URI must not be null");
return new UriSelector(uri);
}
/**
* Create a {@code FileSelector} for the supplied file path.
*
* <p>This method selects the file using the supplied path <em>as is</em>,
* without verifying if the file exists.
*
* @param path the path to the file to select; never {@code null} or blank
* @see FileSelector
* @see #selectFile(File)
* @see #selectFile(String, FilePosition)
* @see #selectFile(File, FilePosition)
* @see #selectDirectory(String)
* @see #selectDirectory(File)
*/
public static FileSelector selectFile(String path) {
return selectFile(path, null);
}
/**
* Create a {@code FileSelector} for the supplied {@linkplain File file}.
*
* <p>This method selects the file in its {@linkplain File#getCanonicalPath()
* canonical} form and throws a {@link PreconditionViolationException} if the
* file does not exist.
*
* @param file the file to select; never {@code null}
* @see FileSelector
* @see #selectFile(String)
* @see #selectFile(File, FilePosition)
* @see #selectFile(String, FilePosition)
* @see #selectDirectory(String)
* @see #selectDirectory(File)
*/
public static FileSelector selectFile(File file) {
return selectFile(file, null);
}
/**
* Create a {@code FileSelector} for the supplied file path.
*
* <p>This method selects the file using the supplied path <em>as is</em>,
* without verifying if the file exists.
*
* @param path the path to the file to select; never {@code null} or blank
* @param position the position inside the file; may be {@code null}
* @see FileSelector
* @see #selectFile(String)
* @see #selectFile(File)
* @see #selectFile(File, FilePosition)
* @see #selectDirectory(String)
* @see #selectDirectory(File)
*/
public static FileSelector selectFile(String path, @Nullable FilePosition position) {
Preconditions.notBlank(path, "File path must not be null or blank");
return new FileSelector(path, position);
}
/**
* Create a {@code FileSelector} for the supplied {@linkplain File file}.
*
* <p>This method selects the file in its {@linkplain File#getCanonicalPath()
* canonical} form and throws a {@link PreconditionViolationException} if the
* file does not exist.
*
* @param file the file to select; never {@code null}
* @param position the position inside the file; may be {@code null}
* @see FileSelector
* @see #selectFile(File)
* @see #selectFile(String)
* @see #selectFile(String, FilePosition)
* @see #selectDirectory(String)
* @see #selectDirectory(File)
*/
public static FileSelector selectFile(File file, @Nullable FilePosition position) {
Preconditions.notNull(file, "File must not be null");
Preconditions.condition(file.isFile(),
() -> "The supplied java.io.File [%s] must represent an existing file".formatted(file));
try {
return new FileSelector(file.getCanonicalPath(), position);
}
catch (IOException ex) {
throw new PreconditionViolationException("Failed to retrieve canonical path for file: " + file, ex);
}
}
/**
* Create a {@code DirectorySelector} for the supplied directory path.
*
* <p>This method selects the directory using the supplied path <em>as is</em>,
* without verifying if the directory exists.
*
* @param path the path to the directory to select; never {@code null} or blank
* @see DirectorySelector
* @see #selectDirectory(File)
* @see #selectFile(String)
* @see #selectFile(File)
*/
public static DirectorySelector selectDirectory(String path) {
Preconditions.notBlank(path, "Directory path must not be null or blank");
return new DirectorySelector(path);
}
/**
* Create a {@code DirectorySelector} for the supplied {@linkplain File directory}.
*
* <p>This method selects the directory in its {@linkplain File#getCanonicalPath()
* canonical} form and throws a {@link PreconditionViolationException} if the
* directory does not exist.
*
* @param directory the directory to select; never {@code null}
* @see DirectorySelector
* @see #selectDirectory(String)
* @see #selectFile(String)
* @see #selectFile(File)
*/
public static DirectorySelector selectDirectory(File directory) {
Preconditions.notNull(directory, "Directory must not be null");
Preconditions.condition(directory.isDirectory(),
() -> "The supplied java.io.File [%s] must represent an existing directory".formatted(directory));
try {
return new DirectorySelector(directory.getCanonicalPath());
}
catch (IOException ex) {
throw new PreconditionViolationException("Failed to retrieve canonical path for directory: " + directory,
ex);
}
}
/**
* Create a list of {@code ClasspathRootSelectors} for the supplied
* <em>classpath roots</em> (directories or JAR files).
*
* <p>Since the supplied paths are converted to {@link URI URIs}, the
* {@link java.nio.file.FileSystem} that created them must be the
* {@linkplain java.nio.file.FileSystems#getDefault() default} or one that
* has been created by an installed
* {@link java.nio.file.spi.FileSystemProvider}.
*
* <p>Since {@linkplain org.junit.platform.engine.TestEngine engines} are not
* expected to modify the classpath, the classpath roots represented by the
* resulting selectors must be on the classpath of the
* {@linkplain Thread#getContextClassLoader() context | DiscoverySelectors |
java | spring-projects__spring-framework | spring-webflux/src/main/java/org/springframework/web/reactive/resource/AbstractResourceResolver.java | {
"start": 1102,
"end": 2039
} | class ____ implements ResourceResolver {
protected final Log logger = LogFactory.getLog(getClass());
@Override
public Mono<Resource> resolveResource(@Nullable ServerWebExchange exchange, String requestPath,
List<? extends Resource> locations, ResourceResolverChain chain) {
return resolveResourceInternal(exchange, requestPath, locations, chain);
}
@Override
public Mono<String> resolveUrlPath(String resourceUrlPath, List<? extends Resource> locations,
ResourceResolverChain chain) {
return resolveUrlPathInternal(resourceUrlPath, locations, chain);
}
protected abstract Mono<Resource> resolveResourceInternal(@Nullable ServerWebExchange exchange,
String requestPath, List<? extends Resource> locations, ResourceResolverChain chain);
protected abstract Mono<String> resolveUrlPathInternal(String resourceUrlPath,
List<? extends Resource> locations, ResourceResolverChain chain);
}
| AbstractResourceResolver |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/lucene/search/uhighlight/BoundedBreakIteratorScanner.java | {
"start": 1832,
"end": 6836
} | class ____ extends BreakIterator {
private final BreakIterator mainBreak;
private final BreakIterator innerBreak;
private final int maxLen;
private int lastPrecedingOffset = -1;
private int windowStart = -1;
private int windowEnd = -1;
private int innerStart = -1;
private int innerEnd = 0;
private BoundedBreakIteratorScanner(BreakIterator mainBreak, BreakIterator innerBreak, int maxLen) {
this.mainBreak = mainBreak;
this.innerBreak = innerBreak;
this.maxLen = maxLen;
}
@Override
public CharacterIterator getText() {
return mainBreak.getText();
}
@Override
public void setText(CharacterIterator newText) {
reset();
mainBreak.setText(newText);
innerBreak.setText(newText);
}
@Override
public void setText(String newText) {
reset();
mainBreak.setText(newText);
innerBreak.setText(newText);
}
private void reset() {
lastPrecedingOffset = -1;
windowStart = -1;
windowEnd = -1;
innerStart = -1;
innerEnd = 0;
}
/**
* Must be called with increasing offset. See {@link FieldHighlighter} for usage.
*/
@Override
public int preceding(int offset) {
if (offset < lastPrecedingOffset) {
throw new IllegalArgumentException("offset < lastPrecedingOffset: " + "usage doesn't look like UnifiedHighlighter");
}
if (offset > windowStart && offset < windowEnd) {
innerStart = innerEnd;
innerEnd = windowEnd;
} else {
innerStart = Math.max(mainBreak.preceding(offset), 0);
final long targetEndOffset = (long) offset + Math.max(0, maxLen - (offset - innerStart));
final int textEndIndex = getText().getEndIndex();
if (targetEndOffset + 1 > textEndIndex) {
innerEnd = textEndIndex;
} else {
innerEnd = mainBreak.preceding((int) targetEndOffset + 1);
}
assert innerEnd != DONE && innerEnd >= innerStart;
// in case no break was found up to maxLen, find one afterwards.
if (innerStart == innerEnd) {
innerEnd = mainBreak.following((int) targetEndOffset);
assert innerEnd - innerStart > maxLen;
} else {
assert innerEnd - innerStart <= maxLen;
}
windowStart = innerStart;
windowEnd = innerEnd;
}
if (innerEnd - innerStart > maxLen) {
// the current split is too big,
// so starting from the current term we try to find boundaries on the left first
if (offset - maxLen > innerStart) {
innerStart = Math.max(innerStart, innerBreak.preceding(offset - maxLen));
}
// and then we try to expand the passage to the right with the remaining size
int remaining = Math.max(0, maxLen - (offset - innerStart));
if (offset + remaining < windowEnd) {
innerEnd = Math.min(windowEnd, innerBreak.following(offset + remaining));
}
}
lastPrecedingOffset = offset - 1;
return innerStart;
}
/**
* Can be invoked only after a call to preceding().
*
* See {@link FieldHighlighter} for usage.
*/
@Override
public int following(int offset) {
if (innerEnd == -1) {
throw new IllegalArgumentException("preceding should be called first, usage doesn't look like UnifiedHighlighter");
}
return Math.max(offset, innerEnd);
}
/**
* Returns a {@link BreakIterator#getSentenceInstance(Locale)} bounded to maxLen.
* Secondary boundaries are found using a {@link BreakIterator#getWordInstance(Locale)}.
*/
public static BreakIterator getSentence(Locale locale, int maxLen) {
final BreakIterator sBreak = BreakIterator.getSentenceInstance(locale);
final BreakIterator wBreak = BreakIterator.getWordInstance(locale);
return new BoundedBreakIteratorScanner(sBreak, wBreak, maxLen);
}
@Override
public int current() {
// Returns the last offset of the current split
return this.innerEnd;
}
@Override
public int first() {
throw new IllegalStateException("first() should not be called in this context");
}
@Override
public int next() {
throw new IllegalStateException("next() should not be called in this context");
}
@Override
public int last() {
throw new IllegalStateException("last() should not be called in this context");
}
@Override
public int next(int n) {
throw new IllegalStateException("next(n) should not be called in this context");
}
@Override
public int previous() {
throw new IllegalStateException("previous() should not be called in this context");
}
}
| BoundedBreakIteratorScanner |
java | ReactiveX__RxJava | src/test/java/io/reactivex/rxjava3/internal/operators/flowable/FlowableRefCountTest.java | {
"start": 32810,
"end": 39409
} | class ____ extends ConnectableFlowable<Object> {
@Override
public void connect(Consumer<? super Disposable> connection) {
try {
connection.accept(Disposable.empty());
} catch (Throwable ex) {
throw ExceptionHelper.wrapOrThrow(ex);
}
}
@Override
public void reset() {
throw new TestException("dispose");
}
@Override
protected void subscribeActual(Subscriber<? super Object> subscriber) {
subscriber.onSubscribe(new BooleanSubscription());
subscriber.onComplete();
}
}
@Test
public void badSourceCompleteDisconnect() {
List<Throwable> errors = TestHelper.trackPluginErrors();
try {
BadFlowableConnect2 bf = new BadFlowableConnect2();
try {
bf.refCount()
.test();
fail("Should have thrown");
} catch (NullPointerException ex) {
assertTrue(ex.getCause() instanceof TestException);
}
TestHelper.assertUndeliverable(errors, 0, TestException.class);
} finally {
RxJavaPlugins.reset();
}
}
@Test
public void blockingSourceAsnycCancel() throws Exception {
BehaviorProcessor<Integer> bp = BehaviorProcessor.createDefault(1);
Flowable<Integer> f = bp
.replay(1)
.refCount();
f.subscribe();
final AtomicBoolean interrupted = new AtomicBoolean();
f.switchMap(new Function<Integer, Publisher<? extends Object>>() {
@Override
public Publisher<? extends Object> apply(Integer v) throws Exception {
return Flowable.create(new FlowableOnSubscribe<Object>() {
@Override
public void subscribe(FlowableEmitter<Object> emitter) throws Exception {
while (!emitter.isCancelled()) {
Thread.sleep(100);
}
interrupted.set(true);
}
}, BackpressureStrategy.MISSING);
}
})
.takeUntil(Flowable.timer(500, TimeUnit.MILLISECONDS))
.test()
.awaitDone(5, TimeUnit.SECONDS)
.assertResult();
assertTrue(interrupted.get());
}
@Test
public void byCount() {
final int[] subscriptions = { 0 };
Flowable<Integer> source = Flowable.range(1, 5)
.doOnSubscribe(new Consumer<Subscription>() {
@Override
public void accept(Subscription s) throws Exception {
subscriptions[0]++;
}
})
.publish()
.refCount(2);
for (int i = 0; i < 3; i++) {
TestSubscriber<Integer> ts1 = source.test();
ts1.assertEmpty();
TestSubscriber<Integer> ts2 = source.test();
ts1.assertResult(1, 2, 3, 4, 5);
ts2.assertResult(1, 2, 3, 4, 5);
}
assertEquals(3, subscriptions[0]);
}
@Test
public void resubscribeBeforeTimeout() throws Exception {
final int[] subscriptions = { 0 };
PublishProcessor<Integer> pp = PublishProcessor.create();
Flowable<Integer> source = pp
.doOnSubscribe(new Consumer<Subscription>() {
@Override
public void accept(Subscription s) throws Exception {
subscriptions[0]++;
}
})
.publish()
.refCount(500, TimeUnit.MILLISECONDS);
TestSubscriber<Integer> ts1 = source.test(0);
assertEquals(1, subscriptions[0]);
ts1.cancel();
Thread.sleep(100);
ts1 = source.test(0);
assertEquals(1, subscriptions[0]);
Thread.sleep(500);
assertEquals(1, subscriptions[0]);
pp.onNext(1);
pp.onNext(2);
pp.onNext(3);
pp.onNext(4);
pp.onNext(5);
pp.onComplete();
ts1.requestMore(5)
.assertResult(1, 2, 3, 4, 5);
}
@Test
public void letitTimeout() throws Exception {
final int[] subscriptions = { 0 };
PublishProcessor<Integer> pp = PublishProcessor.create();
Flowable<Integer> source = pp
.doOnSubscribe(new Consumer<Subscription>() {
@Override
public void accept(Subscription s) throws Exception {
subscriptions[0]++;
}
})
.publish()
.refCount(1, 100, TimeUnit.MILLISECONDS);
TestSubscriber<Integer> ts1 = source.test(0);
assertEquals(1, subscriptions[0]);
ts1.cancel();
assertTrue(pp.hasSubscribers());
Thread.sleep(200);
assertFalse(pp.hasSubscribers());
}
@Test
public void error() {
Flowable.<Integer>error(new IOException())
.publish()
.refCount(500, TimeUnit.MILLISECONDS)
.test()
.assertFailure(IOException.class);
}
@Test
public void comeAndGo() {
PublishProcessor<Integer> pp = PublishProcessor.create();
Flowable<Integer> source = pp
.publish()
.refCount(1);
TestSubscriber<Integer> ts1 = source.test(0);
assertTrue(pp.hasSubscribers());
for (int i = 0; i < 3; i++) {
TestSubscriber<Integer> ts2 = source.test();
ts1.cancel();
ts1 = ts2;
}
ts1.cancel();
assertFalse(pp.hasSubscribers());
}
@Test
public void unsubscribeSubscribeRace() {
for (int i = 0; i < 1000; i++) {
final Flowable<Integer> source = Flowable.range(1, 5)
.replay()
.refCount(1)
;
final TestSubscriber<Integer> ts1 = source.test(0);
final TestSubscriber<Integer> ts2 = new TestSubscriber<>(0);
Runnable r1 = new Runnable() {
@Override
public void run() {
ts1.cancel();
}
};
Runnable r2 = new Runnable() {
@Override
public void run() {
source.subscribe(ts2);
}
};
TestHelper.race(r1, r2, Schedulers.single());
ts2.requestMore(6) // FIXME RxJava replay() doesn't issue onComplete without request
.withTag("Round: " + i)
.assertResult(1, 2, 3, 4, 5);
}
}
static final | BadFlowableConnect2 |
java | apache__kafka | clients/src/test/java/org/apache/kafka/common/protocol/ProtoUtilsTest.java | {
"start": 1005,
"end": 2192
} | class ____ {
@Test
public void testDelayedAllocationSchemaDetection() {
//verifies that schemas known to retain a reference to the underlying byte buffer are correctly detected.
for (ApiKeys key : ApiKeys.values()) {
switch (key) {
case PRODUCE:
case JOIN_GROUP:
case SYNC_GROUP:
case SASL_AUTHENTICATE:
case EXPIRE_DELEGATION_TOKEN:
case RENEW_DELEGATION_TOKEN:
case ALTER_USER_SCRAM_CREDENTIALS:
case PUSH_TELEMETRY:
case ENVELOPE:
assertTrue(key.requiresDelayedAllocation, key + " should require delayed allocation");
break;
default:
if (key.forwardable)
assertTrue(key.requiresDelayedAllocation,
key + " should require delayed allocation since it is forwardable");
else
assertFalse(key.requiresDelayedAllocation, key + " should not require delayed allocation");
break;
}
}
}
}
| ProtoUtilsTest |
java | elastic__elasticsearch | x-pack/plugin/esql/arrow/src/main/java/org/elasticsearch/xpack/esql/arrow/ArrowResponse.java | {
"start": 8787,
"end": 9197
} | class ____ extends ResponseSegment {
private final Page page;
private boolean done = false;
PageResponse(ArrowResponse response, Page page) {
super(response);
this.page = page;
}
@Override
public boolean isDone() {
return done;
}
// Writes some data and returns the number of bytes written.
| PageResponse |
java | elastic__elasticsearch | libs/entitlement/qa/entitlement-test-plugin/src/main/java/org/elasticsearch/entitlement/qa/test/DummyImplementations.java | {
"start": 22353,
"end": 22615
} | class ____ implements Channels.SelectableChannelCloser {
@Override
public void implCloseChannel(SelectableChannel sc) {}
@Override
public void implReleaseChannel(SelectableChannel sc) {}
}
static | DummySelectableChannelCloser |
java | apache__maven | its/core-it-suite/src/test/java/org/apache/maven/it/MavenITmng4679SnapshotUpdateInPluginTest.java | {
"start": 1124,
"end": 3227
} | class ____ extends AbstractMavenIntegrationTestCase {
/**
* Verify that plugins using the 2.x style artifact resolver/collector directly are subject to the snapshot update
* mode of the current Maven session.
*
* @throws Exception in case of failure
*/
@Test
public void testit() throws Exception {
File testDir = extractResources("/mng-4679");
Verifier verifier = newVerifier(testDir.getAbsolutePath());
verifier.setAutoclean(false);
verifier.deleteArtifacts("org.apache.maven.its.mng4679");
verifier.addCliArgument("-s");
verifier.addCliArgument("settings.xml");
Map<String, String> filterProps = verifier.newDefaultFilterMap();
filterProps.put("@repo@", "repo-1");
verifier.filterFile("settings-template.xml", "settings.xml", filterProps);
verifier.setLogFileName("log-force-1.txt");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
assertChecksum(verifier, "jar", "2ea5c3d713bbaba7b87746449b91cd00e876703d");
assertChecksum(verifier, "pom", "d6883b610a0e087464ece92ac1e7f2b8e742e71f");
filterProps.put("@repo@", "repo-2");
verifier.filterFile("settings-template.xml", "settings.xml", filterProps);
verifier.setLogFileName("log-force-2.txt");
verifier.deleteDirectory("target");
verifier.addCliArgument("-U");
verifier.addCliArgument("validate");
verifier.execute();
verifier.verifyErrorFreeLog();
assertChecksum(verifier, "jar", "f3d46277c2ab45ff9bbd97605c942bed7fc27f97");
assertChecksum(verifier, "pom", "ddfa2de1fd5765bbd72829841abfa7a1fde7ff21");
}
private void assertChecksum(Verifier verifier, String ext, String checksum) throws Exception {
String path = verifier.getArtifactPath("org.apache.maven.its.mng4679", "dep", "0.1-SNAPSHOT", ext);
String actual = ItUtils.calcHash(new File(path), "SHA-1");
assertEquals(checksum, actual);
}
}
| MavenITmng4679SnapshotUpdateInPluginTest |
java | elastic__elasticsearch | server/src/internalClusterTest/java/org/elasticsearch/gateway/MetadataNodesIT.java | {
"start": 1547,
"end": 10473
} | class ____ extends ESIntegTestCase {
public void testMetaWrittenAlsoOnDataNode() throws Exception {
// this test checks that index state is written on data only nodes if they have a shard allocated
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_replicas", 0)));
index("test", "1", jsonBuilder().startObject().field("text", "some text").endObject());
ensureGreen("test");
assertIndexInMetaState(dataNode, "test");
assertIndexInMetaState(masterNode, "test");
}
public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Exception {
// this test checks that the index data is removed from a data only node once all shards have been allocated away from it
String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
List<String> nodeNames = internalCluster().startDataOnlyNodes(2);
String node1 = nodeNames.get(0);
String node2 = nodeNames.get(1);
String index = "index";
assertAcked(
prepareCreate(index).setSettings(
Settings.builder()
.put("index.number_of_replicas", 0)
.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node1)
)
);
index(index, "1", jsonBuilder().startObject().field("text", "some text").endObject());
ensureGreen();
assertIndexInMetaState(node1, index);
Index resolveIndex = resolveIndex(index);
assertIndexDirectoryExists(node1, resolveIndex);
assertIndexDirectoryDeleted(node2, resolveIndex);
assertIndexInMetaState(masterNode, index);
assertIndexDirectoryDeleted(masterNode, resolveIndex);
logger.debug("relocating index...");
updateIndexSettings(Settings.builder().put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node2), index);
clusterAdmin().prepareHealth(TEST_REQUEST_TIMEOUT).setWaitForNoRelocatingShards(true).get();
ensureGreen();
assertIndexDirectoryDeleted(node1, resolveIndex);
assertIndexInMetaState(node2, index);
assertIndexDirectoryExists(node2, resolveIndex);
assertIndexInMetaState(masterNode, index);
assertIndexDirectoryDeleted(masterNode, resolveIndex);
indicesAdmin().prepareDelete(index).get();
assertIndexDirectoryDeleted(node1, resolveIndex);
assertIndexDirectoryDeleted(node2, resolveIndex);
}
@SuppressWarnings("unchecked")
public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception {
    // Verifies that index metadata updates (here: mapping changes) are persisted on the
    // data node even while the index is CLOSED, and that this still works immediately
    // after a node restart (when the node's in-memory bookkeeping starts out empty).
    String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
    final String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
    final String index = "index";
    // Zero replicas so the single data node fully owns the index.
    assertAcked(prepareCreate(index).setSettings(Settings.builder().put("index.number_of_replicas", 0)));
    logger.info("--> wait for green index");
    ensureGreen();
    logger.info("--> wait for meta state written for index");
    assertIndexInMetaState(dataNode, index);
    assertIndexInMetaState(masterNode, index);
    logger.info("--> close index");
    indicesAdmin().prepareClose(index).get();
    // Confirm the index is now reported as CLOSE in the cluster state.
    ClusterStateResponse clusterStateResponse = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get();
    assertThat(
        clusterStateResponse.getState().getMetadata().getProject().index(index).getState().name(),
        equalTo(IndexMetadata.State.CLOSE.name())
    );
    // Update the mapping; this should cause the new metadata to be written although the index is closed.
    indicesAdmin().preparePutMapping(index)
        .setSource(
            jsonBuilder().startObject()
                .startObject("properties")
                .startObject("integer_field")
                .field("type", "integer")
                .endObject()
                .endObject()
                .endObject()
        )
        .get();
    GetMappingsResponse getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
    assertNotNull(
        ((Map<String, ?>) (getMappingsResponse.getMappings().get(index).getSourceAsMap().get("properties"))).get("integer_field")
    );
    // Make sure the mapping was also written on the data node although the index is closed.
    Map<String, IndexMetadata> indicesMetadata = getIndicesMetadataOnNode(dataNode);
    assertNotNull(((Map<String, ?>) (indicesMetadata.get(index).mapping().getSourceAsMap().get("properties"))).get("integer_field"));
    assertThat(indicesMetadata.get(index).getState(), equalTo(IndexMetadata.State.CLOSE));
    /* Try the same and see if this also works if node was just restarted.
     * Each node holds an array of indices it knows of and checks if it should
     * write new meta data by looking up in this array. We need it because if an
     * index is closed it will not appear in the shard routing and we therefore
     * need to keep track of what we wrote before. However, when the node is
     * restarted this array is empty and we have to fill it before we decide
     * what we write. This is why we explicitly test for it.
     */
    internalCluster().restartNode(dataNode, new RestartCallback());
    indicesAdmin().preparePutMapping(index)
        .setSource(
            jsonBuilder().startObject()
                .startObject("properties")
                .startObject("float_field")
                .field("type", "float")
                .endObject()
                .endObject()
                .endObject()
        )
        .get();
    getMappingsResponse = indicesAdmin().prepareGetMappings(TEST_REQUEST_TIMEOUT, index).get();
    assertNotNull(
        ((Map<String, ?>) (getMappingsResponse.getMappings().get(index).getSourceAsMap().get("properties"))).get("float_field")
    );
    // Make sure the post-restart mapping update was also written on the data node while closed.
    indicesMetadata = getIndicesMetadataOnNode(dataNode);
    assertNotNull(((Map<String, ?>) (indicesMetadata.get(index).mapping().getSourceAsMap().get("properties"))).get("float_field"));
    assertThat(indicesMetadata.get(index).getState(), equalTo(IndexMetadata.State.CLOSE));
    // Finally check that metadata is also written when the index is opened again.
    assertAcked(indicesAdmin().prepareOpen(index).get());
    // Make sure index is fully initialized and nothing is changed anymore.
    ensureGreen();
    indicesMetadata = getIndicesMetadataOnNode(dataNode);
    assertThat(indicesMetadata.get(index).getState(), equalTo(IndexMetadata.State.OPEN));
}
/**
 * Waits (via {@code assertBusy}) until the on-disk directory of {@code index}
 * no longer exists on the node named {@code nodeName}.
 */
protected void assertIndexDirectoryDeleted(final String nodeName, final Index index) throws Exception {
    final String failureMessage = "Expecting index directory of " + index + " to be deleted from node " + nodeName;
    assertBusy(() -> {
        assertFalse(failureMessage, indexDirectoryExists(nodeName, index));
    });
}
/**
 * Waits (via {@code assertBusy}) until the on-disk directory of {@code index}
 * exists on the node named {@code nodeName}.
 */
protected void assertIndexDirectoryExists(final String nodeName, final Index index) throws Exception {
    final String failureMessage = "Expecting index directory of " + index + " to exist on node " + nodeName;
    assertBusy(() -> {
        assertTrue(failureMessage, indexDirectoryExists(nodeName, index));
    });
}
/**
 * Asserts (with retries) that the metadata of {@code indexName} is present in the
 * cluster state applied on node {@code nodeName}.
 *
 * @param nodeName  name of the node whose applied state is inspected
 * @param indexName name of the index expected in that node's metadata
 * @throws Exception if the busy-wait loop itself fails
 */
protected void assertIndexInMetaState(final String nodeName, final String indexName) throws Exception {
    assertBusy(() -> {
        try {
            assertTrue(
                "Expecting meta state of index " + indexName + " to be on node " + nodeName,
                getIndicesMetadataOnNode(nodeName).containsKey(indexName)
            );
        } catch (Exception e) {
            logger.info("failed to load meta state", e);
            // Include the cause in the assertion message so a persistent failure is
            // diagnosable from the test report, not only from the log output.
            fail("could not load meta state: " + e);
        }
    });
}
/**
 * Checks whether any configured data path of the given node still holds a
 * directory for {@code index}.
 */
private boolean indexDirectoryExists(String nodeName, Index index) {
    final NodeEnvironment nodeEnvironment = ((InternalTestCluster) cluster()).getInstance(NodeEnvironment.class, nodeName);
    final Path[] candidatePaths = nodeEnvironment.indexPaths(index);
    boolean found = false;
    for (int i = 0; i < candidatePaths.length && found == false; i++) {
        found = Files.exists(candidatePaths[i]);
    }
    return found;
}
/** Fetches the indices metadata from the applier-side cluster state of {@code nodeName}. */
private Map<String, IndexMetadata> getIndicesMetadataOnNode(String nodeName) {
    final Coordinator nodeCoordinator = internalCluster().getInstance(Coordinator.class, nodeName);
    final var appliedState = nodeCoordinator.getApplierState();
    return appliedState.getMetadata().getProject().indices();
}
}
| MetadataNodesIT |
java | apache__flink | flink-state-backends/flink-statebackend-forst/src/main/java/org/apache/flink/state/forst/ForStDBPutRequest.java | {
"start": 1360,
"end": 3996
} | class ____<K, N, V> {
// Contextual key (user key plus namespace) identifying the entry to write.
final ContextKey<K, N> key;

// Value to write; null marks this request as a delete (see process()).
@Nullable final V value;

// Whether the write uses RocksDB's merge operation instead of a plain put.
final boolean isMerge;

// Inner table that owns key/value serialization and the target column family.
final ForStInnerTable<K, N, V> table;

// Completed by the caller via completeStateFuture() / completeStateFutureExceptionally().
final InternalAsyncFuture<Void> future;

/**
 * Creates a put/merge/delete request. Prefer the static factories
 * {@code of} / {@code ofMerge} over calling this constructor directly.
 */
ForStDBPutRequest(
        ContextKey<K, N> key,
        V value,
        boolean isMerge,
        ForStInnerTable<K, N, V> table,
        InternalAsyncFuture<Void> future) {
    this.key = key;
    this.value = value;
    this.isMerge = isMerge;
    this.table = table;
    this.future = future;
}
/**
 * Applies this request to the given write batch: a delete when {@code value}
 * is null, a merge when {@code isMerge} is set, otherwise a plain put.
 */
public void process(ForStDBWriteBatchWrapper writeBatchWrapper, RocksDB db)
        throws IOException, RocksDBException {
    final ColumnFamilyHandle columnFamily = table.getColumnFamilyHandle();
    final byte[] serializedKey = buildSerializedKey();
    if (value == null) {
        // No value: this request represents a deletion of the key.
        writeBatchWrapper.remove(columnFamily, serializedKey);
    } else if (isMerge) {
        writeBatchWrapper.merge(columnFamily, serializedKey, buildSerializedValue());
    } else {
        writeBatchWrapper.put(columnFamily, serializedKey, buildSerializedValue());
    }
}
/** Encodes the contextual key into the table's binary key format. */
public byte[] buildSerializedKey() throws IOException {
    final byte[] serializedKey = table.serializeKey(key);
    return serializedKey;
}
/**
 * Encodes the non-null value of this request into its binary form.
 *
 * <p>Only invoked for put/merge requests; delete requests ({@code value == null})
 * never reach this method (see {@code process}).
 */
public byte[] buildSerializedValue() throws IOException {
    assert value != null;
    return table.serializeValue(value);
}
/** Completes the associated state future successfully (with a {@code null} result). */
public void completeStateFuture() {
    future.complete(null);
}
/**
 * Fails the associated state future.
 *
 * @param message context message describing the failure
 * @param ex the underlying cause
 */
public void completeStateFutureExceptionally(String message, Throwable ex) {
    future.completeExceptionally(message, ex);
}
/**
 * Creates a plain (non-merge) put request. If {@code value} is {@code null},
 * the request instead signifies the deletion of the data associated with
 * {@code key}.
 *
 * @param key contextual key to write
 * @param value value to store, or {@code null} to delete the entry
 * @param table inner table providing serialization and the column family
 * @param future future completed once the request has been processed
 */
static <K, N, V> ForStDBPutRequest<K, N, V> of(
        ContextKey<K, N> key,
        @Nullable V value,
        ForStInnerTable<K, N, V> table,
        InternalAsyncFuture<Void> future) {
    return new ForStDBPutRequest<>(key, value, false, table, future);
}
/**
 * Creates a merge request, which is applied via RocksDB's merge operation
 * rather than a plain put. A {@code null} value still signifies deletion, as
 * with {@code of}.
 *
 * @param key contextual key to write
 * @param value value to merge, or {@code null} to delete the entry
 * @param table inner table providing serialization and the column family
 * @param future future completed once the request has been processed
 */
static <K, N, V> ForStDBPutRequest<K, N, V> ofMerge(
        ContextKey<K, N> key,
        @Nullable V value,
        ForStInnerTable<K, N, V> table,
        InternalAsyncFuture<Void> future) {
    return new ForStDBPutRequest<>(key, value, true, table, future);
}
// --------------- For testing usage ---------------

/** Returns {@code true} when this request represents a delete (no value to write). */
@VisibleForTesting
public boolean valueIsNull() {
    return value == null;
}
/** Exposes the target column family of the underlying table for tests. */
@VisibleForTesting
public ColumnFamilyHandle getColumnFamilyHandle() {
    return table.getColumnFamilyHandle();
}
}
| ForStDBPutRequest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.