language stringclasses 1
value | repo stringclasses 60
values | path stringlengths 22 294 | class_span dict | source stringlengths 13 1.16M | target stringlengths 1 113 |
|---|---|---|---|---|---|
java | netty__netty | example/src/main/java/io/netty/example/stomp/websocket/StompWebSocketFrameEncoder.java | {
"start": 1445,
"end": 2853
} | class ____ extends StompSubframeEncoder {
@Override
public void encode(ChannelHandlerContext ctx, StompSubframe msg, List<Object> out) throws Exception {
super.encode(ctx, msg, out);
}
@Override
protected WebSocketFrame convertFullFrame(StompFrame original, ByteBuf encoded) {
if (isTextFrame(original)) {
return new TextWebSocketFrame(encoded);
}
return new BinaryWebSocketFrame(encoded);
}
@Override
protected WebSocketFrame convertHeadersSubFrame(StompHeadersSubframe original, ByteBuf encoded) {
if (isTextFrame(original)) {
return new TextWebSocketFrame(false, 0, encoded);
}
return new BinaryWebSocketFrame(false, 0, encoded);
}
@Override
protected WebSocketFrame convertContentSubFrame(StompContentSubframe original, ByteBuf encoded) {
if (original instanceof LastStompContentSubframe) {
return new ContinuationWebSocketFrame(true, 0, encoded);
}
return new ContinuationWebSocketFrame(false, 0, encoded);
}
private static boolean isTextFrame(StompHeadersSubframe headersSubframe) {
String contentType = headersSubframe.headers().getAsString(StompHeaders.CONTENT_TYPE);
return contentType != null && (contentType.startsWith("text") || contentType.startsWith("application/json"));
}
}
| StompWebSocketFrameEncoder |
java | apache__flink | flink-table/flink-table-api-java-bridge/src/main/java/org/apache/flink/table/api/bridge/java/StreamTableEnvironment.java | {
"start": 19914,
"end": 21279
} | class ____ data type, use {@link
* #toDataStream(Table, Class)} or {@link #toDataStream(Table, AbstractDataType)} instead.
*
* <p>Note that the type system of the table ecosystem is richer than the one of the DataStream
* API. The table runtime will make sure to properly serialize the output records to the first
* operator of the DataStream API. Afterwards, the {@link Types} semantics of the DataStream API
* need to be considered.
*
* <p>If the input table contains a single rowtime column, it will be propagated into a stream
* record's timestamp. Watermarks will be propagated as well.
*
* @param table The {@link Table} to convert. It must be insert-only.
* @return The converted {@link DataStream}.
* @see #toDataStream(Table, AbstractDataType)
* @see #toChangelogStream(Table)
*/
DataStream<Row> toDataStream(Table table);
/**
* Converts the given {@link Table} into a {@link DataStream} of the given {@link Class}.
*
* <p>See {@link #toDataStream(Table, AbstractDataType)} for more information on how a {@link
* Table} is translated into a {@link DataStream}.
*
* <p>This method is a shortcut for:
*
* <pre>
* tableEnv.toDataStream(table, DataTypes.of(targetClass))
* </pre>
*
* <p>Calling this method with a | or |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/sftp/integration/SftpChangedReadLockIT.java | {
"start": 1540,
"end": 3593
} | class ____ extends SftpServerTestSupport {
private static final Logger LOG = LoggerFactory.getLogger(SftpChangedReadLockIT.class);
@TempDir
Path testDirectory;
protected String getFtpUrl() {
return "sftp://localhost:{{ftp.server.port}}/{{ftp.root.dir}}/changed" +
"?username=admin&password=admin&readLock=changed&readLockCheckInterval=1000&delete=true&knownHostsFile="
+ service.getKnownHostsFile();
}
@Test
public void testChangedReadLock() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(1);
mock.expectedFileExists(testDirectory.resolve("out/slowfile.dat"));
context.getRouteController().startRoute("foo");
writeSlowFile();
MockEndpoint.assertIsSatisfied(context);
String content = context.getTypeConverter().convertTo(String.class, testDirectory.resolve("out/slowfile.dat").toFile());
String[] lines = content.split(LS);
assertEquals(20, lines.length, "There should be 20 lines in the file");
for (int i = 0; i < 20; i++) {
assertEquals("Line " + i, lines[i]);
}
}
private void writeSlowFile() throws Exception {
LOG.debug("Writing slow file...");
createDirectory(ftpFile("changed"));
FileOutputStream fos = new FileOutputStream(ftpFile("changed/slowfile.dat").toFile(), true);
for (int i = 0; i < 20; i++) {
fos.write(("Line " + i + LS).getBytes());
LOG.debug("Writing line {}", i);
Thread.sleep(200);
}
fos.flush();
fos.close();
LOG.debug("Writing slow file DONE...");
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from(getFtpUrl()).routeId("foo").noAutoStartup().to(TestSupport.fileUri(testDirectory, "out"), "mock:result");
}
};
}
}
| SftpChangedReadLockIT |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/boot/database/qualfiedTableNaming/DefaultCatalogAndSchemaTest.java | {
"start": 40149,
"end": 41434
} | class ____ {
private final String catalog;
private final String schema;
private ExpectedQualifier(String catalog, String schema) {
this.catalog = catalog;
this.schema = schema;
}
String patternStringForNameWithThisQualifier(String patternStringForName) {
if ( catalog == null && schema == null ) {
// Look for unqualified names
return "(?<!\\.)" + patternStringForName;
}
else {
// Look for a qualified name with this exact qualifier
return "(?<!\\.)" + patternStringForQualifier() + patternStringForName;
}
}
String patternStringForNameWithDifferentQualifier(String patternStringForName) {
if ( catalog == null && schema == null ) {
// Look for a qualified name with any qualifier
return "\\." + patternStringForName;
}
else {
// Look for a qualified name with a different qualifier
// ignoring content of string literals (preceded with a single-quote)
return "(?<!" + patternStringForQualifier() + "|')" + patternStringForName;
}
}
private String patternStringForQualifier() {
return ( catalog != null ? Pattern.quote( catalog ) + "." : "" )
+ ( schema != null ? Pattern.quote( schema ) + "." : "" );
}
}
@Entity(name = EntityWithDefaultQualifiers.NAME)
public static | ExpectedQualifier |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/jmx/export/assembler/AbstractMBeanInfoAssembler.java | {
"start": 7087,
"end": 7840
} | interface ____ the managed resource.
* <p>Default implementation returns an empty array of {@code ModelMBeanConstructorInfo}.
* @param managedBean the bean instance (might be an AOP proxy)
* @param beanKey the key associated with the MBean in the beans map
* of the {@code MBeanExporter}
* @return the constructor metadata
* @throws JMException in case of errors
*/
protected ModelMBeanConstructorInfo[] getConstructorInfo(Object managedBean, String beanKey)
throws JMException {
return new ModelMBeanConstructorInfo[0];
}
/**
* Get the notification metadata for the MBean resource. Subclasses should implement
* this method to return the appropriate metadata for all notifications that should
* be exposed in the management | for |
java | apache__camel | dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/ZeebeComponentBuilderFactory.java | {
"start": 1403,
"end": 1890
} | interface ____ {
/**
* Zeebe (camel-zeebe)
* Zeebe component which integrates with Camunda Zeebe to interact with the
* API.
*
* Category: workflow,saas
* Since: 3.21
* Maven coordinates: org.apache.camel:camel-zeebe
*
* @return the dsl builder
*/
static ZeebeComponentBuilder zeebe() {
return new ZeebeComponentBuilderImpl();
}
/**
* Builder for the Zeebe component.
*/
| ZeebeComponentBuilderFactory |
java | micronaut-projects__micronaut-core | inject/src/main/java/io/micronaut/context/DefaultBeanContext.java | {
"start": 11284,
"end": 11712
} | class ____
*/
public DefaultBeanContext(@NonNull ClassLoader classLoader) {
this(new BeanContextConfiguration() {
@NonNull
@Override
public ClassLoader getClassLoader() {
ArgumentUtils.requireNonNull("classLoader", classLoader);
return classLoader;
}
});
}
/**
* Construct a new bean context with the given | loader |
java | processing__processing4 | core/src/processing/core/PShapeSVG.java | {
"start": 56911,
"end": 60879
} | class ____ extends Gradient {
public float cx, cy, r;
public RadialGradient(PShapeSVG parent, XML properties) {
super(parent, properties);
this.cx = getFloatWithUnit(properties, "cx", svgWidth);
this.cy = getFloatWithUnit(properties, "cy", svgHeight);
this.r = getFloatWithUnit(properties, "r", svgSizeXY);
String transformStr =
properties.getString("gradientTransform");
if (transformStr != null) {
float[] t = parseTransform(transformStr).get(null);
this.transform = new AffineTransform(t[0], t[3], t[1], t[4], t[2], t[5]);
Point2D t1 = transform.transform(new Point2D.Float(cx, cy), null);
Point2D t2 = transform.transform(new Point2D.Float(cx + r, cy), null);
this.cx = (float) t1.getX();
this.cy = (float) t1.getY();
this.r = (float) (t2.getX() - t1.getX());
}
}
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
// static private float TEXT_QUALITY = 1;
static private PFont parseFont(XML properties) {
String fontFamily = null;
float size = 10;
int weight = PLAIN; // 0
int italic = 0;
if (properties.hasAttribute("style")) {
String styleText = properties.getString("style");
String[] styleTokens = PApplet.splitTokens(styleText, ";");
//PApplet.println(styleTokens);
for (int i = 0; i < styleTokens.length; i++) {
String[] tokens = PApplet.splitTokens(styleTokens[i], ":");
//PApplet.println(tokens);
tokens[0] = PApplet.trim(tokens[0]);
if (tokens[0].equals("font-style")) {
// PApplet.println("font-style: " + tokens[1]);
if (tokens[1].contains("italic")) {
italic = ITALIC;
}
} else if (tokens[0].equals("font-variant")) {
// PApplet.println("font-variant: " + tokens[1]);
// setFillOpacity(tokens[1]);
} else if (tokens[0].equals("font-weight")) {
// PApplet.println("font-weight: " + tokens[1]);
if (tokens[1].contains("bold")) {
weight = BOLD;
// PApplet.println("Bold weight ! ");
}
} else if (tokens[0].equals("font-stretch")) {
// not supported.
} else if (tokens[0].equals("font-size")) {
// PApplet.println("font-size: " + tokens[1]);
size = Float.parseFloat(tokens[1].split("px")[0]);
// PApplet.println("font-size-parsed: " + size);
} else if (tokens[0].equals("line-height")) {
// not supported
} else if (tokens[0].equals("font-family")) {
// PApplet.println("Font-family: " + tokens[1]);
fontFamily = tokens[1];
} else if (tokens[0].equals("text-align")) {
// not supported
} else if (tokens[0].equals("letter-spacing")) {
// not supported
} else if (tokens[0].equals("word-spacing")) {
// not supported
} else if (tokens[0].equals("writing-mode")) {
// not supported
} else if (tokens[0].equals("text-anchor")) {
// not supported
} else {
// Other attributes are not yet implemented
}
}
}
if (fontFamily == null) {
return null;
}
// size = size * TEXT_QUALITY;
return createFont(fontFamily, weight | italic, size, true);
}
static protected PFont createFont(String name, int weight,
float size, boolean smooth) {
//System.out.println("Try to create a font of " + name + " family, " + weight);
java.awt.Font baseFont = new java.awt.Font(name, weight, (int) size);
//System.out.println("Resulting family : " + baseFont.getFamily() + " " + baseFont.getStyle());
return new PFont(baseFont.deriveFont(size), smooth, null);
}
// . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
static public | RadialGradient |
java | apache__camel | components/camel-spring-parent/camel-spring-xml/src/test/java/org/apache/camel/spring/processor/SpringSagaTest.java | {
"start": 1175,
"end": 2747
} | class ____ extends ContextTestSupport {
@Test
public void testSendAMessageWithinASaga() throws Exception {
MockEndpoint resultEndpoint = getMockEndpoint("mock:end");
resultEndpoint.expectedBodiesReceived("correct");
MockEndpoint completionEndpoint = getMockEndpoint("mock:completion");
completionEndpoint.expectedMessageCount(1);
completionEndpoint.expectedHeaderReceived("myOptionKey", "myOptionValue");
completionEndpoint.expectedHeaderReceived("myOptionKey2", "myOptionValue2");
sendBody("direct:start", "correct");
resultEndpoint.assertIsSatisfied();
completionEndpoint.assertIsSatisfied();
}
@Test
public void testCompensationWithinASaga() throws Exception {
MockEndpoint resultEndpoint = getMockEndpoint("mock:end");
resultEndpoint.expectedMessageCount(1);
resultEndpoint.setResultWaitTime(100);
MockEndpoint compensationEndpoint = getMockEndpoint("mock:compensation");
compensationEndpoint.expectedMessageCount(1);
compensationEndpoint.expectedHeaderReceived("myOptionKey", "myOptionValue");
compensationEndpoint.expectedHeaderReceived("myOptionKey2", "myOptionValue2");
sendBody("direct:start", "fail");
compensationEndpoint.assertIsSatisfied();
resultEndpoint.assertIsNotSatisfied();
}
@Override
protected CamelContext createCamelContext() throws Exception {
return createSpringCamelContext(this, "org/apache/camel/spring/processor/saga.xml");
}
}
| SpringSagaTest |
java | micronaut-projects__micronaut-core | core-processor/src/main/java/io/micronaut/inject/ast/ClassElement.java | {
"start": 13977,
"end": 14347
} | class ____
*/
@NonNull
default List<PropertyElement> getBeanProperties() {
return Collections.emptyList();
}
/**
* Returns the synthetic bean properties. The properties where one of the methods (getter or setter)
* is synthetic - not user defined but created by the compiler.
*
* @return The bean properties for this | element |
java | netty__netty | handler/src/main/java/io/netty/handler/ssl/ReferenceCountedOpenSslEngine.java | {
"start": 6303,
"end": 71578
} | enum ____ {
/**
* Not started yet.
*/
NOT_STARTED,
/**
* Started via unwrap/wrap.
*/
STARTED_IMPLICITLY,
/**
* Started via {@link #beginHandshake()}.
*/
STARTED_EXPLICITLY,
/**
* Handshake is finished.
*/
FINISHED
}
private HandshakeState handshakeState = HandshakeState.NOT_STARTED;
private boolean receivedShutdown;
private volatile boolean destroyed;
private volatile String applicationProtocol;
private volatile boolean needTask;
private boolean hasTLSv13Cipher;
private boolean sessionSet;
// Reference Counting
private final ResourceLeakTracker<ReferenceCountedOpenSslEngine> leak;
private final AbstractReferenceCounted refCnt = new AbstractReferenceCounted() {
@Override
public ReferenceCounted touch(Object hint) {
if (leak != null) {
leak.record(hint);
}
return ReferenceCountedOpenSslEngine.this;
}
@Override
protected void deallocate() {
shutdown();
if (leak != null) {
boolean closed = leak.close(ReferenceCountedOpenSslEngine.this);
assert closed;
}
parentContext.release();
}
};
private final Set<String> enabledProtocols = new LinkedHashSet<String>();
private volatile ClientAuth clientAuth = ClientAuth.NONE;
private String endpointIdentificationAlgorithm;
private List<SNIServerName> serverNames;
private String[] groups;
private AlgorithmConstraints algorithmConstraints;
// Mark as volatile as accessed by checkSniHostnameMatch(...).
private volatile Collection<SNIMatcher> matchers;
// SSL Engine status variables
private boolean isInboundDone;
private boolean outboundClosed;
final boolean jdkCompatibilityMode;
private final boolean clientMode;
final ByteBufAllocator alloc;
private final Map<Long, ReferenceCountedOpenSslEngine> engines;
private final OpenSslApplicationProtocolNegotiator apn;
private final ReferenceCountedOpenSslContext parentContext;
private final OpenSslInternalSession session;
private final ByteBuffer[] singleSrcBuffer = new ByteBuffer[1];
private final ByteBuffer[] singleDstBuffer = new ByteBuffer[1];
private final boolean enableOcsp;
private int maxWrapOverhead;
private int maxWrapBufferSize;
private Throwable pendingException;
/**
* Create a new instance.
* @param context Reference count release responsibility is not transferred! The callee still owns this object.
* @param alloc The allocator to use.
* @param peerHost The peer host name.
* @param peerPort The peer port.
* @param jdkCompatibilityMode {@code true} to behave like described in
* https://docs.oracle.com/javase/7/docs/api/javax/net/ssl/SSLEngine.html.
* {@code false} allows for partial and/or multiple packets to be process in a single
* wrap or unwrap call.
* @param leakDetection {@code true} to enable leak detection of this object.
*/
ReferenceCountedOpenSslEngine(ReferenceCountedOpenSslContext context, final ByteBufAllocator alloc, String peerHost,
int peerPort, boolean jdkCompatibilityMode, boolean leakDetection,
String endpointIdentificationAlgorithm, List<SNIServerName> serverNames) {
super(peerHost, peerPort);
OpenSsl.ensureAvailability();
engines = context.engines;
enableOcsp = context.enableOcsp;
groups = context.groups.clone();
this.jdkCompatibilityMode = jdkCompatibilityMode;
this.alloc = checkNotNull(alloc, "alloc");
apn = (OpenSslApplicationProtocolNegotiator) context.applicationProtocolNegotiator();
clientMode = context.isClient();
this.endpointIdentificationAlgorithm = endpointIdentificationAlgorithm;
this.serverNames = serverNames;
session = new ExtendedOpenSslSession(new DefaultOpenSslSession(context.sessionContext())) {
private String[] peerSupportedSignatureAlgorithms;
private List<SNIServerName> requestedServerNames;
@Override
public List<SNIServerName> getRequestedServerNames() {
if (clientMode) {
List<SNIServerName> names = ReferenceCountedOpenSslEngine.this.serverNames;
return names == null ? Collections.emptyList() : Collections.unmodifiableList(names);
} else {
synchronized (ReferenceCountedOpenSslEngine.this) {
if (requestedServerNames == null) {
if (destroyed) {
requestedServerNames = Collections.emptyList();
} else {
String name = SSL.getSniHostname(ssl);
if (name == null) {
requestedServerNames = Collections.emptyList();
} else {
// Convert to bytes as we do not want to do any strict validation of the
// SNIHostName while creating it.
byte[] hostname = SSL.getSniHostname(ssl).getBytes(CharsetUtil.UTF_8);
requestedServerNames = hostname.length == 0 ?
Collections.emptyList() :
Collections.singletonList(new SNIHostName(hostname));
}
}
}
return requestedServerNames;
}
}
}
@Override
public String[] getPeerSupportedSignatureAlgorithms() {
synchronized (ReferenceCountedOpenSslEngine.this) {
if (peerSupportedSignatureAlgorithms == null) {
if (destroyed) {
peerSupportedSignatureAlgorithms = EMPTY_STRINGS;
} else {
String[] algs = SSL.getSigAlgs(ssl);
if (algs == null) {
peerSupportedSignatureAlgorithms = EMPTY_STRINGS;
} else {
Set<String> algorithmList = new LinkedHashSet<>(algs.length);
for (String alg: algs) {
String converted = SignatureAlgorithmConverter.toJavaName(alg);
if (converted != null) {
algorithmList.add(converted);
}
}
peerSupportedSignatureAlgorithms = algorithmList.toArray(EMPTY_STRINGS);
}
}
}
return peerSupportedSignatureAlgorithms.clone();
}
}
@Override
public List<byte[]> getStatusResponses() {
byte[] ocspResponse = null;
if (enableOcsp && clientMode) {
synchronized (ReferenceCountedOpenSslEngine.this) {
if (!destroyed) {
ocspResponse = SSL.getOcspResponse(ssl);
}
}
}
return ocspResponse == null ?
Collections.emptyList() : Collections.singletonList(ocspResponse);
}
};
try {
// Let's retain the context before we try to use it so we ensure it is not released in between by someone
// calling context.release()
context.retain();
if (!context.sessionContext().useKeyManager()) {
session.setLocalCertificate(context.keyCertChain);
}
Lock readerLock = context.ctxLock.readLock();
readerLock.lock();
final long finalSsl;
try {
finalSsl = SSL.newSSL(context.ctx, !context.isClient());
} finally {
readerLock.unlock();
}
synchronized (this) {
ssl = finalSsl;
try {
networkBIO = SSL.bioNewByteBuffer(ssl, context.getBioNonApplicationBufferSize());
// Set the client auth mode, this needs to be done via setClientAuth(...) method so we actually
// call the needed JNI methods.
setClientAuth(clientMode ? ClientAuth.NONE : context.clientAuth);
assert context.protocols != null;
hasTLSv13Cipher = context.hasTLSv13Cipher;
setEnabledProtocols(context.protocols);
// Use SNI if peerHost was specified and a valid hostname
// See https://github.com/netty/netty/issues/4746
boolean usePeerHost = SslUtils.isValidHostNameForSNI(peerHost) && isValidHostNameForSNI(peerHost);
boolean useServerNames = serverNames != null && !serverNames.isEmpty();
if (clientMode && (usePeerHost || useServerNames)) {
// We do some extra validation to ensure we can construct the SNIHostName later again.
if (usePeerHost) {
SSL.setTlsExtHostName(ssl, peerHost);
this.serverNames = Collections.singletonList(new SNIHostName(peerHost));
} else {
for (SNIServerName serverName : serverNames) {
if (serverName instanceof SNIHostName) {
SNIHostName name = (SNIHostName) serverName;
SSL.setTlsExtHostName(ssl, name.getAsciiName());
} else {
throw new IllegalArgumentException("Only " + SNIHostName.class.getName()
+ " instances are supported, but found: " + serverName);
}
}
}
}
if (enableOcsp) {
SSL.enableOcsp(ssl);
}
if (!jdkCompatibilityMode) {
SSL.setMode(ssl, SSL.getMode(ssl) | SSL.SSL_MODE_ENABLE_PARTIAL_WRITE);
}
if (isProtocolEnabled(SSL.getOptions(ssl), SSL.SSL_OP_NO_TLSv1_3, SslProtocols.TLS_v1_3)) {
final boolean enableTickets = clientMode ?
ReferenceCountedOpenSslContext.CLIENT_ENABLE_SESSION_TICKET_TLSV13 :
ReferenceCountedOpenSslContext.SERVER_ENABLE_SESSION_TICKET_TLSV13;
if (enableTickets) {
// We should enable session tickets for stateless resumption when TLSv1.3 is enabled. This
// is also done by OpenJDK and without this session resumption does not work at all with
// BoringSSL when TLSv1.3 is used as BoringSSL only supports stateless resumption with
// TLSv1.3:
//
// See:
// https://bugs.openjdk.java.net/browse/JDK-8223922
// https://boringssl.googlesource.com/boringssl/+/refs/heads/master/ssl/tls13_server.cc#104
SSL.clearOptions(ssl, SSL.SSL_OP_NO_TICKET);
}
}
if ((OpenSsl.isBoringSSL() || OpenSsl.isAWSLC()) && clientMode) {
// If in client-mode and provider is BoringSSL or AWS-LC let's allow to renegotiate once as the
// server may use this for client auth.
//
// See https://github.com/netty/netty/issues/11529
SSL.setRenegotiateMode(ssl, SSL.SSL_RENEGOTIATE_ONCE);
}
// setMode may impact the overhead.
calculateMaxWrapOverhead();
// Configure any endpoint verification specified by the SslContext.
configureEndpointVerification(endpointIdentificationAlgorithm);
} catch (Throwable cause) {
// Call shutdown so we are sure we correctly release all native memory and also guard against the
// case when shutdown() will be called by the finalizer again.
shutdown();
PlatformDependent.throwException(cause);
}
}
} catch (Throwable cause) {
// Something did go wrong which means we will not be able to release the context later on. Release it
// now to prevent leaks.
context.release();
PlatformDependent.throwException(cause);
}
// Now that everything looks good and we're going to successfully return the
// object so we need to retain a reference to the parent context.
parentContext = context;
// Only create the leak after everything else was executed and so ensure we don't produce a false-positive for
// the ResourceLeakDetector.
leak = leakDetection ? leakDetector.track(this) : null;
}
private static boolean isValidHostNameForSNI(String hostname) {
try {
new SNIHostName(hostname);
return true;
} catch (IllegalArgumentException illegal) {
return false;
}
}
final synchronized String[] authMethods() {
if (destroyed) {
return EMPTY_STRINGS;
}
return SSL.authenticationMethods(ssl);
}
final void setKeyMaterial(OpenSslKeyMaterial keyMaterial) throws Exception {
synchronized (this) {
if (destroyed) {
return;
}
SSL.setKeyMaterial(ssl, keyMaterial.certificateChainAddress(), keyMaterial.privateKeyAddress());
}
session.setLocalCertificate(keyMaterial.certificateChain());
}
final synchronized SecretKeySpec masterKey() {
if (destroyed) {
return null;
}
return new SecretKeySpec(SSL.getMasterKey(ssl), "AES");
}
synchronized boolean isSessionReused() {
if (destroyed) {
return false;
}
return SSL.isSessionReused(ssl);
}
/**
* Sets the OCSP response.
*/
@UnstableApi
public void setOcspResponse(byte[] response) {
if (!enableOcsp) {
throw new IllegalStateException("OCSP stapling is not enabled");
}
if (clientMode) {
throw new IllegalStateException("Not a server SSLEngine");
}
synchronized (this) {
if (!destroyed) {
SSL.setOcspResponse(ssl, response);
}
}
}
/**
* Returns the OCSP response or {@code null} if the server didn't provide a stapled OCSP response.
*/
@UnstableApi
public byte[] getOcspResponse() {
if (!enableOcsp) {
throw new IllegalStateException("OCSP stapling is not enabled");
}
if (!clientMode) {
throw new IllegalStateException("Not a client SSLEngine");
}
synchronized (this) {
if (destroyed) {
return EmptyArrays.EMPTY_BYTES;
}
return SSL.getOcspResponse(ssl);
}
}
@Override
public final int refCnt() {
return refCnt.refCnt();
}
@Override
public final ReferenceCounted retain() {
refCnt.retain();
return this;
}
@Override
public final ReferenceCounted retain(int increment) {
refCnt.retain(increment);
return this;
}
@Override
public final ReferenceCounted touch() {
refCnt.touch();
return this;
}
@Override
public final ReferenceCounted touch(Object hint) {
refCnt.touch(hint);
return this;
}
@Override
public final boolean release() {
return refCnt.release();
}
@Override
public final boolean release(int decrement) {
return refCnt.release(decrement);
}
// These method will override the method defined by Java 8u251 and later. As we may compile with an earlier
// java8 version we don't use @Override annotations here.
public String getApplicationProtocol() {
return applicationProtocol;
}
// These method will override the method defined by Java 8u251 and later. As we may compile with an earlier
// java8 version we don't use @Override annotations here.
public String getHandshakeApplicationProtocol() {
return applicationProtocol;
}
@Override
public final synchronized SSLSession getHandshakeSession() {
// Javadocs state return value should be:
// null if this instance is not currently handshaking, or if the current handshake has not
// progressed far enough to create a basic SSLSession. Otherwise, this method returns the
// SSLSession currently being negotiated.
switch(handshakeState) {
case NOT_STARTED:
case FINISHED:
return null;
default:
return session;
}
}
/**
* Returns the pointer to the {@code SSL} object for this {@link ReferenceCountedOpenSslEngine}.
* Be aware that it is freed as soon as the {@link #release()} or {@link #shutdown()} methods are called.
* At this point {@code 0} will be returned.
*/
public final synchronized long sslPointer() {
return ssl;
}
/**
* Destroys this engine.
*/
public final synchronized void shutdown() {
if (!destroyed) {
destroyed = true;
// Let's check if engineMap is null as it could be in theory if we throw an OOME during the construction of
// ReferenceCountedOpenSslEngine (before we assign the field). This is needed as shutdown() is called from
// the finalizer as well.
if (engines != null) {
engines.remove(ssl);
}
SSL.freeSSL(ssl);
ssl = networkBIO = 0;
isInboundDone = outboundClosed = true;
}
// On shutdown clear all errors
SSL.clearError();
}
/**
* Write plaintext data to the OpenSSL internal BIO
*
* Calling this function with src.remaining == 0 is undefined.
*/
private int writePlaintextData(final ByteBuffer src, int len) {
final int pos = src.position();
final int limit = src.limit();
final int sslWrote;
if (src.isDirect()) {
sslWrote = SSL.writeToSSL(ssl, bufferAddress(src) + pos, len);
if (sslWrote > 0) {
src.position(pos + sslWrote);
}
} else {
ByteBuf buf = alloc.directBuffer(len);
try {
src.limit(pos + len);
buf.setBytes(0, src);
src.limit(limit);
sslWrote = SSL.writeToSSL(ssl, memoryAddress(buf), len);
if (sslWrote > 0) {
src.position(pos + sslWrote);
} else {
src.position(pos);
}
} finally {
buf.release();
}
}
return sslWrote;
}
synchronized void bioSetFd(int fd) {
if (!destroyed) {
SSL.bioSetFd(this.ssl, fd);
}
}
/**
* Write encrypted data to the OpenSSL network BIO.
*/
private ByteBuf writeEncryptedData(final ByteBuffer src, int len) throws SSLException {
final int pos = src.position();
if (src.isDirect()) {
SSL.bioSetByteBuffer(networkBIO, bufferAddress(src) + pos, len, false);
} else {
final ByteBuf buf = alloc.directBuffer(len);
try {
final int limit = src.limit();
src.limit(pos + len);
buf.writeBytes(src);
// Restore the original position and limit because we don't want to consume from `src`.
src.position(pos);
src.limit(limit);
SSL.bioSetByteBuffer(networkBIO, memoryAddress(buf), len, false);
return buf;
} catch (Throwable cause) {
buf.release();
PlatformDependent.throwException(cause);
}
}
return null;
}
/**
* Read plaintext data from the OpenSSL internal BIO
*/
private int readPlaintextData(final ByteBuffer dst) throws SSLException {
final int sslRead;
final int pos = dst.position();
if (dst.isDirect()) {
sslRead = SSL.readFromSSL(ssl, bufferAddress(dst) + pos, dst.limit() - pos);
if (sslRead > 0) {
dst.position(pos + sslRead);
}
} else {
final int limit = dst.limit();
final int len = min(maxEncryptedPacketLength0(), limit - pos);
final ByteBuf buf = alloc.directBuffer(len);
try {
sslRead = SSL.readFromSSL(ssl, memoryAddress(buf), len);
if (sslRead > 0) {
dst.limit(pos + sslRead);
buf.getBytes(buf.readerIndex(), dst);
dst.limit(limit);
}
} finally {
buf.release();
}
}
return sslRead;
}
/**
* Visible only for testing!
*/
final synchronized int maxWrapOverhead() {
return maxWrapOverhead;
}
/**
* Visible only for testing!
*/
final synchronized int maxEncryptedPacketLength() {
return maxEncryptedPacketLength0();
}
/**
* This method is intentionally not synchronized, only use if you know you are in the EventLoop
* thread and visibility on {@link #maxWrapOverhead} is achieved via other synchronized blocks.
*/
final int maxEncryptedPacketLength0() {
return maxWrapOverhead + MAX_PLAINTEXT_LENGTH;
}
/**
* This method is intentionally not synchronized, only use if you know you are in the EventLoop
* thread and visibility on {@link #maxWrapBufferSize} and {@link #maxWrapOverhead} is achieved
* via other synchronized blocks.
* <br>
* Calculates the max size of a single wrap operation for the given plaintextLength and
* numComponents.
*/
final int calculateMaxLengthForWrap(int plaintextLength, int numComponents) {
return (int) min(maxWrapBufferSize, plaintextLength + (long) maxWrapOverhead * numComponents);
}
/**
* This method is intentionally not synchronized, only use if you know you are in the EventLoop
* thread and visibility on {@link #maxWrapOverhead} is achieved via other synchronized blocks.
* <br>
* Calculates the size of the out net buf to create for the given plaintextLength and numComponents.
* This is not related to the max size per wrap, as we can wrap chunks at a time into one out net buf.
*/
final int calculateOutNetBufSize(int plaintextLength, int numComponents) {
return (int) min(MAX_VALUE, plaintextLength + (long) maxWrapOverhead * numComponents);
}
final synchronized int sslPending() {
return sslPending0();
}
/**
* It is assumed this method is called in a synchronized block (or the constructor)!
*/
private void calculateMaxWrapOverhead() {
maxWrapOverhead = SSL.getMaxWrapOverhead(ssl);
// maxWrapBufferSize must be set after maxWrapOverhead because there is a dependency on this value.
// If jdkCompatibility mode is off we allow enough space to encrypt 16 buffers at a time. This could be
// configurable in the future if necessary.
maxWrapBufferSize = jdkCompatibilityMode ? maxEncryptedPacketLength0() : maxEncryptedPacketLength0() << 4;
}
private int sslPending0() {
// OpenSSL has a limitation where if you call SSL_pending before the handshake is complete OpenSSL will throw a
// "called a function you should not call" error. Using the TLS_method instead of SSLv23_method may solve this
// issue but this API is only available in 1.1.0+ [1].
// [1] https://www.openssl.org/docs/man1.1.0/ssl/SSL_CTX_new.html
return handshakeState != HandshakeState.FINISHED ? 0 : SSL.sslPending(ssl);
}
private boolean isBytesAvailableEnoughForWrap(int bytesAvailable, int plaintextLength, int numComponents) {
return bytesAvailable - (long) maxWrapOverhead * numComponents >= plaintextLength;
}
    /**
     * Encrypts the plaintext in {@code srcs[offset..offset+length)} into {@code dst} via OpenSSL,
     * driving the TLS handshake implicitly if it has not finished yet.
     * <p>
     * The destination is wired directly into the network BIO (a temporary direct buffer is used if
     * {@code dst} is on the heap, copied back in the {@code finally} block). Handshake data, pending
     * alerts and close_notify bytes are always drained before application data is encrypted.
     *
     * @param srcs   source buffers holding plaintext; entries in range must be non-null.
     * @param offset first source index to consume.
     * @param length number of source buffers to consume.
     * @param dst    destination for the encrypted records; must not be read-only.
     * @return the engine result describing consumed/produced bytes and the handshake status.
     * @throws SSLException if OpenSSL reports a fatal error while handshaking or writing.
     */
    @Override
    public final SSLEngineResult wrap(
            final ByteBuffer[] srcs, int offset, final int length, final ByteBuffer dst) throws SSLException {
        // Throw required runtime exceptions
        checkNotNullWithIAE(srcs, "srcs");
        checkNotNullWithIAE(dst, "dst");
        if (offset >= srcs.length || offset + length > srcs.length) {
            throw new IndexOutOfBoundsException(
                    "offset: " + offset + ", length: " + length +
                            " (expected: offset <= offset + length <= srcs.length (" + srcs.length + "))");
        }
        if (dst.isReadOnly()) {
            throw new ReadOnlyBufferException();
        }
        synchronized (this) {
            if (isOutboundDone()) {
                // All drained in the outbound buffer
                return isInboundDone() || destroyed ? CLOSED_NOT_HANDSHAKING : NEED_UNWRAP_CLOSED;
            }
            int bytesProduced = 0;
            ByteBuf bioReadCopyBuf = null;
            try {
                // Setup the BIO buffer so that we directly write the encryption results into dst.
                if (dst.isDirect()) {
                    SSL.bioSetByteBuffer(networkBIO, bufferAddress(dst) + dst.position(), dst.remaining(),
                            true);
                } else {
                    // Heap destination: encrypt into a temporary direct buffer, copied back in finally.
                    bioReadCopyBuf = alloc.directBuffer(dst.remaining());
                    SSL.bioSetByteBuffer(networkBIO, memoryAddress(bioReadCopyBuf), bioReadCopyBuf.writableBytes(),
                            true);
                }
                int bioLengthBefore = SSL.bioLengthByteBuffer(networkBIO);
                // Explicitly use outboundClosed as we want to drain any bytes that are still present.
                if (outboundClosed) {
                    // If the outbound was closed we want to ensure we can produce the alert to the destination buffer.
                    // This is true even if we not using jdkCompatibilityMode.
                    //
                    // We use a plaintextLength of 2 as we at least want to have an alert fit into it.
                    // https://tools.ietf.org/html/rfc5246#section-7.2
                    if (!isBytesAvailableEnoughForWrap(dst.remaining(), 2, 1)) {
                        return new SSLEngineResult(BUFFER_OVERFLOW, getHandshakeStatus(), 0, 0);
                    }
                    // There is something left to drain.
                    // See https://github.com/netty/netty/issues/6260
                    bytesProduced = SSL.bioFlushByteBuffer(networkBIO);
                    if (bytesProduced <= 0) {
                        return newResultMayFinishHandshake(NOT_HANDSHAKING, 0, 0);
                    }
                    // It is possible when the outbound was closed there was not enough room in the non-application
                    // buffers to hold the close_notify. We should keep trying to close until we consume all the data
                    // OpenSSL can give us.
                    if (!doSSLShutdown()) {
                        return newResultMayFinishHandshake(NOT_HANDSHAKING, 0, bytesProduced);
                    }
                    bytesProduced = bioLengthBefore - SSL.bioLengthByteBuffer(networkBIO);
                    return newResultMayFinishHandshake(NEED_WRAP, 0, bytesProduced);
                }
                // Flush any data that may be implicitly generated by OpenSSL (handshake, close, etc..).
                SSLEngineResult.HandshakeStatus status = NOT_HANDSHAKING;
                HandshakeState oldHandshakeState = handshakeState;
                // Prepare OpenSSL to work in server mode and receive handshake
                if (handshakeState != HandshakeState.FINISHED) {
                    if (handshakeState != HandshakeState.STARTED_EXPLICITLY) {
                        // Update accepted so we know we triggered the handshake via wrap
                        handshakeState = HandshakeState.STARTED_IMPLICITLY;
                    }
                    // Flush any data that may have been written implicitly during the handshake by OpenSSL.
                    bytesProduced = SSL.bioFlushByteBuffer(networkBIO);
                    if (pendingException != null) {
                        // TODO(scott): It is possible that when the handshake failed there was not enough room in the
                        // non-application buffers to hold the alert. We should get all the data before progressing on.
                        // However I'm not aware of a way to do this with the OpenSSL APIs.
                        // See https://github.com/netty/netty/issues/6385.
                        // We produced / consumed some data during the handshake, signal back to the caller.
                        // If there is a handshake exception and we have produced data, we should send the data before
                        // we allow handshake() to throw the handshake exception.
                        //
                        // When the user calls wrap() again we will propagate the handshake error back to the user as
                        // soon as there is no more data to was produced (as part of an alert etc).
                        if (bytesProduced > 0) {
                            return newResult(NEED_WRAP, 0, bytesProduced);
                        }
                        // Nothing was produced see if there is a handshakeException that needs to be propagated
                        // to the caller by calling handshakeException() which will return the right HandshakeStatus
                        // if it can "recover" from the exception for now.
                        return newResult(handshakeException(), 0, 0);
                    }
                    status = handshake();
                    // Handshake may have generated more data, for example if the internal SSL buffer is small
                    // we may have freed up space by flushing above.
                    bytesProduced = bioLengthBefore - SSL.bioLengthByteBuffer(networkBIO);
                    if (status == NEED_TASK) {
                        return newResult(status, 0, bytesProduced);
                    }
                    if (bytesProduced > 0) {
                        // If we have filled up the dst buffer and we have not finished the handshake we should try to
                        // wrap again. Otherwise we should only try to wrap again if there is still data pending in
                        // SSL buffers.
                        return newResult(mayFinishHandshake(status != FINISHED ?
                                bytesProduced == bioLengthBefore ? NEED_WRAP :
                                        getHandshakeStatus(SSL.bioLengthNonApplication(networkBIO)) : FINISHED),
                                0, bytesProduced);
                    }
                    if (status == NEED_UNWRAP) {
                        // Signal if the outbound is done or not.
                        return isOutboundDone() ? NEED_UNWRAP_CLOSED : NEED_UNWRAP_OK;
                    }
                    // Explicit use outboundClosed and not outboundClosed() as we want to drain any bytes that are
                    // still present.
                    if (outboundClosed) {
                        bytesProduced = SSL.bioFlushByteBuffer(networkBIO);
                        return newResultMayFinishHandshake(status, 0, bytesProduced);
                    }
                }
                final int endOffset = offset + length;
                if (jdkCompatibilityMode ||
                        // If the handshake was not finished before we entered the method, we also ensure we only
                        // wrap one record. We do this to ensure we not produce any extra data before the caller
                        // of the method is able to observe handshake completion and react on it.
                        oldHandshakeState != HandshakeState.FINISHED) {
                    int srcsLen = 0;
                    for (int i = offset; i < endOffset; ++i) {
                        final ByteBuffer src = srcs[i];
                        if (src == null) {
                            throw new IllegalArgumentException("srcs[" + i + "] is null");
                        }
                        if (srcsLen == MAX_PLAINTEXT_LENGTH) {
                            continue;
                        }
                        srcsLen += src.remaining();
                        if (srcsLen > MAX_PLAINTEXT_LENGTH || srcsLen < 0) {
                            // If srcLen > MAX_PLAINTEXT_LENGTH or secLen < 0 just set it to MAX_PLAINTEXT_LENGTH.
                            // This also help us to guard against overflow.
                            // We not break out here as we still need to check for null entries in srcs[].
                            srcsLen = MAX_PLAINTEXT_LENGTH;
                        }
                    }
                    // jdkCompatibilityMode will only produce a single TLS packet, and we don't aggregate src buffers,
                    // so we always fix the number of buffers to 1 when checking if the dst buffer is large enough.
                    if (!isBytesAvailableEnoughForWrap(dst.remaining(), srcsLen, 1)) {
                        return new SSLEngineResult(BUFFER_OVERFLOW, getHandshakeStatus(), 0, 0);
                    }
                }
                // There was no pending data in the network BIO -- encrypt any application data
                int bytesConsumed = 0;
                assert bytesProduced == 0;
                // Flush any data that may have been written implicitly by OpenSSL in case a shutdown/alert occurs.
                bytesProduced = SSL.bioFlushByteBuffer(networkBIO);
                if (bytesProduced > 0) {
                    return newResultMayFinishHandshake(status, bytesConsumed, bytesProduced);
                }
                // There was a pending exception that we just delayed because there was something to produce left.
                // Throw it now and shutdown the engine.
                if (pendingException != null) {
                    Throwable error = pendingException;
                    pendingException = null;
                    shutdown();
                    // Throw a new exception wrapping the pending exception, so the stacktrace is meaningful and
                    // contains all the details.
                    throw new SSLException(error);
                }
                for (; offset < endOffset; ++offset) {
                    final ByteBuffer src = srcs[offset];
                    final int remaining = src.remaining();
                    if (remaining == 0) {
                        continue;
                    }
                    final int bytesWritten;
                    if (jdkCompatibilityMode) {
                        // Write plaintext application data to the SSL engine. We don't have to worry about checking
                        // if there is enough space if jdkCompatibilityMode because we only wrap at most
                        // MAX_PLAINTEXT_LENGTH and we loop over the input before hand and check if there is space.
                        bytesWritten = writePlaintextData(src, min(remaining, MAX_PLAINTEXT_LENGTH - bytesConsumed));
                    } else {
                        // OpenSSL's SSL_write keeps state between calls. We should make sure the amount we attempt to
                        // write is guaranteed to succeed so we don't have to worry about keeping state consistent
                        // between calls.
                        final int availableCapacityForWrap = dst.remaining() - bytesProduced - maxWrapOverhead;
                        if (availableCapacityForWrap <= 0) {
                            return new SSLEngineResult(BUFFER_OVERFLOW, getHandshakeStatus(), bytesConsumed,
                                    bytesProduced);
                        }
                        bytesWritten = writePlaintextData(src, min(remaining, availableCapacityForWrap));
                    }
                    // Determine how much encrypted data was generated.
                    //
                    // Even if SSL_write doesn't consume any application data it is possible that OpenSSL will
                    // produce non-application data into the BIO. For example session tickets....
                    // See https://github.com/netty/netty/issues/10041
                    final int pendingNow = SSL.bioLengthByteBuffer(networkBIO);
                    bytesProduced += bioLengthBefore - pendingNow;
                    bioLengthBefore = pendingNow;
                    if (bytesWritten > 0) {
                        bytesConsumed += bytesWritten;
                        if (jdkCompatibilityMode || bytesProduced == dst.remaining()) {
                            return newResultMayFinishHandshake(status, bytesConsumed, bytesProduced);
                        }
                    } else {
                        int sslError = SSL.getError(ssl, bytesWritten);
                        if (sslError == SSL.SSL_ERROR_ZERO_RETURN) {
                            // This means the connection was shutdown correctly, close inbound and outbound
                            if (!receivedShutdown) {
                                closeAll();
                                bytesProduced += bioLengthBefore - SSL.bioLengthByteBuffer(networkBIO);
                                // If we have filled up the dst buffer and we have not finished the handshake we should
                                // try to wrap again. Otherwise we should only try to wrap again if there is still data
                                // pending in SSL buffers.
                                SSLEngineResult.HandshakeStatus hs = mayFinishHandshake(
                                        status != FINISHED ? bytesProduced == dst.remaining() ? NEED_WRAP
                                                : getHandshakeStatus(SSL.bioLengthNonApplication(networkBIO))
                                                : FINISHED);
                                return newResult(hs, bytesConsumed, bytesProduced);
                            }
                            return newResult(NOT_HANDSHAKING, bytesConsumed, bytesProduced);
                        } else if (sslError == SSL.SSL_ERROR_WANT_READ) {
                            // If there is no pending data to read from BIO we should go back to event loop and try
                            // to read more data [1]. It is also possible that event loop will detect the socket has
                            // been closed. [1] https://www.openssl.org/docs/manmaster/ssl/SSL_write.html
                            return newResult(NEED_UNWRAP, bytesConsumed, bytesProduced);
                        } else if (sslError == SSL.SSL_ERROR_WANT_WRITE) {
                            // SSL_ERROR_WANT_WRITE typically means that the underlying transport is not writable
                            // and we should set the "want write" flag on the selector and try again when the
                            // underlying transport is writable [1]. However we are not directly writing to the
                            // underlying transport and instead writing to a BIO buffer. The OpenSsl documentation
                            // says we should do the following [1]:
                            //
                            // "When using a buffering BIO, like a BIO pair, data must be written into or retrieved
                            // out of the BIO before being able to continue."
                            //
                            // In practice this means the destination buffer doesn't have enough space for OpenSSL
                            // to write encrypted data to. This is an OVERFLOW condition.
                            // [1] https://www.openssl.org/docs/manmaster/ssl/SSL_write.html
                            if (bytesProduced > 0) {
                                // If we produced something we should report this back and let the user call
                                // wrap again.
                                return newResult(NEED_WRAP, bytesConsumed, bytesProduced);
                            }
                            return newResult(BUFFER_OVERFLOW, status, bytesConsumed, bytesProduced);
                        } else if (sslError == SSL.SSL_ERROR_WANT_X509_LOOKUP ||
                                sslError == SSL.SSL_ERROR_WANT_CERTIFICATE_VERIFY ||
                                sslError == SSL.SSL_ERROR_WANT_PRIVATE_KEY_OPERATION) {
                            return newResult(NEED_TASK, bytesConsumed, bytesProduced);
                        } else {
                            // Everything else is considered as error
                            throw shutdownWithError("SSL_write", sslError, SSL.getLastErrorNumber());
                        }
                    }
                }
                return newResultMayFinishHandshake(status, bytesConsumed, bytesProduced);
            } finally {
                SSL.bioClearByteBuffer(networkBIO);
                if (bioReadCopyBuf == null) {
                    dst.position(dst.position() + bytesProduced);
                } else {
                    assert bioReadCopyBuf.readableBytes() <= dst.remaining() : "The destination buffer " + dst +
                            " didn't have enough remaining space to hold the encrypted content in " + bioReadCopyBuf;
                    dst.put(bioReadCopyBuf.internalNioBuffer(bioReadCopyBuf.readerIndex(), bytesProduced));
                    bioReadCopyBuf.release();
                }
            }
        }
    }
    // Convenience overload: builds a result with status OK (subject to the close/shutdown checks in
    // the four-argument overload).
    private SSLEngineResult newResult(SSLEngineResult.HandshakeStatus hs, int bytesConsumed, int bytesProduced) {
        return newResult(OK, hs, bytesConsumed, bytesProduced);
    }
    /**
     * Builds an {@link SSLEngineResult}, downgrading the status to {@code CLOSED} when the outbound
     * side is done and shutting the engine down once both directions are done. Also records a
     * pending {@code NEED_TASK} so {@link #getHandshakeStatus()} stays consistent.
     */
    private SSLEngineResult newResult(SSLEngineResult.Status status, SSLEngineResult.HandshakeStatus hs,
                                      int bytesConsumed, int bytesProduced) {
        // If isOutboundDone, then the data from the network BIO
        // was the close_notify message and all was consumed we are not required to wait
        // for the receipt the peer's close_notify message -- shutdown.
        if (isOutboundDone()) {
            if (isInboundDone()) {
                // If the inbound was done as well, we need to ensure we return NOT_HANDSHAKING to signal we are done.
                hs = NOT_HANDSHAKING;
                // As the inbound and the outbound is done we can shutdown the engine now.
                shutdown();
            }
            return new SSLEngineResult(CLOSED, hs, bytesConsumed, bytesProduced);
        }
        if (hs == NEED_TASK) {
            // Set needTask to true so getHandshakeStatus() will return the correct value.
            needTask = true;
        }
        return new SSLEngineResult(status, hs, bytesConsumed, bytesProduced);
    }
    // Like newResult(...), but first gives mayFinishHandshake(...) a chance to transition the
    // handshake state (and fire completion) before the result is constructed.
    private SSLEngineResult newResultMayFinishHandshake(SSLEngineResult.HandshakeStatus hs,
                                                        int bytesConsumed, int bytesProduced) throws SSLException {
        return newResult(mayFinishHandshake(hs, bytesConsumed, bytesProduced), bytesConsumed, bytesProduced);
    }

    // Same as above but with an explicit result status instead of the default OK.
    private SSLEngineResult newResultMayFinishHandshake(SSLEngineResult.Status status,
                                                        SSLEngineResult.HandshakeStatus hs,
                                                        int bytesConsumed, int bytesProduced) throws SSLException {
        return newResult(status, mayFinishHandshake(hs, bytesConsumed, bytesProduced), bytesConsumed, bytesProduced);
    }
    /**
     * Log the error, shutdown the engine and return an exception for the caller to throw.
     * Any previously stored {@link #pendingException} is attached as the cause (and cleared)
     * to aid debugging.
     */
    private SSLException shutdownWithError(String operation, int sslError, int error) {
        if (logger.isDebugEnabled()) {
            String errorString = SSL.getErrorString(error);
            logger.debug("{} failed with {}: OpenSSL error: {} {}",
                    operation, sslError, error, errorString);
        }
        // There was an internal error -- shutdown
        shutdown();

        SSLException exception = newSSLExceptionForError(error);
        // If we have a pendingException stored already we should include it as well to help the user debug things.
        if (pendingException != null) {
            exception.initCause(pendingException);
            pendingException = null;
        }
        return exception;
    }
private SSLEngineResult handleUnwrapException(int bytesConsumed, int bytesProduced, SSLException e)
throws SSLException {
int lastError = SSL.getLastErrorNumber();
if (lastError != 0) {
return sslReadErrorResult(SSL.SSL_ERROR_SSL, lastError, bytesConsumed,
bytesProduced);
}
throw e;
}
    /**
     * Decrypts TLS records from {@code srcs[srcsOffset..srcsOffset+srcsLength)} into
     * {@code dsts[dstsOffset..dstsOffset+dstsLength)}, driving the handshake implicitly if it has
     * not finished yet.
     * <p>
     * In JDK-compatibility mode (or while the handshake is still in progress) at most one complete
     * TLS record is consumed per call, mirroring the {@link javax.net.ssl.SSLEngine} contract;
     * otherwise as much input as possible is processed. Remote-initiated renegotiation is rejected
     * after each pass.
     *
     * @return the engine result describing consumed/produced bytes and the handshake status.
     * @throws SSLException if the input is not a TLS record, a record exceeds the supported size, or
     *         OpenSSL reports a fatal error.
     */
    public final SSLEngineResult unwrap(
            final ByteBuffer[] srcs, int srcsOffset, final int srcsLength,
            final ByteBuffer[] dsts, int dstsOffset, final int dstsLength) throws SSLException {
        // Throw required runtime exceptions
        checkNotNullWithIAE(srcs, "srcs");
        if (srcsOffset >= srcs.length
                || srcsOffset + srcsLength > srcs.length) {
            throw new IndexOutOfBoundsException(
                    "offset: " + srcsOffset + ", length: " + srcsLength +
                            " (expected: offset <= offset + length <= srcs.length (" + srcs.length + "))");
        }
        checkNotNullWithIAE(dsts, "dsts");
        if (dstsOffset >= dsts.length || dstsOffset + dstsLength > dsts.length) {
            throw new IndexOutOfBoundsException(
                    "offset: " + dstsOffset + ", length: " + dstsLength +
                            " (expected: offset <= offset + length <= dsts.length (" + dsts.length + "))");
        }
        // Total writable capacity across all destination buffers.
        long capacity = 0;
        final int dstsEndOffset = dstsOffset + dstsLength;
        for (int i = dstsOffset; i < dstsEndOffset; i ++) {
            ByteBuffer dst = checkNotNullArrayParam(dsts[i], i, "dsts");
            if (dst.isReadOnly()) {
                throw new ReadOnlyBufferException();
            }
            capacity += dst.remaining();
        }
        // Total readable encrypted bytes across all source buffers.
        final int srcsEndOffset = srcsOffset + srcsLength;
        long len = 0;
        for (int i = srcsOffset; i < srcsEndOffset; i++) {
            ByteBuffer src = checkNotNullArrayParam(srcs[i], i, "srcs");
            len += src.remaining();
        }
        synchronized (this) {
            if (isInboundDone()) {
                return isOutboundDone() || destroyed ? CLOSED_NOT_HANDSHAKING : NEED_WRAP_CLOSED;
            }
            SSLEngineResult.HandshakeStatus status = NOT_HANDSHAKING;
            HandshakeState oldHandshakeState = handshakeState;
            // Prepare OpenSSL to work in server mode and receive handshake
            if (handshakeState != HandshakeState.FINISHED) {
                if (handshakeState != HandshakeState.STARTED_EXPLICITLY) {
                    // Update accepted so we know we triggered the handshake via wrap
                    handshakeState = HandshakeState.STARTED_IMPLICITLY;
                }
                status = handshake();
                if (status == NEED_TASK) {
                    return newResult(status, 0, 0);
                }
                if (status == NEED_WRAP) {
                    return NEED_WRAP_OK;
                }
                // Check if the inbound is considered to be closed if so let us try to wrap again.
                if (isInboundDone) {
                    return NEED_WRAP_CLOSED;
                }
            }
            int sslPending = sslPending0();
            int packetLength;
            // The JDK implies that only a single SSL packet should be processed per unwrap call [1]. If we are in
            // JDK compatibility mode then we should honor this, but if not we just wrap as much as possible. If there
            // are multiple records or partial records this may reduce thrashing events through the pipeline.
            // [1] https://docs.oracle.com/javase/7/docs/api/javax/net/ssl/SSLEngine.html
            if (jdkCompatibilityMode ||
                    // If the handshake was not finished before we entered the method, we also ensure we only
                    // unwrap one record. We do this to ensure we not produce any extra data before the caller
                    // of the method is able to observe handshake completion and react on it.
                    oldHandshakeState != HandshakeState.FINISHED) {
                if (len < SSL_RECORD_HEADER_LENGTH) {
                    return newResultMayFinishHandshake(BUFFER_UNDERFLOW, status, 0, 0);
                }
                packetLength = SslUtils.getEncryptedPacketLength(srcs, srcsOffset);
                if (packetLength == SslUtils.NOT_ENCRYPTED) {
                    throw new NotSslRecordException("not an SSL/TLS record");
                }
                assert packetLength >= 0;
                final int packetLengthDataOnly = packetLength - SSL_RECORD_HEADER_LENGTH;
                if (packetLengthDataOnly > capacity) {
                    // Not enough space in the destination buffer so signal the caller that the buffer needs to be
                    // increased.
                    if (packetLengthDataOnly > MAX_RECORD_SIZE) {
                        // The packet length MUST NOT exceed 2^14 [1]. However we do accommodate more data to support
                        // legacy use cases which may violate this condition (e.g. OpenJDK's SslEngineImpl). If the max
                        // length is exceeded we fail fast here to avoid an infinite loop due to the fact that we
                        // won't allocate a buffer large enough.
                        // [1] https://tools.ietf.org/html/rfc5246#section-6.2.1
                        throw new SSLException("Illegal packet length: " + packetLengthDataOnly + " > " +
                                session.getApplicationBufferSize());
                    } else {
                        session.tryExpandApplicationBufferSize(packetLengthDataOnly);
                    }
                    return newResultMayFinishHandshake(BUFFER_OVERFLOW, status, 0, 0);
                }
                if (len < packetLength) {
                    // We either don't have enough data to read the packet length or not enough for reading the whole
                    // packet.
                    return newResultMayFinishHandshake(BUFFER_UNDERFLOW, status, 0, 0);
                }
            } else if (len == 0 && sslPending <= 0) {
                return newResultMayFinishHandshake(BUFFER_UNDERFLOW, status, 0, 0);
            } else if (capacity == 0) {
                return newResultMayFinishHandshake(BUFFER_OVERFLOW, status, 0, 0);
            } else {
                packetLength = (int) min(MAX_VALUE, len);
            }
            // This must always be the case when we reached here as if not we returned BUFFER_UNDERFLOW.
            assert srcsOffset < srcsEndOffset;
            // This must always be the case if we reached here.
            assert capacity > 0;
            // Number of produced bytes
            int bytesProduced = 0;
            int bytesConsumed = 0;
            try {
                srcLoop:
                for (;;) {
                    ByteBuffer src = srcs[srcsOffset];
                    int remaining = src.remaining();
                    final ByteBuf bioWriteCopyBuf;
                    int pendingEncryptedBytes;
                    if (remaining == 0) {
                        if (sslPending <= 0) {
                            // We must skip empty buffers as BIO_write will return 0 if asked to write something
                            // with length 0.
                            if (++srcsOffset >= srcsEndOffset) {
                                break;
                            }
                            continue;
                        } else {
                            bioWriteCopyBuf = null;
                            pendingEncryptedBytes = SSL.bioLengthByteBuffer(networkBIO);
                        }
                    } else {
                        // Write more encrypted data into the BIO. Ensure we only read one packet at a time as
                        // stated in the SSLEngine javadocs.
                        pendingEncryptedBytes = min(packetLength, remaining);
                        try {
                            bioWriteCopyBuf = writeEncryptedData(src, pendingEncryptedBytes);
                        } catch (SSLException e) {
                            // Ensure we correctly handle the error stack.
                            return handleUnwrapException(bytesConsumed, bytesProduced, e);
                        }
                    }
                    try {
                        for (;;) {
                            ByteBuffer dst = dsts[dstsOffset];
                            if (!dst.hasRemaining()) {
                                // No space left in the destination buffer, skip it.
                                if (++dstsOffset >= dstsEndOffset) {
                                    break srcLoop;
                                }
                                continue;
                            }
                            int bytesRead;
                            try {
                                bytesRead = readPlaintextData(dst);
                            } catch (SSLException e) {
                                // Ensure we correctly handle the error stack.
                                return handleUnwrapException(bytesConsumed, bytesProduced, e);
                            }
                            // We are directly using the ByteBuffer memory for the write, and so we only know what has
                            // been consumed after we let SSL decrypt the data. At this point we should update the
                            // number of bytes consumed, update the ByteBuffer position, and release temp ByteBuf.
                            int localBytesConsumed = pendingEncryptedBytes - SSL.bioLengthByteBuffer(networkBIO);
                            bytesConsumed += localBytesConsumed;
                            packetLength -= localBytesConsumed;
                            pendingEncryptedBytes -= localBytesConsumed;
                            src.position(src.position() + localBytesConsumed);
                            if (bytesRead > 0) {
                                bytesProduced += bytesRead;
                                if (!dst.hasRemaining()) {
                                    sslPending = sslPending0();
                                    // Move to the next dst buffer as this one is full.
                                    if (++dstsOffset >= dstsEndOffset) {
                                        return sslPending > 0 ?
                                                newResult(BUFFER_OVERFLOW, status, bytesConsumed, bytesProduced) :
                                                newResultMayFinishHandshake(isInboundDone() ? CLOSED : OK, status,
                                                        bytesConsumed, bytesProduced);
                                    }
                                } else if (packetLength == 0 || jdkCompatibilityMode) {
                                    // We either consumed all data or we are in jdkCompatibilityMode and have consumed
                                    // a single TLS packet and should stop consuming until this method is called again.
                                    break srcLoop;
                                }
                            } else {
                                int sslError = SSL.getError(ssl, bytesRead);
                                if (sslError == SSL.SSL_ERROR_WANT_READ || sslError == SSL.SSL_ERROR_WANT_WRITE) {
                                    // break to the outer loop as we want to read more data which means we need to
                                    // write more to the BIO.
                                    break;
                                } else if (sslError == SSL.SSL_ERROR_ZERO_RETURN) {
                                    // This means the connection was shutdown correctly, close inbound and outbound
                                    if (!receivedShutdown) {
                                        closeAll();
                                    }
                                    return newResultMayFinishHandshake(isInboundDone() ? CLOSED : OK, status,
                                            bytesConsumed, bytesProduced);
                                } else if (sslError == SSL.SSL_ERROR_WANT_X509_LOOKUP ||
                                        sslError == SSL.SSL_ERROR_WANT_CERTIFICATE_VERIFY ||
                                        sslError == SSL.SSL_ERROR_WANT_PRIVATE_KEY_OPERATION) {
                                    return newResult(isInboundDone() ? CLOSED : OK,
                                            NEED_TASK, bytesConsumed, bytesProduced);
                                } else {
                                    return sslReadErrorResult(sslError, SSL.getLastErrorNumber(), bytesConsumed,
                                            bytesProduced);
                                }
                            }
                        }
                        if (++srcsOffset >= srcsEndOffset) {
                            break;
                        }
                    } finally {
                        if (bioWriteCopyBuf != null) {
                            bioWriteCopyBuf.release();
                        }
                    }
                }
            } finally {
                SSL.bioClearByteBuffer(networkBIO);
                rejectRemoteInitiatedRenegotiation();
            }
            // Check to see if we received a close_notify message from the peer.
            if (!receivedShutdown && (SSL.getShutdown(ssl) & SSL.SSL_RECEIVED_SHUTDOWN) == SSL.SSL_RECEIVED_SHUTDOWN) {
                closeAll();
            }
            return newResultMayFinishHandshake(isInboundDone() ? CLOSED : OK, status, bytesConsumed, bytesProduced);
        }
    }
    /**
     * Decides whether a native error must be deferred because non-application data (e.g. an alert
     * or close_notify) still has to be flushed via {@code wrap(...)}. When deferring, the error is
     * stored in {@link #pendingException} (de-duplicated by error code) for later propagation.
     *
     * @return {@code true} if the caller should signal NEED_WRAP instead of failing immediately.
     */
    private boolean needWrapAgain(int stackError) {
        // Check if we have a pending handshakeException and if so see if we need to consume all pending data from the
        // BIO first or can just shutdown and throw it now.
        // This is needed so we ensure close_notify etc is correctly send to the remote peer.
        // See https://github.com/netty/netty/issues/3900
        if (SSL.bioLengthNonApplication(networkBIO) > 0) {
            // we seem to have data left that needs to be transferred and so the user needs
            // call wrap(...). Store the error so we can pick it up later.
            if (pendingException == null) {
                pendingException = newSSLExceptionForError(stackError);
            } else if (shouldAddSuppressed(pendingException, stackError)) {
                ThrowableUtil.addSuppressed(pendingException, newSSLExceptionForError(stackError));
            }
            // We need to clear all errors so we not pick up anything that was left on the stack on the next
            // operation. Note that shutdownWithError(...) will cleanup the stack as well so its only needed here.
            SSL.clearError();
            return true;
        }
        return false;
    }
private SSLException newSSLExceptionForError(int stackError) {
String message = SSL.getErrorString(stackError);
return handshakeState == HandshakeState.FINISHED ?
new OpenSslException(message, stackError) : new OpenSslHandshakeException(message, stackError);
}
private static boolean shouldAddSuppressed(Throwable target, int errorCode) {
for (Throwable suppressed: ThrowableUtil.getSuppressed(target)) {
if (suppressed instanceof NativeSslException &&
((NativeSslException) suppressed).errorCode() == errorCode) {
/// An exception with this errorCode was already added before.
return false;
}
}
return true;
}
private SSLEngineResult sslReadErrorResult(int error, int stackError, int bytesConsumed, int bytesProduced)
throws SSLException {
if (needWrapAgain(stackError)) {
// There is something that needs to be send to the remote peer before we can teardown.
// This is most likely some alert.
return new SSLEngineResult(OK, NEED_WRAP, bytesConsumed, bytesProduced);
}
throw shutdownWithError("SSL_read", error, stackError);
}
    // Records that the peer sent close_notify and closes both directions of the engine.
    private void closeAll() throws SSLException {
        receivedShutdown = true;
        closeOutbound();
        closeInbound();
    }
private void rejectRemoteInitiatedRenegotiation() throws SSLHandshakeException {
// Avoid NPE: SSL.getHandshakeCount(ssl) must not be called if destroyed.
// TLS 1.3 forbids renegotiation by spec.
if (destroyed || SslProtocols.TLS_v1_3.equals(session.getProtocol())
|| handshakeState != HandshakeState.FINISHED) {
return;
}
int count = SSL.getHandshakeCount(ssl);
boolean renegotiationAttempted = (!clientMode && count > 1) || (clientMode && count > 2);
if (renegotiationAttempted) {
shutdown();
throw new SSLHandshakeException("remote-initiated renegotiation not allowed");
}
}
    // Bulk convenience overload: consume from all of srcs, produce into all of dsts.
    public final SSLEngineResult unwrap(final ByteBuffer[] srcs, final ByteBuffer[] dsts) throws SSLException {
        return unwrap(srcs, 0, srcs.length, dsts, 0, dsts.length);
    }
    // Reusable single-element array views used by the single-buffer wrap/unwrap overloads below.
    // Safe only because every caller is synchronized on this engine and clears the slot in a
    // finally block, so the caller's buffer is never pinned beyond the call.
    private ByteBuffer[] singleSrcBuffer(ByteBuffer src) {
        singleSrcBuffer[0] = src;
        return singleSrcBuffer;
    }

    // Clear the cached source slot so the engine does not retain the caller's buffer.
    private void resetSingleSrcBuffer() {
        singleSrcBuffer[0] = null;
    }

    private ByteBuffer[] singleDstBuffer(ByteBuffer src) {
        singleDstBuffer[0] = src;
        return singleDstBuffer;
    }

    // Clear the cached destination slot so the engine does not retain the caller's buffer.
    private void resetSingleDstBuffer() {
        singleDstBuffer[0] = null;
    }
    // The four SSLEngine single-buffer overloads below adapt to the array-based implementations via
    // the cached single-element views above; each resets the cached slot(s) in a finally block.

    @Override
    public final synchronized SSLEngineResult unwrap(
            final ByteBuffer src, final ByteBuffer[] dsts, final int offset, final int length) throws SSLException {
        try {
            return unwrap(singleSrcBuffer(src), 0, 1, dsts, offset, length);
        } finally {
            resetSingleSrcBuffer();
        }
    }

    @Override
    public final synchronized SSLEngineResult wrap(ByteBuffer src, ByteBuffer dst) throws SSLException {
        try {
            return wrap(singleSrcBuffer(src), dst);
        } finally {
            resetSingleSrcBuffer();
        }
    }

    @Override
    public final synchronized SSLEngineResult unwrap(ByteBuffer src, ByteBuffer dst) throws SSLException {
        try {
            return unwrap(singleSrcBuffer(src), singleDstBuffer(dst));
        } finally {
            resetSingleSrcBuffer();
            resetSingleDstBuffer();
        }
    }

    @Override
    public final synchronized SSLEngineResult unwrap(ByteBuffer src, ByteBuffer[] dsts) throws SSLException {
        try {
            return unwrap(singleSrcBuffer(src), dsts);
        } finally {
            resetSingleSrcBuffer();
        }
    }
private final | HandshakeState |
java | apache__camel | core/camel-core-model/src/main/java/org/apache/camel/model/dataformat/TarFileDataFormat.java | {
"start": 4689,
"end": 8377
} | class ____ implements DataFormatBuilder<TarFileDataFormat> {
private String usingIterator;
private String allowEmptyDirectory;
private String preservePathElements;
private String maxDecompressedSize;
/**
* If the tar file has more than one entry, the setting this option to true, allows working with the splitter
* EIP, to split the data using an iterator in a streaming mode.
*/
public Builder usingIterator(String usingIterator) {
this.usingIterator = usingIterator;
return this;
}
/**
* If the tar file has more than one entry, the setting this option to true, allows working with the splitter
* EIP, to split the data using an iterator in a streaming mode.
*/
public Builder usingIterator(boolean usingIterator) {
this.usingIterator = Boolean.toString(usingIterator);
return this;
}
/**
* If the tar file has more than one entry, setting this option to true, allows to get the iterator even if the
* directory is empty
*/
public Builder allowEmptyDirectory(String allowEmptyDirectory) {
this.allowEmptyDirectory = allowEmptyDirectory;
return this;
}
/**
* If the tar file has more than one entry, setting this option to true, allows to get the iterator even if the
* directory is empty
*/
public Builder allowEmptyDirectory(boolean allowEmptyDirectory) {
this.allowEmptyDirectory = Boolean.toString(allowEmptyDirectory);
return this;
}
/**
* If the file name contains path elements, setting this option to true, allows the path to be maintained in the
* tar file.
*/
public Builder preservePathElements(String preservePathElements) {
this.preservePathElements = preservePathElements;
return this;
}
/**
* If the file name contains path elements, setting this option to true, allows the path to be maintained in the
* tar file.
*/
public Builder preservePathElements(boolean preservePathElements) {
this.preservePathElements = Boolean.toString(preservePathElements);
return this;
}
/**
* Set the maximum decompressed size of a tar file (in bytes). The default value if not specified corresponds to
* 1 gigabyte. An IOException will be thrown if the decompressed size exceeds this amount. Set to -1 to disable
* setting a maximum decompressed size.
*
* @param maxDecompressedSize the maximum decompressed size of a tar file (in bytes)
*/
public Builder maxDecompressedSize(String maxDecompressedSize) {
this.maxDecompressedSize = maxDecompressedSize;
return this;
}
/**
* Set the maximum decompressed size of a tar file (in bytes). The default value if not specified corresponds to
* 1 gigabyte. An IOException will be thrown if the decompressed size exceeds this amount. Set to -1 to disable
* setting a maximum decompressed size.
*
* @param maxDecompressedSize the maximum decompressed size of a tar file (in bytes)
*/
public Builder maxDecompressedSize(long maxDecompressedSize) {
this.maxDecompressedSize = Long.toString(maxDecompressedSize);
return this;
}
@Override
public TarFileDataFormat end() {
return new TarFileDataFormat(this);
}
}
}
| Builder |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencingWithReplication.java | {
"start": 2360,
"end": 5433
} | class ____ extends RepeatingTestThread {
private final FileSystem fs;
private final Path path;
private final MiniDFSCluster cluster;
ReplicationToggler(TestContext ctx, FileSystem fs, Path p,
MiniDFSCluster cluster) {
super(ctx);
this.fs = fs;
this.path = p;
this.cluster = cluster;
}
@Override
public void doAnAction() throws Exception {
fs.setReplication(path, (short)1);
waitForReplicas(1);
fs.setReplication(path, (short)2);
waitForReplicas(2);
}
private void waitForReplicas(final int replicas) throws Exception {
try {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
cluster.waitActive();
BlockLocation[] blocks = fs.getFileBlockLocations(path, 0, 10);
assertEquals(1, blocks.length);
return blocks[0].getHosts().length == replicas;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}, 100, 60000);
} catch (TimeoutException te) {
throw new IOException("Timed out waiting for " + replicas +
" replicas on path " + path);
}
}
@Override
public String toString() {
return "Toggler for " + path;
}
}
@Test
public void testFencingStress() throws Exception {
HAStressTestHarness harness = new HAStressTestHarness();
harness.setNumberOfNameNodes(3);
harness.conf.setInt(
DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
harness.conf.setInt(
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
harness.conf.setInt(
DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
final MiniDFSCluster cluster = harness.startCluster();
try {
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs = harness.getFailoverFs();
TestContext togglers = new TestContext();
for (int i = 0; i < NUM_THREADS; i++) {
Path p = new Path("/test-" + i);
DFSTestUtil.createFile(fs, p, BLOCK_SIZE*10, (short)3, (long)i);
togglers.addThread(new ReplicationToggler(togglers, fs, p, cluster));
}
// Start a separate thread which will make sure that replication
// happens quickly by triggering deletion reports and replication
// work calculation frequently.
harness.addReplicationTriggerThread(500);
harness.addFailoverThread(5000);
harness.startThreads();
togglers.startThreads();
togglers.waitFor(RUNTIME);
togglers.stop();
harness.stopThreads();
// CHeck that the files can be read without throwing
for (int i = 0; i < NUM_THREADS; i++) {
Path p = new Path("/test-" + i);
DFSTestUtil.readFile(fs, p);
}
} finally {
System.err.println("===========================\n\n\n\n");
harness.shutdown();
}
}
}
| ReplicationToggler |
java | redisson__redisson | redisson/src/main/java/org/redisson/client/RedisConnection.java | {
"start": 1543,
"end": 13175
} | enum ____ {OPEN, CLOSED, CLOSED_IDLE}
private static final Logger LOG = LoggerFactory.getLogger(RedisConnection.class);
private static final AttributeKey<RedisConnection> CONNECTION = AttributeKey.valueOf("connection");
final RedisClient redisClient;
private volatile CompletableFuture<Void> fastReconnect;
private volatile Status status = Status.OPEN;
volatile Channel channel;
private CompletableFuture<?> connectionPromise;
private volatile long lastUsageTime;
@Deprecated
private Runnable connectedListener;
@Deprecated
private Runnable disconnectedListener;
private final AtomicInteger usage = new AtomicInteger();
public <C> RedisConnection(RedisClient redisClient, Channel channel, CompletableFuture<C> connectionPromise) {
this.redisClient = redisClient;
this.connectionPromise = connectionPromise;
updateChannel(channel);
lastUsageTime = System.nanoTime();
LOG.debug("Connection created {}", redisClient);
}
protected RedisConnection(RedisClient redisClient) {
this.redisClient = redisClient;
}
public void fireConnected() {
if (connectedListener != null) {
connectedListener.run();
}
if (redisClient.getConfig().getConnectedListener() != null) {
redisClient.getConfig().getConnectedListener().accept(redisClient.getAddr());
}
}
public int incUsage() {
return usage.incrementAndGet();
}
public int getUsage() {
return usage.get();
}
public int decUsage() {
return usage.decrementAndGet();
}
@Deprecated
public void setConnectedListener(Runnable connectedListener) {
this.connectedListener = connectedListener;
}
public void fireDisconnected() {
if (disconnectedListener != null) {
disconnectedListener.run();
}
if (redisClient.getConfig().getDisconnectedListener() != null) {
redisClient.getConfig().getDisconnectedListener().accept(redisClient.getAddr());
}
}
@Deprecated
public void setDisconnectedListener(Runnable disconnectedListener) {
this.disconnectedListener = disconnectedListener;
}
public <C extends RedisConnection> CompletableFuture<C> getConnectionPromise() {
return (CompletableFuture<C>) connectionPromise;
}
public static <C extends RedisConnection> C getFrom(Channel channel) {
return (C) channel.attr(RedisConnection.CONNECTION).get();
}
public CommandData<?, ?> getLastCommand() {
Deque<QueueCommandHolder> queue = channel.attr(CommandsQueue.COMMANDS_QUEUE).get();
if (queue != null) {
QueueCommandHolder holder = queue.peekLast();
if (holder != null) {
if (holder.getCommand() instanceof CommandData) {
return (CommandData<?, ?>) holder.getCommand();
}
}
}
return null;
}
public QueueCommand getCurrentCommandData() {
Queue<QueueCommandHolder> queue = channel.attr(CommandsQueue.COMMANDS_QUEUE).get();
if (queue != null) {
QueueCommandHolder holder = queue.peek();
if (holder != null) {
return holder.getCommand();
}
}
QueueCommandHolder holder = channel.attr(CommandsQueuePubSub.CURRENT_COMMAND).get();
if (holder != null) {
return holder.getCommand();
}
return null;
}
public CommandData<?, ?> getCurrentCommand() {
Queue<QueueCommandHolder> queue = channel.attr(CommandsQueue.COMMANDS_QUEUE).get();
if (queue != null) {
QueueCommandHolder holder = queue.peek();
if (holder != null) {
if (holder.getCommand() instanceof CommandData) {
return (CommandData<?, ?>) holder.getCommand();
}
}
}
QueueCommandHolder holder = channel.attr(CommandsQueuePubSub.CURRENT_COMMAND).get();
if (holder != null && holder.getCommand() instanceof CommandData) {
return (CommandData<?, ?>) holder.getCommand();
}
return null;
}
public long getLastUsageTime() {
return lastUsageTime;
}
public void setLastUsageTime(long lastUsageTime) {
this.lastUsageTime = lastUsageTime;
}
public boolean isOpen() {
return channel.isOpen();
}
/**
* Check is channel connected and ready for transfer
*
* @return true if so
*/
public boolean isActive() {
return channel.isActive();
}
public void updateChannel(Channel channel) {
if (channel == null) {
throw new NullPointerException();
}
this.channel = channel;
channel.attr(CONNECTION).set(this);
}
public RedisClient getRedisClient() {
return redisClient;
}
public <R> R await(CompletableFuture<R> future) {
try {
return future.get(redisClient.getCommandTimeout(), TimeUnit.MILLISECONDS);
} catch (ExecutionException e) {
if (e.getCause() instanceof RedisException) {
throw (RedisException) e.getCause();
}
throw new RedisException("Unexpected exception while processing command", e.getCause());
} catch (TimeoutException e) {
RedisTimeoutException ex = new RedisTimeoutException("Command execution timeout for " + redisClient.getAddr());
future.completeExceptionally(ex);
throw ex;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return null;
}
}
public <T> T sync(RedisCommand<T> command, Object... params) {
return sync(null, command, params);
}
public <T, R> ChannelFuture send(CommandData<T, R> data) {
return channel.writeAndFlush(data);
}
public ChannelFuture send(CommandsData data) {
return channel.writeAndFlush(data);
}
public <T, R> R sync(Codec encoder, RedisCommand<T> command, Object... params) {
CompletableFuture<R> promise = new CompletableFuture<>();
send(new CommandData<T, R>(promise, encoder, command, params));
return await(promise);
}
public <T, R> RFuture<R> async(RedisCommand<T> command, Object... params) {
return async(-1, command, params);
}
public <T, R> RFuture<R> async(long timeout, RedisCommand<T> command, Object... params) {
return async(timeout, null, command, params);
}
public <T, R> RFuture<R> async(Codec codec, RedisCommand<T> command, Object... params) {
return async(-1, codec, command, params);
}
public <T, R> RFuture<R> async(int retryAttempts, DelayStrategy delayStrategy, long timeout, Codec codec, RedisCommand<T> command, Object... params) {
CompletableFuture<R> result = new CompletableFuture<>();
AtomicInteger attempts = new AtomicInteger();
async(result, retryAttempts, attempts, delayStrategy, timeout, codec, command, params);
return new CompletableFutureWrapper<>(result);
}
private <T, R> void async(CompletableFuture<R> promise, int maxAttempts, AtomicInteger attempts, DelayStrategy delayStrategy,
long timeout, Codec codec, RedisCommand<T> command, Object... params) {
RFuture<R> f = async(timeout, codec, command, params);
f.whenComplete((r, e) -> {
if (e != null) {
if (attempts.get() < maxAttempts) {
Duration delay = delayStrategy.calcDelay(attempts.get());
attempts.incrementAndGet();
redisClient.getTimer().newTimeout(t -> {
async(promise, maxAttempts, attempts, delayStrategy, timeout, codec, command, params);
}, delay.toMillis(), TimeUnit.MILLISECONDS);
return;
}
promise.completeExceptionally(e);
return;
}
promise.complete(r);
});
}
public <T, R> RFuture<R> async(long timeout, Codec codec, RedisCommand<T> command, Object... params) {
CompletableFuture<R> promise = new CompletableFuture<>();
if (timeout == -1) {
timeout = redisClient.getCommandTimeout();
}
if (redisClient.isShutdown()) {
RedissonShutdownException cause = new RedissonShutdownException("Redis client " + redisClient.getAddr() + " is shutdown");
return new CompletableFutureWrapper<>(cause);
}
Timeout scheduledFuture = redisClient.getTimer().newTimeout(t -> {
RedisTimeoutException ex = new RedisTimeoutException("Command execution timeout for "
+ LogHelper.toString(command, params) + ", Redis client: " + redisClient);
promise.completeExceptionally(ex);
}, timeout, TimeUnit.MILLISECONDS);
promise.whenComplete((res, e) -> {
scheduledFuture.cancel();
});
ChannelFuture writeFuture = send(new CommandData<>(promise, codec, command, params));
writeFuture.addListener((ChannelFutureListener) future -> {
if (!future.isSuccess()) {
promise.completeExceptionally(future.cause());
}
});
return new CompletableFutureWrapper<>(promise);
}
public <T, R> CommandData<T, R> create(Codec encoder, RedisCommand<T> command, Object... params) {
CompletableFuture<R> promise = new CompletableFuture<>();
return new CommandData<>(promise, encoder, command, params);
}
public boolean isClosed() {
return status != Status.OPEN;
}
public boolean isFastReconnect() {
return fastReconnect != null;
}
public void clearFastReconnect() {
fastReconnect.complete(null);
fastReconnect = null;
}
public void close() {
try {
closeAsync().sync();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (Exception e) {
throw e;
}
}
private void closeInternal() {
channel.close();
}
public CompletionStage<Void> forceFastReconnectAsync() {
CompletableFuture<Void> promise = new CompletableFuture<>();
fastReconnect = promise;
closeInternal();
return promise;
}
/**
* Access to Netty channel.
* This method is provided to use in debug info only.
*
* @return channel
*/
public Channel getChannel() {
return channel;
}
public ChannelFuture closeIdleAsync() {
status = Status.CLOSED_IDLE;
closeInternal();
return channel.closeFuture();
}
public boolean isClosedIdle() {
return status == Status.CLOSED_IDLE;
}
public ChannelFuture closeAsync() {
if (status == Status.CLOSED) {
return channel.closeFuture();
}
status = Status.CLOSED;
closeInternal();
return channel.closeFuture();
}
@Override
public String toString() {
return getClass().getSimpleName() + "@" + System.identityHashCode(this)
+ " [redisClient=" + redisClient
+ ", channel=" + channel
+ ", currentCommand=" + getCurrentCommandData()
+ ", usage=" + usage + "]";
}
}
| Status |
java | quarkusio__quarkus | extensions/qute/deployment/src/test/java/io/quarkus/qute/deployment/i18n/LocalizedFileResourceBundleNameTest.java | {
"start": 1571,
"end": 1645
} | interface ____ {
@Message
String hello();
}
}
| Messages1 |
java | google__guava | android/guava/src/com/google/common/primitives/ImmutableIntArray.java | {
"start": 3562,
"end": 8760
} | class ____ implements Serializable {
private static final ImmutableIntArray EMPTY = new ImmutableIntArray(new int[0]);
/** Returns the empty array. */
public static ImmutableIntArray of() {
return EMPTY;
}
/** Returns an immutable array containing a single value. */
public static ImmutableIntArray of(int e0) {
return new ImmutableIntArray(new int[] {e0});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableIntArray of(int e0, int e1) {
return new ImmutableIntArray(new int[] {e0, e1});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableIntArray of(int e0, int e1, int e2) {
return new ImmutableIntArray(new int[] {e0, e1, e2});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableIntArray of(int e0, int e1, int e2, int e3) {
return new ImmutableIntArray(new int[] {e0, e1, e2, e3});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableIntArray of(int e0, int e1, int e2, int e3, int e4) {
return new ImmutableIntArray(new int[] {e0, e1, e2, e3, e4});
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableIntArray of(int e0, int e1, int e2, int e3, int e4, int e5) {
return new ImmutableIntArray(new int[] {e0, e1, e2, e3, e4, e5});
}
// TODO(kevinb): go up to 11?
/**
* Returns an immutable array containing the given values, in order.
*
* <p>The array {@code rest} must not be longer than {@code Integer.MAX_VALUE - 1}.
*/
// Use (first, rest) so that `of(someIntArray)` won't compile (they should use copyOf), which is
// okay since we have to copy the just-created array anyway.
public static ImmutableIntArray of(int first, int... rest) {
checkArgument(
rest.length <= Integer.MAX_VALUE - 1, "the total number of elements must fit in an int");
int[] array = new int[rest.length + 1];
array[0] = first;
System.arraycopy(rest, 0, array, 1, rest.length);
return new ImmutableIntArray(array);
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableIntArray copyOf(int[] values) {
return values.length == 0 ? EMPTY : new ImmutableIntArray(Arrays.copyOf(values, values.length));
}
/** Returns an immutable array containing the given values, in order. */
public static ImmutableIntArray copyOf(Collection<Integer> values) {
return values.isEmpty() ? EMPTY : new ImmutableIntArray(Ints.toArray(values));
}
/**
* Returns an immutable array containing the given values, in order.
*
* <p><b>Performance note:</b> this method delegates to {@link #copyOf(Collection)} if {@code
* values} is a {@link Collection}. Otherwise it creates a {@link #builder} and uses {@link
* Builder#addAll(Iterable)}, with all the performance implications associated with that.
*/
public static ImmutableIntArray copyOf(Iterable<Integer> values) {
if (values instanceof Collection) {
return copyOf((Collection<Integer>) values);
}
return builder().addAll(values).build();
}
/**
* Returns an immutable array containing all the values from {@code stream}, in order.
*
* @since 33.4.0 (but since 22.0 in the JRE flavor)
*/
@IgnoreJRERequirement // Users will use this only if they're already using streams.
public static ImmutableIntArray copyOf(IntStream stream) {
// Note this uses very different growth behavior from copyOf(Iterable) and the builder.
int[] array = stream.toArray();
return (array.length == 0) ? EMPTY : new ImmutableIntArray(array);
}
/**
* Returns a new, empty builder for {@link ImmutableIntArray} instances, sized to hold up to
* {@code initialCapacity} values without resizing. The returned builder is not thread-safe.
*
* <p><b>Performance note:</b> When feasible, {@code initialCapacity} should be the exact number
* of values that will be added, if that knowledge is readily available. It is better to guess a
* value slightly too high than slightly too low. If the value is not exact, the {@link
* ImmutableIntArray} that is built will very likely occupy more memory than strictly necessary;
* to trim memory usage, build using {@code builder.build().trimmed()}.
*/
public static Builder builder(int initialCapacity) {
checkArgument(initialCapacity >= 0, "Invalid initialCapacity: %s", initialCapacity);
return new Builder(initialCapacity);
}
/**
* Returns a new, empty builder for {@link ImmutableIntArray} instances, with a default initial
* capacity. The returned builder is not thread-safe.
*
* <p><b>Performance note:</b> The {@link ImmutableIntArray} that is built will very likely occupy
* more memory than necessary; to trim memory usage, build using {@code
* builder.build().trimmed()}.
*/
public static Builder builder() {
return new Builder(10);
}
/**
* A builder for {@link ImmutableIntArray} instances; obtained using {@link
* ImmutableIntArray#builder}.
*/
public static final | ImmutableIntArray |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/ComparatorBasedComparisonStrategy_isLessThanOrEqualTo_Test.java | {
"start": 882,
"end": 2100
} | class ____ extends AbstractTest_ComparatorBasedComparisonStrategy {
@Test
void should_pass() {
String string = "string";
String greaterString = "STRINGA";
assertThat(caseInsensitiveComparisonStrategy.isLessThanOrEqualTo(string, greaterString)).isTrue();
assertThat(caseInsensitiveComparisonStrategy.isLessThanOrEqualTo(string, "STRING")).isTrue();
assertThat(caseInsensitiveComparisonStrategy.isLessThanOrEqualTo(string, string)).isTrue();
assertThat(caseInsensitiveComparisonStrategy.isLessThanOrEqualTo(greaterString, string)).isFalse();
String lowerString = "stringA";
assertThat(caseInsensitiveComparisonStrategy.isLessThanOrEqualTo(string, lowerString)).isTrue();
assertThat(caseInsensitiveComparisonStrategy.isLessThanOrEqualTo(lowerString, string)).isFalse();
}
@Test
void should_fail_if_a_parameter_is_not_comparable() {
assertThatExceptionOfType(ClassCastException.class).isThrownBy(() -> caseInsensitiveComparisonStrategy.isLessThanOrEqualTo(Locale.ROOT,
Locale.US));
}
}
| ComparatorBasedComparisonStrategy_isLessThanOrEqualTo_Test |
java | spring-projects__spring-framework | spring-websocket/src/main/java/org/springframework/web/socket/server/support/DefaultHandshakeHandler.java | {
"start": 1271,
"end": 1518
} | class ____ {@link JettyRequestUpgradeStrategy} when Jetty WebSocket
* is available on the classpath, using {@link StandardWebSocketUpgradeStrategy} otherwise.
*
* @author Rossen Stoyanchev
* @author Juergen Hoeller
* @since 4.0
*/
public | prefers |
java | netty__netty | buffer/src/test/java/io/netty/buffer/AdaptiveLittleEndianDirectByteBufTest.java | {
"start": 817,
"end": 1258
} | class ____ extends AbstractAdaptiveByteBufTest {
@Override
protected ByteBuf alloc(AdaptiveByteBufAllocator allocator, int length, int maxCapacity) {
ByteBuf buffer = allocator.directBuffer(length, maxCapacity)
.order(ByteOrder.LITTLE_ENDIAN);
assertSame(ByteOrder.LITTLE_ENDIAN, buffer.order());
assertTrue(buffer.isDirect());
return buffer;
}
}
| AdaptiveLittleEndianDirectByteBufTest |
java | spring-projects__spring-framework | spring-beans/src/test/java/org/springframework/beans/factory/wiring/BeanConfigurerSupportTests.java | {
"start": 1271,
"end": 5042
} | class ____ {
@Test
void supplyIncompatibleBeanFactoryImplementation() {
assertThatIllegalArgumentException().isThrownBy(() ->
new StubBeanConfigurerSupport().setBeanFactory(mock()));
}
@Test
void configureBeanDoesNothingIfBeanWiringInfoResolverResolvesToNull() {
TestBean beanInstance = new TestBean();
BeanWiringInfoResolver resolver = mock();
BeanConfigurerSupport configurer = new StubBeanConfigurerSupport();
configurer.setBeanWiringInfoResolver(resolver);
configurer.setBeanFactory(new DefaultListableBeanFactory());
configurer.configureBean(beanInstance);
verify(resolver).resolveWiringInfo(beanInstance);
assertThat(beanInstance.getName()).isNull();
}
@Test
void configureBeanDoesNothingIfNoBeanFactoryHasBeenSet() {
TestBean beanInstance = new TestBean();
BeanConfigurerSupport configurer = new StubBeanConfigurerSupport();
configurer.configureBean(beanInstance);
assertThat(beanInstance.getName()).isNull();
}
@Test
void configureBeanReallyDoesDefaultToUsingTheFullyQualifiedClassNameOfTheSuppliedBeanInstance() {
TestBean beanInstance = new TestBean();
BeanDefinitionBuilder builder = BeanDefinitionBuilder.rootBeanDefinition(TestBean.class);
builder.addPropertyValue("name", "Harriet Wheeler");
DefaultListableBeanFactory factory = new DefaultListableBeanFactory();
factory.registerBeanDefinition(beanInstance.getClass().getName(), builder.getBeanDefinition());
BeanConfigurerSupport configurer = new StubBeanConfigurerSupport();
configurer.setBeanFactory(factory);
configurer.afterPropertiesSet();
configurer.configureBean(beanInstance);
assertThat(beanInstance.getName()).as("Bean is evidently not being configured (for some reason)").isEqualTo("Harriet Wheeler");
}
@Test
void configureBeanPerformsAutowiringByNameIfAppropriateBeanWiringInfoResolverIsPluggedIn() {
TestBean beanInstance = new TestBean();
// spouse for autowiring by name...
BeanDefinitionBuilder builder = BeanDefinitionBuilder.rootBeanDefinition(TestBean.class);
builder.addConstructorArgValue("David Gavurin");
DefaultListableBeanFactory factory = new DefaultListableBeanFactory();
factory.registerBeanDefinition("spouse", builder.getBeanDefinition());
BeanWiringInfoResolver resolver = mock();
given(resolver.resolveWiringInfo(beanInstance)).willReturn(new BeanWiringInfo(BeanWiringInfo.AUTOWIRE_BY_NAME, false));
BeanConfigurerSupport configurer = new StubBeanConfigurerSupport();
configurer.setBeanFactory(factory);
configurer.setBeanWiringInfoResolver(resolver);
configurer.configureBean(beanInstance);
assertThat(beanInstance.getSpouse().getName()).as("Bean is evidently not being configured (for some reason)").isEqualTo("David Gavurin");
}
@Test
void configureBeanPerformsAutowiringByTypeIfAppropriateBeanWiringInfoResolverIsPluggedIn() {
TestBean beanInstance = new TestBean();
// spouse for autowiring by type...
BeanDefinitionBuilder builder = BeanDefinitionBuilder.rootBeanDefinition(TestBean.class);
builder.addConstructorArgValue("David Gavurin");
DefaultListableBeanFactory factory = new DefaultListableBeanFactory();
factory.registerBeanDefinition("Mmm, I fancy a salad!", builder.getBeanDefinition());
BeanWiringInfoResolver resolver = mock();
given(resolver.resolveWiringInfo(beanInstance)).willReturn(new BeanWiringInfo(BeanWiringInfo.AUTOWIRE_BY_TYPE, false));
BeanConfigurerSupport configurer = new StubBeanConfigurerSupport();
configurer.setBeanFactory(factory);
configurer.setBeanWiringInfoResolver(resolver);
configurer.configureBean(beanInstance);
assertThat(beanInstance.getSpouse().getName()).as("Bean is evidently not being configured (for some reason)").isEqualTo("David Gavurin");
}
private static | BeanConfigurerSupportTests |
java | apache__camel | components/camel-consul/src/test/java/org/apache/camel/component/consul/ConsulHealthIT.java | {
"start": 1708,
"end": 4974
} | class ____ extends CamelTestSupport {
/*
NOTE: this one is not registered as extension because it requires a different lifecycle. It
needs to be started much earlier than usual, so in this test we take care of handling it.
*/
private ConsulService consulService = ConsulServiceFactory.createService();
private AgentClient client;
private List<Registration> registrations;
private String service;
public ConsulHealthIT() {
consulService.initialize();
}
@BindToRegistry("consul")
public ConsulComponent getConsulComponent() {
ConsulComponent component = new ConsulComponent();
component.getConfiguration().setUrl(consulService.getConsulUrl());
return component;
}
protected Consul getConsul() {
return Consul.builder().withUrl(consulService.getConsulUrl()).build();
}
@Override
public void doPreSetup() throws Exception {
super.doPreSetup();
SecureRandom random = new SecureRandom();
this.service = UUID.randomUUID().toString();
this.client = getConsul().agentClient();
this.registrations = Arrays
.asList(ImmutableRegistration.builder().id(UUID.randomUUID().toString()).name(this.service).address("127.0.0.1")
.port(random.nextInt(10000)).build(),
ImmutableRegistration.builder().id(UUID.randomUUID().toString()).name(this.service).address("127.0.0.1")
.port(random.nextInt(10000)).build());
this.registrations.forEach(client::register);
}
@Override
public void doPostTearDown() throws Exception {
super.doPostTearDown();
registrations.forEach(r -> client.deregister(r.getId()));
}
// *************************************************************************
// Test
// *************************************************************************
@Test
public void testServiceInstance() {
List<ServiceHealth> ref = getConsul().healthClient().getAllServiceInstances(this.service).getResponse();
List<ServiceHealth> res
= fluentTemplate().withHeader(ConsulConstants.CONSUL_ACTION, ConsulHealthActions.SERVICE_INSTANCES)
.withHeader(ConsulConstants.CONSUL_SERVICE, this.service).to("direct:consul").request(List.class);
Assertions.assertEquals(2, ref.size());
Assertions.assertEquals(2, res.size());
Assertions.assertEquals(ref, res);
assertTrue(registrations.stream()
.anyMatch(r -> r.getPort().isPresent() && r.getPort().get() == res.get(0).getService().getPort()
&& r.getId().equalsIgnoreCase(res.get(0).getService().getId())));
assertTrue(registrations.stream()
.anyMatch(r -> r.getPort().isPresent() && r.getPort().get() == res.get(1).getService().getPort()
&& r.getId().equalsIgnoreCase(res.get(1).getService().getId())));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
from("direct:consul").to("consul:health");
}
};
}
}
| ConsulHealthIT |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java | {
"start": 1605,
"end": 11796
} | class ____ {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private Configuration conf;
private final static int SUCCESS = 0;
private ErasureCodingPolicy ecPolicy;
private short groupSize;
public ErasureCodingPolicy getEcPolicy() {
return StripedFileTestUtil.getDefaultECPolicy();
}
@BeforeEach
public void setupCluster() throws IOException {
ecPolicy = getEcPolicy();
groupSize = (short) (ecPolicy.getNumDataUnits()
+ ecPolicy.getNumParityUnits());
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
cluster.waitActive();
fs = cluster.getFileSystem();
fs.enableErasureCodingPolicy(ecPolicy.getName());
}
@AfterEach
public void shutdownCluster() throws IOException {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/**
* Test correctness of successive snapshot creation and deletion with erasure
* coding policies. Create snapshot of ecDir's parent directory.
*/
@Test
public void testSnapshotsOnErasureCodingDirsParentDir() throws Exception {
final int len = 1024;
final Path ecDirParent = new Path("/parent");
final Path ecDir = new Path(ecDirParent, "ecdir");
final Path ecFile = new Path(ecDir, "ecfile");
fs.mkdirs(ecDir);
fs.allowSnapshot(ecDirParent);
// set erasure coding policy
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
String contents = DFSTestUtil.readFile(fs, ecFile);
final Path snap1 = fs.createSnapshot(ecDirParent, "snap1");
final Path snap1ECDir = new Path(snap1, ecDir.getName());
assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1ECDir),
"Got unexpected erasure coding policy");
// Now delete the dir which has erasure coding policy. Re-create the dir again, and
// take another snapshot
fs.delete(ecDir, true);
fs.mkdir(ecDir, FsPermission.getDirDefault());
final Path snap2 = fs.createSnapshot(ecDirParent, "snap2");
final Path snap2ECDir = new Path(snap2, ecDir.getName());
assertNull(fs.getErasureCodingPolicy(snap2ECDir),
"Expected null erasure coding policy");
// Make dir again with system default ec policy
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
final Path snap3 = fs.createSnapshot(ecDirParent, "snap3");
final Path snap3ECDir = new Path(snap3, ecDir.getName());
// Check that snap3's ECPolicy has the correct settings
ErasureCodingPolicy ezSnap3 = fs.getErasureCodingPolicy(snap3ECDir);
assertEquals(ecPolicy, ezSnap3, "Got unexpected erasure coding policy");
// Check that older snapshots still have the old ECPolicy settings
assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1ECDir),
"Got unexpected erasure coding policy");
assertNull(fs.getErasureCodingPolicy(snap2ECDir),
"Expected null erasure coding policy");
// Verify contents of the snapshotted file
final Path snapshottedECFile = new Path(snap1.toString() + "/"
+ ecDir.getName() + "/" + ecFile.getName());
assertEquals(contents, DFSTestUtil.readFile(fs, snapshottedECFile),
"Contents of snapshotted file have changed unexpectedly");
// Now delete the snapshots out of order and verify the EC policy
// correctness
fs.deleteSnapshot(ecDirParent, snap2.getName());
assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1ECDir),
"Got unexpected erasure coding policy");
assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap3ECDir),
"Got unexpected erasure coding policy");
fs.deleteSnapshot(ecDirParent, snap1.getName());
assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap3ECDir),
"Got unexpected erasure coding policy");
}
/**
* Test creation of snapshot on directory has erasure coding policy.
*/
@Test
public void testSnapshotsOnErasureCodingDir() throws Exception {
final Path ecDir = new Path("/ecdir");
fs.mkdirs(ecDir);
fs.allowSnapshot(ecDir);
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
final Path snap1 = fs.createSnapshot(ecDir, "snap1");
assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1),
"Got unexpected erasure coding policy");
}
/**
* Test verify erasure coding policy is present after restarting the NameNode.
*/
@Test
public void testSnapshotsOnErasureCodingDirAfterNNRestart() throws Exception {
final Path ecDir = new Path("/ecdir");
fs.mkdirs(ecDir);
fs.allowSnapshot(ecDir);
// set erasure coding policy
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
final Path snap1 = fs.createSnapshot(ecDir, "snap1");
ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap1);
assertEquals(ecPolicy, ecSnap, "Got unexpected erasure coding policy");
// save namespace, restart namenode, and check ec policy correctness.
fs.setSafeMode(SafeModeAction.ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.LEAVE);
cluster.restartNameNode(true);
ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(snap1);
assertEquals(ecPolicy, ecSnap1, "Got unexpected erasure coding policy");
assertEquals(ecSnap.getSchema(), ecSnap1.getSchema(), "Got unexpected ecSchema");
}
/**
* Test copy a snapshot will not preserve its erasure coding policy info.
*/
@Test
public void testCopySnapshotWillNotPreserveErasureCodingPolicy()
throws Exception {
final int len = 1024;
final Path ecDir = new Path("/ecdir");
final Path ecFile = new Path(ecDir, "ecFile");
fs.mkdirs(ecDir);
fs.allowSnapshot(ecDir);
// set erasure coding policy
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
final Path snap1 = fs.createSnapshot(ecDir, "snap1");
Path snap1Copy = new Path(ecDir.toString() + "-copy");
final Path snap1CopyECDir = new Path("/ecdir-copy");
String[] argv = new String[] { "-cp", "-px", snap1.toUri().toString(),
snap1Copy.toUri().toString() };
int ret = ToolRunner.run(new FsShell(conf), argv);
assertEquals(SUCCESS, ret, "cp -px is not working on a snapshot");
assertNull(fs.getErasureCodingPolicy(snap1CopyECDir),
"Got unexpected erasure coding policy");
assertEquals(ecPolicy, fs.getErasureCodingPolicy(snap1),
"Got unexpected erasure coding policy");
}
@Test
@Timeout(value = 300)
public void testFileStatusAcrossNNRestart() throws IOException {
final int len = 1024;
final Path normalFile = new Path("/", "normalFile");
DFSTestUtil.createFile(fs, normalFile, len, (short) 1, 0xFEED);
final Path ecDir = new Path("/ecdir");
final Path ecFile = new Path(ecDir, "ecFile");
fs.mkdirs(ecDir);
// Set erasure coding policy
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
// Verify FileStatus for normal and EC files
ContractTestUtils.assertNotErasureCoded(fs, normalFile);
ContractTestUtils.assertErasureCoded(fs, ecFile);
cluster.restartNameNode(true);
// Verify FileStatus for normal and EC files
ContractTestUtils.assertNotErasureCoded(fs, normalFile);
ContractTestUtils.assertErasureCoded(fs, ecFile);
}
@Test
public void testErasureCodingPolicyOnDotSnapshotDir() throws IOException {
final Path ecDir = new Path("/ecdir");
fs.mkdirs(ecDir);
fs.allowSnapshot(ecDir);
// set erasure coding policy and create snapshot
fs.setErasureCodingPolicy(ecDir, ecPolicy.getName());
final Path snap = fs.createSnapshot(ecDir, "snap1");
// verify the EC policy correctness
ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap);
assertEquals(ecPolicy, ecSnap, "Got unexpected erasure coding policy");
// verify the EC policy is null, not an exception
final Path ecDotSnapshotDir = new Path(ecDir, ".snapshot");
ErasureCodingPolicy ecSnap1 = fs.getErasureCodingPolicy(ecDotSnapshotDir);
assertNull(ecSnap1, "Got unexpected erasure coding policy");
}
/**
* Test creation of snapshot on directory which changes its
* erasure coding policy.
*/
@Test
public void testSnapshotsOnErasureCodingDirAfterECPolicyChanges()
throws Exception {
final Path ecDir = new Path("/ecdir");
fs.mkdirs(ecDir);
fs.allowSnapshot(ecDir);
final Path snap1 = fs.createSnapshot(ecDir, "snap1");
assertNull(fs.getErasureCodingPolicy(snap1), "Expected null erasure coding policy");
// Set erasure coding policy
final ErasureCodingPolicy ec63Policy = SystemErasureCodingPolicies
.getByID(SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
fs.setErasureCodingPolicy(ecDir, ec63Policy.getName());
final Path snap2 = fs.createSnapshot(ecDir, "snap2");
assertEquals(ec63Policy, fs.getErasureCodingPolicy(snap2),
"Got unexpected erasure coding policy");
// Verify the EC policy correctness after the unset operation
fs.unsetErasureCodingPolicy(ecDir);
final Path snap3 = fs.createSnapshot(ecDir, "snap3");
assertNull(fs.getErasureCodingPolicy(snap3), "Expected null erasure coding policy");
// Change the erasure coding policy and take another snapshot
final ErasureCodingPolicy ec32Policy = SystemErasureCodingPolicies
.getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
fs.enableErasureCodingPolicy(ec32Policy.getName());
fs.setErasureCodingPolicy(ecDir, ec32Policy.getName());
final Path snap4 = fs.createSnapshot(ecDir, "snap4");
assertEquals(ec32Policy, fs.getErasureCodingPolicy(snap4),
"Got unexpected erasure coding policy");
// Check that older snapshot still have the old ECPolicy settings
assertNull(fs.getErasureCodingPolicy(snap1), "Expected null erasure coding policy");
assertEquals(ec63Policy, fs.getErasureCodingPolicy(snap2),
"Got unexpected erasure coding policy");
assertNull(fs.getErasureCodingPolicy(snap3), "Expected null erasure coding policy");
}
}
| TestErasureCodingPolicyWithSnapshot |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-jackson/runtime/src/main/java/io/quarkus/resteasy/reactive/jackson/runtime/serialisers/vertx/VertxJsonObjectMessageBodyReader.java | {
"start": 595,
"end": 1174
} | class ____ extends VertxJsonObjectBasicMessageBodyReader
implements ServerMessageBodyReader<JsonObject> {
@Override
public boolean isReadable(Class<?> type, Type genericType, ResteasyReactiveResourceInfo lazyMethod, MediaType mediaType) {
return isReadable(type);
}
@Override
public JsonObject readFrom(Class<JsonObject> type, Type genericType, MediaType mediaType, ServerRequestContext context)
throws WebApplicationException, IOException {
return readFrom(context.getInputStream());
}
}
| VertxJsonObjectMessageBodyReader |
java | apache__logging-log4j2 | log4j-core-test/src/test/java/org/apache/logging/log4j/core/appender/db/jdbc/DataSourceConnectionSourceTest.java | {
"start": 1525,
"end": 4025
} | class ____ extends AbstractJdbcDataSourceTest {
@Parameterized.Parameters(name = "{0}")
public static Object[][] data() {
return new Object[][] {{"java:/comp/env/jdbc/Logging01"}, {"java:/comp/env/jdbc/Logging02"}};
}
private static final String CONFIG = "log4j-fatalOnly.xml";
@Rule
public final RuleChain rules;
private final DataSource dataSource = mock(DataSource.class);
private final String jndiURL;
public DataSourceConnectionSourceTest(final String jndiURL) {
this.rules = RuleChain.outerRule(new JndiRule(jndiURL, dataSource)).around(new LoggerContextRule(CONFIG));
this.jndiURL = jndiURL;
}
@Test
public void testNullJndiName() {
final DataSourceConnectionSource source = DataSourceConnectionSource.createConnectionSource(null);
assertNull("The connection source should be null.", source);
}
@Test
public void testEmptyJndiName() {
final DataSourceConnectionSource source = DataSourceConnectionSource.createConnectionSource("");
assertNull("The connection source should be null.", source);
}
@Test
public void testNoDataSource() {
final DataSourceConnectionSource source = DataSourceConnectionSource.createConnectionSource(jndiURL + "123");
assertNull("The connection source should be null.", source);
}
@Test
public void testDataSource() throws SQLException {
try (final Connection connection1 = mock(Connection.class);
final Connection connection2 = mock(Connection.class)) {
given(dataSource.getConnection()).willReturn(connection1, connection2);
DataSourceConnectionSource source = DataSourceConnectionSource.createConnectionSource(jndiURL);
assertNotNull("The connection source should not be null.", source);
assertEquals(
"The toString value is not correct.",
"dataSource{ name=" + jndiURL + ", value=" + dataSource + " }",
source.toString());
assertSame("The connection is not correct (1).", connection1, source.getConnection());
assertSame("The connection is not correct (2).", connection2, source.getConnection());
source = DataSourceConnectionSource.createConnectionSource(jndiURL.substring(0, jndiURL.length() - 1));
assertNull("The connection source should be null now.", source);
}
}
}
| DataSourceConnectionSourceTest |
java | spring-projects__spring-boot | buildSrc/src/test/java/org/springframework/boot/build/bom/bomr/version/MultipleComponentsDependencyVersionTests.java | {
"start": 903,
"end": 1757
} | class ____ {
@Test
void isSameMajorOfFiveComponentVersionWithSameMajorShouldReturnTrue() {
assertThat(version("21.4.0.0.1").isSameMajor(version("21.1.0.0"))).isTrue();
}
@Test
void isSameMajorOfFiveComponentVersionWithDifferentMajorShouldReturnFalse() {
assertThat(version("21.4.0.0.1").isSameMajor(version("22.1.0.0"))).isFalse();
}
@Test
void isSameMinorOfFiveComponentVersionWithSameMinorShouldReturnTrue() {
assertThat(version("21.4.0.0.1").isSameMinor(version("21.4.0.0"))).isTrue();
}
@Test
void isSameMinorOfFiveComponentVersionWithDifferentMinorShouldReturnFalse() {
assertThat(version("21.4.0.0.1").isSameMinor(version("21.5.0.0"))).isFalse();
}
private MultipleComponentsDependencyVersion version(String version) {
return MultipleComponentsDependencyVersion.parse(version);
}
}
| MultipleComponentsDependencyVersionTests |
java | apache__camel | dsl/camel-endpointdsl/src/generated/java/org/apache/camel/builder/endpoint/dsl/Sqs2EndpointBuilderFactory.java | {
"start": 120311,
"end": 122712
} | interface ____
extends
AdvancedSqs2EndpointConsumerBuilder,
AdvancedSqs2EndpointProducerBuilder {
default Sqs2EndpointBuilder basic() {
return (Sqs2EndpointBuilder) this;
}
/**
* To use the AmazonSQS client.
*
* The option is a:
* <code>software.amazon.awssdk.services.sqs.SqsClient</code> type.
*
* Group: advanced
*
* @param amazonSQSClient the value to set
* @return the dsl builder
*/
default AdvancedSqs2EndpointBuilder amazonSQSClient(software.amazon.awssdk.services.sqs.SqsClient amazonSQSClient) {
doSetProperty("amazonSQSClient", amazonSQSClient);
return this;
}
/**
* To use the AmazonSQS client.
*
* The option will be converted to a
* <code>software.amazon.awssdk.services.sqs.SqsClient</code> type.
*
* Group: advanced
*
* @param amazonSQSClient the value to set
* @return the dsl builder
*/
default AdvancedSqs2EndpointBuilder amazonSQSClient(String amazonSQSClient) {
doSetProperty("amazonSQSClient", amazonSQSClient);
return this;
}
/**
* Define if you want to apply delaySeconds option to the queue or on
* single messages.
*
* The option is a: <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param delayQueue the value to set
* @return the dsl builder
*/
default AdvancedSqs2EndpointBuilder delayQueue(boolean delayQueue) {
doSetProperty("delayQueue", delayQueue);
return this;
}
/**
* Define if you want to apply delaySeconds option to the queue or on
* single messages.
*
* The option will be converted to a <code>boolean</code> type.
*
* Default: false
* Group: advanced
*
* @param delayQueue the value to set
* @return the dsl builder
*/
default AdvancedSqs2EndpointBuilder delayQueue(String delayQueue) {
doSetProperty("delayQueue", delayQueue);
return this;
}
}
public | AdvancedSqs2EndpointBuilder |
java | apache__flink | flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/aggregate/PythonStreamGroupWindowAggregateOperator.java | {
"start": 3372,
"end": 23839
} | class ____<K, W extends Window>
extends AbstractPythonStreamAggregateOperator implements Triggerable<K, W> {
private static final long serialVersionUID = 1L;
@VisibleForTesting
static final String STREAM_GROUP_WINDOW_AGGREGATE_URN =
"flink:transform:stream_group_window_aggregate:v1";
@VisibleForTesting static final byte REGISTER_EVENT_TIMER = 0;
@VisibleForTesting static final byte REGISTER_PROCESSING_TIMER = 1;
@VisibleForTesting static final byte DELETE_EVENT_TIMER = 2;
@VisibleForTesting static final byte DELETE_PROCESSING_TIMER = 3;
/** True if the count(*) agg is inserted by the planner. */
private final boolean countStarInserted;
/** The row time index of the input data. */
@VisibleForTesting final int inputTimeFieldIndex;
/**
* The allowed lateness for elements. This is used for:
*
* <ul>
* <li>Deciding if an element should be dropped from a window due to lateness.
* <li>Clearing the state of a window if the system time passes the {@code window.maxTimestamp
* + allowedLateness} landmark.
* </ul>
*/
@VisibleForTesting final long allowedLateness;
/** The shift timeZone of the window. */
@VisibleForTesting final ZoneId shiftTimeZone;
/** The Infos of the Window. */
private FlinkFnApi.GroupWindow.WindowProperty[] namedProperties;
/** A {@link GroupWindowAssigner} assigns zero or more {@link Window Windows} to an element. */
@VisibleForTesting final GroupWindowAssigner<W> windowAssigner;
/** Window Type includes Tumble window, Sliding window and Session Window. */
private final FlinkFnApi.GroupWindow.WindowType windowType;
/** Whether it is a row time window. */
private final boolean isRowTime;
/** Whether it is a Time Window. */
private final boolean isTimeWindow;
/** Window size. */
private final long size;
/** Window slide. */
private final long slide;
/** Session Window gap. */
private final long gap;
/** For serializing the window in checkpoints. */
@VisibleForTesting transient TypeSerializer<W> windowSerializer;
/** Interface for working with time and timers. */
private transient InternalTimerService<W> internalTimerService;
private transient UpdatableRowData reuseTimerData;
private transient int timerDataLength;
private transient int keyLength;
private transient UpdatableRowData reuseRowData;
private transient UpdatableRowData reuseTimerRowData;
private transient RowDataSerializer keySerializer;
protected PythonStreamGroupWindowAggregateOperator(
Configuration config,
RowType inputType,
RowType outputType,
PythonAggregateFunctionInfo[] aggregateFunctions,
DataViewSpec[][] dataViewSpecs,
int[] grouping,
int indexOfCountStar,
boolean generateUpdateBefore,
boolean countStarInserted,
int inputTimeFieldIndex,
GroupWindowAssigner<W> windowAssigner,
FlinkFnApi.GroupWindow.WindowType windowType,
boolean isRowTime,
boolean isTimeWindow,
long size,
long slide,
long gap,
long allowedLateness,
NamedWindowProperty[] namedProperties,
ZoneId shiftTimeZone) {
super(
config,
inputType,
outputType,
aggregateFunctions,
dataViewSpecs,
grouping,
indexOfCountStar,
generateUpdateBefore);
this.countStarInserted = countStarInserted;
this.inputTimeFieldIndex = inputTimeFieldIndex;
this.windowAssigner = windowAssigner;
this.windowType = windowType;
this.isRowTime = isRowTime;
this.isTimeWindow = isTimeWindow;
this.size = size;
this.slide = slide;
this.gap = gap;
this.allowedLateness = allowedLateness;
this.shiftTimeZone = shiftTimeZone;
// Convert named properties
this.namedProperties = new FlinkFnApi.GroupWindow.WindowProperty[namedProperties.length];
for (int i = 0; i < namedProperties.length; i++) {
WindowProperty namedProperty = namedProperties[i].getProperty();
if (namedProperty instanceof WindowStart) {
this.namedProperties[i] = FlinkFnApi.GroupWindow.WindowProperty.WINDOW_START;
} else if (namedProperty instanceof WindowEnd) {
this.namedProperties[i] = FlinkFnApi.GroupWindow.WindowProperty.WINDOW_END;
} else if (namedProperty instanceof RowtimeAttribute) {
this.namedProperties[i] = FlinkFnApi.GroupWindow.WindowProperty.ROW_TIME_ATTRIBUTE;
} else if (namedProperty instanceof ProctimeAttribute) {
this.namedProperties[i] = FlinkFnApi.GroupWindow.WindowProperty.PROC_TIME_ATTRIBUTE;
} else {
throw new RuntimeException("Unexpected property " + namedProperty);
}
}
}
// The below static create methods are reflected from the planner
public static <K, W extends Window>
PythonStreamGroupWindowAggregateOperator<K, W>
createTumblingGroupWindowAggregateOperator(
Configuration config,
RowType inputType,
RowType outputType,
PythonAggregateFunctionInfo[] aggregateFunctions,
DataViewSpec[][] dataViewSpecs,
int[] grouping,
int indexOfCountStar,
boolean generateUpdateBefore,
boolean countStarInserted,
int inputTimeFieldIndex,
GroupWindowAssigner<W> windowAssigner,
boolean isRowTime,
boolean isTimeWindow,
long size,
long allowedLateness,
NamedWindowProperty[] namedProperties,
ZoneId shiftTimeZone) {
return new PythonStreamGroupWindowAggregateOperator<>(
config,
inputType,
outputType,
aggregateFunctions,
dataViewSpecs,
grouping,
indexOfCountStar,
generateUpdateBefore,
countStarInserted,
inputTimeFieldIndex,
windowAssigner,
FlinkFnApi.GroupWindow.WindowType.TUMBLING_GROUP_WINDOW,
isRowTime,
isTimeWindow,
size,
0,
0,
allowedLateness,
namedProperties,
shiftTimeZone);
}
public static <K, W extends Window>
PythonStreamGroupWindowAggregateOperator<K, W>
createSlidingGroupWindowAggregateOperator(
Configuration config,
RowType inputType,
RowType outputType,
PythonAggregateFunctionInfo[] aggregateFunctions,
DataViewSpec[][] dataViewSpecs,
int[] grouping,
int indexOfCountStar,
boolean generateUpdateBefore,
boolean countStarInserted,
int inputTimeFieldIndex,
GroupWindowAssigner<W> windowAssigner,
boolean isRowTime,
boolean isTimeWindow,
long size,
long slide,
long allowedLateness,
NamedWindowProperty[] namedProperties,
ZoneId shiftTimeZone) {
return new PythonStreamGroupWindowAggregateOperator<>(
config,
inputType,
outputType,
aggregateFunctions,
dataViewSpecs,
grouping,
indexOfCountStar,
generateUpdateBefore,
countStarInserted,
inputTimeFieldIndex,
windowAssigner,
FlinkFnApi.GroupWindow.WindowType.SLIDING_GROUP_WINDOW,
isRowTime,
isTimeWindow,
size,
slide,
0,
allowedLateness,
namedProperties,
shiftTimeZone);
}
public static <K, W extends Window>
PythonStreamGroupWindowAggregateOperator<K, W>
createSessionGroupWindowAggregateOperator(
Configuration config,
RowType inputType,
RowType outputType,
PythonAggregateFunctionInfo[] aggregateFunctions,
DataViewSpec[][] dataViewSpecs,
int[] grouping,
int indexOfCountStar,
boolean generateUpdateBefore,
boolean countStarInserted,
int inputTimeFieldIndex,
GroupWindowAssigner<W> windowAssigner,
boolean isRowTime,
long gap,
long allowedLateness,
NamedWindowProperty[] namedProperties,
ZoneId shiftTimeZone) {
return new PythonStreamGroupWindowAggregateOperator<>(
config,
inputType,
outputType,
aggregateFunctions,
dataViewSpecs,
grouping,
indexOfCountStar,
generateUpdateBefore,
countStarInserted,
inputTimeFieldIndex,
windowAssigner,
FlinkFnApi.GroupWindow.WindowType.SESSION_GROUP_WINDOW,
isRowTime,
true,
0,
0,
gap,
allowedLateness,
namedProperties,
shiftTimeZone);
}
@Override
public void open() throws Exception {
windowSerializer = windowAssigner.getWindowSerializer(new ExecutionConfig());
internalTimerService = getInternalTimerService("window-timers", windowSerializer, this);
// The structure is: [type]|[normal record]|[timestamp]|[current watermark]|[timer data]
// If the type is 'NORMAL_RECORD', store the RowData object in the 2nd column.
// If the type is 'TRIGGER_TIMER', store the timestamp in 3rd column and the timer
// data in 5th column.
reuseRowData =
new UpdatableRowData(GenericRowData.of(NORMAL_RECORD, null, null, null, null), 5);
reuseTimerRowData =
new UpdatableRowData(GenericRowData.of(TRIGGER_TIMER, null, null, null, null), 5);
// The structure is: [timer_type]|[row key]|[encoded namespace]
reuseTimerData = new UpdatableRowData(GenericRowData.of(0, null, 0), 3);
reuseTimerRowData.setField(4, reuseTimerData);
keyLength = getKeyType().getFieldCount();
keySerializer = (RowDataSerializer) getKeySerializer();
super.open();
}
@Override
public void processElementInternal(RowData value) throws Exception {
reuseRowData.setField(1, value);
reuseRowData.setLong(3, internalTimerService.currentWatermark());
udfInputTypeSerializer.serialize(reuseRowData, baosWrapper);
pythonFunctionRunner.process(baos.toByteArray());
baos.reset();
}
@Override
public void emitResult(Tuple3<String, byte[], Integer> resultTuple) throws Exception {
byte[] rawUdfResult = resultTuple.f1;
int length = resultTuple.f2;
bais.setBuffer(rawUdfResult, 0, length);
RowData udfResult = udfOutputTypeSerializer.deserialize(baisWrapper);
byte recordType = udfResult.getByte(0);
if (recordType == NORMAL_RECORD) {
GenericRowData aggResult =
(GenericRowData) udfResult.getRow(1, outputType.getFieldCount());
int fieldCount = outputType.getFieldCount();
for (int i = fieldCount - namedProperties.length; i < fieldCount; i++) {
FlinkFnApi.GroupWindow.WindowProperty namedProperty =
namedProperties[i - (fieldCount - namedProperties.length)];
if (namedProperty == WINDOW_START || namedProperty == WINDOW_END) {
aggResult.setField(i, TimestampData.fromEpochMillis(aggResult.getLong(i)));
} else {
aggResult.setField(
i,
TimestampData.fromEpochMillis(
getShiftEpochMills(aggResult.getLong(i))));
}
}
rowDataWrapper.collect(aggResult);
} else {
RowData timerData = udfResult.getRow(2, timerDataLength);
byte timerOperandType = timerData.getByte(0);
RowData key = timerData.getRow(1, keyLength);
long timestamp = timerData.getLong(2);
W window;
byte[] encodedNamespace = timerData.getBinary(3);
bais.setBuffer(encodedNamespace, 0, encodedNamespace.length);
window = windowSerializer.deserialize(baisWrapper);
BinaryRowData rowKey = keySerializer.toBinaryRow(key).copy();
synchronized (getKeyedStateBackend()) {
setCurrentKey(rowKey);
if (timerOperandType == REGISTER_EVENT_TIMER) {
internalTimerService.registerEventTimeTimer(
window, toEpochMillsForTimer(timestamp, shiftTimeZone));
} else if (timerOperandType == REGISTER_PROCESSING_TIMER) {
internalTimerService.registerProcessingTimeTimer(
window, toEpochMillsForTimer(timestamp, shiftTimeZone));
} else if (timerOperandType == DELETE_EVENT_TIMER) {
internalTimerService.deleteEventTimeTimer(
window, toEpochMillsForTimer(timestamp, shiftTimeZone));
} else if (timerOperandType == DELETE_PROCESSING_TIMER) {
internalTimerService.deleteProcessingTimeTimer(
window, toEpochMillsForTimer(timestamp, shiftTimeZone));
} else {
throw new RuntimeException(
String.format("Unsupported timerOperandType %s.", timerOperandType));
}
}
}
}
@Override
public String getFunctionUrn() {
return STREAM_GROUP_WINDOW_AGGREGATE_URN;
}
@Override
public RowType createUserDefinedFunctionInputType() {
List<RowType.RowField> inputFields = new ArrayList<>();
inputFields.add(new RowType.RowField("record_type", new TinyIntType()));
inputFields.add(new RowType.RowField("row_data", inputType));
inputFields.add(new RowType.RowField("timestamp", new BigIntType()));
inputFields.add(new RowType.RowField("watermark", new BigIntType()));
List<RowType.RowField> timerDataFields = new ArrayList<>();
timerDataFields.add(new RowType.RowField("timer_type", new TinyIntType()));
timerDataFields.add(new RowType.RowField("key", getKeyType()));
timerDataFields.add(new RowType.RowField("encoded_namespace", new BinaryType()));
inputFields.add(new RowType.RowField("timer", new RowType(timerDataFields)));
return new RowType(inputFields);
}
@Override
public RowType createUserDefinedFunctionOutputType() {
List<RowType.RowField> outputFields = new ArrayList<>();
outputFields.add(new RowType.RowField("record_type", new TinyIntType()));
List<RowType.RowField> resultFields =
new ArrayList<>(
outputType
.getFields()
.subList(0, outputType.getFieldCount() - namedProperties.length));
for (int i = 0; i < namedProperties.length; i++) {
resultFields.add(new RowType.RowField("w" + i, new BigIntType()));
}
outputFields.add(new RowType.RowField("row_data", new RowType(resultFields)));
List<RowType.RowField> timerDataFields = new ArrayList<>();
timerDataFields.add(new RowType.RowField("timer_operand_type", new TinyIntType()));
timerDataFields.add(new RowType.RowField("key", getKeyType()));
timerDataFields.add(new RowType.RowField("timestamp", new BigIntType()));
timerDataFields.add(new RowType.RowField("encoded_namespace", new BinaryType()));
timerDataLength = timerDataFields.size();
outputFields.add(new RowType.RowField("timer", new RowType(timerDataFields)));
return new RowType(outputFields);
}
@Override
protected FlinkFnApi.UserDefinedAggregateFunctions getUserDefinedFunctionsProto() {
FlinkFnApi.UserDefinedAggregateFunctions.Builder builder =
super.getUserDefinedFunctionsProto().toBuilder();
builder.setCountStarInserted(countStarInserted);
FlinkFnApi.GroupWindow.Builder windowBuilder = FlinkFnApi.GroupWindow.newBuilder();
windowBuilder.setWindowType(windowType);
windowBuilder.setIsTimeWindow(isTimeWindow);
windowBuilder.setIsRowTime(isRowTime);
windowBuilder.setTimeFieldIndex(inputTimeFieldIndex);
windowBuilder.setWindowSize(size);
windowBuilder.setWindowSlide(slide);
windowBuilder.setWindowGap(gap);
windowBuilder.setAllowedLateness(allowedLateness);
for (FlinkFnApi.GroupWindow.WindowProperty namedProperty : namedProperties) {
windowBuilder.addNamedProperties(namedProperty);
}
windowBuilder.setShiftTimezone(shiftTimeZone.getId());
builder.setGroupWindow(windowBuilder);
return builder.build();
}
@Override
public TypeSerializer<W> getWindowSerializer() {
return windowSerializer;
}
@Override
public void onEventTime(InternalTimer<K, W> timer) throws Exception {
emitTriggerTimerData(timer, REGISTER_EVENT_TIMER);
}
@Override
public void onProcessingTime(InternalTimer<K, W> timer) throws Exception {
emitTriggerTimerData(timer, REGISTER_PROCESSING_TIMER);
}
@Override
public FlinkFnApi.CoderInfoDescriptor createInputCoderInfoDescriptor(RowType runnerInputType) {
return createFlattenRowTypeCoderInfoDescriptorProto(
runnerInputType, FlinkFnApi.CoderInfoDescriptor.Mode.MULTIPLE, false);
}
@Override
public FlinkFnApi.CoderInfoDescriptor createOutputCoderInfoDescriptor(RowType runnerOutType) {
return createFlattenRowTypeCoderInfoDescriptorProto(
runnerOutType, FlinkFnApi.CoderInfoDescriptor.Mode.MULTIPLE, false);
}
@VisibleForTesting
long getShiftEpochMills(long utcTimestampMills) {
return TimeWindowUtil.toEpochMills(utcTimestampMills, shiftTimeZone);
}
private void emitTriggerTimerData(InternalTimer<K, W> timer, byte processingTimer)
throws Exception {
reuseTimerData.setByte(0, processingTimer);
reuseTimerData.setField(1, timer.getKey());
// serialize namespace
W window = timer.getNamespace();
windowSerializer.serialize(window, baosWrapper);
reuseTimerData.setField(2, baos.toByteArray());
baos.reset();
reuseTimerRowData.setLong(2, toUtcTimestampMills(timer.getTimestamp(), shiftTimeZone));
udfInputTypeSerializer.serialize(reuseTimerRowData, baosWrapper);
pythonFunctionRunner.process(baos.toByteArray());
baos.reset();
elementCount++;
checkInvokeFinishBundleByCount();
emitResults();
}
}
| PythonStreamGroupWindowAggregateOperator |
java | quarkusio__quarkus | extensions/websockets-next/runtime/src/main/java/io/quarkus/websockets/next/runtime/config/TelemetryConfig.java | {
"start": 196,
"end": 676
} | interface ____ {
/**
* If collection of WebSocket traces is enabled.
* Only applicable when the OpenTelemetry extension is present.
*/
@WithName("traces.enabled")
@WithDefault("true")
boolean tracesEnabled();
/**
* If collection of WebSocket metrics is enabled.
* Only applicable when the Micrometer extension is present.
*/
@WithName("metrics.enabled")
@WithDefault("false")
boolean metricsEnabled();
}
| TelemetryConfig |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/query/NamedQueryDefinition.java | {
"start": 555,
"end": 1137
} | interface ____<E> extends TypedQueryReference<E> {
@Override
default String getName() {
return getRegistrationName();
}
/**
* The name under which the query is to be registered.
*/
String getRegistrationName();
/**
* The expected result type of the query, or {@code null}.
*/
@Nullable
Class<E> getResultType();
/**
* Resolve the mapping definition into its run-time memento form.
*/
NamedQueryMemento<E> resolve(SessionFactoryImplementor factory);
/**
* The location at which the defining named query annotation occurs,
* usually a | NamedQueryDefinition |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/api/Int2DArrayAssertBaseTest.java | {
"start": 839,
"end": 1362
} | class ____ extends BaseTestTemplate<Int2DArrayAssert, int[][]> {
protected Int2DArrays arrays;
@Override
protected Int2DArrayAssert create_assertions() {
return new Int2DArrayAssert(new int[][] {});
}
@Override
protected void inject_internal_objects() {
super.inject_internal_objects();
arrays = mock(Int2DArrays.class);
assertions.int2dArrays = arrays;
}
protected Int2DArrays getArrays(Int2DArrayAssert someAssertions) {
return someAssertions.int2dArrays;
}
}
| Int2DArrayAssertBaseTest |
java | apache__camel | components/camel-http/src/test/java/org/apache/camel/component/http/HttpProducerFileUploadTest.java | {
"start": 1351,
"end": 3097
} | class ____ extends BaseHttpTest {
private HttpServer localServer;
private String endpointUrl;
@Override
public void setupResources() throws Exception {
localServer = ServerBootstrap.bootstrap()
.setCanonicalHostName("localhost").setHttpProcessor(getBasicHttpProcessor())
.setConnectionReuseStrategy(getConnectionReuseStrategy()).setResponseFactory(getHttpResponseFactory())
.setSslContext(getSSLContext())
.register("/upload", (request, response, context) -> {
var e = request.getEntity();
var arr = e.getContent().readAllBytes();
long len = arr.length;
String name = request.getHeader("fileName").getValue();
response.setEntity(new StringEntity(name + ";" + len));
response.setCode(HttpStatus.SC_OK);
}).create();
localServer.start();
endpointUrl = "http://localhost:" + localServer.getLocalPort();
}
@Override
public void cleanupResources() throws Exception {
if (localServer != null) {
localServer.stop();
}
}
@Test
public void testFileUpload() {
File f = new File("src/test/resources/log4j2.properties");
Exchange out = template.request(endpointUrl + "/upload", exchange -> {
exchange.getMessage().setHeader("fileName", "log4j2.properties");
exchange.getMessage().setBody(f);
});
assertNotNull(out);
assertFalse(out.isFailed(), "Should not fail");
assertEquals("log4j2.properties;" + f.length(), out.getMessage().getBody(String.class));
}
}
| HttpProducerFileUploadTest |
java | alibaba__druid | core/src/test/java/com/alibaba/druid/bvt/sql/odps/OdpsFormatCommentTest29.java | {
"start": 121,
"end": 583
} | class ____ extends TestCase {
public void test_drop_function() throws Exception {
String sql = "create table xxxx001( --测试"
+ "\ncol string, --测试2"
+ "\ncol2 string --测试3"
+ "\n)";
assertEquals("CREATE TABLE xxxx001 ( -- 测试"
+ "\n\tcol STRING, -- 测试2"
+ "\n\tcol2 STRING -- 测试3"
+ "\n)", SQLUtils.formatOdps(sql));
}
}
| OdpsFormatCommentTest29 |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/script/IpFieldScript.java | {
"start": 6126,
"end": 6405
} | class ____ {
private final IpFieldScript script;
public Emit(IpFieldScript script) {
this.script = script;
}
public void emit(String v) {
script.checkMaxSize(script.count());
script.emit(v);
}
}
}
| Emit |
java | bumptech__glide | library/src/main/java/com/bumptech/glide/load/engine/Resource.java | {
"start": 515,
"end": 1935
} | class ____ in fact
* it is often appropriate to return a new instance for each call. For example, {@link
* android.graphics.drawable.Drawable Drawable}s should only be used by a single {@link
* android.view.View View} at a time so each call to this method for Resources that wrap {@link
* android.graphics.drawable.Drawable Drawable}s should always return a new {@link
* android.graphics.drawable.Drawable Drawable}.
*/
@NonNull
Z get();
/**
* Returns the size in bytes of the wrapped resource to use to determine how much of the memory
* cache this resource uses.
*/
int getSize();
/**
* Cleans up and recycles internal resources.
*
* <p>It is only safe to call this method if there are no current resource consumers and if this
* method has not yet been called. Typically this occurs at one of two times:
*
* <ul>
* <li>During a resource load when the resource is transformed or transcoded before any consumer
* have ever had access to this resource
* <li>After all consumers have released this resource and it has been evicted from the cache
* </ul>
*
* For most users of this class, the only time this method should ever be called is during
* transformations or transcoders, the framework will call this method when all consumers have
* released this resource and it has been evicted from the cache.
*/
void recycle();
}
| and |
java | apache__avro | lang/java/grpc/src/main/java/org/apache/avro/grpc/AvroGrpcUtils.java | {
"start": 1125,
"end": 1328
} | class ____ {
private static final Logger LOG = Logger.getLogger(AvroGrpcUtils.class.getName());
private AvroGrpcUtils() {
}
/**
* Provides a a unique gRPC service name for Avro RPC | AvroGrpcUtils |
java | apache__camel | components/camel-zeebe/src/main/java/org/apache/camel/component/zeebe/internal/OperationName.java | {
"start": 862,
"end": 1662
} | enum ____ {
START_PROCESS("startProcess"),
CANCEL_PROCESS("cancelProcess"),
PUBLISH_MESSAGE("publishMessage"),
COMPLETE_JOB("completeJob"),
FAIL_JOB("failJob"),
UPDATE_JOB_RETRIES("updateJobRetries"),
REGISTER_JOB_WORKER("worker"),
THROW_ERROR("throwError"),
DEPLOY_RESOURCE("deployResource");
private final String value;
OperationName(String value) {
this.value = value;
}
public String value() {
return value;
}
public static OperationName fromValue(String value) {
for (OperationName operationName : OperationName.values()) {
if (operationName.value.equals(value)) {
return operationName;
}
}
throw new IllegalArgumentException(value);
}
}
| OperationName |
java | mapstruct__mapstruct | processor/src/test/java/org/mapstruct/ap/test/bugs/_3806/Issue3806Mapper.java | {
"start": 547,
"end": 719
} | interface ____ {
Issue3806Mapper INSTANCE = Mappers.getMapper( Issue3806Mapper.class );
void update(@MappingTarget Target target, Target source);
| Issue3806Mapper |
java | spring-projects__spring-framework | framework-docs/src/main/java/org/springframework/docs/web/websocket/stomp/websocketstompconfigurationperformance/WebSocketConfiguration.java | {
"start": 1267,
"end": 1554
} | class ____ implements WebSocketMessageBrokerConfigurer {
@Override
public void configureWebSocketTransport(WebSocketTransportRegistration registration) {
registration.setSendTimeLimit(15 * 1000).setSendBufferSizeLimit(512 * 1024);
}
// ...
}
// end::snippet[]
| WebSocketConfiguration |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/boot/model/internal/CollectionBinder.java | {
"start": 6921,
"end": 16114
} | class ____ {
private static final List<Class<?>> INFERRED_CLASS_PRIORITY = List.of(
List.class,
java.util.SortedSet.class,
java.util.Set.class,
java.util.SortedMap.class,
Map.class,
java.util.Collection.class
);
final MetadataBuildingContext buildingContext;
private final Supplier<ManagedBean<? extends UserCollectionType>> customTypeBeanResolver;
private final boolean isSortedCollection;
protected Collection collection;
protected String propertyName;
protected PropertyHolder propertyHolder;
private String mappedBy;
protected ClassDetails declaringClass;
protected MemberDetails property;
private TypeDetails collectionElementType;
private TypeDetails targetEntity;
private EnumSet<CascadeType> cascadeTypes;
private String cacheConcurrencyStrategy;
private String cacheRegionName;
private CacheLayout queryCacheLayout;
private boolean oneToMany;
protected IndexColumn indexColumn;
protected OnDeleteAction onDeleteAction;
protected boolean hasMapKeyProperty;
protected String mapKeyPropertyName;
private boolean insertable = true;
private boolean updatable = true;
protected AnnotatedJoinColumns inverseJoinColumns;
protected AnnotatedJoinColumns foreignJoinColumns;
private AnnotatedJoinColumns joinColumns;
private boolean isExplicitAssociationTable;
private AnnotatedColumns elementColumns;
protected boolean isEmbedded;
protected NotFoundAction notFoundAction;
private TableBinder tableBinder;
protected AnnotatedColumns mapKeyColumns;
protected AnnotatedJoinColumns mapKeyManyToManyColumns;
protected Map<String, IdentifierGeneratorDefinition> localGenerators;
protected Map<ClassDetails, InheritanceState> inheritanceStatePerClass;
private boolean declaringClassSet;
private AccessType accessType;
private boolean hibernateExtensionMapping;
private jakarta.persistence.OrderBy jpaOrderBy;
private SQLOrder sqlOrder;
private SortNatural naturalSort;
private SortComparator comparatorSort;
protected CollectionBinder(
Supplier<ManagedBean<? extends UserCollectionType>> customTypeBeanResolver,
boolean isSortedCollection,
MetadataBuildingContext buildingContext) {
this.customTypeBeanResolver = customTypeBeanResolver;
this.isSortedCollection = isSortedCollection;
this.buildingContext = buildingContext;
}
private String getRole() {
return collection.getRole();
}
private InFlightMetadataCollector getMetadataCollector() {
return buildingContext.getMetadataCollector();
}
/**
* The first pass at binding a collection.
*/
public static void bindCollection(
PropertyHolder propertyHolder,
Nullability nullability,
PropertyData inferredData,
EntityBinder entityBinder,
boolean isIdentifierMapper,
MetadataBuildingContext context,
Map<ClassDetails, InheritanceState> inheritanceStatePerClass,
AnnotatedJoinColumns joinColumns) {
final var modelsContext = context.getBootstrapContext().getModelsContext();
final var memberDetails = inferredData.getAttributeMember();
final var oneToManyAnn = memberDetails.getAnnotationUsage( OneToMany.class, modelsContext );
final var manyToManyAnn = memberDetails.getAnnotationUsage( ManyToMany.class, modelsContext );
final var elementCollectionAnn = memberDetails.getAnnotationUsage( ElementCollection.class, modelsContext );
checkAnnotations( propertyHolder, inferredData, memberDetails, oneToManyAnn, manyToManyAnn, elementCollectionAnn );
final var collectionBinder = getCollectionBinder( memberDetails, hasMapKeyAnnotation( memberDetails ), context );
collectionBinder.setIndexColumn( getIndexColumn( propertyHolder, inferredData, entityBinder, context ) );
collectionBinder.setMapKey( memberDetails.getAnnotationUsage( MapKey.class, modelsContext ) );
collectionBinder.setPropertyName( inferredData.getPropertyName() );
collectionBinder.setJpaOrderBy( memberDetails.getAnnotationUsage( OrderBy.class, modelsContext ) );
collectionBinder.setSqlOrder( getOverridableAnnotation( memberDetails, SQLOrder.class, context ) );
collectionBinder.setNaturalSort( memberDetails.getAnnotationUsage( SortNatural.class, modelsContext ) );
collectionBinder.setComparatorSort( memberDetails.getAnnotationUsage( SortComparator.class, modelsContext ) );
collectionBinder.setCache( memberDetails.getAnnotationUsage( Cache.class, modelsContext ) );
collectionBinder.setQueryCacheLayout( memberDetails.getAnnotationUsage( QueryCacheLayout.class, modelsContext ) );
collectionBinder.setPropertyHolder(propertyHolder);
collectionBinder.setNotFoundAction( notFoundAction( propertyHolder, inferredData, memberDetails, manyToManyAnn, modelsContext ) );
collectionBinder.setElementType( inferredData.getClassOrElementType() );
collectionBinder.setAccessType( inferredData.getDefaultAccess() );
collectionBinder.setEmbedded( memberDetails.hasAnnotationUsage( Embedded.class, modelsContext ) );
collectionBinder.setProperty( memberDetails );
collectionBinder.setOnDeleteActionAction( onDeleteAction( memberDetails ) );
collectionBinder.setInheritanceStatePerClass( inheritanceStatePerClass );
collectionBinder.setDeclaringClass( inferredData.getDeclaringClass() );
final var hibernateCascade = memberDetails.getAnnotationUsage( Cascade.class, modelsContext );
collectionBinder.setElementColumns( elementColumns(
propertyHolder,
nullability,
entityBinder,
context,
memberDetails,
virtualPropertyData( inferredData, memberDetails )
// comment
) );
collectionBinder.setMapKeyColumns( mapKeyColumns(
propertyHolder,
inferredData,
entityBinder,
context,
memberDetails
) );
collectionBinder.setMapKeyManyToManyColumns( mapKeyJoinColumns(
propertyHolder,
inferredData,
entityBinder,
context,
memberDetails
) );
bindJoinedTableAssociation(
memberDetails,
context,
entityBinder,
collectionBinder,
propertyHolder,
inferredData,
handleTargetEntity(
propertyHolder,
inferredData,
context,
memberDetails,
joinColumns,
oneToManyAnn,
manyToManyAnn,
elementCollectionAnn,
collectionBinder,
hibernateCascade
)
);
if ( isIdentifierMapper ) {
collectionBinder.setInsertable( false );
collectionBinder.setUpdatable( false );
}
if ( memberDetails.hasAnnotationUsage( CollectionId.class, modelsContext ) ) {
//do not compute the generators unless necessary
final HashMap<String, IdentifierGeneratorDefinition> availableGenerators = new HashMap<>();
visitIdGeneratorDefinitions(
memberDetails.getDeclaringType(),
definition -> {
if ( !definition.getName().isEmpty() ) {
availableGenerators.put( definition.getName(), definition );
}
},
context
);
visitIdGeneratorDefinitions(
memberDetails,
definition -> {
if ( !definition.getName().isEmpty() ) {
availableGenerators.put( definition.getName(), definition );
}
},
context
);
collectionBinder.setLocalGenerators( availableGenerators );
}
collectionBinder.bind();
}
private static NotFoundAction notFoundAction(
PropertyHolder propertyHolder,
PropertyData inferredData,
MemberDetails property,
ManyToMany manyToManyAnn,
ModelsContext sourceModelContext) {
final var notFound = property.getAnnotationUsage( NotFound.class, sourceModelContext );
if ( notFound != null ) {
if ( manyToManyAnn == null ) {
throw new AnnotationException( "Collection '" + getPath(propertyHolder, inferredData)
+ "' annotated '@NotFound' is not a '@ManyToMany' association" );
}
return notFound.action();
}
else {
return null;
}
}
private static AnnotatedJoinColumns mapKeyJoinColumns(
PropertyHolder propertyHolder,
PropertyData inferredData,
EntityBinder entityBinder,
MetadataBuildingContext context,
MemberDetails property) {
return buildJoinColumnsWithDefaultColumnSuffix(
mapKeyJoinColumnAnnotations( property, context ),
null,
entityBinder.getSecondaryTables(),
propertyHolder,
inferredData,
"_KEY",
context
);
}
private static OnDeleteAction onDeleteAction(MemberDetails property) {
final var onDelete = property.getDirectAnnotationUsage( OnDelete.class );
return onDelete == null ? null : onDelete.action();
}
private static PropertyData virtualPropertyData(PropertyData inferredData, MemberDetails property) {
//do not use "element" if you are a JPA 2 @ElementCollection, only for legacy Hibernate mappings
return property.hasDirectAnnotationUsage( ElementCollection.class )
? inferredData
: new WrappedInferredData(inferredData, "element" );
}
private static void checkAnnotations(
PropertyHolder propertyHolder,
PropertyData inferredData,
MemberDetails property,
OneToMany oneToMany,
ManyToMany manyToMany,
ElementCollection elementCollection) {
if ( ( oneToMany != null || manyToMany != null || elementCollection != null )
&& isToManyAssociationWithinEmbeddableCollection( propertyHolder ) ) {
throw new AnnotationException( "Property '" + getPath( propertyHolder, inferredData ) +
"' belongs to an '@Embeddable' | CollectionBinder |
java | apache__camel | components/camel-cxf/camel-cxf-spring-soap/src/test/java/org/apache/camel/component/cxf/soap/headers/CxfMessageHeadersRelayTest.java | {
"start": 35043,
"end": 36534
} | class ____ implements Processor {
@Override
public void process(Exchange exchange) throws Exception {
// You should be able to get the header if exchange is routed from camel-cxf endpoint
List<SoapHeader> soapHeaders = CastUtils.cast((List<?>) exchange.getIn().getHeader(Header.HEADER_LIST));
if (soapHeaders == null) {
// we just create a new soap headers in case the header is null
soapHeaders = new ArrayList<>();
}
// Insert a new header
String xml = "<?xml version=\"1.0\" encoding=\"utf-8\"?><outofbandHeader "
+ "xmlns=\"http://cxf.apache.org/outofband/Header\" hdrAttribute=\"testHdrAttribute\" "
+ "xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope/\" soap:mustUnderstand=\"1\">"
+ "<name>New_testOobHeader</name><value>New_testOobHeaderValue</value></outofbandHeader>";
SoapHeader newHeader = new SoapHeader(
soapHeaders.get(0).getName(),
StaxUtils.read(new StringReader(xml)).getDocumentElement());
// make sure direction is OUT since it is a response message.
newHeader.setDirection(Direction.DIRECTION_OUT);
//newHeader.setMustUnderstand(false);
soapHeaders.add(newHeader);
}
}
// END SNIPPET: InsertResponseOutHeaderProcessor
}
| InsertResponseOutHeaderProcessor |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/join/TestJoinDatamerge.java | {
"start": 5019,
"end": 5810
} | class ____
extends SimpleCheckerMapBase<TupleWritable> {
public void map(IntWritable key, TupleWritable val, Context context)
throws IOException, InterruptedException {
int k = key.get();
final String kvstr = "Unexpected tuple: " + stringify(key, val);
assertTrue(0 == k % (srcs * srcs), kvstr);
for (int i = 0; i < val.size(); ++i) {
final int vali = ((IntWritable)val.get(i)).get();
assertTrue((vali - i) * srcs == 10 * k, kvstr);
}
context.write(key, one);
// If the user modifies the key or any of the values in the tuple, it
// should not affect the rest of the join.
key.set(-1);
if (val.has(0)) {
((IntWritable)val.get(0)).set(0);
}
}
}
private static | InnerJoinMapChecker |
java | elastic__elasticsearch | x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/mapper/TDigestFieldMapper.java | {
"start": 21009,
"end": 22735
} | class ____ extends HistogramValue {
double value;
long count;
boolean isExhausted;
final ByteArrayStreamInput streamInput;
InternalTDigestValue() {
streamInput = new ByteArrayStreamInput();
}
/** reset the value for the histogram */
void reset(BytesRef bytesRef) throws IOException {
streamInput.reset(bytesRef.bytes, bytesRef.offset, bytesRef.length);
isExhausted = false;
value = 0;
count = 0;
}
@Override
public boolean next() throws IOException {
if (streamInput.available() > 0) {
count = streamInput.readVLong();
value = streamInput.readDouble();
return true;
}
isExhausted = true;
return false;
}
@Override
public double value() {
if (isExhausted) {
throw new IllegalArgumentException("histogram already exhausted");
}
return value;
}
@Override
public long count() {
if (isExhausted) {
throw new IllegalArgumentException("histogram already exhausted");
}
return count;
}
}
@Override
protected SyntheticSourceSupport syntheticSourceSupport() {
return new SyntheticSourceSupport.Native(
() -> new CompositeSyntheticFieldLoader(
leafName(),
fullPath(),
new TDigestSyntheticFieldLoader(),
new CompositeSyntheticFieldLoader.MalformedValuesLayer(fullPath())
)
);
}
private | InternalTDigestValue |
java | elastic__elasticsearch | modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/RestPutDatabaseConfigurationAction.java | {
"start": 1181,
"end": 2114
} | class ____ extends BaseRestHandler {
@Override
public List<Route> routes() {
return List.of(new Route(PUT, "/_ingest/ip_location/database/{id}"), new Route(PUT, "/_ingest/geoip/database/{id}"));
}
@Override
public String getName() {
return "geoip_put_database_configuration";
}
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
final Request req;
try (var parser = request.contentParser()) {
req = PutDatabaseConfigurationAction.Request.parseRequest(
getMasterNodeTimeout(request),
getAckTimeout(request),
request.param("id"),
parser
);
}
return channel -> client.execute(PutDatabaseConfigurationAction.INSTANCE, req, new RestToXContentListener<>(channel));
}
}
| RestPutDatabaseConfigurationAction |
java | apache__camel | components/camel-ftp/src/test/java/org/apache/camel/component/file/remote/integration/FromFtpRemoteFileSorterIT.java | {
"start": 2517,
"end": 2789
} | class ____ implements Comparator<RemoteFile<?>> {
@Override
public int compare(RemoteFile<?> o1, RemoteFile<?> o2) {
return o1.getFileNameOnly().compareToIgnoreCase(o2.getFileNameOnly());
}
}
// END SNIPPET: e1
}
| MyRemoteFileSorter |
java | elastic__elasticsearch | modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java | {
"start": 6394,
"end": 8505
} | class ____ extends SingleShardRequest<Request> implements ToXContentObject, IndicesRequest.SingleIndexNoWildcards {
private static final ParseField SCRIPT_FIELD = new ParseField("script");
private static final ParseField CONTEXT_FIELD = new ParseField("context");
private static final ParseField CONTEXT_SETUP_FIELD = new ParseField("context_setup");
private static final ConstructingObjectParser<Request, Void> PARSER = new ConstructingObjectParser<>(
"painless_execute_request",
args -> new Request((Script) args[0], (String) args[1], (ContextSetup) args[2])
);
static {
PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> Script.parse(p), SCRIPT_FIELD);
PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), CONTEXT_FIELD);
PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ContextSetup::parse, CONTEXT_SETUP_FIELD);
}
private static Map<String, ScriptContext<?>> getSupportedContexts() {
Map<String, ScriptContext<?>> contexts = new HashMap<>();
contexts.put(PainlessTestScript.CONTEXT.name, PainlessTestScript.CONTEXT);
contexts.put(FilterScript.CONTEXT.name, FilterScript.CONTEXT);
contexts.put(ScoreScript.CONTEXT.name, ScoreScript.CONTEXT);
for (ScriptContext<?> runtimeFieldsContext : ScriptModule.RUNTIME_FIELDS_CONTEXTS) {
contexts.put(runtimeFieldsContext.name, runtimeFieldsContext);
}
return Collections.unmodifiableMap(contexts);
}
static final Map<String, ScriptContext<?>> SUPPORTED_CONTEXTS = getSupportedContexts();
static ScriptContext<?> fromScriptContextName(String name) {
ScriptContext<?> scriptContext = SUPPORTED_CONTEXTS.get(name);
if (scriptContext == null) {
throw new UnsupportedOperationException("unsupported script context name [" + name + "]");
}
return scriptContext;
}
static | Request |
java | spring-projects__spring-boot | module/spring-boot-mustache/src/test/java/org/springframework/boot/mustache/reactive/view/MustacheViewTests.java | {
"start": 1338,
"end": 2239
} | class ____ {
private final StaticApplicationContext context = new StaticApplicationContext();
@Test
@WithResource(name = "template.html", content = "Hello {{World}}")
void viewResolvesHandlebars() {
MockServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("/test").build());
MustacheView view = new MustacheView();
view.setCompiler(Mustache.compiler());
view.setUrl("classpath:template.html");
view.setCharset(StandardCharsets.UTF_8.displayName());
view.setApplicationContext(this.context);
view.render(Collections.singletonMap("World", "Spring"), MediaType.TEXT_HTML, exchange)
.block(Duration.ofSeconds(30));
StepVerifier.create(exchange.getResponse().getBodyAsString())
.assertNext((body) -> Assertions.assertThat(body).isEqualToIgnoringWhitespace("Hello Spring"))
.expectComplete()
.verify(Duration.ofSeconds(30));
}
}
| MustacheViewTests |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/sql/ast/ParameterMarkerStrategyTests.java | {
"start": 7599,
"end": 8299
} | class ____ {
@Id
private Integer id;
@Basic
private String name;
@Basic
private String region;
protected EntityWithFilters() {
// for use by Hibernate
}
public EntityWithFilters(Integer id, String name, String region) {
this.id = id;
this.name = name;
this.region = region;
}
public Integer getId() {
return id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getRegion() {
return region;
}
public void setRegion(String region) {
this.region = region;
}
}
@Entity( name = "EntityWithVersion" )
@Table( name = "versioned_entity" )
public static | EntityWithFilters |
java | google__auto | value/src/test/java/com/google/auto/value/processor/AutoOneOfCompilationTest.java | {
"start": 13985,
"end": 14496
} | enum ____ {DOG, CAT}",
" public abstract Kind getKind(String wut);",
" public abstract String dog();",
" public abstract String cat();",
"}");
Compilation compilation =
javac().withProcessors(new AutoOneOfProcessor()).compile(javaFileObject);
assertThat(compilation)
.hadErrorContaining(
"foo.bar.Pet must have a no-arg abstract method returning foo.bar.Pet.Kind")
.inFile(javaFileObject)
.onLineContaining(" | Kind |
java | playframework__playframework | core/play/src/main/java/play/components/TemporaryFileComponents.java | {
"start": 266,
"end": 353
} | interface ____ {
Files.TemporaryFileCreator tempFileCreator();
}
| TemporaryFileComponents |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java | {
"start": 4119,
"end": 4251
} | class ____
* AllContainerLogAggregationPolicy.
* </li>
* <li>
* policyParameters. The parameters passed to the policy | is |
java | apache__spark | sql/core/src/test/java/test/org/apache/spark/sql/connector/catalog/functions/JavaStrLen.java | {
"start": 3507,
"end": 3751
} | class ____ extends JavaStrLenBase {
public static int invoke(String str) {
return str.length();
}
public int invoke(UTF8String str) {
return str.toString().length() + 100;
}
}
public static | JavaStrLenBadStaticMagic |
java | quarkusio__quarkus | extensions/resteasy-reactive/rest-client/runtime/src/main/java/io/quarkus/rest/client/reactive/ClientQueryParams.java | {
"start": 1026,
"end": 1090
} | interface ____ {
ClientQueryParam[] value();
}
| ClientQueryParams |
java | spring-projects__spring-boot | integration-test/spring-boot-actuator-integration-tests/src/test/java/org/springframework/boot/actuate/endpoint/web/annotation/AbstractWebEndpointIntegrationTests.java | {
"start": 21907,
"end": 22114
} | class ____ {
@Bean
PathMapper pathMapper() {
return (endpointId) -> "/";
}
}
@Configuration(proxyBeanMethods = false)
@Import(BaseConfiguration.class)
static | EndpointPathMappedToRootConfiguration |
java | assertj__assertj-core | assertj-core/src/test/java/org/assertj/core/internal/doubles/Doubles_assertIsPositive_Test.java | {
"start": 1107,
"end": 1989
} | class ____ extends DoublesBaseTest {
@Test
void should_succeed_since_actual_is_positive() {
doubles.assertIsPositive(someInfo(), (double) 6);
}
@Test
void should_fail_since_actual_is_not_positive() {
assertThatExceptionOfType(AssertionError.class).isThrownBy(() -> doubles.assertIsPositive(someInfo(), -6.0d))
.withMessage("%nExpecting actual:%n -6.0%nto be greater than:%n 0.0%n".formatted());
}
@Test
void should_succeed_since_actual_is_positive_according_to_absolute_value_comparison_strategy() {
doublesWithAbsValueComparisonStrategy.assertIsPositive(someInfo(), 6.0d);
}
@Test
void should_succeed_since_actual_is_positive_according_to_absolute_value_comparison_strategy2() {
doublesWithAbsValueComparisonStrategy.assertIsPositive(someInfo(), -6.0d);
}
}
| Doubles_assertIsPositive_Test |
java | google__guice | extensions/testlib/src/com/google/inject/testing/fieldbinder/BoundFieldModule.java | {
"start": 6126,
"end": 6607
} | class ____ extends AbstractModule {
private final Object instance;
protected WithPermits(Object instance) {
this.instance = instance;
// TODO(user): Enforce this at compile-time (e.g. via ErrorProne).
Preconditions.checkState(
getClass().isAnonymousClass()
&& (hasPermitAnnotation(getClass().getAnnotations())
|| hasPermitAnnotation(getClass().getAnnotatedSuperclass().getAnnotations())),
"This | WithPermits |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/onetomany/AbstractRecursiveBidirectionalOneToManyTest.java | {
"start": 826,
"end": 4186
} | class ____ extends BaseSessionFactoryFunctionalTest {
@Override
protected String[] getOrmXmlFiles() {
return new String[] { "org/hibernate/orm/test/onetomany/Node.hbm.xml" };
}
protected abstract CacheMode getSessionCacheMode();
@Test
public void testOneToManyMoveElement() {
init();
transformMove();
check( false );
delete();
}
@Test
public void testOneToManyMoveElementWithDirtySimpleProperty() {
init();
transformMoveWithDirtySimpleProperty();
check( true );
delete();
}
@Test
public void testOneToManyReplaceList() {
init();
transformReplace();
check( false );
delete();
}
void init() {
inTransaction(
session -> {
session.setCacheMode( getSessionCacheMode() );
Node node1 = new Node( 1, "node1" );
Node node2 = new Node( 2, "node2" );
Node node3 = new Node( 3, "node3" );
node1.addSubNode( node2 );
node2.addSubNode( node3 );
session.persist( node1 );
}
);
}
void transformMove() {
inTransaction(
session -> {
session.setCacheMode( getSessionCacheMode() );
Node node3 = session.getReference( Node.class, new Integer( 3 ) );
Node node2 = node3.getParentNode();
Node node1 = node2.getParentNode();
node2.removeSubNode( node3 );
node1.addSubNode( node3 );
}
);
}
void transformMoveWithDirtySimpleProperty() {
inTransaction(
session -> {
session.setCacheMode( getSessionCacheMode() );
Node node3 = session.getReference( Node.class, new Integer( 3 ) );
Node node2 = node3.getParentNode();
Node node1 = node2.getParentNode();
node2.removeSubNode( node3 );
node1.addSubNode( node3 );
node3.setDescription( "node3-updated" );
}
);
}
void transformReplace() {
inTransaction(
session -> {
session.setCacheMode( getSessionCacheMode() );
Node node3 = session.getReference( Node.class, new Integer( 3 ) );
Node node2 = node3.getParentNode();
Node node1 = node2.getParentNode();
node2.removeSubNode( node3 );
node1.setSubNodes( new ArrayList() );
node1.addSubNode( node2 );
node1.addSubNode( node3 );
}
);
}
void check(boolean simplePropertyUpdated) {
inTransaction(
session -> {
session.setCacheMode( getSessionCacheMode() );
Node node3 = session.get( Node.class, Integer.valueOf( 3 ) );
// fails with 2nd level cache enabled
assertEquals( 1, node3.getParentNode().getId().intValue() );
assertEquals( ( simplePropertyUpdated ? "node3-updated" : "node3" ), node3.getDescription() );
assertTrue( node3.getSubNodes().isEmpty() );
Node node1 = node3.getParentNode();
assertNull( node1.getParentNode() );
assertEquals( 2, node1.getSubNodes().size() );
assertEquals( 2, ( (Node) node1.getSubNodes().get( 0 ) ).getId().intValue() );
assertEquals( "node1", node1.getDescription() );
Node node2 = (Node) node1.getSubNodes().get( 0 );
assertSame( node1, node2.getParentNode() );
assertTrue( node2.getSubNodes().isEmpty() );
assertEquals( "node2", node2.getDescription() );
}
);
}
void delete() {
inTransaction(
session -> {
session.setCacheMode( getSessionCacheMode() );
Node node1 = session.get( Node.class, Integer.valueOf( 1 ) );
session.remove( node1 );
}
);
}
}
| AbstractRecursiveBidirectionalOneToManyTest |
java | apache__flink | flink-connectors/flink-connector-files/src/main/java/org/apache/flink/connector/file/table/ProjectingBulkFormat.java | {
"start": 1428,
"end": 3093
} | class ____ implements BulkFormat<RowData, FileSourceSplit> {
private final BulkFormat<RowData, FileSourceSplit> wrapped;
private final TypeInformation<RowData> producedType;
private final int[] projections;
public ProjectingBulkFormat(
BulkFormat<RowData, FileSourceSplit> wrapped,
int[] projections,
TypeInformation<RowData> producedType) {
this.wrapped = wrapped;
this.projections = projections;
this.producedType = producedType;
}
@Override
public Reader<RowData> createReader(Configuration config, FileSourceSplit split)
throws IOException {
return wrapReader(wrapped.createReader(config, split), split);
}
@Override
public Reader<RowData> restoreReader(Configuration config, FileSourceSplit split)
throws IOException {
return wrapReader(wrapped.restoreReader(config, split), split);
}
@Override
public boolean isSplittable() {
return wrapped.isSplittable();
}
@Override
public TypeInformation<RowData> getProducedType() {
return producedType;
}
private Reader<RowData> wrapReader(Reader<RowData> superReader, FileSourceSplit split) {
// This row is going to be reused for every record
final ProjectedRowData producedRowData = ProjectedRowData.from(this.projections);
return RecordMapperWrapperRecordIterator.wrapReader(
superReader,
physicalRowData -> {
producedRowData.replaceRow(physicalRowData);
return producedRowData;
});
}
}
| ProjectingBulkFormat |
java | apache__flink | flink-core/src/main/java/org/apache/flink/util/InstantiationUtil.java | {
"start": 15601,
"end": 15697
} | class ____ be instantiated by {@code
* Class#newInstance()}.
*
* @param clazz The | can |
java | alibaba__druid | core/src/main/java/com/alibaba/druid/support/monitor/MonitorContext.java | {
"start": 739,
"end": 2334
} | class ____ {
public static String DEFAULT_DOMAIN;
private final Map<String, Object> attributes = new HashMap<String, Object>();
private String domain = "default";
private String app = "default";
private String cluster = "default";
private String host;
private int pid;
private Date collectTime;
private Date startTime;
public MonitorContext() {
}
public Date getCollectTime() {
return collectTime;
}
public void setCollectTime(Date collectTime) {
this.collectTime = collectTime;
}
public Date getStartTime() {
return startTime;
}
public void setStartTime(Date startTime) {
this.startTime = startTime;
}
public Map<String, Object> getAttributes() {
return attributes;
}
public int getPID() {
return pid;
}
public void setPID(int pid) {
this.pid = pid;
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPid() {
return pid;
}
public void setPid(int pid) {
this.pid = pid;
}
public String getDomain() {
return domain;
}
public void setDomain(String domain) {
this.domain = domain;
}
public String getApp() {
return app;
}
public void setApp(String app) {
this.app = app;
}
public String getCluster() {
return cluster;
}
public void setCluster(String cluster) {
this.cluster = cluster;
}
}
| MonitorContext |
java | spring-projects__spring-framework | spring-test/src/main/java/org/springframework/test/web/servlet/client/MockMvcWebTestClientSpecs.java | {
"start": 8223,
"end": 12090
} | class ____ extends AbstractMockMvcServerSpec<MockMvcWebTestClient.ControllerSpec>
implements MockMvcWebTestClient.ControllerSpec {
private final StandaloneMockMvcBuilder mockMvcBuilder;
StandaloneMockMvcSpec(Object... controllers) {
this.mockMvcBuilder = MockMvcBuilders.standaloneSetup(controllers);
}
@Override
public StandaloneMockMvcSpec controllerAdvice(Object... controllerAdvice) {
this.mockMvcBuilder.setControllerAdvice(controllerAdvice);
return this;
}
@Override
public StandaloneMockMvcSpec messageConverters(HttpMessageConverter<?>... messageConverters) {
this.mockMvcBuilder.setMessageConverters(messageConverters);
return this;
}
@Override
public StandaloneMockMvcSpec validator(Validator validator) {
this.mockMvcBuilder.setValidator(validator);
return this;
}
@Override
public StandaloneMockMvcSpec conversionService(FormattingConversionService conversionService) {
this.mockMvcBuilder.setConversionService(conversionService);
return this;
}
@Override
public MockMvcWebTestClient.ControllerSpec apiVersionStrategy(ApiVersionStrategy versionStrategy) {
this.mockMvcBuilder.setApiVersionStrategy(versionStrategy);
return this;
}
@Override
public StandaloneMockMvcSpec interceptors(HandlerInterceptor... interceptors) {
mappedInterceptors(null, interceptors);
return this;
}
@Override
public StandaloneMockMvcSpec mappedInterceptors(
String @Nullable [] pathPatterns, HandlerInterceptor... interceptors) {
this.mockMvcBuilder.addMappedInterceptors(pathPatterns, interceptors);
return this;
}
@Override
public StandaloneMockMvcSpec contentNegotiationManager(ContentNegotiationManager manager) {
this.mockMvcBuilder.setContentNegotiationManager(manager);
return this;
}
@Override
public StandaloneMockMvcSpec asyncRequestTimeout(long timeout) {
this.mockMvcBuilder.setAsyncRequestTimeout(timeout);
return this;
}
@Override
public StandaloneMockMvcSpec customArgumentResolvers(HandlerMethodArgumentResolver... argumentResolvers) {
this.mockMvcBuilder.setCustomArgumentResolvers(argumentResolvers);
return this;
}
@Override
public StandaloneMockMvcSpec customReturnValueHandlers(HandlerMethodReturnValueHandler... handlers) {
this.mockMvcBuilder.setCustomReturnValueHandlers(handlers);
return this;
}
@Override
public StandaloneMockMvcSpec handlerExceptionResolvers(HandlerExceptionResolver... exceptionResolvers) {
this.mockMvcBuilder.setHandlerExceptionResolvers(exceptionResolvers);
return this;
}
@Override
public StandaloneMockMvcSpec viewResolvers(ViewResolver... resolvers) {
this.mockMvcBuilder.setViewResolvers(resolvers);
return this;
}
@Override
public StandaloneMockMvcSpec singleView(View view) {
this.mockMvcBuilder.setSingleView(view);
return this;
}
@Override
public StandaloneMockMvcSpec localeResolver(LocaleResolver localeResolver) {
this.mockMvcBuilder.setLocaleResolver(localeResolver);
return this;
}
@Override
public StandaloneMockMvcSpec flashMapManager(FlashMapManager flashMapManager) {
this.mockMvcBuilder.setFlashMapManager(flashMapManager);
return this;
}
@Override
public StandaloneMockMvcSpec patternParser(PathPatternParser parser) {
this.mockMvcBuilder.setPatternParser(parser);
return this;
}
@Override
public StandaloneMockMvcSpec placeholderValue(String name, String value) {
this.mockMvcBuilder.addPlaceholderValue(name, value);
return this;
}
@Override
public StandaloneMockMvcSpec customHandlerMapping(Supplier<RequestMappingHandlerMapping> factory) {
this.mockMvcBuilder.setCustomHandlerMapping(factory);
return this;
}
@Override
public ConfigurableMockMvcBuilder<?> getMockMvcBuilder() {
return this.mockMvcBuilder;
}
}
}
| StandaloneMockMvcSpec |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/RedundantOverrideTest.java | {
"start": 8894,
"end": 9084
} | class ____ extends A {
@Override
protected void swap(int a) {
super.swap(a);
}
}
""")
.doTest();
}
}
| B |
java | spring-projects__spring-framework | spring-web/src/main/java/org/springframework/web/server/WebFilter.java | {
"start": 1063,
"end": 1489
} | interface ____ {
/**
* Process the Web request and (optionally) delegate to the next
* {@code WebFilter} through the given {@link WebFilterChain}.
* @param exchange the current server exchange
* @param chain provides a way to delegate to the next filter
* @return {@code Mono<Void>} to indicate when request processing is complete
*/
Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain);
}
| WebFilter |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/inject/guice/OverridesGuiceInjectableMethodTest.java | {
"start": 4152,
"end": 4404
} | class ____ extends TestClass1 {
@javax.inject.Inject
public void foo() {}
}
/**
* Class with a method foo() annotated with @com.google.inject.Inject that overrides a method
* annotated with @javax.inject.Inject.
*/
public | TestClass3 |
java | netty__netty | handler/src/main/java/io/netty/handler/ssl/util/X509TrustManagerWrapper.java | {
"start": 975,
"end": 2459
} | class ____ extends X509ExtendedTrustManager {
private final X509TrustManager delegate;
X509TrustManagerWrapper(X509TrustManager delegate) {
this.delegate = checkNotNull(delegate, "delegate");
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String s) throws CertificateException {
delegate.checkClientTrusted(chain, s);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String s, Socket socket)
throws CertificateException {
delegate.checkClientTrusted(chain, s);
}
@Override
public void checkClientTrusted(X509Certificate[] chain, String s, SSLEngine sslEngine)
throws CertificateException {
delegate.checkClientTrusted(chain, s);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String s) throws CertificateException {
delegate.checkServerTrusted(chain, s);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String s, Socket socket)
throws CertificateException {
delegate.checkServerTrusted(chain, s);
}
@Override
public void checkServerTrusted(X509Certificate[] chain, String s, SSLEngine sslEngine)
throws CertificateException {
delegate.checkServerTrusted(chain, s);
}
@Override
public X509Certificate[] getAcceptedIssuers() {
return delegate.getAcceptedIssuers();
}
}
| X509TrustManagerWrapper |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/engine/jdbc/spi/JdbcServices.java | {
"start": 1158,
"end": 3198
} | interface ____ extends Service {
/**
* Obtain the {@link JdbcEnvironment} backing this {@code JdbcServices} instance.
*/
JdbcEnvironment getJdbcEnvironment();
/**
* Obtain a {@link JdbcConnectionAccess} usable from bootstrap actions
* (hbm2ddl.auto, {@code Dialect} resolution, etc).
*/
JdbcConnectionAccess getBootstrapJdbcConnectionAccess();
/**
* Obtain the dialect of the database.
*/
Dialect getDialect();
/**
* Obtain service for logging SQL statements.
*
* @return The SQL statement logger.
*/
SqlStatementLogger getSqlStatementLogger();
/**
* Obtains the service used for marking SQL parameters
* @return the registered ParameterMarkerStrategy implementation.
*/
@Incubating
ParameterMarkerStrategy getParameterMarkerStrategy();
/**
* Obtain service for dealing with exceptions.
*
* @return The exception helper service.
*/
SqlExceptionHelper getSqlExceptionHelper();
/**
* Obtain information about supported behavior reported by the JDBC driver.
* <p>
* Yuck, yuck, yuck! Much prefer this to be part of a "basic settings" type object.
*
* @return The extracted database metadata, oddly enough :)
*/
ExtractedDatabaseMetaData getExtractedMetaDataSupport();
/**
* Create an instance of a {@link LobCreator} appropriate for the current environment,
* mainly meant to account for variance between:
* <ul>
* <li>JDBC 4 (<= JDK 1.6) and
* <li>JDBC 3 (>= JDK 1.5).
* </ul>
*
* @param lobCreationContext The context in which the LOB is being created
* @return The LOB creator.
*/
LobCreator getLobCreator(LobCreationContext lobCreationContext);
/**
* Access the executor for {@link JdbcOperationQuerySelect} operations.
*/
default JdbcSelectExecutor getJdbcSelectExecutor() {
return JdbcSelectExecutorStandardImpl.INSTANCE;
}
/**
* Access the executor for {@link JdbcOperationQueryMutation} operations.
*/
default JdbcMutationExecutor getJdbcMutationExecutor() {
return StandardJdbcMutationExecutor.INSTANCE;
}
}
| JdbcServices |
java | apache__camel | components/camel-jaxb/src/test/java/org/apache/camel/example/Address.java | {
"start": 1104,
"end": 2134
} | class ____ {
@XmlElement(namespace = "http://www.camel.apache.org/jaxb/example/address/1")
private String street;
@XmlElement(namespace = "http://www.camel.apache.org/jaxb/example/address/1")
private String streetNumber;
@XmlElement(namespace = "http://www.camel.apache.org/jaxb/example/address/1")
private String zip;
@XmlElement(namespace = "http://www.camel.apache.org/jaxb/example/address/1")
private String city;
public String getStreet() {
return street;
}
public String getStreetNumber() {
return streetNumber;
}
public String getZip() {
return zip;
}
public String getCity() {
return city;
}
public void setStreet(String street) {
this.street = street;
}
public void setStreetNumber(String streetNumber) {
this.streetNumber = streetNumber;
}
public void setZip(String zip) {
this.zip = zip;
}
public void setCity(String city) {
this.city = city;
}
}
| Address |
java | elastic__elasticsearch | x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianPoint.java | {
"start": 1295,
"end": 9743
} | class ____ implements SpatialPoint, ToXContentFragment {
private static final String X_FIELD = "x";
private static final String Y_FIELD = "y";
private static final String Z_FIELD = "z";
protected double x;
protected double y;
public CartesianPoint() {}
public CartesianPoint(double x, double y) {
this.x = x;
this.y = y;
}
public CartesianPoint(SpatialPoint template) {
this(template.getX(), template.getY());
}
public CartesianPoint reset(double x, double y) {
this.x = x;
this.y = y;
return this;
}
public CartesianPoint resetX(double x) {
this.x = x;
return this;
}
public CartesianPoint resetY(double y) {
this.y = y;
return this;
}
public CartesianPoint resetFromEncoded(long encoded) {
// TODO add this method to SpatialPoint interface, allowing more code de-duplication
final double x = XYEncodingUtils.decode((int) (encoded >>> 32));
final double y = XYEncodingUtils.decode((int) (encoded & 0xFFFFFFFF));
return reset(x, y);
}
public CartesianPoint resetFromString(String value, final boolean ignoreZValue) {
if (value.toLowerCase(Locale.ROOT).contains("point")) {
return resetFromWKT(value, ignoreZValue);
} else if (value.contains(",")) {
return resetFromCoordinates(value, ignoreZValue);
} else if (value.contains(".")) {
// This error mimics the structure of the parser error from 'resetFromCoordinates' below
throw new ElasticsearchParseException("failed to parse [{}], expected 2 or 3 coordinates but found: [{}]", value, 1);
} else {
// This error mimics the structure of the Geohash.mortonEncode() error to simplify testing
throw new ElasticsearchParseException("unsupported symbol [{}] in point [{}]", value.charAt(0), value);
}
}
@SuppressWarnings("HiddenField")
public CartesianPoint resetFromCoordinates(String value, final boolean ignoreZValue) {
String[] vals = value.split(",");
if (vals.length > 3 || vals.length < 2) {
throw new ElasticsearchParseException("failed to parse [{}], expected 2 or 3 coordinates but found: [{}]", vals, vals.length);
}
final double x;
final double y;
try {
x = Double.parseDouble(vals[0].trim());
if (Double.isFinite(x) == false) {
throw new ElasticsearchParseException(
"invalid [{}] value [{}]; must be between -3.4028234663852886E38 and 3.4028234663852886E38",
X_FIELD,
x
);
}
} catch (NumberFormatException ex) {
throw new ElasticsearchParseException("[{}] must be a number", X_FIELD);
}
try {
y = Double.parseDouble(vals[1].trim());
if (Double.isFinite(y) == false) {
throw new ElasticsearchParseException(
"invalid [{}] value [{}]; must be between -3.4028234663852886E38 and 3.4028234663852886E38",
Y_FIELD,
y
);
}
} catch (NumberFormatException ex) {
throw new ElasticsearchParseException("[{}] must be a number", Y_FIELD);
}
if (vals.length > 2) {
try {
CartesianPoint.assertZValue(ignoreZValue, Double.parseDouble(vals[2].trim()));
} catch (NumberFormatException ex) {
throw new ElasticsearchParseException("[{}] must be a number", Y_FIELD);
}
}
return reset(x, y);
}
private CartesianPoint resetFromWKT(String value, boolean ignoreZValue) {
Geometry geometry;
try {
geometry = WellKnownText.fromWKT(StandardValidator.instance(ignoreZValue), false, value);
} catch (Exception e) {
throw new ElasticsearchParseException("Invalid WKT format", e);
}
if (geometry.type() != ShapeType.POINT) {
throw new ElasticsearchParseException(
"[{}] supports only POINT among WKT primitives, but found {}",
PointFieldMapper.CONTENT_TYPE,
geometry.type()
);
}
org.elasticsearch.geometry.Point point = (org.elasticsearch.geometry.Point) geometry;
return reset(point.getX(), point.getY());
}
@Override
public double getX() {
return this.x;
}
@Override
public double getY() {
return this.y;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CartesianPoint point = (CartesianPoint) o;
if (Double.compare(point.x, x) != 0) return false;
if (Double.compare(point.y, y) != 0) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(x, y);
}
@Override
public String toString() {
return x + ", " + y;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject().field(X_FIELD, x).field(Y_FIELD, y).endObject();
}
/**
* Parse a {@link CartesianPoint} with a {@link XContentParser}. A point has one of the following forms:
*
* <ul>
* <li>Object: <pre>{"x": <i><x-value></i>, "y": <i><y-value></i>}</pre></li>
* <li>Object: <pre>{"type": <i>Point</i>, "coordinates": <i><array of doubles></i>}</pre></li>
* <li>String: <pre>"<i><latitude></i>,<i><longitude></i>"</pre></li>
* <li>Array: <pre>[<i><x></i>,<i><y></i>]</pre></li>
* </ul>
*
* @param parser {@link XContentParser} to parse the value from
* @param ignoreZValue {@link XContentParser} to not throw an error if 3 dimensional data is provided
* @return new {@link CartesianPoint} parsed from the parser
*/
public static CartesianPoint parsePoint(XContentParser parser, final boolean ignoreZValue) throws IOException,
ElasticsearchParseException {
return cartesianPointParser.parsePoint(parser, ignoreZValue, value -> new CartesianPoint().resetFromString(value, ignoreZValue));
}
public static CartesianPoint parsePoint(Object value, boolean ignoreZValue) throws ElasticsearchParseException {
try (
XContentParser parser = new MapXContentParser(
NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE,
Collections.singletonMap("null_value", value),
null
)
) {
parser.nextToken(); // start object
parser.nextToken(); // field name
parser.nextToken(); // field value
return parsePoint(parser, ignoreZValue);
} catch (IOException ex) {
throw new ElasticsearchParseException("error parsing point", ex);
}
}
public static void assertZValue(final boolean ignoreZValue, double zValue) {
if (ignoreZValue == false) {
throw new ElasticsearchParseException(
"Exception parsing coordinates: found Z value [{}] but [ignore_z_value] parameter is [{}]",
zValue,
ignoreZValue
);
}
if (Double.isFinite(zValue) == false) {
throw new ElasticsearchParseException(
"invalid [{}] value [{}]; must be between -3.4028234663852886E38 and 3.4028234663852886E38",
Z_FIELD,
zValue
);
}
}
private static final GenericPointParser<CartesianPoint> cartesianPointParser = new GenericPointParser<>("point", "x", "y") {
@Override
public void assertZValue(boolean ignoreZValue, double zValue) {
CartesianPoint.assertZValue(ignoreZValue, zValue);
}
@Override
public CartesianPoint createPoint(double x, double y) {
return new CartesianPoint(x, y);
}
@Override
public String fieldError() {
return "field must be either lat/lon or type/coordinates";
}
};
}
| CartesianPoint |
java | apache__maven | impl/maven-core/src/main/java/org/apache/maven/logging/LoggingOutputStream.java | {
"start": 1016,
"end": 2153
} | class ____ extends FilterOutputStream {
static final byte[] LINE_SEP = System.lineSeparator().getBytes();
final EolBaos buf;
final Consumer<String> consumer;
public LoggingOutputStream(Consumer<String> consumer) {
this(new EolBaos(), consumer);
}
LoggingOutputStream(EolBaos out, Consumer<String> consumer) {
super(out);
this.buf = out;
this.consumer = consumer;
}
public PrintStream printStream() {
return new LoggingPrintStream(this);
}
@Override
public void write(int b) throws IOException {
super.write(b);
if (buf.isEol()) {
String line = new String(buf.toByteArray(), 0, buf.size() - LINE_SEP.length);
ProjectBuildLogAppender.updateMdc();
consumer.accept(line);
buf.reset();
}
}
public void forceFlush() {
if (buf.size() > 0) {
String line = new String(buf.toByteArray(), 0, buf.size());
ProjectBuildLogAppender.updateMdc();
consumer.accept(line);
buf.reset();
}
}
static | LoggingOutputStream |
java | quarkusio__quarkus | extensions/amazon-lambda-rest/runtime/src/main/java/io/quarkus/amazon/lambda/http/LambdaHttpConfig.java | {
"start": 318,
"end": 1010
} | interface ____ {
/**
* If true, runtime will search Cognito JWT claims for "cognito:groups"
* and add them as Quarkus security roles.
*
* True by default
*/
@WithDefault("true")
boolean mapCognitoToRoles();
/**
* Cognito claim that contains roles you want to map. Defaults to "cognito:groups"
*/
@WithDefault("cognito:groups")
String cognitoRoleClaim();
/**
* Regular expression to locate role values within a Cognito claim string.
* By default, it looks for space delimited strings enclosed in brackets
* "[^\[\] \t]+"
*/
@WithDefault("[^\\[\\] \\t]+")
String cognitoClaimMatcher();
}
| LambdaHttpConfig |
java | hibernate__hibernate-orm | hibernate-core/src/main/java/org/hibernate/sql/model/internal/TableInsertStandard.java | {
"start": 620,
"end": 1554
} | class ____ extends AbstractTableInsert {
private final List<ColumnReference> returningColumns;
public TableInsertStandard(
MutatingTableReference mutatingTable,
MutationTarget<?> mutationTarget,
List<ColumnValueBinding> valueBindings,
List<ColumnReference> returningColumns,
List<ColumnValueParameter> parameters) {
super( mutatingTable, mutationTarget, parameters, valueBindings );
this.returningColumns = returningColumns;
}
@Override
public boolean isCustomSql() {
return false;
}
@Override
public List<ColumnReference> getReturningColumns() {
return returningColumns;
}
@Override
public void forEachReturningColumn(BiConsumer<Integer,ColumnReference> consumer) {
forEachThing( returningColumns, consumer );
}
@Override
public boolean isCallable() {
return false;
}
@Override
public void accept(SqlAstWalker walker) {
walker.visitStandardTableInsert( this );
}
}
| TableInsertStandard |
java | apache__flink | flink-tests/src/test/java/org/apache/flink/test/checkpointing/ApproximateLocalRecoveryDownstreamITCase.java | {
"start": 9269,
"end": 11164
} | class ____<T> extends RichMapFunction<T, T> {
private static final long serialVersionUID = 6334389850158703L;
private static volatile boolean failedBefore;
private final int failCount;
private int numElementsTotal;
private boolean failer;
FailingMapper(int failCount) {
this.failCount = failCount;
}
@Override
public void open(OpenContext openContext) {
failer = getRuntimeContext().getTaskInfo().getIndexOfThisSubtask() == 0;
}
@Override
public T map(T value) throws Exception {
numElementsTotal++;
if (!failedBefore) {
Thread.sleep(10);
if (failer && numElementsTotal >= failCount) {
failedBefore = true;
throw new Exception("Artificial Test Failure");
}
}
return value;
}
}
/**
* Validating sink to make sure each selectedChannel (of map) gets at least numElementsTotal
* elements.
*
* <p>Notice that the source generates tuples evenly distributed amongst downstream operator
* (map) instances. Besides, the index generated is continuous and monotonic increasing as long
* as the source is not restarted. Hence, - In the case of approximate local recovery is NOT
* enabled, where the entire job, including source, is restarted after map fails, source
* regenerates tuples indexed from 0. Upon reach numElementsTotal, the maximal possible index is
* numElementsTotal * numberOfInputChannels - 1 - In the case of approximate local recovery is
* enabled, where only the downstream of the failed task restart, source does not regenerating
* tuples, the maximal possible index > numElementsTotal * numberOfInputChannels - 1
*/
private static | FailingMapper |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/annotations/collectionelement/deepcollectionelements/DeepCollectionElementTest.java | {
"start": 627,
"end": 1201
} | class ____ {
@Test
public void testInitialization() {
Configuration configuration = new Configuration();
configuration.addAnnotatedClass( A.class );
configuration.addAnnotatedClass( B.class );
configuration.addAnnotatedClass( C.class );
StandardServiceRegistryImpl serviceRegistry = ServiceRegistryBuilder.buildServiceRegistry( configuration.getProperties() );
try {
SessionFactory sessionFactory = configuration.buildSessionFactory( serviceRegistry );
sessionFactory.close();
}
finally {
serviceRegistry.destroy();
}
}
}
| DeepCollectionElementTest |
java | apache__kafka | streams/src/test/java/org/apache/kafka/streams/processor/internals/assignment/GraphTest.java | {
"start": 18822,
"end": 19279
} | class ____ {
final int source;
final int destination;
final int capacity;
final int cost;
final int flow;
TestEdge(final int source, final int destination, final int capacity, final int cost, final int flow) {
this.source = source;
this.destination = destination;
this.capacity = capacity;
this.cost = cost;
this.flow = flow;
}
}
}
| TestEdge |
java | apache__camel | components/camel-ai/camel-neo4j/src/generated/java/org/apache/camel/component/neo4j/Neo4jEndpointUriFactory.java | {
"start": 515,
"end": 2887
} | class ____ extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":name";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Map<String, String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(18);
props.add("alias");
props.add("databaseUrl");
props.add("detachRelationship");
props.add("dimension");
props.add("driver");
props.add("kerberosAuthTicket");
props.add("label");
props.add("lazyStartProducer");
props.add("maxResults");
props.add("minScore");
props.add("name");
props.add("password");
props.add("query");
props.add("realm");
props.add("similarityFunction");
props.add("token");
props.add("username");
props.add("vectorIndexName");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(5);
secretProps.add("kerberosAuthTicket");
secretProps.add("password");
secretProps.add("realm");
secretProps.add("token");
secretProps.add("username");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
MULTI_VALUE_PREFIXES = Collections.emptyMap();
}
@Override
public boolean isEnabled(String scheme) {
return "neo4j".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "name", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Map<String, String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
| Neo4jEndpointUriFactory |
java | apache__hadoop | hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/policies/manager/BasePolicyManagerTest.java | {
"start": 1824,
"end": 4106
} | class ____ {
@SuppressWarnings("checkstyle:visibilitymodifier")
protected FederationPolicyManager wfp = null;
@SuppressWarnings("checkstyle:visibilitymodifier")
protected Class expectedPolicyManager;
@SuppressWarnings("checkstyle:visibilitymodifier")
protected Class expectedAMRMProxyPolicy;
@SuppressWarnings("checkstyle:visibilitymodifier")
protected Class expectedRouterPolicy;
@Test
public void testSerializeAndInstantiate() throws Exception {
serializeAndDeserializePolicyManager(wfp, expectedPolicyManager,
expectedAMRMProxyPolicy, expectedRouterPolicy);
}
@Test
public void testSerializeAndInstantiateBad1() throws Exception {
assertThrows(FederationPolicyInitializationException.class, () -> {
serializeAndDeserializePolicyManager(wfp, String.class,
expectedAMRMProxyPolicy, expectedRouterPolicy);
});
}
@Test
public void testSerializeAndInstantiateBad2() throws Exception {
assertThrows(AssertionError.class, () -> {
serializeAndDeserializePolicyManager(wfp, expectedPolicyManager,
String.class, expectedRouterPolicy);
});
}
@Test
public void testSerializeAndInstantiateBad3() throws Exception {
assertThrows(AssertionError.class, () -> {
serializeAndDeserializePolicyManager(wfp, expectedPolicyManager,
expectedAMRMProxyPolicy, String.class);
});
}
protected static void serializeAndDeserializePolicyManager(
FederationPolicyManager wfp, Class policyManagerType,
Class expAMRMProxyPolicy, Class expRouterPolicy) throws Exception {
// serializeConf it in a context
Configuration conf = new Configuration();
SubClusterPolicyConfiguration fpc = wfp.serializeConf();
fpc.setType(policyManagerType.getCanonicalName());
FederationPolicyInitializationContext context =
new FederationPolicyInitializationContext();
context.setSubClusterPolicyConfiguration(fpc);
context
.setFederationStateStoreFacade(FederationPoliciesTestUtil.initFacade(conf));
context.setFederationSubclusterResolver(
FederationPoliciesTestUtil.initResolver());
context.setHomeSubcluster(SubClusterId.newInstance("homesubcluster"));
// based on the "context" created instantiate new | BasePolicyManagerTest |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/serialization/SerializerConfigImpl.java | {
"start": 16197,
"end": 21441
} | class ____ serialization config"),
e -> ConfigurationUtils.parseStringToMap(e.getValue()),
(v1, v2) -> {
throw new IllegalArgumentException(
"Duplicated serializer for the same class.");
},
LinkedHashMap::new));
for (Map.Entry<Class<?>, Map<String, String>> entry :
serializationConfigByClass.entrySet()) {
Class<?> type = entry.getKey();
Map<String, String> config = entry.getValue();
String configType = config.get("type");
if (configType == null) {
throw new IllegalArgumentException("Serializer type not specified for " + type);
}
switch (configType) {
case "pojo":
registerPojoType(type);
break;
case "kryo":
parseAndRegisterKryoType(classLoader, type, config);
break;
case "typeinfo":
parseAndRegisterTypeFactory(classLoader, type, config);
break;
default:
throw new IllegalArgumentException(
String.format(
"Unsupported serializer type %s for %s", configType, type));
}
}
}
private void parseAndRegisterKryoType(
ClassLoader classLoader, Class<?> t, Map<String, String> m) {
String kryoType = m.get("kryo-type");
if (kryoType == null) {
registerKryoType(t);
} else {
switch (kryoType) {
case "default":
addDefaultKryoSerializer(
t,
loadClass(
m.get("class"),
classLoader,
"Could not load serializer's class"));
break;
case "registered":
registerTypeWithKryoSerializer(
t,
loadClass(
m.get("class"),
classLoader,
"Could not load serializer's class"));
break;
default:
break;
}
}
}
private void parseAndRegisterTypeFactory(
ClassLoader classLoader, Class<?> t, Map<String, String> m) {
Class<? extends TypeInfoFactory<?>> factoryClass =
loadClass(m.get("class"), classLoader, "Could not load TypeInfoFactory's class");
// Register in the global static factory map of TypeExtractor for now so that it can be
// accessed from the static methods of TypeExtractor where SerializerConfig is currently
// not accessible
TypeExtractor.registerFactory(t, factoryClass);
// Register inside SerializerConfig only for testing purpose for now
registerTypeWithTypeInfoFactory(t, factoryClass);
}
private void registerTypeWithTypeInfoFactory(
Class<?> t, Class<? extends TypeInfoFactory<?>> factory) {
Preconditions.checkNotNull(t, "Type parameter must not be null.");
Preconditions.checkNotNull(factory, "Factory parameter must not be null.");
if (!TypeInfoFactory.class.isAssignableFrom(factory)) {
throw new IllegalArgumentException("Class is not a TypeInfoFactory.");
}
if (registeredTypeInfoFactories.containsKey(t)) {
throw new InvalidTypesException(
"A TypeInfoFactory for type '" + t + "' is already registered.");
}
registeredTypeInfoFactories.put(t, factory);
}
@Override
public SerializerConfigImpl copy() {
final SerializerConfigImpl newSerializerConfig = new SerializerConfigImpl();
newSerializerConfig.configure(configuration, this.getClass().getClassLoader());
getRegisteredTypesWithKryoSerializers()
.forEach(
(c, s) ->
newSerializerConfig.registerTypeWithKryoSerializer(
c, s.getSerializer()));
getRegisteredTypesWithKryoSerializerClasses()
.forEach(newSerializerConfig::registerTypeWithKryoSerializer);
getDefaultKryoSerializers()
.forEach(
(c, s) ->
newSerializerConfig.addDefaultKryoSerializer(c, s.getSerializer()));
getDefaultKryoSerializerClasses().forEach(newSerializerConfig::addDefaultKryoSerializer);
getRegisteredKryoTypes().forEach(newSerializerConfig::registerKryoType);
getRegisteredPojoTypes().forEach(newSerializerConfig::registerPojoType);
getRegisteredTypeInfoFactories()
.forEach(newSerializerConfig::registerTypeWithTypeInfoFactory);
return newSerializerConfig;
}
}
| for |
java | spring-projects__spring-framework | spring-context/src/main/java/org/springframework/context/annotation/DeferredImportSelector.java | {
"start": 1518,
"end": 1955
} | interface ____ extends ImportSelector {
/**
* Return a specific import group.
* <p>The default implementations return {@code null} for no grouping required.
* @return the import group class, or {@code null} if none
* @since 5.0
*/
default @Nullable Class<? extends Group> getImportGroup() {
return null;
}
/**
* Interface used to group results from different import selectors.
* @since 5.0
*/
| DeferredImportSelector |
java | google__error-prone | core/src/test/java/com/google/errorprone/bugpatterns/ForOverrideCheckerTest.java | {
"start": 9585,
"end": 9946
} | class ____ {
@ForOverride
protected void forOverride() {}
public Runnable getRunner() {
return new Runnable() {
public void run() {
forOverride();
}
};
}
}
""")
.doTest();
}
}
| OuterClass |
java | google__dagger | javatests/dagger/internal/codegen/DuplicateBindingsValidationTest.java | {
"start": 4006,
"end": 4061
} | interface ____ {}",
"",
" static | A |
java | apache__camel | components/camel-sjms/src/test/java/org/apache/camel/component/sjms/it/SyncJmsInOutIT.java | {
"start": 1385,
"end": 2634
} | class ____ extends JmsTestSupport {
@Test
public void testSynchronous() throws Exception {
MockEndpoint mock = getMockEndpoint("mock:result");
mock.expectedMessageCount(100);
mock.expectsNoDuplicates(body());
StopWatch watch = new StopWatch();
for (int i = 0; i < 100; i++) {
template.sendBody("seda:start.SyncJmsInOutIT", Integer.toString(i));
}
// just in case we run on slow boxes
MockEndpoint.assertIsSatisfied(context, 20, TimeUnit.SECONDS);
log.info("Took {} ms. to process 100 messages request/reply over JMS", watch.taken());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("seda:start.SyncJmsInOutIT")
.to("sjms:queue:in.foo.SyncJmsInOutIT?replyTo=out.bar&exchangePattern=InOut")
.to("mock:result");
from("sjms:queue:in.foo.SyncJmsInOutIT?exchangePattern=InOut")
.log("Using ${threadName} to process ${body}")
.transform(body().prepend("Bye "));
}
};
}
}
| SyncJmsInOutIT |
java | elastic__elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java | {
"start": 2215,
"end": 2728
} | class ____ extends DeferableBucketAggregator implements SingleBucketAggregator {
public static final ParseField SHARD_SIZE_FIELD = new ParseField("shard_size");
public static final ParseField MAX_DOCS_PER_VALUE_FIELD = new ParseField("max_docs_per_value");
public static final ParseField EXECUTION_HINT_FIELD = new ParseField("execution_hint");
static final long SCOREDOCKEY_SIZE = RamUsageEstimator.shallowSizeOfInstance(DiversifiedTopDocsCollector.ScoreDocKey.class);
public | SamplerAggregator |
java | google__gson | gson/src/test/java/com/google/gson/ParameterizedTypeTest.java | {
"start": 1064,
"end": 2003
} | class ____ {
private ParameterizedType ourType;
@Before
public void setUp() throws Exception {
ourType = GsonTypes.newParameterizedTypeWithOwner(null, List.class, String.class);
}
@Test
public void testOurTypeFunctionality() {
Type parameterizedType = new TypeToken<List<String>>() {}.getType();
assertThat(ourType.getOwnerType()).isNull();
assertThat(ourType.getActualTypeArguments()[0]).isSameInstanceAs(String.class);
assertThat(ourType.getRawType()).isSameInstanceAs(List.class);
assertThat(ourType).isEqualTo(parameterizedType);
assertThat(ourType.hashCode()).isEqualTo(parameterizedType.hashCode());
}
@Test
public void testNotEquals() {
Type differentParameterizedType = new TypeToken<List<Integer>>() {}.getType();
assertThat(differentParameterizedType.equals(ourType)).isFalse();
assertThat(ourType.equals(differentParameterizedType)).isFalse();
}
}
| ParameterizedTypeTest |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/aot/BeanRegistrationAotContribution.java | {
"start": 1110,
"end": 3991
} | interface ____ {
/**
* Customize the {@link BeanRegistrationCodeFragments} that will be used to
* generate the bean registration code. Custom code fragments can be used if
* default code generation isn't suitable.
* @param generationContext the generation context
* @param codeFragments the existing code fragments
* @return the code fragments to use, may be the original instance or a wrapper
*/
default BeanRegistrationCodeFragments customizeBeanRegistrationCodeFragments(
GenerationContext generationContext, BeanRegistrationCodeFragments codeFragments) {
return codeFragments;
}
/**
* Apply this contribution to the given {@link BeanRegistrationCode}.
* @param generationContext the generation context
* @param beanRegistrationCode the generated registration
*/
void applyTo(GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode);
/**
* Create a {@link BeanRegistrationAotContribution} that customizes
* the {@link BeanRegistrationCodeFragments}. Typically used in
* conjunction with an extension of {@link BeanRegistrationCodeFragmentsDecorator}
* that overrides a specific callback.
* @param defaultCodeFragments the default code fragments
* @return a new {@link BeanRegistrationAotContribution} instance
* @see BeanRegistrationCodeFragmentsDecorator
*/
static BeanRegistrationAotContribution withCustomCodeFragments(
UnaryOperator<BeanRegistrationCodeFragments> defaultCodeFragments) {
Assert.notNull(defaultCodeFragments, "'defaultCodeFragments' must not be null");
return new BeanRegistrationAotContribution() {
@Override
public BeanRegistrationCodeFragments customizeBeanRegistrationCodeFragments(
GenerationContext generationContext, BeanRegistrationCodeFragments codeFragments) {
return defaultCodeFragments.apply(codeFragments);
}
@Override
public void applyTo(GenerationContext generationContext, BeanRegistrationCode beanRegistrationCode) {
}
};
}
/**
* Create a contribution that applies the contribution of the first contribution
* followed by the second contribution. Any contribution can be {@code null} to be
* ignored and the concatenated contribution is {@code null} if both inputs are
* {@code null}.
* @param a the first contribution
* @param b the second contribution
* @return the concatenation of the two contributions, or {@code null} if
* they are both {@code null}.
* @since 6.1
*/
static @Nullable BeanRegistrationAotContribution concat(@Nullable BeanRegistrationAotContribution a,
@Nullable BeanRegistrationAotContribution b) {
if (a == null) {
return b;
}
if (b == null) {
return a;
}
return (generationContext, beanRegistrationCode) -> {
a.applyTo(generationContext, beanRegistrationCode);
b.applyTo(generationContext, beanRegistrationCode);
};
}
}
| BeanRegistrationAotContribution |
java | apache__rocketmq | controller/src/main/java/org/apache/rocketmq/controller/impl/DLedgerController.java | {
"start": 17494,
"end": 17931
} | interface ____<T> {
/**
* Run the controller event
*/
void run() throws Throwable;
/**
* Return the completableFuture
*/
CompletableFuture<RemotingCommand> future();
/**
* Handle Exception.
*/
void handleException(final Throwable t);
}
/**
* Event scheduler, schedule event handler from event queue
*/
| EventHandler |
java | hibernate__hibernate-orm | hibernate-core/src/test/java/org/hibernate/orm/test/inheritance/ManyToManyAbstractTablePerClassTest.java | {
"start": 1382,
"end": 3295
} | class ____ {
@Test
public void testAddAndRemove(SessionFactoryScope scope) {
scope.inTransaction( session -> {
final TablePerClassSub1 o1 = session.find( TablePerClassSub1.class, 1 );
assertNotNull( o1 );
assertEquals( 1, o1.childrenSet.size() );
assertEquals( 1, o1.childrenList.size() );
assertEquals( 1, o1.childrenMap.size() );
TablePerClassBase o2 = o1.childrenSet.iterator().next();
assertEquals( 2, o2.id );
assertEquals( 2, o1.childrenList.get( 0 ).id );
assertEquals( 2, o1.childrenMap.get( 2 ).id );
o1.childrenSet.remove( o2 );
o1.childrenList.remove( 0 );
o1.childrenMap.remove( 2 );
TablePerClassSub1 o3 = new TablePerClassSub1( 3 );
session.persist( o3 );
o1.childrenSet.add( o3 );
o1.childrenList.add( o3 );
o1.childrenMap.put( 3, o3 );
session.flush();
} );
scope.inTransaction( session -> {
final TablePerClassSub1 o1 = session.find( TablePerClassSub1.class, 1 );
assertNotNull( o1 );
assertEquals( 1, o1.childrenSet.size() );
assertEquals( 1, o1.childrenList.size() );
assertEquals( 1, o1.childrenMap.size() );
TablePerClassBase o2 = o1.childrenSet.iterator().next();
assertEquals( 3, o2.id );
assertEquals( 3, o1.childrenList.get( 0 ).id );
assertEquals( 3, o1.childrenMap.get( 3 ).id );
} );
}
@BeforeEach
public void setupData(SessionFactoryScope scope) {
scope.inTransaction( session -> {
TablePerClassSub1 o1 = new TablePerClassSub1( 1 );
TablePerClassSub2 o2 = new TablePerClassSub2( 2 );
o1.childrenSet.add( o2 );
o1.childrenList.add( o2 );
session.persist( o2 );
session.persist( o1 );
o1.childrenMap.put( 2, o2 );
} );
}
@AfterEach
public void cleanupData(SessionFactoryScope scope) {
scope.dropData();
}
@Entity(name = "TablePerClassBase")
@Inheritance(strategy = InheritanceType.TABLE_PER_CLASS)
public static abstract | ManyToManyAbstractTablePerClassTest |
java | apache__hadoop | hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java | {
"start": 1987,
"end": 8891
} | class ____ {
private static final Logger LOG = LoggerFactory.getLogger(TokenCache.class);
/**
* auxiliary method to get user's secret keys..
* @param alias
* @return secret key from the storage
*/
public static byte[] getSecretKey(Credentials credentials, Text alias) {
if(credentials == null)
return null;
return credentials.getSecretKey(alias);
}
/**
* Convenience method to obtain delegation tokens from namenodes
* corresponding to the paths passed.
* @param credentials
* @param ps array of paths
* @param conf configuration
* @throws IOException
*/
public static void obtainTokensForNamenodes(Credentials credentials,
Path[] ps, Configuration conf) throws IOException {
if (!UserGroupInformation.isSecurityEnabled()) {
return;
}
obtainTokensForNamenodesInternal(credentials, ps, conf);
}
/**
* Remove jobtoken referrals which don't make sense in the context
* of the task execution.
*
* @param conf
*/
public static void cleanUpTokenReferral(Configuration conf) {
conf.unset(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
}
static void obtainTokensForNamenodesInternal(Credentials credentials,
Path[] ps, Configuration conf) throws IOException {
Set<FileSystem> fsSet = new HashSet<FileSystem>();
for(Path p: ps) {
fsSet.add(p.getFileSystem(conf));
}
String masterPrincipal = Master.getMasterPrincipal(conf);
for (FileSystem fs : fsSet) {
obtainTokensForNamenodesInternal(fs, credentials, conf, masterPrincipal);
}
}
static boolean isTokenRenewalExcluded(FileSystem fs, Configuration conf) {
String [] nns =
conf.getStrings(MRJobConfig.JOB_NAMENODES_TOKEN_RENEWAL_EXCLUDE);
if (nns != null) {
String host = fs.getUri().getHost();
for(int i=0; i< nns.length; i++) {
if (nns[i].equals(host)) {
return true;
}
}
}
return false;
}
/**
* get delegation token for a specific FS
* @param fs
* @param credentials
* @param conf
* @throws IOException
*/
static void obtainTokensForNamenodesInternal(FileSystem fs,
Credentials credentials, Configuration conf, String renewer)
throws IOException {
// RM skips renewing token with empty renewer
String delegTokenRenewer = "";
if (!isTokenRenewalExcluded(fs, conf)) {
if (StringUtils.isEmpty(renewer)) {
throw new IOException(
"Can't get Master Kerberos principal for use as renewer");
} else {
delegTokenRenewer = renewer;
}
}
mergeBinaryTokens(credentials, conf);
final Token<?> tokens[] = fs.addDelegationTokens(delegTokenRenewer,
credentials);
if (tokens != null) {
for (Token<?> token : tokens) {
LOG.info("Got dt for " + fs.getUri() + "; "+token);
}
}
}
private static void mergeBinaryTokens(Credentials creds, Configuration conf) {
String binaryTokenFilename =
conf.get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
if (binaryTokenFilename != null) {
Credentials binary;
try {
binary = Credentials.readTokenStorageFile(
FileSystem.getLocal(conf).makeQualified(
new Path(binaryTokenFilename)),
conf);
} catch (IOException e) {
throw new RuntimeException(e);
}
// supplement existing tokens with the tokens in the binary file
creds.mergeAll(binary);
}
}
/**
* file name used on HDFS for generated job token
*/
@InterfaceAudience.Private
public static final String JOB_TOKEN_HDFS_FILE = "jobToken";
/**
* conf setting for job tokens cache file name
*/
@InterfaceAudience.Private
public static final String JOB_TOKENS_FILENAME = "mapreduce.job.jobTokenFile";
private static final Text JOB_TOKEN = new Text("JobToken");
private static final Text SHUFFLE_TOKEN = new Text("MapReduceShuffleToken");
private static final Text ENC_SPILL_KEY = new Text("MapReduceEncryptedSpillKey");
/**
* load job token from a file
* @deprecated Use {@link Credentials#readTokenStorageFile} instead,
* this method is included for compatibility against Hadoop-1.
* @param conf
* @throws IOException
*/
@InterfaceAudience.Private
@Deprecated
public static Credentials loadTokens(String jobTokenFile, JobConf conf)
throws IOException {
Path localJobTokenFile = new Path ("file:///" + jobTokenFile);
Credentials ts = Credentials.readTokenStorageFile(localJobTokenFile, conf);
if(LOG.isDebugEnabled()) {
LOG.debug("Task: Loaded jobTokenFile from: "+
localJobTokenFile.toUri().getPath()
+"; num of sec keys = " + ts.numberOfSecretKeys() +
" Number of tokens " + ts.numberOfTokens());
}
return ts;
}
/**
* load job token from a file
* @deprecated Use {@link Credentials#readTokenStorageFile} instead,
* this method is included for compatibility against Hadoop-1.
* @param conf
* @throws IOException
*/
@InterfaceAudience.Private
@Deprecated
public static Credentials loadTokens(String jobTokenFile, Configuration conf)
throws IOException {
return loadTokens(jobTokenFile, new JobConf(conf));
}
/**
* store job token
* @param t
*/
@InterfaceAudience.Private
public static void setJobToken(Token<? extends TokenIdentifier> t,
Credentials credentials) {
credentials.addToken(JOB_TOKEN, t);
}
/**
*
* @return job token
*/
@SuppressWarnings("unchecked")
@InterfaceAudience.Private
public static Token<JobTokenIdentifier> getJobToken(Credentials credentials) {
return (Token<JobTokenIdentifier>) credentials.getToken(JOB_TOKEN);
}
@InterfaceAudience.Private
public static void setShuffleSecretKey(byte[] key, Credentials credentials) {
credentials.addSecretKey(SHUFFLE_TOKEN, key);
}
@InterfaceAudience.Private
public static byte[] getShuffleSecretKey(Credentials credentials) {
return getSecretKey(credentials, SHUFFLE_TOKEN);
}
@InterfaceAudience.Private
public static void setEncryptedSpillKey(byte[] key, Credentials credentials) {
credentials.addSecretKey(ENC_SPILL_KEY, key);
}
@InterfaceAudience.Private
public static byte[] getEncryptedSpillKey(Credentials credentials) {
return getSecretKey(credentials, ENC_SPILL_KEY);
}
/**
* @deprecated Use {@link Credentials#getToken(org.apache.hadoop.io.Text)}
* instead, this method is included for compatibility against Hadoop-1
* @param namenode
* @return delegation token
*/
@InterfaceAudience.Private
@Deprecated
public static
Token<?> getDelegationToken(
Credentials credentials, String namenode) {
return (Token<?>) credentials.getToken(new Text(
namenode));
}
}
| TokenCache |
java | redisson__redisson | redisson/src/main/java/org/redisson/api/options/MapOptions.java | {
"start": 772,
"end": 1098
} | interface ____<K, V> extends ExMapOptions<MapOptions<K, V>, K, V> {
/**
* Creates options with the name of object instance
*
* @param name of object instance
* @return options instance
*/
static <K, V> MapOptions<K, V> name(String name) {
return new MapParams<>(name);
}
}
| MapOptions |
java | apache__hadoop | hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java | {
"start": 3244,
"end": 6519
} | class ____<E> extends LinkedBlockingQueue<E> {
public MockCallQueue(int levels, int cap, String ns, int[] capacityWeights,
boolean serverFailOverEnabled, Configuration conf) {
super(cap);
mockQueueConstructions++;
}
public void put(E e) throws InterruptedException {
super.put(e);
mockQueuePuts++;
}
}
// Returns true if mock queue was used for put
public boolean canPutInMockQueue() throws IOException {
FileSystem fs = FileSystem.get(config);
int putsBefore = mockQueuePuts;
fs.exists(new Path("/")); // Make an RPC call
fs.close();
return mockQueuePuts > putsBefore;
}
@Test
public void testRefresh() throws Exception {
// We want to count additional events, so we reset here
mockQueueConstructions = 0;
mockQueuePuts = 0;
setUp(MockCallQueue.class);
assertTrue(mockQueueConstructions > 0, "Mock queue should have been constructed");
assertTrue(canPutInMockQueue(), "Puts are routed through MockQueue");
int lastMockQueueConstructions = mockQueueConstructions;
// Replace queue with the queue specified in core-site.xml, which would be
// the LinkedBlockingQueue
DFSAdmin admin = new DFSAdmin(config);
String [] args = new String[]{"-refreshCallQueue"};
int exitCode = admin.run(args);
assertEquals(0, exitCode, "DFSAdmin should return 0");
assertEquals(lastMockQueueConstructions, mockQueueConstructions,
"Mock queue should have no additional constructions");
try {
assertFalse(canPutInMockQueue(), "Puts are routed through LBQ instead of MockQueue");
} catch (IOException ioe) {
fail("Could not put into queue at all");
}
}
@Test
public void testRefreshCallQueueWithFairCallQueue() throws Exception {
setUp(FairCallQueue.class);
boolean oldValue = DefaultMetricsSystem.inMiniClusterMode();
// throw an error when we double-initialize JvmMetrics
DefaultMetricsSystem.setMiniClusterMode(false);
int serviceHandlerCount = config.getInt(
DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
// check callqueue size
assertEquals(
CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT * serviceHandlerCount,
rpcServer.getClientRpcServer().getMaxQueueSize());
// Replace queue and update queue size
config.setInt(CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,
150);
try {
rpcServer.getClientRpcServer().refreshCallQueue(config);
} catch (Exception e) {
Throwable cause = e.getCause();
if ((cause instanceof MetricsException)
&& cause.getMessage().contains(
"Metrics source DecayRpcSchedulerMetrics2.ipc." + nnPort
+ " already exists!")) {
fail("DecayRpcScheduler metrics should be unregistered before"
+ " reregister");
}
throw e;
} finally {
DefaultMetricsSystem.setMiniClusterMode(oldValue);
}
// check callQueueSize has changed
assertEquals(150 * serviceHandlerCount, rpcServer.getClientRpcServer().getMaxQueueSize());
}
} | MockCallQueue |
java | elastic__elasticsearch | test/framework/src/main/java/org/elasticsearch/common/xcontent/support/AbstractFilteringTestCase.java | {
"start": 1633,
"end": 22198
} | interface ____ extends CheckedFunction<XContentBuilder, XContentBuilder, IOException> {}
protected abstract void testFilter(Builder expected, Builder actual, Collection<String> includes, Collection<String> excludes)
throws IOException;
/** Sample test case */
protected static final Builder SAMPLE = builderFor("sample.json");
protected static Builder builderFor(String file) {
return builder -> {
try (InputStream stream = AbstractFilteringTestCase.class.getResourceAsStream(file)) {
assertThat("Couldn't find [" + file + "]", stream, notNullValue());
try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, stream)) {
// copyCurrentStructure does not property handle filters when it is passed a json parser. So we hide it.
return builder.copyCurrentStructure(new FilterXContentParserWrapper(parser) {
});
}
}
};
}
public final void testNoFiltering() throws Exception {
final Builder expected = SAMPLE;
testFilter(expected, SAMPLE, emptySet(), emptySet());
testFilter(expected, SAMPLE, singleton("*"), emptySet());
testFilter(expected, SAMPLE, singleton("**"), emptySet());
testFilter(expected, SAMPLE, emptySet(), singleton("xyz"));
}
public final void testNoMatch() throws Exception {
final Builder expected = builder -> builder.startObject().endObject();
testFilter(expected, SAMPLE, singleton("xyz"), emptySet());
testFilter(expected, SAMPLE, emptySet(), singleton("*"));
testFilter(expected, SAMPLE, emptySet(), singleton("**"));
}
public final void testSimpleFieldInclusive() throws Exception {
final Builder expected = builder -> builder.startObject().field("title", "My awesome book").endObject();
testFilter(expected, SAMPLE, singleton("title"), emptySet());
}
public final void testSimpleFieldExclusive() throws Exception {
testFilter(builderFor("sample_no_title.json"), SAMPLE, emptySet(), singleton("title"));
}
public final void testSimpleFieldWithWildcardInclusive() throws Exception {
testFilter(builderFor("sample_just_pr.json"), SAMPLE, singleton("pr*"), emptySet());
}
public final void testSimpleFieldWithWildcardExclusive() throws Exception {
testFilter(builderFor("sample_no_pr.json"), SAMPLE, emptySet(), singleton("pr*"));
}
public final void testMultipleFieldsInclusive() throws Exception {
Builder expected = builder -> builder.startObject().field("title", "My awesome book").field("pages", 456).endObject();
testFilter(expected, SAMPLE, Sets.newHashSet("title", "pages"), emptySet());
}
public final void testMultipleFieldsExclusive() throws Exception {
testFilter(builderFor("sample_no_title_pages.json"), SAMPLE, emptySet(), Sets.newHashSet("title", "pages"));
}
public final void testSimpleArrayInclusive() throws Exception {
Builder expected = builder -> builder.startObject().startArray("tags").value("elasticsearch").value("java").endArray().endObject();
testFilter(expected, SAMPLE, singleton("tags"), emptySet());
}
public final void testSimpleArrayExclusive() throws Exception {
testFilter(builderFor("sample_no_tags.json"), SAMPLE, emptySet(), singleton("tags"));
}
public final void testSimpleArrayOfObjectsInclusive() throws Exception {
Builder expected = builderFor("sample_just_authors.json");
testFilter(expected, SAMPLE, singleton("authors"), emptySet());
testFilter(expected, SAMPLE, singleton("authors.*"), emptySet());
testFilter(expected, SAMPLE, singleton("authors.*name"), emptySet());
}
public final void testSimpleArrayOfObjectsExclusive() throws Exception {
Builder expected = builderFor("sample_no_authors.json");
testFilter(expected, SAMPLE, emptySet(), singleton("authors"));
if (removesEmptyArrays()) {
testFilter(expected, SAMPLE, emptySet(), singleton("authors.*"));
testFilter(expected, SAMPLE, emptySet(), singleton("authors.*name"));
}
}
protected abstract boolean removesEmptyArrays();
public void testSimpleArrayOfObjectsPropertyInclusive() throws Exception {
Builder expected = builderFor("sample_just_authors_lastname.json");
testFilter(expected, SAMPLE, singleton("authors.lastname"), emptySet());
testFilter(expected, SAMPLE, singleton("authors.l*"), emptySet());
}
public void testSimpleArrayOfObjectsPropertyExclusive() throws Exception {
Builder expected = builderFor("sample_no_authors_lastname.json");
testFilter(expected, SAMPLE, emptySet(), singleton("authors.lastname"));
testFilter(expected, SAMPLE, emptySet(), singleton("authors.l*"));
}
public void testRecurseField1Inclusive() throws Exception {
Builder expected = builderFor("sample_just_names.json");
testFilter(expected, SAMPLE, singleton("**.name"), emptySet());
}
public void testRecurseField1Exclusive() throws Exception {
Builder expected = builderFor("sample_no_names.json");
testFilter(expected, SAMPLE, emptySet(), singleton("**.name"));
}
public void testRecurseField2Inclusive() throws Exception {
Builder expected = builderFor("sample_just_properties_names.json");
testFilter(expected, SAMPLE, singleton("properties.**.name"), emptySet());
}
public void testRecurseField2Exclusive() throws Exception {
Builder expected = builderFor("sample_no_properties_names.json");
testFilter(expected, SAMPLE, emptySet(), singleton("properties.**.name"));
}
public void testRecurseField3Inclusive() throws Exception {
Builder expected = builderFor("sample_just_properties_en_names.json");
testFilter(expected, SAMPLE, singleton("properties.*.en.**.name"), emptySet());
}
public void testRecurseField3Exclusive() throws Exception {
Builder expected = builderFor("sample_no_properties_en_names.json");
testFilter(expected, SAMPLE, emptySet(), singleton("properties.*.en.**.name"));
}
public void testRecurseField4Inclusive() throws Exception {
Builder expected = builderFor("sample_just_properties_distributors_names.json");
testFilter(expected, SAMPLE, singleton("properties.**.distributors.name"), emptySet());
}
public void testRecurseField4Exclusive() throws Exception {
Builder expected = builderFor("sample_no_properties_distributors_names.json");
testFilter(expected, SAMPLE, emptySet(), singleton("properties.**.distributors.name"));
}
public void testRawField() throws Exception {
Builder expectedRawField = builder -> {
builder.startObject();
builder.field("foo", 0);
builder.startObject("raw").field("content", "hello world!").endObject();
return builder.endObject();
};
Builder expectedRawFieldRemoved = builder -> {
builder.startObject();
builder.field("foo", 0);
return builder.endObject();
};
Builder expectedRawFieldIncluded = builder -> {
builder.startObject();
builder.startObject("raw").field("content", "hello world!").endObject();
return builder.endObject();
};
@SuppressWarnings("deprecation") // Tests filtering with a deprecated method
Builder sampleWithRaw = builder -> {
BytesReference raw = BytesReference.bytes(
XContentBuilder.builder(builder.contentType().xContent()).startObject().field("content", "hello world!").endObject()
);
return builder.startObject().field("foo", 0).rawField("raw", raw.streamInput()).endObject();
};
testFilter(expectedRawField, sampleWithRaw, emptySet(), emptySet());
testFilter(expectedRawFieldRemoved, sampleWithRaw, singleton("f*"), emptySet());
testFilter(expectedRawFieldRemoved, sampleWithRaw, emptySet(), singleton("r*"));
testFilter(expectedRawFieldIncluded, sampleWithRaw, singleton("r*"), emptySet());
testFilter(expectedRawFieldIncluded, sampleWithRaw, emptySet(), singleton("f*"));
sampleWithRaw = builder -> {
BytesReference raw = BytesReference.bytes(
XContentBuilder.builder(builder.contentType().xContent()).startObject().field("content", "hello world!").endObject()
);
return builder.startObject().field("foo", 0).rawField("raw", raw.streamInput(), builder.contentType()).endObject();
};
testFilter(expectedRawField, sampleWithRaw, emptySet(), emptySet());
testFilter(expectedRawFieldRemoved, sampleWithRaw, singleton("f*"), emptySet());
testFilter(expectedRawFieldRemoved, sampleWithRaw, emptySet(), singleton("r*"));
testFilter(expectedRawFieldIncluded, sampleWithRaw, singleton("r*"), emptySet());
testFilter(expectedRawFieldIncluded, sampleWithRaw, emptySet(), singleton("f*"));
}
public void testArrays() throws Exception {
// Test: Array of values (no filtering)
Builder sampleArrayOfValues = builder -> {
builder.startObject();
builder.startArray("tags").value("lorem").value("ipsum").value("dolor").endArray();
return builder.endObject();
};
testFilter(sampleArrayOfValues, sampleArrayOfValues, singleton("t*"), emptySet());
testFilter(sampleArrayOfValues, sampleArrayOfValues, singleton("tags"), emptySet());
testFilter(sampleArrayOfValues, sampleArrayOfValues, emptySet(), singleton("a"));
// Test: Array of values (with filtering)
Builder expected = builder -> builder.startObject().endObject();
testFilter(expected, sampleArrayOfValues, singleton("foo"), emptySet());
testFilter(expected, sampleArrayOfValues, emptySet(), singleton("t*"));
testFilter(expected, sampleArrayOfValues, emptySet(), singleton("tags"));
// Test: Array of objects (no filtering)
Builder sampleArrayOfObjects = builder -> {
builder.startObject();
builder.startArray("tags");
{
builder.startObject().field("lastname", "lorem").endObject();
builder.startObject().field("firstname", "ipsum").endObject();
}
builder.endArray();
return builder.endObject();
};
testFilter(sampleArrayOfObjects, sampleArrayOfObjects, singleton("t*"), emptySet());
testFilter(sampleArrayOfObjects, sampleArrayOfObjects, singleton("tags"), emptySet());
testFilter(sampleArrayOfObjects, sampleArrayOfObjects, emptySet(), singleton("a"));
// Test: Array of objects (with filtering)
testFilter(expected, sampleArrayOfObjects, singleton("foo"), emptySet());
testFilter(expected, sampleArrayOfObjects, emptySet(), singleton("t*"));
testFilter(expected, sampleArrayOfObjects, emptySet(), singleton("tags"));
// Test: Array of objects (with partial filtering)
expected = builder -> {
builder.startObject();
builder.startArray("tags");
{
builder.startObject().field("firstname", "ipsum").endObject();
}
builder.endArray();
return builder.endObject();
};
testFilter(expected, sampleArrayOfObjects, singleton("t*.firstname"), emptySet());
testFilter(expected, sampleArrayOfObjects, emptySet(), singleton("t*.lastname"));
}
public void testEmptyObject() throws IOException {
final Builder sample = builder -> builder.startObject().startObject("foo").endObject().endObject();
Builder expected = builder -> builder.startObject().startObject("foo").endObject().endObject();
testFilter(expected, sample, singleton("foo"), emptySet());
testFilter(expected, sample, emptySet(), singleton("bar"));
testFilter(expected, sample, singleton("f*"), singleton("baz"));
expected = builder -> builder.startObject().endObject();
testFilter(expected, sample, emptySet(), singleton("foo"));
testFilter(expected, sample, singleton("bar"), emptySet());
testFilter(expected, sample, singleton("f*"), singleton("foo"));
}
public void testSingleFieldWithBothExcludesIncludes() throws IOException {
Builder expected = builder -> builder.startObject().field("pages", 456).field("price", 27.99).endObject();
testFilter(expected, SAMPLE, singleton("p*"), singleton("properties"));
}
public void testObjectsInArrayWithBothExcludesIncludes() throws IOException {
Set<String> includes = Sets.newHashSet("tags", "authors");
Set<String> excludes = singleton("authors.name");
testFilter(builderFor("sample_just_tags_authors_no_name.json"), SAMPLE, includes, excludes);
}
public void testRecursiveObjectsInArrayWithBothExcludesIncludes() throws IOException {
Set<String> includes = Sets.newHashSet("**.language", "properties.weight");
Set<String> excludes = singleton("**.distributors");
testFilter(builderFor("sample_just_properties_no_distributors.json"), SAMPLE, includes, excludes);
}
public void testRecursiveSameObjectWithBothExcludesIncludes() throws IOException {
Set<String> includes = singleton("**.distributors");
Set<String> excludes = singleton("**.distributors");
final Builder expected = builder -> builder.startObject().endObject();
testFilter(expected, SAMPLE, includes, excludes);
}
public void testRecursiveObjectsPropertiesWithBothExcludesIncludes() throws IOException {
Set<String> includes = singleton("**.en.*");
Set<String> excludes = Sets.newHashSet("**.distributors.*.name", "**.street");
testFilter(builderFor("sample_just_properties_en_no_distributors_name_no_street.json"), SAMPLE, includes, excludes);
}
public void testWithLfAtEnd() throws IOException {
final Builder sample = builder -> {
builder.startObject();
builder.startObject("foo").field("bar", "baz").endObject();
return builder.endObject().prettyPrint().lfAtEnd();
};
testFilter(sample, sample, singleton("foo"), emptySet());
testFilter(sample, sample, emptySet(), singleton("bar"));
testFilter(sample, sample, singleton("f*"), singleton("baz"));
final Builder expected = builder -> builder.startObject().endObject().prettyPrint().lfAtEnd();
testFilter(expected, sample, emptySet(), singleton("foo"));
testFilter(expected, sample, singleton("bar"), emptySet());
testFilter(expected, sample, singleton("f*"), singleton("foo"));
}
public void testBasics() throws Exception {
Builder sample = builder -> {
return builder.startObject().field("test1", "value1").field("test2", "value2").field("something_else", "value3").endObject();
};
Builder expected = builder -> builder.startObject().field("test1", "value1").endObject();
testFilter(expected, sample, singleton("test1"), emptySet());
expected = builder -> builder.startObject().field("test1", "value1").field("test2", "value2").endObject();
testFilter(expected, sample, singleton("test*"), emptySet());
expected = builder -> builder.startObject().field("test2", "value2").field("something_else", "value3").endObject();
testFilter(expected, sample, emptySet(), singleton("test1"));
// more complex object...
Builder complex = builder -> {
builder.startObject();
builder.startObject("path1");
{
builder.startArray("path2");
{
builder.startObject().field("test", "value1").endObject();
builder.startObject().field("test", "value2").endObject();
}
builder.endArray();
}
builder.endObject();
builder.field("test1", "value1");
return builder.endObject();
};
expected = builder -> {
builder.startObject();
builder.startObject("path1");
{
builder.startArray("path2");
{
builder.startObject().field("test", "value1").endObject();
builder.startObject().field("test", "value2").endObject();
}
builder.endArray();
}
builder.endObject();
return builder.endObject();
};
testFilter(expected, complex, singleton("path1"), emptySet());
testFilter(expected, complex, singleton("path1*"), emptySet());
testFilter(expected, complex, singleton("path1.path2.*"), emptySet());
expected = builder -> builder.startObject().field("test1", "value1").endObject();
testFilter(expected, complex, singleton("test1*"), emptySet());
}
/**
* Tests that we can extract paths containing non-ascii characters.
*/
public void testFilterSupplementaryCharactersInPaths() throws IOException {
Builder sample = builder -> builder.startObject().field("搜索", 2).field("指数", 3).endObject();
Builder expected = builder -> builder.startObject().field("搜索", 2).endObject();
testFilter(expected, sample, singleton("搜索"), emptySet());
expected = builder -> builder.startObject().field("指数", 3).endObject();
testFilter(expected, sample, emptySet(), singleton("搜索"));
}
/**
* Tests that we can extract paths which share a prefix with other paths.
*/
public void testFilterSharedPrefixes() throws IOException {
Builder sample = builder -> builder.startObject().field("foobar", 2).field("foobaz", 3).endObject();
Builder expected = builder -> builder.startObject().field("foobar", 2).endObject();
testFilter(expected, sample, singleton("foobar"), emptySet());
expected = builder -> builder.startObject().field("foobaz", 3).endObject();
testFilter(expected, sample, emptySet(), singleton("foobar"));
}
/**
* Tests that we can extract paths which have another path as a prefix.
*/
public void testFilterPrefix() throws IOException {
Builder sample = builder -> builder.startObject().array("photos", "foo", "bar").field("photosCount", 2).endObject();
Builder expected = builder -> builder.startObject().field("photosCount", 2).endObject();
testFilter(expected, sample, singleton("photosCount"), emptySet());
}
public void testManyFilters() throws IOException, URISyntaxException {
Builder deep = builder -> builder.startObject()
.startObject("system")
.startObject("process")
.startObject("cgroup")
.startObject("memory")
.startObject("stats")
.startObject("mapped_file")
.field("bytes", 100)
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endObject()
.endObject();
try (InputStream stream = AbstractFilteringTestCase.class.getResourceAsStream("many_filters.txt")) {
assertThat("Couldn't find [many_filters.txt]", stream, notNullValue());
Set<String> manyFilters = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)).lines()
.filter(s -> false == s.startsWith("#"))
.collect(toSet());
testFilter(deep, deep, manyFilters, emptySet());
}
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/80160")
public void testExcludeWildCardFields() throws IOException {
Builder sample = builder -> builder.startObject()
.startObject("include")
.field("field1", "v1")
.field("field2", "v2")
.endObject()
.field("include2", "vv2")
.endObject();
Builder expected = builder -> builder.startObject().startObject("include").field("field1", "v1").endObject().endObject();
testFilter(expected, sample, singleton("include"), singleton("*.field2"));
}
}
| Builder |
java | quarkusio__quarkus | extensions/vertx-http/runtime/src/main/java/io/quarkus/vertx/http/runtime/TrustedProxyCheck.java | {
"start": 5190,
"end": 5712
} | class ____ {
final BiPredicate<InetAddress, Integer> proxyCheck;
final String hostName;
final int port;
TrustedProxyCheckPart(BiPredicate<InetAddress, Integer> proxyCheck) {
this.proxyCheck = proxyCheck;
this.hostName = null;
this.port = 0;
}
TrustedProxyCheckPart(String hostName, int port) {
this.proxyCheck = null;
this.hostName = hostName;
this.port = port;
}
}
}
| TrustedProxyCheckPart |
java | elastic__elasticsearch | x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitVariableEvaluator.java | {
"start": 1168,
"end": 5369
} | class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(SplitVariableEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator str;
private final EvalOperator.ExpressionEvaluator delim;
private final BytesRef scratch;
private final DriverContext driverContext;
private Warnings warnings;
public SplitVariableEvaluator(Source source, EvalOperator.ExpressionEvaluator str,
EvalOperator.ExpressionEvaluator delim, BytesRef scratch, DriverContext driverContext) {
this.source = source;
this.str = str;
this.delim = delim;
this.scratch = scratch;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (BytesRefBlock strBlock = (BytesRefBlock) str.eval(page)) {
try (BytesRefBlock delimBlock = (BytesRefBlock) delim.eval(page)) {
BytesRefVector strVector = strBlock.asVector();
if (strVector == null) {
return eval(page.getPositionCount(), strBlock, delimBlock);
}
BytesRefVector delimVector = delimBlock.asVector();
if (delimVector == null) {
return eval(page.getPositionCount(), strBlock, delimBlock);
}
return eval(page.getPositionCount(), strVector, delimVector);
}
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += str.baseRamBytesUsed();
baseRamBytesUsed += delim.baseRamBytesUsed();
return baseRamBytesUsed;
}
public BytesRefBlock eval(int positionCount, BytesRefBlock strBlock, BytesRefBlock delimBlock) {
try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) {
BytesRef strScratch = new BytesRef();
BytesRef delimScratch = new BytesRef();
position: for (int p = 0; p < positionCount; p++) {
switch (strBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
switch (delimBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
BytesRef str = strBlock.getBytesRef(strBlock.getFirstValueIndex(p), strScratch);
BytesRef delim = delimBlock.getBytesRef(delimBlock.getFirstValueIndex(p), delimScratch);
Split.process(result, str, delim, this.scratch);
}
return result.build();
}
}
public BytesRefBlock eval(int positionCount, BytesRefVector strVector,
BytesRefVector delimVector) {
try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) {
BytesRef strScratch = new BytesRef();
BytesRef delimScratch = new BytesRef();
position: for (int p = 0; p < positionCount; p++) {
BytesRef str = strVector.getBytesRef(p, strScratch);
BytesRef delim = delimVector.getBytesRef(p, delimScratch);
Split.process(result, str, delim, this.scratch);
}
return result.build();
}
}
@Override
public String toString() {
return "SplitVariableEvaluator[" + "str=" + str + ", delim=" + delim + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(str, delim);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static | SplitVariableEvaluator |
java | apache__dubbo | dubbo-rpc/dubbo-rpc-api/src/test/java/org/apache/dubbo/rpc/proxy/wrapper/StubProxyFactoryWrapperTest.java | {
"start": 1388,
"end": 2336
} | class ____ {
@Test
void test() {
ProxyFactory proxyFactory = Mockito.mock(ProxyFactory.class);
Protocol protocol = Mockito.mock(Protocol.class);
StubProxyFactoryWrapper stubProxyFactoryWrapper = new StubProxyFactoryWrapper(proxyFactory);
stubProxyFactoryWrapper.setProtocol(protocol);
URL url = URL.valueOf("test://127.0.0.1/test?stub=true");
url = url.addParameter(STUB_KEY, "true");
url = url.addParameter(STUB_EVENT_KEY, "true");
Invoker<DemoService> invoker = Mockito.mock(Invoker.class);
Mockito.when(invoker.getInterface()).thenReturn(DemoService.class);
Mockito.when(invoker.getUrl()).thenReturn(url);
DemoService proxy = stubProxyFactoryWrapper.getProxy(invoker, false);
Assertions.assertTrue(proxy instanceof DemoServiceStub);
Mockito.verify(protocol, Mockito.times(1)).export(Mockito.any());
}
}
| StubProxyFactoryWrapperTest |
java | square__okhttp | samples/slack/src/main/java/okhttp3/slack/SlackApi.java | {
"start": 1122,
"end": 1475
} | class ____ operate without a user, or on behalf of many users. Use the Slack API
* dashboard to create a client ID and secret for this application.
*
* <p>You must configure your Slack API OAuth and Permissions page with a localhost URL like {@code
* http://localhost:53203/oauth/}, passing the same port to this class’ constructor.
*/
public final | may |
java | apache__flink | flink-core/src/main/java/org/apache/flink/api/common/typeutils/base/LocalDateTimeSerializer.java | {
"start": 1293,
"end": 3984
} | class ____ extends TypeSerializerSingleton<LocalDateTime> {
private static final long serialVersionUID = 1L;
public static final LocalDateTimeSerializer INSTANCE = new LocalDateTimeSerializer();
@Override
public boolean isImmutableType() {
return true;
}
@Override
public LocalDateTime createInstance() {
return LocalDateTime.of(
LocalDateSerializer.INSTANCE.createInstance(),
LocalTimeSerializer.INSTANCE.createInstance());
}
@Override
public LocalDateTime copy(LocalDateTime from) {
return from;
}
@Override
public LocalDateTime copy(LocalDateTime from, LocalDateTime reuse) {
return from;
}
@Override
public int getLength() {
return LocalDateSerializer.INSTANCE.getLength() + LocalTimeSerializer.INSTANCE.getLength();
}
@Override
public void serialize(LocalDateTime record, DataOutputView target) throws IOException {
if (record == null) {
LocalDateSerializer.INSTANCE.serialize(null, target);
LocalTimeSerializer.INSTANCE.serialize(null, target);
} else {
LocalDateSerializer.INSTANCE.serialize(record.toLocalDate(), target);
LocalTimeSerializer.INSTANCE.serialize(record.toLocalTime(), target);
}
}
@Override
public LocalDateTime deserialize(DataInputView source) throws IOException {
LocalDate localDate = LocalDateSerializer.INSTANCE.deserialize(source);
LocalTime localTime = LocalTimeSerializer.INSTANCE.deserialize(source);
if (localDate == null && localTime == null) {
return null;
} else if (localDate == null || localTime == null) {
throw new IOException("Exactly one of LocalDate and LocalTime is null.");
} else {
return LocalDateTime.of(localDate, localTime);
}
}
@Override
public LocalDateTime deserialize(LocalDateTime reuse, DataInputView source) throws IOException {
return deserialize(source);
}
@Override
public void copy(DataInputView source, DataOutputView target) throws IOException {
LocalDateSerializer.INSTANCE.copy(source, target);
LocalTimeSerializer.INSTANCE.copy(source, target);
}
@Override
public TypeSerializerSnapshot<LocalDateTime> snapshotConfiguration() {
return new LocalDateTimeSerializerSnapshot();
}
// ------------------------------------------------------------------------
/** Serializer configuration snapshot for compatibility and format evolution. */
@SuppressWarnings("WeakerAccess")
public static final | LocalDateTimeSerializer |
java | alibaba__fastjson | src/test/java/com/alibaba/json/bvt/issue_3300/Issue3352.java | {
"start": 169,
"end": 893
} | class ____ extends TestCase {
public void test_for_issue() throws Exception {
UUID uuid = UUID.randomUUID();
JSONObject object = new JSONObject();
Map map = object.getInnerMap();
map.put("1", "1");
map.put("A", "A");
map.put("true", "true");
map.put(uuid.toString(), uuid);
assertTrue(object.containsKey(1));
assertTrue(object.containsKey("1"));
assertTrue(object.containsKey('A'));
assertTrue(object.containsKey("A"));
assertTrue(object.containsKey(true));
assertTrue(object.containsKey("true"));
assertTrue(object.containsKey(uuid));
assertTrue(object.containsKey(uuid.toString()));
}
}
| Issue3352 |
java | quarkusio__quarkus | core/deployment/src/main/java/io/quarkus/deployment/pkg/builditem/ProcessInheritIODisabled.java | {
"start": 1088,
"end": 1631
} | class ____ implements Function<Map<String, Object>, List<Consumer<BuildChainBuilder>>> {
@Override
public List<Consumer<BuildChainBuilder>> apply(final Map<String, Object> props) {
return Collections.singletonList((builder) -> {
final BuildStepBuilder stepBuilder = builder.addBuildStep((ctx) -> {
ctx.produce(new ProcessInheritIODisabled());
});
stepBuilder.produces(ProcessInheritIODisabled.class).build();
});
}
}
}
| Factory |
java | grpc__grpc-java | netty/src/test/java/io/grpc/netty/NettyClientTransportFactoryTest.java | {
"start": 830,
"end": 1095
} | class ____ extends AbstractClientTransportFactoryTest {
@Override protected ClientTransportFactory newClientTransportFactory() {
return NettyChannelBuilder
.forAddress("localhost", 0)
.buildTransportFactory();
}
}
| NettyClientTransportFactoryTest |
java | apache__avro | lang/java/ipc/src/test/java/org/apache/avro/io/Perf.java | {
"start": 50961,
"end": 51230
} | class ____ extends ReflectTest<int[]> {
ReflectIntArrayTest() throws IOException {
super("ReflectIntArray", new int[0], 12);
}
@Override
protected int[] createDatum(Random r) {
return populateIntArray(r);
}
}
static | ReflectIntArrayTest |
java | spring-projects__spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/parsing/BeanDefinitionParsingException.java | {
"start": 960,
"end": 1337
} | class ____ extends BeanDefinitionStoreException {
/**
* Create a new BeanDefinitionParsingException.
* @param problem the configuration problem that was detected during the parsing process
*/
public BeanDefinitionParsingException(Problem problem) {
super(problem.getResourceDescription(), problem.toString(), problem.getRootCause());
}
}
| BeanDefinitionParsingException |
java | FasterXML__jackson-databind | src/main/java/tools/jackson/databind/deser/std/StdDeserializer.java | {
"start": 1217,
"end": 1378
} | class ____ common deserializers. Contains shared
* base functionality for dealing with primitive values, such
* as (re)parsing from String.
*/
public abstract | for |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.