language
stringclasses 1
value | repo
stringclasses 60
values | path
stringlengths 22
294
| class_span
dict | source
stringlengths 13
1.16M
| target
stringlengths 1
113
|
|---|---|---|---|---|---|
java
|
spring-projects__spring-framework
|
spring-webmvc/src/test/java/org/springframework/web/servlet/resource/EncodedResourceResolverTests.java
|
{
"start": 1493,
"end": 5357
}
|
class ____ {
private ResourceResolverChain resolver;
private List<Resource> locations;
@BeforeEach
void setup() {
Cache cache = new ConcurrentMapCache("resourceCache");
VersionResourceResolver versionResolver = new VersionResourceResolver();
versionResolver.setStrategyMap(Collections.singletonMap("/**", new ContentVersionStrategy()));
List<ResourceResolver> resolvers = new ArrayList<>();
resolvers.add(new CachingResourceResolver(cache));
resolvers.add(new EncodedResourceResolver());
resolvers.add(versionResolver);
resolvers.add(new PathResourceResolver());
this.resolver = new DefaultResourceResolverChain(resolvers);
this.locations = new ArrayList<>();
this.locations.add(new ClassPathResource("test/", getClass()));
this.locations.add(new ClassPathResource("testalternatepath/", getClass()));
}
@Test
void resolveGzipped(GzippedFiles gzippedFiles) {
String file = "js/foo.js";
gzippedFiles.create(file);
MockHttpServletRequest request = new MockHttpServletRequest();
request.addHeader("Accept-Encoding", "gzip");
Resource actual = this.resolver.resolveResource(request, file, this.locations);
assertThat(actual.getDescription()).isEqualTo(getResource(file + ".gz").getDescription());
assertThat(actual.getFilename()).isEqualTo(getResource(file).getFilename());
boolean condition = actual instanceof HttpResource;
assertThat(condition).isTrue();
HttpHeaders headers = ((HttpResource) actual).getResponseHeaders();
assertThat(headers.getFirst(HttpHeaders.CONTENT_ENCODING)).isEqualTo("gzip");
assertThat(headers.getFirst(HttpHeaders.VARY)).isEqualTo("Accept-Encoding");
}
@Test
void resolveGzippedWithVersion(GzippedFiles gzippedFiles) {
gzippedFiles.create("foo.css");
String file = "foo-e36d2e05253c6c7085a91522ce43a0b4.css";
MockHttpServletRequest request = new MockHttpServletRequest();
request.addHeader("Accept-Encoding", "gzip");
Resource resolved = this.resolver.resolveResource(request, file, this.locations);
assertThat(resolved.getDescription()).isEqualTo(getResource("foo.css.gz").getDescription());
assertThat(resolved.getFilename()).isEqualTo(getResource("foo.css").getFilename());
boolean condition = resolved instanceof HttpResource;
assertThat(condition).isTrue();
}
@Test
void resolveFromCacheWithEncodingVariants(GzippedFiles gzippedFiles) {
// 1. Resolve, and cache .gz variant
String file = "js/foo.js";
gzippedFiles.create(file);
MockHttpServletRequest request = new MockHttpServletRequest("GET", "/js/foo.js");
request.addHeader("Accept-Encoding", "gzip");
Resource resolved = this.resolver.resolveResource(request, file, this.locations);
assertThat(resolved.getDescription()).isEqualTo(getResource(file + ".gz").getDescription());
assertThat(resolved.getFilename()).isEqualTo(getResource(file).getFilename());
boolean condition = resolved instanceof HttpResource;
assertThat(condition).isTrue();
// 2. Resolve unencoded resource
request = new MockHttpServletRequest("GET", "/js/foo.js");
resolved = this.resolver.resolveResource(request, file, this.locations);
assertThat(resolved.getDescription()).isEqualTo(getResource(file).getDescription());
assertThat(resolved.getFilename()).isEqualTo(getResource(file).getFilename());
boolean condition1 = resolved instanceof HttpResource;
assertThat(condition1).isFalse();
}
@Test // SPR-13149
public void resolveWithNullRequest() {
String file = "js/foo.js";
Resource resolved = this.resolver.resolveResource(null, file, this.locations);
assertThat(resolved.getDescription()).isEqualTo(getResource(file).getDescription());
assertThat(resolved.getFilename()).isEqualTo(getResource(file).getFilename());
}
private Resource getResource(String filePath) {
return new ClassPathResource("test/" + filePath, getClass());
}
}
|
EncodedResourceResolverTests
|
java
|
mapstruct__mapstruct
|
processor/src/test/java/org/mapstruct/ap/test/bugs/_3673/Details.java
|
{
"start": 198,
"end": 373
}
|
class ____ {
private final String name;
public Details(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
|
Details
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsAction.java
|
{
"start": 3523,
"end": 3991
}
|
class ____ extends AbstractGetResourcesResponse<Job> implements ToXContentObject {
public Response(QueryPage<Job> jobs) {
super(jobs);
}
public Response(StreamInput in) throws IOException {
super(in);
}
public QueryPage<Job> getResponse() {
return getResources();
}
@Override
protected Reader<Job> getReader() {
return Job::new;
}
}
}
|
Response
|
java
|
apache__camel
|
components/camel-sjms/src/main/java/org/apache/camel/component/sjms/reply/TemporaryQueueMessageListenerContainer.java
|
{
"start": 1292,
"end": 2111
}
|
class ____ extends SimpleMessageListenerContainer {
// no need to override any methods currently
public TemporaryQueueMessageListenerContainer(SjmsEndpoint endpoint) {
super(endpoint);
}
@Override
protected Session createSession(Connection connection, SjmsEndpoint endpoint) throws Exception {
// cannot be transacted when doing request/reply
return connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
}
@Override
protected MessageConsumer createMessageConsumer(Session session) throws Exception {
Destination destination = getDestinationCreationStrategy().createTemporaryDestination(session, false);
return getEndpoint().getJmsObjectFactory().createQueueMessageConsumer(session, destination);
}
}
|
TemporaryQueueMessageListenerContainer
|
java
|
google__guava
|
guava/src/com/google/common/hash/BloomFilter.java
|
{
"start": 23790,
"end": 27729
}
|
class ____<T extends @Nullable Object> implements Serializable {
final long[] data;
final int numHashFunctions;
final Funnel<? super T> funnel;
final Strategy strategy;
SerialForm(BloomFilter<T> bf) {
this.data = LockFreeBitArray.toPlainArray(bf.bits.data);
this.numHashFunctions = bf.numHashFunctions;
this.funnel = bf.funnel;
this.strategy = bf.strategy;
}
Object readResolve() {
return new BloomFilter<T>(new LockFreeBitArray(data), numHashFunctions, funnel, strategy);
}
private static final long serialVersionUID = 1;
}
/**
* Writes this {@code BloomFilter} to an output stream, with a custom format (not Java
* serialization). This has been measured to save at least 400 bytes compared to regular
* serialization.
*
* <p>Use {@linkplain #readFrom(InputStream, Funnel)} to reconstruct the written BloomFilter.
*/
public void writeTo(OutputStream out) throws IOException {
// Serial form:
// 1 signed byte for the strategy
// 1 unsigned byte for the number of hash functions
// 1 big endian int, the number of longs in our bitset
// N big endian longs of our bitset
DataOutputStream dout = new DataOutputStream(out);
dout.writeByte(SignedBytes.checkedCast(strategy.ordinal()));
dout.writeByte(UnsignedBytes.checkedCast(numHashFunctions)); // note: checked at the c'tor
dout.writeInt(bits.data.length());
for (int i = 0; i < bits.data.length(); i++) {
dout.writeLong(bits.data.get(i));
}
}
/**
* Reads a byte stream, which was written by {@linkplain #writeTo(OutputStream)}, into a {@code
* BloomFilter}.
*
* <p>The {@code Funnel} to be used is not encoded in the stream, so it must be provided here.
* <b>Warning:</b> the funnel provided <b>must</b> behave identically to the one used to populate
* the original Bloom filter!
*
* @throws IOException if the InputStream throws an {@code IOException}, or if its data does not
* appear to be a BloomFilter serialized using the {@linkplain #writeTo(OutputStream)} method.
*/
@SuppressWarnings("CatchingUnchecked") // sneaky checked exception
public static <T extends @Nullable Object> BloomFilter<T> readFrom(
InputStream in, Funnel<? super T> funnel) throws IOException {
checkNotNull(in, "InputStream");
checkNotNull(funnel, "Funnel");
int strategyOrdinal = -1;
int numHashFunctions = -1;
int dataLength = -1;
try {
DataInputStream din = new DataInputStream(in);
// currently this assumes there is no negative ordinal; will have to be updated if we
// add non-stateless strategies (for which we've reserved negative ordinals; see
// Strategy.ordinal()).
strategyOrdinal = din.readByte();
numHashFunctions = toUnsignedInt(din.readByte());
dataLength = din.readInt();
/*
* We document in BloomFilterStrategies that we must not change the ordering, and we have a
* test that verifies that we don't do so.
*/
@SuppressWarnings("EnumOrdinal")
Strategy strategy = BloomFilterStrategies.values()[strategyOrdinal];
LockFreeBitArray dataArray = new LockFreeBitArray(Math.multiplyExact(dataLength, 64L));
for (int i = 0; i < dataLength; i++) {
dataArray.putData(i, din.readLong());
}
return new BloomFilter<>(dataArray, numHashFunctions, funnel, strategy);
} catch (IOException e) {
throw e;
} catch (Exception e) { // sneaky checked exception
String message =
"Unable to deserialize BloomFilter from InputStream."
+ " strategyOrdinal: "
+ strategyOrdinal
+ " numHashFunctions: "
+ numHashFunctions
+ " dataLength: "
+ dataLength;
throw new IOException(message, e);
}
}
private static final long serialVersionUID = 0xcafebabe;
}
|
SerialForm
|
java
|
reactor__reactor-core
|
reactor-core/src/main/java/reactor/core/publisher/MonoCacheTime.java
|
{
"start": 12052,
"end": 12602
}
|
class ____<T> extends Operators.MonoSubscriber<T, T> {
@Nullable CoordinatorSubscriber<T> coordinator;
CacheMonoSubscriber(CoreSubscriber<? super T> actual) {
super(actual);
}
@Override
public void cancel() {
super.cancel();
CoordinatorSubscriber<T> coordinator = this.coordinator;
if (coordinator != null) {
coordinator.remove(this);
}
}
@Override
public @Nullable Object scanUnsafe(Attr key) {
if (key == Attr.RUN_STYLE) return Attr.RunStyle.SYNC;
return super.scanUnsafe(key);
}
}
}
|
CacheMonoSubscriber
|
java
|
lettuce-io__lettuce-core
|
src/main/java/io/lettuce/core/ScriptOutputType.java
|
{
"start": 2279,
"end": 2912
}
|
enum ____ {
/**
* Boolean output (expects a number {@code 0} or {@code 1} to be converted to a boolean value).
*/
BOOLEAN,
/**
* {@link Long integer} output.
*/
INTEGER,
/**
* List of flat arrays.
*/
MULTI,
/**
* Simple status value such as {@code OK}. The Redis response is parsed as ASCII.
*/
STATUS,
/**
* Value return type decoded through {@link io.lettuce.core.codec.RedisCodec#decodeValue(ByteBuffer)}.
*/
VALUE,
/**
* RESP3-defined object output supporting all Redis response structures.
*/
OBJECT
}
|
ScriptOutputType
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_2700/Issue2784.java
|
{
"start": 255,
"end": 3620
}
|
class ____ extends TestCase {
public void test_for_issue() throws Exception {
Model m = new Model();
m.time = java.time.LocalDateTime.now();
String str = JSON.toJSONString(m);
assertEquals("{\"time\":"
+ m.time.atZone(JSON.defaultTimeZone.toZoneId()).toInstant().toEpochMilli()
+ "}", str);
Model m1 = JSON.parseObject(str, Model.class);
assertEquals(m.time, m1.time);
}
public void test_for_issue_1() throws Exception {
Model m = new Model();
m.ztime = ZonedDateTime.now();
String str = JSON.toJSONString(m);
assertEquals("{\"ztime\":"
+ m.ztime.toInstant().toEpochMilli()
+ "}", str);
Model m1 = JSON.parseObject(str, Model.class);
assertEquals(m.ztime.toInstant().toEpochMilli(), m1.ztime.toInstant().toEpochMilli());
}
public void test_for_issue_2() throws Exception {
Model m = new Model();
m.time1 = java.time.LocalDateTime.now();
String str = JSON.toJSONString(m);
assertEquals("{\"time1\":"
+ m.time1.atZone(JSON.defaultTimeZone.toZoneId()).toEpochSecond()
+ "}", str);
Model m1 = JSON.parseObject(str, Model.class);
assertEquals(m.time1.atZone(JSON.defaultTimeZone.toZoneId()).toEpochSecond()
, m1.time1.atZone(JSON.defaultTimeZone.toZoneId()).toEpochSecond());
}
public void test_for_issue_3() throws Exception {
Model m = new Model();
m.ztime1 = ZonedDateTime.now();
String str = JSON.toJSONString(m);
assertEquals("{\"ztime1\":"
+ m.ztime1.toEpochSecond()
+ "}", str);
Model m1 = JSON.parseObject(str, Model.class);
assertEquals(m.ztime1.toEpochSecond()
, m1.ztime1.toEpochSecond());
}
public void test_for_issue_4() throws Exception {
Model m = new Model();
m.date = new Date();
String str = JSON.toJSONString(m);
assertEquals("{\"date\":"
+ m.date.getTime()
+ "}", str);
Model m1 = JSON.parseObject(str, Model.class);
assertEquals(m.date.getTime()
, m1.date.getTime());
}
public void test_for_issue_5() throws Exception {
Model m = new Model();
m.date1 = new Date();
String str = JSON.toJSONString(m);
assertEquals("{\"date1\":"
+ (m.date1.getTime() / 1000)
+ "}", str);
Model m1 = JSON.parseObject(str, Model.class);
assertEquals(m.date1.getTime() / 1000
, m1.date1.getTime() / 1000);
}
public void test_for_issue_6() throws Exception {
Model m = new Model();
m.date1 = new Date();
String str = JSON.toJSONString(m);
assertEquals("{\"date1\":"
+ (m.date1.getTime() / 1000)
+ "}", str);
Model m1 = JSON.parseObject(str, Model.class);
assertEquals(m.date1.getTime() / 1000
, m1.date1.getTime() / 1000);
}
public void test_for_issue_7() throws Exception {
Model m = JSON.parseObject("{\"time2\":20190714121314}", Model.class);
assertEquals(m.time2, LocalDateTime.of(2019, 7, 14, 12, 13, 14));
}
public static
|
Issue2784
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ResourceTracker.java
|
{
"start": 1651,
"end": 2099
}
|
interface ____ {
@Idempotent
RegisterNodeManagerResponse registerNodeManager(
RegisterNodeManagerRequest request) throws YarnException, IOException;
@AtMostOnce
NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
throws YarnException, IOException;
@Idempotent
UnRegisterNodeManagerResponse unRegisterNodeManager(
UnRegisterNodeManagerRequest request) throws YarnException, IOException;
}
|
ResourceTracker
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/procedure/internal/AbstractStandardCallableStatementSupport.java
|
{
"start": 510,
"end": 1163
}
|
class ____ implements CallableStatementSupport {
@Override
public void registerParameters(
String procedureName,
JdbcOperationQueryCall procedureCall,
CallableStatement statement,
ProcedureParameterMetadataImplementor parameterMetadata,
SharedSessionContractImplementor session) {
if ( procedureCall.getFunctionReturn() != null ) {
procedureCall.getFunctionReturn().registerParameter( statement, session );
}
for ( JdbcCallParameterRegistration parameterRegistration : procedureCall.getParameterRegistrations() ) {
parameterRegistration.registerParameter( statement, session );
}
}
}
|
AbstractStandardCallableStatementSupport
|
java
|
junit-team__junit5
|
jupiter-tests/src/test/java/org/junit/jupiter/api/extension/support/TypeBasedParameterResolverTests.java
|
{
"start": 5323,
"end": 5735
}
|
class ____
extends TypeBasedParameterResolver<Map<String, List<Integer>>> {
@Override
public Map<String, List<Integer>> resolveParameter(ParameterContext parameterContext,
ExtensionContext extensionContext) throws ParameterResolutionException {
return Map.of("ids", List.of(1, 42));
}
}
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.PARAMETER)
@
|
ParameterizedTypeBasedParameterResolver
|
java
|
spring-projects__spring-boot
|
module/spring-boot-kotlinx-serialization-json/src/main/java/org/springframework/boot/kotlinx/serialization/json/autoconfigure/KotlinxSerializationJsonBuilderCustomizer.java
|
{
"start": 806,
"end": 1039
}
|
interface ____ can be implemented by beans wishing to further customize the
* {@link Json} through {@link JsonBuilder} retaining its default configuration.
*
* @author Dmitry Sulman
* @since 4.0.0
*/
@FunctionalInterface
public
|
that
|
java
|
netty__netty
|
transport/src/main/java/io/netty/channel/MultiThreadIoEventLoopGroup.java
|
{
"start": 1341,
"end": 11057
}
|
class ____ extends MultithreadEventLoopGroup implements IoEventLoopGroup {
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup} using the default number
* of threads and default {@link ThreadFactory}.
*/
public MultiThreadIoEventLoopGroup(IoHandlerFactory ioHandlerFactory) {
this(0, ioHandlerFactory);
}
/**
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup} using the default {@link ThreadFactory}.
*
* @param nThreads the number of threads and so {@link EventLoop}s that are created.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
*/
public MultiThreadIoEventLoopGroup(int nThreads, IoHandlerFactory ioHandlerFactory) {
this(nThreads, (Executor) null, ioHandlerFactory);
}
/**
* Create a new instance using the default number of thread.
*
* @param threadFactory the {@link ThreadFactory} that is used.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
*/
public MultiThreadIoEventLoopGroup(ThreadFactory threadFactory, IoHandlerFactory ioHandlerFactory) {
this(0, threadFactory, ioHandlerFactory);
}
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup} using the default number
* of threads.
*
* @param executor the {@link Executor} that is used.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
*/
public MultiThreadIoEventLoopGroup(Executor executor,
IoHandlerFactory ioHandlerFactory) {
super(0, executor, ioHandlerFactory);
}
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup}.
*
* @param nThreads the number of threads and so {@link EventLoop}s that are created.
* @param executor the {@link Executor} that is used.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
*/
public MultiThreadIoEventLoopGroup(int nThreads, Executor executor,
IoHandlerFactory ioHandlerFactory) {
super(nThreads, executor, ioHandlerFactory);
}
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup}.
*
* @param nThreads the number of threads and so {@link EventLoop}s that are created.
* @param threadFactory the {@link ThreadFactory} that is used.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
*/
public MultiThreadIoEventLoopGroup(int nThreads, ThreadFactory threadFactory,
IoHandlerFactory ioHandlerFactory) {
super(nThreads, threadFactory, ioHandlerFactory);
}
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup}.
*
* @param nThreads the number of threads and so {@link EventLoop}s that are created.
* @param executor the {@link Executor} that is used.
* @param chooserFactory the {@link EventExecutorChooserFactory} that is used to choose the
* {@link IoEventLoop} when {@link MultiThreadIoEventLoopGroup#next()} is
* called.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
*/
public MultiThreadIoEventLoopGroup(int nThreads, Executor executor,
EventExecutorChooserFactory chooserFactory,
IoHandlerFactory ioHandlerFactory) {
super(nThreads, executor, chooserFactory, ioHandlerFactory);
}
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup}.
*
* @param nThreads the number of threads and so {@link EventLoop}s that are created.
* @param executor the {@link Executor} that is used.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
* @param args extra args that are passed to {@link #newChild(Executor, Object...)} method.
*/
protected MultiThreadIoEventLoopGroup(int nThreads, Executor executor,
IoHandlerFactory ioHandlerFactory, Object... args) {
super(nThreads, executor, combine(ioHandlerFactory, args));
}
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup}.
*
* @param nThreads the number of threads and so {@link EventLoop}s that are created.
* @param threadFactory the {@link ThreadFactory} that is used.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
* @param args extra args that are passed to {@link #newChild(Executor, Object...)} method.
*/
protected MultiThreadIoEventLoopGroup(int nThreads, ThreadFactory threadFactory,
IoHandlerFactory ioHandlerFactory, Object... args) {
super(nThreads, threadFactory, combine(ioHandlerFactory, args));
}
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup}.
*
* @param nThreads the number of threads and so {@link EventLoop}s that are created.
* @param threadFactory the {@link ThreadFactory} that is used.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
* @param chooserFactory the {@link EventExecutorChooserFactory} that is used to choose the
* @param args extra args that are passed to {@link #newChild(Executor, Object...)} method.
*/
protected MultiThreadIoEventLoopGroup(int nThreads, ThreadFactory threadFactory,
IoHandlerFactory ioHandlerFactory,
EventExecutorChooserFactory chooserFactory,
Object... args) {
super(nThreads, threadFactory, chooserFactory, combine(ioHandlerFactory, args));
}
/**
* Creates a new instance of the {@link MultiThreadIoEventLoopGroup}.
*
* @param nThreads the number of threads and so {@link EventLoop}s that are created.
* @param executor the {@link Executor} that is used.
* @param ioHandlerFactory the {@link IoHandlerFactory} that will be used to create {@link IoHandler} for handling
* IO.
* @param chooserFactory the {@link EventExecutorChooserFactory} that is used to choose the
* @param args extra args that are passed to {@link #newChild(Executor, Object...)} method.
*/
protected MultiThreadIoEventLoopGroup(int nThreads, Executor executor,
IoHandlerFactory ioHandlerFactory,
EventExecutorChooserFactory chooserFactory,
Object... args) {
super(nThreads, executor, chooserFactory, combine(ioHandlerFactory, args));
}
// The return type should be IoHandleEventLoop but we choose EventLoop to allow us to introduce the IoHandle
// concept without breaking API.
@Override
protected EventLoop newChild(Executor executor, Object... args) throws Exception {
IoHandlerFactory handlerFactory = (IoHandlerFactory) args[0];
Object[] argsCopy;
if (args.length > 1) {
argsCopy = new Object[args.length - 1];
System.arraycopy(args, 1, argsCopy, 0, argsCopy.length);
} else {
argsCopy = EmptyArrays.EMPTY_OBJECTS;
}
return newChild(executor, handlerFactory, argsCopy);
}
/**
* Creates a new {@link IoEventLoop} to use with the given {@link Executor} and {@link IoHandler}.
*
* @param executor the {@link Executor} that should be used to handle execution of tasks and IO.
* @param ioHandlerFactory the {@link IoHandlerFactory} that should be used to obtain {@link IoHandler} to
* handle IO.
* @param args extra arguments that are based by the constructor.
* @return the created {@link IoEventLoop}.
*/
protected IoEventLoop newChild(Executor executor, IoHandlerFactory ioHandlerFactory,
@SuppressWarnings("unused") Object... args) {
return new SingleThreadIoEventLoop(this, executor, ioHandlerFactory);
}
@Override
public IoEventLoop next() {
return (IoEventLoop) super.next();
}
private static Object[] combine(IoHandlerFactory handlerFactory, Object... args) {
List<Object> combinedList = new ArrayList<Object>();
combinedList.add(handlerFactory);
if (args != null) {
Collections.addAll(combinedList, args);
}
return combinedList.toArray(new Object[0]);
}
}
|
MultiThreadIoEventLoopGroup
|
java
|
google__dagger
|
dagger-android/test/javatests/dagger/android/DispatchingAndroidInjectorTest.java
|
{
"start": 4606,
"end": 4654
}
|
class ____ extends Activity {}
static
|
FooActivity
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatNanosEvaluator.java
|
{
"start": 1259,
"end": 5516
}
|
class ____ implements EvalOperator.ExpressionEvaluator {
private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DateFormatNanosEvaluator.class);
private final Source source;
private final EvalOperator.ExpressionEvaluator val;
private final EvalOperator.ExpressionEvaluator formatter;
private final Locale locale;
private final DriverContext driverContext;
private Warnings warnings;
public DateFormatNanosEvaluator(Source source, EvalOperator.ExpressionEvaluator val,
EvalOperator.ExpressionEvaluator formatter, Locale locale, DriverContext driverContext) {
this.source = source;
this.val = val;
this.formatter = formatter;
this.locale = locale;
this.driverContext = driverContext;
}
@Override
public Block eval(Page page) {
try (LongBlock valBlock = (LongBlock) val.eval(page)) {
try (BytesRefBlock formatterBlock = (BytesRefBlock) formatter.eval(page)) {
LongVector valVector = valBlock.asVector();
if (valVector == null) {
return eval(page.getPositionCount(), valBlock, formatterBlock);
}
BytesRefVector formatterVector = formatterBlock.asVector();
if (formatterVector == null) {
return eval(page.getPositionCount(), valBlock, formatterBlock);
}
return eval(page.getPositionCount(), valVector, formatterVector).asBlock();
}
}
}
@Override
public long baseRamBytesUsed() {
long baseRamBytesUsed = BASE_RAM_BYTES_USED;
baseRamBytesUsed += val.baseRamBytesUsed();
baseRamBytesUsed += formatter.baseRamBytesUsed();
return baseRamBytesUsed;
}
public BytesRefBlock eval(int positionCount, LongBlock valBlock, BytesRefBlock formatterBlock) {
try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) {
BytesRef formatterScratch = new BytesRef();
position: for (int p = 0; p < positionCount; p++) {
switch (valBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
switch (formatterBlock.getValueCount(p)) {
case 0:
result.appendNull();
continue position;
case 1:
break;
default:
warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value"));
result.appendNull();
continue position;
}
long val = valBlock.getLong(valBlock.getFirstValueIndex(p));
BytesRef formatter = formatterBlock.getBytesRef(formatterBlock.getFirstValueIndex(p), formatterScratch);
result.appendBytesRef(DateFormat.processNanos(val, formatter, this.locale));
}
return result.build();
}
}
public BytesRefVector eval(int positionCount, LongVector valVector,
BytesRefVector formatterVector) {
try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) {
BytesRef formatterScratch = new BytesRef();
position: for (int p = 0; p < positionCount; p++) {
long val = valVector.getLong(p);
BytesRef formatter = formatterVector.getBytesRef(p, formatterScratch);
result.appendBytesRef(DateFormat.processNanos(val, formatter, this.locale));
}
return result.build();
}
}
@Override
public String toString() {
return "DateFormatNanosEvaluator[" + "val=" + val + ", formatter=" + formatter + ", locale=" + locale + "]";
}
@Override
public void close() {
Releasables.closeExpectNoException(val, formatter);
}
private Warnings warnings() {
if (warnings == null) {
this.warnings = Warnings.createWarnings(
driverContext.warningsMode(),
source.source().getLineNumber(),
source.source().getColumnNumber(),
source.text()
);
}
return warnings;
}
static
|
DateFormatNanosEvaluator
|
java
|
apache__flink
|
flink-filesystems/flink-s3-fs-base/src/main/java/com/amazonaws/services/s3/model/transform/XmlResponsesSaxParser.java
|
{
"start": 118125,
"end": 124904
}
|
class ____ extends AbstractHandler {
private final AnalyticsConfiguration configuration = new AnalyticsConfiguration();
private AnalyticsFilter filter;
private List<AnalyticsFilterPredicate> andOperandsList;
private StorageClassAnalysis storageClassAnalysis;
private StorageClassAnalysisDataExport dataExport;
private AnalyticsExportDestination destination;
private AnalyticsS3BucketDestination s3BucketDestination;
private String currentTagKey;
private String currentTagValue;
public GetBucketAnalyticsConfigurationResult getResult() {
return new GetBucketAnalyticsConfigurationResult()
.withAnalyticsConfiguration(configuration);
}
@Override
protected void doStartElement(String uri, String name, String qName, Attributes attrs) {
if (in("AnalyticsConfiguration")) {
if (name.equals("Filter")) {
filter = new AnalyticsFilter();
} else if (name.equals("StorageClassAnalysis")) {
storageClassAnalysis = new StorageClassAnalysis();
}
} else if (in("AnalyticsConfiguration", "Filter")) {
if (name.equals("And")) {
andOperandsList = new ArrayList<AnalyticsFilterPredicate>();
}
} else if (in("AnalyticsConfiguration", "StorageClassAnalysis")) {
if (name.equals("DataExport")) {
dataExport = new StorageClassAnalysisDataExport();
}
} else if (in("AnalyticsConfiguration", "StorageClassAnalysis", "DataExport")) {
if (name.equals("Destination")) {
destination = new AnalyticsExportDestination();
}
} else if (in(
"AnalyticsConfiguration",
"StorageClassAnalysis",
"DataExport",
"Destination")) {
if (name.equals("S3BucketDestination")) {
s3BucketDestination = new AnalyticsS3BucketDestination();
}
}
}
@Override
protected void doEndElement(String uri, String name, String qName) {
if (in("AnalyticsConfiguration")) {
if (name.equals("Id")) {
configuration.setId(getText());
} else if (name.equals("Filter")) {
configuration.setFilter(filter);
} else if (name.equals("StorageClassAnalysis")) {
configuration.setStorageClassAnalysis(storageClassAnalysis);
}
} else if (in("AnalyticsConfiguration", "Filter")) {
if (name.equals("Prefix")) {
filter.setPredicate(new AnalyticsPrefixPredicate(getText()));
} else if (name.equals("Tag")) {
filter.setPredicate(
new AnalyticsTagPredicate(new Tag(currentTagKey, currentTagValue)));
currentTagKey = null;
currentTagValue = null;
} else if (name.equals("And")) {
filter.setPredicate(new AnalyticsAndOperator(andOperandsList));
andOperandsList = null;
}
} else if (in("AnalyticsConfiguration", "Filter", "Tag")) {
if (name.equals("Key")) {
currentTagKey = getText();
} else if (name.equals("Value")) {
currentTagValue = getText();
}
} else if (in("AnalyticsConfiguration", "Filter", "And")) {
if (name.equals("Prefix")) {
andOperandsList.add(new AnalyticsPrefixPredicate(getText()));
} else if (name.equals("Tag")) {
andOperandsList.add(
new AnalyticsTagPredicate(new Tag(currentTagKey, currentTagValue)));
currentTagKey = null;
currentTagValue = null;
}
} else if (in("AnalyticsConfiguration", "Filter", "And", "Tag")) {
if (name.equals("Key")) {
currentTagKey = getText();
} else if (name.equals("Value")) {
currentTagValue = getText();
}
} else if (in("AnalyticsConfiguration", "StorageClassAnalysis")) {
if (name.equals("DataExport")) {
storageClassAnalysis.setDataExport(dataExport);
}
} else if (in("AnalyticsConfiguration", "StorageClassAnalysis", "DataExport")) {
if (name.equals("OutputSchemaVersion")) {
dataExport.setOutputSchemaVersion(getText());
} else if (name.equals("Destination")) {
dataExport.setDestination(destination);
}
} else if (in(
"AnalyticsConfiguration",
"StorageClassAnalysis",
"DataExport",
"Destination")) {
if (name.equals("S3BucketDestination")) {
destination.setS3BucketDestination(s3BucketDestination);
}
} else if (in(
"AnalyticsConfiguration",
"StorageClassAnalysis",
"DataExport",
"Destination",
"S3BucketDestination")) {
if (name.equals("Format")) {
s3BucketDestination.setFormat(getText());
} else if (name.equals("BucketAccountId")) {
s3BucketDestination.setBucketAccountId(getText());
} else if (name.equals("Bucket")) {
s3BucketDestination.setBucketArn(getText());
} else if (name.equals("Prefix")) {
s3BucketDestination.setPrefix(getText());
}
}
}
}
/*
HTTP/1.1 200 OK
x-amz-id-2: ITnGT1y4RyTmXa3rPi4hklTXouTf0hccUjo0iCPjz6FnfIutBj3M7fPGlWO2SEWp
x-amz-request-id: 51991C342C575321
Date: Wed, 14 May 2014 02:11:22 GMT
Server: AmazonS3
Content-Length: ...
<ListBucketAnalyticsConfigurationsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<AnalyticsConfiguration>
...
</AnalyticsConfiguration>
<IsTruncated>true</IsTruncated>
<ContinuationToken>token1</ContinuationToken>
<NextContinuationToken>token2</NextContinuationToken>
</ListBucketAnalyticsConfigurationsResult>
*/
public static
|
GetBucketAnalyticsConfigurationHandler
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassEnhancer.java
|
{
"start": 8955,
"end": 9365
}
|
interface ____ be implemented by all @Configuration CGLIB subclasses.
* Facilitates idempotent behavior for {@link ConfigurationClassEnhancer#enhance}
* through checking to see if candidate classes are already assignable to it.
* <p>Also extends {@link BeanFactoryAware}, as all enhanced {@code @Configuration}
* classes require access to the {@link BeanFactory} that created them.
* <p>Note that this
|
to
|
java
|
spring-projects__spring-boot
|
module/spring-boot-web-server/src/main/java/org/springframework/boot/web/server/context/WebServerApplicationContext.java
|
{
"start": 1143,
"end": 3278
}
|
interface ____ extends ApplicationContext {
/**
* {@link SmartLifecycle#getPhase() SmartLifecycle phase} in which graceful shutdown
* of the web server is performed.
* @since 4.0.0
*/
int GRACEFUL_SHUTDOWN_PHASE = SmartLifecycle.DEFAULT_PHASE - 1024;
/**
* {@link SmartLifecycle#getPhase() SmartLifecycle phase} in which starting and
* stopping of the web server is performed.
*/
int START_STOP_LIFECYCLE_PHASE = GRACEFUL_SHUTDOWN_PHASE - 1024;
/**
* Returns the {@link WebServer} that was created by the context or {@code null} if
* the server has not yet been created.
* @return the web server
*/
@Nullable WebServer getWebServer();
/**
* Returns the namespace of the web server application context or {@code null} if no
* namespace has been set. Used for disambiguation when multiple web servers are
* running in the same application (for example a management context running on a
* different port).
* @return the server namespace
*/
@Nullable String getServerNamespace();
/**
* Returns {@code true} if the specified context is a
* {@link WebServerApplicationContext} with a matching server namespace.
* @param context the context to check
* @param serverNamespace the server namespace to match against
* @return {@code true} if the server namespace of the context matches
*/
static boolean hasServerNamespace(@Nullable ApplicationContext context, String serverNamespace) {
return (context instanceof WebServerApplicationContext webServerApplicationContext)
&& ObjectUtils.nullSafeEquals(webServerApplicationContext.getServerNamespace(), serverNamespace);
}
/**
* Returns the server namespace if the specified context is a
* {@link WebServerApplicationContext}.
* @param context the context
* @return the server namespace or {@code null} if the context is not a
* {@link WebServerApplicationContext}
*/
static @Nullable String getServerNamespace(@Nullable ApplicationContext context) {
return (context instanceof WebServerApplicationContext configurableContext)
? configurableContext.getServerNamespace() : null;
}
}
|
WebServerApplicationContext
|
java
|
spring-projects__spring-framework
|
spring-beans/src/test/java/org/springframework/beans/factory/annotation/AnnotationBeanWiringInfoResolverTests.java
|
{
"start": 976,
"end": 1513
}
|
class ____ {
@Test
void testResolveWiringInfo() {
assertThatIllegalArgumentException().isThrownBy(() ->
new AnnotationBeanWiringInfoResolver().resolveWiringInfo(null));
}
@Test
void testResolveWiringInfoWithAnInstanceOfANonAnnotatedClass() {
AnnotationBeanWiringInfoResolver resolver = new AnnotationBeanWiringInfoResolver();
BeanWiringInfo info = resolver.resolveWiringInfo("java.lang.String is not @Configurable");
assertThat(info).as("Must be returning null for a non-@Configurable
|
AnnotationBeanWiringInfoResolverTests
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/cache/interceptor/CacheProxyFactoryBeanTests.java
|
{
"start": 3610,
"end": 3891
}
|
class ____ implements Greeter {
private final AtomicBoolean cacheMiss = new AtomicBoolean();
@Override
public boolean isCacheMiss() {
return this.cacheMiss.getAndSet(false);
}
@Override
public void setCacheMiss() {
this.cacheMiss.set(true);
}
}
}
|
SimpleGreeter
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/interceptors/bindings/multiple/MultipleBindingsTest.java
|
{
"start": 398,
"end": 1095
}
|
class ____ {
@RegisterExtension
public ArcTestContainer container = new ArcTestContainer(BarBinding.class, FooBinding.class,
MyBean.class, MyInterceptor.class, MyOtherBean.class);
@Test
public void testInterception() {
assertEquals(0, MyInterceptor.TIMES_INVOKED);
// bean only has one binding, the interceptor should not get triggered
Arc.container().instance(MyBean.class).get().foo();
assertEquals(0, MyInterceptor.TIMES_INVOKED);
// bean has both bindings that the interceptor has
Arc.container().instance(MyOtherBean.class).get().foo();
assertEquals(1, MyInterceptor.TIMES_INVOKED);
}
}
|
MultipleBindingsTest
|
java
|
alibaba__nacos
|
persistence/src/main/java/com/alibaba/nacos/persistence/repository/embedded/operate/StandaloneDatabaseOperateImpl.java
|
{
"start": 2236,
"end": 6256
}
|
class ____ implements BaseDatabaseOperate {
private static final Logger LOGGER = LoggerFactory.getLogger(StandaloneDatabaseOperateImpl.class);
private final SqlLimiter sqlLimiter;
private JdbcTemplate jdbcTemplate;
private TransactionTemplate transactionTemplate;
public StandaloneDatabaseOperateImpl() {
this.sqlLimiter = new SqlTypeLimiter();
}
@PostConstruct
protected void init() {
DataSourceService dataSourceService = DynamicDataSource.getInstance().getDataSource();
jdbcTemplate = dataSourceService.getJdbcTemplate();
transactionTemplate = dataSourceService.getTransactionTemplate();
LOGGER.info("use StandaloneDatabaseOperateImpl");
}
@Override
public <R> R queryOne(String sql, Class<R> cls) {
return queryOne(jdbcTemplate, sql, cls);
}
@Override
public <R> R queryOne(String sql, Object[] args, Class<R> cls) {
return queryOne(jdbcTemplate, sql, args, cls);
}
@Override
public <R> R queryOne(String sql, Object[] args, RowMapper<R> mapper) {
return queryOne(jdbcTemplate, sql, args, mapper);
}
@Override
public <R> List<R> queryMany(String sql, Object[] args, RowMapper<R> mapper) {
return queryMany(jdbcTemplate, sql, args, mapper);
}
@Override
public <R> List<R> queryMany(String sql, Object[] args, Class<R> rClass) {
return queryMany(jdbcTemplate, sql, args, rClass);
}
@Override
public List<Map<String, Object>> queryMany(String sql, Object[] args) {
return queryMany(jdbcTemplate, sql, args);
}
@Override
public CompletableFuture<RestResult<String>> dataImport(File file) {
return CompletableFuture.supplyAsync(() -> {
try (DiskUtils.LineIterator iterator = DiskUtils.lineIterator(file)) {
int batchSize = 1000;
List<String> batchUpdate = new ArrayList<>(batchSize);
List<CompletableFuture<Void>> futures = new ArrayList<>();
List<Boolean> results = new CopyOnWriteArrayList<>();
while (iterator.hasNext()) {
String sql = iterator.next();
if (StringUtils.isNotBlank(sql)) {
sqlLimiter.doLimit(sql);
batchUpdate.add(sql);
}
if (batchUpdate.size() == batchSize || !iterator.hasNext()) {
List<ModifyRequest> sqls = batchUpdate.stream().map(s -> {
ModifyRequest request = new ModifyRequest();
request.setSql(s);
return request;
}).collect(Collectors.toList());
futures.add(CompletableFuture.runAsync(() -> results.add(doDataImport(jdbcTemplate, sqls))));
batchUpdate.clear();
}
}
CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
int code = 500;
if (!CollectionUtils.isEmpty(results)) {
code = (!results.stream().anyMatch(Boolean.FALSE::equals)) ? 200 : 500;
}
return RestResult.<String>builder().withCode(code).withData("").build();
} catch (Throwable ex) {
LOGGER.error("An exception occurred when external data was imported into Derby : ", ex);
return RestResultUtils.failed(ex.getMessage());
}
});
}
@Override
public Boolean update(List<ModifyRequest> modifyRequests, BiConsumer<Boolean, Throwable> consumer) {
return update(transactionTemplate, jdbcTemplate, modifyRequests, consumer);
}
@Override
public Boolean update(List<ModifyRequest> requestList) {
return update(transactionTemplate, jdbcTemplate, requestList);
}
}
|
StandaloneDatabaseOperateImpl
|
java
|
apache__camel
|
components/camel-oauth/src/main/java/org/apache/camel/oauth/OAuthBearerTokenProcessor.java
|
{
"start": 935,
"end": 2604
}
|
class ____ extends AbstractOAuthProcessor {
private final Logger log = LoggerFactory.getLogger(getClass());
@Override
public void process(Exchange exchange) {
var context = exchange.getContext();
var msg = exchange.getMessage();
logRequestHeaders(procName, msg);
// Validate Authorization header
//
var authHeader = msg.getHeader("Authorization", String.class);
if (authHeader == null) {
log.error("No Authorization header in request");
msg.setHeader("CamelHttpResponseCode", 400);
msg.setBody("Authorization header");
return;
}
var toks = authHeader.split(" ");
if (toks.length != 2 || !"Bearer".equals(toks[0])) {
log.error("Invalid Authorization header: {}", authHeader);
msg.setHeader("CamelHttpResponseCode", 400);
msg.setBody("Invalid Authorization header");
return;
}
// Find or create the OAuth instance
//
var oauth = findOAuth(context).orElseGet(() -> {
var factory = OAuthFactory.lookupFactory(context);
return factory.createOAuth();
});
// Authenticate the bearer's access token
//
var access_token = toks[1];
var userProfile = oauth.authenticate(new TokenCredentials(access_token));
// Get or create the OAuthSession
//
var session = oauth.getOrCreateSession(exchange);
session.putUserProfile(userProfile);
log.info("Authenticated {}", userProfile.subject());
userProfile.logDetails();
}
}
|
OAuthBearerTokenProcessor
|
java
|
elastic__elasticsearch
|
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java
|
{
"start": 1922,
"end": 5573
}
|
class ____ extends HandledTransportAction<HasPrivilegesRequest, HasPrivilegesResponse> {
private final AuthorizationService authorizationService;
private final NativePrivilegeStore privilegeStore;
private final SecurityContext securityContext;
@Inject
public TransportHasPrivilegesAction(
TransportService transportService,
ActionFilters actionFilters,
AuthorizationService authorizationService,
NativePrivilegeStore privilegeStore,
SecurityContext context
) {
super(HasPrivilegesAction.NAME, transportService, actionFilters, HasPrivilegesRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE);
this.authorizationService = authorizationService;
this.privilegeStore = privilegeStore;
this.securityContext = context;
}
@Override
protected void doExecute(Task task, HasPrivilegesRequest request, ActionListener<HasPrivilegesResponse> listener) {
final String username = request.username();
final Authentication authentication = securityContext.getAuthentication();
if (isSameUser(authentication, username) == false) {
listener.onFailure(new IllegalArgumentException("users may only check the privileges of their own account"));
return;
}
resolveApplicationPrivileges(
request,
ActionListener.wrap(
applicationPrivilegeDescriptors -> authorizationService.checkPrivileges(
authentication.getEffectiveSubject(),
request.getPrivilegesToCheck(),
applicationPrivilegeDescriptors,
listener.map(privilegesCheckResult -> {
AuthorizationEngine.PrivilegesCheckResult.Details checkResultDetails = privilegesCheckResult.getDetails();
assert checkResultDetails != null : "runDetailedCheck is 'true' but the result has no details";
return new HasPrivilegesResponse(
request.username(),
privilegesCheckResult.allChecksSuccess(),
checkResultDetails != null ? checkResultDetails.cluster() : Map.of(),
checkResultDetails != null ? checkResultDetails.index().values() : List.of(),
checkResultDetails != null ? checkResultDetails.application() : Map.of()
);
})
),
listener::onFailure
)
);
}
private void resolveApplicationPrivileges(
HasPrivilegesRequest request,
ActionListener<Collection<ApplicationPrivilegeDescriptor>> listener
) {
final Set<String> applications = getApplicationNames(request);
privilegeStore.getPrivileges(applications, null, listener);
}
public static Set<String> getApplicationNames(HasPrivilegesRequest request) {
return Arrays.stream(request.applicationPrivileges())
.map(RoleDescriptor.ApplicationResourcePrivileges::getApplication)
.collect(Collectors.toSet());
}
private static boolean isSameUser(Authentication authentication, String username) {
final Subject subjectToCheck;
if (authentication.isCrossClusterAccess()) {
subjectToCheck = getAuthenticationFromCrossClusterAccessMetadata(authentication).getEffectiveSubject();
} else {
subjectToCheck = authentication.getEffectiveSubject();
}
return subjectToCheck.getUser().principal().equals(username);
}
}
|
TransportHasPrivilegesAction
|
java
|
quarkusio__quarkus
|
extensions/resteasy-reactive/rest/deployment/src/test/java/io/quarkus/resteasy/reactive/server/test/customproviders/CustomFiltersResource.java
|
{
"start": 287,
"end": 781
}
|
class ____ {
@GET
@Produces(MediaType.TEXT_PLAIN)
@Path("req")
public String filters(@Context HttpHeaders headers) {
return headers.getHeaderString("custom-header") + "-" + headers.getHeaderString("heavy");
}
@GET
@Produces(MediaType.TEXT_PLAIN)
@Path("metal")
@Metal
public String metal(@Context HttpHeaders headers) {
return headers.getHeaderString("custom-header") + "-" + headers.getHeaderString("heavy");
}
}
|
CustomFiltersResource
|
java
|
apache__commons-lang
|
src/test/java/org/apache/commons/lang3/ClassUtilsTest.java
|
{
"start": 27069,
"end": 28364
}
|
class ____ {
// empty
}
assertEquals("", ClassUtils.getShortCanonicalName(new Object() {
// empty
}.getClass()));
// WARNING: this is fragile, implementation may change, naming is not guaranteed
assertEquals("", ClassUtils.getShortCanonicalName(Named.class));
assertEquals("Inner", ClassUtils.getShortCanonicalName(Inner.class));
assertEquals(StringUtils.EMPTY, ClassUtils.getShortCanonicalName((Class<?>) null));
}
@Test
void test_getShortCanonicalName_Object() {
assertEquals("<null>", ClassUtils.getShortCanonicalName(null, "<null>"));
assertEquals("ClassUtils", ClassUtils.getShortCanonicalName(new ClassUtils(), "<null>"));
assertEquals("ClassUtils[]", ClassUtils.getShortCanonicalName(new ClassUtils[0], "<null>"));
assertEquals("ClassUtils[][]", ClassUtils.getShortCanonicalName(new ClassUtils[0][0], "<null>"));
assertEquals("int[]", ClassUtils.getShortCanonicalName(new int[0], "<null>"));
assertEquals("int[][]", ClassUtils.getShortCanonicalName(new int[0][0], "<null>"));
assertEquals("int[][][][][][][][][][]", ClassUtils.getShortCanonicalName(new int[0][0][0][0][0][0][0][0][0][0], "<null>"));
// Inner types
final
|
Named
|
java
|
elastic__elasticsearch
|
server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java
|
{
"start": 3689,
"end": 16934
}
|
class ____ {
private static final Logger logger = LogManager.getLogger(PublicationTransportHandler.class);
public static final String PUBLISH_STATE_ACTION_NAME = "internal:cluster/coordination/publish_state";
private final TransportService transportService;
private final Executor clusterCoordinationExecutor;
private final NamedWriteableRegistry namedWriteableRegistry;
private final Function<PublishRequest, PublishWithJoinResponse> handlePublishRequest;
private final AtomicReference<ClusterState> lastSeenClusterState = new AtomicReference<>();
private final AtomicLong fullClusterStateReceivedCount = new AtomicLong();
private final AtomicLong incompatibleClusterStateDiffReceivedCount = new AtomicLong();
private final AtomicLong compatibleClusterStateDiffReceivedCount = new AtomicLong();
// -> no need to put a timeout on the options here, because we want the response to eventually be received
// and not log an error if it arrives after the timeout
private static final TransportRequestOptions STATE_REQUEST_OPTIONS = TransportRequestOptions.of(
null,
TransportRequestOptions.Type.STATE
);
public static final TransportVersion INCLUDES_LAST_COMMITTED_DATA_VERSION = TransportVersions.V_8_6_0;
private final SerializationStatsTracker serializationStatsTracker = new SerializationStatsTracker();
public PublicationTransportHandler(
TransportService transportService,
NamedWriteableRegistry namedWriteableRegistry,
Function<PublishRequest, PublishWithJoinResponse> handlePublishRequest
) {
this.transportService = transportService;
this.clusterCoordinationExecutor = transportService.getThreadPool().executor(ThreadPool.Names.CLUSTER_COORDINATION);
this.namedWriteableRegistry = namedWriteableRegistry;
this.handlePublishRequest = handlePublishRequest;
transportService.registerRequestHandler(
PUBLISH_STATE_ACTION_NAME,
transportService.getThreadPool().generic(),
false,
false,
BytesTransportRequest::new,
(request, channel, task) -> this.handleIncomingPublishRequest(request, new ChannelActionListener<>(channel))
);
}
public PublishClusterStateStats stats() {
return new PublishClusterStateStats(
fullClusterStateReceivedCount.get(),
incompatibleClusterStateDiffReceivedCount.get(),
compatibleClusterStateDiffReceivedCount.get(),
serializationStatsTracker.getSerializationStats()
);
}
private void handleIncomingPublishRequest(
BytesTransportRequest request,
ActionListener<PublishWithJoinResponse> publishResponseListener
) throws IOException {
assert ThreadPool.assertCurrentThreadPool(GENERIC);
final Compressor compressor = CompressorFactory.compressorForUnknownXContentType(request.bytes());
StreamInput in = request.bytes().streamInput();
try {
if (compressor != null) {
in = compressor.threadLocalStreamInput(in);
}
in = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry);
in.setTransportVersion(request.version());
// If true we received full cluster state - otherwise diffs
if (in.readBoolean()) {
final ClusterState incomingState;
// Close early to release resources used by the de-compression as early as possible
try (StreamInput input = in) {
incomingState = ClusterState.readFrom(input, transportService.getLocalNode());
assert input.read() == -1;
} catch (Exception e) {
logger.warn("unexpected error while deserializing an incoming cluster state", e);
assert false : e;
throw e;
}
fullClusterStateReceivedCount.incrementAndGet();
logger.debug("received full cluster state version [{}] with size [{}]", incomingState.version(), request.bytes().length());
acceptState(incomingState, publishResponseListener.map(response -> {
lastSeenClusterState.set(incomingState);
return response;
}));
} else {
final ClusterState lastSeen = lastSeenClusterState.get();
if (lastSeen == null) {
logger.debug("received diff for but don't have any local cluster state - requesting full state");
incompatibleClusterStateDiffReceivedCount.incrementAndGet();
throw new IncompatibleClusterStateVersionException("have no local cluster state");
} else {
final ClusterState incomingState = deserializeAndApplyDiff(request, in, lastSeen);
compatibleClusterStateDiffReceivedCount.incrementAndGet();
logger.debug(
"received diff cluster state version [{}] with uuid [{}], diff size [{}]",
incomingState.version(),
incomingState.stateUUID(),
request.bytes().length()
);
acceptState(incomingState, publishResponseListener.map(response -> {
lastSeenClusterState.compareAndSet(lastSeen, incomingState);
return response;
}));
}
}
} finally {
IOUtils.close(in);
}
}
private ClusterState deserializeAndApplyDiff(BytesTransportRequest request, StreamInput in, ClusterState currentState)
throws IOException {
ClusterState incomingState;
try {
final Diff<ClusterState> diff;
final boolean includesLastCommittedData = request.version().onOrAfter(INCLUDES_LAST_COMMITTED_DATA_VERSION);
final boolean clusterUuidCommitted;
final CoordinationMetadata.VotingConfiguration lastCommittedConfiguration;
// Close stream early to release resources used by the de-compression as early as possible
try (StreamInput input = in) {
diff = ClusterState.readDiffFrom(input, currentState.nodes().getLocalNode());
if (includesLastCommittedData) {
clusterUuidCommitted = in.readBoolean();
lastCommittedConfiguration = new CoordinationMetadata.VotingConfiguration(in);
} else {
clusterUuidCommitted = false;
lastCommittedConfiguration = null;
}
assert input.read() == -1;
}
incomingState = diff.apply(currentState); // might throw IncompatibleClusterStateVersionException
if (includesLastCommittedData) {
final var adjustedMetadata = incomingState.metadata()
.withLastCommittedValues(clusterUuidCommitted, lastCommittedConfiguration);
if (adjustedMetadata != incomingState.metadata()) {
incomingState = ClusterState.builder(incomingState).metadata(adjustedMetadata).build();
}
}
} catch (IncompatibleClusterStateVersionException e) {
incompatibleClusterStateDiffReceivedCount.incrementAndGet();
throw e;
} catch (Exception e) {
logger.warn("unexpected error while deserializing an incoming cluster state", e);
assert false : e;
throw e;
}
return incomingState;
}
/**
* Delegate to cluster-coordination thread to apply received state
*
* @param incomingState The received cluster state
* @param actionListener The action to perform once the publish call completes
*/
private void acceptState(ClusterState incomingState, ActionListener<PublishWithJoinResponse> actionListener) {
assert incomingState.nodes().isLocalNodeElectedMaster() == false
: "should handle local publications locally, but got " + incomingState;
clusterCoordinationExecutor.execute(ActionRunnable.supply(actionListener, new CheckedSupplier<>() {
@Override
public PublishWithJoinResponse get() {
return handlePublishRequest.apply(new PublishRequest(incomingState));
}
@Override
public String toString() {
return "acceptState[term=" + incomingState.term() + ",version=" + incomingState.version() + "]";
}
}));
}
public PublicationContext newPublicationContext(ClusterStatePublicationEvent clusterStatePublicationEvent) {
final PublicationContext publicationContext = new PublicationContext(clusterStatePublicationEvent);
boolean success = false;
try {
// Build the serializations we expect to need now, early in the process, so that an error during serialization fails the
// publication straight away. This isn't watertight since we send diffs on a best-effort basis and may fall back to sending a
// full state (and therefore serializing it) if the diff-based publication fails.
publicationContext.buildDiffAndSerializeStates();
success = true;
return publicationContext;
} finally {
if (success == false) {
publicationContext.decRef();
}
}
}
private ReleasableBytesReference serializeFullClusterState(ClusterState clusterState, DiscoveryNode node, TransportVersion version) {
try (RecyclerBytesStreamOutput bytesStream = transportService.newNetworkBytesStream()) {
final long uncompressedBytes;
try (
StreamOutput stream = new PositionTrackingOutputStreamStreamOutput(
CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(bytesStream))
)
) {
stream.setTransportVersion(version);
stream.writeBoolean(true);
clusterState.writeTo(stream);
uncompressedBytes = stream.position();
} catch (IOException e) {
throw new ElasticsearchException("failed to serialize cluster state for publishing to node {}", e, node);
}
final int size = bytesStream.size();
serializationStatsTracker.serializedFullState(uncompressedBytes, size);
logger.trace(
"serialized full cluster state version [{}] using transport version [{}] with size [{}]",
clusterState.version(),
version,
size
);
return bytesStream.moveToBytesReference();
}
}
private ReleasableBytesReference serializeDiffClusterState(
ClusterState newState,
Diff<ClusterState> diff,
DiscoveryNode node,
TransportVersion version
) {
final long clusterStateVersion = newState.version();
try (RecyclerBytesStreamOutput bytesStream = transportService.newNetworkBytesStream()) {
final long uncompressedBytes;
try (
StreamOutput stream = new PositionTrackingOutputStreamStreamOutput(
CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(bytesStream))
)
) {
stream.setTransportVersion(version);
stream.writeBoolean(false);
diff.writeTo(stream);
if (version.onOrAfter(INCLUDES_LAST_COMMITTED_DATA_VERSION)) {
stream.writeBoolean(newState.metadata().clusterUUIDCommitted());
newState.getLastCommittedConfiguration().writeTo(stream);
}
uncompressedBytes = stream.position();
} catch (IOException e) {
throw new ElasticsearchException("failed to serialize cluster state diff for publishing to node {}", e, node);
}
final int size = bytesStream.size();
serializationStatsTracker.serializedDiff(uncompressedBytes, size);
logger.trace(
"serialized cluster state diff for version [{}] using transport version [{}] with size [{}]",
clusterStateVersion,
version,
size
);
return bytesStream.moveToBytesReference();
}
}
/**
* Publishing a cluster state typically involves sending the same cluster state (or diff) to every node, so the work of diffing,
* serializing, and compressing the state can be done once and the results shared across publish requests. The
* {@code PublicationContext} implements this sharing. It's ref-counted: the initial reference is released by the coordinator when
* a state (or diff) has been sent to every node, every transmitted diff also holds a reference in case it needs to retry with a full
* state.
*/
public
|
PublicationTransportHandler
|
java
|
apache__flink
|
flink-runtime/src/main/java/org/apache/flink/runtime/jobmaster/JobMasterRegistrationSuccess.java
|
{
"start": 1225,
"end": 2272
}
|
class ____ extends RegistrationResponse.Success {
private static final long serialVersionUID = 5577641250204140415L;
private final ResourceManagerId resourceManagerId;
private final ResourceID resourceManagerResourceId;
public JobMasterRegistrationSuccess(
final ResourceManagerId resourceManagerId, final ResourceID resourceManagerResourceId) {
this.resourceManagerId = checkNotNull(resourceManagerId);
this.resourceManagerResourceId = checkNotNull(resourceManagerResourceId);
}
public ResourceManagerId getResourceManagerId() {
return resourceManagerId;
}
public ResourceID getResourceManagerResourceId() {
return resourceManagerResourceId;
}
@Override
public String toString() {
return "JobMasterRegistrationSuccess{"
+ "resourceManagerId="
+ resourceManagerId
+ ", resourceManagerResourceId="
+ resourceManagerResourceId
+ '}';
}
}
|
JobMasterRegistrationSuccess
|
java
|
apache__flink
|
flink-streaming-java/src/main/java/org/apache/flink/streaming/api/functions/sink/filesystem/legacy/StreamingFileSink.java
|
{
"start": 6265,
"end": 9350
}
|
class ____<IN> extends RichSinkFunction<IN>
implements CheckpointedFunction, CheckpointListener {
private static final long serialVersionUID = 1L;
// ------------------------ configuration fields --------------------------
private final long bucketCheckInterval;
private final BucketsBuilder<IN, ?, ? extends BucketsBuilder<IN, ?, ?>> bucketsBuilder;
// --------------------------- runtime fields -----------------------------
private transient StreamingFileSinkHelper<IN> helper;
/**
* Creates a new {@code StreamingFileSink} that writes files to the given base directory with
* the give buckets properties.
*/
@VisibleForTesting
public StreamingFileSink(
BucketsBuilder<IN, ?, ? extends BucketsBuilder<IN, ?, ?>> bucketsBuilder,
long bucketCheckInterval) {
Preconditions.checkArgument(bucketCheckInterval > 0L);
this.bucketsBuilder = Preconditions.checkNotNull(bucketsBuilder);
this.bucketCheckInterval = bucketCheckInterval;
}
// ------------------------------------------------------------------------
// --------------------------- Sink Builders -----------------------------
/**
* Creates the builder for a {@link StreamingFileSink} with row-encoding format.
*
* @param basePath the base path where all the buckets are going to be created as
* sub-directories.
* @param encoder the {@link Encoder} to be used when writing elements in the buckets.
* @param <IN> the type of incoming elements
* @return The builder where the remaining of the configuration parameters for the sink can be
* configured. In order to instantiate the sink, call {@link RowFormatBuilder#build()} after
* specifying the desired parameters.
*/
@Internal
public static <IN> StreamingFileSink.DefaultRowFormatBuilder<IN> forRowFormat(
final Path basePath, final Encoder<IN> encoder) {
return new DefaultRowFormatBuilder<>(basePath, encoder, new DateTimeBucketAssigner<>());
}
/**
* Creates the builder for a {@link StreamingFileSink} with bulk-encoding format.
*
* @param basePath the base path where all the buckets are going to be created as
* sub-directories.
* @param writerFactory the {@link BulkWriter.Factory} to be used when writing elements in the
* buckets.
* @param <IN> the type of incoming elements
* @return The builder where the remaining of the configuration parameters for the sink can be
* configured. In order to instantiate the sink, call {@link BulkFormatBuilder#build()}
* after specifying the desired parameters.
*/
@Internal
public static <IN> StreamingFileSink.DefaultBulkFormatBuilder<IN> forBulkFormat(
final Path basePath, final BulkWriter.Factory<IN> writerFactory) {
return new StreamingFileSink.DefaultBulkFormatBuilder<>(
basePath, writerFactory, new DateTimeBucketAssigner<>());
}
/** The base abstract
|
StreamingFileSink
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/state/TaskExecutorStateChangelogStoragesManagerTest.java
|
{
"start": 2164,
"end": 9083
}
|
class ____ {
@Test
void testDuplicatedAllocation() throws IOException {
TaskExecutorStateChangelogStoragesManager manager =
new TaskExecutorStateChangelogStoragesManager();
Configuration configuration = new Configuration();
JobID jobId1 = new JobID(1L, 1L);
StateChangelogStorage<?> storage1 =
manager.stateChangelogStorageForJob(
jobId1,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
StateChangelogStorage<?> storage2 =
manager.stateChangelogStorageForJob(
jobId1,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage2).isEqualTo(storage1);
JobID jobId2 = new JobID(1L, 2L);
StateChangelogStorage<?> storage3 =
manager.stateChangelogStorageForJob(
jobId2,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage3).isNotEqualTo(storage1);
manager.shutdown();
}
@Test
void testReleaseForJob() throws IOException {
StateChangelogStorageLoader.initialize(TestStateChangelogStorageFactory.pluginManager);
TaskExecutorStateChangelogStoragesManager manager =
new TaskExecutorStateChangelogStoragesManager();
Configuration configuration = new Configuration();
configuration.set(
StateChangelogOptions.STATE_CHANGE_LOG_STORAGE,
TestStateChangelogStorageFactory.identifier);
JobID jobId1 = new JobID(1L, 1L);
StateChangelogStorage<?> storage1 =
manager.stateChangelogStorageForJob(
jobId1,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage1).isInstanceOf(TestStateChangelogStorage.class);
assertThat(((TestStateChangelogStorage) storage1).closed).isFalse();
manager.releaseResourcesForJob(jobId1);
assertThat(((TestStateChangelogStorage) storage1).closed).isTrue();
StateChangelogStorage<?> storage2 =
manager.stateChangelogStorageForJob(
jobId1,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage2).isNotEqualTo(storage1);
manager.shutdown();
StateChangelogStorageLoader.initialize(null);
}
@Test
void testConsistencyAmongTask() throws IOException {
TaskExecutorStateChangelogStoragesManager manager =
new TaskExecutorStateChangelogStoragesManager();
Configuration configuration = new Configuration();
configuration.set(StateChangelogOptions.STATE_CHANGE_LOG_STORAGE, "invalid");
JobID jobId1 = new JobID(1L, 1L);
StateChangelogStorage<?> storage1 =
manager.stateChangelogStorageForJob(
jobId1,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage1).isNull();
// change configuration, assert the result not change.
configuration.set(
StateChangelogOptions.STATE_CHANGE_LOG_STORAGE,
StateChangelogOptions.STATE_CHANGE_LOG_STORAGE.defaultValue());
StateChangelogStorage<?> storage2 =
manager.stateChangelogStorageForJob(
jobId1,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage2).isNull();
JobID jobId2 = new JobID(1L, 2L);
StateChangelogStorage<?> storage3 =
manager.stateChangelogStorageForJob(
jobId2,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage3).isNotNull();
configuration.set(StateChangelogOptions.STATE_CHANGE_LOG_STORAGE, "invalid");
StateChangelogStorage<?> storage4 =
manager.stateChangelogStorageForJob(
jobId2,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage4).isNotNull();
assertThat(storage4).isEqualTo(storage3);
manager.shutdown();
}
@Test
void testShutdown() throws IOException {
StateChangelogStorageLoader.initialize(TestStateChangelogStorageFactory.pluginManager);
TaskExecutorStateChangelogStoragesManager manager =
new TaskExecutorStateChangelogStoragesManager();
Configuration configuration = new Configuration();
configuration.set(
StateChangelogOptions.STATE_CHANGE_LOG_STORAGE,
TestStateChangelogStorageFactory.identifier);
JobID jobId1 = new JobID(1L, 1L);
StateChangelogStorage<?> storage1 =
manager.stateChangelogStorageForJob(
jobId1,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage1).isInstanceOf(TestStateChangelogStorage.class);
assertThat(((TestStateChangelogStorage) storage1).closed).isFalse();
JobID jobId2 = new JobID(1L, 2L);
StateChangelogStorage<?> storage2 =
manager.stateChangelogStorageForJob(
jobId1,
configuration,
createUnregisteredTaskManagerJobMetricGroup(),
TestLocalRecoveryConfig.disabled());
assertThat(storage2).isInstanceOf(TestStateChangelogStorage.class);
assertThat(((TestStateChangelogStorage) storage2).closed).isFalse();
manager.shutdown();
assertThat(((TestStateChangelogStorage) storage1).closed).isTrue();
assertThat(((TestStateChangelogStorage) storage2).closed).isTrue();
StateChangelogStorageLoader.initialize(null);
}
private static
|
TaskExecutorStateChangelogStoragesManagerTest
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/lookup/MarkerLookup.java
|
{
"start": 1154,
"end": 1582
}
|
class ____ extends AbstractLookup {
static final String MARKER = "marker";
@Override
public String lookup(final LogEvent event, final String key) {
final Marker marker = event == null ? null : event.getMarker();
return marker == null ? null : marker.getName();
}
@Override
public String lookup(final String key) {
return MarkerManager.exists(key) ? key : null;
}
}
|
MarkerLookup
|
java
|
spring-projects__spring-data-jpa
|
spring-data-jpa/src/test/java/org/springframework/data/jpa/domain/support/AbstractAttributeConverterIntegrationTests.java
|
{
"start": 1725,
"end": 1804
}
|
class ____ {
protected abstract static
|
AbstractAttributeConverterIntegrationTests
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityCheckerTests.java
|
{
"start": 1019,
"end": 13631
}
|
class ____ extends ESTestCase {
/**
* Simple specification of a lock that can be exactly locked once. There is no unlocking.
* Input is always null (and represents lock acquisition), output is a boolean whether lock was acquired.
*/
final SequentialSpec lockSpec = new SequentialSpec() {
@Override
public Object initialState() {
return false;
}
@Override
public Optional<Object> nextState(Object currentState, Object input, Object output) {
if (input != null) {
throw new AssertionError("invalid history: input must be null");
}
if (output instanceof Boolean == false) {
throw new AssertionError("invalid history: output must be boolean");
}
if (false == (boolean) currentState) {
if (false == (boolean) output) {
return Optional.empty();
}
return Optional.of(true);
} else if (false == (boolean) output) {
return Optional.of(currentState);
}
return Optional.empty();
}
};
public void testLockConsistent() {
assertThat(lockSpec.initialState(), equalTo(false));
assertThat(lockSpec.nextState(false, null, true), equalTo(Optional.of(true)));
assertThat(lockSpec.nextState(false, null, false), equalTo(Optional.empty()));
assertThat(lockSpec.nextState(true, null, false), equalTo(Optional.of(true)));
assertThat(lockSpec.nextState(true, null, true), equalTo(Optional.empty()));
}
public void testLockWithLinearizableHistory1() throws LinearizabilityCheckAborted {
final History history = new History();
int call0 = history.invoke(null); // 0: acquire lock
history.respond(call0, true); // 0: lock acquisition succeeded
int call1 = history.invoke(null); // 1: acquire lock
history.respond(call1, false); // 0: lock acquisition failed
assertTrue(LinearizabilityChecker.isLinearizable(lockSpec, history));
}
public void testLockWithLinearizableHistory2() throws LinearizabilityCheckAborted {
final History history = new History();
int call0 = history.invoke(null); // 0: acquire lock
int call1 = history.invoke(null); // 1: acquire lock
history.respond(call0, false); // 0: lock acquisition failed
history.respond(call1, true); // 0: lock acquisition succeeded
assertTrue(LinearizabilityChecker.isLinearizable(lockSpec, history));
}
public void testLockWithLinearizableHistory3() throws LinearizabilityCheckAborted {
final History history = new History();
int call0 = history.invoke(null); // 0: acquire lock
int call1 = history.invoke(null); // 1: acquire lock
history.respond(call0, true); // 0: lock acquisition succeeded
history.respond(call1, false); // 0: lock acquisition failed
assertTrue(LinearizabilityChecker.isLinearizable(lockSpec, history));
}
public void testLockWithNonLinearizableHistory() throws LinearizabilityCheckAborted {
final History history = new History();
int call0 = history.invoke(null); // 0: acquire lock
history.respond(call0, false); // 0: lock acquisition failed
int call1 = history.invoke(null); // 1: acquire lock
history.respond(call1, true); // 0: lock acquisition succeeded
assertFalse(LinearizabilityChecker.isLinearizable(lockSpec, history));
}
/**
* Simple specification of a read/write register.
* Writes are modeled as integer inputs (with corresponding null responses) and
* reads are modeled as null inputs with integer outputs.
*/
final SequentialSpec registerSpec = new SequentialSpec() {
@Override
public Object initialState() {
return 0;
}
@Override
public Optional<Object> nextState(Object currentState, Object input, Object output) {
if ((input == null) == (output == null)) {
throw new AssertionError("invalid history: exactly one of input or output must be null");
}
if (input != null) {
return Optional.of(input);
} else if (output.equals(currentState)) {
return Optional.of(currentState);
}
return Optional.empty();
}
};
public void testRegisterConsistent() {
assertThat(registerSpec.initialState(), equalTo(0));
assertThat(registerSpec.nextState(7, 42, null), equalTo(Optional.of(42)));
assertThat(registerSpec.nextState(7, null, 7), equalTo(Optional.of(7)));
assertThat(registerSpec.nextState(7, null, 42), equalTo(Optional.empty()));
}
public void testRegisterWithLinearizableHistory() throws LinearizabilityCheckAborted {
final History history = new History();
int call0 = history.invoke(42); // 0: invoke write 42
int call1 = history.invoke(null); // 1: invoke read
int call2 = history.invoke(null); // 2: invoke read
history.respond(call2, 0); // 2: read returns 0
history.respond(call1, 42); // 1: read returns 42
expectThrows(AssertionError.class, () -> LinearizabilityChecker.isLinearizable(registerSpec, history));
assertTrue(LinearizabilityChecker.isLinearizable(registerSpec, history, i -> null));
history.respond(call0, null); // 0: write returns
assertTrue(LinearizabilityChecker.isLinearizable(registerSpec, history));
}
public void testRegisterHistoryVisualisation() {
final History history = new History();
int write0 = history.invoke(42); // invoke write(42)
history.respond(history.invoke(null), 42); // read, returns 42
history.respond(write0, null); // write(42) succeeds
int write1 = history.invoke(24); // invoke write 24
history.respond(history.invoke(null), 42); // read returns 42
history.respond(history.invoke(null), 24); // subsequent read returns 24
history.respond(write1, null); // write(24) succeeds
assertEquals("""
Partition 0
42 XXX null (0)
null X 42 (1)
24 XXXXX null (2)
null X 42 (3)
null X 24 (4)
""", LinearizabilityChecker.visualize(registerSpec, history, o -> { throw new AssertionError("history was complete"); }));
}
public void testRegisterWithNonLinearizableHistory() throws LinearizabilityCheckAborted {
final History history = new History();
int call0 = history.invoke(42); // 0: invoke write 42
int call1 = history.invoke(null); // 1: invoke read
history.respond(call1, 42); // 1: read returns 42
int call2 = history.invoke(null); // 2: invoke read
history.respond(call2, 0); // 2: read returns 0, not allowed
expectThrows(AssertionError.class, () -> LinearizabilityChecker.isLinearizable(registerSpec, history));
assertFalse(LinearizabilityChecker.isLinearizable(registerSpec, history, i -> null));
history.respond(call0, null); // 0: write returns
assertFalse(LinearizabilityChecker.isLinearizable(registerSpec, history));
}
public void testRegisterObservedSequenceOfUpdatesWitLinearizableHistory() throws LinearizabilityCheckAborted {
final History history = new History();
int call0 = history.invoke(42); // 0: invoke write 42
int call1 = history.invoke(43); // 1: invoke write 43
int call2 = history.invoke(null); // 2: invoke read
history.respond(call2, 42); // 1: read returns 42
int call3 = history.invoke(null); // 3: invoke read
history.respond(call3, 43); // 3: read returns 43
int call4 = history.invoke(null); // 4: invoke read
history.respond(call4, 43); // 4: read returns 43
history.respond(call0, null); // 0: write returns
history.respond(call1, null); // 1: write returns
assertTrue(LinearizabilityChecker.isLinearizable(registerSpec, history));
}
public void testRegisterObservedSequenceOfUpdatesWithNonLinearizableHistory() throws LinearizabilityCheckAborted {
final History history = new History();
int call0 = history.invoke(42); // 0: invoke write 42
int call1 = history.invoke(43); // 1: invoke write 43
int call2 = history.invoke(null); // 2: invoke read
history.respond(call2, 42); // 1: read returns 42
int call3 = history.invoke(null); // 3: invoke read
history.respond(call3, 43); // 3: read returns 43
int call4 = history.invoke(null); // 4: invoke read
history.respond(call4, 42); // 4: read returns 42, not allowed
history.respond(call0, null); // 0: write returns
history.respond(call1, null); // 1: write returns
assertFalse(LinearizabilityChecker.isLinearizable(registerSpec, history));
}
final SequentialSpec multiRegisterSpec = new KeyedSpec() {
@Override
public Object getKey(Object value) {
return ((Tuple) value).v1();
}
@Override
public Object getValue(Object value) {
return ((Tuple) value).v2();
}
@Override
public Object initialState() {
return registerSpec.initialState();
}
@Override
public Optional<Object> nextState(Object currentState, Object input, Object output) {
return registerSpec.nextState(currentState, input, output);
}
};
public void testMultiRegisterWithLinearizableHistory() throws LinearizabilityCheckAborted {
final History history = new History();
int callX0 = history.invoke(new Tuple<>("x", 42)); // 0: invoke write 42 on key x
int callX1 = history.invoke(new Tuple<>("x", null)); // 1: invoke read on key x
int callY0 = history.invoke(new Tuple<>("y", 42)); // 0: invoke write 42 on key y
int callY1 = history.invoke(new Tuple<>("y", null)); // 1: invoke read on key y
int callX2 = history.invoke(new Tuple<>("x", null)); // 2: invoke read on key x
int callY2 = history.invoke(new Tuple<>("y", null)); // 2: invoke read on key y
history.respond(callX2, 0); // 2: read returns 0 on key x
history.respond(callY2, 0); // 2: read returns 0 on key y
history.respond(callY1, 42); // 1: read returns 42 on key y
history.respond(callX1, 42); // 1: read returns 42 on key x
expectThrows(AssertionError.class, () -> LinearizabilityChecker.isLinearizable(multiRegisterSpec, history));
assertTrue(LinearizabilityChecker.isLinearizable(multiRegisterSpec, history, i -> null));
history.respond(callX0, null); // 0: write returns on key x
history.respond(callY0, null); // 0: write returns on key y
assertTrue(LinearizabilityChecker.isLinearizable(multiRegisterSpec, history));
}
public void testMultiRegisterWithNonLinearizableHistory() throws LinearizabilityCheckAborted {
final History history = new History();
int callX0 = history.invoke(new Tuple<>("x", 42)); // 0: invoke write 42 on key x
int callX1 = history.invoke(new Tuple<>("x", null)); // 1: invoke read on key x
int callY0 = history.invoke(new Tuple<>("y", 42)); // 0: invoke write 42 on key y
int callY1 = history.invoke(new Tuple<>("y", null)); // 1: invoke read on key y
int callX2 = history.invoke(new Tuple<>("x", null)); // 2: invoke read on key x
history.respond(callY1, 42); // 1: read returns 42 on key y
int callY2 = history.invoke(new Tuple<>("y", null)); // 2: invoke read on key y
history.respond(callX2, 0); // 2: read returns 0 on key x
history.respond(callY2, 0); // 2: read returns 0 on key y, not allowed
history.respond(callX1, 42); // 1: read returns 42 on key x
expectThrows(AssertionError.class, () -> LinearizabilityChecker.isLinearizable(multiRegisterSpec, history));
assertFalse(LinearizabilityChecker.isLinearizable(multiRegisterSpec, history, i -> null));
history.respond(callX0, null); // 0: write returns on key x
history.respond(callY0, null); // 0: write returns on key y
assertFalse(LinearizabilityChecker.isLinearizable(multiRegisterSpec, history));
}
}
|
LinearizabilityCheckerTests
|
java
|
alibaba__nacos
|
core/src/main/java/com/alibaba/nacos/core/monitor/MetricsMonitor.java
|
{
"start": 1298,
"end": 6839
}
|
class ____ {
private static final String METER_REGISTRY = NacosMeterRegistryCenter.CORE_STABLE_REGISTRY;
private static final DistributionSummary RAFT_READ_INDEX_FAILED;
private static final DistributionSummary RAFT_FROM_LEADER;
private static final Timer RAFT_APPLY_LOG_TIMER;
private static final Timer RAFT_APPLY_READ_TIMER;
private static AtomicInteger longConnection = new AtomicInteger();
private static GrpcServerExecutorMetric sdkServerExecutorMetric = new GrpcServerExecutorMetric("grpcSdkServer");
private static GrpcServerExecutorMetric clusterServerExecutorMetric = new GrpcServerExecutorMetric("grpcClusterServer");
private static Map<String, AtomicInteger> moduleConnectionCnt = new ConcurrentHashMap<>();
static {
ImmutableTag immutableTag = new ImmutableTag("module", "core");
List<Tag> tags = new ArrayList<>();
tags.add(immutableTag);
tags.add(new ImmutableTag("name", "raft_read_index_failed"));
RAFT_READ_INDEX_FAILED = NacosMeterRegistryCenter.summary(METER_REGISTRY, "nacos_monitor_summary", tags);
tags = new ArrayList<>();
tags.add(immutableTag);
tags.add(new ImmutableTag("name", "raft_read_from_leader"));
RAFT_FROM_LEADER = NacosMeterRegistryCenter.summary(METER_REGISTRY, "nacos_monitor_summary", tags);
tags = new ArrayList<>();
tags.add(immutableTag);
tags.add(new ImmutableTag("name", "raft_apply_log_timer"));
RAFT_APPLY_LOG_TIMER = NacosMeterRegistryCenter.timer(METER_REGISTRY, "nacos_monitor_summary", tags);
tags = new ArrayList<>();
tags.add(immutableTag);
tags.add(new ImmutableTag("name", "raft_apply_read_timer"));
RAFT_APPLY_READ_TIMER = NacosMeterRegistryCenter.timer(METER_REGISTRY, "nacos_monitor_summary", tags);
tags = new ArrayList<>();
tags.add(immutableTag);
tags.add(new ImmutableTag("name", "longConnection"));
NacosMeterRegistryCenter.gauge(METER_REGISTRY, "nacos_monitor", tags, longConnection);
tags = new ArrayList<>();
tags.add(immutableTag);
tags.add(new ImmutableTag("type", sdkServerExecutorMetric.getType()));
initGrpcServerExecutorMetric(tags, sdkServerExecutorMetric);
tags = new ArrayList<>();
tags.add(immutableTag);
tags.add(new ImmutableTag("type", clusterServerExecutorMetric.getType()));
initGrpcServerExecutorMetric(tags, clusterServerExecutorMetric);
}
private static void initGrpcServerExecutorMetric(List<Tag> tags, GrpcServerExecutorMetric metric) {
List<Tag> snapshotTags = new ArrayList<>();
snapshotTags.add(new ImmutableTag("name", "activeCount"));
snapshotTags.addAll(tags);
NacosMeterRegistryCenter.gauge(METER_REGISTRY, "grpc_server_executor", snapshotTags, metric.getActiveCount());
snapshotTags = new ArrayList<>();
snapshotTags.add(new ImmutableTag("name", "poolSize"));
snapshotTags.addAll(tags);
NacosMeterRegistryCenter.gauge(METER_REGISTRY, "grpc_server_executor", snapshotTags, metric.getPoolSize());
snapshotTags = new ArrayList<>();
snapshotTags.add(new ImmutableTag("name", "corePoolSize"));
snapshotTags.addAll(tags);
NacosMeterRegistryCenter.gauge(METER_REGISTRY, "grpc_server_executor", snapshotTags, metric.getCorePoolSize());
snapshotTags = new ArrayList<>();
snapshotTags.add(new ImmutableTag("name", "maximumPoolSize"));
snapshotTags.addAll(tags);
NacosMeterRegistryCenter.gauge(METER_REGISTRY, "grpc_server_executor", snapshotTags, metric.getMaximumPoolSize());
snapshotTags = new ArrayList<>();
snapshotTags.add(new ImmutableTag("name", "inQueueTaskCount"));
snapshotTags.addAll(tags);
NacosMeterRegistryCenter.gauge(METER_REGISTRY, "grpc_server_executor", snapshotTags, metric.getInQueueTaskCount());
snapshotTags = new ArrayList<>();
snapshotTags.add(new ImmutableTag("name", "taskCount"));
snapshotTags.addAll(tags);
NacosMeterRegistryCenter.gauge(METER_REGISTRY, "grpc_server_executor", snapshotTags, metric.getTaskCount());
snapshotTags = new ArrayList<>();
snapshotTags.add(new ImmutableTag("name", "completedTaskCount"));
snapshotTags.addAll(tags);
NacosMeterRegistryCenter.gauge(METER_REGISTRY, "grpc_server_executor", snapshotTags, metric.getCompletedTaskCount());
}
public static AtomicInteger getLongConnectionMonitor() {
return longConnection;
}
public static void raftReadIndexFailed() {
RAFT_READ_INDEX_FAILED.record(1);
}
public static void raftReadFromLeader() {
RAFT_FROM_LEADER.record(1);
}
public static Timer getRaftApplyLogTimer() {
return RAFT_APPLY_LOG_TIMER;
}
public static Timer getRaftApplyReadTimer() {
return RAFT_APPLY_READ_TIMER;
}
public static DistributionSummary getRaftReadIndexFailed() {
return RAFT_READ_INDEX_FAILED;
}
public static DistributionSummary getRaftFromLeader() {
return RAFT_FROM_LEADER;
}
public static GrpcServerExecutorMetric getSdkServerExecutorMetric() {
return sdkServerExecutorMetric;
}
public static GrpcServerExecutorMetric getClusterServerExecutorMetric() {
return clusterServerExecutorMetric;
}
public static
|
MetricsMonitor
|
java
|
netty__netty
|
pkitesting/src/main/java/io/netty/pkitesting/CertificateBuilder.java
|
{
"start": 51334,
"end": 51440
}
|
class ____ {
private static final SecureRandom RANDOM = new SecureRandom();
}
}
|
SecureRandomHolder
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/factory/config/ObjectFactoryCreatingFactoryBean.java
|
{
"start": 5302,
"end": 5759
}
|
class ____ implements ObjectFactory<Object>, Serializable {
private final BeanFactory beanFactory;
private final String targetBeanName;
public TargetBeanObjectFactory(BeanFactory beanFactory, String targetBeanName) {
this.beanFactory = beanFactory;
this.targetBeanName = targetBeanName;
}
@Override
public Object getObject() throws BeansException {
return this.beanFactory.getBean(this.targetBeanName);
}
}
}
|
TargetBeanObjectFactory
|
java
|
elastic__elasticsearch
|
x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/FrozenCacheInfoService.java
|
{
"start": 6417,
"end": 6603
}
|
class ____ {
volatile NodeState nodeState = NodeState.FETCHING;
}
/**
* The state of the retrieval of the frozen cache info on a node
*/
public
|
NodeStateHolder
|
java
|
google__gson
|
test-shrinker/src/main/java/com/example/Main.java
|
{
"start": 11715,
"end": 12072
}
|
class ____ referenced
new TypeToken<List<InterfaceWithImplementation.Implementation>>() {});
return list.get(0).getValue();
} catch (ClassCastException e) {
// TODO: R8 causes exception, see https://github.com/google/gson/issues/2658
return "ClassCastException";
}
});
}
}
|
is
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/legacy/InnerKey.java
|
{
"start": 206,
"end": 1038
}
|
class ____ implements Serializable {
private String akey;
private String bkey;
public String getAkey() {
return akey;
}
public void setAkey(String akey) {
this.akey = akey;
}
public String getBkey() {
return bkey;
}
public void setBkey(String bkey) {
this.bkey = bkey;
}
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof InnerKey)) return false;
final InnerKey cidSuperID = (InnerKey) o;
if (akey != null ? !akey.equals(cidSuperID.akey) : cidSuperID.akey != null) return false;
if (bkey != null ? !bkey.equals(cidSuperID.bkey) : cidSuperID.bkey != null) return false;
return true;
}
public int hashCode() {
int result;
result = (akey != null ? akey.hashCode() : 0);
result = 29 * result + (bkey != null ? bkey.hashCode() : 0);
return result;
}
}
|
InnerKey
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/ser/ValueSerializerModifierTest.java
|
{
"start": 2988,
"end": 3423
}
|
class ____ extends ValueSerializerModifier
{
private final ValueSerializer<?> _serializer;
public ReplacingModifier(ValueSerializer<?> s) { _serializer = s; }
@Override
public ValueSerializer<?> modifySerializer(SerializationConfig config,
BeanDescription.Supplier beanDesc, ValueSerializer<?> serializer) {
return _serializer;
}
}
static
|
ReplacingModifier
|
java
|
spring-projects__spring-security
|
config/src/integration-test/java/org/springframework/security/config/annotation/rsocket/HelloHandler.java
|
{
"start": 904,
"end": 1336
}
|
class ____ implements SocketAcceptor {
@Override
public Mono<RSocket> accept(ConnectionSetupPayload setup, RSocket sendingSocket) {
return Mono.just(new RSocket() {
@Override
public Mono<Payload> requestResponse(Payload payload) {
String data = payload.getDataUtf8();
payload.release();
System.out.println("Got " + data);
return Mono.just(ByteBufPayload.create("Hello " + data));
}
});
}
}
|
HelloHandler
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/generated-src/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundToInt.java
|
{
"start": 769,
"end": 9022
}
|
class ____ {
static final RoundTo.Build BUILD = (source, field, points) -> {
int[] f = points.stream().mapToInt(p -> ((Number) p).intValue()).toArray();
return switch (f.length) {
// TODO should be a consistent way to do the 0 version - is CASE(MV_COUNT(f) == 1, f[0])
case 1 -> new RoundToInt1Evaluator.Factory(source, field, f[0]);
/*
* These hand-unrolled implementations are even faster than the linear scan implementations.
*/
case 2 -> new RoundToInt2Evaluator.Factory(source, field, f[0], f[1]);
case 3 -> new RoundToInt3Evaluator.Factory(source, field, f[0], f[1], f[2]);
case 4 -> new RoundToInt4Evaluator.Factory(source, field, f[0], f[1], f[2], f[3]);
case 5 -> new RoundToInt5Evaluator.Factory(source, field, f[0], f[1], f[2], f[3], f[4]);
case 6 -> new RoundToInt6Evaluator.Factory(source, field, f[0], f[1], f[2], f[3], f[4], f[5]);
case 7 -> new RoundToInt7Evaluator.Factory(source, field, f[0], f[1], f[2], f[3], f[4], f[5], f[6]);
case 8 -> new RoundToInt8Evaluator.Factory(source, field, f[0], f[1], f[2], f[3], f[4], f[5], f[6], f[7]);
case 9 -> new RoundToInt9Evaluator.Factory(source, field, f[0], f[1], f[2], f[3], f[4], f[5], f[6], f[7], f[8]);
case 10 -> new RoundToInt10Evaluator.Factory(source, field, f[0], f[1], f[2], f[3], f[4], f[5], f[6], f[7], f[8], f[9]);
/*
* Break point of 10 experimentally derived on Nik's laptop (13th Gen Intel(R) Core(TM) i7-1370P)
* on 2025-05-22.
*/
default -> new RoundToIntBinarySearchEvaluator.Factory(source, field, f);
};
};
@Evaluator(extraName = "BinarySearch")
static int process(int field, @Fixed(includeInToString = false) int[] points) {
int idx = Arrays.binarySearch(points, field);
return points[idx >= 0 ? idx : Math.max(0, -idx - 2)];
}
@Evaluator(extraName = "1")
static int process(int field, @Fixed int p0) {
return p0;
}
@Evaluator(extraName = "2")
static int process(int field, @Fixed int p0, @Fixed int p1) {
if (field < p1) {
return p0;
}
return p1;
}
@Evaluator(extraName = "3")
static int process(int field, @Fixed int p0, @Fixed int p1, @Fixed int p2) {
if (field < p1) {
return p0;
}
if (field < p2) {
return p1;
}
return p2;
}
@Evaluator(extraName = "4")
static int process(int field, @Fixed int p0, @Fixed int p1, @Fixed int p2, @Fixed int p3) {
if (field < p1) {
return p0;
}
if (field < p2) {
return p1;
}
if (field < p3) {
return p2;
}
return p3;
}
/*
* Manual binary search for 5 rounding points, it is faster than linear search or array style binary search.
*/
@Evaluator(extraName = "5")
static int process(int field, @Fixed int p0, @Fixed int p1, @Fixed int p2, @Fixed int p3, @Fixed int p4) {
if (field < p2) {
if (field < p1) {
return p0;
}
return p1;
}
if (field < p3) {
return p2;
}
if (field < p4) {
return p3;
}
return p4;
}
/*
* Manual binary search for 6 rounding points, it is faster than linear search or array style binary search.
*/
@Evaluator(extraName = "6")
static int process(
int field, // hack to keep the formatter happy.
@Fixed int p0, // int is so short this should be on one line but double is not.
@Fixed int p1, // That's not compatible with the templates.
@Fixed int p2, // So we comment to make the formatter not try to change the line.
@Fixed int p3,
@Fixed int p4,
@Fixed int p5
) {
if (field < p2) {
if (field < p1) {
return p0;
}
return p1;
}
if (field < p4) {
if (field < p3) {
return p2;
}
return p3;
}
if (field < p5) {
return p4;
}
return p5;
}
/*
* Manual binary search for 7 rounding points, it is faster than linear search or array style binary search.
*/
@Evaluator(extraName = "7")
static int process(
int field, // hack to keep the formatter happy.
@Fixed int p0, // int is so short this should be on one line but double is not.
@Fixed int p1, // That's not compatible with the templates.
@Fixed int p2, // So we comment to make the formatter not try to change the line.
@Fixed int p3,
@Fixed int p4,
@Fixed int p5,
@Fixed int p6
) {
if (field < p3) {
if (field < p1) {
return p0;
}
if (field < p2) {
return p1;
}
return p2;
}
if (field < p5) {
if (field < p4) {
return p3;
}
return p4;
}
if (field < p6) {
return p5;
}
return p6;
}
/*
* Manual binary search for 8 rounding points, it is faster than linear search or array style binary search.
*/
@Evaluator(extraName = "8")
static int process(
int field,
@Fixed int p0,
@Fixed int p1,
@Fixed int p2,
@Fixed int p3,
@Fixed int p4,
@Fixed int p5,
@Fixed int p6,
@Fixed int p7
) {
if (field < p3) {
if (field < p1) {
return p0;
}
if (field < p2) {
return p1;
}
return p2;
}
if (field < p5) {
if (field < p4) {
return p3;
}
return p4;
}
if (field < p6) {
return p5;
}
if (field < p7) {
return p6;
}
return p7;
}
/*
* Manual binary search for 9 rounding points, it is faster than linear search or array style binary search.
*/
@Evaluator(extraName = "9")
static int process(
int field,
@Fixed int p0,
@Fixed int p1,
@Fixed int p2,
@Fixed int p3,
@Fixed int p4,
@Fixed int p5,
@Fixed int p6,
@Fixed int p7,
@Fixed int p8
) {
if (field < p4) {
if (field < p1) {
return p0;
}
if (field < p2) {
return p1;
}
if (field < p3) {
return p2;
}
return p3;
}
if (field < p6) {
if (field < p5) {
return p4;
}
return p5;
}
if (field < p7) {
return p6;
}
if (field < p8) {
return p7;
}
return p8;
}
/*
* Manual binary search for 10 rounding points, it is faster than linear search or array style binary search.
*/
@Evaluator(extraName = "10")
static int process(
int field,
@Fixed int p0,
@Fixed int p1,
@Fixed int p2,
@Fixed int p3,
@Fixed int p4,
@Fixed int p5,
@Fixed int p6,
@Fixed int p7,
@Fixed int p8,
@Fixed int p9
) {
if (field < p4) {
if (field < p1) {
return p0;
}
if (field < p2) {
return p1;
}
if (field < p3) {
return p2;
}
return p3;
}
if (field < p7) {
if (field < p5) {
return p4;
}
if (field < p6) {
return p5;
}
return p6;
}
if (field < p8) {
return p7;
}
if (field < p9) {
return p8;
}
return p9;
}
}
|
RoundToInt
|
java
|
spring-projects__spring-framework
|
spring-context/src/test/java/org/springframework/resilience/RetryInterceptorTests.java
|
{
"start": 19200,
"end": 19275
}
|
class ____ {
}
@EnableAsync
@EnableResilientMethods
static
|
EnablingConfig
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/function/TruncFunction.java
|
{
"start": 5581,
"end": 6329
}
|
class ____ implements FunctionRenderer {
private final PatternRenderer truncPattern;
private final PatternRenderer twoArgTruncPattern;
public TruncRenderingSupport(PatternRenderer truncPattern, PatternRenderer twoArgTruncPattern) {
this.truncPattern = truncPattern;
this.twoArgTruncPattern = twoArgTruncPattern;
}
@Override
public void render(
SqlAppender sqlAppender,
List<? extends SqlAstNode> sqlAstArguments,
ReturnableType<?> returnType,
SqlAstTranslator<?> walker) {
final var pattern =
twoArgTruncPattern != null && sqlAstArguments.size() == 2
? twoArgTruncPattern
: truncPattern;
pattern.render( sqlAppender, sqlAstArguments, walker );
}
}
protected static
|
TruncRenderingSupport
|
java
|
apache__camel
|
components/camel-undertow/src/test/java/org/apache/camel/component/undertow/UndertowError500Test.java
|
{
"start": 1128,
"end": 2107
}
|
class ____ extends BaseUndertowTest {
@Test
public void testHttp500Error() throws Exception {
getMockEndpoint("mock:input").expectedBodiesReceived("Hello World");
try {
template.requestBody("http://localhost:{{port}}/foo", "Hello World", String.class);
fail("Should have failed");
} catch (CamelExecutionException e) {
}
MockEndpoint.assertIsSatisfied(context);
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("undertow:http://localhost:{{port}}/foo")
.to("mock:input")
// trigger failure by setting error code to 500
.setHeader(Exchange.HTTP_RESPONSE_CODE, constant(500))
.setBody().constant("Camel cannot do this");
}
};
}
}
|
UndertowError500Test
|
java
|
google__dagger
|
hilt-compiler/main/java/dagger/hilt/processor/internal/Processors.java
|
{
"start": 2887,
"end": 3335
}
|
class ____ an aggregating annotation. */
public static void generateAggregatingClass(
String aggregatingPackage,
AnnotationSpec aggregatingAnnotation,
XTypeElement originatingElement,
Class<?> generatorClass) {
generateAggregatingClass(
aggregatingPackage,
aggregatingAnnotation,
originatingElement,
generatorClass,
Mode.Isolating);
}
/** Generates the aggregating metadata
|
for
|
java
|
apache__flink
|
flink-yarn/src/main/java/org/apache/flink/yarn/configuration/YarnConfigOptions.java
|
{
"start": 1701,
"end": 1877
}
|
class ____ configuration constants used by Flink's YARN runners.
*
* <p>These options are not expected to be ever configured by users explicitly.
*/
@PublicEvolving
public
|
holds
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/PipelineStepWithEventTest.java
|
{
"start": 5681,
"end": 6608
}
|
class ____ extends DelegateAsyncProcessor {
private final StepEventListener listener;
private final String id;
public MyStepEventProcessor(String id, Processor processor, StepEventListener listener) {
super(processor);
this.id = id;
this.listener = listener;
}
@Override
public boolean process(final Exchange exchange, final AsyncCallback callback) {
final StopWatch watch = new StopWatch();
if (listener != null) {
listener.beforeStep(new BeforeStepEvent(exchange, id));
}
return super.process(exchange, doneSync -> {
if (listener != null) {
listener.afterStep(new AfterStepEvent(exchange, id, watch.taken()));
}
callback.done(doneSync);
});
}
}
private static
|
MyStepEventProcessor
|
java
|
apache__flink
|
flink-end-to-end-tests/flink-end-to-end-tests-jdbc-driver/src/main/java/org/apache/flink/table/jdbc/driver/tests/FlinkDriverExample.java
|
{
"start": 1221,
"end": 4835
}
|
class ____ {
public static void main(String[] args) throws Exception {
final String driver = "org.apache.flink.table.jdbc.FlinkDriver";
final String url = "jdbc:flink://localhost:8083";
try {
Class.forName(driver);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
String tableDir = args[0];
String tableOutput = String.format("%s/output.dat", tableDir);
try (Connection connection = DriverManager.getConnection(url)) {
try (Statement statement = connection.createStatement()) {
checkState(
!statement.execute(
String.format(
"CREATE TABLE test_table(id bigint, val int, str string) "
+ "with ("
+ "'connector'='filesystem',\n"
+ "'format'='csv',\n"
+ "'path'='%s/test_table')",
tableDir)));
// INSERT TABLE returns job id
checkState(
statement.execute(
"INSERT INTO test_table VALUES "
+ "(1, 11, '111'), "
+ "(3, 33, '333'), "
+ "(2, 22, '222'), "
+ "(4, 44, '444')"));
String jobId;
try (ResultSet resultSet = statement.getResultSet()) {
checkState(resultSet.next());
checkState(resultSet.getMetaData().getColumnCount() == 1);
jobId = resultSet.getString("job id");
checkState(!resultSet.next());
}
boolean jobFinished = false;
while (!jobFinished) {
checkState(statement.execute("SHOW JOBS"));
try (ResultSet resultSet = statement.getResultSet()) {
while (resultSet.next()) {
if (resultSet.getString(1).equals(jobId)) {
if (resultSet.getString(3).equals("FINISHED")) {
jobFinished = true;
break;
}
}
}
}
}
// SELECT all data from test_table
List<String> resultList = new ArrayList<>();
try (ResultSet resultSet = statement.executeQuery("SELECT * FROM test_table")) {
while (resultSet.next()) {
resultList.add(
String.format(
"%s,%s,%s",
resultSet.getLong("id"),
resultSet.getInt("val"),
resultSet.getString("str")));
}
}
Collections.sort(resultList);
BufferedWriter bw = new BufferedWriter(new FileWriter(tableOutput));
for (String result : resultList) {
bw.write(result);
bw.newLine();
}
bw.flush();
bw.close();
}
}
}
}
|
FlinkDriverExample
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/plugin/EsqlCorePlugin.java
|
{
"start": 455,
"end": 644
}
|
class ____ extends Plugin implements ExtensiblePlugin {
public static final FeatureFlag EXPONENTIAL_HISTOGRAM_FEATURE_FLAG = new FeatureFlag("esql_exponential_histogram");
}
|
EsqlCorePlugin
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/temptable/StandardLocalTemporaryTableStrategy.java
|
{
"start": 386,
"end": 1478
}
|
class ____ implements TemporaryTableStrategy {
public static final StandardLocalTemporaryTableStrategy INSTANCE = new StandardLocalTemporaryTableStrategy();
@Override
public String adjustTemporaryTableName(String desiredTableName) {
return desiredTableName;
}
@Override
public TemporaryTableKind getTemporaryTableKind() {
return TemporaryTableKind.LOCAL;
}
@Override
public @Nullable String getTemporaryTableCreateOptions() {
return null;
}
@Override
public String getTemporaryTableCreateCommand() {
return "create local temporary table";
}
@Override
public String getTemporaryTableDropCommand() {
return "drop table";
}
@Override
public String getTemporaryTableTruncateCommand() {
return "delete from";
}
@Override
public String getCreateTemporaryTableColumnAnnotation(int sqlTypeCode) {
return "";
}
@Override
public AfterUseAction getTemporaryTableAfterUseAction() {
return AfterUseAction.NONE;
}
@Override
public BeforeUseAction getTemporaryTableBeforeUseAction() {
return BeforeUseAction.CREATE;
}
}
|
StandardLocalTemporaryTableStrategy
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/StackTracePruningTests.java
|
{
"start": 1366,
"end": 7579
}
|
class ____ {
@Test
void shouldPruneStackTraceByDefault() {
EngineExecutionResults results = EngineTestKit.engine("junit-jupiter") //
.selectors(selectMethod(FailingTestTestCase.class, "failingAssertion")) //
.execute();
List<StackTraceElement> stackTrace = extractStackTrace(results);
assertStackTraceDoesNotContain(stackTrace,
"jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:");
}
@Test
void shouldPruneStackTraceWhenEnabled() {
EngineExecutionResults results = EngineTestKit.engine("junit-jupiter") //
.configurationParameter("junit.platform.stacktrace.pruning.enabled", "true") //
.selectors(selectMethod(FailingTestTestCase.class, "failingAssertion")) //
.execute();
List<StackTraceElement> stackTrace = extractStackTrace(results);
assertStackTraceDoesNotContain(stackTrace,
"jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:");
}
@Test
void shouldNotPruneStackTraceWhenDisabled() {
EngineExecutionResults results = EngineTestKit.engine("junit-jupiter") //
.configurationParameter("junit.platform.stacktrace.pruning.enabled", "false") //
.selectors(selectMethod(FailingTestTestCase.class, "failingAssertion")) //
.execute();
List<StackTraceElement> stackTrace = extractStackTrace(results);
assertStackTraceMatch(stackTrace, """
\\Qorg.junit.jupiter.api.AssertionUtils.fail(AssertionUtils.java:\\E.+
>>>>
\\Qorg.junit.platform.commons.util.ReflectionUtils.invokeMethod(ReflectionUtils.java:\\E.+
>>>>
""");
}
@Test
void shouldAlwaysKeepJupiterAssertionStackTraceElement() {
EngineExecutionResults results = EngineTestKit.engine("junit-jupiter") //
.configurationParameter("junit.platform.stacktrace.pruning.enabled", "true") //
.selectors(selectMethod(FailingTestTestCase.class, "failingAssertion")) //
.execute();
List<StackTraceElement> stackTrace = extractStackTrace(results);
assertStackTraceMatch(stackTrace, """
>>>>
\\Qorg.junit.jupiter.api.Assertions.fail(Assertions.java:\\E.+
>>>>
""");
}
@Test
void shouldAlwaysKeepJupiterAssumptionStackTraceElement() {
EngineExecutionResults results = EngineTestKit.engine("junit-jupiter") //
.configurationParameter("junit.platform.stacktrace.pruning.enabled", "true") //
.selectors(selectMethod(FailingTestTestCase.class, "failingAssumption")) //
.execute();
List<StackTraceElement> stackTrace = extractStackTrace(results);
assertStackTraceMatch(stackTrace, """
>>>>
\\Qorg.junit.jupiter.api.Assumptions.assumeTrue(Assumptions.java:\\E.+
>>>>
""");
}
@Test
void shouldKeepExactlyEverythingAfterTestCall() {
EngineExecutionResults results = EngineTestKit.engine("junit-jupiter") //
.configurationParameter("junit.platform.stacktrace.pruning.enabled", "true") //
.selectors(selectMethod(FailingTestTestCase.class, "failingAssertion")) //
.execute();
List<StackTraceElement> stackTrace = extractStackTrace(results);
assertStackTraceMatch(stackTrace,
"""
\\Qorg.junit.jupiter.api.AssertionUtils.fail(AssertionUtils.java:\\E.+
\\Qorg.junit.jupiter.api.Assertions.fail(Assertions.java:\\E.+
\\Qorg.junit.platform.StackTracePruningTests$FailingTestTestCase.failingAssertion(StackTracePruningTests.java:\\E.+
""");
}
@ParameterizedTest
@ValueSource(strings = { "org.junit.platform.StackTracePruningTests$FailingBeforeEachTestCase",
"org.junit.platform.StackTracePruningTests$FailingBeforeEachTestCase$NestedTestCase",
"org.junit.platform.StackTracePruningTests$FailingBeforeEachTestCase$NestedTestCase$NestedNestedTestCase" })
void shouldKeepExactlyEverythingAfterLifecycleMethodCall(Class<?> methodClass) {
EngineExecutionResults results = EngineTestKit.engine("junit-jupiter") //
.configurationParameter("junit.platform.stacktrace.pruning.enabled", "true") //
.selectors(selectMethod(methodClass, "test")) //
.execute();
List<StackTraceElement> stackTrace = extractStackTrace(results);
assertStackTraceMatch(stackTrace,
"""
\\Qorg.junit.jupiter.api.AssertionUtils.fail(AssertionUtils.java:\\E.+
\\Qorg.junit.jupiter.api.Assertions.fail(Assertions.java:\\E.+
\\Qorg.junit.platform.StackTracePruningTests$FailingBeforeEachTestCase.setUp(StackTracePruningTests.java:\\E.+
""");
}
@Test
void shouldPruneStackTracesOfSuppressedExceptions() {
EngineExecutionResults results = EngineTestKit.engine("junit-jupiter") //
.configurationParameter("junit.platform.stacktrace.pruning.enabled", "true") //
.selectors(selectMethod(FailingTestTestCase.class, "multipleFailingAssertions")) //
.execute();
Throwable throwable = getThrowable(results);
for (Throwable suppressed : throwable.getSuppressed()) {
List<StackTraceElement> stackTrace = Arrays.asList(suppressed.getStackTrace());
assertStackTraceDoesNotContain(stackTrace,
"jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:");
}
}
private static List<StackTraceElement> extractStackTrace(EngineExecutionResults results) {
return Arrays.asList(getThrowable(results).getStackTrace());
}
private static Throwable getThrowable(EngineExecutionResults results) {
var failedTestEvent = results.testEvents().failed().list().getFirst();
var testResult = failedTestEvent.getRequiredPayload(TestExecutionResult.class);
return testResult.getThrowable().orElseThrow();
}
private static void assertStackTraceMatch(List<StackTraceElement> stackTrace, String expectedLines) {
List<String> stackStraceAsLines = stackTrace.stream() //
.map(StackTraceElement::toString) //
.toList();
assertLinesMatch(expectedLines.lines().toList(), stackStraceAsLines);
}
private static void assertStackTraceDoesNotContain(List<StackTraceElement> stackTrace, String element) {
String stackStraceAsString = stackTrace.stream() //
.map(StackTraceElement::toString) //
.collect(Collectors.joining());
assertThat(stackStraceAsString).doesNotContain(element);
}
// -------------------------------------------------------------------
@SuppressWarnings("JUnitMalformedDeclaration")
static
|
StackTracePruningTests
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/FinallyFunction.java
|
{
"start": 1090,
"end": 1294
}
|
interface ____ a function that is used to perform
* final actions after an asynchronous operation completes, regardless of whether the
* operation was successful or resulted in an exception. This
|
represents
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/IgnoreProvisionKeyWildcardsTest.java
|
{
"start": 21635,
"end": 21922
}
|
interface ____ {",
" @BindsOptionalOf Foo<? extends Bar> fooExtends();",
" @BindsOptionalOf Foo<Bar> foo();",
"}"),
/* kotlinComponentClass = */
NEW_LINES.join(
"@Component(modules = [MyModule::class])",
"
|
MyModule
|
java
|
spring-projects__spring-framework
|
spring-tx/src/test/java/org/springframework/transaction/annotation/AnnotationTransactionAttributeSourceTests.java
|
{
"start": 2270,
"end": 3776
}
|
class ____ {
private final AnnotationTransactionAttributeSource attributeSource = new AnnotationTransactionAttributeSource();
@Test
void serializable() throws Exception {
TestBean1 tb = new TestBean1();
CallCountingTransactionManager ptm = new CallCountingTransactionManager();
TransactionInterceptor ti = new TransactionInterceptor((TransactionManager) ptm, this.attributeSource);
ProxyFactory proxyFactory = new ProxyFactory();
proxyFactory.setInterfaces(ITestBean1.class);
proxyFactory.addAdvice(ti);
proxyFactory.setTarget(tb);
ITestBean1 proxy = (ITestBean1) proxyFactory.getProxy();
proxy.getAge();
assertThat(ptm.commits).isEqualTo(1);
ITestBean1 serializedProxy = SerializationTestUtils.serializeAndDeserialize(proxy);
serializedProxy.getAge();
Advised advised = (Advised) serializedProxy;
TransactionInterceptor serializedTi = (TransactionInterceptor) advised.getAdvisors()[0].getAdvice();
CallCountingTransactionManager serializedPtm =
(CallCountingTransactionManager) serializedTi.getTransactionManager();
assertThat(serializedPtm.commits).isEqualTo(2);
}
@Test
void nullOrEmpty() {
Method method = getMethod(Empty.class, "getAge");
assertThat(this.attributeSource.getTransactionAttribute(method, null)).isNull();
// Try again in case of caching
assertThat(this.attributeSource.getTransactionAttribute(method, null)).isNull();
}
/**
* Test the important case where the invocation is on a proxied
|
AnnotationTransactionAttributeSourceTests
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/insertordering/InsertOrderingRCATest.java
|
{
"start": 33877,
"end": 34000
}
|
enum ____ {
AVG,
MAX,
MIN,
COUNT,
SUM,
EXISTS,
LAST,
FIRST,
INCREASED_BY
}
public
|
DataManipulationFunction
|
java
|
quarkusio__quarkus
|
integration-tests/mongodb-panache/src/main/java/io/quarkus/it/mongodb/panache/TestEndpoint.java
|
{
"start": 197,
"end": 3168
}
|
class ____ {
@GET
public String testAccessors() throws NoSuchMethodException, SecurityException {
checkMethod(AccessorEntity.class, "getString", String.class);
checkMethod(AccessorEntity.class, "isBool", boolean.class);
checkMethod(AccessorEntity.class, "getC", char.class);
checkMethod(AccessorEntity.class, "getS", short.class);
checkMethod(AccessorEntity.class, "getI", int.class);
checkMethod(AccessorEntity.class, "getL", long.class);
checkMethod(AccessorEntity.class, "getF", float.class);
checkMethod(AccessorEntity.class, "getD", double.class);
checkMethod(AccessorEntity.class, "getT", Object.class);
checkMethod(AccessorEntity.class, "getT2", Object.class);
checkMethod(AccessorEntity.class, "setString", void.class, String.class);
checkMethod(AccessorEntity.class, "setBool", void.class, boolean.class);
checkMethod(AccessorEntity.class, "setC", void.class, char.class);
checkMethod(AccessorEntity.class, "setS", void.class, short.class);
checkMethod(AccessorEntity.class, "setI", void.class, int.class);
checkMethod(AccessorEntity.class, "setL", void.class, long.class);
checkMethod(AccessorEntity.class, "setF", void.class, float.class);
checkMethod(AccessorEntity.class, "setD", void.class, double.class);
checkMethod(AccessorEntity.class, "setT", void.class, Object.class);
checkMethod(AccessorEntity.class, "setT2", void.class, Object.class);
try {
checkMethod(AccessorEntity.class, "getTrans2", Object.class);
Assertions.fail("transient field should have no getter: trans2");
} catch (NoSuchMethodException x) {
}
try {
checkMethod(AccessorEntity.class, "setTrans2", void.class, Object.class);
Assertions.fail("transient field should have no setter: trans2");
} catch (NoSuchMethodException x) {
}
// Now check that accessors are called
AccessorEntity entity = new AccessorEntity();
@SuppressWarnings("unused")
byte b = entity.b;
Assertions.assertEquals(1, entity.getBCalls);
entity.i = 2;
Assertions.assertEquals(1, entity.setICalls);
Object trans = entity.trans;
Assertions.assertEquals(0, entity.getTransCalls);
entity.trans = trans;
Assertions.assertEquals(0, entity.setTransCalls);
// accessors inside the entity itself
entity.method();
Assertions.assertEquals(2, entity.getBCalls);
Assertions.assertEquals(2, entity.setICalls);
return "OK";
}
private void checkMethod(Class<?> klass, String name, Class<?> returnType, Class<?>... params)
throws NoSuchMethodException, SecurityException {
Method method = klass.getMethod(name, params);
Assertions.assertEquals(returnType, method.getReturnType());
}
}
|
TestEndpoint
|
java
|
jhy__jsoup
|
src/main/java/org/jsoup/safety/Safelist.java
|
{
"start": 25223,
"end": 25454
}
|
class ____ extends TypedValue {
Protocol(String value) {
super(value);
}
static Protocol valueOf(String value) {
return new Protocol(value);
}
}
abstract static
|
Protocol
|
java
|
google__auto
|
value/src/test/java/com/google/auto/value/processor/AutoValueCompilationTest.java
|
{
"start": 142273,
"end": 142563
}
|
class ____ {",
" abstract String foo();",
" }",
"}");
JavaFileObject parentFileObject =
JavaFileObjects.forSourceLines(
"otherpackage.Parent",
"package otherpackage;",
"",
"public abstract
|
Inner
|
java
|
alibaba__nacos
|
core/src/test/java/com/alibaba/nacos/core/control/remote/TpsControlRequestFilterTest.java
|
{
"start": 1933,
"end": 5216
}
|
class ____ {
TpsControlRequestFilter tpsControlRequestFilter;
MockedStatic<ControlManagerCenter> controlManagerCenterMockedStatic;
@Mock
private ControlManagerCenter controlManagerCenter;
@Mock
private TpsControlManager tpsControlManager;
@BeforeEach
void before() {
tpsControlRequestFilter = new TpsControlRequestFilter();
controlManagerCenterMockedStatic = Mockito.mockStatic(ControlManagerCenter.class);
controlManagerCenterMockedStatic.when(() -> ControlManagerCenter.getInstance()).thenReturn(controlManagerCenter);
Mockito.when(controlManagerCenter.getTpsControlManager()).thenReturn(tpsControlManager);
}
@AfterEach
void after() {
controlManagerCenterMockedStatic.close();
}
/**
* test tps check passed ,response is null.
*/
@Test
void testPass() {
RemoteTpsCheckRequestParserRegistry.register(new RemoteTpsCheckRequestParser() {
@Override
public TpsCheckRequest parse(Request request, RequestMeta meta) {
return new TpsCheckRequest();
}
@Override
public String getPointName() {
return "HealthCheck";
}
@Override
public String getName() {
return "HealthCheck";
}
});
HealthCheckRequest healthCheckRequest = new HealthCheckRequest();
RequestMeta requestMeta = new RequestMeta();
TpsCheckResponse tpsCheckResponse = new TpsCheckResponse(true, 200, "success");
Mockito.when(tpsControlManager.check(any(TpsCheckRequest.class))).thenReturn(tpsCheckResponse);
Response filterResponse = tpsControlRequestFilter.filter(healthCheckRequest, requestMeta, HealthCheckRequestHandler.class);
assertNull(filterResponse);
}
/**
* test tps check rejected ,response is not null.
*/
@Test
void testRejected() {
HealthCheckRequest healthCheckRequest = new HealthCheckRequest();
RequestMeta requestMeta = new RequestMeta();
TpsCheckResponse tpsCheckResponse = new TpsCheckResponse(false, 5031, "rejected");
Mockito.when(tpsControlManager.check(any(TpsCheckRequest.class))).thenReturn(tpsCheckResponse);
Response filterResponse = tpsControlRequestFilter.filter(healthCheckRequest, requestMeta, HealthCheckRequestHandler.class);
assertNotNull(filterResponse);
assertEquals(NacosException.OVER_THRESHOLD, filterResponse.getErrorCode());
assertEquals("Tps Flow restricted:" + tpsCheckResponse.getMessage(), filterResponse.getMessage());
}
/**
* test tps check exception ,return null skip.
*/
@Test
void testTpsCheckException() {
HealthCheckRequest healthCheckRequest = new HealthCheckRequest();
RequestMeta requestMeta = new RequestMeta();
Mockito.when(tpsControlManager.check(any(TpsCheckRequest.class))).thenThrow(new NacosRuntimeException(12345));
Response filterResponse = tpsControlRequestFilter.filter(healthCheckRequest, requestMeta, HealthCheckRequestHandler.class);
assertNull(filterResponse);
}
}
|
TpsControlRequestFilterTest
|
java
|
netty__netty
|
handler/src/main/java/io/netty/handler/ssl/JdkDelegatingPrivateKeyMethod.java
|
{
"start": 12355,
"end": 13200
}
|
class ____ {
private final String jdkAlgorithm;
private final String keyTypeName;
private final int hashCode;
@Override
public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) {
return false;
}
CacheKey cacheKey = (CacheKey) o;
return Objects.equals(cacheKey.jdkAlgorithm, jdkAlgorithm)
&& Objects.equals(cacheKey.keyTypeName, keyTypeName);
}
@Override
public int hashCode() {
return hashCode;
}
CacheKey(String jdkAlgorithm, String keyTypeName) {
this.jdkAlgorithm = jdkAlgorithm;
this.keyTypeName = keyTypeName;
this.hashCode = 31 * jdkAlgorithm.hashCode() + keyTypeName.hashCode();
}
}
}
|
CacheKey
|
java
|
apache__camel
|
components/camel-jdbc/src/main/java/org/apache/camel/component/jdbc/JdbcOutputType.java
|
{
"start": 852,
"end": 922
}
|
enum ____ {
SelectOne,
SelectList,
StreamList
}
|
JdbcOutputType
|
java
|
spring-projects__spring-framework
|
spring-context/src/main/java/org/springframework/context/annotation/ConfigurationClassUtils.java
|
{
"start": 6621,
"end": 6771
}
|
class ____ within a configuration/component class).
* @param metadata the metadata of the annotated class
* @return {@code true} if the given
|
declared
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/recovery/SimpleRecoveryFixedDelayRestartStrategyITBase.java
|
{
"start": 1332,
"end": 2280
}
|
class ____ extends SimpleRecoveryITCaseBase {
@ClassRule
public static final MiniClusterWithClientResource MINI_CLUSTER_RESOURCE =
new MiniClusterWithClientResource(
new MiniClusterResourceConfiguration.Builder()
.setConfiguration(getConfiguration())
.setNumberTaskManagers(2)
.setNumberSlotsPerTaskManager(2)
.build());
private static Configuration getConfiguration() {
Configuration config = new Configuration();
config.set(RestartStrategyOptions.RESTART_STRATEGY, FIXED_DELAY.getMainValue());
config.set(RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS, 3);
config.set(
RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_DELAY, Duration.ofMillis(100));
return config;
}
}
|
SimpleRecoveryFixedDelayRestartStrategyITBase
|
java
|
spring-projects__spring-security
|
docs/src/test/java/org/springframework/security/docs/servlet/authentication/reauthentication/SimpleConfiguration.java
|
{
"start": 964,
"end": 1765
}
|
class ____ {
// tag::httpSecurity[]
@Bean
public SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception {
// @formatter:off
http
.authorizeHttpRequests((authorize) -> authorize.anyRequest().authenticated())
.formLogin(Customizer.withDefaults())
.oneTimeTokenLogin(Customizer.withDefaults());
// @formatter:on
return http.build();
}
// end::httpSecurity[]
@Bean
UserDetailsService userDetailsService() {
return new InMemoryUserDetailsManager(
User.withDefaultPasswordEncoder()
.username("user")
.password("password")
.authorities("app")
.build()
);
}
@Bean
OneTimeTokenGenerationSuccessHandler tokenGenerationSuccessHandler() {
return new RedirectOneTimeTokenGenerationSuccessHandler("/ott/sent");
}
}
|
SimpleConfiguration
|
java
|
elastic__elasticsearch
|
x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/fulltext/FullTextPredicate.java
|
{
"start": 727,
"end": 2346
}
|
enum ____ {
AND,
OR;
public org.elasticsearch.index.query.Operator toEs() {
return org.elasticsearch.index.query.Operator.fromString(name());
}
}
private final String query;
private final String options;
private final Map<String, String> optionMap;
// common properties
private final String analyzer;
FullTextPredicate(Source source, String query, String options, List<Expression> children) {
super(source, children);
this.query = query;
this.options = options;
// inferred
this.optionMap = FullTextUtils.parseSettings(options, source);
this.analyzer = optionMap.get("analyzer");
}
public String query() {
return query;
}
public String options() {
return options;
}
public Map<String, String> optionMap() {
return optionMap;
}
public String analyzer() {
return analyzer;
}
@Override
public Nullability nullable() {
return Nullability.FALSE;
}
@Override
public DataType dataType() {
return DataTypes.BOOLEAN;
}
@Override
public int hashCode() {
return Objects.hash(query, options);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
FullTextPredicate other = (FullTextPredicate) obj;
return Objects.equals(query, other.query) && Objects.equals(options, other.options);
}
}
|
Operator
|
java
|
apache__avro
|
lang/java/compiler/src/main/java/org/apache/avro/compiler/idl/DocCommentHelper.java
|
{
"start": 1362,
"end": 4709
}
|
class ____ {
/**
* Pattern to match the common whitespace indents in a multi-line String.
* Doesn't match a single-line String, fully matches any multi-line String.
*
* To use: match on a {@link String#trim() trimmed} String, and then replace all
* newlines followed by the group "indent" with a newline.
*/
private static final Pattern WS_INDENT = Pattern.compile("(?U).*\\R(?<indent>\\h*).*(?:\\R\\k<indent>.*)*");
/**
* Pattern to match the whitespace indents plus common stars (1 or 2) in a
* multi-line String. If a String fully matches, replace all occurrences of a
* newline followed by whitespace and then the group "stars" with a newline.
*
* Note: partial matches are invalid.
*/
private static final Pattern STAR_INDENT = Pattern.compile("(?U)(?<stars>\\*{1,2}).*(?:\\R\\h*\\k<stars>.*)*");
private static final ThreadLocal<DocComment> DOC = new ThreadLocal<>();
private static final ThreadLocal<List<String>> WARNINGS = ThreadLocal.withInitial(ArrayList::new);
/**
* Return all warnings that were encountered while parsing, once. Subsequent
* calls before parsing again will return an empty list.
*/
static List<String> getAndClearWarnings() {
List<String> warnings = WARNINGS.get();
WARNINGS.remove();
return warnings;
}
static void setDoc(Token token) {
DocComment newDocComment = new DocComment(token);
DocComment oldDocComment = DOC.get();
if (oldDocComment != null) {
WARNINGS.get()
.add(String.format(
"Found documentation comment at line %d, column %d. Ignoring previous one at line %d, column %d: \"%s\"\n"
+ "Did you mean to use a multiline comment ( /* ... */ ) instead?",
newDocComment.line, newDocComment.column, oldDocComment.line, oldDocComment.column, oldDocComment.text));
}
DOC.set(newDocComment);
}
/**
* Clear any documentation (and generate a warning if there was).
*
* This method should NOT be used after an optional component in a grammar
* (i.e., after a @code{[β¦]} or @code{β¦*} construct), because the optional
* grammar part may have already caused parsing a doc comment special token
* placed after the code block.
*/
static void clearDoc() {
DocComment oldDocComment = DOC.get();
if (oldDocComment != null) {
WARNINGS.get()
.add(String.format(
"Ignoring out-of-place documentation comment at line %d, column %d: \"%s\"\n"
+ "Did you mean to use a multiline comment ( /* ... */ ) instead?",
oldDocComment.line, oldDocComment.column, oldDocComment.text));
}
DOC.remove();
}
static String getDoc() {
DocComment docComment = DOC.get();
DOC.remove();
return docComment == null ? null : docComment.text;
}
/* Package private to facilitate testing */
static String stripIndents(String doc) {
Matcher starMatcher = STAR_INDENT.matcher(doc);
if (starMatcher.matches()) {
return doc.replaceAll("(?U)(?:^|(\\R)\\h*)\\Q" + starMatcher.group("stars") + "\\E\\h?", "$1");
}
Matcher whitespaceMatcher = WS_INDENT.matcher(doc);
if (whitespaceMatcher.matches()) {
return doc.replaceAll("(?U)(\\R)" + whitespaceMatcher.group("indent"), "$1");
}
return doc;
}
private static
|
DocCommentHelper
|
java
|
FasterXML__jackson-databind
|
src/test/java/tools/jackson/databind/deser/CachingOfDeserTest.java
|
{
"start": 828,
"end": 984
}
|
class ____ {
@JsonDeserialize(contentUsing = CustomDeserializer735.class)
public List<Integer> list;
}
public static
|
TestListWithCustom
|
java
|
google__error-prone
|
check_api/src/test/java/com/google/errorprone/util/FindIdentifiersTest.java
|
{
"start": 35373,
"end": 35546
}
|
class ____ method invocations. */
@BugPattern(
severity = SeverityLevel.ERROR,
summary = "Prints all fields in receivers of method invocations")
public static
|
on
|
java
|
apache__camel
|
components/camel-digitalocean/src/main/java/org/apache/camel/component/digitalocean/constants/DigitalOceanOperations.java
|
{
"start": 870,
"end": 1436
}
|
enum ____ {
create,
update,
delete,
list,
ownList,
get,
listBackups,
listActions,
listNeighbors,
listSnapshots,
listKernels,
listAllNeighbors,
enableBackups,
disableBackups,
reboot,
powerCycle,
shutdown,
powerOn,
powerOff,
restore,
resetPassword,
resize,
rebuild,
rename,
changeKernel,
enableIpv6,
enablePrivateNetworking,
takeSnapshot,
transfer,
convert,
attach,
detach,
assign,
unassign,
tag,
untag
}
|
DigitalOceanOperations
|
java
|
elastic__elasticsearch
|
x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocument.java
|
{
"start": 4834,
"end": 23872
}
|
class ____ {
public List<String> serviceProviderSigning = List.of();
public List<String> identityProviderSigning = List.of();
public List<String> identityProviderMetadataSigning = List.of();
public void setServiceProviderSigning(Collection<String> serviceProviderSigning) {
this.serviceProviderSigning = serviceProviderSigning == null ? List.of() : List.copyOf(serviceProviderSigning);
}
public void setIdentityProviderSigning(Collection<String> identityProviderSigning) {
this.identityProviderSigning = identityProviderSigning == null ? List.of() : List.copyOf(identityProviderSigning);
}
public void setIdentityProviderMetadataSigning(Collection<String> identityProviderMetadataSigning) {
this.identityProviderMetadataSigning = identityProviderMetadataSigning == null
? List.of()
: List.copyOf(identityProviderMetadataSigning);
}
public void setServiceProviderX509SigningCertificates(Collection<X509Certificate> certificates) {
this.serviceProviderSigning = encodeCertificates(certificates);
}
public List<X509Certificate> getServiceProviderX509SigningCertificates() {
return decodeCertificates(this.serviceProviderSigning);
}
public void setIdentityProviderX509SigningCertificates(Collection<X509Certificate> certificates) {
this.identityProviderSigning = encodeCertificates(certificates);
}
public List<X509Certificate> getIdentityProviderX509SigningCertificates() {
return decodeCertificates(this.identityProviderSigning);
}
public void setIdentityProviderX509MetadataSigningCertificates(Collection<X509Certificate> certificates) {
this.identityProviderMetadataSigning = encodeCertificates(certificates);
}
public List<X509Certificate> getIdentityProviderX509MetadataSigningCertificates() {
return decodeCertificates(this.identityProviderMetadataSigning);
}
private static List<String> encodeCertificates(Collection<X509Certificate> certificates) {
return certificates == null ? List.of() : certificates.stream().map(cert -> {
try {
return cert.getEncoded();
} catch (CertificateEncodingException e) {
throw new ElasticsearchException("Cannot read certificate", e);
}
}).map(Base64.getEncoder()::encodeToString).toList();
}
private static List<X509Certificate> decodeCertificates(List<String> encodedCertificates) {
if (encodedCertificates == null || encodedCertificates.isEmpty()) {
return List.of();
}
return encodedCertificates.stream().map(Certificates::decodeCertificate).toList();
}
private static X509Certificate decodeCertificate(String base64Cert) {
final byte[] bytes = base64Cert.getBytes(StandardCharsets.UTF_8);
try (InputStream stream = new ByteArrayInputStream(bytes)) {
final List<Certificate> certificates = CertParsingUtils.readCertificates(Base64.getDecoder().wrap(stream));
if (certificates.size() == 1) {
final Certificate certificate = certificates.get(0);
if (certificate instanceof X509Certificate) {
return (X509Certificate) certificate;
} else {
throw new ElasticsearchException("Certificate ({}) is not a X.509 certificate", certificate.getClass());
}
} else {
throw new ElasticsearchException("Expected a single certificate, but found {}", certificates.size());
}
} catch (IOException e) {
throw new UncheckedIOException(e);
} catch (CertificateException e) {
throw new ElasticsearchException("Cannot parse certificate(s)", e);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final Certificates that = (Certificates) o;
return Objects.equals(serviceProviderSigning, that.serviceProviderSigning)
&& Objects.equals(identityProviderSigning, that.identityProviderSigning)
&& Objects.equals(identityProviderMetadataSigning, that.identityProviderMetadataSigning);
}
@Override
public int hashCode() {
return Objects.hash(serviceProviderSigning, identityProviderSigning, identityProviderMetadataSigning);
}
}
@Nullable
public String docId;
public String name;
public String entityId;
public String acs;
@Nullable
public String nameIdFormat;
public boolean enabled = true;
public Instant created;
public Instant lastModified;
public Set<String> signMessages = Set.of();
@Nullable
public Long authenticationExpiryMillis;
public final Privileges privileges = new Privileges();
public final AttributeNames attributeNames = new AttributeNames();
public final Certificates certificates = new Certificates();
public SamlServiceProviderDocument() {}
public SamlServiceProviderDocument(StreamInput in) throws IOException {
docId = in.readOptionalString();
name = in.readString();
entityId = in.readString();
acs = in.readString();
enabled = in.readBoolean();
created = in.readInstant();
lastModified = in.readInstant();
nameIdFormat = in.readOptionalString();
authenticationExpiryMillis = in.readOptionalVLong();
privileges.resource = in.readString();
privileges.rolePatterns = new TreeSet<>(in.readCollectionAsSet(StreamInput::readString));
attributeNames.principal = in.readString();
attributeNames.email = in.readOptionalString();
attributeNames.name = in.readOptionalString();
attributeNames.roles = in.readOptionalString();
if (in.getTransportVersion().supports(IDP_CUSTOM_SAML_ATTRIBUTES_ALLOW_LIST)) {
attributeNames.extensions = in.readCollectionAsImmutableSet(StreamInput::readString);
}
certificates.serviceProviderSigning = in.readStringCollectionAsList();
certificates.identityProviderSigning = in.readStringCollectionAsList();
certificates.identityProviderMetadataSigning = in.readStringCollectionAsList();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(docId);
out.writeString(name);
out.writeString(entityId);
out.writeString(acs);
out.writeBoolean(enabled);
out.writeInstant(created);
out.writeInstant(lastModified);
out.writeOptionalString(nameIdFormat);
out.writeOptionalVLong(authenticationExpiryMillis);
out.writeString(privileges.resource);
out.writeStringCollection(privileges.rolePatterns == null ? Set.of() : privileges.rolePatterns);
out.writeString(attributeNames.principal);
out.writeOptionalString(attributeNames.email);
out.writeOptionalString(attributeNames.name);
out.writeOptionalString(attributeNames.roles);
if (out.getTransportVersion().supports(IDP_CUSTOM_SAML_ATTRIBUTES_ALLOW_LIST)) {
out.writeStringCollection(attributeNames.extensions);
}
out.writeStringCollection(certificates.serviceProviderSigning);
out.writeStringCollection(certificates.identityProviderSigning);
out.writeStringCollection(certificates.identityProviderMetadataSigning);
}
public String getDocId() {
return docId;
}
public void setDocId(String docId) {
this.docId = docId;
}
public void setName(String name) {
this.name = name;
}
public void setEntityId(String entityId) {
this.entityId = entityId;
}
public void setAcs(String acs) {
this.acs = acs;
}
public void setEnabled(boolean enabled) {
this.enabled = enabled;
}
public void setCreated(Instant created) {
this.created = created;
}
public void setLastModified(Instant lastModified) {
this.lastModified = lastModified;
}
public void setCreatedMillis(Long millis) {
this.created = Instant.ofEpochMilli(millis);
}
public void setLastModifiedMillis(Long millis) {
this.lastModified = Instant.ofEpochMilli(millis);
}
public void setNameIdFormat(String nameIdFormat) {
this.nameIdFormat = nameIdFormat;
}
public void setSignMessages(Collection<String> signMessages) {
this.signMessages = signMessages == null ? Set.of() : Set.copyOf(signMessages);
}
public void setAuthenticationExpiryMillis(Long authenticationExpiryMillis) {
this.authenticationExpiryMillis = authenticationExpiryMillis;
}
public void setAuthenticationExpiry(Duration authnExpiry) {
this.authenticationExpiryMillis = authnExpiry == null ? null : authnExpiry.toMillis();
}
public Duration getAuthenticationExpiry() {
return authenticationExpiryMillis == null ? null : Duration.ofMillis(this.authenticationExpiryMillis);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final SamlServiceProviderDocument that = (SamlServiceProviderDocument) o;
return Objects.equals(docId, that.docId)
&& Objects.equals(name, that.name)
&& Objects.equals(entityId, that.entityId)
&& Objects.equals(acs, that.acs)
&& Objects.equals(enabled, that.enabled)
&& Objects.equals(created, that.created)
&& Objects.equals(lastModified, that.lastModified)
&& Objects.equals(nameIdFormat, that.nameIdFormat)
&& Objects.equals(authenticationExpiryMillis, that.authenticationExpiryMillis)
&& Objects.equals(certificates, that.certificates)
&& Objects.equals(privileges, that.privileges)
&& Objects.equals(attributeNames, that.attributeNames);
}
@Override
public int hashCode() {
return Objects.hash(
docId,
name,
entityId,
acs,
enabled,
created,
lastModified,
nameIdFormat,
authenticationExpiryMillis,
certificates,
privileges,
attributeNames
);
}
private static final ObjectParser<SamlServiceProviderDocument, SamlServiceProviderDocument> DOC_PARSER = new ObjectParser<>(
"service_provider_doc",
true,
SamlServiceProviderDocument::new
);
private static final ObjectParser<Privileges, Void> PRIVILEGES_PARSER = new ObjectParser<>("service_provider_priv", true, null);
private static final ObjectParser<AttributeNames, Void> ATTRIBUTES_PARSER = new ObjectParser<>("service_provider_attr", true, null);
private static final ObjectParser<Certificates, Void> CERTIFICATES_PARSER = new ObjectParser<>("service_provider_cert", true, null);
private static final BiConsumer<SamlServiceProviderDocument, Object> NULL_CONSUMER = (doc, obj) -> {};
static {
DOC_PARSER.declareString(SamlServiceProviderDocument::setName, Fields.NAME);
DOC_PARSER.declareString(SamlServiceProviderDocument::setEntityId, Fields.ENTITY_ID);
DOC_PARSER.declareString(SamlServiceProviderDocument::setAcs, Fields.ACS);
DOC_PARSER.declareBoolean(SamlServiceProviderDocument::setEnabled, Fields.ENABLED);
DOC_PARSER.declareLong(SamlServiceProviderDocument::setCreatedMillis, Fields.CREATED_DATE);
DOC_PARSER.declareLong(SamlServiceProviderDocument::setLastModifiedMillis, Fields.LAST_MODIFIED);
DOC_PARSER.declareStringOrNull(SamlServiceProviderDocument::setNameIdFormat, Fields.NAME_ID);
DOC_PARSER.declareStringArray(SamlServiceProviderDocument::setSignMessages, Fields.SIGN_MSGS);
DOC_PARSER.declareField(
SamlServiceProviderDocument::setAuthenticationExpiryMillis,
parser -> parser.currentToken() == XContentParser.Token.VALUE_NULL ? null : parser.longValue(),
Fields.AUTHN_EXPIRY,
ObjectParser.ValueType.LONG_OR_NULL
);
DOC_PARSER.declareObject(NULL_CONSUMER, (parser, doc) -> PRIVILEGES_PARSER.parse(parser, doc.privileges, null), Fields.PRIVILEGES);
PRIVILEGES_PARSER.declareString(Privileges::setResource, Fields.Privileges.RESOURCE);
PRIVILEGES_PARSER.declareStringArray(Privileges::setRolePatterns, Fields.Privileges.ROLES);
DOC_PARSER.declareObject(NULL_CONSUMER, (p, doc) -> ATTRIBUTES_PARSER.parse(p, doc.attributeNames, null), Fields.ATTRIBUTES);
ATTRIBUTES_PARSER.declareString(AttributeNames::setPrincipal, Fields.Attributes.PRINCIPAL);
ATTRIBUTES_PARSER.declareStringOrNull(AttributeNames::setEmail, Fields.Attributes.EMAIL);
ATTRIBUTES_PARSER.declareStringOrNull(AttributeNames::setName, Fields.Attributes.NAME);
ATTRIBUTES_PARSER.declareStringOrNull(AttributeNames::setRoles, Fields.Attributes.ROLES);
ATTRIBUTES_PARSER.declareStringArray(AttributeNames::setExtensions, Fields.Attributes.EXTENSIONS);
DOC_PARSER.declareObject(NULL_CONSUMER, (p, doc) -> CERTIFICATES_PARSER.parse(p, doc.certificates, null), Fields.CERTIFICATES);
CERTIFICATES_PARSER.declareStringArray(Certificates::setServiceProviderSigning, Fields.Certificates.SP_SIGNING);
CERTIFICATES_PARSER.declareStringArray(Certificates::setIdentityProviderSigning, Fields.Certificates.IDP_SIGNING);
CERTIFICATES_PARSER.declareStringArray(Certificates::setIdentityProviderMetadataSigning, Fields.Certificates.IDP_METADATA);
}
public static SamlServiceProviderDocument fromXContent(String docId, XContentParser parser) throws IOException {
SamlServiceProviderDocument doc = new SamlServiceProviderDocument();
doc.setDocId(docId);
return DOC_PARSER.parse(parser, doc, doc);
}
public ValidationException validate() {
final ValidationException validation = new ValidationException();
if (Strings.isNullOrEmpty(name)) {
validation.addValidationError("field [" + Fields.NAME + "] is required, but was [" + name + "]");
}
if (Strings.isNullOrEmpty(entityId)) {
validation.addValidationError("field [" + Fields.ENTITY_ID + "] is required, but was [" + entityId + "]");
}
if (Strings.isNullOrEmpty(acs)) {
validation.addValidationError("field [" + Fields.ACS + "] is required, but was [" + acs + "]");
}
if (created == null) {
validation.addValidationError("field [" + Fields.CREATED_DATE + "] is required, but was [" + created + "]");
}
if (lastModified == null) {
validation.addValidationError("field [" + Fields.LAST_MODIFIED + "] is required, but was [" + lastModified + "]");
}
final Set<String> invalidSignOptions = Sets.difference(signMessages, ALLOWED_SIGN_MESSAGES);
if (invalidSignOptions.isEmpty() == false) {
validation.addValidationError(
"the values ["
+ invalidSignOptions
+ "] are not permitted for ["
+ Fields.SIGN_MSGS
+ "] - permitted values are ["
+ ALLOWED_SIGN_MESSAGES
+ "]"
);
}
if (Strings.isNullOrEmpty(privileges.resource)) {
validation.addValidationError(
"field [" + Fields.PRIVILEGES + "." + Fields.Privileges.RESOURCE + "] is required, but was [" + privileges.resource + "]"
);
}
if (Strings.isNullOrEmpty(attributeNames.principal)) {
validation.addValidationError(
"field ["
+ Fields.ATTRIBUTES
+ "."
+ Fields.Attributes.PRINCIPAL
+ "] is required, but was ["
+ attributeNames.principal
+ "]"
);
}
if (validation.validationErrors().isEmpty()) {
return null;
} else {
return validation;
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields.NAME.getPreferredName(), name);
builder.field(Fields.ENTITY_ID.getPreferredName(), entityId);
builder.field(Fields.ACS.getPreferredName(), acs);
builder.field(Fields.ENABLED.getPreferredName(), enabled);
builder.field(Fields.CREATED_DATE.getPreferredName(), created == null ? null : created.toEpochMilli());
builder.field(Fields.LAST_MODIFIED.getPreferredName(), lastModified == null ? null : lastModified.toEpochMilli());
builder.field(Fields.NAME_ID.getPreferredName(), nameIdFormat);
builder.field(Fields.SIGN_MSGS.getPreferredName(), signMessages == null ? List.of() : signMessages);
builder.field(Fields.AUTHN_EXPIRY.getPreferredName(), authenticationExpiryMillis);
builder.startObject(Fields.PRIVILEGES.getPreferredName());
builder.field(Fields.Privileges.RESOURCE.getPreferredName(), privileges.resource);
builder.field(Fields.Privileges.ROLES.getPreferredName(), privileges.rolePatterns);
builder.endObject();
builder.startObject(Fields.ATTRIBUTES.getPreferredName());
builder.field(Fields.Attributes.PRINCIPAL.getPreferredName(), attributeNames.principal);
builder.field(Fields.Attributes.EMAIL.getPreferredName(), attributeNames.email);
builder.field(Fields.Attributes.NAME.getPreferredName(), attributeNames.name);
builder.field(Fields.Attributes.ROLES.getPreferredName(), attributeNames.roles);
if (attributeNames.extensions != null && attributeNames.extensions.isEmpty() == false) {
builder.field(Fields.Attributes.EXTENSIONS.getPreferredName(), attributeNames.extensions);
}
builder.endObject();
builder.startObject(Fields.CERTIFICATES.getPreferredName());
builder.field(Fields.Certificates.SP_SIGNING.getPreferredName(), certificates.serviceProviderSigning);
builder.field(Fields.Certificates.IDP_SIGNING.getPreferredName(), certificates.identityProviderSigning);
builder.field(Fields.Certificates.IDP_METADATA.getPreferredName(), certificates.identityProviderMetadataSigning);
builder.endObject();
return builder.endObject();
}
public
|
Certificates
|
java
|
spring-projects__spring-framework
|
spring-beans/src/main/java/org/springframework/beans/InvalidPropertyException.java
|
{
"start": 913,
"end": 2108
}
|
class ____ extends FatalBeanException {
private final Class<?> beanClass;
private final String propertyName;
/**
* Create a new InvalidPropertyException.
* @param beanClass the offending bean class
* @param propertyName the offending property
* @param msg the detail message
*/
public InvalidPropertyException(Class<?> beanClass, String propertyName, String msg) {
this(beanClass, propertyName, msg, null);
}
/**
* Create a new InvalidPropertyException.
* @param beanClass the offending bean class
* @param propertyName the offending property
* @param msg the detail message
* @param cause the root cause
*/
public InvalidPropertyException(Class<?> beanClass, String propertyName, String msg, @Nullable Throwable cause) {
super("Invalid property '" + propertyName + "' of bean class [" + beanClass.getName() + "]: " + msg, cause);
this.beanClass = beanClass;
this.propertyName = propertyName;
}
/**
* Return the offending bean class.
*/
public Class<?> getBeanClass() {
return this.beanClass;
}
/**
* Return the name of the offending property.
*/
public String getPropertyName() {
return this.propertyName;
}
}
|
InvalidPropertyException
|
java
|
apache__camel
|
components/camel-bindy/src/test/java/org/apache/camel/dataformat/bindy/csv/BindySimpleCsvFunctionWithExternalMethodTest.java
|
{
"start": 1435,
"end": 3716
}
|
class ____ extends CamelTestSupport {
@EndpointInject("mock:resultMarshal1")
private MockEndpoint mockEndPointMarshal1;
@EndpointInject("mock:resultUnMarshal1")
private MockEndpoint mockEndPointUnMarshal1;
@EndpointInject("mock:resultMarshal2")
private MockEndpoint mockEndPointMarshal2;
@EndpointInject("mock:resultUnMarshal2")
private MockEndpoint mockEndPointUnMarshal2;
public static String replaceToBar(String fooString) {
return fooString.replaceAll("foo", "bar");
}
@Test
public void testUnMarshallMessage() throws Exception {
mockEndPointMarshal1.expectedMessageCount(1);
mockEndPointMarshal1
.expectedBodiesReceived(
"\"12\"\",3\",\"abc,d\"\"foo\"\",abc\",\"10\"" + ConverterUtils.getStringCarriageReturn("WINDOWS"));
BindyCsvRowFormat7621 body = new BindyCsvRowFormat7621();
body.setFirstField("12\",3");
body.setSecondField("abc,d\"foo\",abc");
body.setNumber(new BigDecimal(10));
template.sendBody("direct:startMarshal1", body);
MockEndpoint.assertIsSatisfied(context);
BindyCsvRowFormat7621 model
= mockEndPointUnMarshal1.getReceivedExchanges().get(0).getIn().getBody(BindyCsvRowFormat7621.class);
assertEquals("12\",3", model.getFirstField());
assertEquals("abc,d\"bar\",abc", model.getSecondField());
assertEquals(new BigDecimal(10), model.getNumber());
}
@Override
protected RoutesBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
BindyCsvDataFormat camelDataFormat1 = new BindyCsvDataFormat(BindyCsvRowFormat7621.class);
from("direct:startMarshal1")
.marshal(camelDataFormat1)
.to("mock:resultMarshal1")
.to("direct:middle1");
from("direct:middle1")
.unmarshal(camelDataFormat1)
.to("mock:resultUnMarshal1");
}
};
}
@CsvRecord(separator = ",", quote = "\"", quoting = true, quotingEscaped = false)
public static
|
BindySimpleCsvFunctionWithExternalMethodTest
|
java
|
spring-projects__spring-framework
|
spring-web/src/test/java/org/springframework/http/server/reactive/ListenerWriteProcessorTests.java
|
{
"start": 4476,
"end": 4873
}
|
class ____ implements Subscriber<Void> {
private Throwable error;
public Throwable getError() {
return this.error;
}
@Override
public void onSubscribe(Subscription subscription) {
}
@Override
public void onNext(Void aVoid) {
}
@Override
public void onError(Throwable ex) {
this.error = ex;
}
@Override
public void onComplete() {
}
}
}
|
TestResultSubscriber
|
java
|
apache__kafka
|
group-coordinator/src/main/java/org/apache/kafka/coordinator/group/assignor/RangeSet.java
|
{
"start": 1232,
"end": 6057
}
|
class ____ implements Set<Integer> {
private final int from;
private final int to;
/**
* Constructs a {@code RangeSet} with the specified range.
*
* @param from The starting value (inclusive) of the range.
* @param to The ending value (exclusive) of the range.
*/
public RangeSet(int from, int to) {
this.from = from;
this.to = to;
if (to < from) {
throw new IllegalArgumentException("Invalid range: to must be greater than or equal to from");
}
if ((long) to - (long) from > Integer.MAX_VALUE) {
throw new IllegalArgumentException("Range exceeds the maximum size of Integer.MAX_VALUE");
}
}
@Override
public int size() {
return to - from;
}
@Override
public boolean isEmpty() {
return size() == 0;
}
@Override
public boolean contains(Object o) {
if (o instanceof Integer) {
int value = (Integer) o;
return value >= from && value < to;
}
return false;
}
@Override
public Iterator<Integer> iterator() {
return new Iterator<>() {
private int current = from;
@Override
public boolean hasNext() {
return current < to;
}
@Override
public Integer next() {
if (!hasNext()) throw new NoSuchElementException();
return current++;
}
};
}
@Override
public Object[] toArray() {
Object[] array = new Object[size()];
for (int i = 0; i < size(); i++) {
array[i] = from + i;
}
return array;
}
@Override
@SuppressWarnings("unchecked")
public <T> T[] toArray(T[] a) {
int size = size();
if (a.length < size) {
// Create a new array of the same type as a with the correct size
a = (T[]) Array.newInstance(a.getClass().getComponentType(), size);
}
for (int i = 0; i < size; i++) {
a[i] = (T) Integer.valueOf(from + i);
}
if (a.length > size) {
a[size] = null;
}
return a;
}
@Override
public boolean add(Integer integer) {
throw new UnsupportedOperationException();
}
@Override
public boolean remove(Object o) {
throw new UnsupportedOperationException();
}
@Override
public boolean containsAll(Collection<?> c) {
for (Object o : c) {
if (!contains(o)) return false;
}
return true;
}
@Override
public boolean addAll(Collection<? extends Integer> c) {
throw new UnsupportedOperationException();
}
@Override
public boolean retainAll(Collection<?> c) {
throw new UnsupportedOperationException();
}
@Override
public boolean removeAll(Collection<?> c) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {
throw new UnsupportedOperationException();
}
@Override
public String toString() {
return "RangeSet(from=" + from + " (inclusive), to=" + to + " (exclusive))";
}
/**
* Compares the specified object with this set for equality.
* Returns {@code true} if the specified object is also a set,
* the two sets have the same size, and every member of the specified
* set is contained in this set.
*
* @param o object to be compared for equality with this set
* @return {@code true} if the specified object is equal to this set
*/
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Set<?> otherSet)) return false;
if (o instanceof RangeSet other) {
if (this.size() == 0 && other.size() == 0) return true;
return this.from == other.from && this.to == other.to;
}
if (otherSet.size() != this.size()) return false;
for (int i = from; i < to; i++) {
if (!otherSet.contains(i)) return false;
}
return true;
}
@Override
public int hashCode() {
// The hash code of a Set is defined as the sum of the hash codes of its elements.
// The hash code of an integer is the integer itself.
// The sum of the integers from 1 to n is n * (n + 1) / 2.
// To get the sum of the integers from 1 + k to n + k, we can add n * k.
// So our hash code comes out to n * (from + to - 1) / 2.
// The arithmetic has to be done using longs, since the division by 2 is equivalent to
// shifting the 33rd bit right.
long sum = size() * ((long) from + (long) to - 1) / 2;
return (int) sum;
}
}
|
RangeSet
|
java
|
spring-projects__spring-boot
|
core/spring-boot-test/src/test/java/org/springframework/boot/test/context/bootstrap/SpringBootTestContextBootstrapperIntegrationTests.java
|
{
"start": 2075,
"end": 2179
}
|
class ____ {
@Bean
ExampleBean exampleBean() {
return new ExampleBean();
}
}
static
|
TestConfig
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
|
{
"start": 1649,
"end": 3299
}
|
class ____ implements Daemon {
static final Logger LOG =
LoggerFactory.getLogger(PrivilegedNfsGatewayStarter.class);
private String[] args = null;
private DatagramSocket registrationSocket = null;
private Nfs3 nfs3Server = null;
@Override
public void init(DaemonContext context) throws Exception {
System.err.println("Initializing privileged NFS client socket...");
NfsConfiguration conf = new NfsConfiguration();
int clientPort = conf.getInt(NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY,
NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_DEFAULT);
if (clientPort < 1 || clientPort > 1023) {
throw new RuntimeException("Must start privileged NFS server with '" +
NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY + "' configured to a " +
"privileged port.");
}
try {
InetSocketAddress socketAddress =
new InetSocketAddress("localhost", clientPort);
registrationSocket = new DatagramSocket(null);
registrationSocket.setReuseAddress(true);
registrationSocket.bind(socketAddress);
} catch (SocketException e) {
LOG.error("Init failed for port=" + clientPort, e);
throw e;
}
args = context.getArguments();
}
@Override
public void start() throws Exception {
nfs3Server = Nfs3.startService(args, registrationSocket);
}
@Override
public void stop() throws Exception {
if (nfs3Server != null) {
nfs3Server.stop();
}
}
@Override
public void destroy() {
if (registrationSocket != null && !registrationSocket.isClosed()) {
registrationSocket.close();
}
}
}
|
PrivilegedNfsGatewayStarter
|
java
|
apache__flink
|
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/ExecNodeGraph.java
|
{
"start": 1141,
"end": 1884
}
|
class ____ {
private final FlinkVersion flinkVersion;
private final List<ExecNode<?>> rootNodes;
public ExecNodeGraph(List<ExecNode<?>> rootNodes) {
this(FlinkVersion.current(), rootNodes);
}
public ExecNodeGraph(FlinkVersion flinkVersion, List<ExecNode<?>> rootNodes) {
this.flinkVersion = checkNotNull(flinkVersion, "The flinkVersion should not be null.");
this.rootNodes = checkNotNull(rootNodes, "The rootNodes should not be null.");
checkArgument(!rootNodes.isEmpty(), "The rootNodes should not be empty.");
}
public List<ExecNode<?>> getRootNodes() {
return rootNodes;
}
public FlinkVersion getFlinkVersion() {
return flinkVersion;
}
}
|
ExecNodeGraph
|
java
|
spring-projects__spring-framework
|
spring-context-support/src/main/java/org/springframework/scheduling/quartz/SchedulerAccessorBean.java
|
{
"start": 1426,
"end": 4322
}
|
class ____ extends SchedulerAccessor implements BeanFactoryAware, InitializingBean {
private @Nullable String schedulerName;
private @Nullable Scheduler scheduler;
private @Nullable BeanFactory beanFactory;
/**
* Specify the Quartz {@link Scheduler} to operate on via its scheduler name in the Spring
* application context or also in the Quartz {@link org.quartz.impl.SchedulerRepository}.
* <p>Schedulers can be registered in the repository through custom bootstrapping,
* for example, via the {@link org.quartz.impl.StdSchedulerFactory} or
* {@link org.quartz.impl.DirectSchedulerFactory} factory classes.
* However, in general, it's preferable to use Spring's {@link SchedulerFactoryBean}
* which includes the job/trigger/listener capabilities of this accessor as well.
* <p>If not specified, this accessor will try to retrieve a default {@link Scheduler}
* bean from the containing application context.
*/
public void setSchedulerName(String schedulerName) {
this.schedulerName = schedulerName;
}
/**
* Specify the Quartz {@link Scheduler} instance to operate on.
* <p>If not specified, this accessor will try to retrieve a default {@link Scheduler}
* bean from the containing application context.
*/
public void setScheduler(Scheduler scheduler) {
this.scheduler = scheduler;
}
/**
* Return the Quartz Scheduler instance that this accessor operates on.
*/
@Override
public Scheduler getScheduler() {
Assert.state(this.scheduler != null, "No Scheduler set");
return this.scheduler;
}
@Override
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = beanFactory;
}
@Override
public void afterPropertiesSet() throws SchedulerException {
if (this.scheduler == null) {
this.scheduler = (this.schedulerName != null ? findScheduler(this.schedulerName) : findDefaultScheduler());
}
registerListeners();
registerJobsAndTriggers();
}
protected Scheduler findScheduler(String schedulerName) throws SchedulerException {
if (this.beanFactory instanceof ListableBeanFactory lbf) {
String[] beanNames = lbf.getBeanNamesForType(Scheduler.class);
for (String beanName : beanNames) {
Scheduler schedulerBean = (Scheduler) lbf.getBean(beanName);
if (schedulerName.equals(schedulerBean.getSchedulerName())) {
return schedulerBean;
}
}
}
Scheduler schedulerInRepo = SchedulerRepository.getInstance().lookup(schedulerName);
if (schedulerInRepo == null) {
throw new IllegalStateException("No Scheduler named '" + schedulerName + "' found");
}
return schedulerInRepo;
}
protected Scheduler findDefaultScheduler() {
if (this.beanFactory != null) {
return this.beanFactory.getBean(Scheduler.class);
}
else {
throw new IllegalStateException(
"No Scheduler specified, and cannot find a default Scheduler without a BeanFactory");
}
}
}
|
SchedulerAccessorBean
|
java
|
apache__avro
|
lang/java/avro/src/main/java/org/apache/avro/specific/SpecificData.java
|
{
"start": 21296,
"end": 21453
}
|
class ____ be serialized with toString(). */
protected boolean isStringable(Class<?> c) {
return stringableClasses.contains(c);
}
/** True if a
|
should
|
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/search/runtime/IpScriptFieldExistsQueryTests.java
|
{
"start": 606,
"end": 1976
}
|
class ____ extends AbstractIpScriptFieldQueryTestCase<IpScriptFieldExistsQuery> {
@Override
protected IpScriptFieldExistsQuery createTestInstance() {
return new IpScriptFieldExistsQuery(randomScript(), leafFactory, randomAlphaOfLength(5));
}
@Override
protected IpScriptFieldExistsQuery copy(IpScriptFieldExistsQuery orig) {
return new IpScriptFieldExistsQuery(orig.script(), leafFactory, orig.fieldName());
}
@Override
protected IpScriptFieldExistsQuery mutate(IpScriptFieldExistsQuery orig) {
if (randomBoolean()) {
new IpScriptFieldExistsQuery(randomValueOtherThan(orig.script(), this::randomScript), leafFactory, orig.fieldName());
}
return new IpScriptFieldExistsQuery(orig.script(), leafFactory, orig.fieldName() + "modified");
}
@Override
public void testMatches() {
assertTrue(createTestInstance().matches(new BytesRef[0], randomIntBetween(1, Integer.MAX_VALUE)));
assertFalse(createTestInstance().matches(new BytesRef[0], 0));
assertFalse(createTestInstance().matches(new BytesRef[] { new BytesRef("not even an IP") }, 0));
}
@Override
protected void assertToString(IpScriptFieldExistsQuery query) {
assertThat(query.toString(query.fieldName()), equalTo("IpScriptFieldExistsQuery"));
}
}
|
IpScriptFieldExistsQueryTests
|
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLocalDirsHandlerService.java
|
{
"start": 1695,
"end": 7048
}
|
class ____ {
private static final File testDir = new File("target",
TestDirectoryCollection.class.getName()).getAbsoluteFile();
private static final File testFile = new File(testDir, "testfile");
@BeforeEach
public void setup() throws IOException {
testDir.mkdirs();
testFile.createNewFile();
}
@AfterEach
public void teardown() {
FileUtil.fullyDelete(testDir);
}
@Test
public void testDirStructure() throws Exception {
Configuration conf = new YarnConfiguration();
String localDir1 = new File("file:///" + testDir, "localDir1").getPath();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1);
String logDir1 = new File("file:///" + testDir, "logDir1").getPath();
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1);
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
dirSvc.init(conf);
assertEquals(1, dirSvc.getLocalDirs().size());
dirSvc.close();
}
@Test
public void testValidPathsDirHandlerService() throws Exception {
Configuration conf = new YarnConfiguration();
String localDir1 = new File("file:///" + testDir, "localDir1").getPath();
String localDir2 = new File("hdfs:///" + testDir, "localDir2").getPath();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
String logDir1 = new File("file:///" + testDir, "logDir1").getPath();
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1);
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
try {
dirSvc.init(conf);
fail("Service should have thrown an exception due to wrong URI");
} catch (YarnRuntimeException e) {
}
assertEquals(STATE.STOPPED,
dirSvc.getServiceState(), "Service should not be inited");
dirSvc.close();
}
@Test
public void testGetFullDirs() throws Exception {
Configuration conf = new YarnConfiguration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
FileContext localFs = FileContext.getLocalFSFileContext(conf);
String localDir1 = new File(testDir, "localDir1").getPath();
String localDir2 = new File(testDir, "localDir2").getPath();
String logDir1 = new File(testDir, "logDir1").getPath();
String logDir2 = new File(testDir, "logDir2").getPath();
Path localDir1Path = new Path(localDir1);
Path logDir1Path = new Path(logDir1);
FsPermission dirPermissions = new FsPermission((short) 0410);
localFs.mkdir(localDir1Path, dirPermissions, true);
localFs.mkdir(logDir1Path, dirPermissions, true);
conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
0.0f);
NodeManagerMetrics nm = NodeManagerMetrics.create();
LocalDirsHandlerService dirSvc = new LocalDirsHandlerService(nm);
dirSvc.init(conf);
assertEquals(0, dirSvc.getLocalDirs().size());
assertEquals(0, dirSvc.getLogDirs().size());
assertEquals(1, dirSvc.getDiskFullLocalDirs().size());
assertEquals(1, dirSvc.getDiskFullLogDirs().size());
// check the metrics
assertEquals(2, nm.getBadLocalDirs());
assertEquals(2, nm.getBadLogDirs());
assertEquals(0, nm.getGoodLocalDirsDiskUtilizationPerc());
assertEquals(0, nm.getGoodLogDirsDiskUtilizationPerc());
assertEquals("",
dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS));
assertEquals("",
dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS));
assertEquals(localDir1 + "," + localDir2,
dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS));
assertEquals(logDir1 + "," + logDir2,
dirSvc.getConfig().get(YarnConfiguration.NM_LOG_DIRS));
conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
100.0f);
nm = NodeManagerMetrics.create();
dirSvc = new LocalDirsHandlerService(nm);
dirSvc.init(conf);
assertEquals(1, dirSvc.getLocalDirs().size());
assertEquals(1, dirSvc.getLogDirs().size());
assertEquals(0, dirSvc.getDiskFullLocalDirs().size());
assertEquals(0, dirSvc.getDiskFullLogDirs().size());
// check the metrics
File dir = new File(localDir1);
int utilizationPerc =
(int) ((dir.getTotalSpace() - dir.getUsableSpace()) * 100 /
dir.getTotalSpace());
assertEquals(1, nm.getBadLocalDirs());
assertEquals(1, nm.getBadLogDirs());
assertEquals(utilizationPerc,
nm.getGoodLocalDirsDiskUtilizationPerc());
assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc());
assertEquals(new Path(localDir2).toString(),
dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOCAL_DIRS));
assertEquals(new Path(logDir2).toString(),
dirSvc.getConfig().get(LocalDirsHandlerService.NM_GOOD_LOG_DIRS));
assertEquals(localDir1 + "," + localDir2,
dirSvc.getConfig().get(YarnConfiguration.NM_LOCAL_DIRS));
assertEquals(logDir1 + "," + logDir2,
dirSvc.getConfig().get(YarnConfiguration.NM_LOG_DIRS));
FileUtils.deleteDirectory(new File(localDir1));
FileUtils.deleteDirectory(new File(localDir2));
FileUtils.deleteDirectory(new File(logDir1));
FileUtils.deleteDirectory(new File(logDir2));
dirSvc.close();
}
}
|
TestLocalDirsHandlerService
|
java
|
square__retrofit
|
samples/src/main/java/com/example/retrofit/SimpleService.java
|
{
"start": 1194,
"end": 2197
}
|
interface ____ {
@GET("/repos/{owner}/{repo}/contributors")
Call<List<Contributor>> contributors(@Path("owner") String owner, @Path("repo") String repo);
}
public static void main(String... args) throws IOException {
// Create a very simple REST adapter which points the GitHub API.
Retrofit retrofit =
new Retrofit.Builder()
.baseUrl(API_URL)
.addConverterFactory(GsonConverterFactory.create())
.build();
// Create an instance of our GitHub API interface.
GitHub github = retrofit.create(GitHub.class);
// Create a call instance for looking up Retrofit contributors.
Call<List<Contributor>> call = github.contributors("square", "retrofit");
// Fetch and print a list of the contributors to the library.
List<Contributor> contributors = call.execute().body();
for (Contributor contributor : contributors) {
System.out.println(contributor.login + " (" + contributor.contributions + ")");
}
}
}
|
GitHub
|
java
|
apache__camel
|
core/camel-main/src/main/java/org/apache/camel/main/KubernetesConfigmapsVaultConfigurationProperties.java
|
{
"start": 1091,
"end": 2390
}
|
class ____ extends KubernetesConfigMapVaultConfiguration
implements BootstrapCloseable {
private MainConfigurationProperties parent;
public KubernetesConfigmapsVaultConfigurationProperties(MainConfigurationProperties parent) {
this.parent = parent;
}
public MainConfigurationProperties end() {
return parent;
}
@Override
public void close() {
parent = null;
}
// getter and setters
// --------------------------------------------------------------
// these are inherited from the parent class
// fluent builders
// --------------------------------------------------------------
/**
* Whether to automatically reload Camel upon Configmaps being updated in Kubernetes Cluster.
*/
public KubernetesConfigmapsVaultConfigurationProperties withRefreshEnabled(boolean refreshEnabled) {
setRefreshEnabled(refreshEnabled);
return this;
}
/**
* Specify the configmaps names (or pattern) to check for updates. Multiple configmaps can be separated by comma.
*/
public KubernetesConfigmapsVaultConfigurationProperties withConfigmaps(String configmaps) {
setConfigmaps(configmaps);
return this;
}
}
|
KubernetesConfigmapsVaultConfigurationProperties
|
java
|
grpc__grpc-java
|
examples/src/main/java/io/grpc/examples/helloworld/HelloWorldServer.java
|
{
"start": 1029,
"end": 3434
}
|
class ____ {
private static final Logger logger = Logger.getLogger(HelloWorldServer.class.getName());
private Server server;
private void start() throws IOException {
/* The port on which the server should run */
int port = 50051;
/*
* By default gRPC uses a global, shared Executor.newCachedThreadPool() for gRPC callbacks into
* your application. This is convenient, but can cause an excessive number of threads to be
* created if there are many RPCs. It is often better to limit the number of threads your
* application uses for processing and let RPCs queue when the CPU is saturated.
* The appropriate number of threads varies heavily between applications.
* Async application code generally does not need more threads than CPU cores.
*/
ExecutorService executor = Executors.newFixedThreadPool(2);
server = Grpc.newServerBuilderForPort(port, InsecureServerCredentials.create())
.executor(executor)
.addService(new GreeterImpl())
.build()
.start();
logger.info("Server started, listening on " + port);
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
// Use stderr here since the logger may have been reset by its JVM shutdown hook.
System.err.println("*** shutting down gRPC server since JVM is shutting down");
try {
HelloWorldServer.this.stop();
} catch (InterruptedException e) {
if (server != null) {
server.shutdownNow();
}
e.printStackTrace(System.err);
} finally {
executor.shutdown();
}
System.err.println("*** server shut down");
}
});
}
private void stop() throws InterruptedException {
if (server != null) {
server.shutdown().awaitTermination(30, TimeUnit.SECONDS);
}
}
/**
* Await termination on the main thread since the grpc library uses daemon threads.
*/
private void blockUntilShutdown() throws InterruptedException {
if (server != null) {
server.awaitTermination();
}
}
/**
* Main launches the server from the command line.
*/
public static void main(String[] args) throws IOException, InterruptedException {
final HelloWorldServer server = new HelloWorldServer();
server.start();
server.blockUntilShutdown();
}
static
|
HelloWorldServer
|
java
|
apache__kafka
|
tools/src/main/java/org/apache/kafka/tools/LeaderElectionCommand.java
|
{
"start": 2305,
"end": 10953
}
|
class ____ {
private static final Logger LOG = LoggerFactory.getLogger(LeaderElectionCommand.class);
private static final DecodeJson.DecodeString STRING = new DecodeJson.DecodeString();
private static final DecodeJson.DecodeInteger INT = new DecodeJson.DecodeInteger();
public static void main(String... args) {
Exit.exit(mainNoExit(args));
}
static int mainNoExit(String... args) {
try {
run(Duration.ofMillis(30000), args);
return 0;
} catch (Throwable e) {
System.err.println(e.getMessage());
System.err.println(Utils.stackTrace(e));
return 1;
}
}
static void run(Duration timeoutMs, String... args) throws Exception {
LeaderElectionCommandOptions commandOptions = new LeaderElectionCommandOptions(args);
commandOptions.maybePrintHelpOrVersion();
commandOptions.validate();
ElectionType electionType = commandOptions.getElectionType();
Optional<Set<TopicPartition>> jsonFileTopicPartitions =
Optional.ofNullable(commandOptions.getPathToJsonFile())
.map(LeaderElectionCommand::parseReplicaElectionData);
Optional<String> topicOption = Optional.ofNullable(commandOptions.getTopic());
Optional<Integer> partitionOption = Optional.ofNullable(commandOptions.getPartition());
final Optional<Set<TopicPartition>> singleTopicPartition =
(topicOption.isPresent() && partitionOption.isPresent()) ?
Optional.of(Set.of(new TopicPartition(topicOption.get(), partitionOption.get()))) :
Optional.empty();
/* Note: No need to look at --all-topic-partitions as we want this to be null if it is use.
* The validate function should be checking that this option is required if the --topic and --path-to-json-file
* are not specified.
*/
Optional<Set<TopicPartition>> topicPartitions = jsonFileTopicPartitions.or(() -> singleTopicPartition);
String commandConfigFile;
if (commandOptions.hasAdminClientConfig()) {
System.out.println("Option --admin.config has been deprecated and will be removed in a future version. Use --command-config instead.");
commandConfigFile = commandOptions.getAdminClientConfig();
} else {
commandConfigFile = commandOptions.getCommandConfig();
}
Properties props = (commandConfigFile != null) ? Utils.loadProps(commandConfigFile) : new Properties();
props.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, commandOptions.getBootstrapServer());
if (!props.containsKey(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG)) {
props.setProperty(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, Integer.toString((int) timeoutMs.toMillis()));
}
if (!props.containsKey(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG)) {
props.setProperty(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, Integer.toString((int) (timeoutMs.toMillis() / 2)));
}
try (Admin adminClient = Admin.create(props)) {
electLeaders(adminClient, electionType, topicPartitions);
}
}
private static void electLeaders(Admin client, ElectionType electionType, Optional<Set<TopicPartition>> partitions) {
LOG.debug("Calling AdminClient.electLeaders({}, {})", electionType, partitions.orElse(null));
Map<TopicPartition, Optional<Throwable>> electionResults;
try {
electionResults = client.electLeaders(electionType, partitions.orElse(null)).partitions().get();
} catch (ExecutionException e) {
if (e.getCause() instanceof TimeoutException) {
String message = "Timeout waiting for election results";
System.out.println(message);
throw new AdminCommandFailedException(message, e.getCause());
} else if (e.getCause() instanceof ClusterAuthorizationException) {
String message = "Not authorized to perform leader election";
System.out.println(message);
throw new AdminCommandFailedException(message, e.getCause().getCause());
} else {
throw new RuntimeException(e);
}
} catch (InterruptedException e) {
System.out.println("Error while making request");
throw new RuntimeException(e);
}
Set<TopicPartition> succeeded = new HashSet<>();
Set<TopicPartition> noop = new HashSet<>();
Map<TopicPartition, Throwable> failed = new HashMap<>();
electionResults.forEach((key, error) -> {
if (error.isPresent()) {
if (error.get() instanceof ElectionNotNeededException) {
noop.add(key);
} else {
failed.put(key, error.get());
}
} else {
succeeded.add(key);
}
});
if (!succeeded.isEmpty()) {
String partitionsAsString = succeeded.stream()
.map(TopicPartition::toString)
.collect(Collectors.joining(", "));
System.out.println(String.format("Successfully completed leader election (%s) for partitions %s",
electionType, partitionsAsString));
}
if (!noop.isEmpty()) {
String partitionsAsString = noop.stream()
.map(TopicPartition::toString)
.collect(Collectors.joining(", "));
System.out.println(String.format("Valid replica already elected for partitions %s", partitionsAsString));
}
if (!failed.isEmpty()) {
AdminCommandFailedException rootException =
new AdminCommandFailedException(String.format("%s replica(s) could not be elected", failed.size()));
failed.forEach((key, value) -> {
System.out.println(
String.format(
"Error completing leader election (%s) for partition: %s: %s",
electionType,
key,
value
)
);
rootException.addSuppressed(value);
});
throw rootException;
}
}
private static Set<TopicPartition> parseReplicaElectionData(String path) {
Optional<JsonValue> jsonFile;
try {
jsonFile = Json.parseFull(Utils.readFileAsString(path));
return jsonFile.map(js -> {
try {
return topicPartitions(js);
} catch (JsonMappingException e) {
throw new RuntimeException(e);
}
}).orElseThrow(() -> new AdminOperationException("Replica election data is empty"));
} catch (IOException e) {
throw new RuntimeException(e);
}
}
private static Set<TopicPartition> topicPartitions(JsonValue js) throws JsonMappingException {
return js.asJsonObject().get("partitions")
.map(partitionsList -> {
try {
return toTopicPartition(partitionsList);
} catch (JsonMappingException e) {
throw new RuntimeException(e);
}
})
.orElseThrow(() -> new AdminOperationException("Replica election data is missing \"partitions\" field"));
}
private static Set<TopicPartition> toTopicPartition(JsonValue partitionsList) throws JsonMappingException {
List<TopicPartition> partitions = new ArrayList<>();
Iterator<JsonValue> iterator = partitionsList.asJsonArray().iterator();
while (iterator.hasNext()) {
JsonObject partitionJs = iterator.next().asJsonObject();
String topic = partitionJs.apply("topic").to(STRING);
int partition = partitionJs.apply("partition").to(INT);
partitions.add(new TopicPartition(topic, partition));
}
Set<TopicPartition> duplicatePartitions = partitions.stream()
.filter(i -> Collections.frequency(partitions, i) > 1)
.collect(Collectors.toSet());
if (duplicatePartitions.size() > 0) {
throw new AdminOperationException(String.format(
"Replica election data contains duplicate partitions: %s", String.join(",", duplicatePartitions.toString()))
);
}
return new HashSet<>(partitions);
}
static
|
LeaderElectionCommand
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/JUnit4TearDownNotRunTest.java
|
{
"start": 2353,
"end": 2485
}
|
class ____ {
void tearDown() {}
}
@RunWith(JUnit4.class)
|
TearDownUnannotatedBaseClass
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
|
{
"start": 86097,
"end": 86257
}
|
class ____ an iterator that returns
* the SnapshotDiffReportListing for a snapshottable directory
* between two given snapshots.
*/
private final
|
defines
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/sqm/produce/function/NamedSetReturningFunctionDescriptorBuilder.java
|
{
"start": 490,
"end": 3324
}
|
class ____ {
private final SqmFunctionRegistry registry;
private final String registrationKey;
private final String functionName;
private final SetReturningFunctionTypeResolver setReturningTypeResolver;
private ArgumentsValidator argumentsValidator;
private FunctionArgumentTypeResolver argumentTypeResolver;
private String argumentListSignature;
private SqlAstNodeRenderingMode argumentRenderingMode = SqlAstNodeRenderingMode.DEFAULT;
public NamedSetReturningFunctionDescriptorBuilder(
SqmFunctionRegistry registry,
String registrationKey,
String functionName,
SetReturningFunctionTypeResolver typeResolver) {
this.registry = registry;
this.registrationKey = registrationKey;
this.functionName = functionName;
this.setReturningTypeResolver = typeResolver;
}
public NamedSetReturningFunctionDescriptorBuilder setArgumentsValidator(ArgumentsValidator argumentsValidator) {
this.argumentsValidator = argumentsValidator;
return this;
}
public NamedSetReturningFunctionDescriptorBuilder setArgumentTypeResolver(FunctionArgumentTypeResolver argumentTypeResolver) {
this.argumentTypeResolver = argumentTypeResolver;
return this;
}
public NamedSetReturningFunctionDescriptorBuilder setArgumentCountBetween(int min, int max) {
return setArgumentsValidator( StandardArgumentsValidators.between( min, max ) );
}
public NamedSetReturningFunctionDescriptorBuilder setExactArgumentCount(int exactArgumentCount) {
return setArgumentsValidator( StandardArgumentsValidators.exactly( exactArgumentCount ) );
}
public NamedSetReturningFunctionDescriptorBuilder setMinArgumentCount(int min) {
return setArgumentsValidator( StandardArgumentsValidators.min( min ) );
}
public NamedSetReturningFunctionDescriptorBuilder setParameterTypes(FunctionParameterType... types) {
setArgumentsValidator( new ArgumentTypesValidator(argumentsValidator, types) );
setArgumentTypeResolver( StandardFunctionArgumentTypeResolvers.invariant( types ) );
return this;
}
public NamedSetReturningFunctionDescriptorBuilder setArgumentListSignature(String argumentListSignature) {
this.argumentListSignature = argumentListSignature;
return this;
}
public NamedSetReturningFunctionDescriptorBuilder setArgumentRenderingMode(SqlAstNodeRenderingMode argumentRenderingMode) {
this.argumentRenderingMode = argumentRenderingMode;
return this;
}
public SqmSetReturningFunctionDescriptor register() {
return registry.register( registrationKey, descriptor() );
}
public SqmSetReturningFunctionDescriptor descriptor() {
return new NamedSqmSetReturningFunctionDescriptor(
functionName,
argumentsValidator,
setReturningTypeResolver,
argumentTypeResolver,
registrationKey,
argumentListSignature,
argumentRenderingMode
);
}
}
|
NamedSetReturningFunctionDescriptorBuilder
|
java
|
apache__flink
|
flink-queryable-state/flink-queryable-state-runtime/src/test/java/org/apache/flink/queryablestate/network/KvStateServerHandlerTest.java
|
{
"start": 4477,
"end": 34507
}
|
class ____ {
private static KvStateServerImpl testServer;
private static final long READ_TIMEOUT_MILLIS = 10000L;
@BeforeAll
static void setup() {
try {
testServer =
new KvStateServerImpl(
InetAddress.getLocalHost().getHostName(),
Collections.singletonList(0).iterator(),
1,
1,
new KvStateRegistry(),
new DisabledKvStateRequestStats());
testServer.start();
} catch (Throwable e) {
e.printStackTrace();
}
}
@AfterAll
static void tearDown() throws Exception {
testServer.shutdown();
}
/** Tests a simple successful query via an EmbeddedChannel. */
@Test
void testSimpleQuery() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(testServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
// Register state
ValueStateDescriptor<Integer> desc =
new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
desc.setQueryable("vanilla");
int numKeyGroups = 1;
AbstractStateBackend abstractBackend = new HashMapStateBackend();
DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
dummyEnv.setKvStateRegistry(registry);
AbstractKeyedStateBackend<Integer> backend =
createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);
final TestRegistryListener registryListener = new TestRegistryListener();
registry.registerListener(dummyEnv.getJobID(), registryListener);
// Update the KvState and request it
int expectedValue = 712828289;
int key = 99812822;
backend.setCurrentKey(key);
ValueState<Integer> state =
backend.getPartitionedState(
VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
state.update(expectedValue);
byte[] serializedKeyAndNamespace =
KvStateSerializer.serializeKeyAndNamespace(
key,
IntSerializer.INSTANCE,
VoidNamespace.INSTANCE,
VoidNamespaceSerializer.INSTANCE);
long requestId = Integer.MAX_VALUE + 182828L;
assertThat(registryListener.registrationName).isEqualTo("vanilla");
KvStateInternalRequest request =
new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace);
ByteBuf serRequest =
MessageSerializer.serializeRequest(channel.alloc(), requestId, request);
// Write the request and wait for the response
channel.writeInbound(serRequest);
ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_RESULT);
long deserRequestId = MessageSerializer.getRequestId(buf);
KvStateResponse response = serializer.deserializeResponse(buf);
buf.release();
assertThat(deserRequestId).isEqualTo(requestId);
int actualValue =
KvStateSerializer.deserializeValue(response.getContent(), IntSerializer.INSTANCE);
assertThat(actualValue).isEqualTo(expectedValue);
assertThat(stats.getNumRequests()).isEqualTo(1).withFailMessage(stats.toString());
// Wait for async successful request report
long deadline = System.nanoTime() + TimeUnit.NANOSECONDS.convert(30, TimeUnit.SECONDS);
while (stats.getNumSuccessful() != 1L && System.nanoTime() <= deadline) {
Thread.sleep(10L);
}
assertThat(stats.getNumSuccessful()).isEqualTo(1L).withFailMessage(stats.toString());
}
/**
* Tests the failure response with {@link UnknownKvStateIdException} as cause on queries for
* unregistered KvStateIDs.
*/
@Test
void testQueryUnknownKvStateID() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(testServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
long requestId = Integer.MAX_VALUE + 182828L;
KvStateInternalRequest request = new KvStateInternalRequest(new KvStateID(), new byte[0]);
ByteBuf serRequest =
MessageSerializer.serializeRequest(channel.alloc(), requestId, request);
// Write the request and wait for the response
channel.writeInbound(serRequest);
ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
RequestFailure response = MessageSerializer.deserializeRequestFailure(buf);
buf.release();
assertThat(response.getRequestId()).isEqualTo(requestId);
assertThat(response.getCause())
.isInstanceOf(UnknownKvStateIdException.class)
.withFailMessage("Did not respond with expected failure cause");
assertThat(stats.getNumRequests()).isEqualTo(1L);
assertThat(stats.getNumFailed()).isEqualTo(1L);
}
/**
* Tests the failure response with {@link UnknownKeyOrNamespaceException} as cause on queries
* for non-existing keys.
*/
@Test
void testQueryUnknownKey() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(testServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
int numKeyGroups = 1;
AbstractStateBackend abstractBackend = new HashMapStateBackend();
DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
dummyEnv.setKvStateRegistry(registry);
KeyedStateBackend<Integer> backend =
createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);
final TestRegistryListener registryListener = new TestRegistryListener();
registry.registerListener(dummyEnv.getJobID(), registryListener);
// Register state
ValueStateDescriptor<Integer> desc =
new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
desc.setQueryable("vanilla");
backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
byte[] serializedKeyAndNamespace =
KvStateSerializer.serializeKeyAndNamespace(
1238283,
IntSerializer.INSTANCE,
VoidNamespace.INSTANCE,
VoidNamespaceSerializer.INSTANCE);
long requestId = Integer.MAX_VALUE + 22982L;
assertThat(registryListener.registrationName).isEqualTo("vanilla");
KvStateInternalRequest request =
new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace);
ByteBuf serRequest =
MessageSerializer.serializeRequest(channel.alloc(), requestId, request);
// Write the request and wait for the response
channel.writeInbound(serRequest);
ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
RequestFailure response = MessageSerializer.deserializeRequestFailure(buf);
buf.release();
assertThat(response.getRequestId()).isEqualTo(requestId);
assertThat(response.getCause())
.isInstanceOf(UnknownKeyOrNamespaceException.class)
.withFailMessage("Did not respond with expected failure cause");
assertThat(stats.getNumRequests()).isEqualTo(1L);
assertThat(stats.getNumFailed()).isEqualTo(1L);
}
/**
* Tests the failure response on a failure on the {@link
* InternalKvState#getSerializedValue(byte[], TypeSerializer, TypeSerializer, TypeSerializer)}
* call.
*/
@Test
void testFailureOnGetSerializedValue() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(testServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
// Failing KvState
InternalKvState<Integer, VoidNamespace, Long> kvState =
new InternalKvState<Integer, VoidNamespace, Long>() {
@Override
public TypeSerializer<Integer> getKeySerializer() {
return IntSerializer.INSTANCE;
}
@Override
public TypeSerializer<VoidNamespace> getNamespaceSerializer() {
return VoidNamespaceSerializer.INSTANCE;
}
@Override
public TypeSerializer<Long> getValueSerializer() {
return LongSerializer.INSTANCE;
}
@Override
public void setCurrentNamespace(VoidNamespace namespace) {
// do nothing
}
@Override
public byte[] getSerializedValue(
final byte[] serializedKeyAndNamespace,
final TypeSerializer<Integer> safeKeySerializer,
final TypeSerializer<VoidNamespace> safeNamespaceSerializer,
final TypeSerializer<Long> safeValueSerializer)
throws Exception {
throw new RuntimeException("Expected test Exception");
}
@Override
public StateIncrementalVisitor<Integer, VoidNamespace, Long>
getStateIncrementalVisitor(int recommendedMaxNumberOfReturnedRecords) {
throw new UnsupportedOperationException();
}
@Override
public void clear() {}
};
KvStateID kvStateId =
registry.registerKvState(
new JobID(),
new JobVertexID(),
new KeyGroupRange(0, 0),
"vanilla",
kvState,
getClass().getClassLoader());
KvStateInternalRequest request = new KvStateInternalRequest(kvStateId, new byte[0]);
ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), 282872L, request);
// Write the request and wait for the response
channel.writeInbound(serRequest);
ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
RequestFailure response = MessageSerializer.deserializeRequestFailure(buf);
buf.release();
assertThat(response.getCause().getMessage()).contains("Expected test Exception");
assertThat(stats.getNumRequests()).isEqualTo(1L);
assertThat(stats.getNumFailed()).isEqualTo(1L);
}
/** Tests that the channel is closed if an Exception reaches the channel handler. */
@Test
void testCloseChannelOnExceptionCaught() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(testServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(handler);
channel.pipeline().fireExceptionCaught(new RuntimeException("Expected test Exception"));
ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.SERVER_FAILURE);
Throwable response = MessageSerializer.deserializeServerFailure(buf);
buf.release();
assertThat(response.getMessage()).contains("Expected test Exception");
channel.closeFuture().await(READ_TIMEOUT_MILLIS);
assertThat(channel.isActive()).isFalse();
}
/**
* Tests the failure response on a rejected execution, because the query executor has been
* closed.
*/
@Test
void testQueryExecutorShutDown() throws Throwable {
KvStateRegistry registry = new KvStateRegistry();
AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
KvStateServerImpl localTestServer =
new KvStateServerImpl(
InetAddress.getLocalHost().getHostName(),
Collections.singletonList(0).iterator(),
1,
1,
new KvStateRegistry(),
new DisabledKvStateRequestStats());
localTestServer.start();
localTestServer.shutdown();
assertThat(localTestServer.getQueryExecutor().isTerminated()).isTrue();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(localTestServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
int numKeyGroups = 1;
AbstractStateBackend abstractBackend = new HashMapStateBackend();
DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
dummyEnv.setKvStateRegistry(registry);
KeyedStateBackend<Integer> backend =
createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);
final TestRegistryListener registryListener = new TestRegistryListener();
registry.registerListener(dummyEnv.getJobID(), registryListener);
// Register state
ValueStateDescriptor<Integer> desc =
new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
desc.setQueryable("vanilla");
backend.getPartitionedState(VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
assertThat(registryListener.registrationName).isEqualTo("vanilla");
KvStateInternalRequest request =
new KvStateInternalRequest(registryListener.kvStateId, new byte[0]);
ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), 282872L, request);
// Write the request and wait for the response
channel.writeInbound(serRequest);
ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
RequestFailure response = MessageSerializer.deserializeRequestFailure(buf);
buf.release();
assertThat(response.getCause().getMessage()).contains("RejectedExecutionException");
assertThat(stats.getNumRequests()).isEqualTo(1L);
assertThat(stats.getNumFailed()).isEqualTo(1L);
localTestServer.shutdown();
}
/** Tests response on unexpected messages. */
@Test
void testUnexpectedMessage() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(testServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
// Write the request and wait for the response
ByteBuf unexpectedMessage = Unpooled.buffer(8);
unexpectedMessage.writeInt(4);
unexpectedMessage.writeInt(123238213);
channel.writeInbound(unexpectedMessage);
ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.SERVER_FAILURE);
Throwable response = MessageSerializer.deserializeServerFailure(buf);
buf.release();
assertThat(stats.getNumRequests()).isEqualTo(0L);
assertThat(stats.getNumFailed()).isEqualTo(0L);
KvStateResponse stateResponse = new KvStateResponse(new byte[0]);
unexpectedMessage =
MessageSerializer.serializeResponse(channel.alloc(), 192L, stateResponse);
channel.writeInbound(unexpectedMessage);
buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.SERVER_FAILURE);
response = MessageSerializer.deserializeServerFailure(buf);
buf.release();
assertThat(response)
.isInstanceOf(IllegalArgumentException.class)
.withFailMessage("Unexpected failure cause " + response.getClass().getName());
assertThat(stats.getNumRequests()).isEqualTo(0L);
assertThat(stats.getNumFailed()).isEqualTo(0L);
}
/** Tests that incoming buffer instances are recycled. */
@Test
void testIncomingBufferIsRecycled() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(testServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
KvStateInternalRequest request = new KvStateInternalRequest(new KvStateID(), new byte[0]);
ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), 282872L, request);
assertThat(serRequest.refCnt()).isEqualTo(1L);
// Write regular request
channel.writeInbound(serRequest);
assertThat(serRequest.refCnt()).isEqualTo(0L).withFailMessage("Buffer not recycled");
// Write unexpected msg
ByteBuf unexpected = channel.alloc().buffer(8);
unexpected.writeInt(4);
unexpected.writeInt(4);
assertThat(unexpected.refCnt()).isEqualTo(1L);
channel.writeInbound(unexpected);
assertThat(unexpected.refCnt()).isEqualTo(0L).withFailMessage("Buffer not recycled");
channel.finishAndReleaseAll();
}
/** Tests the failure response if the serializers don't match. */
@Test
void testSerializerMismatch() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(testServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
int numKeyGroups = 1;
AbstractStateBackend abstractBackend = new HashMapStateBackend();
DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
dummyEnv.setKvStateRegistry(registry);
AbstractKeyedStateBackend<Integer> backend =
createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);
final TestRegistryListener registryListener = new TestRegistryListener();
registry.registerListener(dummyEnv.getJobID(), registryListener);
// Register state
ValueStateDescriptor<Integer> desc =
new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
desc.setQueryable("vanilla");
ValueState<Integer> state =
backend.getPartitionedState(
VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
int key = 99812822;
// Update the KvState
backend.setCurrentKey(key);
state.update(712828289);
byte[] wrongKeyAndNamespace =
KvStateSerializer.serializeKeyAndNamespace(
"wrong-key-type",
StringSerializer.INSTANCE,
"wrong-namespace-type",
StringSerializer.INSTANCE);
byte[] wrongNamespace =
KvStateSerializer.serializeKeyAndNamespace(
key,
IntSerializer.INSTANCE,
"wrong-namespace-type",
StringSerializer.INSTANCE);
assertThat(registryListener.registrationName).isEqualTo("vanilla");
KvStateInternalRequest request =
new KvStateInternalRequest(registryListener.kvStateId, wrongKeyAndNamespace);
ByteBuf serRequest = MessageSerializer.serializeRequest(channel.alloc(), 182828L, request);
// Write the request and wait for the response
channel.writeInbound(serRequest);
ByteBuf buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
RequestFailure response = MessageSerializer.deserializeRequestFailure(buf);
buf.release();
assertThat(response.getRequestId()).isEqualTo(182828L);
assertThat(response.getCause().getMessage()).contains("IOException");
// Repeat with wrong namespace only
request = new KvStateInternalRequest(registryListener.kvStateId, wrongNamespace);
serRequest = MessageSerializer.serializeRequest(channel.alloc(), 182829L, request);
// Write the request and wait for the response
channel.writeInbound(serRequest);
buf = (ByteBuf) readInboundBlocking(channel);
buf.skipBytes(4); // skip frame length
// Verify the response
assertThat(MessageSerializer.deserializeHeader(buf)).isEqualTo(MessageType.REQUEST_FAILURE);
response = MessageSerializer.deserializeRequestFailure(buf);
buf.release();
assertThat(response.getRequestId()).isEqualTo(182829L);
assertThat(response.getCause().getMessage()).contains("IOException");
assertThat(stats.getNumRequests()).isEqualTo(2L);
assertThat(stats.getNumFailed()).isEqualTo(2L);
}
/** Tests that large responses are chunked. */
@Test
void testChunkedResponse() throws Exception {
KvStateRegistry registry = new KvStateRegistry();
KvStateRequestStats stats = new AtomicKvStateRequestStats();
MessageSerializer<KvStateInternalRequest, KvStateResponse> serializer =
new MessageSerializer<>(
new KvStateInternalRequest.KvStateInternalRequestDeserializer(),
new KvStateResponse.KvStateResponseDeserializer());
KvStateServerHandler handler =
new KvStateServerHandler(testServer, registry, serializer, stats);
EmbeddedChannel channel = new EmbeddedChannel(getFrameDecoder(), handler);
int numKeyGroups = 1;
AbstractStateBackend abstractBackend = new HashMapStateBackend();
DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
dummyEnv.setKvStateRegistry(registry);
AbstractKeyedStateBackend<Integer> backend =
createKeyedStateBackend(registry, numKeyGroups, abstractBackend, dummyEnv);
final TestRegistryListener registryListener = new TestRegistryListener();
registry.registerListener(dummyEnv.getJobID(), registryListener);
// Register state
ValueStateDescriptor<byte[]> desc =
new ValueStateDescriptor<>("any", BytePrimitiveArraySerializer.INSTANCE);
desc.setQueryable("vanilla");
ValueState<byte[]> state =
backend.getPartitionedState(
VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE, desc);
// Update KvState
byte[] bytes = new byte[2 * channel.config().getWriteBufferHighWaterMark()];
byte current = 0;
for (int i = 0; i < bytes.length; i++) {
bytes[i] = current++;
}
int key = 99812822;
backend.setCurrentKey(key);
state.update(bytes);
// Request
byte[] serializedKeyAndNamespace =
KvStateSerializer.serializeKeyAndNamespace(
key,
IntSerializer.INSTANCE,
VoidNamespace.INSTANCE,
VoidNamespaceSerializer.INSTANCE);
long requestId = Integer.MAX_VALUE + 182828L;
assertThat(registryListener.registrationName).isEqualTo("vanilla");
KvStateInternalRequest request =
new KvStateInternalRequest(registryListener.kvStateId, serializedKeyAndNamespace);
ByteBuf serRequest =
MessageSerializer.serializeRequest(channel.alloc(), requestId, request);
// Write the request and wait for the response
channel.writeInbound(serRequest);
Object msg = readInboundBlocking(channel);
assertThat(msg).isInstanceOf(ChunkedByteBuf.class).withFailMessage("Not ChunkedByteBuf");
((ChunkedByteBuf) msg).close();
}
// ------------------------------------------------------------------------
/** Queries the embedded channel for data. */
private Object readInboundBlocking(EmbeddedChannel channel)
throws InterruptedException, TimeoutException {
final long sleepMillis = 50L;
long sleptMillis = 0L;
Object msg = null;
while (sleptMillis < READ_TIMEOUT_MILLIS && (msg = channel.readOutbound()) == null) {
Thread.sleep(sleepMillis);
sleptMillis += sleepMillis;
}
if (msg == null) {
throw new TimeoutException();
} else {
return msg;
}
}
/** Frame length decoder (expected by the serialized messages). */
private ChannelHandler getFrameDecoder() {
return new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4);
}
/**
* A listener that keeps the last updated KvState information so that a test can retrieve it.
*/
static
|
KvStateServerHandlerTest
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/oracle/select/OracleSelectTest133.java
|
{
"start": 1002,
"end": 2526
}
|
class ____ extends TestCase {
public void test_0() throws Exception {
String sql = "SELECT to_char(sysdate-interval '1' hour,'hh24:mi:ss') as num FROM DUAL;";
List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.ORACLE);
assertEquals(1, statementList.size());
SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
assertEquals("SELECT to_char(SYSDATE - INTERVAL '1' HOUR, 'hh24:mi:ss') AS num\n" +
"FROM DUAL;", stmt.toString());
assertEquals("select to_char(sysdate - interval '1' HOUR, 'hh24:mi:ss') as num\n" +
"from DUAL;", stmt.toLowerCaseString());
OracleSchemaStatVisitor visitor = new OracleSchemaStatVisitor();
stmt.accept(visitor);
System.out.println("Tables : " + visitor.getTables());
System.out.println("fields : " + visitor.getColumns());
System.out.println("coditions : " + visitor.getConditions());
System.out.println("relationships : " + visitor.getRelationships());
System.out.println("orderBy : " + visitor.getOrderByColumns());
assertEquals(0, visitor.getTables().size());
assertEquals(0, visitor.getColumns().size());
assertEquals(0, visitor.getConditions().size());
assertEquals(0, visitor.getRelationships().size());
assertEquals(0, visitor.getOrderByColumns().size());
// assertTrue(visitor.containsColumn("srm1.CONSIGNEE_ADDRESS", "id"));
}
}
|
OracleSelectTest133
|
java
|
apache__flink
|
flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/config/OptimizerConfigOptions.java
|
{
"start": 1577,
"end": 1645
}
|
class ____ start with "table.optimizer".
*/
@PublicEvolving
public
|
must
|
java
|
apache__kafka
|
server/src/main/java/org/apache/kafka/server/share/fetch/DeliveryCountOps.java
|
{
"start": 977,
"end": 1033
}
|
enum ____ {
INCREASE, DECREASE, NO_OP
}
|
DeliveryCountOps
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/criteria/JpaSetReturningFunction.java
|
{
"start": 232,
"end": 366
}
|
interface ____<T> extends JpaCriteriaNode {
/**
* The name of the function.
*/
String getFunctionName();
}
|
JpaSetReturningFunction
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/checkpointing/StatefulJobWBroadcastStateMigrationITCase.java
|
{
"start": 17670,
"end": 19051
}
|
class ____
extends KeyedBroadcastProcessFunction<
Long, Tuple2<Long, Long>, Tuple2<Long, Long>, Tuple2<Long, Long>> {
private static final long serialVersionUID = 1333992081671604521L;
private MapStateDescriptor<Long, String> stateDesc;
@Override
public void open(OpenContext openContext) throws Exception {
super.open(openContext);
stateDesc =
new MapStateDescriptor<>(
"broadcast-state-3",
BasicTypeInfo.LONG_TYPE_INFO,
BasicTypeInfo.STRING_TYPE_INFO);
}
@Override
public void processElement(
Tuple2<Long, Long> value, ReadOnlyContext ctx, Collector<Tuple2<Long, Long>> out)
throws Exception {
out.collect(value);
}
@Override
public void processBroadcastElement(
Tuple2<Long, Long> value, Context ctx, Collector<Tuple2<Long, Long>> out)
throws Exception {
ctx.getBroadcastState(stateDesc).put(value.f0, Long.toString(value.f1));
}
}
/**
* A simple {@link KeyedBroadcastProcessFunction} that verifies the contents of the broadcast
* state after recovery.
*/
private static
|
CheckpointingKeyedSingleBroadcastFunction
|
java
|
hibernate__hibernate-orm
|
hibernate-envers/src/test/java/org/hibernate/orm/test/envers/integration/accesstype/FieldAccessTypeEntity.java
|
{
"start": 376,
"end": 1632
}
|
class ____ {
@Id
@GeneratedValue
private Integer id;
@Audited
private String data;
public FieldAccessTypeEntity() {
}
public FieldAccessTypeEntity(String data) {
this.data = data;
}
public FieldAccessTypeEntity(Integer id, String data) {
this.id = id;
this.data = data;
}
public Integer getId() {
throw new RuntimeException();
}
public void setId(Integer id) {
throw new RuntimeException();
}
public String getData() {
throw new RuntimeException();
}
public void setData(String data) {
throw new RuntimeException();
}
public Integer readId() {
return id;
}
public void writeData(String data) {
this.data = data;
}
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !(o instanceof FieldAccessTypeEntity) ) {
return false;
}
FieldAccessTypeEntity that = (FieldAccessTypeEntity) o;
if ( data != null ? !data.equals( that.data ) : that.data != null ) {
return false;
}
if ( id != null ? !id.equals( that.id ) : that.id != null ) {
return false;
}
return true;
}
public int hashCode() {
int result;
result = (id != null ? id.hashCode() : 0);
result = 31 * result + (data != null ? data.hashCode() : 0);
return result;
}
}
|
FieldAccessTypeEntity
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/domain/animal/Zoo.java
|
{
"start": 798,
"end": 3027
}
|
class ____ {
private Long id;
private String name;
private Classification classification;
private Map directors = new HashMap();
private Map animals = new HashMap();
private Map mammals = new HashMap();
private Address address;
public Zoo() {
}
public Zoo(String name, Address address) {
this.name = name;
this.address = address;
}
@Id
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
@ManyToMany
@JoinTable(
name = "t_directors",
joinColumns = @JoinColumn( name = "zoo_fk" ),
inverseJoinColumns = @JoinColumn( name = "director_fk" )
)
@MapKeyColumn( name = "`title`" )
public Map<String,Human> getDirectors() {
return directors;
}
public void setDirectors(Map directors) {
this.directors = directors;
}
@OneToMany
@JoinColumn( name = "mammal_fk" )
@MapKeyColumn( name = "name" )
public Map<String,Mammal> getMammals() {
return mammals;
}
public void setMammals(Map mammals) {
this.mammals = mammals;
}
@OneToMany( mappedBy = "zoo" )
@MapKeyColumn( name = "serialNumber" )
public Map<String, Animal> getAnimals() {
return animals;
}
public void setAnimals(Map animals) {
this.animals = animals;
}
@Embedded
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
@Enumerated( value = EnumType.STRING )
public Classification getClassification() {
return classification;
}
public void setClassification(Classification classification) {
this.classification = classification;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( !( o instanceof Zoo ) ) {
return false;
}
Zoo zoo = ( Zoo ) o;
if ( address != null ? !address.equals( zoo.address ) : zoo.address != null ) {
return false;
}
if ( name != null ? !name.equals( zoo.name ) : zoo.name != null ) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = name != null ? name.hashCode() : 0;
result = 31 * result + ( address != null ? address.hashCode() : 0 );
return result;
}
}
|
Zoo
|
java
|
square__moshi
|
examples/src/main/java/com/squareup/moshi/recipes/models/Card.java
|
{
"start": 657,
"end": 903
}
|
class ____ {
public final char rank;
public final Suit suit;
public Card(char rank, Suit suit) {
this.rank = rank;
this.suit = suit;
}
@Override
public String toString() {
return String.format("%s%s", rank, suit);
}
}
|
Card
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/tool/schema/extract/internal/AbstractInformationExtractorImpl.java
|
{
"start": 2822,
"end": 60933
}
|
class ____ implements InformationExtractor {
private final String[] tableTypes;
private final String[] extraPhysicalTableTypes;
private final ExtractionContext extractionContext;
private final boolean useJdbcMetadataDefaultsSetting;
private Identifier currentCatalog;
private Identifier currentSchema;
private String currentCatalogFilter;
private String currentSchemaFilter;
public AbstractInformationExtractorImpl(ExtractionContext extractionContext) {
this.extractionContext = extractionContext;
final var configService =
extractionContext.getServiceRegistry()
.requireService( ConfigurationService.class );
useJdbcMetadataDefaultsSetting = configService.getSetting(
"hibernate.temp.use_jdbc_metadata_defaults",
StandardConverters.BOOLEAN,
Boolean.TRUE
);
final String extraPhysicalTableTypesConfig = configService.getSetting(
EXTRA_PHYSICAL_TABLE_TYPES,
StandardConverters.STRING,
configService.getSetting(
EXTRA_PHYSICAL_TABLE_TYPES,
StandardConverters.STRING,
""
)
);
final var dialect = extractionContext.getJdbcEnvironment().getDialect();
extraPhysicalTableTypes = getPhysicalTableTypes( extraPhysicalTableTypesConfig, dialect );
tableTypes = getTableTypes( configService, dialect );
}
private String[] getPhysicalTableTypes(String extraPhysicalTableTypesConfig, Dialect dialect) {
final List<String> physicalTableTypesList = new ArrayList<>();
if ( !isBlank( extraPhysicalTableTypesConfig ) ) {
addAll( physicalTableTypesList,
splitTrimmingTokens( ",;", extraPhysicalTableTypesConfig, false ) );
}
dialect.augmentPhysicalTableTypes( physicalTableTypesList );
return physicalTableTypesList.toArray( EMPTY_STRINGS );
}
private String[] getTableTypes(ConfigurationService configService, Dialect dialect) {
final List<String> tableTypesList = new ArrayList<>();
tableTypesList.add( "TABLE" );
tableTypesList.add( "VIEW" );
if ( getBoolean( ENABLE_SYNONYMS, configService.getSettings() ) ) {
if ( dialect instanceof DB2Dialect ) { //TODO: should not use Dialect types directly!
tableTypesList.add( "ALIAS" );
}
tableTypesList.add( "SYNONYM" );
}
addAll( tableTypesList, extraPhysicalTableTypes );
dialect.augmentRecognizedTableTypes( tableTypesList );
return tableTypesList.toArray( EMPTY_STRINGS );
}
private IdentifierHelper getIdentifierHelper() {
return getJdbcEnvironment().getIdentifierHelper();
}
protected JDBCException convertSQLException(SQLException sqlException, String message) {
return getJdbcEnvironment().getSqlExceptionHelper().convert( sqlException, message );
}
protected String toMetaDataObjectName(Identifier identifier) {
return getIdentifierHelper().toMetaDataObjectName( identifier );
}
protected ExtractionContext getExtractionContext() {
return extractionContext;
}
protected JdbcEnvironment getJdbcEnvironment() {
return extractionContext.getJdbcEnvironment();
}
// The following methods purposely return the column labels that are defined by
// DatabaseMetaData methods that return a ResultSet. Subclasses that do not rely
// on DatabaseMetaData may override these methods to use different column labels.
protected String getResultSetCatalogLabel() {
return "TABLE_CAT";
}
protected String getResultSetSchemaLabel() {
return "TABLE_SCHEM";
}
protected String getResultSetTableNameLabel() {
return "TABLE_NAME";
}
protected String getResultSetTableTypeLabel() {
return "TABLE_TYPE";
}
protected String getResultSetRemarksLabel() {
return "REMARKS";
}
protected String getResultSetPrimaryKeyCatalogLabel() {
return "PKTABLE_CAT";
}
protected String getResultSetPrimaryKeySchemaLabel() {
return "PKTABLE_SCHEM";
}
protected String getResultSetPrimaryKeyTableLabel() {
return "PKTABLE_NAME";
}
protected String getResultSetForeignKeyCatalogLabel() {
return "FKTABLE_CAT";
}
protected String getResultSetForeignKeySchemaLabel() {
return "FKTABLE_SCHEM";
}
protected String getResultSetForeignKeyTableLabel() {
return "FKTABLE_NAME";
}
protected String getResultSetColumnNameLabel() {
return "COLUMN_NAME";
}
protected String getResultSetSqlTypeCodeLabel() {
return "DATA_TYPE";
}
protected String getResultSetTypeNameLabel() {
return "TYPE_NAME";
}
protected String getResultSetColumnSizeLabel() {
return "COLUMN_SIZE";
}
protected String getResultSetDecimalDigitsLabel() {
return "DECIMAL_DIGITS";
}
protected String getResultSetIsNullableLabel() {
return "IS_NULLABLE";
}
protected String getResultSetIndexTypeLabel() {
return "TYPE";
}
protected String getResultSetIndexNameLabel() {
return "INDEX_NAME";
}
protected String getResultSetForeignKeyLabel() {
return "FK_NAME";
}
protected String getResultSetPrimaryKeyNameLabel() {
return "PK_NAME";
}
protected String getResultSetColumnPositionColumn() {
return "KEY_SEQ" ;
}
protected String getResultSetPrimaryKeyColumnNameLabel() {
return "PKCOLUMN_NAME" ;
}
protected String getResultSetForeignKeyColumnNameLabel() {
return "FKCOLUMN_NAME" ;
}
/**
* Must do the following:
* <ol>
* <li>
* obtain a {@link ResultSet} containing a column of existing catalog
* names. The column label must be the same as returned by
* {@link #getResultSetCatalogLabel}.
* </li>
* <li>execute {@code processor.process( resultSet )};</li>
* <li>
* release resources whether {@code processor.process( resultSet )}
* executes successfully or not.
* </li>
* </ol>
* @param processor - the provided ResultSetProcessor.
* @param <T> - defined by {@code processor}
* @return - defined by {@code processor}
* @throws SQLException - if a database error occurs
*/
protected abstract <T> T processCatalogsResultSet(ExtractionContext.ResultSetProcessor<T> processor) throws SQLException;
@Override
public boolean catalogExists(Identifier catalog) {
try {
return processCatalogsResultSet( resultSet -> {
while ( resultSet.next() ) {
final String existingCatalogName = resultSet.getString( getResultSetCatalogLabel() );
// todo : hmm.. case sensitive or insensitive match...
// for now, match any case...
if ( catalog.getText().equalsIgnoreCase( existingCatalogName ) ) {
return true;
}
}
return false;
} );
}
catch (SQLException sqlException) {
throw convertSQLException( sqlException, "Unable to query ResultSet for existing catalogs" );
}
}
/**
* Must do the following:
* <ol>
* <li>
* obtain a {@link ResultSet} containing a row for any existing
* catalog/schema combination as specified by the {@code catalog}
* and {@code schemaPattern} parameters described below. The row
* contents will not be examined by {@code processor.process( resultSet )},
* so column label names are not specified;
* </li>
* <li>execute {@code processor.process( resultSet )};</li>
* <li>
* release resources whether {@code processor.process( resultSet )}
* executes successfully or not.
* </li>
* </ol>
* <p>
* The {@code catalog} and {@code schemaPattern} parameters are as
* specified by {@link DatabaseMetaData#getSchemas(String, String)},
* and are copied here:
* @param catalog β a catalog name; must match the catalog name as it is
* stored in the database; "" retrieves those without
* a catalog; null means catalog name should not be
* used to narrow down the search.
* @param schemaPattern β a schema name; must match the schema name as
* it is stored in the database; null means schema
* name should not be used to narrow down the search.
* @param processor - the provided ResultSetProcessor.
* @param <T> - defined by {@code processor}
* @return - defined by {@code processor}
* @throws SQLException - if a database error occurs
*/
protected abstract <T> T processSchemaResultSet(
String catalog,
String schemaPattern,
ExtractionContext.ResultSetProcessor<T> processor)
throws SQLException;
@Override
public boolean schemaExists(Identifier catalog, Identifier schema) {
final var helper = getIdentifierHelper();
final String catalogFilter =
helper.toMetaDataCatalogName( catalog == null ? extractionContext.getDefaultCatalog() : catalog );
final String schemaFilter =
helper.toMetaDataSchemaName( schema == null ? extractionContext.getDefaultSchema() : schema );
try {
return processSchemaResultSet(
catalogFilter,
schemaFilter,
resultSet -> {
if ( !resultSet.next() ) {
return false;
}
else if ( resultSet.next() ) {
final String catalogName = catalog == null ? "" : catalog.getCanonicalName();
final String schemaName = schema == null ? "" : schema.getCanonicalName();
CORE_LOGGER.debugf(
"Multiple schemas found with that name [%s.%s]",
catalogName,
schemaName
);
}
return true;
}
);
}
catch (SQLException sqlException) {
throw convertSQLException( sqlException, "Unable to query ResultSet for existing schemas" );
}
}
private TableInformation extractTableInformation(ResultSet resultSet) throws SQLException {
return new TableInformationImpl(
this,
getIdentifierHelper(),
extractTableName( resultSet ),
isPhysicalTableType( resultSet.getString( getResultSetTableTypeLabel() ) ),
resultSet.getString( getResultSetRemarksLabel() )
);
}
private Connection getConnection() {
return extractionContext.getJdbcConnection();
}
@Override
public TableInformation getTable(Identifier catalog, Identifier schema, Identifier tableName) {
if ( catalog != null || schema != null ) {
// The table defined an explicit namespace. In such cases we only ever want to look
// in the identified namespace
return locateTableInNamespace( catalog, schema, tableName );
}
else {
// The table did not define an explicit namespace:
// 1) look in current namespace
// 2) look in default namespace
// 3) look in all namespaces - multiple hits is considered an error
// 1) look in current namespace
final Identifier currentSchema = getCurrentSchema();
final Identifier currentCatalog = getCurrentCatalog();
if ( currentCatalog != null || currentSchema != null ) {
final var tableInfo = locateTableInNamespace( currentCatalog, currentSchema, tableName );
if ( tableInfo != null ) {
return tableInfo;
}
}
// 2) look in default namespace
final Identifier defaultCatalog = extractionContext.getDefaultCatalog();
final Identifier defaultSchema = extractionContext.getDefaultSchema();
if ( defaultCatalog != null || defaultSchema != null ) {
final var tableInfo = locateTableInNamespace( defaultCatalog, defaultSchema, tableName );
if ( tableInfo != null ) {
return tableInfo;
}
}
// 3) look in all namespaces
try {
return processTableResultSet(
null,
null,
toMetaDataObjectName( tableName ),
tableTypes,
resultSet -> extractTableInformation( null, null, tableName, resultSet )
);
}
catch (SQLException sqlException) {
throw convertSQLException( sqlException, "Error accessing table metadata" );
}
}
}
private Identifier getCurrentSchema() {
if ( getNameQualifierSupport() == NameQualifierSupport.CATALOG ) {
return null;
}
else if ( currentSchema != null ) {
return currentSchema;
}
else {
final Identifier schema = getJdbcEnvironment().getCurrentSchema();
if ( schema != null ) {
currentSchema = schema;
}
if ( !useJdbcMetadataDefaultsSetting ) {
try {
currentSchema =
getIdentifierHelper()
.toIdentifier( getConnection().getSchema() );
}
catch (SQLException sqle) {
ERROR_LOG.logErrorCodes( sqle.getErrorCode(), sqle.getSQLState() );
}
catch (AbstractMethodError ignore) {
// jConnect and jTDS report that they "support" schemas, but they don't really
}
}
return currentSchema;
}
}
private Identifier getCurrentCatalog() {
if ( getNameQualifierSupport() == NameQualifierSupport.SCHEMA ) {
return null;
}
else if ( currentCatalog != null ) {
return currentCatalog;
}
else {
final Identifier catalog = getJdbcEnvironment().getCurrentCatalog();
if ( catalog != null ) {
currentCatalog = catalog;
}
if ( !useJdbcMetadataDefaultsSetting ) {
try {
currentCatalog =
getIdentifierHelper()
.toIdentifier( getConnection().getCatalog() );
}
catch (SQLException sqle) {
ERROR_LOG.logErrorCodes( sqle.getErrorCode(), sqle.getSQLState() );
}
}
return currentCatalog;
}
}
private String getCurrentCatalogFilter(JdbcEnvironment jdbcEnvironment) {
if ( currentCatalogFilter != null ) {
return currentCatalogFilter;
}
final Identifier currentCatalog = jdbcEnvironment.getCurrentCatalog();
if ( currentCatalog != null ) {
currentCatalogFilter = toMetaDataObjectName( currentCatalog );
}
if ( !useJdbcMetadataDefaultsSetting ) {
try {
currentCatalogFilter = getConnection().getCatalog();
}
catch (SQLException sqle) {
ERROR_LOG.logErrorCodes( sqle.getErrorCode(), sqle.getSQLState() );
}
}
return currentCatalogFilter;
}
private String getCurrentSchemaFilter(JdbcEnvironment jdbcEnvironment) {
if ( currentSchemaFilter != null ) {
return currentSchemaFilter;
}
final Identifier currentSchema = jdbcEnvironment.getCurrentSchema();
if ( currentSchema != null ) {
currentSchemaFilter = toMetaDataObjectName( currentSchema );
}
if ( !useJdbcMetadataDefaultsSetting ) {
try {
currentSchemaFilter = getConnection().getSchema();
}
catch (SQLException sqle) {
ERROR_LOG.logErrorCodes( sqle.getErrorCode(), sqle.getSQLState() );
}
catch (AbstractMethodError ignore) {
// jConnect and jTDS report that they "support" schemas, but they don't really
}
}
return currentSchemaFilter;
}
@Override
public NameSpaceTablesInformation getTables(Identifier catalog, Identifier schema) {
final String catalogFilter = getCatalogFilter( catalog );
final String schemaFilter = getSchemaFilter( schema );
try {
return processTableResultSet(
catalogFilter,
schemaFilter,
"%",
tableTypes,
resultSet -> {
final var tablesInformation = extractNameSpaceTablesInformation( resultSet );
populateTablesWithColumns( catalogFilter, schemaFilter, tablesInformation );
return tablesInformation;
} );
}
catch (SQLException sqlException) {
throw convertSQLException( sqlException, "Error accessing table metadata" );
}
}
private String getCatalogFilter(Identifier catalog) {
if ( supportsCatalogs() ) {
if ( catalog == null ) {
// look in the current namespace
final String currentCatalogFilter = getCurrentCatalogFilter( getJdbcEnvironment() );
if ( currentCatalogFilter != null ) {
return currentCatalogFilter;
}
else {
// 2) look in default namespace
final Identifier defaultCatalog = extractionContext.getDefaultCatalog();
return defaultCatalog != null ? toMetaDataObjectName( defaultCatalog ) : null;
}
}
else {
return toMetaDataObjectName( catalog );
}
}
else {
return null;
}
}
private String getSchemaFilter(Identifier schema) {
if ( supportsSchemas() ) {
if ( schema == null ) {
// 1) look in current namespace
final String currentSchemaFilter = getCurrentSchemaFilter( getJdbcEnvironment() );
if ( currentSchemaFilter != null ) {
return currentSchemaFilter;
}
else {
// 2) look in default namespace
final Identifier defaultSchema = extractionContext.getDefaultSchema();
return defaultSchema != null ? toMetaDataObjectName( defaultSchema ) : null;
}
}
else {
return toMetaDataObjectName( schema );
}
}
else {
return null;
}
}
/**
* Must do the following:
* <ol>
* <li>
* obtain a {@link ResultSet} containing a row for any existing
* catalog/schema/table/column combination as specified by the
* {@code catalog}, {@code schemaPattern}, {@code tableNamePattern},
* and {@code columnNamePattern} parameters described below.
* The {@link ResultSet} must contain the following, consistent with the
* corresponding columns returned by {@link DatabaseMetaData#getColumns}
* <ul>
* <li>column label {@link #getResultSetTableNameLabel} for table name</li>
* <li>column label {@link #getResultSetColumnNameLabel} for column name</li>
* <li>column label {@link #getResultSetSqlTypeCodeLabel} SQL type code from java.sql.Types</li>
* <li>column label {@link #getResultSetTypeNameLabel} for database column type name</li>
* <li>column label {@link #getResultSetColumnSizeLabel} for column size</li>
* <li>column label {@link #getResultSetDecimalDigitsLabel} for number of fractional digits</li>
* <li>column label {@link #getResultSetIsNullableLabel} for nullability</li>
* </ul>
* Rows must be ordered by catalog, schema, table name, and column position.
* </li>
* <li> execute {@code processor.process( resultSet )};</li>
* <li>
* release resources whether {@code processor.process( resultSet )}
* executes successfully or not.
* </li>
* </ol>
* <p>
* The {@code catalog}, {@code schemaPattern}, {@code tableNamePattern},
* and {@code columnNamePattern} parameters are as
* specified by {@link DatabaseMetaData#getColumns(String, String, String, String)},
* and are copied here:
* <p>
* @param catalog β a catalog name; must match the catalog name as it is
* stored in the database; "" retrieves those without
* a catalog; null means that the catalog name should
* not be used to narrow the search
* @param schemaPattern β a schema name pattern; must match the schema
* name as it is stored in the database; ""
* retrieves those without a schema; null means
* that the schema name should not be used to
* narrow the search
* @param tableNamePattern β a table name pattern; must match the table
* name as it is stored in the database
* @param columnNamePattern β a column name pattern; must match the
* column name as it is stored in the database
* @param processor - the provided ResultSetProcessor.
* @param <T> - defined by {@code processor}
* @return - defined by {@code processor}
* @throws SQLException - if a database error occurs
*/
protected abstract <T> T processColumnsResultSet(
String catalog,
String schemaPattern,
String tableNamePattern,
String columnNamePattern,
ExtractionContext.ResultSetProcessor<T> processor)
throws SQLException;
private void populateTablesWithColumns(
String catalogFilter,
String schemaFilter,
NameSpaceTablesInformation tables) {
try {
processColumnsResultSet(
catalogFilter,
schemaFilter,
null,
"%",
resultSet -> {
String currentTableName = "";
TableInformation currentTable = null;
while ( resultSet.next() ) {
if ( !currentTableName.equals( resultSet.getString( getResultSetTableNameLabel() ) ) ) {
currentTableName = resultSet.getString( getResultSetTableNameLabel() );
currentTable = tables.getTableInformation( currentTableName );
}
if ( currentTable != null ) {
currentTable.addColumn( columnInformation( currentTable, resultSet ) );
}
}
return null;
}
);
}
catch (SQLException e) {
throw convertSQLException( e, "Error accessing tables metadata" );
}
}
/*
* Hibernate Reactive overrides this
*/
protected ColumnInformationImpl columnInformation(TableInformation tableInformation, ResultSet resultSet)
throws SQLException {
return new ColumnInformationImpl(
tableInformation,
toIdentifier( resultSet.getString( getResultSetColumnNameLabel() ) ),
resultSet.getInt( getResultSetSqlTypeCodeLabel() ),
new StringTokenizer( resultSet.getString( getResultSetTypeNameLabel() ), "()" ).nextToken(),
resultSet.getInt( getResultSetColumnSizeLabel() ),
resultSet.getInt( getResultSetDecimalDigitsLabel() ),
interpretTruthValue( resultSet.getString( getResultSetIsNullableLabel() ) )
);
}
private NameSpaceTablesInformation extractNameSpaceTablesInformation(ResultSet resultSet)
throws SQLException {
final var tables = new NameSpaceTablesInformation( getIdentifierHelper() );
while ( resultSet.next() ) {
tables.addTableInformation( extractTableInformation( resultSet ) );
}
return tables;
}
/**
* Must do the following:
* <ol>
* <li>
* obtain a {@link ResultSet} containing a row for any existing
* catalog/schema/table/table type combination as specified by the
* {@code catalogFilter}, {@code schemaFilter}, {@code tableNameFilter},
* and {@code tableTypes} parameters described below.
* The {@link ResultSet} must contain the following, consistent with the
* corresponding columns returned by {@link DatabaseMetaData#getTables(String, String, String, String[])}
* <ul>
* <li>column label {@link #getResultSetTableNameLabel} for table name</li>
* <li>column label {@link #getResultSetTableTypeLabel} for table type</li>
* <li>column label {@link #getResultSetRemarksLabel} for table comment</li>
* </ul>
* </li>
* <li> execute {@code processor.process( resultSet )};</li>
* <li>
* release resources whether {@code processor.process( resultSet )}
* executes successfully or not.
* </li>
* </ol>
* <p>
* The {@code catalog}, {@code schemaPattern}, {@code tableNamePattern},
* and {@code columnNamePattern} parameters are as
* specified by {@link DatabaseMetaData#getTables(String, String, String, String[])},
* and are copied here:
*
* @param catalog - a catalog name; must match the catalog name as it is
* stored in the database; "" retrieves those without a
* catalog; null means that the catalog name should not
* be used to narrow the search
* @param schemaPattern - a schema name pattern; must match the schema name
* as it is stored in the database; "" retrieves
* those without a schema; null means that the schema
* name should not be used to narrow the search
* @param tableNamePattern - a table name pattern; must match the table name
* as it is stored in the database
* @param types - a list of table types
* @param processor - the provided ResultSetProcessor.
* @param <T> - defined by {@code processor}
* @return - defined by {@code processor}
* @throws SQLException - if a database error occurs
*/
protected abstract <T> T processTableResultSet(
String catalog,
String schemaPattern,
String tableNamePattern,
String[] types,
ExtractionContext.ResultSetProcessor<T> processor)
throws SQLException;
private TableInformation locateTableInNamespace(
Identifier catalog,
Identifier schema,
Identifier tableName) {
final String catalogFilter = catalogFilter( catalog );
final String schemaFilter = schemaFilter( schema );
final Identifier catalogToUse = supportsCatalogs() ? catalog : null;
final Identifier schemaToUse = supportsSchemas() ? schema : null;
final String tableNameFilter = toMetaDataObjectName( tableName );
try {
return processTableResultSet(
catalogFilter,
schemaFilter,
tableNameFilter,
tableTypes,
resultSet -> extractTableInformation( catalogToUse, schemaToUse, tableName, resultSet )
);
}
catch (SQLException sqlException) {
throw convertSQLException( sqlException, "Error accessing table metadata" );
}
}
private String catalogFilter(Identifier catalog) {
if ( supportsCatalogs() ) {
if ( catalog == null ) {
try {
return getConnection().getCatalog();
}
catch (SQLException ignore) {
return "";
}
}
else {
return toMetaDataObjectName( catalog );
}
}
else {
return null;
}
}
private String schemaFilter(Identifier schema) {
if ( supportsSchemas() ) {
return schema == null ? "" : toMetaDataObjectName( schema );
}
else {
return null;
}
}
private NameQualifierSupport getNameQualifierSupport() {
return getJdbcEnvironment().getNameQualifierSupport();
}
private boolean supportsCatalogs() {
return getNameQualifierSupport().supportsCatalogs();
}
private boolean supportsSchemas() {
return getNameQualifierSupport().supportsSchemas();
}
private TableInformation extractTableInformation(
Identifier catalog,
Identifier schema,
Identifier tableName,
ResultSet resultSet)
throws SQLException {
boolean found = false;
TableInformation tableInformation = null;
while ( resultSet.next() ) {
final Identifier identifier =
toIdentifier( resultSet.getString( getResultSetTableNameLabel() ),
tableName.isQuoted() );
if ( tableName.equals( identifier ) ) {
if ( found ) {
CORE_LOGGER.multipleTablesFound( tableName.render() );
throw new SchemaExtractionException(
String.format(
Locale.ENGLISH,
"More than one table found in namespace (%s, %s) : %s",
catalog == null ? "" : catalog.render(),
schema == null ? "" : schema.render(),
tableName.render()
)
);
}
else {
found = true;
tableInformation = extractTableInformation( resultSet );
addColumns( tableInformation );
}
}
}
if ( !found ) {
CORE_LOGGER.tableNotFound( tableName.render() );
}
return tableInformation;
}
protected abstract String getResultSetTableTypesPhysicalTableConstant();
protected boolean isPhysicalTableType(String tableType) {
final boolean isTableType =
getResultSetTableTypesPhysicalTableConstant()
.equalsIgnoreCase( tableType );
if ( extraPhysicalTableTypes == null ) {
return isTableType;
}
else {
if ( isTableType ) {
return true;
}
else {
for ( String extraPhysicalTableType : extraPhysicalTableTypes ) {
if ( extraPhysicalTableType.equalsIgnoreCase( tableType ) ) {
return true;
}
}
return false;
}
}
}
protected void addColumns(TableInformation tableInformation) {
final var tableName = tableInformation.getName();
final Identifier catalog = tableName.getCatalogName();
final Identifier schema = tableName.getSchemaName();
try {
processColumnsResultSet(
catalog == null ? "" : catalog.getText(),
schema == null ? "" : schema.getText(),
tableName.getTableName().getText(),
"%",
resultSet -> {
while ( resultSet.next() ) {
tableInformation.addColumn( columnInformation( tableInformation, resultSet ) );
}
return null;
}
);
}
catch (SQLException e) {
throw convertSQLException( e, "Error accessing tables metadata" );
}
}
/*
* Used by Hibernate Reactive
*/
protected Boolean interpretTruthValue(String nullable) {
if ( "yes".equalsIgnoreCase( nullable ) ) {
return Boolean.TRUE;
}
else if ( "no".equalsIgnoreCase( nullable ) ) {
return Boolean.FALSE;
}
else {
return null;
}
}
// This method is not currently used.
protected abstract <T> T processPrimaryKeysResultSet(
String catalogFilter,
String schemaFilter,
Identifier tableName,
ExtractionContext.ResultSetProcessor<T> processor)
throws SQLException;
protected abstract <T> T processPrimaryKeysResultSet(
String catalogFilter,
String schemaFilter,
@Nullable String tableName,
ExtractionContext.ResultSetProcessor<T> processor)
throws SQLException;
@Override
public @Nullable PrimaryKeyInformation getPrimaryKey(TableInformation tableInformation) {
final var databaseObjectAccess = extractionContext.getDatabaseObjectAccess();
if ( databaseObjectAccess.isCaching() && supportsBulkPrimaryKeyRetrieval() ) {
return databaseObjectAccess.locatePrimaryKeyInformation( tableInformation.getName() );
}
final var tableName = tableInformation.getName();
final Identifier catalog = tableName.getCatalogName();
final Identifier schema = tableName.getSchemaName();
try {
return processPrimaryKeysResultSet(
catalog == null ? "" : catalog.getText(),
schema == null ? "" : schema.getText(),
tableInformation.getName().getTableName(),
resultSet -> extractPrimaryKeyInformation( tableInformation, resultSet )
);
}
catch (SQLException e) {
throw convertSQLException( e,
"Error while reading primary key meta data for "
+ tableInformation.getName() );
}
}
private PrimaryKeyInformation extractPrimaryKeyInformation(TableInformation tableInformation, ResultSet resultSet)
throws SQLException {
final List<ColumnInformation> columns = new ArrayList<>();
boolean firstPass = true;
Identifier primaryKeyIdentifier = null;
while ( resultSet.next() ) {
final String currentPkName = resultSet.getString( getResultSetPrimaryKeyNameLabel() );
final Identifier currentPrimaryKeyIdentifier =
currentPkName == null ? null : toIdentifier( currentPkName );
if ( firstPass ) {
primaryKeyIdentifier = currentPrimaryKeyIdentifier;
firstPass = false;
}
else {
if ( !Objects.equals( primaryKeyIdentifier, currentPrimaryKeyIdentifier ) ) {
throw new SchemaExtractionException( "Encountered primary keys differing name on table "
+ tableInformation.getName().toString() );
}
}
final int columnPosition = resultSet.getInt( getResultSetColumnPositionColumn() );
final int index = columnPosition - 1;
// Fill up the array list with nulls up to the desired index, because some JDBC drivers don't return results ordered by column position
while ( columns.size() <= index ) {
columns.add( null );
}
final Identifier columnIdentifier =
toIdentifier( resultSet.getString( getResultSetColumnNameLabel() ) );
columns.set( index, tableInformation.getColumn( columnIdentifier ) );
}
if ( firstPass ) {
// we did not find any results (no pk)
return null;
}
else {
// validate column list is properly contiguous
for ( int i = 0; i < columns.size(); i++ ) {
if ( columns.get( i ) == null ) {
throw new SchemaExtractionException( "Primary Key information was missing for KEY_SEQ = " + ( i+1) );
}
}
// build the return
return new PrimaryKeyInformationImpl( primaryKeyIdentifier, columns );
}
}
@Override
public NameSpacePrimaryKeysInformation getPrimaryKeys(Identifier catalog, Identifier schema) {
if ( !supportsBulkPrimaryKeyRetrieval() ) {
throw new UnsupportedOperationException( "Database doesn't support extracting all primary keys at once" );
}
else {
try {
return processPrimaryKeysResultSet(
catalog == null ? "" : catalog.getText(),
schema == null ? "" : schema.getText(),
(String) null,
this::extractNameSpacePrimaryKeysInformation
);
}
catch (SQLException e) {
throw convertSQLException( e,
"Error while reading primary key meta data for namespace "
+ new Namespace.Name( catalog, schema ) );
}
}
}
private TableInformation getTableInformation(
@Nullable String catalogName,
@Nullable String schemaName,
@Nullable String tableName) {
final var qualifiedTableName = new QualifiedTableName(
toIdentifier( catalogName ),
toIdentifier( schemaName ),
toIdentifier( tableName )
);
final var tableInformation =
extractionContext.getDatabaseObjectAccess().locateTableInformation( qualifiedTableName );
if ( tableInformation == null ) {
throw new SchemaExtractionException( "Could not locate table information for " + qualifiedTableName );
}
return tableInformation;
}
protected NameSpacePrimaryKeysInformation extractNameSpacePrimaryKeysInformation(ResultSet resultSet)
throws SQLException {
final var primaryKeysInformation = new NameSpacePrimaryKeysInformation( getIdentifierHelper() );
while ( resultSet.next() ) {
final String currentTableName = resultSet.getString( getResultSetPrimaryKeyTableLabel() );
final String currentPkName = resultSet.getString( getResultSetPrimaryKeyNameLabel() );
final Identifier currentPrimaryKeyIdentifier =
currentPkName == null ? null : toIdentifier( currentPkName );
final var tableInformation = getTableInformation(
resultSet.getString( getResultSetPrimaryKeyCatalogLabel() ),
resultSet.getString( getResultSetPrimaryKeySchemaLabel() ),
currentTableName
);
var primaryKeyInformation =
primaryKeysInformation.getPrimaryKeyInformation( currentTableName );
final List<ColumnInformation> columns;
if ( primaryKeyInformation != null ) {
if ( !Objects.equals( primaryKeyInformation.getPrimaryKeyIdentifier(), currentPrimaryKeyIdentifier ) ) {
throw new SchemaExtractionException( "Encountered primary keys differing name on table "
+ currentTableName );
}
columns = (List<ColumnInformation>) primaryKeyInformation.getColumns();
}
else {
columns = new ArrayList<>();
primaryKeyInformation = new PrimaryKeyInformationImpl( currentPrimaryKeyIdentifier, columns );
primaryKeysInformation.addPrimaryKeyInformation( tableInformation, primaryKeyInformation );
}
final int columnPosition = resultSet.getInt( getResultSetColumnPositionColumn() );
final int index = columnPosition - 1;
// Fill up the array list with nulls up to the desired index, because some JDBC drivers don't return results ordered by column position
while ( columns.size() <= index ) {
columns.add( null );
}
final Identifier columnIdentifier =
toIdentifier( resultSet.getString( getResultSetColumnNameLabel() ) );
columns.set( index, tableInformation.getColumn( columnIdentifier ) );
}
primaryKeysInformation.validate();
return primaryKeysInformation;
}
/**
* Must do the following:
* <ol>
* <li>
* obtain a {@link ResultSet} containing a row for each column
* defined in an index. The {@link ResultSet} must contain the
* following, consistent with the corresponding columns returned
* by {@link DatabaseMetaData#getIndexInfo(String, String, String, boolean, boolean)}
* <ul>
* <li>column label {@link #getResultSetIndexNameLabel} for index name;
* null when TYPE is tableIndexStatistic</li>
* <li>column label {@link #getResultSetIndexTypeLabel} index type:
* <ul>
* <li>
* {@link DatabaseMetaData#tableIndexStatistic} -
* this identifies table statistics that are returned
* in conjunction with a table's index descriptions
* </li>
* <li>
* Any value other than {@link DatabaseMetaData#tableIndexStatistic} -
* this indicates that a table's index description
* (not statisics) is being returned.
* </li>
* </ul>
* Note that Hibernate ignores statistics and does not care
* about the actual type of index.
* </li>
* <li>
* column label {@link #getResultSetColumnNameLabel} -
* column name; <code>null</code> when TYPE is
* {@link DatabaseMetaData#tableIndexStatistic}
* </li>
* </ul>
* The ResultSet must be ordered so that the columns for a
* particular index are in contiguous rows in order of column
* position.
* </li>
* <li> execute {@code processor.process( resultSet )};</li>
* <li>
* release resources whether {@code processor.process( resultSet )}
* executes successfully or not.
* </li>
* </ol>
* <p>
* The {@code catalog}, {@code schemaPattern}, {@code tableNamePattern},
* and {@code columnNamePattern} parameters are as
* specified by {@link DatabaseMetaData#getIndexInfo(String, String, String, boolean, boolean)},
* and are copied here:
* <p>
* @param catalog β a catalog name; must match the catalog name as it is
* stored in this database; "" retrieves those without
* a catalog; null means that the catalog name should
* not be used to narrow the search
* @param schema β a schema name; must match the schema name as it is
* stored in this database; "" retrieves those without
* a schema; null means that the schema name should not
* be used to narrow the search
* @param table β a table name; must match the table name as it is stored
* in this database
* @param unique β when true, return only indices for unique values; when
* false, return indices regardless of whether unique or not
* @param approximate β when true, result is allowed to reflect approximate
* or out of data values; when false, results are
* requested to be accurate
* @param processor - the provided ResultSetProcessor.
* @param <T> - defined by {@code processor}
* @return - defined by {@code processor}
* @throws SQLException - if a database error occurs
*/
protected abstract <T> T processIndexInfoResultSet(
String catalog,
String schema,
@Nullable String table,
boolean unique,
boolean approximate,
ExtractionContext.ResultSetProcessor<T> processor)
throws SQLException;
@Override
public Iterable<IndexInformation> getIndexes(TableInformation tableInformation) {
final var databaseObjectAccess = extractionContext.getDatabaseObjectAccess();
if ( databaseObjectAccess.isCaching() && supportsBulkIndexRetrieval() ) {
return databaseObjectAccess.locateIndexesInformation( tableInformation.getName() );
}
final var tableName = tableInformation.getName();
final Identifier catalog = tableName.getCatalogName();
final Identifier schema = tableName.getSchemaName();
final Map<Identifier, IndexInformationImpl.Builder> builders = new HashMap<>();
try {
processIndexInfoResultSet(
catalog == null ? "" : catalog.getText(),
schema == null ? "" : schema.getText(),
tableName.getTableName().getText(),
false, // DO NOT limit to just unique
true, // DO require up-to-date results
resultSet -> {
while ( resultSet.next() ) {
if ( resultSet.getShort( getResultSetIndexTypeLabel() )
!= DatabaseMetaData.tableIndexStatistic ) {
final Identifier indexIdentifier =
toIdentifier( resultSet.getString( getResultSetIndexNameLabel() ) );
final var builder = indexInformationBuilder( builders, indexIdentifier );
final Identifier columnIdentifier =
toIdentifier( resultSet.getString( getResultSetColumnNameLabel() ) );
final var columnInformation = tableInformation.getColumn( columnIdentifier );
if ( columnInformation == null ) {
// See HHH-10191: this may happen when dealing with Oracle/PostgreSQL function indexes
CORE_LOGGER.logCannotLocateIndexColumnInformation(
columnIdentifier.getText(),
indexIdentifier.getText()
);
}
builder.addColumn( columnInformation );
}
}
return null;
}
);
}
catch (SQLException e) {
throw convertSQLException( e,
"Error accessing index information: "
+ tableInformation.getName() );
}
final List<IndexInformation> indexes = new ArrayList<>( builders.size() );
for ( var builder : builders.values() ) {
final var index = builder.build();
indexes.add( index );
}
return indexes;
}
private static IndexInformationImpl.Builder indexInformationBuilder(
Map<Identifier, IndexInformationImpl.Builder> builders,
Identifier indexIdentifier) {
final var builder = builders.get( indexIdentifier );
if ( builder == null ) {
final var newBuilder = IndexInformationImpl.builder( indexIdentifier );
builders.put( indexIdentifier, newBuilder );
return newBuilder;
}
else {
return builder;
}
}
@Override
public NameSpaceIndexesInformation getIndexes(Identifier catalog, Identifier schema) {
if ( !supportsBulkIndexRetrieval() ) {
throw new UnsupportedOperationException( "Database doesn't support extracting all indexes at once" );
}
else {
try {
return processIndexInfoResultSet(
catalog == null ? "" : catalog.getText(),
schema == null ? "" : schema.getText(),
null,
false,
true,
this::extractNameSpaceIndexesInformation
);
}
catch (SQLException e) {
throw convertSQLException( e,
"Error while reading index information for namespace "
+ new Namespace.Name( catalog, schema ) );
}
}
}
protected NameSpaceIndexesInformation extractNameSpaceIndexesInformation(ResultSet resultSet)
throws SQLException {
final var indexesInformation = new NameSpaceIndexesInformation( getIdentifierHelper() );
while ( resultSet.next() ) {
if ( resultSet.getShort( getResultSetIndexTypeLabel() )
!= DatabaseMetaData.tableIndexStatistic ) {
final var tableInformation = getTableInformation(
resultSet.getString( getResultSetCatalogLabel() ),
resultSet.getString( getResultSetSchemaLabel() ),
resultSet.getString( getResultSetTableNameLabel() )
);
final Identifier indexIdentifier =
toIdentifier( resultSet.getString( getResultSetIndexNameLabel() ) );
final var index = getOrCreateIndexInformation( indexesInformation, indexIdentifier, tableInformation );
final Identifier columnIdentifier =
toIdentifier( resultSet.getString( getResultSetColumnNameLabel() ) );
final var columnInformation = tableInformation.getColumn( columnIdentifier );
if ( columnInformation == null ) {
// See HHH-10191: this may happen when dealing with Oracle/PostgreSQL function indexes
CORE_LOGGER.logCannotLocateIndexColumnInformation(
columnIdentifier.getText(),
indexIdentifier.getText()
);
}
index.getIndexedColumns().add( columnInformation );
}
}
return indexesInformation;
}
private IndexInformation getOrCreateIndexInformation(
NameSpaceIndexesInformation indexesInformation,
Identifier indexIdentifier,
TableInformation tableInformation) {
final var indexes =
indexesInformation.getIndexesInformation( tableInformation.getName().getTableName().getText() );
if ( indexes != null ) {
for ( var index : indexes ) {
if ( indexIdentifier.equals( index.getIndexIdentifier() ) ) {
return index;
}
}
}
final var indexInformation = new IndexInformationImpl( indexIdentifier, new ArrayList<>() );
indexesInformation.addIndexInformation( tableInformation, indexInformation );
return indexInformation;
}
/**
* Must do the following:
* <ol>
* <li>
* obtain a {@link ResultSet} containing a row for each foreign key/
* primary key column making up a foreign key for any existing
* catalog/schema/table combination as specified by the
* {@code catalog}, {@code schema}, and {@code table}
* parameters described below.
* The {@link ResultSet} must contain the following, consistent
* with the corresponding columns returned by {@link DatabaseMetaData#getImportedKeys}:
* <ul>
* <li>
* column label {@link #getResultSetForeignKeyLabel} -
* foreign key name (may be null)
* </li>
* <li>
* column label {@link #getResultSetPrimaryKeyCatalogLabel} -
* primary key table catalog being imported (may be null)
* </li>
* <li>
* column label {@link #getResultSetPrimaryKeySchemaLabel} -
* primary key table schema being imported (may be null)
* </li>
* <li>
* column label {@link #getResultSetPrimaryKeyTableLabel} -
* primary key table name being imported
* </li>
* <li>
* column label {@link #getResultSetForeignKeyColumnNameLabel} -
* foreign key column name
* </li>
* <li>
* column label {@link #getResultSetPrimaryKeyColumnNameLabel} -
* primary key column name being imported
* </li>
* </ul>
* The ResultSet must be ordered by the primary key
* catalog/schema/table and column position within the key.
* </li>
* <li> execute {@code processor.process( resultSet )};</li>
* <li>
* release resources whether {@code processor.process( resultSet )}
* executes successfully or not.
* </li>
* </ol>
* <p>
* The {@code catalog}, {@code schema}, and {@code table}
* parameters are as specified by {@link DatabaseMetaData#getImportedKeys(String, String, String)}
* and are copied here:
*
* @param catalog β a catalog name; must match the catalog name as it is
* stored in the database; "" retrieves those without a
* catalog; null means that the catalog name should not
* be used to narrow the search
* @param schema β a schema name; must match the schema name as it is
* stored in the database; "" retrieves those without a
* schema; null means that the schema name should not be
* used to narrow the search
* @param table β a table name; must match the table name as it is stored
* in the database
* @param processor - the provided ResultSetProcessor.
* @param <T> - defined by {@code processor}
* @return - defined by {@code processor}
* @throws SQLException - if a database error occurs
*/
protected abstract <T> T processImportedKeysResultSet(
String catalog,
String schema,
@Nullable String table,
ExtractionContext.ResultSetProcessor<T> processor)
throws SQLException;
/**
* Must do the following:
* <ol>
* <li>
* obtain a {@link ResultSet} containing a row for each foreign key
* column making up a foreign key for any existing
* foreignCatalog/foreignSchema/foreignTable combination as specified by
* parameters described below.
* The {@link ResultSet} must contain the following, consistent
* with the corresponding columns returned by {@link DatabaseMetaData#getCrossReference}:
* <ul>
* <li>
* column label {@link #getResultSetForeignKeyLabel} -
* foreign key name (may be null)
* </li>
* <li>
* column label {@link #getResultSetPrimaryKeyCatalogLabel} -
* primary key table catalog being imported (may be null)
* </li>
* <li>
* column label {@link #getResultSetPrimaryKeySchemaLabel} -
* primary key table schema being imported (may be null)
* </li>
* <li>
* column label {@link #getResultSetPrimaryKeyTableLabel} -
* primary key table name being imported
* </li>
* <li>
* column label {@link #getResultSetForeignKeyColumnNameLabel} -
* foreign key column name
* </li>
* <li>
* column label {@link #getResultSetPrimaryKeyColumnNameLabel} -
* primary key column name being imported
* </li>
* </ul>
* The ResultSet must be ordered by the primary key
* foreignCatalog/foreignSchema/foreignTable and column position within the key.
* </li>
* <li> execute {@code processor.process( resultSet )};</li>
* <li>
* release resources whether {@code processor.process( resultSet )}
* executes successfully or not.
* </li>
* </ol>
* <p>
* The {@code parentCatalog}, {@code parentSchema}, {@code parentTable},
* {@code foreignCatalog}, {@code foreignSchema}, {@code foreignTable}
* parameters are as specified by {@link DatabaseMetaData#getCrossReference(
* String, String, String, String, String, String)}
* and are copied here:
*
* @param parentCatalog a catalog name; must match the catalog name
* as it is stored in the database; "" retrieves those without a
* catalog; {@code null} means drop catalog name from the selection criteria
* @param parentSchema a schema name; must match the schema name as
* it is stored in the database; "" retrieves those without a schema;
* {@code null} means drop schema name from the selection criteria
* @param parentTable the name of the table that exports the key; must match
* the table name as it is stored in the database
* @param foreignCatalog a catalog name; must match the catalog name as
* it is stored in the database; "" retrieves those without a
* catalog; {@code null} means drop catalog name from the selection criteria
* @param foreignSchema a schema name; must match the schema name as it
* is stored in the database; "" retrieves those without a schema;
* {@code null} means drop schema name from the selection criteria
* @param foreignTable the name of the table that imports the key; must match
* the table name as it is stored in the database
* @param processor - the provided ResultSetProcessor.
* @param <T> - defined by {@code processor}
* @return - defined by {@code processor}
* @throws SQLException - if a database error occurs
* @see #processImportedKeysResultSet(String, String, String,
* ExtractionContext.ResultSetProcessor)
*/
protected abstract <T> T processCrossReferenceResultSet(
String parentCatalog,
String parentSchema,
String parentTable,
String foreignCatalog,
String foreignSchema,
String foreignTable,
ExtractionContext.ResultSetProcessor<T> processor)
throws SQLException;
@Override
public Iterable<ForeignKeyInformation> getForeignKeys(TableInformation tableInformation) {
final var databaseObjectAccess = extractionContext.getDatabaseObjectAccess();
if ( databaseObjectAccess.isCaching() && supportsBulkForeignKeyRetrieval() ) {
return databaseObjectAccess.locateForeignKeyInformation( tableInformation.getName() );
}
final var tableName = tableInformation.getName();
final Identifier catalog = tableName.getCatalogName();
final Identifier schema = tableName.getSchemaName();
final String catalogFilter = catalog == null ? "" : catalog.getText();
final String schemaFilter = schema == null ? "" : schema.getText();
final Map<Identifier, ForeignKeyBuilder> builders = new HashMap<>();
try {
final String table = tableInformation.getName().getTableName().getText();
processImportedKeysResultSet( catalogFilter, schemaFilter, table,
resultSet -> {
process( tableInformation, resultSet, builders );
return null;
} );
final var dialect = getJdbcEnvironment().getDialect();
if ( dialect.useCrossReferenceForeignKeys() ) {
processCrossReferenceResultSet(
null,
null,
dialect.getCrossReferenceParentTableFilter(),
catalogFilter,
schemaFilter,
table,
resultSet -> {
process( tableInformation, resultSet, builders );
return null;
}
);
}
}
catch (SQLException e) {
throw convertSQLException( e,
"Error accessing column metadata: "
+ tableInformation.getName() );
}
final List<ForeignKeyInformation> foreignKeys = new ArrayList<>( builders.size() );
for ( var foreignKeyBuilder : builders.values() ) {
foreignKeys.add( foreignKeyBuilder.build() );
}
return foreignKeys;
}
@Override
public NameSpaceForeignKeysInformation getForeignKeys(Identifier catalog, Identifier schema) {
if ( !supportsBulkForeignKeyRetrieval() ) {
throw new UnsupportedOperationException( "Database doesn't support extracting all foreign keys at once" );
}
else {
try {
return processImportedKeysResultSet(
catalog == null ? "" : catalog.getText(),
schema == null ? "" : schema.getText(),
null,
this::extractNameSpaceForeignKeysInformation
);
}
catch (SQLException e) {
throw convertSQLException( e,
"Error while reading foreign key information for namespace "
+ new Namespace.Name( catalog, schema ) );
}
}
}
protected NameSpaceForeignKeysInformation extractNameSpaceForeignKeysInformation(ResultSet resultSet)
throws SQLException {
final var foreignKeysInformation = new NameSpaceForeignKeysInformation( getIdentifierHelper() );
while ( resultSet.next() ) {
final var tableInformation = getTableInformation(
resultSet.getString( getResultSetForeignKeyCatalogLabel() ),
resultSet.getString( getResultSetForeignKeySchemaLabel() ),
resultSet.getString( getResultSetForeignKeyTableLabel() )
);
final Identifier foreignKeyIdentifier =
toIdentifier( resultSet.getString( getResultSetForeignKeyLabel() ) );
final var foreignKey = getOrCreateForeignKeyInformation( foreignKeysInformation, foreignKeyIdentifier, tableInformation );
final var primaryKeyTableInformation =
extractionContext.getDatabaseObjectAccess()
.locateTableInformation( extractPrimaryKeyTableName( resultSet ) );
if ( primaryKeyTableInformation != null ) {
// the assumption here is that we have not seen this table already based on fully-qualified name
// during previous step of building all table metadata so most likely this is
// not a match based solely on schema/catalog and that another row in this result set
// should match.
final Identifier foreignKeyColumnIdentifier =
toIdentifier( resultSet.getString( getResultSetForeignKeyColumnNameLabel() ) );
final Identifier pkColumnIdentifier =
toIdentifier( resultSet.getString( getResultSetPrimaryKeyColumnNameLabel() ) );
((List<ForeignKeyInformation.ColumnReferenceMapping>) foreignKey.getColumnReferenceMappings()).add(
new ColumnReferenceMappingImpl(
tableInformation.getColumn( foreignKeyColumnIdentifier ),
primaryKeyTableInformation.getColumn( pkColumnIdentifier )
)
);
}
}
return foreignKeysInformation;
}
private ForeignKeyInformation getOrCreateForeignKeyInformation(
NameSpaceForeignKeysInformation foreignKeysInformation,
Identifier foreignKeyIdentifier,
TableInformation tableInformation) {
final var foreignKeys =
foreignKeysInformation.getForeignKeysInformation( tableInformation.getName().getTableName().getText() );
if ( foreignKeys != null ) {
for ( var foreignKey : foreignKeys ) {
if ( foreignKeyIdentifier.equals( foreignKey.getForeignKeyIdentifier() ) ) {
return foreignKey;
}
}
}
final var foreignKeyInformation = new ForeignKeyInformationImpl( foreignKeyIdentifier, new ArrayList<>() );
foreignKeysInformation.addForeignKeyInformation( tableInformation, foreignKeyInformation );
return foreignKeyInformation;
}
private void process(
TableInformation tableInformation,
ResultSet resultSet,
Map<Identifier, ForeignKeyBuilder> fkBuilders)
throws SQLException {
while ( resultSet.next() ) {
// IMPL NOTE: The builder is mainly used to collect the column reference mappings
final Identifier foreignKeyIdentifier =
toIdentifier( resultSet.getString( getResultSetForeignKeyLabel() ) );
final var foreignKeyBuilder = getForeignKeyBuilder( fkBuilders, foreignKeyIdentifier );
final var primaryKeyTableInformation =
extractionContext.getDatabaseObjectAccess()
.locateTableInformation( extractPrimaryKeyTableName( resultSet ) );
if ( primaryKeyTableInformation != null ) {
// the assumption here is that we have not seen this table already based on fully-qualified name
// during previous step of building all table metadata so most likely this is
// not a match based solely on schema/catalog and that another row in this result set
// should match.
final Identifier foreignKeyColumnIdentifier =
toIdentifier( resultSet.getString( getResultSetForeignKeyColumnNameLabel() ) );
final Identifier pkColumnIdentifier =
toIdentifier( resultSet.getString( getResultSetPrimaryKeyColumnNameLabel() ) );
foreignKeyBuilder.addColumnMapping(
tableInformation.getColumn( foreignKeyColumnIdentifier ),
primaryKeyTableInformation.getColumn( pkColumnIdentifier )
);
}
}
}
private ForeignKeyBuilder getForeignKeyBuilder(
Map<Identifier, ForeignKeyBuilder> builders, Identifier foreignKeyIdentifier) {
var foreignKeyBuilder = builders.get( foreignKeyIdentifier );
if ( foreignKeyBuilder == null ) {
foreignKeyBuilder = generateForeignKeyBuilder( foreignKeyIdentifier );
builders.put( foreignKeyIdentifier, foreignKeyBuilder );
}
return foreignKeyBuilder;
}
private ForeignKeyBuilder generateForeignKeyBuilder(Identifier fkIdentifier) {
return new ForeignKeyBuilderImpl( fkIdentifier );
}
protected
|
AbstractInformationExtractorImpl
|
java
|
eclipse-vertx__vert.x
|
vertx-core/src/test/java/io/vertx/tests/http/fileupload/HttpServerFileUploadTest.java
|
{
"start": 1334,
"end": 7161
}
|
class ____ extends SimpleHttpTest {
@Rule
public TemporaryFolder testFolder = new TemporaryFolder();
protected File testDir;
protected HttpServerFileUploadTest(HttpConfig config) {
super(config);
}
@Override
public void setUp() throws Exception {
super.setUp();
testDir = testFolder.newFolder();
}
@Test
public void testFormUploadEmptyFile() {
testFormUploadFile("", false, false, false, false);
}
@Test
public void testFormUploadEmptyFileWithContentLength() {
testFormUploadFile("", true, false, false, false);
}
@Test
public void testFormUploadSmallFile() {
testFormUploadFile(TestUtils.randomAlphaString(100), false, false, false, false);
}
@Test
public void testFormUploadSmallFileWithContentLength() {
testFormUploadFile(TestUtils.randomAlphaString(100), true, false, false, false);
}
@Test
public void testFormUploadMediumFile() {
testFormUploadFile(TestUtils.randomAlphaString(20000), false, false, false, false);
}
@Test
public void testFormUploadMediumFileWithContentLength() {
testFormUploadFile(TestUtils.randomAlphaString(20000), true, false, false, false);
}
@Test
public void testFormUploadLargeFile() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, false, false, false);
}
@Test
public void testFormUploadLargeFileWithContentLength() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), true, false, false, false);
}
@Test
public void testFormUploadEmptyFileStreamToDisk() {
testFormUploadFile("", false, true, false, false);
}
@Test
public void testFormUploadSmallFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(100), false, true, false, false);
}
@Test
public void testFormUploadMediumFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), false, true, false, false);
}
@Test
public void testFormUploadLargeFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, true, false, false);
}
@Test
public void testFormUploadVeryLargeFileStreamToDisk() {
long one_kb = 1024L;
long one_mb = one_kb * 1024L;
long one_gb = one_mb * 1024L;
// long length = one_gb * 10L;
long length = one_mb + 128; // 128MB
Content content = new Content() {
@Override
public long length() {
return length;
}
Buffer chunk_1k = TestUtils.randomBuffer(1024);
long chunkLength = chunk_1k.length();
private void pump(long remaining, WriteStream<Buffer> out, Promise<Void> done) {
while (!out.writeQueueFull()) {
if (remaining > chunkLength) {
out.write(chunk_1k);
remaining -= chunkLength;
} else {
Buffer last = chunk_1k.slice(0, (int)remaining);
out.write(last).onComplete(done);
return;
}
}
long propagated = remaining;
// System.out.println("Full - remaining is " + propagated + "M");
out.drainHandler(v -> {
pump(propagated, out, done);
});
}
@Override
public Future<Void> write(WriteStream<Buffer> out) {
Promise<Void> done = ((ContextInternal)vertx.getOrCreateContext()).promise();
pump(length, out, done);
return done.future();
}
@Override
public boolean verify(Buffer expected) {
return true;
}
};
testFormUploadFile("tmp-0.txt", "tmp-0.txt", content, false, true, false, false);
}
@Test
public void testFormUploadWithExtFilename() {
testFormUploadFile(null, "%c2%a3%20and%20%e2%82%ac%20rates", "the-content", false, true, false, false);
}
@Test
public void testBrokenFormUploadEmptyFile() {
testFormUploadFile("", false, true, true, false);
}
@Test
public void testBrokenFormUploadSmallFile() {
testFormUploadFile(TestUtils.randomAlphaString(100), false, true, true, false);
}
@Test
public void testBrokenFormUploadMediumFile() {
testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), false, true, true, false);
}
@Test
public void testBrokenFormUploadLargeFile() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, true, true, false);
}
@Test
public void testBrokenFormUploadEmptyFileStreamToDisk() {
testFormUploadFile("", false, true, true, false);
}
@Test
public void testBrokenFormUploadSmallFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(100), false, true, true, false);
}
@Test
public void testBrokenFormUploadMediumFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), false, true, true, false);
}
@Test
public void testBrokenFormUploadLargeFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, true, true, false);
}
@Test
public void testCancelFormUploadEmptyFileStreamToDisk() {
testFormUploadFile("", false, true, false, true);
}
@Test
public void testCancelFormUploadSmallFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(100), false, true, false, true);
}
@Test
public void testCancelFormUploadMediumFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(20 * 1024), false, true, false, true);
}
@Test
public void testCancelFormUploadLargeFileStreamToDisk() {
testFormUploadFile(TestUtils.randomAlphaString(4 * 1024 * 1024), false, true, false, true);
}
private void testFormUploadFile(String contentStr, boolean includeLength, boolean streamToDisk, boolean abortClient, boolean cancelStream) {
testFormUploadFile("tmp-0.txt", "tmp-0.txt", contentStr, includeLength, streamToDisk, abortClient, cancelStream);
}
|
HttpServerFileUploadTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.